from django.http import JsonResponse
from django.views.decorators.csrf import csrf_exempt
from django.views.decorators.http import require_POST
from .backup_utils import perform_base_backup, restore_to_base_backup
import os
import json
import logging
from datetime import datetime
from apscheduler.schedulers.background import BackgroundScheduler
from django.conf import settings
import math

logger = logging.getLogger(__name__)

# Initialize the background scheduler
scheduler = BackgroundScheduler()


def scheduled_backup():
    """Scheduled backup task."""
    try:
        backup_path = perform_base_backup()
        logger.info(f"Scheduled backup completed: {backup_path}")
        # Update container categories (if the module is available)
        try:
            from container.utils import update_container_categories_task, reconcile_material_history
            update_container_categories_task()
            reconcile_material_history()
            logger.info("Scheduled container category update completed")
        except ImportError:
            logger.warning("Container category update module not found; skipping update")
    except Exception as e:
        logger.error(f"Scheduled backup failed: {str(e)}")


# Start the scheduled backup job (runs every 6 hours)
if not scheduler.running:
    scheduler.add_job(
        scheduled_backup,
        'cron',
        hour='*/6',   # every 6 hours
        minute=0,     # at minute 0
        id='db_backup_job'
    )
    scheduler.start()
    logger.info("Scheduled backup job started")


def get_backup_files(page=1, page_size=5):
    """Return the list of backup files (paginated)."""
    backup_dir = "E:/code/backup/postgres"
    all_backups = []

    # Walk the backup directory
    for root, dirs, files in os.walk(backup_dir):
        for file in files:
            if file.endswith(".backup"):
                file_path = os.path.join(root, file)
                file_size = os.path.getsize(file_path)
                timestamp = os.path.getmtime(file_path)
                all_backups.append({
                    "name": file,
                    "path": file_path,
                    "size": f"{file_size / (1024 * 1024):.2f} MB",
                    "date": datetime.fromtimestamp(timestamp).strftime("%Y-%m-%d %H:%M:%S")
                })

    # Sort by date, newest first
    all_backups.sort(key=lambda x: x["date"], reverse=True)

    # Pagination
    total_items = len(all_backups)
    total_pages = math.ceil(total_items / page_size)
    start_index = (page - 1) * page_size
    end_index = min(start_index + page_size, total_items)

    return {
        "backups": all_backups[start_index:end_index],
        "page": page,
        "page_size": page_size,
        "total_items": total_items,
        "total_pages": total_pages
    }


def get_base_backups(page=1, page_size=5):
    """Return the list of base backups (paginated)."""
    base_backup_dir = "E:/code/backup/postgres/base_backup"
    all_backups = []

    # Iterate over the base backup directory
    for dir_name in os.listdir(base_backup_dir):
        dir_path = os.path.join(base_backup_dir, dir_name)
        if os.path.isdir(dir_path):
            timestamp = os.path.getmtime(dir_path)
            all_backups.append({
                "name": dir_name,
                "path": dir_path,
                "date": datetime.fromtimestamp(timestamp).strftime("%Y-%m-%d %H:%M:%S")
            })

    # Sort by date, newest first
    all_backups.sort(key=lambda x: x["date"], reverse=True)

    # Pagination
    total_items = len(all_backups)
    total_pages = math.ceil(total_items / page_size)
    start_index = (page - 1) * page_size
    end_index = min(start_index + page_size, total_items)

    return {
        "backups": all_backups[start_index:end_index],
        "page": page,
        "page_size": page_size,
        "total_items": total_items,
        "total_pages": total_pages
    }


@csrf_exempt
@require_POST
def trigger_backup(request):
    """API endpoint to trigger a backup manually."""
    try:
        backup_path = perform_base_backup()
        return JsonResponse({
            'status': 'success',
            'message': 'Database backup completed',
            'path': backup_path
        })
    except Exception as e:
        return JsonResponse({
            'status': 'error',
            'message': str(e)
        }, status=500)


@csrf_exempt
@require_POST
def list_backups(request):
    """API endpoint to list backup files (paginated)."""
    try:
        data = json.loads(request.body)
        backup_type = data.get('type', 'file')
        page = data.get('page', 1)
        page_size = data.get('page_size', 5)

        if backup_type == 'file':
            result = get_backup_files(page, page_size)
        elif backup_type == 'base':
            result = get_base_backups(page, page_size)
        else:
            return JsonResponse({
                'status': 'error',
                'message': 'Invalid backup type'
            }, status=400)

        return JsonResponse({
            'status': 'success',
            'data': result
        })
    except Exception as e:
        logger.error(f"Failed to list backups: {str(e)}")
        return JsonResponse({
            'status': 'error',
            'message': str(e)
        }, status=500)


@csrf_exempt
@require_POST
def restore_to_point(request):
    """API endpoint to perform a point-in-time restore."""
    try:
        data = json.loads(request.body)
        base_backup = data.get('base_backup')

        if not base_backup or not os.path.exists(base_backup):
            return JsonResponse({
                'status': 'error',
                'message': 'Invalid base backup path'
            }, status=400)

        # Pause the scheduled backup job
        scheduler.pause_job('db_backup_job')
        logger.info("Scheduled backup job paused")

        # Perform the point-in-time restore
        restore_to_base_backup(base_backup)

        # Resume the scheduled backup job
        scheduler.resume_job('db_backup_job')
        logger.info("Scheduled backup job resumed")

        return JsonResponse({
            'status': 'success',
            'message': f'Successfully restored to {base_backup}'
        })
    except Exception as e:
        logger.error(f"Point-in-time restore failed: {str(e)}")
        # Make sure the scheduled backup job is resumed
        if scheduler.get_job('db_backup_job') and scheduler.get_job('db_backup_job').next_run_time is None:
            scheduler.resume_job('db_backup_job')
            logger.info("Scheduled backup job resumed after restore failure")
        return JsonResponse({
            'status': 'error',
            'message': str(e)
        }, status=500)
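# --- Usage sketch (illustrative only) ---------------------------------------
# A minimal sketch of how these views might be wired into URL routing. The
# module path, URL prefixes, and app layout below are assumptions for
# illustration, not part of this project:
#
#     # urls.py (hypothetical)
#     from django.urls import path
#     from . import views
#
#     urlpatterns = [
#         path('api/backup/trigger/', views.trigger_backup),
#         path('api/backup/list/', views.list_backups),
#         path('api/backup/restore/', views.restore_to_point),
#     ]
#
# All three endpoints expect POST requests; list_backups and restore_to_point
# additionally expect a JSON body, e.g. {"type": "base", "page": 1} or
# {"base_backup": "<path to a base backup directory>"}.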