# backup/backup_manager.py
"""
Système de sauvegarde automatique pour NeuroPulse Monitor Pro v2.0
Gère les sauvegardes de la base de données, configuration et logs
"""

import os
import sys
import shutil
import tarfile
import gzip
import sqlite3
import json
import logging
import schedule
import time
import hashlib
import smtplib
from datetime import datetime, timedelta
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
from email.mime.base import MIMEBase
from email import encoders
import threading
import subprocess
from pathlib import Path
import psutil

# Logging configuration: INFO level, mirrored to a file and to the console.
# NOTE(review): logging.FileHandler opens its file at import time, so this
# module fails to import if /var/log/neuropulse does not exist — confirm the
# deployment creates that directory before this script runs.
logging.basicConfig(
    level=logging.INFO,
    format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',
    handlers=[
        logging.FileHandler('/var/log/neuropulse/backup.log'),
        logging.StreamHandler()
    ]
)
logger = logging.getLogger(__name__)

class BackupManager:
    """Main backup manager for NeuroPulse Monitor Pro.

    Orchestrates full backups (SQLite database, configuration files,
    application tree, recent logs and a system snapshot), retention
    cleanup, archive verification, e-mail notifications and scheduling.
    """

    def __init__(self, config_file='/opt/neuropulse/config.json'):
        """Load configuration and prepare the backup directory.

        Args:
            config_file: Path to the NeuroPulse JSON configuration file.
                Missing or invalid files fall back to an empty config.
        """
        self.config_file = config_file
        self.config = self.load_config()
        # The 'backup' section drives directories, retention and scheduling.
        self.backup_config = self.config.get('backup', {})

        # Default source directories included in a full backup
        self.source_dirs = {
            'app': '/opt/neuropulse/app',
            'config': '/opt/neuropulse',
            'database': '/opt/neuropulse',
            'logs': '/var/log/neuropulse'
        }

        self.backup_base_dir = self.backup_config.get('backup_directory', '/var/backups/neuropulse')
        self.retention_days = self.backup_config.get('retention_days', 30)
        self.max_backup_size = self.backup_config.get('max_size_gb', 10) * 1024 * 1024 * 1024  # in bytes

        # Make sure the backup directory exists
        os.makedirs(self.backup_base_dir, exist_ok=True)

        # Running statistics, persisted as .backup_stats.json in the backup dir
        self.backup_stats = {
            'total_backups': 0,
            'successful_backups': 0,
            'failed_backups': 0,
            'last_backup': None,
            'last_success': None,
            'total_size': 0
        }

        self.load_backup_stats()
    
    def load_config(self):
        """Load the JSON configuration file.

        Returns:
            The parsed configuration dict, or an empty dict when the file
            is missing or contains invalid JSON.
        """
        try:
            handle = open(self.config_file, 'r')
        except FileNotFoundError:
            logger.warning(f"Fichier de configuration non trouvé: {self.config_file}")
            return {}
        with handle:
            try:
                return json.load(handle)
            except json.JSONDecodeError as e:
                logger.error(f"Erreur de parsing JSON: {e}")
                return {}
    
    def load_backup_stats(self):
        """Merge previously persisted statistics into ``self.backup_stats``.

        Reads the hidden ``.backup_stats.json`` file in the backup
        directory; a missing file is a no-op and read errors are logged.
        """
        stats_file = os.path.join(self.backup_base_dir, '.backup_stats.json')
        if not os.path.exists(stats_file):
            return
        try:
            with open(stats_file, 'r') as f:
                self.backup_stats.update(json.load(f))
        except Exception as e:
            logger.error(f"Erreur chargement statistiques: {e}")
    
    def save_backup_stats(self):
        """Persist ``self.backup_stats`` as hidden JSON in the backup dir.

        Non-JSON-serialisable values (e.g. datetimes) are stringified;
        write failures are logged and swallowed.
        """
        stats_file = os.path.join(self.backup_base_dir, '.backup_stats.json')
        try:
            payload = json.dumps(self.backup_stats, indent=2, default=str)
            with open(stats_file, 'w') as f:
                f.write(payload)
        except Exception as e:
            logger.error(f"Erreur sauvegarde statistiques: {e}")
    
    def get_backup_name(self, backup_type='full'):
        """Build a timestamped archive file name for this host.

        Args:
            backup_type: Label embedded in the name (default ``'full'``).

        Returns:
            A name like ``neuropulse_full_<host>_<YYYYmmdd_HHMMSS>.tar.gz``.
        """
        stamp = datetime.now().strftime('%Y%m%d_%H%M%S')
        host = os.uname().nodename
        return f"neuropulse_{backup_type}_{host}_{stamp}.tar.gz"
    
    def calculate_checksum(self, file_path):
        """Compute the MD5 digest of a file, streaming it in 4 KiB chunks.

        Args:
            file_path: Path of the file to hash.

        Returns:
            The hex digest string, or ``None`` when the file cannot be read.
        """
        digest = hashlib.md5()
        try:
            with open(file_path, "rb") as fh:
                chunk = fh.read(4096)
                while chunk:
                    digest.update(chunk)
                    chunk = fh.read(4096)
        except Exception as e:
            logger.error(f"Erreur calcul checksum {file_path}: {e}")
            return None
        return digest.hexdigest()
    
    def create_backup_manifest(self, backup_path, included_files):
        """Write a JSON manifest describing a finished backup archive.

        Args:
            backup_path: Path of the ``.tar.gz`` archive to describe.
            included_files: Archive-relative paths stored in the archive.

        Returns:
            The manifest file path, or ``None`` if it could not be written.
        """
        manifest = {
            'backup_name': os.path.basename(backup_path),
            'created_at': datetime.now().isoformat(),
            'hostname': os.uname().nodename,
            'neuropulse_version': '2.0',
            'backup_type': 'full',
            'included_files': included_files,
            'checksum': self.calculate_checksum(backup_path),
            'size_bytes': os.path.getsize(backup_path),
            'compression': 'gzip'
        }

        # The manifest lives next to the archive, same stem.
        manifest_path = backup_path.replace('.tar.gz', '_manifest.json')
        try:
            with open(manifest_path, 'w') as f:
                json.dump(manifest, f, indent=2)
        except Exception as e:
            logger.error(f"Erreur création manifeste: {e}")
            return None
        logger.info(f"Manifeste créé: {manifest_path}")
        return manifest_path
    
    def backup_database(self, temp_dir):
        """Back up the SQLite database into *temp_dir*.

        Uses the sqlite3 online-backup API for a consistent copy, VACUUMs
        the copy, and writes a plain SQL dump alongside it for recovery.

        Args:
            temp_dir: Directory receiving ``neuropulse.db`` and
                ``neuropulse_dump.sql``.

        Returns:
            List of created file paths, or an empty list on failure.
        """
        logger.info("Sauvegarde de la base de données...")

        db_source = os.path.join(self.source_dirs['database'], 'neuropulse.db')
        db_backup = os.path.join(temp_dir, 'neuropulse.db')

        # sqlite3.connect() silently creates an empty database for a missing
        # source file, which would produce a useless "backup" — fail early.
        if not os.path.exists(db_source):
            logger.error(f"Erreur sauvegarde base de données: fichier introuvable {db_source}")
            return []

        source_conn = None
        backup_conn = None
        try:
            source_conn = sqlite3.connect(db_source)
            backup_conn = sqlite3.connect(db_backup)

            # Consistent online copy, then VACUUM to compact the copy.
            source_conn.backup(backup_conn)
            backup_conn.execute('VACUUM')

            # Plain-text SQL dump for recovery. iterdump() removes the
            # dependency on an external `sqlite3` CLI binary (the original
            # shelled out without checking the return code).
            sql_dump = os.path.join(temp_dir, 'neuropulse_dump.sql')
            with open(sql_dump, 'w') as f:
                for line in backup_conn.iterdump():
                    f.write(line + '\n')

            logger.info("Base de données sauvegardée avec succès")
            return [db_backup, sql_dump]

        except Exception as e:
            logger.error(f"Erreur sauvegarde base de données: {e}")
            return []
        finally:
            # Always release connections — the original leaked both when an
            # exception was raised mid-way.
            if source_conn is not None:
                source_conn.close()
            if backup_conn is not None:
                backup_conn.close()
    
    def backup_configuration(self, temp_dir):
        """Copy configuration files into ``temp_dir/config``.

        Copies a fixed set of file names plus ``*.conf``/``*.cfg`` glob
        matches from the configuration source directory.

        Args:
            temp_dir: Staging directory for the backup.

        Returns:
            List of copied destination paths (empty on failure).
        """
        # Hoisted: the original re-ran `import glob` inside the inner loop.
        import glob

        logger.info("Sauvegarde de la configuration...")

        config_files = []
        config_source = self.source_dirs['config']

        try:
            # Configuration files worth keeping.
            config_patterns = [
                'config.json',
                '*.conf',
                '*.cfg',
                'settings.py',
                'requirements.txt'
            ]

            config_backup_dir = os.path.join(temp_dir, 'config')
            os.makedirs(config_backup_dir, exist_ok=True)

            for pattern in config_patterns:
                if pattern.startswith('*'):
                    # Wildcard pattern: copy every matching regular file.
                    sources = [p for p in glob.glob(os.path.join(config_source, pattern))
                               if os.path.isfile(p)]
                else:
                    # Specific file name: copy it when present.
                    candidate = os.path.join(config_source, pattern)
                    sources = [candidate] if os.path.exists(candidate) else []

                for file_path in sources:
                    dest_path = os.path.join(config_backup_dir, os.path.basename(file_path))
                    shutil.copy2(file_path, dest_path)
                    config_files.append(dest_path)

            logger.info(f"Configuration sauvegardée: {len(config_files)} fichiers")
            return config_files

        except Exception as e:
            logger.error(f"Erreur sauvegarde configuration: {e}")
            return []
    
    def backup_application(self, temp_dir):
        """Copy the application tree into ``temp_dir/app``.

        Transient artefacts (bytecode caches, logs, VCS data, virtualenvs,
        node_modules) are excluded from the copy.

        Args:
            temp_dir: Staging directory for the backup.

        Returns:
            ``[<copied app dir>]`` on success, else an empty list.
        """
        logger.info("Sauvegarde de l'application...")

        app_source = self.source_dirs['app']
        app_backup_dir = os.path.join(temp_dir, 'app')

        try:
            if not os.path.exists(app_source):
                logger.warning(f"Répertoire application non trouvé: {app_source}")
                return []

            # Skip anything transient or regenerable.
            excluded = shutil.ignore_patterns(
                '*.pyc', '__pycache__', '*.log',
                '.git', 'node_modules', 'venv'
            )
            shutil.copytree(app_source, app_backup_dir, ignore=excluded)

            # Report how many files actually made it into the copy.
            file_count = sum(len(names) for _, _, names in os.walk(app_backup_dir))
            logger.info(f"Application sauvegardée: {file_count} fichiers")
            return [app_backup_dir]

        except Exception as e:
            logger.error(f"Erreur sauvegarde application: {e}")
            return []
    
    def backup_logs(self, temp_dir):
        """Copy recent log files (modified within 48 h) into ``temp_dir/logs``.

        Args:
            temp_dir: Staging directory for the backup.

        Returns:
            List of copied log file paths (empty on failure).
        """
        import glob

        logger.info("Sauvegarde des logs...")

        logs_source = self.source_dirs['logs']
        logs_backup_dir = os.path.join(temp_dir, 'logs')

        try:
            os.makedirs(logs_backup_dir, exist_ok=True)

            # Only these well-known log names are worth archiving.
            log_patterns = [
                'neuropulse*.log',
                'api.log',
                'backup.log',
                'error.log'
            ]

            # Anything older than 48 hours is skipped to bound archive size.
            time_limit = datetime.now() - timedelta(hours=48)

            log_files = []
            for pattern in log_patterns:
                for log_file in glob.glob(os.path.join(logs_source, pattern)):
                    if not os.path.isfile(log_file):
                        continue
                    modified = datetime.fromtimestamp(os.path.getmtime(log_file))
                    if modified <= time_limit:
                        continue
                    dest_path = os.path.join(logs_backup_dir, os.path.basename(log_file))
                    shutil.copy2(log_file, dest_path)
                    log_files.append(dest_path)

            logger.info(f"Logs sauvegardés: {len(log_files)} fichiers")
            return log_files

        except Exception as e:
            logger.error(f"Erreur sauvegarde logs: {e}")
            return []
    
    def create_system_snapshot(self, temp_dir):
        """Dump a JSON snapshot of host state next to the backup payload.

        Captures platform info, disk/memory usage, network interfaces, load
        averages, installed pip packages and running systemd services. The
        package/service probes are best-effort: failures leave their
        sections empty.

        Args:
            temp_dir: Directory receiving ``system_snapshot.json``.

        Returns:
            ``[<snapshot path>]`` on success, else an empty list.
        """
        logger.info("Création de l'instantané système...")

        try:
            uname = os.uname()  # hoisted: the original queried it six times
            snapshot = {
                'timestamp': datetime.now().isoformat(),
                'hostname': uname.nodename,
                'platform': {
                    'system': uname.sysname,
                    'release': uname.release,
                    'version': uname.version,
                    'machine': uname.machine
                },
                'python_version': sys.version,
                'disk_usage': {},
                'memory_info': {},
                'network_interfaces': [],
                'installed_packages': [],
                'running_services': [],
                'system_load': {}
            }

            # Per-partition disk usage (skip mounts we may not access).
            for partition in psutil.disk_partitions():
                try:
                    usage = psutil.disk_usage(partition.mountpoint)
                except PermissionError:
                    continue
                snapshot['disk_usage'][partition.device] = {
                    'mountpoint': partition.mountpoint,
                    'fstype': partition.fstype,
                    'total': usage.total,
                    'used': usage.used,
                    'free': usage.free,
                    'percent': round((usage.used / usage.total) * 100, 2)
                }

            # Memory information
            memory = psutil.virtual_memory()
            snapshot['memory_info'] = {
                'total': memory.total,
                'available': memory.available,
                'percent': memory.percent,
                'used': memory.used,
                'free': memory.free
            }

            # Network interfaces and their addresses
            for interface, addrs in psutil.net_if_addrs().items():
                interface_info = {'name': interface, 'addresses': []}
                for addr in addrs:
                    interface_info['addresses'].append({
                        'family': str(addr.family),
                        'address': addr.address,
                        'netmask': addr.netmask,
                        'broadcast': addr.broadcast
                    })
                snapshot['network_interfaces'].append(interface_info)

            # Load averages are POSIX-only; record None elsewhere.
            load = os.getloadavg() if hasattr(os, 'getloadavg') else (None, None, None)
            snapshot['system_load'] = {
                'load_1m': load[0],
                'load_5m': load[1],
                'load_15m': load[2],
                'cpu_count': psutil.cpu_count(),
                'boot_time': psutil.boot_time()
            }

            # Installed packages (best-effort; pip may be absent).
            try:
                result = subprocess.run(['pip', 'freeze'],
                                        capture_output=True, text=True, timeout=60)
                if result.returncode == 0:
                    snapshot['installed_packages'] = result.stdout.strip().split('\n')
            except Exception:
                # Narrowed from a bare `except:` so Ctrl-C still propagates.
                pass

            # Running services (best-effort; systemd may be absent).
            try:
                result = subprocess.run(
                    ['systemctl', 'list-units', '--type=service', '--state=running', '--no-legend'],
                    capture_output=True, text=True, timeout=60)
                if result.returncode == 0:
                    for line in result.stdout.strip().split('\n'):
                        if line.strip():
                            snapshot['running_services'].append(line.split()[0])
            except Exception:
                pass

            # Persist the snapshot
            snapshot_file = os.path.join(temp_dir, 'system_snapshot.json')
            with open(snapshot_file, 'w') as f:
                json.dump(snapshot, f, indent=2)

            logger.info("Instantané système créé")
            return [snapshot_file]

        except Exception as e:
            logger.error(f"Erreur création instantané système: {e}")
            return []
    
    def create_full_backup(self):
        """Run a complete backup: stage, archive, record stats, notify.

        Stages the database, configuration, application, recent logs and a
        system snapshot in a temporary directory, packs them into a gzipped
        tarball under the backup directory, writes a manifest, updates the
        persisted statistics and sends a notification e-mail.

        Returns:
            On success: dict with ``success=True``, ``backup_path``,
            ``backup_size``, ``execution_time``, ``files_count`` and
            ``manifest_path``. On failure: ``success=False`` plus ``error``
            and ``execution_time``.
        """
        import tempfile  # hoisted out of the try block; kept function-scoped

        logger.info("Début de la sauvegarde complète...")

        start_time = time.time()
        backup_name = self.get_backup_name('full')
        backup_path = os.path.join(self.backup_base_dir, backup_name)
        temp_dir = None

        try:
            temp_dir = tempfile.mkdtemp(prefix='neuropulse_backup_')
            logger.info(f"Répertoire temporaire: {temp_dir}")

            all_files = []

            # Stage each component; every helper returns the files it produced.
            all_files.extend(self.backup_database(temp_dir))
            all_files.extend(self.backup_configuration(temp_dir))
            all_files.extend(self.backup_application(temp_dir))
            all_files.extend(self.backup_logs(temp_dir))
            all_files.extend(self.create_system_snapshot(temp_dir))

            if not all_files:
                # RuntimeError instead of the original bare Exception; it is
                # still handled by the `except Exception` block below.
                raise RuntimeError("Aucun fichier à sauvegarder")

            # Build the compressed archive.
            logger.info(f"Création de l'archive: {backup_path}")
            with tarfile.open(backup_path, 'w:gz') as tar:
                for file_path in all_files:
                    if os.path.exists(file_path):
                        # Store paths relative to the staging directory.
                        arcname = os.path.relpath(file_path, temp_dir)
                        tar.add(file_path, arcname=arcname)

            if not os.path.exists(backup_path):
                raise RuntimeError("Échec de la création de l'archive")

            backup_size = os.path.getsize(backup_path)

            # Oversized backups are kept but flagged for the operator.
            if backup_size > self.max_backup_size:
                logger.warning(f"Sauvegarde très volumineuse: {backup_size / (1024**3):.2f} GB")

            manifest_path = self.create_backup_manifest(
                backup_path,
                [os.path.relpath(f, temp_dir) for f in all_files])

            # Record the successful run.
            execution_time = time.time() - start_time
            self.backup_stats.update({
                'total_backups': self.backup_stats['total_backups'] + 1,
                'successful_backups': self.backup_stats['successful_backups'] + 1,
                'last_backup': datetime.now().isoformat(),
                'last_success': datetime.now().isoformat(),
                'total_size': self.backup_stats['total_size'] + backup_size
            })
            self.save_backup_stats()

            logger.info(f"Sauvegarde complète réussie en {execution_time:.2f}s")
            logger.info(f"Taille: {backup_size / (1024**2):.2f} MB")
            logger.info(f"Fichiers inclus: {len(all_files)}")

            self.send_backup_notification(True, backup_name, backup_size, execution_time)

            return {
                'success': True,
                'backup_path': backup_path,
                'backup_size': backup_size,
                'execution_time': execution_time,
                'files_count': len(all_files),
                'manifest_path': manifest_path
            }

        except Exception as e:
            logger.error(f"Erreur lors de la sauvegarde: {e}")

            # Remove any partial archive so broken backups never linger.
            if os.path.exists(backup_path):
                os.remove(backup_path)

            # Record the failed run.
            self.backup_stats.update({
                'total_backups': self.backup_stats['total_backups'] + 1,
                'failed_backups': self.backup_stats['failed_backups'] + 1,
                'last_backup': datetime.now().isoformat()
            })
            self.save_backup_stats()

            self.send_backup_notification(False, backup_name, 0, time.time() - start_time, str(e))

            return {
                'success': False,
                'error': str(e),
                'execution_time': time.time() - start_time
            }

        finally:
            # Always discard the staging directory, success or failure.
            if temp_dir and os.path.exists(temp_dir):
                shutil.rmtree(temp_dir, ignore_errors=True)
    
    def cleanup_old_backups(self):
        """Delete backup archives older than the retention window.

        Removes each expired ``neuropulse_*.tar.gz`` in the backup
        directory together with its ``*_manifest.json`` companion.

        Returns:
            ``{'deleted_count': int, 'freed_space': int}`` on success, or
            ``{'error': str}`` on failure.
        """
        logger.info("Nettoyage des anciennes sauvegardes...")

        try:
            cutoff_date = datetime.now() - timedelta(days=self.retention_days)
            deleted_count = 0
            freed_space = 0

            for filename in os.listdir(self.backup_base_dir):
                if not (filename.startswith('neuropulse_') and filename.endswith('.tar.gz')):
                    continue
                file_path = os.path.join(self.backup_base_dir, filename)
                file_mtime = datetime.fromtimestamp(os.path.getmtime(file_path))
                if file_mtime >= cutoff_date:
                    continue

                file_size = os.path.getsize(file_path)
                os.remove(file_path)
                deleted_count += 1
                freed_space += file_size

                # Drop the manifest that described the deleted archive.
                manifest_path = file_path.replace('.tar.gz', '_manifest.json')
                if os.path.exists(manifest_path):
                    os.remove(manifest_path)

                # Fixed: the original logged the literal placeholder
                # "(unknown)" instead of the deleted file's name.
                logger.info(f"Sauvegarde supprimée: {filename}")

            if deleted_count > 0:
                logger.info(f"Nettoyage terminé: {deleted_count} sauvegardes supprimées")
                logger.info(f"Espace libéré: {freed_space / (1024**2):.2f} MB")
            else:
                logger.info("Aucune sauvegarde à supprimer")

            return {
                'deleted_count': deleted_count,
                'freed_space': freed_space
            }

        except Exception as e:
            logger.error(f"Erreur lors du nettoyage: {e}")
            return {'error': str(e)}
    
    def verify_backup(self, backup_path):
        """Check a backup archive's integrity.

        Verifies the file exists, matches its manifest checksum (when a
        manifest is present), opens as a gzipped tar, and that a random
        sample of members can actually be read back.

        Args:
            backup_path: Path to the ``.tar.gz`` archive.

        Returns:
            ``{'success': True}`` or ``{'success': False, 'error': str}``.
        """
        logger.info(f"Vérification de la sauvegarde: {backup_path}")

        try:
            if not os.path.exists(backup_path):
                return {'success': False, 'error': 'Fichier non trouvé'}

            # Compare against the recorded checksum when a manifest exists.
            manifest_path = backup_path.replace('.tar.gz', '_manifest.json')
            if os.path.exists(manifest_path):
                with open(manifest_path, 'r') as f:
                    manifest = json.load(f)

                current_checksum = self.calculate_checksum(backup_path)
                if manifest.get('checksum') != current_checksum:
                    return {'success': False, 'error': 'Checksum invalide'}

            with tarfile.open(backup_path, 'r:gz') as tar:
                members = tar.getmembers()
                if len(members) == 0:
                    return {'success': False, 'error': 'Archive vide'}

                # Spot-check a random sample. The data is actually read:
                # extractfile() alone is lazy, so the original never touched
                # the member bytes and could not detect corruption.
                import random
                sample_size = min(5, len(members))
                for member in random.sample(members, sample_size):
                    try:
                        extracted = tar.extractfile(member)
                        if extracted is not None:  # None for non-regular members
                            extracted.read()
                    except Exception:  # narrowed from a bare `except:`
                        return {'success': False, 'error': f'Fichier corrompu: {member.name}'}

            logger.info("Vérification de sauvegarde réussie")
            return {'success': True}

        except Exception as e:
            logger.error(f"Erreur vérification sauvegarde: {e}")
            return {'success': False, 'error': str(e)}
    
    def list_backups(self):
        """Enumerate backup archives in the backup directory.

        Returns:
            List of dicts (newest first) with ``filename``, ``path``,
            ``size``, ``created``, ``verified`` and, when available, the
            parsed ``manifest``.
        """
        backups = []

        try:
            for filename in os.listdir(self.backup_base_dir):
                if not (filename.startswith('neuropulse_') and filename.endswith('.tar.gz')):
                    continue

                file_path = os.path.join(self.backup_base_dir, filename)
                file_stat = os.stat(file_path)
                entry = {
                    'filename': filename,
                    'path': file_path,
                    'size': file_stat.st_size,
                    'created': datetime.fromtimestamp(file_stat.st_mtime).isoformat(),
                    'verified': False
                }

                # Attach the manifest when present and parseable; a broken
                # manifest only costs the extra metadata, not the listing.
                manifest_path = file_path.replace('.tar.gz', '_manifest.json')
                if os.path.exists(manifest_path):
                    try:
                        with open(manifest_path, 'r') as f:
                            entry['manifest'] = json.load(f)
                    except Exception:  # narrowed from a bare `except:`
                        pass

                backups.append(entry)

            # Newest first.
            backups.sort(key=lambda item: item['created'], reverse=True)

        except Exception as e:
            logger.error(f"Erreur listage sauvegardes: {e}")

        return backups
    
    def send_backup_notification(self, success, backup_name, size, duration, error_msg=None):
        """E-mail the outcome of a backup run, if notifications are enabled.

        Args:
            success: Whether the backup completed.
            backup_name: Archive file name being reported.
            size: Archive size in bytes (0 on failure).
            duration: Run duration in seconds.
            error_msg: Failure description (used when ``success`` is False).
        """
        try:
            email_config = self.config.get('notifications', {}).get('email', {})
            if not email_config.get('enabled', False):
                # Notifications are opt-in; nothing to do.
                return

            now = datetime.now().strftime('%Y-%m-%d %H:%M:%S')
            if success:
                subject = f"✅ Sauvegarde NeuroPulse réussie - {backup_name}"
                body = f"""
Sauvegarde réussie !

Détails:
- Nom: {backup_name}
- Taille: {size / (1024**2):.2f} MB
- Durée: {duration:.2f} secondes
- Date: {now}

Système NeuroPulse Monitor Pro
"""
            else:
                subject = f"❌ Échec sauvegarde NeuroPulse - {backup_name}"
                body = f"""
Échec de la sauvegarde !

Détails:
- Nom: {backup_name}
- Erreur: {error_msg}
- Durée: {duration:.2f} secondes
- Date: {now}

Veuillez vérifier les logs et corriger le problème.

Système NeuroPulse Monitor Pro
"""

            self._send_email(email_config, subject, body)

        except Exception as e:
            logger.error(f"Erreur envoi notification: {e}")
    
    def _send_email(self, email_config, subject, body):
        """Send a plain-text notification e-mail via SMTP.

        Args:
            email_config: Dict with ``smtp_server`` plus optional
                ``smtp_port``, ``from_email``, ``to_emails``, ``tls_enabled``
                and ``smtp_username``/``smtp_password``.
            subject: Message subject line.
            body: Plain-text message body.
        """
        try:
            message = MIMEMultipart()
            message['From'] = email_config.get('from_email', 'neuropulse@localhost')
            message['To'] = ', '.join(email_config.get('to_emails', []))
            message['Subject'] = subject
            message.attach(MIMEText(body, 'plain'))

            server = smtplib.SMTP(email_config['smtp_server'],
                                  email_config.get('smtp_port', 587))
            if email_config.get('tls_enabled', True):
                server.starttls()

            # Authenticate only when both credentials are configured.
            username = email_config.get('smtp_username')
            password = email_config.get('smtp_password')
            if username and password:
                server.login(username, password)

            server.send_message(message)
            server.quit()

            logger.info("Notification email envoyée")

        except Exception as e:
            logger.error(f"Erreur envoi email: {e}")
    
    def schedule_backups(self):
        """Register backup and cleanup jobs with the `schedule` library.

        Reads the ``backup.schedule`` configuration section: a daily backup
        (``daily_time``), an optional weekly backup
        (``weekly_day``/``weekly_time``) and a weekly retention cleanup.
        Does nothing when scheduling is not enabled.
        """
        backup_schedule = self.backup_config.get('schedule', {})
        if not backup_schedule.get('enabled', False):
            return

        # Daily full backup.
        daily_time = backup_schedule.get('daily_time', '02:00')
        schedule.every().day.at(daily_time).do(self.create_full_backup)
        logger.info(f"Sauvegarde quotidienne programmée à {daily_time}")

        # Optional weekly full backup on a configurable weekday.
        if backup_schedule.get('weekly_enabled', False):
            weekly_day = backup_schedule.get('weekly_day', 'sunday')
            weekly_time = backup_schedule.get('weekly_time', '01:00')
            getattr(schedule.every(), weekly_day).at(weekly_time).do(self.create_full_backup)
            logger.info(f"Sauvegarde hebdomadaire programmée le {weekly_day} à {weekly_time}")

        # Weekly retention cleanup.
        schedule.every().week.do(self.cleanup_old_backups)
        logger.info("Nettoyage hebdomadaire programmé")
    
    def run_scheduler(self):
        """Run the scheduling loop until interrupted.

        Registers the configured jobs, then polls the `schedule` library
        once per minute. Ctrl-C stops the loop; any other error is logged
        and polling resumes after a minute.
        """
        logger.info("Démarrage du planificateur de sauvegardes...")

        self.schedule_backups()

        running = True
        while running:
            try:
                schedule.run_pending()
                time.sleep(60)  # poll once per minute
            except KeyboardInterrupt:
                logger.info("Arrêt du planificateur de sauvegardes")
                running = False
            except Exception as e:
                logger.error(f"Erreur dans le planificateur: {e}")
                time.sleep(60)

def main():
    """CLI entry point: dispatch backup/cleanup/list/verify/schedule actions."""
    import argparse

    parser = argparse.ArgumentParser(description='Gestionnaire de sauvegardes NeuroPulse')
    parser.add_argument('action', choices=['backup', 'cleanup', 'list', 'verify', 'schedule'],
                       help='Action à effectuer')
    parser.add_argument('--config', default='/opt/neuropulse/config.json',
                       help='Fichier de configuration')
    parser.add_argument('--file', help='Fichier de sauvegarde à vérifier')

    args = parser.parse_args()
    manager = BackupManager(args.config)
    action = args.action

    if action == 'backup':
        outcome = manager.create_full_backup()
        if not outcome['success']:
            print(f"❌ Échec de la sauvegarde: {outcome['error']}")
            sys.exit(1)
        print(f"✅ Sauvegarde réussie: {outcome['backup_path']}")
        print(f"   Taille: {outcome['backup_size'] / (1024**2):.2f} MB")
        print(f"   Durée: {outcome['execution_time']:.2f}s")

    elif action == 'cleanup':
        outcome = manager.cleanup_old_backups()
        if 'error' in outcome:
            print(f"❌ Erreur nettoyage: {outcome['error']}")
        else:
            print(f"✅ Nettoyage terminé: {outcome['deleted_count']} sauvegardes supprimées")

    elif action == 'list':
        entries = manager.list_backups()
        if not entries:
            print("ℹ️  Aucune sauvegarde trouvée")
        else:
            print(f"📋 {len(entries)} sauvegarde(s) trouvée(s):")
            for entry in entries:
                size_mb = entry['size'] / (1024**2)
                print(f"   • {entry['filename']} ({size_mb:.2f} MB) - {entry['created']}")

    elif action == 'verify':
        if not args.file:
            print("❌ Fichier de sauvegarde requis pour la vérification")
            sys.exit(1)
        outcome = manager.verify_backup(args.file)
        if outcome['success']:
            print(f"✅ Sauvegarde valide: {args.file}")
        else:
            print(f"❌ Sauvegarde invalide: {outcome['error']}")

    elif action == 'schedule':
        manager.run_scheduler()

if __name__ == '__main__':
    main()