#!/usr/bin/env python3
"""
Système de surveillance pour le scraper Jobs.ch
Monitore les performances, la mémoire, les processus et génère des alertes
"""

import psutil
import json
import time
import os
import logging
import subprocess
import threading
from datetime import datetime, timedelta
from pathlib import Path

class SystemMonitor:
    """Monitoring system for the Jobs.ch scraper.

    Collects system metrics (CPU, memory, disk, network) via psutil,
    watches the scraper process (through its PID file) and its status
    file, analyzes log files for errors/warnings, raises threshold
    alerts, and persists periodic snapshots under logs/monitoring_*.json.
    """

    def __init__(self, config_file='configs/monitor_config.json'):
        # Path to the JSON configuration; created with defaults if absent.
        self.config_file = config_file
        self.config = self.load_config()
        self.setup_logging()
        # Flag driving the monitoring loop; toggled by start/stop_monitoring.
        self.monitoring_active = False
        # In-memory alert history, appended to by run_monitoring_cycle().
        self.alerts = []

    @staticmethod
    def _deep_merge(base, override):
        """Recursively merge *override* into *base*; return a new dict.

        Nested dicts are merged key by key; any other value in *override*
        replaces the default. This keeps default keys that a partial user
        config does not mention — the previous shallow ``{**a, **b}``
        merge dropped sibling defaults inside 'monitoring'/'scraper'/'alerts'.
        """
        merged = dict(base)
        for key, value in override.items():
            if isinstance(value, dict) and isinstance(merged.get(key), dict):
                merged[key] = SystemMonitor._deep_merge(merged[key], value)
            else:
                merged[key] = value
        return merged

    def load_config(self):
        """Load the monitoring configuration.

        Returns the defaults deep-merged with the user's JSON file when
        it exists. If the file is missing, a default config file is
        written and the defaults returned. A corrupt/unreadable file is
        left untouched and the defaults are used (previously the user's
        file was silently overwritten with defaults).
        """
        default_config = {
            "monitoring": {
                "check_interval": 10,  # seconds between monitoring cycles
                "max_memory_usage_mb": 1024,  # 1 GB
                "max_cpu_usage_percent": 80,
                "max_disk_usage_percent": 90,
                "max_log_file_size_mb": 50,
                "alert_email": None
            },
            "scraper": {
                "status_file": "scraper_status.json",
                "log_file": "scraper.log",
                "debug_log_file": "scraper_debug.log",
                "pid_file": "scraper.pid"
            },
            "alerts": {
                "enabled": True,
                "max_alerts_per_hour": 10,
                "alert_levels": ["WARNING", "ERROR", "CRITICAL"]
            }
        }

        if os.path.exists(self.config_file):
            try:
                with open(self.config_file, 'r', encoding='utf-8') as f:
                    config = json.load(f)
                # Deep merge so partial user sections keep default siblings.
                return self._deep_merge(default_config, config)
            except (OSError, ValueError) as e:
                # json.JSONDecodeError is a ValueError subclass.
                print(f"Erreur lecture config: {e}")
                # Fall back to defaults WITHOUT clobbering the user's file.
                return default_config

        # Create the config file with defaults since it does not exist.
        config_dir = os.path.dirname(self.config_file)
        if config_dir:  # makedirs('') raises FileNotFoundError — guard it
            os.makedirs(config_dir, exist_ok=True)
        with open(self.config_file, 'w', encoding='utf-8') as f:
            json.dump(default_config, f, indent=2)

        return default_config

    def setup_logging(self):
        """Configure logging to logs/system_monitor.log and the console."""
        log_dir = Path('logs')
        log_dir.mkdir(exist_ok=True)

        logging.basicConfig(
            level=logging.INFO,
            format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',
            handlers=[
                logging.FileHandler(log_dir / 'system_monitor.log', encoding='utf-8'),
                logging.StreamHandler()
            ]
        )
        self.logger = logging.getLogger(__name__)

    def get_scraper_process_info(self):
        """Return a dict describing the scraper process, or None.

        Reads the PID from the configured pid_file and queries psutil.
        Returns None when the pid file is missing, unreadable, the PID
        no longer exists, or access is denied.
        """
        pid_file = self.config['scraper']['pid_file']

        if not os.path.exists(pid_file):
            return None

        try:
            with open(pid_file, 'r') as f:
                pid = int(f.read().strip())

            if psutil.pid_exists(pid):
                process = psutil.Process(pid)
                return {
                    'pid': pid,
                    'status': process.status(),
                    # NOTE: first cpu_percent() call on a fresh Process
                    # object returns 0.0 (no reference interval yet).
                    'cpu_percent': process.cpu_percent(),
                    'memory_mb': process.memory_info().rss / 1024 / 1024,
                    'create_time': datetime.fromtimestamp(process.create_time()),
                    'num_threads': process.num_threads(),
                    'open_files': len(process.open_files()) if hasattr(process, 'open_files') else 0
                }
        except (ValueError, psutil.NoSuchProcess, psutil.AccessDenied):
            return None

        return None

    def get_system_metrics(self):
        """Collect host-wide metrics; return a dict or None on failure."""
        try:
            # CPU (1-second sampling interval for a meaningful percentage)
            cpu_percent = psutil.cpu_percent(interval=1)
            cpu_count = psutil.cpu_count()

            # Memory
            memory = psutil.virtual_memory()
            swap = psutil.swap_memory()

            # Disk usage of the current working directory's filesystem
            disk = psutil.disk_usage('.')

            # Network counters (cumulative since boot)
            network = psutil.net_io_counters()

            # Load average is unavailable on some platforms (e.g. Windows)
            load_avg = os.getloadavg() if hasattr(os, 'getloadavg') else [0, 0, 0]

            return {
                'timestamp': datetime.now().isoformat(),
                'cpu': {
                    'percent': cpu_percent,
                    'count': cpu_count,
                    'load_avg': load_avg
                },
                'memory': {
                    'total_mb': memory.total / 1024 / 1024,
                    'available_mb': memory.available / 1024 / 1024,
                    'used_mb': memory.used / 1024 / 1024,
                    'percent': memory.percent,
                    'swap_used_mb': swap.used / 1024 / 1024,
                    'swap_percent': swap.percent
                },
                'disk': {
                    'total_gb': disk.total / 1024 / 1024 / 1024,
                    'used_gb': disk.used / 1024 / 1024 / 1024,
                    'free_gb': disk.free / 1024 / 1024 / 1024,
                    'percent': (disk.used / disk.total) * 100
                },
                'network': {
                    'bytes_sent': network.bytes_sent,
                    'bytes_recv': network.bytes_recv,
                    'packets_sent': network.packets_sent,
                    'packets_recv': network.packets_recv
                }
            }

        except Exception as e:
            self.logger.error(f"Erreur récupération métriques système: {e}")
            return None

    def get_scraper_status(self):
        """Read and return the scraper status JSON, or None if unavailable."""
        status_file = self.config['scraper']['status_file']

        if not os.path.exists(status_file):
            return None

        try:
            with open(status_file, 'r', encoding='utf-8') as f:
                return json.load(f)
        except Exception as e:
            self.logger.error(f"Erreur lecture statut scraper: {e}")
            return None

    def analyze_log_files(self):
        """Scan known log files for size and recent error/warning lines.

        Only the last 100 lines of each file are inspected. Returns a
        summary dict; 'growth_rate_mb_per_hour' is currently always 0
        (computing it would require a previous snapshot).
        """
        log_files = [
            self.config['scraper']['log_file'],
            self.config['scraper']['debug_log_file'],
            'logs/system_monitor.log'
        ]

        analysis = {
            'total_size_mb': 0,
            'error_count': 0,
            'warning_count': 0,
            'last_errors': [],
            'growth_rate_mb_per_hour': 0
        }

        for log_file in log_files:
            if os.path.exists(log_file):
                try:
                    stat = os.stat(log_file)
                    size_mb = stat.st_size / 1024 / 1024
                    analysis['total_size_mb'] += size_mb

                    # Inspect the tail of the file for errors/warnings
                    with open(log_file, 'r', encoding='utf-8', errors='ignore') as f:
                        lines = f.readlines()[-100:]  # last 100 lines

                    for line in lines:
                        line_lower = line.lower()
                        if 'error' in line_lower:
                            analysis['error_count'] += 1
                            # Keep at most 5 sample error lines
                            if len(analysis['last_errors']) < 5:
                                analysis['last_errors'].append(line.strip())
                        elif 'warning' in line_lower:
                            analysis['warning_count'] += 1

                except Exception as e:
                    self.logger.debug(f"Erreur analyse {log_file}: {e}")

        return analysis

    def check_alerts(self, system_metrics, scraper_process, scraper_status):
        """Evaluate thresholds and return a list of new alert dicts.

        Each alert has 'level', 'type', 'message' and an ISO 'timestamp'.
        The configured 'max_alerts_per_hour' budget is enforced against
        alerts already recorded in self.alerts during the last hour
        (this config key was previously ignored). Any metric argument
        may be None, in which case its checks are skipped.
        """
        alerts = []
        config = self.config['monitoring']

        if not self.config['alerts']['enabled']:
            return alerts

        # Memory check
        if system_metrics and system_metrics['memory']['used_mb'] > config['max_memory_usage_mb']:
            alerts.append({
                'level': 'WARNING',
                'type': 'memory',
                'message': f"Utilisation mémoire élevée: {system_metrics['memory']['used_mb']:.1f}MB > {config['max_memory_usage_mb']}MB",
                'timestamp': datetime.now().isoformat()
            })

        # CPU check
        if system_metrics and system_metrics['cpu']['percent'] > config['max_cpu_usage_percent']:
            alerts.append({
                'level': 'WARNING',
                'type': 'cpu',
                'message': f"Utilisation CPU élevée: {system_metrics['cpu']['percent']:.1f}% > {config['max_cpu_usage_percent']}%",
                'timestamp': datetime.now().isoformat()
            })

        # Disk check
        if system_metrics and system_metrics['disk']['percent'] > config['max_disk_usage_percent']:
            alerts.append({
                'level': 'WARNING',
                'type': 'disk',
                'message': f"Utilisation disque élevée: {system_metrics['disk']['percent']:.1f}% > {config['max_disk_usage_percent']}%",
                'timestamp': datetime.now().isoformat()
            })

        # Scraper process memory check
        if scraper_process and scraper_process['memory_mb'] > config['max_memory_usage_mb']:
            alerts.append({
                'level': 'WARNING',
                'type': 'scraper_memory',
                'message': f"Scraper utilise trop de mémoire: {scraper_process['memory_mb']:.1f}MB",
                'timestamp': datetime.now().isoformat()
            })

        # Scraper status check ('error'/'erreur' substrings in the status)
        if scraper_status:
            status_str = scraper_status.get('status', '').lower()
            if 'error' in status_str or 'erreur' in status_str:
                alerts.append({
                    'level': 'ERROR',
                    'type': 'scraper_error',
                    'message': f"Erreur détectée dans le scraper: {scraper_status.get('status', '')}",
                    'timestamp': datetime.now().isoformat()
                })

        # Frequent-captcha check
        if scraper_status and scraper_status.get('captcha_count', 0) > 10:
            alerts.append({
                'level': 'WARNING',
                'type': 'captcha',
                'message': f"Nombreux captchas détectés: {scraper_status.get('captcha_count', 0)}",
                'timestamp': datetime.now().isoformat()
            })

        # Enforce the hourly alert budget (rate limiting, previously unused).
        max_per_hour = self.config['alerts'].get('max_alerts_per_hour', 10)
        cutoff = datetime.now() - timedelta(hours=1)
        recent = 0
        for past in self.alerts:
            try:
                if datetime.fromisoformat(past['timestamp']) >= cutoff:
                    recent += 1
            except (KeyError, TypeError, ValueError):
                # Malformed historical entries don't count toward the budget.
                continue
        budget = max(0, max_per_hour - recent)
        return alerts[:budget]

    def save_monitoring_data(self, data):
        """Append *data* to the current hour's monitoring JSON file.

        Files rotate hourly (logs/monitoring_YYYYMMDD_HH.json) and each
        is capped at the 720 most recent entries.
        """
        timestamp = datetime.now().strftime('%Y%m%d_%H')
        filename = f"logs/monitoring_{timestamp}.json"

        # Load existing entries for this hour, tolerating corrupt files.
        monitoring_data = []
        if os.path.exists(filename):
            try:
                with open(filename, 'r', encoding='utf-8') as f:
                    monitoring_data = json.load(f)
                if not isinstance(monitoring_data, list):
                    # Guard against a file that parses but isn't a list.
                    monitoring_data = []
            except (OSError, ValueError):
                # Narrowed from a bare 'except:' — only I/O and JSON errors.
                monitoring_data = []

        monitoring_data.append(data)

        # Keep only the 720 most recent points (12 hours at 60 s interval)
        if len(monitoring_data) > 720:
            monitoring_data = monitoring_data[-720:]

        try:
            with open(filename, 'w', encoding='utf-8') as f:
                json.dump(monitoring_data, f, indent=2, ensure_ascii=False)
        except Exception as e:
            self.logger.error(f"Erreur sauvegarde monitoring: {e}")

    def cleanup_old_files(self):
        """Delete monitoring JSON files older than 7 days from logs/."""
        logs_dir = Path('logs')
        if not logs_dir.exists():
            return

        # Anything last modified before this cutoff is removed.
        cutoff_date = datetime.now() - timedelta(days=7)

        for file_path in logs_dir.glob('monitoring_*.json'):
            try:
                if file_path.stat().st_mtime < cutoff_date.timestamp():
                    file_path.unlink()
                    self.logger.info(f"Fichier de monitoring supprimé: {file_path}")
            except Exception as e:
                self.logger.error(f"Erreur suppression {file_path}: {e}")

    def generate_report(self):
        """Build and return a one-shot monitoring report dict."""
        report = {
            'generated_at': datetime.now().isoformat(),
            'system_metrics': self.get_system_metrics(),
            'scraper_process': self.get_scraper_process_info(),
            'scraper_status': self.get_scraper_status(),
            'log_analysis': self.analyze_log_files(),
            'recent_alerts': self.alerts[-10:] if self.alerts else []
        }

        return report

    def run_monitoring_cycle(self):
        """Run one monitoring pass: collect, alert, persist, log."""
        try:
            # Collect all data sources
            system_metrics = self.get_system_metrics()
            scraper_process = self.get_scraper_process_info()
            scraper_status = self.get_scraper_status()
            log_analysis = self.analyze_log_files()

            # Evaluate alert thresholds
            new_alerts = self.check_alerts(system_metrics, scraper_process, scraper_status)
            self.alerts.extend(new_alerts)

            # Log each new alert at its own level; unknown level names
            # fall back to WARNING instead of raising AttributeError.
            for alert in new_alerts:
                level = getattr(logging, alert['level'], logging.WARNING)
                self.logger.log(level, f"ALERT [{alert['type']}]: {alert['message']}")

            # Assemble the snapshot to persist
            monitoring_data = {
                'timestamp': datetime.now().isoformat(),
                'system': system_metrics,
                'scraper_process': scraper_process,
                'scraper_status': scraper_status,
                'logs': log_analysis,
                'alerts': new_alerts
            }

            # Persist the snapshot
            self.save_monitoring_data(monitoring_data)

            # Periodic info log
            if system_metrics:
                self.logger.info(
                    f"Monitoring - CPU: {system_metrics['cpu']['percent']:.1f}%, "
                    f"RAM: {system_metrics['memory']['percent']:.1f}%, "
                    f"Disk: {system_metrics['disk']['percent']:.1f}%, "
                    f"Scraper: {'Actif' if scraper_process else 'Inactif'}"
                )

        except Exception as e:
            self.logger.error(f"Erreur cycle de monitoring: {e}")

    def start_monitoring(self, daemon=False):
        """Start the monitoring loop.

        With daemon=True the loop runs in a background daemon thread and
        the Thread object is returned; otherwise the loop blocks the
        caller until stop_monitoring() is called or Ctrl+C is pressed.
        """
        if self.monitoring_active:
            self.logger.warning("Monitoring déjà actif")
            return

        self.monitoring_active = True
        self.logger.info("Démarrage du monitoring système")

        def monitoring_loop():
            interval = self.config['monitoring']['check_interval']
            cleanup_counter = 0

            while self.monitoring_active:
                try:
                    self.run_monitoring_cycle()

                    # Periodic cleanup (roughly hourly)
                    cleanup_counter += 1
                    if cleanup_counter >= (3600 / interval):
                        self.cleanup_old_files()
                        cleanup_counter = 0

                    time.sleep(interval)

                except KeyboardInterrupt:
                    break
                except Exception as e:
                    self.logger.error(f"Erreur dans la boucle de monitoring: {e}")
                    time.sleep(interval)

            # Reset the flag so a later start_monitoring() call works
            # (previously it stayed True after the loop broke on Ctrl+C).
            self.monitoring_active = False
            self.logger.info("Monitoring arrêté")

        if daemon:
            thread = threading.Thread(target=monitoring_loop, daemon=True)
            thread.start()
            return thread
        else:
            monitoring_loop()

    def stop_monitoring(self):
        """Request the monitoring loop to stop after its current cycle."""
        self.monitoring_active = False
        self.logger.info("Arrêt du monitoring demandé")

def main():
    """CLI entry point: parse arguments, then report once or monitor continuously."""
    import argparse

    cli = argparse.ArgumentParser(description='Système de monitoring pour le scraper Jobs.ch')
    cli.add_argument('--config', '-c', help='Fichier de configuration')
    cli.add_argument('--daemon', '-d', action='store_true', help='Mode daemon')
    cli.add_argument('--report', '-r', action='store_true', help='Générer un rapport')
    cli.add_argument('--interval', '-i', type=int, help='Intervalle de monitoring (secondes)')
    options = cli.parse_args()

    # Build the monitor with the given config path, or the default one.
    config_path = options.config or 'configs/monitor_config.json'
    monitor = SystemMonitor(config_path)

    # A non-zero --interval overrides the configured check interval.
    if options.interval:
        monitor.config['monitoring']['check_interval'] = options.interval

    try:
        if options.report:
            # One-shot mode: print a JSON report and exit.
            print(json.dumps(monitor.generate_report(), indent=2, ensure_ascii=False))
        else:
            # Continuous mode: run until interrupted.
            print("🔍 Démarrage du monitoring système...")
            print("Appuyez sur Ctrl+C pour arrêter")
            monitor.start_monitoring(daemon=options.daemon)

    except KeyboardInterrupt:
        print("\n🛑 Arrêt du monitoring...")
        monitor.stop_monitoring()

    except Exception as e:
        print(f"❌ Erreur: {e}")

# Run the CLI only when executed as a script (not on import).
if __name__ == "__main__":
    main()