#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
NeuroPulse Monitor Pro v2.1 - Version Améliorée
Application Flask pour la surveillance système avancée
"""

import os
import sys
import json
import time
import sqlite3
import logging
import subprocess
import threading
import socket
import platform
from datetime import datetime, timedelta
from functools import wraps
from collections import defaultdict, deque

from flask import Flask, render_template, request, jsonify, session, redirect, url_for, flash
from flask_cors import CORS
from werkzeug.security import generate_password_hash, check_password_hash

# psutil powers real metric collection; when it is missing the app falls
# back to synthetic data (see get_test_metrics) instead of crashing.
try:
    import psutil
    PSUTIL_AVAILABLE = True
    # NOTE(review): logging.basicConfig() only runs further below, so these
    # early records go through the default handler of the root logger;
    # `logger` is re-fetched after basicConfig.
    logger = logging.getLogger(__name__)
    logger.info("Module psutil disponible")
except ImportError:
    PSUTIL_AVAILABLE = False
    logger = logging.getLogger(__name__)
    logger.warning("Module psutil non disponible - utilisation de données fictives")

# Flask application setup (CORS enabled for all origins by default)
app = Flask(__name__)
CORS(app)

# Default configuration: written to disk on first run and used as the base
# onto which a loaded config.json is merged (see load_config)
DEFAULT_CONFIG = {
    "version": "2.1",
    "application": {
        "name": "NeuroPulse Monitor Pro",
        "port": 5000,
        "host": "0.0.0.0",
        # NOTE(review): debug=True and a hard-coded secret_key are unsafe
        # for production — confirm these are development-only defaults.
        "debug": True,
        "secret_key": "neuropulse-secret-key-2024-enhanced"
    },
    "monitoring": {
        "refresh_interval": 2000,
        "metrics_retention_days": 30,
        "enable_real_time_alerts": True,
        "collect_detailed_metrics": True,
        "network_speed_calculation": True,
        "process_monitoring": True
    },
    # Warning/critical thresholds consumed by generate_alerts()
    "alert_thresholds": {
        "cpu": {"warning": 70, "critical": 90},
        "ram": {"warning": 80, "critical": 95},
        "disk": {"warning": 85, "critical": 95},
        "network": {"warning": 10000, "critical": 50000},  # KB/s
        "temperature": {"warning": 70, "critical": 85}
    },
    # Services probed by check_monitored_services(); a port triggers a TCP
    # probe, otherwise systemctl / process-name fallbacks are used
    "services_to_monitor": [
        {
            "name": "apache2",
            "title": "Apache HTTP Server",
            "port": 80,
            "enabled": True,
            "criticality": "Critique"
        },
        {
            "name": "nginx",
            "title": "Nginx Web Server", 
            "port": 80,
            "enabled": True,
            "criticality": "Critique"
        },
        {
            "name": "mysql",
            "title": "MySQL Database",
            "port": 3306,
            "enabled": True,
            "criticality": "Élevée"
        },
        {
            "name": "ssh",
            "title": "SSH Server",
            "port": 22,
            "enabled": True,
            "criticality": "Élevée"
        }
    ],
    "notifications": {
        "email": {"enabled": False},
        "slack": {"enabled": False}
    },
    "backup": {
        "enabled": True,
        "schedule": "02:00",
        "retention_days": 30,
        "backup_directory": "/var/www/html/backups"
    }
}

# Shared in-process state (caches and bounded histories)
config = DEFAULT_CONFIG.copy()  # shallow copy: nested dicts are shared with DEFAULT_CONFIG
metrics_cache = {"timestamp": 0, "data": {}, "history": deque(maxlen=100)}
alert_history = deque(maxlen=1000)
command_history = deque(maxlen=100)
network_stats_cache = {"last_bytes_sent": 0, "last_bytes_recv": 0, "last_timestamp": 0}
process_stats_cache = {"top_processes": [], "last_update": 0}

# Filesystem layout (rooted in the web directory)
BASE_PATH = "/var/www/html"
CONFIG_PATH = os.path.join(BASE_PATH, "config", "config.json")
DB_PATH = os.path.join(BASE_PATH, "config", "neuropulse.db")
LOG_PATH = os.path.join(BASE_PATH, "logs", "app.log")

# Logging to both a file and the console
os.makedirs(os.path.dirname(LOG_PATH), exist_ok=True)
logging.basicConfig(
    level=logging.INFO,
    format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',
    handlers=[
        logging.FileHandler(LOG_PATH),
        logging.StreamHandler()
    ]
)
logger = logging.getLogger(__name__)

# Substring blacklist applied to user-submitted commands in
# is_command_forbidden(); defence-in-depth only, not a real sandbox.
FORBIDDEN_COMMANDS = [
    'rm -rf', 'mkfs', 'dd if=', 'shutdown', 'reboot', 'halt',
    'passwd', 'userdel', 'fdisk', 'parted', 'crontab -r',
    'init 0', 'init 6', 'poweroff', 'systemctl poweroff',
    'systemctl reboot', 'format', 'del /q', 'deltree',
    'sudo rm', 'sudo dd', 'sudo mkfs'
]

def load_config():
    """Load the configuration from CONFIG_PATH, creating it if absent.

    A loaded file is merged (shallowly) onto the current config. On any
    failure, the global config is reset to a copy of DEFAULT_CONFIG.
    """
    global config

    try:
        if not os.path.exists(CONFIG_PATH):
            # First run: persist the current defaults so there is a file
            # for the administrator to edit.
            os.makedirs(os.path.dirname(CONFIG_PATH), exist_ok=True)
            with open(CONFIG_PATH, 'w', encoding='utf-8') as fh:
                json.dump(config, fh, indent=2, ensure_ascii=False)
            logger.info(f"Configuration par défaut créée: {CONFIG_PATH}")
            return

        with open(CONFIG_PATH, 'r', encoding='utf-8') as fh:
            config.update(json.load(fh))
            logger.info(f"Configuration chargée depuis {CONFIG_PATH}")
    except Exception as e:
        logger.error(f"Erreur lors du chargement de la configuration: {e}")
        config = DEFAULT_CONFIG.copy()

def save_config():
    """Persist the in-memory config to CONFIG_PATH as pretty-printed JSON.

    Returns:
        bool: True on success, False if the write failed.
    """
    try:
        os.makedirs(os.path.dirname(CONFIG_PATH), exist_ok=True)
        with open(CONFIG_PATH, 'w', encoding='utf-8') as fh:
            json.dump(config, fh, indent=2, ensure_ascii=False)
    except Exception as err:
        logger.error(f"Erreur lors de la sauvegarde: {err}")
        return False
    else:
        logger.info("Configuration sauvegardée")
        return True

def init_database():
    """Create the SQLite schema and seed the default admin account.

    Tables: users, tickets, metrics_history, command_history, alerts.
    Safe to call repeatedly: all CREATEs use IF NOT EXISTS and the admin
    seed is guarded by a COUNT check. Errors are logged, not raised.
    """
    try:
        os.makedirs(os.path.dirname(DB_PATH), exist_ok=True)
        conn = sqlite3.connect(DB_PATH)
        cursor = conn.cursor()
        
        # Users table: dashboard credentials and role
        cursor.execute('''
            CREATE TABLE IF NOT EXISTS users (
                id INTEGER PRIMARY KEY AUTOINCREMENT,
                username TEXT UNIQUE NOT NULL,
                password_hash TEXT NOT NULL,
                email TEXT,
                role TEXT DEFAULT 'admin',
                last_login TIMESTAMP,
                created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP
            )
        ''')
        
        # Tickets table (manual and auto-created incident tickets)
        cursor.execute('''
            CREATE TABLE IF NOT EXISTS tickets (
                id INTEGER PRIMARY KEY AUTOINCREMENT,
                title TEXT NOT NULL,
                description TEXT,
                severity TEXT DEFAULT 'medium',
                status TEXT DEFAULT 'open',
                service TEXT,
                created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP,
                resolved_at TIMESTAMP,
                auto_created BOOLEAN DEFAULT 0
            )
        ''')
        
        # Historical metric samples (one row per collection)
        cursor.execute('''
            CREATE TABLE IF NOT EXISTS metrics_history (
                id INTEGER PRIMARY KEY AUTOINCREMENT,
                timestamp TIMESTAMP DEFAULT CURRENT_TIMESTAMP,
                cpu_percent REAL,
                memory_percent REAL,
                memory_used_gb REAL,
                memory_total_gb REAL,
                disk_percent REAL,
                disk_used_gb REAL,
                disk_total_gb REAL,
                network_in REAL,
                network_out REAL,
                load_avg_1 REAL,
                load_avg_5 REAL,
                load_avg_15 REAL,
                process_count INTEGER,
                temperature REAL
            )
        ''')
        
        # Audit log of commands executed through execute_command()
        cursor.execute('''
            CREATE TABLE IF NOT EXISTS command_history (
                id INTEGER PRIMARY KEY AUTOINCREMENT,
                command TEXT NOT NULL,
                user TEXT,
                timestamp TIMESTAMP DEFAULT CURRENT_TIMESTAMP,
                success BOOLEAN,
                output TEXT,
                error TEXT
            )
        ''')
        
        # Alerts raised by threshold checks
        cursor.execute('''
            CREATE TABLE IF NOT EXISTS alerts (
                id INTEGER PRIMARY KEY AUTOINCREMENT,
                type TEXT NOT NULL,
                severity TEXT NOT NULL,
                message TEXT NOT NULL,
                timestamp TIMESTAMP DEFAULT CURRENT_TIMESTAMP,
                resolved BOOLEAN DEFAULT 0,
                resolved_at TIMESTAMP
            )
        ''')
        
        # Seed the default admin account on first run.
        # NOTE(review): default credentials are admin/admin — this must be
        # changed immediately after deployment; confirm an onboarding flow
        # forces a password change.
        cursor.execute("SELECT COUNT(*) FROM users WHERE username = 'admin'")
        if cursor.fetchone()[0] == 0:
            admin_hash = generate_password_hash('admin')
            cursor.execute(
                "INSERT INTO users (username, password_hash, email, role) VALUES (?, ?, ?, ?)",
                ('admin', admin_hash, 'admin@neuropulse.local', 'admin')
            )
            logger.info("Utilisateur admin créé avec mot de passe par défaut")
        
        conn.commit()
        conn.close()
        logger.info(f"Base de données initialisée: {DB_PATH}")
        
    except Exception as e:
        logger.error(f"Erreur lors de l'initialisation de la base de données: {e}")

def get_db():
    """Open and return a fresh SQLite connection to the application DB."""
    connection = sqlite3.connect(DB_PATH)
    return connection

def login_required(f):
    """Decorator gating a Flask view behind an authenticated session.

    API paths (under /api/) receive a JSON 401; browser paths are
    redirected to the login page.
    """
    @wraps(f)
    def wrapper(*args, **kwargs):
        authenticated = 'user_id' in session and 'username' in session
        if not authenticated:
            if request.path.startswith('/api/'):
                return jsonify({'error': 'Authentification requise'}), 401
            return redirect(url_for('login'))
        return f(*args, **kwargs)
    return wrapper

def is_command_forbidden(command):
    """Return True if *command* contains a blacklisted substring.

    Matching is case-insensitive against FORBIDDEN_COMMANDS. This is a
    best-effort filter, not a sandbox.
    """
    normalized = command.lower().strip()
    return any(pattern in normalized for pattern in FORBIDDEN_COMMANDS)

def execute_command(command, user='unknown'):
    """Run a shell command and record the attempt in command_history.

    The command is screened against FORBIDDEN_COMMANDS, then executed via
    the shell with a 30-second timeout and /tmp as working directory.

    Args:
        command: Raw shell command string.
        user: Username stored in the audit table alongside the command.

    Returns:
        dict with keys 'success', 'output', 'error', 'return_code'.

    SECURITY NOTE(review): shell=True executes the raw string through the
    shell; the substring blacklist is trivially bypassable (quoting, full
    paths, command substitution) and is defence-in-depth only. This must
    remain reachable only by authenticated, trusted administrators.
    """
    if is_command_forbidden(command):
        return {
            'success': False,
            'error': 'Commande interdite pour des raisons de sécurité',
            'output': '',
            'return_code': 1
        }
    
    try:
        result = subprocess.run(
            command,
            shell=True,
            capture_output=True,
            text=True,
            timeout=30,
            cwd='/tmp'
        )
        
        success = result.returncode == 0
        
        # Persist an audit record; a DB failure must not mask the result
        try:
            conn = get_db()
            cursor = conn.cursor()
            cursor.execute(
                "INSERT INTO command_history (command, user, success, output, error) VALUES (?, ?, ?, ?, ?)",
                (command, user, success, result.stdout, result.stderr)
            )
            conn.commit()
            conn.close()
        except Exception as e:
            logger.error(f"Erreur lors de l'enregistrement de la commande: {e}")
        
        return {
            'success': success,
            'output': result.stdout,
            'error': result.stderr,
            'return_code': result.returncode
        }
        
    except subprocess.TimeoutExpired:
        # 124 mirrors the coreutils `timeout` exit-code convention
        return {
            'success': False,
            'error': 'Commande timeout (>30s)',
            'output': '',
            'return_code': 124
        }
    except Exception as e:
        return {
            'success': False,
            'error': str(e),
            'output': '',
            'return_code': 1
        }

def get_system_metrics_enhanced():
    """Collect a full system-metrics snapshot via psutil.

    Gathers CPU, load average, memory/swap, disk usage and I/O, network
    throughput, temperature, top processes, monitored-service states and
    threshold alerts into one dict for the dashboard API. Falls back to
    get_test_metrics() when psutil is missing or collection raises.
    """
    if not PSUTIL_AVAILABLE:
        return get_test_metrics()
    
    try:
        # NOTE(review): this logs at INFO on every collection (every ~2s
        # via the cache) — consider DEBUG to avoid log noise.
        logger.info("Collecte des métriques système améliorées")
        
        # CPU: short sampling interval to avoid blocking the request
        cpu_percent = psutil.cpu_percent(interval=0.1)  # Intervalle réduit pour éviter le blocage
        cpu_count = psutil.cpu_count()
        cpu_freq = psutil.cpu_freq()
        
        # Load average (Linux/Unix only; stays [0, 0, 0] elsewhere)
        load_avg = [0, 0, 0]
        try:
            if hasattr(os, 'getloadavg'):
                load_avg = list(os.getloadavg())
        except (OSError, AttributeError):
            pass
        
        # Memory and swap
        memory = psutil.virtual_memory()
        swap = psutil.swap_memory()
        
        # Disk usage of the root filesystem plus cumulative I/O counters
        disk_usage = psutil.disk_usage('/')
        disk_io = psutil.disk_io_counters()
        
        # Network counters plus derived KB/s rates (uses a module cache)
        network_io = psutil.net_io_counters()
        network_speeds = calculate_network_speeds(network_io)
        
        # Temperature (None when no sensor is available)
        temperature = get_temperature()
        
        # Most CPU-hungry processes (cached, refreshed every 10s)
        top_processes = get_top_processes()
        
        # Host information
        boot_time = psutil.boot_time()
        hostname = socket.gethostname()
        
        # States of the configured services
        services = check_monitored_services()
        
        # Threshold-based alerts for this snapshot
        anomalies = generate_alerts(cpu_percent, memory.percent, disk_usage.percent, network_speeds, temperature)
        
        metrics = {
            'cpu': round(cpu_percent, 1),
            'cpu_details': {
                'percent': round(cpu_percent, 1),
                'count': cpu_count,
                # cpu_freq can be None on VMs/containers
                'frequency': {
                    'current': round(cpu_freq.current, 1) if cpu_freq else 0,
                    'min': round(cpu_freq.min, 1) if cpu_freq else 0,
                    'max': round(cpu_freq.max, 1) if cpu_freq else 0
                } if cpu_freq else {}
            },
            'load_avg': {
                '1min': round(load_avg[0], 2),
                '5min': round(load_avg[1], 2),
                '15min': round(load_avg[2], 2)
            },
            'ram': round(memory.percent, 1),
            'memory': {
                'percent': round(memory.percent, 1),
                'used_gb': round(memory.used / (1024**3), 2),
                'total_gb': round(memory.total / (1024**3), 2),
                'available_gb': round(memory.available / (1024**3), 2),
                # buffers/cached only exist on Linux psutil builds
                'buffers_gb': round(memory.buffers / (1024**3), 2) if hasattr(memory, 'buffers') else 0,
                'cached_gb': round(memory.cached / (1024**3), 2) if hasattr(memory, 'cached') else 0
            },
            'swap': {
                'percent': round(swap.percent, 1),
                'used_gb': round(swap.used / (1024**3), 2),
                'total_gb': round(swap.total / (1024**3), 2)
            },
            'disk': round((disk_usage.used / disk_usage.total) * 100, 1),
            'disk_details': {
                'percent': round((disk_usage.used / disk_usage.total) * 100, 1),
                'used_gb': round(disk_usage.used / (1024**3), 2),
                'total_gb': round(disk_usage.total / (1024**3), 2),
                'free_gb': round(disk_usage.free / (1024**3), 2),
                'io': {
                    'read_bytes': disk_io.read_bytes if disk_io else 0,
                    'write_bytes': disk_io.write_bytes if disk_io else 0,
                    'read_count': disk_io.read_count if disk_io else 0,
                    'write_count': disk_io.write_count if disk_io else 0
                } if disk_io else {}
            },
            'network': {
                'kb_in_per_sec': round(network_speeds['in'], 1),
                'kb_out_per_sec': round(network_speeds['out'], 1),
                'bytes_sent': network_io.bytes_sent if network_io else 0,
                'bytes_recv': network_io.bytes_recv if network_io else 0,
                'packets_sent': network_io.packets_sent if network_io else 0,
                'packets_recv': network_io.packets_recv if network_io else 0,
                'errors_in': network_io.errin if network_io else 0,
                'errors_out': network_io.errout if network_io else 0
            },
            'processes': {
                'count': len(psutil.pids()),
                'top': top_processes
            },
            'temperature': temperature,
            'services': services,
            'system': {
                'hostname': hostname,
                'uptime': round(time.time() - boot_time, 0),
                'boot_time': datetime.fromtimestamp(boot_time).isoformat(),
                'platform': {
                    'system': platform.system(),
                    'machine': platform.machine(),
                    'release': platform.release(),
                    'version': platform.version()
                }
            },
            'anomalies': anomalies,
            'timestamp': datetime.now().isoformat(),
            'source': 'enhanced_psutil'
        }
        
        logger.info(f"Métriques collectées: CPU={metrics['cpu']}%, RAM={metrics['ram']}%, Disk={metrics['disk']}%")
        return metrics
        
    except Exception as e:
        # Any psutil failure degrades gracefully to synthetic data
        logger.error(f"Erreur dans la collecte des métriques améliorées: {e}")
        return get_test_metrics()

def calculate_network_speeds(network_io):
    """Derive inbound/outbound network throughput in KB/s.

    Compares the current cumulative psutil counters against the previous
    snapshot kept in network_stats_cache. The very first call (no snapshot
    yet) reports zero for both directions.
    """
    global network_stats_cache

    now = time.time()
    speeds = {'in': 0, 'out': 0}

    previous_ts = network_stats_cache['last_timestamp']
    if network_io and previous_ts > 0:
        elapsed = now - previous_ts
        if elapsed > 0:
            delta_recv = network_io.bytes_recv - network_stats_cache['last_bytes_recv']
            delta_sent = network_io.bytes_sent - network_stats_cache['last_bytes_sent']
            # Clamp negatives to zero: counters can reset (interface restart)
            speeds['in'] = max(0, delta_recv / elapsed / 1024)   # KB/s
            speeds['out'] = max(0, delta_sent / elapsed / 1024)  # KB/s

    # Snapshot the current counters for the next call
    if network_io:
        network_stats_cache['last_bytes_sent'] = network_io.bytes_sent
        network_stats_cache['last_bytes_recv'] = network_io.bytes_recv
        network_stats_cache['last_timestamp'] = now

    return speeds

def get_temperature():
    """Return the first available sensor temperature in °C, or None.

    Uses psutil.sensors_temperatures() where the platform supports it.
    Any failure (psutil missing, unsupported platform, no sensors, sensor
    read error) yields None rather than raising — callers treat None as
    "temperature unknown".
    """
    try:
        if not hasattr(psutil, 'sensors_temperatures'):
            return None
        temps = psutil.sensors_temperatures()
        if not temps:
            return None
        # Report the first reading of the first sensor group that has one
        for _name, entries in temps.items():
            if entries:
                return round(entries[0].current, 1)
        return None
    except Exception:
        # Fix: the original caught `(AttributeError, Exception)`, which is
        # redundant — Exception already subsumes AttributeError. A single
        # broad catch is intentional: sensor access is best-effort.
        return None

def get_top_processes():
    """Return up to 10 processes ranked by CPU usage (descending).

    Results come from a module-level cache refreshed at most every 10
    seconds to keep per-request cost low. Processes that disappear or
    deny access mid-iteration are silently skipped.
    """
    global process_stats_cache
    
    current_time = time.time()
    
    # Refresh the cache at most every 10 seconds
    if current_time - process_stats_cache['last_update'] > 10:
        try:
            processes = []
            for proc in psutil.process_iter(['pid', 'name', 'cpu_percent', 'memory_percent']):
                try:
                    proc_info = proc.info
                    # Keep only processes currently consuming CPU
                    if proc_info['cpu_percent'] > 0:
                        processes.append({
                            'pid': proc_info['pid'],
                            'name': proc_info['name'],
                            'cpu_percent': round(proc_info['cpu_percent'], 1),
                            'memory_percent': round(proc_info['memory_percent'], 1)
                        })
                except (psutil.NoSuchProcess, psutil.AccessDenied):
                    continue
            
            # Sort by CPU descending and keep the top 10
            processes.sort(key=lambda x: x['cpu_percent'], reverse=True)
            process_stats_cache['top_processes'] = processes[:10]
            process_stats_cache['last_update'] = current_time
            
        except Exception as e:
            # On failure, keep serving the previous (possibly stale) list
            logger.warning(f"Erreur lors de la collecte des processus: {e}")
    
    return process_stats_cache['top_processes']

def check_monitored_services():
    """Probe each enabled service from config['services_to_monitor'].

    Strategy per service: if a port is configured, attempt a TCP connect
    to localhost (3s timeout) and measure response time; otherwise ask
    systemctl, falling back to a process-name scan via psutil.

    Returns:
        list of dicts with name, title, active, port, criticality and
        either response_time (ms, port probes only) or error.
    """
    services = []
    
    for service_config in config.get('services_to_monitor', []):
        if not service_config.get('enabled', True):
            continue
            
        service_name = service_config['name']
        port = service_config.get('port')
        
        try:
            active = False
            response_time = None
            
            if port:
                # TCP probe against localhost
                start_time = time.time()
                sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
                sock.settimeout(3)
                result = sock.connect_ex(('localhost', port))
                sock.close()
                
                if result == 0:
                    active = True
                    response_time = round((time.time() - start_time) * 1000, 1)  # ms
            else:
                # systemctl check
                try:
                    result = subprocess.run(
                        ['systemctl', 'is-active', service_name],
                        capture_output=True, text=True, timeout=5
                    )
                    active = result.stdout.strip() == 'active'
                except (subprocess.TimeoutExpired, FileNotFoundError):
                    # Fallback: look for a matching process name.
                    # NOTE(review): the bare except below also swallows the
                    # NameError raised when psutil is unavailable, leaving
                    # active=False — confirm that is the intended behavior.
                    try:
                        for proc in psutil.process_iter(['name']):
                            if service_name.lower() in proc.info['name'].lower():
                                active = True
                                break
                    except:
                        pass
            
            services.append({
                'name': service_name,
                'title': service_config.get('title', service_name),
                'active': active,
                'port': port,
                'criticality': service_config.get('criticality', 'Moyenne'),
                'response_time': response_time
            })
            
        except Exception as e:
            # Report the service as down with the error attached
            logger.warning(f"Erreur lors du test du service {service_name}: {e}")
            services.append({
                'name': service_name,
                'title': service_config.get('title', service_name),
                'active': False,
                'port': port,
                'criticality': service_config.get('criticality', 'Moyenne'),
                'error': str(e)
            })
    
    return services

def _threshold_alerts(prefix, value, limits, critical_msg, warning_msg, timestamp):
    """Return a one-element alert list if *value* crosses a limit, else [].

    Critical takes precedence over warning; the alert dict shape matches
    what the dashboard and alert_history consumers expect.
    """
    if value > limits['critical']:
        severity, suffix, message = 'critical', '_critical', critical_msg
    elif value > limits['warning']:
        severity, suffix, message = 'warning', '_warning', warning_msg
    else:
        return []
    return [{
        'type': prefix + suffix,
        'severity': severity,
        'message': message,
        'timestamp': timestamp,
        'value': value,
        'threshold': limits[severity]
    }]


def generate_alerts(cpu, memory, disk, network_speeds, temperature):
    """Generate alerts for metrics exceeding configured thresholds.

    Args:
        cpu, memory, disk: usage percentages.
        network_speeds: dict with 'in'/'out' rates in KB/s.
        temperature: °C reading, or None when no sensor is available.

    Returns:
        list of alert dicts (type, severity, message, timestamp, value,
        threshold); empty when everything is within limits.
    """
    anomalies = []
    current_time = datetime.now().isoformat()
    thresholds = config.get('alert_thresholds', {})

    anomalies += _threshold_alerts(
        'cpu', cpu, thresholds.get('cpu', {'warning': 70, 'critical': 90}),
        f'CPU usage critically high: {cpu:.1f}%',
        f'CPU usage high: {cpu:.1f}%',
        current_time)

    anomalies += _threshold_alerts(
        'memory', memory, thresholds.get('ram', {'warning': 80, 'critical': 95}),
        f'Memory usage critically high: {memory:.1f}%',
        f'Memory usage high: {memory:.1f}%',
        current_time)

    anomalies += _threshold_alerts(
        'disk', disk, thresholds.get('disk', {'warning': 85, 'critical': 95}),
        f'Disk usage critically high: {disk:.1f}%',
        f'Disk usage high: {disk:.1f}%',
        current_time)

    # Network is judged on combined in+out throughput (KB/s)
    total_network = network_speeds['in'] + network_speeds['out']
    anomalies += _threshold_alerts(
        'network', total_network,
        thresholds.get('network', {'warning': 10000, 'critical': 50000}),
        f'Network traffic very high: {total_network:.1f} KB/s',
        f'Network traffic high: {total_network:.1f} KB/s',
        current_time)

    # Fix: the original guard `if temperature:` also skipped a legitimate
    # 0.0 °C reading; compare explicitly against the no-sensor sentinel.
    if temperature is not None:
        anomalies += _threshold_alerts(
            'temperature', temperature,
            thresholds.get('temperature', {'warning': 70, 'critical': 85}),
            f'System temperature critically high: {temperature}°C',
            f'System temperature high: {temperature}°C',
            current_time)

    return anomalies

def get_test_metrics():
    """Produce a synthetic metrics payload for debugging.

    Mirrors the structure returned by get_system_metrics_enhanced() so the
    dashboard renders normally when psutil is unavailable or live
    collection fails; values are randomized within plausible ranges.
    """
    import random

    logger.warning("Utilisation de métriques de test - psutil non disponible")

    now = datetime.now()

    cpu_details = {
        'percent': round(random.uniform(20, 80), 1),
        'count': 4,
        'frequency': {
            'current': round(random.uniform(2000, 3000), 1),
            'min': 800.0,
            'max': 3200.0
        }
    }
    load_avg = {
        '1min': round(random.uniform(0.5, 2.0), 2),
        '5min': round(random.uniform(0.3, 1.5), 2),
        '15min': round(random.uniform(0.2, 1.0), 2)
    }
    memory = {
        'percent': round(random.uniform(30, 70), 1),
        'used_gb': round(random.uniform(2, 6), 2),
        'total_gb': 8.0,
        'available_gb': round(random.uniform(2, 4), 2),
        'buffers_gb': 0.2,
        'cached_gb': 1.5
    }
    swap = {
        'percent': round(random.uniform(0, 20), 1),
        'used_gb': round(random.uniform(0, 1), 2),
        'total_gb': 2.0
    }
    disk_details = {
        'percent': round(random.uniform(15, 45), 1),
        'used_gb': round(random.uniform(50, 200), 2),
        'total_gb': 500.0,
        'free_gb': round(random.uniform(300, 450), 2),
        'io': {
            'read_bytes': random.randint(1000000, 10000000),
            'write_bytes': random.randint(500000, 5000000),
            'read_count': random.randint(10000, 100000),
            'write_count': random.randint(5000, 50000)
        }
    }
    network = {
        'kb_in_per_sec': round(random.uniform(100, 1000), 1),
        'kb_out_per_sec': round(random.uniform(50, 500), 1),
        'bytes_sent': random.randint(1000000, 10000000),
        'bytes_recv': random.randint(1000000, 10000000),
        'packets_sent': random.randint(10000, 100000),
        'packets_recv': random.randint(10000, 100000),
        'errors_in': 0,
        'errors_out': 0
    }
    processes = {
        'count': random.randint(150, 300),
        'top': [
            {'pid': 1234, 'name': 'python3', 'cpu_percent': 15.2, 'memory_percent': 8.5},
            {'pid': 5678, 'name': 'nginx', 'cpu_percent': 5.1, 'memory_percent': 2.3},
            {'pid': 9012, 'name': 'mysql', 'cpu_percent': 3.8, 'memory_percent': 12.1}
        ]
    }
    services = [
        {
            'name': 'test-service',
            'title': 'Service de Test',
            'active': True,
            'port': 80,
            'criticality': 'Élevée',
            'response_time': round(random.uniform(10, 100), 1)
        }
    ]
    system_info = {
        'hostname': 'neuropulse-test',
        'uptime': 86400,
        'boot_time': (now - timedelta(days=1)).isoformat(),
        'platform': {
            'system': 'Linux',
            'machine': 'x86_64',
            'release': '5.4.0',
            'version': 'Ubuntu 20.04'
        }
    }

    return {
        'cpu': round(random.uniform(20, 80), 1),
        'cpu_details': cpu_details,
        'load_avg': load_avg,
        'ram': round(random.uniform(30, 70), 1),
        'memory': memory,
        'swap': swap,
        'disk': round(random.uniform(15, 45), 1),
        'disk_details': disk_details,
        'network': network,
        'processes': processes,
        'temperature': round(random.uniform(45, 65), 1),
        'services': services,
        'system': system_info,
        'anomalies': [],
        'timestamp': now.isoformat(),
        'source': 'test_data_enhanced'
    }

def get_cached_metrics():
    """Return the latest metrics snapshot, re-collecting at most every 2s.

    Fresh results also feed a bounded in-memory history (maxlen=100) used
    for trend charts. On collection failure the cache is filled with
    synthetic test data so callers always receive a well-formed payload.
    """
    global metrics_cache
    current_time = time.time()
    
    # Serve from cache unless the entry is older than 2 seconds
    if current_time - metrics_cache['timestamp'] > 2:
        try:
            logger.debug("Collecte des métriques en cours...")
            
            # Full collection via psutil (or test data fallback inside)
            metrics_data = get_system_metrics_enhanced()
            
            # Refresh the cache
            metrics_cache['data'] = metrics_data
            metrics_cache['timestamp'] = current_time
            
            # Append a compact sample to the rolling history
            metrics_cache['history'].append({
                'timestamp': current_time,
                'cpu': metrics_data.get('cpu', 0),
                'memory': metrics_data.get('ram', 0),
                'disk': metrics_data.get('disk', 0),
                'network_in': metrics_data.get('network', {}).get('kb_in_per_sec', 0),
                'network_out': metrics_data.get('network', {}).get('kb_out_per_sec', 0)
            })
            
        except Exception as e:
            logger.error(f"Erreur lors de la collecte des métriques: {e}")
            # Degrade to synthetic data so the dashboard keeps rendering
            metrics_cache['data'] = get_test_metrics()
            metrics_cache['timestamp'] = current_time
    
    return metrics_cache['data']

def create_ticket(title, description, severity='medium', service='System', auto_created=False):
    """Create a new ticket and return its id, or None on failure/duplicate.

    Args:
        title: Short ticket title (also used for same-day deduplication).
        description: Free-form ticket body.
        severity: One of the severity labels stored in the DB (default 'medium').
        service: Service name the ticket relates to.
        auto_created: True for tickets opened automatically by the alerting
            loop; enables the duplicate check so one alert does not spawn
            a ticket every minute.

    Returns:
        The new ticket's row id, or None (duplicate or database error).
    """
    conn = None
    try:
        conn = get_db()
        cursor = conn.cursor()

        # Dedupe auto-created tickets: skip if an open ticket with the same
        # title was already created today.
        if auto_created:
            cursor.execute(
                "SELECT id FROM tickets WHERE title = ? AND status = 'open' AND DATE(created_at) = DATE('now')",
                (title,)
            )
            if cursor.fetchone():
                return None  # Ticket déjà existant

        cursor.execute(
            "INSERT INTO tickets (title, description, severity, service, auto_created) VALUES (?, ?, ?, ?, ?)",
            (title, description, severity, service, auto_created)
        )

        ticket_id = cursor.lastrowid
        conn.commit()

        logger.info(f"Ticket créé: #{ticket_id} - {title}")
        return ticket_id

    except Exception as e:
        logger.error(f"Erreur lors de la création du ticket: {e}")
        return None
    finally:
        # BUGFIX: the original leaked the connection whenever a cursor call
        # raised; always close it, on every exit path.
        if conn is not None:
            conn.close()

# ========================
# ROUTES PRINCIPALES
# ========================

@app.route('/')
def index():
    """Landing page: authenticated users go to the dashboard, others to login."""
    target = 'dashboard' if 'user_id' in session else 'login'
    return redirect(url_for(target))

@app.route('/login', methods=['GET', 'POST'])
def login():
    """Login page: GET renders the form, POST validates credentials.

    On success, stores user id/name in the session, records the login
    timestamp and redirects to the dashboard; otherwise re-renders the
    form with a flash message.
    """
    if request.method == 'POST':
        username = request.form.get('username', '').strip()
        password = request.form.get('password', '').strip()

        if not username or not password:
            flash('Nom d\'utilisateur et mot de passe requis', 'error')
            return render_template('login.html')

        conn = None
        try:
            conn = get_db()
            cursor = conn.cursor()
            cursor.execute("SELECT id, password_hash FROM users WHERE username = ?", (username,))
            user = cursor.fetchone()

            if user and check_password_hash(user[1], password):
                # Successful login: establish the session.
                session['user_id'] = user[0]
                session['username'] = username
                session.permanent = True

                # Record the last-login timestamp.
                cursor.execute("UPDATE users SET last_login = CURRENT_TIMESTAMP WHERE id = ?", (user[0],))
                conn.commit()

                logger.info(f"Connexion réussie pour l'utilisateur: {username}")
                flash(f'Bienvenue {username} !', 'success')
                return redirect(url_for('dashboard'))
            else:
                flash('Identifiants invalides', 'error')
                logger.warning(f"Tentative de connexion échouée pour: {username}")

        except Exception as e:
            logger.error(f"Erreur lors de la connexion: {e}")
            flash('Erreur interne', 'error')
        finally:
            # BUGFIX: the original leaked the connection when an exception
            # occurred between get_db() and conn.close().
            if conn is not None:
                conn.close()

    return render_template('login.html')

@app.route('/logout')
def logout():
    """Clear the session and send the user back to the login page."""
    # Capture the name before wiping the session, for the audit log.
    who = session.get('username', 'inconnu')
    session.clear()
    logger.info(f"Déconnexion de l'utilisateur: {who}")
    flash('Vous avez été déconnecté', 'info')
    return redirect(url_for('login'))

@app.route('/dashboard')
@login_required
def dashboard():
    """Main dashboard: current metrics plus the five most recent tickets."""
    try:
        metrics = get_cached_metrics()

        # Fetch the latest tickets for the sidebar/summary widget.
        conn = get_db()
        try:
            cursor = conn.cursor()
            cursor.execute(
                "SELECT id, title, severity, status, created_at FROM tickets ORDER BY created_at DESC LIMIT 5"
            )
            recent_tickets = cursor.fetchall()
        finally:
            # BUGFIX: always close the connection, even if the query raises
            # (the original leaked it into the outer exception handler).
            conn.close()

        return render_template('dashboard.html',
                             user=session['username'],
                             metrics=metrics,
                             config=config,
                             tickets=recent_tickets)
    except Exception as e:
        logger.error(f"Erreur dashboard: {e}")
        return render_template('error.html', error=str(e)), 500

# ========================
# API REST ENDPOINTS AMÉLIORÉS
# ========================

@app.route('/api/health')
def api_health():
    """Unauthenticated health check.

    Probes the database and the metrics collector, reports psutil
    availability, and returns 'healthy', 'degraded' or 'unhealthy'.
    """
    try:
        checks = {
            'database': 'ok',
            'config': 'ok',
            'monitoring': 'ok',
            'psutil': 'ok' if PSUTIL_AVAILABLE else 'unavailable'
        }

        # Database probe: a trivial SELECT proves the connection works.
        # BUGFIX: narrowed the bare `except:` (it swallowed SystemExit and
        # KeyboardInterrupt) and close the connection on every path.
        conn = None
        try:
            conn = get_db()
            cursor = conn.cursor()
            cursor.execute("SELECT 1")
        except Exception:
            checks['database'] = 'error'
        finally:
            if conn is not None:
                conn.close()

        # Monitoring probe: the call itself is the test.
        try:
            get_cached_metrics()
        except Exception:
            checks['monitoring'] = 'error'

        # 'unavailable' (psutil missing) is tolerated; any 'error' degrades status.
        status = 'healthy' if all(check in ['ok', 'unavailable'] for check in checks.values()) else 'degraded'

        return jsonify({
            'status': status,
            'timestamp': datetime.now().isoformat(),
            'checks': checks,
            'version': config.get('version', '2.1'),
            'authenticated': 'user_id' in session,
            'uptime': time.time() - psutil.boot_time() if PSUTIL_AVAILABLE else 0
        })

    except Exception as e:
        logger.error(f"Erreur health check: {e}")
        return jsonify({
            'status': 'unhealthy',
            'timestamp': datetime.now().isoformat(),
            'error': str(e)
        }), 500

@app.route('/api/metrics/current')
@login_required
def api_current_metrics():
    """JSON endpoint: latest cached metrics plus the cache's age in seconds."""
    try:
        payload = {
            'success': True,
            'data': get_cached_metrics(),
            'timestamp': datetime.now().isoformat(),
            'cache_age': time.time() - metrics_cache['timestamp'],
        }
        return jsonify(payload)
    except Exception as e:
        logger.error(f"Erreur API métriques: {e}")
        return jsonify({'success': False, 'error': str(e)}), 500

@app.route('/api/metrics/history')
@login_required
def api_metrics_history():
    """JSON endpoint: last N samples from the in-memory metrics history.

    Query params:
        limit: number of points to return; clamped to the range [1, 200].
    """
    try:
        # BUGFIX: a non-integer `limit` used to bubble up as a 500; reject it
        # cleanly with a 400 instead.
        try:
            limit = int(request.args.get('limit', 50))
        except (TypeError, ValueError):
            return jsonify({'success': False, 'error': 'Paramètre limit invalide'}), 400

        # BUGFIX: clamp to [1, 200] — a negative limit made the slice
        # history[-limit:] skip the head of the history instead of the tail.
        limit = max(1, min(limit, 200))

        # History comes from the rolling in-memory cache.
        history_data = list(metrics_cache['history'])[-limit:]

        return jsonify({
            'success': True,
            'data': history_data,
            'count': len(history_data)
        })
    except Exception as e:
        logger.error(f"Erreur API historique: {e}")
        return jsonify({'success': False, 'error': str(e)}), 500

@app.route('/api/services')
@login_required
def api_services_list():
    """JSON endpoint: monitored services from the latest metrics snapshot."""
    try:
        svc_list = get_cached_metrics().get('services', [])
        return jsonify({
            'success': True,
            'services': svc_list,
            'count': len(svc_list),
        })
    except Exception as e:
        logger.error(f"Erreur API services: {e}")
        return jsonify({'success': False, 'error': str(e)}), 500

@app.route('/api/execute', methods=['POST'])
@login_required
def api_execute_command():
    """JSON endpoint: run a command via the server-side command executor.

    Expects a JSON body {"command": "..."}; returns the executor's output,
    stderr and return code. Missing/empty body or command yields a 400.
    """
    try:
        # BUGFIX: get_json() returns None for a non-JSON body, which made
        # data.get(...) raise and return a 500; silent=True plus an explicit
        # check turns that into a clean 400.
        data = request.get_json(silent=True) or {}
        command = data.get('command', '').strip()

        if not command:
            return jsonify({'success': False, 'error': 'Commande vide'}), 400

        result = execute_command(command, session['username'])

        return jsonify({
            'success': result['success'],
            'command': command,
            'output': result['output'],
            'error': result['error'],
            'return_code': result['return_code'],
            'timestamp': datetime.now().isoformat()
        })

    except Exception as e:
        logger.error(f"Erreur exécution commande: {e}")
        return jsonify({'success': False, 'error': str(e)}), 500

@app.route('/api/alerts')
@login_required
def api_alerts_list():
    """JSON endpoint: anomalies detected in the latest metrics snapshot."""
    try:
        anomaly_list = get_cached_metrics().get('anomalies', [])
        return jsonify({
            'success': True,
            'alerts': anomaly_list,
            'count': len(anomaly_list),
        })
    except Exception as e:
        logger.error(f"Erreur API alertes: {e}")
        return jsonify({'success': False, 'error': str(e)}), 500

# ========================
# ROUTES ADDITIONNELLES
# ========================

@app.route('/metrics')
@login_required
def metrics_page():
    """Render the detailed-metrics page for the logged-in user."""
    context = {'user': session['username'], 'config': config}
    return render_template('metrics.html', **context)

@app.route('/services')
@login_required
def services_page():
    """Render the service-management page from the latest metrics snapshot."""
    try:
        svc_list = get_cached_metrics().get('services', [])
        return render_template('services.html',
                             user=session['username'],
                             services=svc_list,
                             config=config)
    except Exception as e:
        logger.error(f"Erreur page services: {e}")
        return render_template('error.html', error=str(e)), 500

@app.route('/tickets')
@login_required
def tickets_page():
    """Render the ticket list, optionally filtered by status and severity.

    Query params:
        status: ticket status to filter on, or 'all' (default).
        severity: severity to filter on, or 'all' (default).

    At most 100 tickets are shown, newest first. Filters are always bound
    as SQL parameters, never interpolated.
    """
    conn = None
    try:
        conn = get_db()
        cursor = conn.cursor()

        # Optional filters from the query string.
        status_filter = request.args.get('status', 'all')
        severity_filter = request.args.get('severity', 'all')

        query = "SELECT id, title, description, severity, status, service, created_at, resolved_at FROM tickets"
        params = []
        conditions = []

        if status_filter != 'all':
            conditions.append("status = ?")
            params.append(status_filter)

        if severity_filter != 'all':
            conditions.append("severity = ?")
            params.append(severity_filter)

        if conditions:
            query += " WHERE " + " AND ".join(conditions)

        query += " ORDER BY created_at DESC LIMIT 100"

        cursor.execute(query, params)
        tickets = cursor.fetchall()

        return render_template('tickets.html',
                             user=session['username'],
                             tickets=tickets,
                             status_filter=status_filter,
                             severity_filter=severity_filter,
                             config=config)
    except Exception as e:
        logger.error(f"Erreur page tickets: {e}")
        return render_template('error.html', error=str(e)), 500
    finally:
        # BUGFIX: the original leaked the connection when the query or the
        # template rendering raised; close it on every exit path.
        if conn is not None:
            conn.close()

@app.route('/terminal')
@login_required
def terminal_page():
    """Render the web-terminal page for the logged-in user."""
    context = {'user': session['username'], 'config': config}
    return render_template('terminal.html', **context)

@app.route('/settings')
@login_required
def settings_page():
    """Render the configuration page for the logged-in user."""
    context = {'user': session['username'], 'config': config}
    return render_template('settings.html', **context)

# ========================
# TÂCHES D'ARRIÈRE-PLAN
# ========================

def save_metrics_to_history():
    """Persist the current metrics snapshot into the metrics_history table.

    Reads the cached metrics dict and flattens its nested sections
    (memory, disk_details, network, load_avg, processes) into one row.
    Errors are logged and swallowed: a failed sample must not kill the
    background loop.
    """
    conn = None
    try:
        metrics = get_cached_metrics()

        conn = get_db()
        cursor = conn.cursor()

        # Flatten the nested metrics dict into one history row; missing
        # sections default to 0 (temperature may be NULL).
        cursor.execute('''
            INSERT INTO metrics_history 
            (cpu_percent, memory_percent, memory_used_gb, memory_total_gb,
             disk_percent, disk_used_gb, disk_total_gb, network_in, network_out,
             load_avg_1, load_avg_5, load_avg_15, process_count, temperature) 
            VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)
        ''', (
            metrics.get('cpu', 0),
            metrics.get('ram', 0),
            metrics.get('memory', {}).get('used_gb', 0),
            metrics.get('memory', {}).get('total_gb', 0),
            metrics.get('disk', 0),
            metrics.get('disk_details', {}).get('used_gb', 0),
            metrics.get('disk_details', {}).get('total_gb', 0),
            metrics.get('network', {}).get('kb_in_per_sec', 0),
            metrics.get('network', {}).get('kb_out_per_sec', 0),
            metrics.get('load_avg', {}).get('1min', 0),
            metrics.get('load_avg', {}).get('5min', 0),
            metrics.get('load_avg', {}).get('15min', 0),
            metrics.get('processes', {}).get('count', 0),
            metrics.get('temperature')
        ))

        conn.commit()

    except Exception as e:
        logger.error(f"Erreur lors de la sauvegarde des métriques: {e}")
    finally:
        # BUGFIX: the original leaked the connection when the INSERT raised
        # (the except block returned without closing it).
        if conn is not None:
            conn.close()

def background_tasks():
    """Periodic maintenance loop, run in a daemon thread.

    Every minute: persists the current metrics, auto-opens tickets for
    critical anomalies, and — once a day in the 02:00 window — purges
    history rows older than 30 days. Any error is logged and the loop
    continues.
    """
    while True:
        try:
            # Persist the current metric snapshot every cycle.
            save_metrics_to_history()

            # Daily cleanup window (30-day retention).
            # BUGFIX: read the clock once — the original called
            # datetime.now() twice, so hour and minute could be sampled
            # on either side of a boundary.
            now = datetime.now()
            if now.hour == 2 and now.minute < 1:  # 2h du matin
                conn = get_db()
                try:
                    cursor = conn.cursor()
                    cursor.execute(
                        "DELETE FROM metrics_history WHERE timestamp < datetime('now', '-30 days')"
                    )
                    cursor.execute(
                        "DELETE FROM command_history WHERE timestamp < datetime('now', '-30 days')"
                    )
                    conn.commit()
                finally:
                    # BUGFIX: close even if a DELETE raises.
                    conn.close()
                logger.info("Nettoyage historique effectué")

            # Auto-create tickets for critical anomalies
            # (same-day duplicates are filtered inside create_ticket).
            metrics = get_cached_metrics()
            for anomaly in metrics.get('anomalies', []):
                if anomaly['severity'] == 'critical':
                    create_ticket(
                        title=f"Alerte critique: {anomaly['type']}",
                        description=anomaly['message'],
                        severity='critical',
                        service='System',
                        auto_created=True
                    )

        except Exception as e:
            logger.error(f"Erreur tâche arrière-plan: {e}")

        time.sleep(60)  # Attendre 1 minute

def initialize_app():
    """Initialize the application: config, Flask settings, database,
    network-delta cache, and the background maintenance thread.

    Must be called once before app.run().
    """
    logger.info("Initialisation de NeuroPulse Monitor Pro v2.1...")

    # Load configuration from disk (falls back to DEFAULT_CONFIG).
    load_config()

    # Flask/session settings.
    app.config['SECRET_KEY'] = config['application']['secret_key']
    app.config['PERMANENT_SESSION_LIFETIME'] = timedelta(hours=8)
    app.config['SESSION_COOKIE_SECURE'] = False  # True en production avec HTTPS
    app.config['SESSION_COOKIE_HTTPONLY'] = True
    app.config['SESSION_COOKIE_SAMESITE'] = 'Lax'

    # Create tables / default user if needed.
    init_database()

    # Seed the network counters so the first per-second delta is meaningful.
    if PSUTIL_AVAILABLE:
        try:
            net_io = psutil.net_io_counters()
            if net_io:
                global network_stats_cache
                network_stats_cache.update({
                    'last_bytes_sent': net_io.bytes_sent,
                    'last_bytes_recv': net_io.bytes_recv,
                    'last_timestamp': time.time()
                })
        except Exception as e:
            # BUGFIX: narrowed the bare `except:` (it also swallowed
            # SystemExit/KeyboardInterrupt) and log instead of hiding it.
            logger.debug(f"Initialisation du cache réseau impossible: {e}")

    # Start the periodic maintenance loop (daemon: dies with the process).
    background_thread = threading.Thread(target=background_tasks, daemon=True)
    background_thread.start()

    logger.info("✅ NeuroPulse Monitor Pro v2.1 initialisé avec succès")
    logger.info(f"📁 Base path: {BASE_PATH}")
    logger.info(f"🗄️ Database: {DB_PATH}")
    logger.info(f"⚙️ Config: {CONFIG_PATH}")
    logger.info(f"📝 Logs: {LOG_PATH}")
    logger.info(f"🔧 psutil: {'✅ Disponible' if PSUTIL_AVAILABLE else '❌ Non disponible'}")

if __name__ == '__main__':
    try:
        initialize_app()

        # Pull the server parameters from the loaded configuration.
        app_cfg = config['application']
        host = app_cfg['host']
        port = app_cfg['port']
        debug = app_cfg['debug']

        logger.info(f"🚀 Démarrage de NeuroPulse Monitor Pro v2.1 sur {host}:{port}")
        logger.info(f"🌐 Accès: http://{host}:{port}/")
        logger.info(f"👤 Identifiants par défaut: admin / admin")

        app.run(host=host, port=port, debug=debug, threaded=True)

    except KeyboardInterrupt:
        logger.info("Arrêt de NeuroPulse Monitor Pro")
    except Exception as e:
        logger.error(f"Erreur critique: {e}")
        sys.exit(1)