# ========================
# ROUTES API POUR TICKETS (à ajouter dans app.py)
# ========================

@app.route('/api/tickets', methods=['GET'])
@login_required
def api_tickets_list():
    """List tickets as JSON, with optional filtering.

    Query parameters:
        status:   only tickets with this status ('all' = no filter, default).
        severity: only tickets with this severity ('all' = no filter, default).
        limit:    max rows returned (default 100, clamped to 1..1000).

    Returns {'success', 'tickets', 'count'} on success, or an error payload
    with HTTP 400 (bad limit) / 500 (unexpected failure).
    """
    conn = None
    try:
        # Validate 'limit' before touching the DB: a malformed value should
        # produce a 400, not an unhandled ValueError turned into a 500.
        try:
            limit = int(request.args.get('limit', 100))
        except (TypeError, ValueError):
            return jsonify({'success': False, 'error': 'Paramètre limit invalide'}), 400
        limit = max(1, min(limit, 1000))  # keep the query bounded

        status_filter = request.args.get('status', 'all')
        severity_filter = request.args.get('severity', 'all')

        # Build the filtered query with placeholders only (no string
        # interpolation of user input).
        query = """
            SELECT id, title, description, severity, status, service, created_at, resolved_at 
            FROM tickets
        """
        params = []
        conditions = []

        if status_filter != 'all':
            conditions.append("status = ?")
            params.append(status_filter)

        if severity_filter != 'all':
            conditions.append("severity = ?")
            params.append(severity_filter)

        if conditions:
            query += " WHERE " + " AND ".join(conditions)

        query += " ORDER BY created_at DESC LIMIT ?"
        params.append(limit)

        conn = get_db()
        cursor = conn.cursor()
        cursor.execute(query, params)
        tickets = cursor.fetchall()

        # Map positional row tuples to JSON-friendly dicts.
        columns = ('id', 'title', 'description', 'severity', 'status',
                   'service', 'created_at', 'resolved_at')
        tickets_data = [dict(zip(columns, ticket)) for ticket in tickets]

        return jsonify({
            'success': True,
            'tickets': tickets_data,
            'count': len(tickets_data)
        })

    except Exception as e:
        logger.error(f"Erreur API tickets list: {e}")
        return jsonify({'success': False, 'error': str(e)}), 500
    finally:
        # Release the connection on every path, including exceptions
        # (previously it leaked when the query raised).
        if conn is not None:
            conn.close()

@app.route('/api/tickets', methods=['POST'])
@login_required
def api_create_ticket():
    """Create a ticket from a JSON body.

    Expected fields: title (required), description (required), severity
    (low/medium/high/critical, falls back to 'medium'), service (defaults
    to 'System').  Returns the new ticket id on success.
    """
    try:
        # silent=True: a missing or malformed JSON body yields None instead
        # of raising, so we can answer with a clean 400 rather than a 500.
        data = request.get_json(silent=True)
        if not isinstance(data, dict):
            return jsonify({'success': False, 'error': 'Corps JSON requis'}), 400

        # str() guards against non-string values (e.g. numeric titles),
        # which previously crashed on .strip().
        title = str(data.get('title', '')).strip()
        description = str(data.get('description', '')).strip()
        severity = data.get('severity', 'medium')
        service = data.get('service', 'System')

        if not title:
            return jsonify({'success': False, 'error': 'Le titre est requis'}), 400

        if not description:
            return jsonify({'success': False, 'error': 'La description est requise'}), 400

        # Unknown severities silently fall back to 'medium'.
        valid_severities = ['low', 'medium', 'high', 'critical']
        if severity not in valid_severities:
            severity = 'medium'

        ticket_id = create_ticket(title, description, severity, service)

        if ticket_id:
            # session.get avoids a KeyError should the session lack a username.
            logger.info(f"Ticket créé avec succès: #{ticket_id} par {session.get('username', 'inconnu')}")
            return jsonify({
                'success': True, 
                'ticket_id': ticket_id,
                'message': f'Ticket #{ticket_id} créé avec succès'
            })
        else:
            return jsonify({'success': False, 'error': 'Erreur lors de la création du ticket'}), 500

    except Exception as e:
        logger.error(f"Erreur création ticket: {e}")
        return jsonify({'success': False, 'error': 'Erreur interne du serveur'}), 500

@app.route('/api/tickets/<int:ticket_id>', methods=['PUT'])
@login_required
def api_update_ticket(ticket_id):
    """Update a ticket's status.

    Body: JSON {'status': <new status>}.  When the new status is 'resolved'
    the resolution timestamp is recorded as well.  Returns 404 when no
    ticket with the given id exists (previously reported a false success).
    """
    conn = None
    try:
        # silent=True turns a malformed body into {} → clean 400 below.
        data = request.get_json(silent=True) or {}
        status = data.get('status')

        if not status:
            return jsonify({'success': False, 'error': 'Statut requis'}), 400

        conn = get_db()
        cursor = conn.cursor()

        if status == 'resolved':
            # Stamp resolved_at at the moment the ticket is closed.
            cursor.execute(
                "UPDATE tickets SET status = ?, resolved_at = CURRENT_TIMESTAMP WHERE id = ?",
                (status, ticket_id)
            )
        else:
            cursor.execute(
                "UPDATE tickets SET status = ? WHERE id = ?",
                (status, ticket_id)
            )

        if cursor.rowcount == 0:
            # No row matched: report "not found" instead of a false success.
            return jsonify({'success': False, 'error': 'Ticket non trouvé'}), 404

        conn.commit()
        logger.info(f"Ticket #{ticket_id} mis à jour: statut = {status}")
        return jsonify({'success': True, 'message': f'Ticket #{ticket_id} mis à jour'})

    except Exception as e:
        logger.error(f"Erreur mise à jour ticket {ticket_id}: {e}")
        return jsonify({'success': False, 'error': str(e)}), 500
    finally:
        # Close the connection on every path (previously leaked on error).
        if conn is not None:
            conn.close()

@app.route('/api/tickets/<int:ticket_id>', methods=['DELETE'])
@login_required
def api_delete_ticket(ticket_id):
    """Delete a ticket by id; returns 404 if it does not exist."""
    conn = None
    try:
        conn = get_db()
        cursor = conn.cursor()

        cursor.execute("DELETE FROM tickets WHERE id = ?", (ticket_id,))

        if cursor.rowcount > 0:
            conn.commit()
            logger.info(f"Ticket #{ticket_id} supprimé")
            return jsonify({'success': True, 'message': f'Ticket #{ticket_id} supprimé'})
        return jsonify({'success': False, 'error': 'Ticket non trouvé'}), 404

    except Exception as e:
        logger.error(f"Erreur suppression ticket {ticket_id}: {e}")
        return jsonify({'success': False, 'error': str(e)}), 500
    finally:
        # Single close point covers success, 404 and exception paths
        # (previously the connection leaked when execute raised).
        if conn is not None:
            conn.close()

# ========================
# SYSTÈME DE SURVEILLANCE AUTOMATIQUE AVANCÉ
# ========================

import re
import hashlib
import ipaddress
from datetime import datetime, timedelta
from threading import Thread, Lock
import sqlite3

# Module-level state for the advanced monitoring subsystem.
discovered_services = {}    # service_key ("name_port") -> service info from the latest scan
monitoring_probes = {}      # in-memory probe state (keyed by probe id — TODO confirm against usage)
security_events = deque(maxlen=1000)  # rolling buffer of the most recent security events
performance_history = defaultdict(lambda: deque(maxlen=100))  # metric name -> last 100 samples
network_topology = {}       # observed network connections/edges
system_fingerprint = {}     # cached fingerprint of the local system
monitoring_lock = Lock()    # presumably guards the shared dicts above — verify at call sites

def init_advanced_monitoring_db():
    """Create the tables backing the advanced monitoring subsystem.

    Idempotent: uses CREATE TABLE IF NOT EXISTS and INSERT OR IGNORE, so it
    is safe to call at every startup.  Any failure is logged and swallowed
    rather than raised.
    """
    try:
        conn = get_db()
        cursor = conn.cursor()
        
        # Services found by the automatic process/port discovery.
        cursor.execute('''
            CREATE TABLE IF NOT EXISTS discovered_services (
                id INTEGER PRIMARY KEY AUTOINCREMENT,
                name TEXT NOT NULL,
                description TEXT,
                port INTEGER,
                protocol TEXT DEFAULT 'tcp',
                process_name TEXT,
                command_line TEXT,
                status TEXT DEFAULT 'active',
                first_seen TIMESTAMP DEFAULT CURRENT_TIMESTAMP,
                last_seen TIMESTAMP DEFAULT CURRENT_TIMESTAMP,
                auto_discovered BOOLEAN DEFAULT 1,
                monitoring_enabled BOOLEAN DEFAULT 1,
                probe_config TEXT,
                fingerprint TEXT
            )
        ''')
        
        # Probes attached to discovered services (one row per probe type).
        cursor.execute('''
            CREATE TABLE IF NOT EXISTS monitoring_probes (
                id INTEGER PRIMARY KEY AUTOINCREMENT,
                service_id INTEGER,
                probe_type TEXT NOT NULL,
                probe_config TEXT,
                check_interval INTEGER DEFAULT 60,
                timeout INTEGER DEFAULT 30,
                enabled BOOLEAN DEFAULT 1,
                last_check TIMESTAMP,
                last_status TEXT,
                created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP,
                FOREIGN KEY (service_id) REFERENCES discovered_services (id)
            )
        ''')
        
        # Security events extracted from log analysis.
        cursor.execute('''
            CREATE TABLE IF NOT EXISTS security_events (
                id INTEGER PRIMARY KEY AUTOINCREMENT,
                event_type TEXT NOT NULL,
                source_ip TEXT,
                destination_ip TEXT,
                port INTEGER,
                severity TEXT DEFAULT 'medium',
                description TEXT,
                raw_data TEXT,
                detected_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP,
                acknowledged BOOLEAN DEFAULT 0
            )
        ''')
        
        # Observed network connections (topology edges).
        cursor.execute('''
            CREATE TABLE IF NOT EXISTS network_topology (
                id INTEGER PRIMARY KEY AUTOINCREMENT,
                source_ip TEXT NOT NULL,
                destination_ip TEXT NOT NULL,
                port INTEGER,
                protocol TEXT,
                connection_type TEXT,
                first_seen TIMESTAMP DEFAULT CURRENT_TIMESTAMP,
                last_seen TIMESTAMP DEFAULT CURRENT_TIMESTAMP,
                packet_count INTEGER DEFAULT 1
            )
        ''')
        
        # System performance metric samples.
        cursor.execute('''
            CREATE TABLE IF NOT EXISTS performance_metrics (
                id INTEGER PRIMARY KEY AUTOINCREMENT,
                metric_type TEXT NOT NULL,
                metric_name TEXT NOT NULL,
                value REAL NOT NULL,
                unit TEXT,
                timestamp TIMESTAMP DEFAULT CURRENT_TIMESTAMP,
                metadata TEXT
            )
        ''')
        
        # Key/value configuration store for the monitoring subsystem.
        cursor.execute('''
            CREATE TABLE IF NOT EXISTS monitoring_config (
                id INTEGER PRIMARY KEY AUTOINCREMENT,
                config_key TEXT UNIQUE NOT NULL,
                config_value TEXT NOT NULL,
                description TEXT,
                updated_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP
            )
        ''')
        
        # Default configuration entries (descriptions are user-facing French text).
        default_configs = [
            ('auto_discovery_enabled', 'true', 'Activer la découverte automatique des services'),
            ('security_monitoring_enabled', 'true', 'Activer la surveillance de sécurité'),
            ('network_scanning_enabled', 'true', 'Activer le scan réseau'),
            ('log_analysis_enabled', 'true', 'Activer l\'analyse des logs'),
            ('auto_probe_creation', 'true', 'Créer automatiquement les sondes'),
            ('discovery_interval', '300', 'Intervalle de découverte en secondes'),
            ('probe_timeout', '30', 'Timeout des sondes en secondes'),
            ('max_concurrent_probes', '50', 'Nombre maximum de sondes simultanées')
        ]
        
        # INSERT OR IGNORE keeps any values the operator already changed.
        for key, value, desc in default_configs:
            cursor.execute(
                "INSERT OR IGNORE INTO monitoring_config (config_key, config_value, description) VALUES (?, ?, ?)",
                (key, value, desc)
            )
        
        conn.commit()
        conn.close()
        logger.info("Base de données de surveillance avancée initialisée")
        
    except Exception as e:
        logger.error(f"Erreur lors de l'initialisation de la BD surveillance: {e}")

def get_monitoring_config(key, default=None):
    """Return the value stored for *key* in monitoring_config.

    Args:
        key: the config_key to look up.
        default: returned when the key is absent or the lookup fails.
    """
    try:
        conn = get_db()
        try:
            cursor = conn.cursor()
            cursor.execute("SELECT config_value FROM monitoring_config WHERE config_key = ?", (key,))
            result = cursor.fetchone()
        finally:
            # Always release the connection (previously leaked on query error).
            conn.close()
        return result[0] if result else default
    except Exception:
        # Narrowed from a bare `except:` so SystemExit/KeyboardInterrupt are
        # no longer swallowed; DB failures still fall back to the default.
        return default

def scan_running_processes():
    """Scan running processes and discover known services.

    Combines two strategies:
      1. listening TCP sockets mapped to well-known port names, and
      2. well-known process names (apache2, mysqld, sshd, ...).

    Returns a dict keyed by "<service>_<port>" with service metadata, or an
    empty dict when psutil is unavailable or the scan fails.
    """
    discovered = {}

    if not PSUTIL_AVAILABLE:
        logger.warning("psutil non disponible - scan des processus impossible")
        return discovered

    try:
        for proc in psutil.process_iter(['pid', 'name', 'cmdline', 'connections']):
            try:
                proc_info = proc.info
                # psutil reports cmdline=None for restricted processes;
                # the old ' '.join(...) crashed on that, so normalize once.
                cmdline = ' '.join(proc_info.get('cmdline') or [])

                # Strategy 1: services identified by their listening sockets
                # ('connections' can also be None when access is denied).
                for conn in (proc_info.get('connections') or []):
                    if conn.status != 'LISTEN':
                        continue
                    port = conn.laddr.port
                    service_name = identify_service_by_port(port)
                    if service_name:
                        discovered[f"{service_name}_{port}"] = {
                            'name': service_name,
                            'port': port,
                            'protocol': 'tcp',
                            'process_name': proc_info['name'],
                            'command_line': cmdline,
                            'pid': proc_info['pid'],
                            'status': 'active'
                        }

                # Strategy 2: services identified by the process name.
                proc_name = (proc_info['name'] or '').lower()
                for service in identify_service_by_process_name(proc_name):
                    service_key = f"{service['name']}_{service.get('port', 0)}"
                    if service_key not in discovered:
                        discovered[service_key] = {
                            **service,
                            'process_name': proc_info['name'],
                            'command_line': cmdline,
                            'pid': proc_info['pid'],
                            'status': 'active'
                        }

            except (psutil.NoSuchProcess, psutil.AccessDenied):
                # Process vanished mid-scan or is not readable; skip it.
                continue

        logger.info(f"Scan des processus terminé: {len(discovered)} services découverts")
        return discovered

    except Exception as e:
        logger.error(f"Erreur lors du scan des processus: {e}")
        return {}

def identify_service_by_port(port):
    """Return the conventional service name for a well-known port, else None."""
    # IANA-style well-known port assignments, kept as (port, name) pairs.
    known = dict((
        (20, 'ftp-data'), (21, 'ftp'), (22, 'ssh'), (23, 'telnet'), (25, 'smtp'),
        (53, 'dns'), (67, 'dhcp-server'), (68, 'dhcp-client'), (69, 'tftp'),
        (80, 'http'), (110, 'pop3'), (119, 'nntp'), (123, 'ntp'), (135, 'msrpc'),
        (139, 'netbios-ssn'), (143, 'imap'), (161, 'snmp'), (162, 'snmp-trap'),
        (389, 'ldap'), (443, 'https'), (445, 'microsoft-ds'), (465, 'smtps'),
        (514, 'syslog'), (587, 'smtp-submission'), (631, 'ipp'), (636, 'ldaps'),
        (993, 'imaps'), (995, 'pop3s'), (1433, 'mssql'), (1521, 'oracle'),
        (3306, 'mysql'), (3389, 'rdp'), (5432, 'postgresql'), (5672, 'amqp'),
        (5984, 'couchdb'), (6379, 'redis'), (8080, 'http-alt'), (8443, 'https-alt'),
        (9200, 'elasticsearch'), (27017, 'mongodb'),
    ))
    return known.get(port)

def identify_service_by_process_name(proc_name):
    """Return service descriptors whose known process pattern occurs in *proc_name*.

    Matching is substring-based ('python' also matches 'python3'); multiple
    patterns may match, in which case all their descriptors are returned.
    """
    known_patterns = {
        'apache2': [{'name': 'apache', 'port': 80, 'description': 'Apache HTTP Server'}],
        'httpd': [{'name': 'apache', 'port': 80, 'description': 'Apache HTTP Server'}],
        'nginx': [{'name': 'nginx', 'port': 80, 'description': 'Nginx Web Server'}],
        'mysqld': [{'name': 'mysql', 'port': 3306, 'description': 'MySQL Database'}],
        'postgres': [{'name': 'postgresql', 'port': 5432, 'description': 'PostgreSQL Database'}],
        'sshd': [{'name': 'ssh', 'port': 22, 'description': 'SSH Server'}],
        'redis-server': [{'name': 'redis', 'port': 6379, 'description': 'Redis Server'}],
        'mongod': [{'name': 'mongodb', 'port': 27017, 'description': 'MongoDB Database'}],
        'elasticsearch': [{'name': 'elasticsearch', 'port': 9200, 'description': 'Elasticsearch'}],
        'docker': [{'name': 'docker', 'description': 'Docker Container Engine'}],
        'containerd': [{'name': 'containerd', 'description': 'Container Runtime'}],
        'node': [{'name': 'nodejs', 'description': 'Node.js Application'}],
        'python': [{'name': 'python-app', 'description': 'Python Application'}],
        'java': [{'name': 'java-app', 'description': 'Java Application'}],
        'php-fpm': [{'name': 'php-fpm', 'description': 'PHP FastCGI Process Manager'}],
        'systemd': [{'name': 'systemd', 'description': 'System and Service Manager'}],
        'cron': [{'name': 'cron', 'description': 'Cron Daemon'}],
        'rsyslog': [{'name': 'syslog', 'port': 514, 'description': 'System Logging'}],
        'bind9': [{'name': 'dns', 'port': 53, 'description': 'DNS Server'}],
        'named': [{'name': 'dns', 'port': 53, 'description': 'DNS Server'}],
        'postfix': [{'name': 'smtp', 'port': 25, 'description': 'Postfix Mail Server'}],
        'dovecot': [{'name': 'imap', 'port': 143, 'description': 'Dovecot IMAP Server'}],
        'vsftpd': [{'name': 'ftp', 'port': 21, 'description': 'FTP Server'}],
        'proftpd': [{'name': 'ftp', 'port': 21, 'description': 'ProFTPD Server'}]
    }

    # Flatten all descriptor lists whose pattern is a substring of proc_name.
    return [descriptor
            for pattern, descriptors in known_patterns.items()
            if pattern in proc_name
            for descriptor in descriptors]

def scan_network_ports(target_range="127.0.0.1", ports=None):
    """Probe TCP ports on a host and report which ones are open.

    Args:
        target_range: host to probe (a single address, default loopback).
        ports: iterable of port numbers; defaults to a list of common ports.

    Returns:
        dict port -> {'port', 'status', 'service'} for each open port,
        or {} when the scan as a whole fails.
    """
    if ports is None:
        # Default set of commonly exposed ports.
        ports = [21, 22, 23, 25, 53, 80, 110, 119, 135, 139, 143, 443, 445, 993, 995, 1433, 3306, 3389, 5432, 8080]

    found = {}

    try:
        import socket

        def _is_open(port):
            # connect_ex returns 0 on a successful TCP handshake.
            probe = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
            probe.settimeout(1)
            try:
                return probe.connect_ex((target_range, port)) == 0
            finally:
                probe.close()

        for port in ports:
            try:
                if _is_open(port):
                    found[port] = {
                        'port': port,
                        'status': 'open',
                        'service': identify_service_by_port(port) or 'unknown'
                    }
            except Exception as e:
                logger.debug(f"Erreur scan port {port}: {e}")
                continue

        logger.info(f"Scan réseau terminé: {len(found)} ports ouverts trouvés")
        return found

    except Exception as e:
        logger.error(f"Erreur lors du scan réseau: {e}")
        return {}

def analyze_system_logs():
    """Scan common system/web log files for notable security and health events.

    Reads only the last 1000 lines of each readable log file, matches them
    against a set of regex patterns, persists any events found via
    save_security_events(), and returns the list of event dicts.
    """
    events = []

    try:
        log_files = [
            '/var/log/auth.log',
            '/var/log/syslog',
            '/var/log/apache2/access.log',
            '/var/log/apache2/error.log',
            '/var/log/nginx/access.log',
            '/var/log/nginx/error.log',
            '/var/log/mysql/error.log'
        ]

        patterns = {
            'ssh_bruteforce': r'Failed password for .* from (\d+\.\d+\.\d+\.\d+)',
            # BUGFIX: the old '[GET|POST]' was a character class matching ONE
            # character from {G,E,T,|,P,O,S}; '(?:GET|POST)' matches the verbs.
            'web_attack': r'(\d+\.\d+\.\d+\.\d+).*"(?:GET|POST).*(?:union|select|script|alert)',
            'login_success': r'Accepted password for (\w+) from (\d+\.\d+\.\d+\.\d+)',
            'service_start': r'Starting (\w+)',
            'service_stop': r'Stopping (\w+)',
            'disk_error': r'I/O error.*device (\w+)',
            'memory_error': r'Out of memory',
            'network_error': r'Network is unreachable'
        }

        # Compile once instead of re-compiling every pattern on every line.
        compiled = {name: re.compile(pat, re.IGNORECASE) for name, pat in patterns.items()}

        for log_file in log_files:
            if not os.path.exists(log_file):
                continue
            try:
                with open(log_file, 'r') as f:
                    # Only the tail of the file, to bound memory and CPU.
                    lines = f.readlines()[-1000:]

                for line in lines:
                    for event_type, regex in compiled.items():
                        match = regex.search(line)
                        if match:
                            events.append({
                                'type': event_type,
                                'source': log_file,
                                'line': line.strip(),
                                'match': match.groups(),
                                'timestamp': datetime.now().isoformat()
                            })

            except PermissionError:
                logger.warning(f"Permission refusée pour lire {log_file}")
            except Exception as e:
                logger.warning(f"Erreur lecture {log_file}: {e}")

        # Persist security events for later triage.
        if events:
            save_security_events(events)

        logger.info(f"Analyse des logs terminée: {len(events)} événements détectés")
        return events

    except Exception as e:
        logger.error(f"Erreur lors de l'analyse des logs: {e}")
        return []

def save_security_events(events):
    """Persist detected log events into the security_events table.

    Severity mapping: ssh_bruteforce / web_attack -> 'high',
    memory_error / disk_error -> 'critical', everything else -> 'medium'.
    Failures are logged, never raised.
    """
    conn = None
    try:
        conn = get_db()
        cursor = conn.cursor()

        for event in events:
            severity = 'medium'
            if event['type'] in ['ssh_bruteforce', 'web_attack']:
                severity = 'high'
            elif event['type'] in ['memory_error', 'disk_error']:
                severity = 'critical'

            # Heuristic: store the first regex capture group as source_ip
            # only when it looks dotted (i.e. plausibly an IP address).
            first_group = event['match'][0] if event['match'] else None
            source_ip = first_group if first_group and '.' in str(first_group) else None

            cursor.execute('''
                INSERT INTO security_events 
                (event_type, severity, description, raw_data, source_ip) 
                VALUES (?, ?, ?, ?, ?)
            ''', (
                event['type'],
                severity,
                f"Événement {event['type']} détecté",
                event['line'],
                source_ip
            ))

        conn.commit()

    except Exception as e:
        logger.error(f"Erreur sauvegarde événements sécurité: {e}")
    finally:
        # Release the connection even when an insert fails
        # (previously leaked on exception).
        if conn is not None:
            conn.close()

def create_automatic_probes(services):
    """Register discovered services in the DB and attach their default probes.

    For each entry in *services* (mapping service_key -> service dict):
      - insert a discovered_services row when (name, port) is new, otherwise
        refresh its last_seen timestamp;
      - create any probes from get_probe_configs_for_service() that do not
        already exist for the service.

    Returns the number of probes created, or 0 on failure.
    """
    probes_created = 0
    
    try:
        conn = get_db()
        cursor = conn.cursor()
        
        for service_key, service in services.items():
            # Is this (name, port) pair already registered?
            cursor.execute(
                "SELECT id FROM discovered_services WHERE name = ? AND port = ?",
                (service['name'], service.get('port', 0))
            )
            existing = cursor.fetchone()
            
            if not existing:
                # First sighting: insert the service row.  md5 here is a
                # cheap change-detection fingerprint, not a security hash.
                cursor.execute('''
                    INSERT INTO discovered_services 
                    (name, description, port, protocol, process_name, command_line, status, fingerprint)
                    VALUES (?, ?, ?, ?, ?, ?, ?, ?)
                ''', (
                    service['name'],
                    service.get('description', f"Service {service['name']}"),
                    service.get('port', 0),
                    service.get('protocol', 'tcp'),
                    service.get('process_name', ''),
                    service.get('command_line', ''),
                    service.get('status', 'active'),
                    hashlib.md5(str(service).encode()).hexdigest()
                ))
                service_id = cursor.lastrowid
            else:
                service_id = existing[0]
                # Known service: just refresh the last-seen timestamp.
                cursor.execute(
                    "UPDATE discovered_services SET last_seen = CURRENT_TIMESTAMP WHERE id = ?",
                    (service_id,)
                )
            
            # Attach the default probes appropriate for this service type.
            probe_configs = get_probe_configs_for_service(service)
            
            for probe_config in probe_configs:
                # Skip probe types already registered for this service.
                cursor.execute(
                    "SELECT id FROM monitoring_probes WHERE service_id = ? AND probe_type = ?",
                    (service_id, probe_config['type'])
                )
                
                if not cursor.fetchone():
                    cursor.execute('''
                        INSERT INTO monitoring_probes 
                        (service_id, probe_type, probe_config, check_interval, timeout)
                        VALUES (?, ?, ?, ?, ?)
                    ''', (
                        service_id,
                        probe_config['type'],
                        json.dumps(probe_config['config']),
                        probe_config.get('interval', 60),
                        probe_config.get('timeout', 30)
                    ))
                    probes_created += 1
        
        conn.commit()
        conn.close()
        
        logger.info(f"Sondes automatiques créées: {probes_created}")
        return probes_created
        
    except Exception as e:
        logger.error(f"Erreur création sondes automatiques: {e}")
        return 0

def get_probe_configs_for_service(service):
    """Build the list of monitoring-probe configurations for one service.

    Every service gets a process check; services bound to a port also get a
    generic TCP check, plus one protocol-specific probe (HTTP, HTTPS, MySQL,
    PostgreSQL, SSH or DNS) when the service type is recognized.
    """
    name = service['name'].lower()
    port = service.get('port', 0)

    # Baseline probe: is the owning process alive?
    probes = [{
        'type': 'process_check',
        'config': {
            'process_name': service.get('process_name', name),
            'command_pattern': service.get('command_line', '')
        },
        'interval': 60,
        'timeout': 10
    }]

    # Generic reachability probe whenever a port is known.
    if port > 0:
        probes.append({
            'type': 'tcp_port_check',
            'config': {'host': '127.0.0.1', 'port': port, 'timeout': 5},
            'interval': 30,
            'timeout': 10
        })

    # Protocol-aware probe, selected by service type.
    if name in ('http', 'apache', 'nginx'):
        probes.append({
            'type': 'http_check',
            'config': {'url': f"http://127.0.0.1:{port or 80}/", 'expected_status': 200, 'timeout': 10},
            'interval': 60,
            'timeout': 15
        })
    elif name == 'https':
        probes.append({
            'type': 'https_check',
            'config': {'url': f"https://127.0.0.1:{port or 443}/", 'expected_status': 200, 'verify_ssl': False, 'timeout': 10},
            'interval': 60,
            'timeout': 15
        })
    elif name == 'mysql':
        probes.append({
            'type': 'mysql_check',
            'config': {'host': '127.0.0.1', 'port': port or 3306, 'timeout': 5},
            'interval': 120,
            'timeout': 30
        })
    elif name == 'postgresql':
        probes.append({
            'type': 'postgresql_check',
            'config': {'host': '127.0.0.1', 'port': port or 5432, 'timeout': 5},
            'interval': 120,
            'timeout': 30
        })
    elif name == 'ssh':
        probes.append({
            'type': 'ssh_check',
            'config': {'host': '127.0.0.1', 'port': port or 22, 'timeout': 5},
            'interval': 300,
            'timeout': 10
        })
    elif name == 'dns':
        probes.append({
            'type': 'dns_check',
            'config': {'server': '127.0.0.1', 'port': port or 53, 'query': 'localhost', 'timeout': 5},
            'interval': 180,
            'timeout': 10
        })

    return probes

def execute_monitoring_probes():
    """Run every enabled probe whose check interval has elapsed.

    Selects due probes (last_check NULL or older than check_interval),
    executes each via execute_single_probe(), persists the outcome and
    raises an alert on failure.  Returns a list of per-probe summaries.
    """
    try:
        conn = get_db()
        cursor = conn.cursor()
        
        # Due probes: never checked, or last_check + check_interval is in
        # the past (SQLite datetime string arithmetic).
        cursor.execute('''
            SELECT p.id, p.service_id, p.probe_type, p.probe_config, p.check_interval, p.timeout,
                   s.name as service_name, s.port, s.status as service_status
            FROM monitoring_probes p
            JOIN discovered_services s ON p.service_id = s.id
            WHERE p.enabled = 1 
            AND (p.last_check IS NULL OR 
                 datetime(p.last_check, '+' || p.check_interval || ' seconds') <= datetime('now'))
        ''')
        
        probes = cursor.fetchall()
        conn.close()
        
        results = []
        
        for probe in probes:
            try:
                # Tuple layout mirrors the SELECT column order above.
                probe_id, service_id, probe_type, probe_config_json, interval, timeout, service_name, port, service_status = probe
                probe_config = json.loads(probe_config_json) if probe_config_json else {}
                
                # Run the type-specific check.
                result = execute_single_probe(probe_type, probe_config, timeout)
                
                # Persist the outcome (save_probe_result is defined elsewhere in the file).
                save_probe_result(probe_id, result)
                
                # Failed probes raise an alert for the service.
                if not result['success']:
                    create_probe_alert(service_name, probe_type, result['error'])
                
                results.append({
                    'probe_id': probe_id,
                    'service': service_name,
                    'type': probe_type,
                    'success': result['success'],
                    'response_time': result.get('response_time', 0),
                    'error': result.get('error')
                })
                
            except Exception as e:
                # Record the failure but keep executing the remaining probes.
                logger.error(f"Erreur exécution sonde {probe[0]}: {e}")
                save_probe_result(probe[0], {'success': False, 'error': str(e)})
        
        logger.info(f"Sondes exécutées: {len(results)}")
        return results
        
    except Exception as e:
        logger.error(f"Erreur exécution sondes: {e}")
        return []

def execute_single_probe(probe_type, config, timeout):
    """Dispatch one probe to its type-specific executor.

    Returns the executor's result dict, or {'success': False, 'error': ...}
    for an unknown probe type or an unexpected exception.
    """
    started = time.time()

    try:
        if probe_type == 'process_check':
            return execute_process_probe(config, timeout)
        if probe_type == 'tcp_port_check':
            return execute_tcp_probe(config, timeout)
        if probe_type == 'http_check':
            return execute_http_probe(config, timeout)
        if probe_type == 'https_check':
            return execute_https_probe(config, timeout)
        if probe_type == 'mysql_check':
            return execute_mysql_probe(config, timeout)
        if probe_type == 'postgresql_check':
            return execute_postgresql_probe(config, timeout)
        if probe_type == 'ssh_check':
            return execute_ssh_probe(config, timeout)
        if probe_type == 'dns_check':
            return execute_dns_probe(config, timeout)
        return {'success': False, 'error': f'Type de sonde inconnu: {probe_type}'}

    except Exception as e:
        # Any executor crash is reported as a failed probe with its latency.
        return {
            'success': False, 
            'error': str(e),
            'response_time': (time.time() - started) * 1000
        }

def execute_tcp_probe(config, timeout):
    """TCP reachability probe: try to connect to config['host']:config['port'].

    Returns {'success', 'response_time'[, 'error']}, response_time in ms.
    """
    start_time = time.time()

    try:
        import socket
        sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        try:
            sock.settimeout(timeout)
            # connect_ex returns 0 when the TCP handshake succeeds.
            result = sock.connect_ex((config['host'], config['port']))
        finally:
            # Close even when connect_ex raises (e.g. name-resolution error);
            # previously the descriptor leaked on that path.
            sock.close()

        response_time = (time.time() - start_time) * 1000

        if result == 0:
            return {'success': True, 'response_time': response_time}
        else:
            return {'success': False, 'error': f'Port {config["port"]} fermé', 'response_time': response_time}

    except Exception as e:
        return {'success': False, 'error': str(e), 'response_time': (time.time() - start_time) * 1000}

def execute_http_probe(config, timeout):
    """HTTP probe: GET config['url'] and compare the HTTP status code.

    config keys: 'url' (required), 'expected_status' (default 200).
    Returns a dict with 'success', 'response_time' (ms) and either
    'status_code' (on success) or 'error'.
    """
    start_time = time.time()

    try:
        import urllib.request
        import urllib.error

        req = urllib.request.Request(config['url'])
        try:
            # Context manager closes the response (the original leaked it).
            with urllib.request.urlopen(req, timeout=timeout) as response:
                status_code = response.getcode()
        except urllib.error.HTTPError as http_err:
            # urlopen raises for non-2xx codes, so the original could never
            # match an expected_status such as 404 or 301; treat the raised
            # code as a regular status to compare.
            status_code = http_err.code

        response_time = (time.time() - start_time) * 1000
        expected_status = config.get('expected_status', 200)

        if status_code == expected_status:
            return {'success': True, 'response_time': response_time, 'status_code': status_code}
        return {'success': False, 'error': f'Status code {status_code}, attendu {expected_status}', 'response_time': response_time}

    except Exception as e:
        return {'success': False, 'error': str(e), 'response_time': (time.time() - start_time) * 1000}

def execute_https_probe(config, timeout):
    """HTTPS probe: GET config['url'] and compare the HTTP status code.

    config keys: 'url' (required), 'expected_status' (default 200),
    'verify_ssl' (default True; False disables certificate checks for
    this probe only).
    """
    start_time = time.time()

    try:
        import urllib.request
        import urllib.error
        import ssl

        # Build a per-call opener. The original used install_opener(),
        # which permanently disabled SSL verification for every later
        # urllib request in the whole process.
        if not config.get('verify_ssl', True):
            ssl_context = ssl.create_default_context()
            ssl_context.check_hostname = False
            ssl_context.verify_mode = ssl.CERT_NONE
            opener = urllib.request.build_opener(urllib.request.HTTPSHandler(context=ssl_context))
        else:
            opener = urllib.request.build_opener()

        req = urllib.request.Request(config['url'])
        try:
            # Context manager closes the response (the original leaked it).
            with opener.open(req, timeout=timeout) as response:
                status_code = response.getcode()
        except urllib.error.HTTPError as http_err:
            # Non-2xx responses raise; compare the raised code instead of
            # unconditionally failing (fixes expected_status like 404/301).
            status_code = http_err.code

        response_time = (time.time() - start_time) * 1000
        expected_status = config.get('expected_status', 200)

        if status_code == expected_status:
            return {'success': True, 'response_time': response_time, 'status_code': status_code}
        return {'success': False, 'error': f'Status code {status_code}, attendu {expected_status}', 'response_time': response_time}

    except Exception as e:
        return {'success': False, 'error': str(e), 'response_time': (time.time() - start_time) * 1000}

def execute_process_probe(config, timeout):
    """Process probe: succeed when a matching process is currently running.

    config keys: 'process_name' (substring of the process name) and/or
    'command_pattern' (substring of the full command line). Matching is
    case-insensitive. `timeout` is unused but kept for the shared
    probe-executor signature.
    """
    start_time = time.time()

    try:
        if not PSUTIL_AVAILABLE:
            return {'success': False, 'error': 'psutil non disponible'}

        process_name = config.get('process_name', '')
        command_pattern = config.get('command_pattern', '')
        # Hoist loop-invariant lowercasing out of the process scan.
        name_needle = process_name.lower()
        cmd_needle = command_pattern.lower()

        for proc in psutil.process_iter(['name', 'cmdline']):
            try:
                # proc.info['name'] can be None on some platforms; guard it
                # (the original would abort the whole scan on that).
                if name_needle and name_needle in (proc.info['name'] or '').lower():
                    return {'success': True, 'response_time': (time.time() - start_time) * 1000}

                if cmd_needle and proc.info['cmdline']:
                    cmdline = ' '.join(proc.info['cmdline'])
                    if cmd_needle in cmdline.lower():
                        return {'success': True, 'response_time': (time.time() - start_time) * 1000}

            except (psutil.NoSuchProcess, psutil.AccessDenied):
                continue

        # Report whichever criterion was actually searched for — the original
        # printed an empty name when only command_pattern was configured.
        target = process_name or command_pattern
        return {'success': False, 'error': f'Processus {target} non trouvé', 'response_time': (time.time() - start_time) * 1000}

    except Exception as e:
        return {'success': False, 'error': str(e), 'response_time': (time.time() - start_time) * 1000}

def execute_mysql_probe(config, timeout):
    """MySQL probe: basic TCP reachability check on config['host']:config['port'].

    Note: this only verifies that the port accepts a connection; it does
    not speak the MySQL protocol or authenticate.
    """
    start_time = time.time()

    try:
        import socket
        # 'with' guarantees the socket is closed even when connect_ex raises
        # (e.g. hostname resolution failure) — the original leaked it then.
        with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as sock:
            sock.settimeout(timeout)
            result = sock.connect_ex((config['host'], config['port']))

        response_time = (time.time() - start_time) * 1000

        if result == 0:
            return {'success': True, 'response_time': response_time}
        return {'success': False, 'error': f'MySQL port {config["port"]} inaccessible', 'response_time': response_time}

    except Exception as e:
        return {'success': False, 'error': str(e), 'response_time': (time.time() - start_time) * 1000}

def execute_postgresql_probe(config, timeout):
    """PostgreSQL probe: basic TCP reachability check on config['host']:config['port'].

    Note: this only verifies that the port accepts a connection; it does
    not speak the PostgreSQL protocol or authenticate.
    """
    start_time = time.time()

    try:
        import socket
        # 'with' guarantees the socket is closed even when connect_ex raises
        # (e.g. hostname resolution failure) — the original leaked it then.
        with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as sock:
            sock.settimeout(timeout)
            result = sock.connect_ex((config['host'], config['port']))

        response_time = (time.time() - start_time) * 1000

        if result == 0:
            return {'success': True, 'response_time': response_time}
        return {'success': False, 'error': f'PostgreSQL port {config["port"]} inaccessible', 'response_time': response_time}

    except Exception as e:
        return {'success': False, 'error': str(e), 'response_time': (time.time() - start_time) * 1000}

def execute_ssh_probe(config, timeout):
    """SSH probe: connect and check that the service sends an SSH banner.

    Returns 'banner' (stripped) on success; on failure an 'error' message.
    """
    start_time = time.time()

    try:
        import socket
        # 'with' closes the socket on every path — the original leaked it
        # when recv() raised (e.g. on a read timeout).
        with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as sock:
            sock.settimeout(timeout)
            result = sock.connect_ex((config['host'], config['port']))

            if result != 0:
                return {'success': False, 'error': f'SSH port {config["port"]} fermé', 'response_time': (time.time() - start_time) * 1000}

            # An SSH server sends its identification string first, so a
            # single read is enough to grab the banner.
            banner = sock.recv(1024).decode('utf-8', errors='ignore')

        response_time = (time.time() - start_time) * 1000

        if 'SSH' in banner:
            return {'success': True, 'response_time': response_time, 'banner': banner.strip()}
        return {'success': False, 'error': 'Pas de bannière SSH valide', 'response_time': response_time}

    except Exception as e:
        return {'success': False, 'error': str(e), 'response_time': (time.time() - start_time) * 1000}

def execute_dns_probe(config, timeout):
    """DNS probe: resolve config['query'] (default 'localhost') to an IP.

    Returns 'resolved_ip' on success; on failure an 'error' message.
    """
    start_time = time.time()

    try:
        import socket

        # gethostbyname has no per-call timeout, so the process-wide default
        # is changed temporarily. The original never restored it, silently
        # imposing this timeout on every socket created afterwards.
        previous_timeout = socket.getdefaulttimeout()
        socket.setdefaulttimeout(timeout)
        try:
            result = socket.gethostbyname(config.get('query', 'localhost'))
        finally:
            socket.setdefaulttimeout(previous_timeout)

        response_time = (time.time() - start_time) * 1000

        if result:
            return {'success': True, 'response_time': response_time, 'resolved_ip': result}
        return {'success': False, 'error': 'Résolution DNS échouée', 'response_time': response_time}

    except Exception as e:
        return {'success': False, 'error': str(e), 'response_time': (time.time() - start_time) * 1000}

def save_probe_result(probe_id, result):
    """Persist the outcome of one probe run.

    Updates the probe row's last_check/last_status and, when a response
    time is present, records it in performance_metrics. Errors are logged,
    never raised (best-effort persistence).
    """
    try:
        conn = get_db()
        try:
            cursor = conn.cursor()

            status = 'success' if result['success'] else 'failed'

            cursor.execute('''
                UPDATE monitoring_probes 
                SET last_check = CURRENT_TIMESTAMP, last_status = ?
                WHERE id = ?
            ''', (status, probe_id))

            # Record the measured latency as a performance metric.
            if 'response_time' in result:
                cursor.execute('''
                    INSERT INTO performance_metrics 
                    (metric_type, metric_name, value, unit, metadata)
                    VALUES (?, ?, ?, ?, ?)
                ''', (
                    'probe_response_time',
                    f'probe_{probe_id}',
                    result['response_time'],
                    'ms',
                    json.dumps({'probe_id': probe_id, 'success': result['success']})
                ))

            conn.commit()
        finally:
            # Always release the connection — the original leaked it when
            # any execute raised.
            conn.close()

    except Exception as e:
        logger.error(f"Erreur sauvegarde résultat sonde {probe_id}: {e}")

def create_probe_alert(service_name, probe_type, error_message):
    """Open a high-severity, auto-created ticket for a failed probe.

    Any failure to create the ticket is logged and swallowed so the
    monitoring loop keeps running.
    """
    try:
        description = (
            f"La sonde {probe_type} pour le service {service_name} a échoué."
            f"\nErreur: {error_message}"
        )
        create_ticket(
            f"Alerte de surveillance: {service_name}",
            description,
            'high',
            service_name,
            auto_created=True,
        )
    except Exception as e:
        logger.error(f"Erreur création alerte sonde: {e}")

def advanced_monitoring_loop():
    """Main advanced-monitoring background loop.

    Runs forever (intended for a daemon thread): optionally discovers
    services and creates probes, optionally analyses logs, then executes
    all monitoring probes, sleeping `discovery_interval` seconds between
    iterations. On any error it logs and backs off for one minute.
    """
    logger.info("Démarrage de la surveillance avancée")
    
    while True:
        try:
            # Skip the whole discovery phase when auto-discovery is disabled.
            if get_monitoring_config('auto_discovery_enabled', 'true') == 'true':
                
                # Scan running processes.
                discovered_processes = scan_running_processes()
                
                # Scan network ports (separately toggleable).
                if get_monitoring_config('network_scanning_enabled', 'true') == 'true':
                    network_services = scan_network_ports()
                
                # Combine the discovered services.
                # NOTE(review): network_services is collected but never merged
                # into all_services, so port-scan results never reach probe
                # creation — confirm whether this is intentional.
                all_services = {**discovered_processes}
                
                # Create automatic probes for the discovered services.
                if get_monitoring_config('auto_probe_creation', 'true') == 'true':
                    create_automatic_probes(all_services)
                
                # Analyse system logs.
                if get_monitoring_config('log_analysis_enabled', 'true') == 'true':
                    analyze_system_logs()
            
            # Run the monitoring probes (independent of discovery settings).
            execute_monitoring_probes()
            
            # Sleep until the next iteration (config value is in seconds).
            discovery_interval = int(get_monitoring_config('discovery_interval', '300'))
            time.sleep(discovery_interval)
            
        except Exception as e:
            logger.error(f"Erreur dans la boucle de surveillance: {e}")
            time.sleep(60)  # Back off for one minute after an error.

# ========================
# API ENDPOINTS POUR LA SURVEILLANCE AVANCÉE
# ========================

@app.route('/api/monitoring/discover')
@login_required
def api_discover_services():
    """API: trigger a manual service-discovery pass.

    Returns JSON with the number of services discovered, the number of
    probes created, and the service records themselves; 500 on failure.
    """
    try:
        discovered_processes = scan_running_processes()
        network_services = scan_network_ports()
        
        # NOTE(review): network_services is not merged into all_services, so
        # the response and probe creation cover process-discovered services
        # only — confirm whether port-scan results should be included.
        all_services = {**discovered_processes}
        probes_created = create_automatic_probes(all_services)
        
        return jsonify({
            'success': True,
            'services_discovered': len(all_services),
            'probes_created': probes_created,
            'services': list(all_services.values())
        })
        
    except Exception as e:
        logger.error(f"Erreur découverte services: {e}")
        return jsonify({'success': False, 'error': str(e)}), 500

@app.route('/api/monitoring/probes')
@login_required
def api_list_probes():
    """API: list monitoring probes joined with their discovered service.

    Returns JSON {success, probes, count}; 500 on failure.
    """
    try:
        conn = get_db()
        cursor = conn.cursor()

        cursor.execute('''
            SELECT p.id, p.probe_type, p.check_interval, p.enabled, p.last_check, p.last_status,
                   s.name, s.description, s.port, s.status
            FROM monitoring_probes p
            JOIN discovered_services s ON p.service_id = s.id
            ORDER BY s.name, p.probe_type
        ''')

        rows = cursor.fetchall()
        conn.close()

        # Key order mirrors the SELECT column list above.
        keys = ('id', 'type', 'interval', 'enabled', 'last_check', 'last_status',
                'service_name', 'service_description', 'service_port', 'service_status')
        probes_data = [dict(zip(keys, row)) for row in rows]
        for entry in probes_data:
            # SQLite stores booleans as integers; normalise for JSON.
            entry['enabled'] = bool(entry['enabled'])

        return jsonify({
            'success': True,
            'probes': probes_data,
            'count': len(probes_data)
        })

    except Exception as e:
        logger.error(f"Erreur API liste sondes: {e}")
        return jsonify({'success': False, 'error': str(e)}), 500

@app.route('/api/monitoring/security-events')
@login_required
def api_security_events():
    """API: list security events, newest first.

    Query params: 'limit' (default 100) and 'severity' ('all' disables
    the filter). Returns JSON {success, events, count}; 500 on failure.
    """
    try:
        conn = get_db()
        cursor = conn.cursor()
        
        limit = int(request.args.get('limit', 100))
        severity_filter = request.args.get('severity', 'all')
        
        # NOTE(review): SELECT * plus the positional indexing below depends
        # on the exact column order of security_events; naming the columns
        # explicitly would be safer against schema changes.
        query = "SELECT * FROM security_events"
        params = []
        
        if severity_filter != 'all':
            query += " WHERE severity = ?"
            params.append(severity_filter)
        
        query += " ORDER BY detected_at DESC LIMIT ?"
        params.append(limit)
        
        cursor.execute(query, params)
        events = cursor.fetchall()
        conn.close()
        
        events_data = []
        for event in events:
            # Index 7 is intentionally skipped — presumably a column not
            # exposed by this API; verify against the table schema.
            events_data.append({
                'id': event[0],
                'type': event[1],
                'source_ip': event[2],
                'destination_ip': event[3],
                'port': event[4],
                'severity': event[5],
                'description': event[6],
                'detected_at': event[8],
                'acknowledged': bool(event[9])
            })
        
        return jsonify({
            'success': True,
            'events': events_data,
            'count': len(events_data)
        })
        
    except Exception as e:
        logger.error(f"Erreur API événements sécurité: {e}")
        return jsonify({'success': False, 'error': str(e)}), 500

@app.route('/api/monitoring/config', methods=['GET', 'POST'])
@login_required
def api_monitoring_config():
    """API: read (GET) or update (POST) the monitoring configuration.

    GET returns {success, config: {key: {value, description}}}.
    POST expects a JSON object of {config_key: new_value}; returns 400 on
    a missing/invalid JSON body, 500 on unexpected errors.
    """
    if request.method == 'GET':
        try:
            conn = get_db()
            cursor = conn.cursor()
            cursor.execute("SELECT config_key, config_value, description FROM monitoring_config")
            configs = cursor.fetchall()
            conn.close()

            # Shape: key -> {'value': ..., 'description': ...}
            config_data = {}
            for config in configs:
                config_data[config[0]] = {
                    'value': config[1],
                    'description': config[2]
                }

            return jsonify({'success': True, 'config': config_data})

        except Exception as e:
            logger.error(f"Erreur API config surveillance: {e}")
            return jsonify({'success': False, 'error': str(e)}), 500

    elif request.method == 'POST':
        try:
            data = request.get_json(silent=True)
            # The original crashed with a 500 (None.items()) on a missing or
            # non-object JSON body; reject it explicitly instead.
            if not isinstance(data, dict):
                return jsonify({'success': False, 'error': 'Corps JSON invalide'}), 400

            conn = get_db()
            cursor = conn.cursor()

            for key, value in data.items():
                # Only existing keys are updated; unknown keys are ignored.
                cursor.execute('''
                    UPDATE monitoring_config 
                    SET config_value = ?, updated_at = CURRENT_TIMESTAMP 
                    WHERE config_key = ?
                ''', (str(value), key))

            conn.commit()
            conn.close()

            logger.info(f"Configuration surveillance mise à jour par {session['username']}")
            return jsonify({'success': True, 'message': 'Configuration mise à jour'})

        except Exception as e:
            logger.error(f"Erreur mise à jour config surveillance: {e}")
            return jsonify({'success': False, 'error': str(e)}), 500

# ========================
# MISE À JOUR DE LA FONCTION D'INITIALISATION
# ========================

def initialize_app_advanced():
    """Initialise the application with advanced monitoring.

    Loads config, configures Flask session settings, initialises both
    databases, primes the network-stats cache, and starts the background,
    monitoring and initial-discovery daemon threads.
    """
    logger.info("Initialisation de NeuroPulse Monitor Pro v2.1 avec surveillance avancée...")
    
    # Standard initialisation.
    load_config()
    
    # Configure Flask sessions.
    app.config['SECRET_KEY'] = config['application']['secret_key']
    app.config['PERMANENT_SESSION_LIFETIME'] = timedelta(hours=8)
    app.config['SESSION_COOKIE_SECURE'] = False  # True in production with HTTPS
    app.config['SESSION_COOKIE_HTTPONLY'] = True
    app.config['SESSION_COOKIE_SAMESITE'] = 'Lax'
    
    # Initialise the databases.
    init_database()
    init_advanced_monitoring_db()
    
    # Prime the network-stats cache so the first delta computation has a
    # baseline. Best-effort: failure here must not block startup.
    if PSUTIL_AVAILABLE:
        try:
            net_io = psutil.net_io_counters()
            if net_io:
                global network_stats_cache
                network_stats_cache.update({
                    'last_bytes_sent': net_io.bytes_sent,
                    'last_bytes_recv': net_io.bytes_recv,
                    'last_timestamp': time.time()
                })
        except Exception as e:
            # The original used a bare 'except: pass', which also swallowed
            # SystemExit/KeyboardInterrupt; narrow it and keep a trace.
            logger.debug(f"Initialisation cache réseau ignorée: {e}")
    
    # Start the background tasks.
    background_thread = threading.Thread(target=background_tasks, daemon=True)
    background_thread.start()
    
    # Start the advanced monitoring loop.
    monitoring_thread = threading.Thread(target=advanced_monitoring_loop, daemon=True)
    monitoring_thread.start()
    
    # Kick off the one-shot initial service discovery.
    initial_discovery_thread = threading.Thread(target=initial_service_discovery, daemon=True)
    initial_discovery_thread.start()
    
    logger.info("✅ NeuroPulse Monitor Pro v2.1 avec surveillance avancée initialisé")
    logger.info(f"📁 Base path: {BASE_PATH}")
    logger.info(f"🗄️ Database: {DB_PATH}")
    logger.info(f"⚙️ Config: {CONFIG_PATH}")
    logger.info(f"📝 Logs: {LOG_PATH}")
    logger.info(f"🔧 psutil: {'✅ Disponible' if PSUTIL_AVAILABLE else '❌ Non disponible'}")
    logger.info("🔍 Surveillance automatique: ✅ Activée")
    logger.info("🚨 Détection d'anomalies: ✅ Activée")
    logger.info("🛡️ Surveillance sécurité: ✅ Activée")

def initial_service_discovery():
    """One-shot service discovery run shortly after startup.

    Sleeps briefly so the application finishes booting, then scans
    processes and network ports and creates automatic probes. Any error
    is logged and swallowed.
    """
    # Give the application time to come up completely before scanning.
    time.sleep(10)

    try:
        logger.info("Démarrage de la découverte initiale des services...")

        processes = scan_running_processes()
        network_services = scan_network_ports()

        all_services = {**processes}
        probes_created = create_automatic_probes(all_services)

        logger.info(f"Découverte initiale terminée: {len(all_services)} services, {probes_created} sondes créées")

    except Exception as e:
        logger.error(f"Erreur découverte initiale: {e}")

# Remplacer la fonction initialize_app existante par initialize_app_advanced
# dans la section if __name__ == '__main__':