# tests/test_suite.py
"""
Suite de tests automatisés complète pour NeuroPulse Monitor Pro v2.0
Tests unitaires, intégration, performance et sécurité
"""

# Standard library
import json
import os
import shutil
import sqlite3
import subprocess
import tempfile
import threading
import time
import unittest
from datetime import datetime, timedelta, timezone
from unittest.mock import patch, MagicMock

# Third-party
import jwt
import psutil
import requests

# Import des modules NeuroPulse
import sys
sys.path.append('/opt/neuropulse')

from app.app import app, init_db
from utils.monitoring import SystemMonitor, get_system_metrics
from backup.backup_manager import BackupManager
from api.neuropulse_api import app as api_app

class NeuroPulseTestBase(unittest.TestCase):
    """Base class shared by all NeuroPulse test cases.

    Provides Flask test clients for both the web app and the REST API,
    a throwaway SQLite database file, and per-test wall-clock timing
    that flags slow tests.
    """
    
    @classmethod
    def setUpClass(cls):
        """One-time configuration for the whole test class."""
        cls.app = app
        cls.api_app = api_app
        cls.client = app.test_client()
        cls.api_client = api_app.test_client()
        
        # Test configuration: TESTING makes Flask propagate exceptions
        # to the test runner instead of rendering error pages.
        cls.app.config['TESTING'] = True
        cls.app.config['SECRET_KEY'] = 'test-secret-key'
        cls.api_app.config['TESTING'] = True
        cls.api_app.config['SECRET_KEY'] = 'test-secret-key'
        
        # Temporary database file (delete=False so the closed handle's
        # file survives until tearDownClass removes it explicitly).
        # NOTE(review): test_db_path is never injected into the app
        # config, so init_db() presumably still targets the app's own
        # database path — confirm and wire it in if test isolation from
        # the real database matters.
        cls.temp_db = tempfile.NamedTemporaryFile(delete=False, suffix='.db')
        cls.temp_db.close()
        cls.test_db_path = cls.temp_db.name
        
        # Initialize the schema inside an application context.
        with cls.app.app_context():
            init_db()
    
    @classmethod
    def tearDownClass(cls):
        """Remove the temporary database after all tests have run."""
        if os.path.exists(cls.test_db_path):
            os.unlink(cls.test_db_path)
    
    def setUp(self):
        """Record the wall-clock start time before each test."""
        self.start_time = time.time()
        
    def tearDown(self):
        """After each test, report tests slower than the 5 s threshold."""
        execution_time = time.time() - self.start_time
        if execution_time > 5.0:  # slow-test threshold
            print(f"⚠️  Test lent détecté: {self._testMethodName} ({execution_time:.2f}s)")

class TestSystemMonitoring(NeuroPulseTestBase):
    """Tests for the SystemMonitor metrics-collection layer."""
    
    def setUp(self):
        """Give every test a fresh SystemMonitor instance."""
        super().setUp()
        self.monitor = SystemMonitor()
    
    def test_cpu_metrics_collection(self):
        """CPU metrics expose percent / logical count / frequency with sane values."""
        metrics = self.monitor._get_cpu_metrics()
        
        self.assertIsInstance(metrics, dict)
        self.assertIn('percent', metrics)
        self.assertIn('count_logical', metrics)
        self.assertIn('frequency', metrics)
        
        # percent is a 0-100 percentage; at least one logical core exists.
        self.assertGreaterEqual(metrics['percent'], 0)
        self.assertLessEqual(metrics['percent'], 100)
        self.assertGreater(metrics['count_logical'], 0)
    
    def test_memory_metrics_collection(self):
        """Memory metrics report virtual and swap usage within bounds."""
        metrics = self.monitor._get_memory_metrics()
        
        self.assertIsInstance(metrics, dict)
        self.assertIn('virtual', metrics)
        self.assertIn('swap', metrics)
        
        virtual = metrics['virtual']
        self.assertIn('total', virtual)
        self.assertIn('percent', virtual)
        self.assertGreater(virtual['total'], 0)
        self.assertGreaterEqual(virtual['percent'], 0)
        self.assertLessEqual(virtual['percent'], 100)
    
    def test_disk_metrics_collection(self):
        """Disk metrics include per-partition usage and global I/O counters."""
        metrics = self.monitor._get_disk_metrics()
        
        self.assertIsInstance(metrics, dict)
        self.assertIn('usage', metrics)
        self.assertIn('io_global', metrics)
        
        # At least one mounted partition must be reported.
        self.assertGreater(len(metrics['usage']), 0)
    
    def test_network_metrics_collection(self):
        """Network metrics expose cumulative byte counters and per-NIC speeds."""
        metrics = self.monitor._get_network_metrics()
        
        self.assertIsInstance(metrics, dict)
        self.assertIn('global', metrics)
        self.assertIn('speeds', metrics)
        
        global_stats = metrics['global']
        self.assertIn('bytes_sent', global_stats)
        self.assertIn('bytes_recv', global_stats)
        self.assertGreaterEqual(global_stats['bytes_sent'], 0)
        self.assertGreaterEqual(global_stats['bytes_recv'], 0)
    
    def test_process_metrics_collection(self):
        """Process metrics report totals plus top-CPU / top-memory lists."""
        metrics = self.monitor._get_process_metrics()
        
        self.assertIsInstance(metrics, dict)
        self.assertIn('total', metrics)
        self.assertIn('running', metrics)
        self.assertIn('top_cpu', metrics)
        self.assertIn('top_memory', metrics)
        
        self.assertGreater(metrics['total'], 0)
        self.assertIsInstance(metrics['top_cpu'], list)
        self.assertIsInstance(metrics['top_memory'], list)
    
    def test_system_info_collection(self):
        """System info includes a non-empty hostname, platform and uptime."""
        metrics = self.monitor._get_system_info()
        
        self.assertIsInstance(metrics, dict)
        self.assertIn('hostname', metrics)
        self.assertIn('platform', metrics)
        self.assertIn('uptime', metrics)
        
        self.assertIsInstance(metrics['hostname'], str)
        self.assertGreater(len(metrics['hostname']), 0)
    
    def test_metrics_caching(self):
        """A repeated call served from the cache must not be slower.

        Bug fix: the original asserted the second call was *strictly*
        faster using time.time(), which is flaky — coarse timer
        resolution or scheduler jitter can make both measurements equal
        (or inverted) even when the cache works.  time.perf_counter
        (monotonic, high resolution) plus a non-strict comparison keeps
        the intent without the flakiness.
        """
        start = time.perf_counter()
        metrics1 = self.monitor.get_system_metrics()
        first_call_time = time.perf_counter() - start
        
        start = time.perf_counter()
        metrics2 = self.monitor.get_system_metrics()
        second_call_time = time.perf_counter() - start
        
        # The cached call should take no longer than the cold call.
        self.assertLessEqual(second_call_time, first_call_time)
        
        # Stable hardware facts must agree between the two snapshots.
        self.assertEqual(metrics1['cpu']['count_logical'], metrics2['cpu']['count_logical'])
    
    def test_anomaly_detection(self):
        """Critical CPU/memory/zombie figures must yield critical anomalies."""
        fake_metrics = {
            'cpu': {'percent': 98},  # critical CPU load
            'memory': {'virtual': {'percent': 99}},  # critical memory pressure
            'processes': {'zombie': 15}  # too many zombie processes
        }
        
        anomalies = self.monitor._detect_anomalies(fake_metrics)
        
        self.assertIsInstance(anomalies, list)
        self.assertGreater(len(anomalies), 0)
        
        # At least one anomaly must be flagged as critical.
        critical_anomalies = [a for a in anomalies if a['severity'] == 'critical']
        self.assertGreater(len(critical_anomalies), 0)

class TestWebApplication(NeuroPulseTestBase):
    """Tests for the HTML web-application routes (login, dashboard, logout)."""
    
    def _force_session_login(self):
        """Mark the test client's session as an authenticated admin."""
        with self.client.session_transaction() as sess:
            sess['user'] = 'admin'
    
    def test_index_redirect(self):
        """The root URL redirects unauthenticated visitors to the login page."""
        resp = self.client.get('/')
        self.assertEqual(resp.status_code, 302)  # redirect to login
    
    def test_login_page_get(self):
        """GET /login renders the login form."""
        resp = self.client.get('/login')
        self.assertEqual(resp.status_code, 200)
        self.assertIn(b'login', resp.data.lower())
    
    def test_login_success(self):
        """Valid credentials land the user on the dashboard."""
        resp = self.client.post(
            '/login',
            data={'username': 'admin', 'password': 'admin'},
            follow_redirects=True,
        )
        self.assertEqual(resp.status_code, 200)
        # The final page after redirects must be the dashboard.
        self.assertIn(b'dashboard', resp.data.lower())
    
    def test_login_failure(self):
        """Wrong credentials re-render the form with an error message."""
        resp = self.client.post(
            '/login',
            data={'username': 'wrong', 'password': 'wrong'},
        )
        self.assertEqual(resp.status_code, 200)
        self.assertIn(b'invalide', resp.data.lower())
    
    def test_dashboard_authentication_required(self):
        """An anonymous request to /dashboard is redirected to login."""
        resp = self.client.get('/dashboard')
        self.assertEqual(resp.status_code, 302)  # redirect to login
    
    def test_dashboard_with_authentication(self):
        """An authenticated session can view the dashboard."""
        self._force_session_login()
        
        resp = self.client.get('/dashboard')
        self.assertEqual(resp.status_code, 200)
        self.assertIn(b'dashboard', resp.data.lower())
    
    def test_logout(self):
        """Logging out invalidates the session and re-protects the dashboard."""
        self._force_session_login()
        
        resp = self.client.get('/logout', follow_redirects=True)
        self.assertEqual(resp.status_code, 200)
        
        # The dashboard must once again redirect to login.
        resp = self.client.get('/dashboard')
        self.assertEqual(resp.status_code, 302)

class TestAPIEndpoints(NeuroPulseTestBase):
    """Tests for the REST API: authentication, metrics, services, tickets."""
    
    def setUp(self):
        """Create a valid bearer token for authenticated requests.

        Fix: uses timezone-aware UTC datetimes — ``datetime.utcnow()``
        is deprecated (Python 3.12+) and produces naive timestamps,
        which is exactly the ambiguity JWT ``exp`` claims should avoid.
        """
        super().setUp()
        self.token = jwt.encode({
            'user': 'admin',
            'exp': datetime.now(timezone.utc) + timedelta(hours=1)
        }, 'test-secret-key', algorithm='HS256')
        
        self.auth_headers = {
            'Authorization': f'Bearer {self.token}',
            'Content-Type': 'application/json'
        }
    
    def test_api_authentication_login(self):
        """POST /api/auth/login with valid credentials returns a token."""
        response = self.api_client.post('/api/auth/login',
            json={'username': 'admin', 'password': 'admin'})
        
        self.assertEqual(response.status_code, 200)
        data = json.loads(response.data)
        self.assertTrue(data['success'])
        self.assertIn('token', data)
    
    def test_api_authentication_invalid(self):
        """POST /api/auth/login with bad credentials is rejected with 401."""
        response = self.api_client.post('/api/auth/login',
            json={'username': 'wrong', 'password': 'wrong'})
        
        self.assertEqual(response.status_code, 401)
    
    def test_api_token_verification(self):
        """POST /api/auth/verify confirms a freshly issued token is valid."""
        response = self.api_client.post('/api/auth/verify',
            headers={'Authorization': f'Bearer {self.token}'})
        
        self.assertEqual(response.status_code, 200)
        data = json.loads(response.data)
        self.assertTrue(data['valid'])
    
    def test_api_current_metrics(self):
        """GET /api/metrics/current returns cpu and ram figures."""
        response = self.api_client.get('/api/metrics/current',
            headers=self.auth_headers)
        
        self.assertEqual(response.status_code, 200)
        data = json.loads(response.data)
        self.assertTrue(data['success'])
        self.assertIn('data', data)
        self.assertIn('cpu', data['data'])
        self.assertIn('ram', data['data'])
    
    def test_api_metrics_history(self):
        """GET /api/metrics/history returns a list for the requested window."""
        response = self.api_client.get('/api/metrics/history?hours=1',
            headers=self.auth_headers)
        
        self.assertEqual(response.status_code, 200)
        data = json.loads(response.data)
        self.assertTrue(data['success'])
        self.assertIn('data', data)
        self.assertIsInstance(data['data'], list)
    
    def test_api_services_list(self):
        """GET /api/services returns the monitored-services list."""
        response = self.api_client.get('/api/services',
            headers=self.auth_headers)
        
        self.assertEqual(response.status_code, 200)
        data = json.loads(response.data)
        self.assertTrue(data['success'])
        self.assertIn('services', data)
        self.assertIsInstance(data['services'], list)
    
    def test_api_tickets_list(self):
        """GET /api/tickets returns the ticket list."""
        response = self.api_client.get('/api/tickets',
            headers=self.auth_headers)
        
        self.assertEqual(response.status_code, 200)
        data = json.loads(response.data)
        self.assertTrue(data['success'])
        self.assertIn('tickets', data)
        self.assertIsInstance(data['tickets'], list)
    
    def test_api_create_ticket(self):
        """POST /api/tickets creates a ticket and returns its id with 201."""
        ticket_data = {
            'title': 'Test Ticket',
            'description': 'Test Description',
            'severity': 'medium',
            'service': 'Test Service'
        }
        
        response = self.api_client.post('/api/tickets',
            headers=self.auth_headers, json=ticket_data)
        
        self.assertEqual(response.status_code, 201)
        data = json.loads(response.data)
        self.assertTrue(data['success'])
        self.assertIn('ticket_id', data)
    
    def test_api_unauthorized_access(self):
        """Protected endpoints reject requests without a token (401)."""
        response = self.api_client.get('/api/metrics/current')
        self.assertEqual(response.status_code, 401)
    
    def test_api_health_check(self):
        """GET /api/health is public and reports status and version."""
        response = self.api_client.get('/api/health')
        self.assertEqual(response.status_code, 200)
        
        data = json.loads(response.data)
        self.assertIn('status', data)
        self.assertIn('version', data)

class TestBackupSystem(NeuroPulseTestBase):
    """Tests for the backup subsystem: create, verify, prune, manifest."""
    
    def setUp(self):
        """Point the BackupManager at a throwaway directory."""
        super().setUp()
        self.temp_backup_dir = tempfile.mkdtemp()
        self.backup_manager = BackupManager()
        self.backup_manager.backup_base_dir = self.temp_backup_dir
    
    def tearDown(self):
        """Remove the throwaway backup directory."""
        super().tearDown()
        import shutil
        shutil.rmtree(self.temp_backup_dir, ignore_errors=True)
    
    def test_backup_creation(self):
        """A full backup succeeds and its archive exists on disk."""
        outcome = self.backup_manager.create_full_backup()
        
        self.assertTrue(outcome['success'])
        self.assertIn('backup_path', outcome)
        self.assertTrue(os.path.exists(outcome['backup_path']))
    
    def test_backup_verification(self):
        """A freshly created backup passes integrity verification."""
        archive = self.backup_manager.create_full_backup()['backup_path']
        
        check = self.backup_manager.verify_backup(archive)
        self.assertTrue(check['success'])
    
    def test_backup_cleanup(self):
        """Backups older than the retention window are pruned."""
        forty_days_ago = time.time() - (40 * 24 * 60 * 60)
        for _ in range(3):
            created = self.backup_manager.create_full_backup()
            # Backdate the archive's mtime to simulate an old backup.
            os.utime(created['backup_path'], (forty_days_ago, forty_days_ago))
        
        report = self.backup_manager.cleanup_old_backups()
        
        self.assertIn('deleted_count', report)
        self.assertGreater(report['deleted_count'], 0)
    
    def test_backup_manifest(self):
        """Each backup ships a JSON manifest with name, date, checksum, size."""
        archive = self.backup_manager.create_full_backup()['backup_path']
        
        manifest_file = archive.replace('.tar.gz', '_manifest.json')
        self.assertTrue(os.path.exists(manifest_file))
        
        with open(manifest_file, 'r') as fh:
            manifest = json.load(fh)
        
        for key in ('backup_name', 'created_at', 'checksum', 'size_bytes'):
            self.assertIn(key, manifest)

class TestSecurity(NeuroPulseTestBase):
    """Security regression tests: SQLi, XSS, command injection, rate limiting, JWT expiry."""
    
    def setUp(self):
        """Build the bearer token these tests need.

        Bug fix: the original class referenced ``self.auth_headers`` in
        the SQL-injection and command-injection tests without ever
        defining it (that attribute was only created by
        TestAPIEndpoints.setUp), so those tests crashed with
        AttributeError instead of exercising security behavior.
        """
        super().setUp()
        self.token = jwt.encode({
            'user': 'admin',
            'exp': datetime.now(timezone.utc) + timedelta(hours=1)
        }, 'test-secret-key', algorithm='HS256')
        self.auth_headers = {
            'Authorization': f'Bearer {self.token}',
            'Content-Type': 'application/json'
        }
    
    def test_sql_injection_protection(self):
        """A classic SQLi payload must be stored as data, not executed."""
        malicious_input = "'; DROP TABLE tickets; --"
        
        # Attempt the injection through the API.
        response = self.api_client.post('/api/tickets',
            headers=self.auth_headers,
            json={
                'title': malicious_input,
                'description': 'Test',
                'severity': 'low',
                'service': 'Test'
            })
        
        # The API should accept the request but parameterize the input.
        self.assertEqual(response.status_code, 201)
        
        # The tickets table must still exist afterwards.
        with app.app_context():
            from app import get_db
            db = get_db()
            cursor = db.execute("SELECT name FROM sqlite_master WHERE type='table' AND name='tickets'")
            table_exists = cursor.fetchone() is not None
            self.assertTrue(table_exists)
    
    def test_xss_protection(self):
        """Script tags submitted through forms must be HTML-escaped."""
        malicious_script = '<script>alert("XSS")</script>'
        
        # Authenticate the web session.
        with self.client.session_transaction() as sess:
            sess['user'] = 'admin'
        
        # Create a ticket carrying the malicious payload.
        response = self.client.post('/tickets/new', data={
            'title': malicious_script,
            'description': 'Test description',
            'criticite': 'Faible',
            'service': 'Test'
        }, follow_redirects=True)
        
        # The raw tag must not appear; the escaped form must.
        self.assertNotIn(b'<script>', response.data)
        self.assertIn(b'&lt;script&gt;', response.data)
    
    def test_command_injection_protection(self):
        """Chained shell commands must be rejected with HTTP 403."""
        malicious_command = 'ls; rm -rf /'
        
        response = self.api_client.post('/api/execute',
            headers=self.auth_headers,
            json={'command': malicious_command})
        
        # The dangerous command must be blocked.
        self.assertEqual(response.status_code, 403)
        data = json.loads(response.data)
        self.assertIn('interdite', data['error'].lower())
    
    def test_rate_limiting(self):
        """Repeated failed logins must eventually return HTTP 429."""
        responses = []
        for _ in range(10):
            response = self.api_client.post('/api/auth/login',
                json={'username': 'admin', 'password': 'wrong'})
            responses.append(response.status_code)
        
        # At least one request should have been rate-limited.
        self.assertIn(429, responses)
    
    def test_jwt_token_expiration(self):
        """An expired JWT is rejected with 401."""
        expired_token = jwt.encode({
            'user': 'admin',
            'exp': datetime.now(timezone.utc) - timedelta(hours=1)
        }, 'test-secret-key', algorithm='HS256')
        
        response = self.api_client.get('/api/metrics/current',
            headers={'Authorization': f'Bearer {expired_token}'})
        
        self.assertEqual(response.status_code, 401)

class TestPerformance(NeuroPulseTestBase):
    """Performance smoke tests: latency ceilings, concurrency, memory footprint."""
    
    def setUp(self):
        """Create auth headers for the API latency test.

        Bug fix: test_api_response_time referenced ``self.auth_headers``
        which no class in this hierarchy defined (only
        TestAPIEndpoints.setUp created it), so the test crashed with
        AttributeError before measuring anything.
        """
        super().setUp()
        token = jwt.encode({
            'user': 'admin',
            'exp': datetime.now(timezone.utc) + timedelta(hours=1)
        }, 'test-secret-key', algorithm='HS256')
        self.auth_headers = {
            'Authorization': f'Bearer {token}',
            'Content-Type': 'application/json'
        }
    
    def test_metrics_collection_performance(self):
        """A full metrics snapshot must complete within 2 seconds."""
        monitor = SystemMonitor()
        
        start_time = time.time()
        metrics = monitor.get_system_metrics()
        execution_time = time.time() - start_time
        
        self.assertLess(execution_time, 2.0)
        self.assertIsInstance(metrics, dict)
        self.assertGreater(len(metrics), 0)
    
    def test_api_response_time(self):
        """Each core endpoint must answer within 1 second."""
        endpoints = [
            '/api/metrics/current',
            '/api/services',
            '/api/tickets',
            '/api/health'
        ]
        
        for endpoint in endpoints:
            start_time = time.time()
            response = self.api_client.get(endpoint, headers=self.auth_headers)
            response_time = time.time() - start_time
            
            self.assertLess(response_time, 1.0)
            self.assertEqual(response.status_code, 200)
    
    def test_concurrent_requests(self):
        """Ten parallel /api/health requests must all succeed."""
        def make_request():
            response = self.api_client.get('/api/health')
            return response.status_code
        
        threads = []
        results = []  # list.append is atomic, safe across threads
        
        for _ in range(10):
            thread = threading.Thread(target=lambda: results.append(make_request()))
            threads.append(thread)
            thread.start()
        
        for thread in threads:
            thread.join()
        
        self.assertEqual(len(results), 10)
        self.assertTrue(all(status == 200 for status in results))
    
    def test_memory_usage(self):
        """Repeated metric collection must stay under 100 MB peak."""
        import tracemalloc
        
        tracemalloc.start()
        
        # Simulate a sustained collection workload.
        monitor = SystemMonitor()
        for _ in range(100):
            monitor.get_system_metrics()
        
        current, peak = tracemalloc.get_traced_memory()
        tracemalloc.stop()
        
        # Peak traced allocations must stay below 100 MB.
        self.assertLess(peak / 1024 / 1024, 100)

class TestIntegration(NeuroPulseTestBase):
    """End-to-end integration tests across monitoring and backup."""
    
    def test_full_monitoring_cycle(self):
        """Collect real metrics, then inject critical values through a mock.

        Fix: the original collected metrics into an unused variable and
        asserted nothing; now the real snapshot and the mocked critical
        snapshot are both verified.
        """
        # 1. Collect real metrics and make sure something came back.
        monitor = SystemMonitor()
        metrics = monitor.get_system_metrics()
        self.assertIsInstance(metrics, dict)
        self.assertGreater(len(metrics), 0)
        
        # 2. Inject critical values to drive the alerting path.
        with patch.object(monitor, 'get_system_metrics') as mock_metrics:
            mock_metrics.return_value = {
                'cpu': {'percent': 95},
                'memory': {'virtual': {'percent': 98}},
                'timestamp': datetime.now().isoformat()
            }
            
            # The mocked snapshot must report the critical CPU level.
            self.assertEqual(monitor.get_system_metrics()['cpu']['percent'], 95)
            # Whether alerts actually fire depends on the alerting
            # implementation and is covered elsewhere.
    
    def test_backup_and_restore_cycle(self):
        """Insert a row, back it up, and verify the archive."""
        # 1. Seed the database with a known ticket.
        with app.app_context():
            from app import get_db
            db = get_db()
            db.execute('''
                INSERT INTO tickets (titre, description, criticite, service)
                VALUES (?, ?, ?, ?)
            ''', ('Test Ticket', 'Test Description', 'Faible', 'Test'))
            db.commit()
        
        # 2. Create a backup in a throwaway directory.
        # Bug fix: the original leaked the mkdtemp directory; addCleanup
        # removes it even if an assertion below fails.
        backup_manager = BackupManager()
        backup_manager.backup_base_dir = tempfile.mkdtemp()
        self.addCleanup(shutil.rmtree, backup_manager.backup_base_dir, ignore_errors=True)
        
        result = backup_manager.create_full_backup()
        self.assertTrue(result['success'])
        
        # 3. The archive must pass integrity verification.
        verification = backup_manager.verify_backup(result['backup_path'])
        self.assertTrue(verification['success'])

def run_test_suite():
    """Run the whole NeuroPulse test suite and print a summary report.

    Returns:
        bool: True when every test passed, False otherwise.
    """
    print("🧪 Lancement de la suite de tests NeuroPulse Monitor Pro v2.0")
    print("=" * 60)
    
    loader = unittest.TestLoader()
    suite = unittest.TestSuite()
    
    # Every test class that belongs to the suite.
    test_classes = [
        TestSystemMonitoring,
        TestWebApplication,
        TestAPIEndpoints,
        TestBackupSystem,
        TestSecurity,
        TestPerformance,
        TestIntegration
    ]
    
    for test_class in test_classes:
        suite.addTests(loader.loadTestsFromTestCase(test_class))
    
    # buffer=True captures stdout/stderr of passing tests so output
    # only shows for failures.
    runner = unittest.TextTestRunner(
        verbosity=2,
        failfast=False,
        buffer=True
    )
    
    print(f"📊 Exécution de {suite.countTestCases()} tests...")
    start_time = time.time()
    
    result = runner.run(suite)
    
    execution_time = time.time() - start_time
    passed = result.testsRun - len(result.failures) - len(result.errors)
    
    # Final report.
    print("\n" + "=" * 60)
    print("📋 RAPPORT FINAL")
    print("=" * 60)
    print(f"✅ Tests réussis: {passed}")
    print(f"❌ Tests échoués: {len(result.failures)}")
    print(f"🚨 Erreurs: {len(result.errors)}")
    print(f"⏱️  Temps d'exécution: {execution_time:.2f}s")
    
    if result.failures:
        print(f"\n🔴 ÉCHECS ({len(result.failures)}):")
        for test, traceback in result.failures:
            print(f"  - {test}: {traceback.split('AssertionError:')[-1].strip()}")
    
    if result.errors:
        print(f"\n🚨 ERREURS ({len(result.errors)}):")
        for test, traceback in result.errors:
            print(f"  - {test}: {traceback.split('Exception:')[-1].strip()}")
    
    # Bug fix: guard against ZeroDivisionError when no tests were run
    # (e.g. every class failed to import/load).
    success_rate = (passed / result.testsRun) * 100 if result.testsRun else 0.0
    print(f"\n📈 Taux de réussite: {success_rate:.1f}%")
    
    if success_rate >= 95:
        print("🎉 Excellente qualité du code!")
    elif success_rate >= 85:
        print("👍 Bonne qualité du code")
    else:
        print("⚠️  Améliorations nécessaires")
    
    return result.wasSuccessful()

if __name__ == '__main__':
    # Fix: use sys.exit rather than the site-provided exit() builtin,
    # which is meant for interactive sessions and may be absent when
    # Python runs without the site module (sys is imported above).
    success = run_test_suite()
    sys.exit(0 if success else 1)

# ═══════════════════════════════════════════════════════════════════════════════
# tests/performance_benchmark.py - Tests de performance approfondie
# ═══════════════════════════════════════════════════════════════════════════════

import unittest
import time
import threading
import requests
import statistics
from concurrent.futures import ThreadPoolExecutor, as_completed

class PerformanceBenchmark:
    """Load/performance benchmark harness for a running NeuroPulse instance.

    Each benchmark method stores its figures in ``self.results``;
    ``generate_report`` prints a consolidated summary with
    recommendations.  Requires a live server at ``base_url`` that
    accepts the admin/admin credentials.
    """
    
    def __init__(self, base_url='http://localhost:5000'):
        # base_url: root URL of the NeuroPulse server under test.
        self.base_url = base_url
        self.results = {}
    
    def benchmark_api_endpoints(self, concurrent_users=10, requests_per_user=20):
        """Load-test the main API endpoints with a thread pool.

        Stores avg/median/p95 latency and throughput per endpoint in
        ``self.results`` keyed by the endpoint path.
        """
        print(f"🚀 Benchmark API: {concurrent_users} utilisateurs, {requests_per_user} requêtes/utilisateur")
        
        endpoints = [
            '/api/health',
            '/api/metrics/current',
            '/api/services',
            '/api/tickets'
        ]
        
        # Obtain one auth token up front, reused by every request.
        auth_response = requests.post(f'{self.base_url}/api/auth/login', 
            json={'username': 'admin', 'password': 'admin'})
        token = auth_response.json()['token']
        headers = {'Authorization': f'Bearer {token}'}
        
        for endpoint in endpoints:
            print(f"\n📊 Testing {endpoint}")
            response_times = []
            
            def make_request():
                # Time a single GET; returns (elapsed_seconds, status_code).
                start_time = time.time()
                response = requests.get(f'{self.base_url}{endpoint}', headers=headers)
                response_time = time.time() - start_time
                return response_time, response.status_code
            
            # Fan the load out across the thread pool.
            with ThreadPoolExecutor(max_workers=concurrent_users) as executor:
                futures = []
                for _ in range(concurrent_users * requests_per_user):
                    futures.append(executor.submit(make_request))
                
                for future in as_completed(futures):
                    response_time, status_code = future.result()
                    # Only successful responses count toward latency stats.
                    if status_code == 200:
                        response_times.append(response_time)
            
            # Latency statistics over the successful requests.
            if response_times:
                avg_time = statistics.mean(response_times)
                median_time = statistics.median(response_times)
                p95_time = sorted(response_times)[int(len(response_times) * 0.95)]
                
                print(f"  ⏱️  Temps moyen: {avg_time:.3f}s")
                print(f"  ⏱️  Temps médian: {median_time:.3f}s")
                print(f"  ⏱️  95e percentile: {p95_time:.3f}s")
                print(f"  📈 Requêtes/seconde: {len(response_times) / sum(response_times):.1f}")
                
                self.results[endpoint] = {
                    'avg_time': avg_time,
                    'median_time': median_time,
                    'p95_time': p95_time,
                    # NOTE(review): count / summed-per-call-time is serial
                    # throughput, not true concurrent throughput — confirm
                    # that is the intended figure.
                    'requests_per_second': len(response_times) / sum(response_times)
                }
    
    def benchmark_metrics_collection(self, iterations=100):
        """Time repeated SystemMonitor.get_system_metrics() calls.

        Records avg/median/max latency and collections-per-second under
        the 'metrics_collection' key of ``self.results``.
        """
        print(f"\n🔍 Benchmark collecte de métriques: {iterations} itérations")
        
        from utils.monitoring import SystemMonitor
        monitor = SystemMonitor()
        
        collection_times = []
        
        for i in range(iterations):
            start_time = time.time()
            metrics = monitor.get_system_metrics()
            collection_time = time.time() - start_time
            collection_times.append(collection_time)
            
            # Progress marker every 20 iterations.
            if (i + 1) % 20 == 0:
                print(f"  📊 {i + 1}/{iterations} itérations terminées")
        
        avg_time = statistics.mean(collection_times)
        median_time = statistics.median(collection_times)
        max_time = max(collection_times)
        
        print(f"  ⏱️  Temps moyen: {avg_time:.3f}s")
        print(f"  ⏱️  Temps médian: {median_time:.3f}s")
        print(f"  ⏱️  Temps maximum: {max_time:.3f}s")
        print(f"  📈 Collections/seconde: {1/avg_time:.1f}")
        
        self.results['metrics_collection'] = {
            'avg_time': avg_time,
            'median_time': median_time,
            'max_time': max_time,
            'collections_per_second': 1/avg_time
        }
    
    def benchmark_database_operations(self, operations=1000):
        """Measure raw SQLite insert/select latency on a throwaway DB.

        NOTE(review): commits after every insert, so the insert figure
        reflects per-transaction cost, not batched throughput —
        presumably intentional; confirm before comparing numbers.
        """
        print(f"\n💾 Benchmark base de données: {operations} opérations")
        
        import sqlite3
        import tempfile
        
        # Create a scratch database file.
        with tempfile.NamedTemporaryFile(suffix='.db', delete=False) as tmp:
            test_db = tmp.name
        
        conn = sqlite3.connect(test_db)
        conn.execute('''
            CREATE TABLE test_metrics (
                id INTEGER PRIMARY KEY,
                timestamp TEXT,
                cpu REAL,
                ram REAL,
                disk REAL
            )
        ''')
        conn.commit()
        
        # Insert benchmark (one commit per row).
        insert_times = []
        for i in range(operations):
            start_time = time.time()
            conn.execute('''
                INSERT INTO test_metrics (timestamp, cpu, ram, disk)
                VALUES (?, ?, ?, ?)
            ''', (time.time(), 50.0 + i % 50, 60.0 + i % 40, 70.0 + i % 30))
            conn.commit()
            insert_time = time.time() - start_time
            insert_times.append(insert_time)
        
        # Select benchmark
        select_times = []
        for i in range(operations // 10):  # 10x fewer reads than writes
            start_time = time.time()
            cursor = conn.execute('SELECT * FROM test_metrics ORDER BY timestamp DESC LIMIT 10')
            rows = cursor.fetchall()
            select_time = time.time() - start_time
            select_times.append(select_time)
        
        conn.close()
        os.unlink(test_db)
        
        print(f"  ✏️  Insert moyen: {statistics.mean(insert_times):.4f}s")
        print(f"  📖 Select moyen: {statistics.mean(select_times):.4f}s")
        print(f"  📈 Inserts/seconde: {1/statistics.mean(insert_times):.0f}")
        
        self.results['database'] = {
            'insert_avg': statistics.mean(insert_times),
            'select_avg': statistics.mean(select_times),
            'inserts_per_second': 1/statistics.mean(insert_times)
        }
    
    def generate_report(self):
        """Print a consolidated performance report with recommendations."""
        print("\n" + "=" * 60)
        print("📊 RAPPORT DE PERFORMANCE")
        print("=" * 60)
        
        if 'metrics_collection' in self.results:
            mc = self.results['metrics_collection']
            print(f"🔍 Collecte métriques: {mc['collections_per_second']:.1f}/s (avg: {mc['avg_time']:.3f}s)")
        
        if 'database' in self.results:
            db = self.results['database']
            print(f"💾 Base de données: {db['inserts_per_second']:.0f} inserts/s")
        
        print("\n🌐 Performance API:")
        # API entries are keyed by their endpoint path.
        for endpoint, stats in self.results.items():
            if endpoint.startswith('/api/'):
                print(f"  {endpoint}: {stats['requests_per_second']:.1f} req/s (p95: {stats['p95_time']:.3f}s)")
        
        # Recommendations.
        print("\n💡 RECOMMANDATIONS:")
        
        # Metrics-collection latency check (1 s threshold).
        if 'metrics_collection' in self.results:
            if self.results['metrics_collection']['avg_time'] > 1.0:
                print("  ⚠️  Collecte de métriques lente - optimiser psutil usage")
            else:
                print("  ✅ Performance de collecte acceptable")
        
        # API latency check (p95 above 1 s flags the endpoint as slow).
        slow_apis = [ep for ep, stats in self.results.items() 
                    if ep.startswith('/api/') and stats['p95_time'] > 1.0]
        if slow_apis:
            print(f"  ⚠️  APIs lentes détectées: {', '.join(slow_apis)}")
        else:
            print("  ✅ Performance API acceptable")

if __name__ == '__main__':
    # Run the full benchmark suite against a locally running instance.
    runner = PerformanceBenchmark()
    try:
        runner.benchmark_metrics_collection(50)
        runner.benchmark_database_operations(500)
        runner.benchmark_api_endpoints(5, 10)
        runner.generate_report()
    except Exception as exc:  # best-effort: report the failure and hint at the likely cause
        print(f"❌ Erreur lors du benchmark: {exc}")
        print("💡 Assurez-vous que NeuroPulse est démarré et accessible")