#!/usr/bin/env python3
"""
Scraper intégré amélioré pour jobs.ch avec techniques stealth et gestion des captchas
Combine les fonctionnalités du scraper de base avec les techniques anti-détection avancées
Version avec Selenium standard (sans undetected-chrome)
"""

import csv
import requests
from requests.adapters import HTTPAdapter
from urllib3.util.retry import Retry
import os
import time
import urllib.parse
from pathlib import Path
import hashlib
import logging
import re
from bs4 import BeautifulSoup
from datetime import datetime
import json
import random
import cloudscraper
from fake_useragent import UserAgent
import ssl
import urllib3
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.chrome.options import Options
from selenium.webdriver.chrome.service import Service
from selenium.common.exceptions import TimeoutException, WebDriverException
from typing import Dict, List, Optional, Any

# Disable SSL warnings
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)

class EnhancedJobsScraper:
    def __init__(self, temp_dir="temp_html", delay_range=(15, 30), debug_mode=False, use_selenium=True, headless=True, manual_captcha=True):
        """
        Scraper amélioré avec techniques stealth
        
        Args:
            temp_dir (str): Répertoire temporaire pour les fichiers HTML
            delay_range (tuple): Plage de délais aléatoires entre requêtes
            debug_mode (bool): Active les logs de debug détaillés
            use_selenium (bool): Utiliser Selenium pour contourner les protections
            headless (bool): Mode headless pour le navigateur
            manual_captcha (bool): Permet la résolution manuelle des captchas
        """
        self.temp_dir = Path(temp_dir)
        self.delay_range = delay_range
        self.debug_mode = debug_mode
        self.use_selenium = use_selenium
        self.headless = headless
        self.manual_captcha = manual_captcha
        
        # Create the temporary directory
        self.temp_dir.mkdir(exist_ok=True)
        
        # Logging configuration
        log_level = logging.DEBUG if debug_mode else logging.INFO
        logging.basicConfig(
            level=log_level,
            format='%(asctime)s - %(levelname)s - %(message)s',
            handlers=[
                logging.FileHandler(f'scraping_{datetime.now().strftime("%Y%m%d")}.log'),
                logging.StreamHandler()
            ]
        )
        self.logger = logging.getLogger(__name__)
        
        # Initialize the rotating user agents
        self.ua = UserAgent()
        
        # Session setup
        self.setup_sessions()
        
        # Selenium driver
        self.driver = None
        self.session_count = 0
        self.session_reset_interval = 5
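        # The driver is torn down and recreated after every
        # session_reset_interval page loads (see handle_selenium_download),
        # so no single browser session is reused for too long.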
        
        # Output CSV fields
        self.csv_fields = [
            'id_job', 'url_original', 'titre', 'entreprise', 'lieu_travail', 
            'type_contrat', 'taux_activite', 'date_publication', 'date_expiration',
            'salaire', 'description_entreprise', 'description_poste', 
            'qualifications', 'informations_supplementaires', 'contact',
            'url_candidature', 'logo_entreprise', 'secteur_activite',
            'type_emploi', 'competences', 'avantages', 'date_extraction'
        ]
        
        # Statistics
        self.stats = {
            'total_processed': 0,
            'successful_downloads': 0,
            'successful_extractions': 0,
            'partial_extractions': 0,
            'failed_extractions': 0,
            'captcha_encounters': 0,
            'captcha_resolved': 0,
            'errors': 0
        }
        
        # Progress tracking
        self.progress_file = "scraping_progress.json"
        self.processed_urls = set()
        self.load_progress()

    def setup_sessions(self):
        """Configure les sessions avec rotation et techniques stealth"""
        # Session CloudScraper pour contourner CloudFlare
        self.cloudscraper_session = cloudscraper.create_scraper(
            browser='chrome',
            delay=random.uniform(5, 15)
        )
        
        # Classic requests session with retries
        self.requests_session = requests.Session()
        retry_strategy = Retry(
            total=3,
            backoff_factor=2,
            status_forcelist=[429, 500, 502, 503, 504]
        )
        adapter = HTTPAdapter(max_retries=retry_strategy)
        self.requests_session.mount("http://", adapter)
        self.requests_session.mount("https://", adapter)
        
        # Rotating headers
        self.update_session_headers()

    def update_session_headers(self):
        """Met à jour les headers avec un user agent aléatoire"""
        headers = {
            'User-Agent': self.ua.random,
            'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8',
            'Accept-Language': 'fr-FR,fr;q=0.9,en;q=0.8',
            'Accept-Encoding': 'gzip, deflate, br',
            'Connection': 'keep-alive',
            'Upgrade-Insecure-Requests': '1',
            'Sec-Fetch-Dest': 'document',
            'Sec-Fetch-Mode': 'navigate',
            'Sec-Fetch-Site': 'none',
            'Cache-Control': 'max-age=0'
        }
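        # The Sec-Fetch-* values mimic a top-level navigation (a URL typed
        # into the address bar), which is what a genuine first visit sends.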
        
        self.requests_session.headers.update(headers)
        self.cloudscraper_session.headers.update(headers)

    def create_selenium_driver(self):
        """Crée un driver Selenium stealth avec Chrome standard"""
        try:
            options = Options()
            
            # Arguments stealth avancés
            stealth_args = [
                '--no-sandbox',
                '--disable-dev-shm-usage',
                '--disable-blink-features=AutomationControlled',
                '--disable-extensions',
                '--disable-plugins',
                '--disable-images',  # Performance
                f'--user-agent={self.ua.random}',
                '--lang=fr-FR,fr,en',
                '--disable-web-security',
                '--disable-features=VizDisplayCompositor',
                '--disable-background-timer-throttling',
                '--disable-backgrounding-occluded-windows',
                '--disable-renderer-backgrounding',
                '--disable-background-networking',
                '--disable-sync',
                '--disable-translate',
                '--disable-ipc-flooding-protection',
                '--disable-hang-monitor',
                '--disable-client-side-phishing-detection',
                '--disable-component-update',
                '--disable-default-apps',
                '--disable-domain-reliability',
                '--disable-background-downloads',
                '--disable-add-to-shelf',
                '--disable-breakpad',
                '--memory-pressure-off',
                '--max_old_space_size=4096',
                '--disable-dev-tools',
                '--disable-gpu-sandbox',
                '--disable-software-rasterizer',
                f'--user-data-dir=/tmp/chrome_session_{random.randint(1000, 9999)}_{int(time.time())}'
            ]
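            # NOTE: the /tmp profile path above assumes a POSIX system; on
            # Windows, build it from tempfile.gettempdir() instead.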
            
            if self.headless:
                stealth_args.extend(['--headless=new', '--disable-gpu'])
            
            for arg in stealth_args:
                options.add_argument(arg)
            
            # Advanced preferences to avoid detection
            prefs = {
                "profile.default_content_setting_values": {
                    "images": 2,
                    "plugins": 2,
                    "popups": 2,
                    "geolocation": 2,
                    "notifications": 2,
                    "media_stream": 2,
                    "automatic_downloads": 2
                },
                "profile.managed_default_content_settings": {
                    "images": 2
                },
                "profile.content_settings.exceptions.automatic_downloads.*.setting": 2,
                "profile.default_content_settings.popups": 0
            }
            
            options.add_experimental_option("prefs", prefs)
            options.add_experimental_option("excludeSwitches", [
                "enable-automation",
                "enable-blink-features",
                "enable-logging"
            ])
            options.add_experimental_option('useAutomationExtension', False)
            
            # Create the Chrome service
            service = Service()
            
            # Create the driver with standard Chrome
            driver = webdriver.Chrome(service=service, options=options)
            
            # Advanced anti-detection scripts
            stealth_script = """
            // Remove webdriver traces
            Object.defineProperty(navigator, 'webdriver', {get: () => undefined});
            
            // Tweak navigator properties to look more human
            Object.defineProperty(navigator, 'plugins', {
                get: () => [1, 2, 3, 4, 5]
            });
            
            Object.defineProperty(navigator, 'languages', {
                get: () => ['fr-FR', 'fr', 'en-US', 'en']
            });
            
            // Hide automation
            window.chrome = {
                runtime: {}
            };
            
            // Override permission queries (bind `this`, otherwise the
            // original query throws an Illegal invocation error)
            const originalQuery = window.navigator.permissions.query;
            window.navigator.permissions.query = (parameters) => (
                parameters.name === 'notifications' ?
                Promise.resolve({ state: Notification.permission }) :
                originalQuery.call(window.navigator.permissions, parameters)
            );
            """
            
            # Inject the patches before every document load; execute_script
            # would only affect the page that is currently open.
            driver.execute_cdp_cmd(
                'Page.addScriptToEvaluateOnNewDocument',
                {'source': stealth_script}
            )
            
            # Rewrite the user agent to strip headless traces
            current_ua = driver.execute_script("return navigator.userAgent")
            clean_ua = current_ua.replace("Headless", "").replace("headless", "")
            
            driver.execute_cdp_cmd('Network.setUserAgentOverride', {
                "userAgent": clean_ua
            })
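            # Network.setUserAgentOverride is a DevTools command, so the
            # cleaned UA applies to every request made by this driver.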
            
            # Set a realistic window size
            if not self.headless:
                driver.set_window_size(1366, 768)
            else:
                driver.set_window_size(1920, 1080)
            
            self.logger.info("Standard stealth Chrome driver created successfully")
            return driver
            
        except Exception as e:
            self.logger.error(f"Error creating Selenium driver: {e}")
            return None

    def apply_manual_stealth_techniques(self, driver):
        """Applique des techniques stealth manuelles supplémentaires"""
        try:
            # Script complet d'évasion de détection
            advanced_stealth_script = """
            // === PHASE 1: Hide automation traces ===
            
            // Remove the webdriver property
            delete navigator.__proto__.webdriver;
            
            // Override navigator properties
            Object.defineProperty(navigator, 'webdriver', {
                get: () => undefined,
                configurable: true
            });
            
            // Simulate realistic plugins
            Object.defineProperty(navigator, 'plugins', {
                get: () => ({
                    length: 5,
                    0: { name: 'Chrome PDF Plugin', description: 'Portable Document Format' },
                    1: { name: 'Chrome PDF Viewer', description: 'PDF Viewer' },
                    2: { name: 'Native Client', description: 'Native Client Executable' },
                    3: { name: 'WebKit built-in PDF', description: 'WebKit built-in PDF' },
                    4: { name: 'Shockwave Flash', description: 'Adobe Flash Player' }
                }),
                configurable: true
            });
            
            // Simulate mimeTypes
            Object.defineProperty(navigator, 'mimeTypes', {
                get: () => ({
                    length: 4,
                    0: { type: 'application/pdf', description: 'Portable Document Format' },
                    1: { type: 'application/x-nacl', description: 'Native Client Executable' },
                    2: { type: 'application/x-pnacl', description: 'Portable Native Client Executable' },
                    3: { type: 'application/x-shockwave-flash', description: 'Adobe Flash movie' }
                }),
                configurable: true
            });
            
            // Override the permission API (bind `this`, otherwise the
            // original query throws an Illegal invocation error)
            const originalQuery = window.navigator.permissions.query;
            window.navigator.permissions.query = (parameters) => (
                parameters.name === 'notifications' ?
                    Promise.resolve({ state: 'denied' }) :
                    originalQuery.call(window.navigator.permissions, parameters)
            );
            
            // === PHASE 2: Simulate real browser behaviour ===
            
            // Add a chrome runtime to look like real Chrome
            if (!window.chrome) {
                window.chrome = {};
            }
            window.chrome.runtime = {
                onConnect: undefined,
                onMessage: undefined
            };
            
            // Simulate random mouse events
            function simulateMouseMovement() {
                const event = new MouseEvent('mousemove', {
                    clientX: Math.random() * window.innerWidth,
                    clientY: Math.random() * window.innerHeight,
                    bubbles: true
                });
                document.dispatchEvent(event);
            }
            
            // Trigger periodic mouse movement
            setInterval(simulateMouseMovement, Math.random() * 5000 + 2000);
            
            // === PHASE 3: Hide common detection properties ===
            
            // Clean up driver properties
            const propsToDelete = [
                'webdriver',
                'driver-evaluate',
                '__webdriver_evaluate',
                '__selenium_evaluate',
                '__webdriver_script_function',
                '__selenium_unwrapped',
                '__fxdriver_evaluate',
                '__driver_evaluate',
                '__selenium-evaluate',
                '__fxdriver_unwrapped'
            ];
            
            propsToDelete.forEach(prop => {
                try {
                    delete window[prop];
                    delete document[prop];
                } catch(e) {}
            });
            
            // Spoof the WebGL vendor/renderer used for fingerprinting
            const getParameter = WebGLRenderingContext.prototype.getParameter;
            WebGLRenderingContext.prototype.getParameter = function(parameter) {
                if (parameter === 37445) {  // UNMASKED_VENDOR_WEBGL
                    return 'Intel Inc.';
                }
                if (parameter === 37446) {  // UNMASKED_RENDERER_WEBGL
                    return 'Intel Iris OpenGL Engine';
                }
                return getParameter.call(this, parameter);
            };
            
            console.log('Stealth techniques applied successfully');
            """
            
            driver.execute_script(advanced_stealth_script)
            
            # Add realistic cookies
            self.add_realistic_cookies(driver)
            
            # Simulate browsing history
            driver.execute_script("""
                if (typeof(Storage) !== "undefined") {
                    localStorage.setItem('visited_before', 'true');
                    localStorage.setItem('last_visit', Date.now().toString());
                    sessionStorage.setItem('session_start', Date.now().toString());
                }
            """)
            
        except Exception as e:
            self.logger.debug(f"Erreur application techniques stealth: {e}")
    
    def add_realistic_cookies(self, driver):
        """Ajoute des cookies réalistes pour simuler un utilisateur normal"""
        try:
            # Aller sur la page d'accueil pour ajouter cookies
            driver.get("https://www.jobs.ch")
            time.sleep(2)
            
            # Cookies typiques d'un navigateur réel
            realistic_cookies = [
                {"name": "_ga", "value": f"GA1.2.{random.randint(100000000, 999999999)}.{int(time.time())}"},
                {"name": "_gid", "value": f"GA1.2.{random.randint(100000000, 999999999)}.{int(time.time())}"},
                {"name": "session_id", "value": hashlib.md5(str(time.time()).encode()).hexdigest()},
                {"name": "lang_pref", "value": "fr"},
                {"name": "timezone", "value": "Europe/Paris"},
            ]
            
            for cookie in realistic_cookies:
                try:
                    driver.add_cookie(cookie)
                except Exception:
                    pass
                    
        except Exception as e:
            self.logger.debug(f"Error adding cookies: {e}")

    def detect_captcha_or_block(self, content: str) -> Dict[str, Any]:
        """Détecte les captchas et blocages dans le contenu"""
        block_info = {
            'has_block': False,
            'block_type': None,
            'indicators': []
        }
        
        content_lower = content.lower()
        
        # Indicateurs de captcha/blocage
        captcha_indicators = [
            ('recaptcha', 'recaptcha'),
            ('hcaptcha', 'hcaptcha'),
            ('cloudflare', 'cloudflare'),
            ('access denied', 'access_denied'),
            ('blocked', 'blocked'),
            ('bot detected', 'bot_detection'),
            ('verify you are human', 'human_verification'),
            ('please wait', 'loading_check'),
            ('checking your browser', 'browser_check'),
            ('security check', 'security_check')
        ]
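        # If several indicators match, block_type ends up holding the last
        # match, while 'indicators' accumulates every one of them.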
        
        for indicator, block_type in captcha_indicators:
            if indicator in content_lower:
                block_info['has_block'] = True
                block_info['block_type'] = block_type
                block_info['indicators'].append(indicator)
        
        # Flag suspiciously short pages (unless they are plain 404s)
        if len(content) < 5000 and not any(
            marker in content_lower for marker in ('404', 'not found')
        ):
            block_info['has_block'] = True
            block_info['block_type'] = 'suspicious_short'
            block_info['indicators'].append('page_too_short')
        
        return block_info
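    # Example return value for a Cloudflare challenge page:
    #   {'has_block': True, 'block_type': 'cloudflare', 'indicators': ['cloudflare']}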

    def handle_manual_captcha(self, url: str) -> bool:
        """Gère la résolution manuelle des captchas"""
        try:
            self.logger.warning("CAPTCHA détecté - Résolution manuelle requise")
            
            # Fermer le driver existant pour éviter les conflits
            if self.driver:
                try:
                    self.driver.quit()
                    time.sleep(2)  # Attendre que le processus se ferme
                except:
                    pass
                self.driver = None
            
            # Create a new, visible driver for manual resolution
            self.logger.info("Opening a visible browser for manual resolution...")
            
            # Options for the visible browser
            visible_options = Options()
            basic_args = [
                '--no-sandbox',
                '--disable-dev-shm-usage',
                f'--user-agent={self.ua.random}',
                '--lang=fr-FR,fr,en',
                '--disable-blink-features=AutomationControlled',
                f'--user-data-dir=/tmp/chrome_manual_{random.randint(1000, 9999)}_{int(time.time())}',
                '--disable-extensions'
            ]
            
            for arg in basic_args:
                visible_options.add_argument(arg)
            
            visible_options.add_experimental_option("excludeSwitches", ["enable-automation"])
            visible_options.add_experimental_option('useAutomationExtension', False)
            
            service = Service()
            visible_driver = webdriver.Chrome(service=service, options=visible_options)
            
            # Basic anti-detection script
            visible_driver.execute_script("Object.defineProperty(navigator, 'webdriver', {get: () => undefined});")
            visible_driver.set_window_size(1366, 768)
            
            # Navigate to the URL
            visible_driver.get(url)
            time.sleep(3)
            
            # Instructions for the user
            print("\n" + "="*60)
            print("CAPTCHA DETECTED - MANUAL INTERVENTION REQUIRED")
            print("="*60)
            print("A Chrome browser has opened showing the blocked page.")
            print("Instructions:")
            print("1. Solve the CAPTCHA in the browser")
            print("2. Wait until the page has fully loaded")
            print("3. Press ENTER here when done")
            print("4. Or type 'skip' to skip this URL")
            print("-" * 60)
            
            # Wait for the user to intervene
            max_attempts = 3
            for attempt in range(max_attempts):
                user_input = input(f"CAPTCHA solved? [Enter to continue / 'skip' to skip] (Attempt {attempt + 1}/{max_attempts}): ").strip().lower()
                
                if user_input == 'skip':
                    self.logger.info("URL skipped by the user")
                    visible_driver.quit()
                    return False
                
                # Check whether the captcha has been solved
                try:
                    current_source = visible_driver.page_source
                    block_info = self.detect_captcha_or_block(current_source)
                    
                    if not block_info['has_block']:
                        self.logger.info("CAPTCHA solved successfully!")
                        
                        # Grab the content and close the visible driver
                        final_content = visible_driver.page_source
                        visible_driver.quit()
                        
                        # Recreate the main driver in headless mode
                        self.driver = self.create_selenium_driver()
                        
                        # Temporarily store the resolved content
                        self._resolved_content = final_content
                        
                        self.stats['captcha_resolved'] += 1
                        return True
                    else:
                        remaining_attempts = max_attempts - attempt - 1
                        if remaining_attempts > 0:
                            self.logger.warning(f"CAPTCHA not solved yet. {remaining_attempts} attempt(s) left")
                            print("The CAPTCHA still seems to be present. Please check and try again.")
                        else:
                            self.logger.error("CAPTCHA not solved after 3 attempts")
                            
                except Exception as e:
                    self.logger.error(f"Error while verifying the CAPTCHA: {e}")
            
            # Close the visible driver
            visible_driver.quit()
            return False
            
        except Exception as e:
            self.logger.error(f"Error during manual captcha handling: {e}")
            return False

    def handle_selenium_download(self, url: str) -> Optional[str]:
        """Télécharge une page avec Selenium en gérant les captchas"""
        try:
            # Créer/reset le driver si nécessaire
            if not self.driver or self.session_count >= self.session_reset_interval:
                if self.driver:
                    try:
                        self.driver.quit()
                        time.sleep(3)  # Attendre que le processus se ferme complètement
                    except:
                        pass
                    self.driver = None
                
                self.driver = self.create_selenium_driver()
                if not self.driver:
                    return None
                
                # Apply the manual stealth techniques
                try:
                    self.apply_manual_stealth_techniques(self.driver)
                except Exception as e:
                    self.logger.debug(f"Error applying stealth techniques: {e}")
                
                self.session_count = 0
            
            self.session_count += 1
            
            # Human-like navigation
            if random.random() < 0.3:  # 30% of the time, visit the home page first
                try:
                    self.logger.info("Preliminary visit to the home page")
                    self.driver.get("https://www.jobs.ch")
                    time.sleep(random.uniform(3, 8))
                    
                    # Simulated scrolling
                    self.driver.execute_script("window.scrollTo(0, 300);")
                    time.sleep(random.uniform(1, 3))
                    self.driver.execute_script("window.scrollTo(0, 0);")
                    time.sleep(random.uniform(1, 2))
                except Exception as e:
                    self.logger.debug(f"Error during preliminary navigation: {e}")
            
            # Navigate to the target URL with retries
            max_nav_attempts = 3
            page_source = None
            
            for nav_attempt in range(max_nav_attempts):
                try:
                    self.logger.info(f"Navigating to the target URL (attempt {nav_attempt + 1})")
                    self.driver.get(url)
                    time.sleep(random.uniform(5, 10))
                    
                    page_source = self.driver.page_source
                    if len(page_source) > 1000:  # Make sure we actually got content
                        break
                    else:
                        self.logger.warning(f"Content too short: {len(page_source)} characters")
                        
                except Exception as e:
                    self.logger.warning(f"Navigation error on attempt {nav_attempt + 1}: {e}")
                    if nav_attempt < max_nav_attempts - 1:
                        time.sleep(5)
                        continue
                    else:
                        return None
            
            if not page_source:
                return None
            
            # Check for blocks
            block_info = self.detect_captcha_or_block(page_source)
            
            if block_info['has_block']:
                self.stats['captcha_encounters'] += 1
                self.logger.warning(f"Block detected: {block_info['block_type']} - {block_info['indicators']}")
                
                # Bypass strategies
                if block_info['block_type'] == 'cloudflare':
                    # Wait for automatic resolution
                    self.logger.info("Waiting for Cloudflare to resolve itself...")
                    time.sleep(30)
                    try:
                        self.driver.refresh()
                        time.sleep(10)
                        page_source = self.driver.page_source
                        
                        if not self.detect_captcha_or_block(page_source)['has_block']:
                            self.stats['captcha_resolved'] += 1
                            self.logger.info("CloudFlare contourné")
                        else:
                            # Attente plus longue
                            time.sleep(60)
                            return None
                    except Exception as e:
                        self.logger.error(f"Erreur gestion CloudFlare: {e}")
                        return None
                
                elif 'recaptcha' in block_info['block_type'] and self.manual_captcha:
                    # Manual reCAPTCHA resolution
                    self.logger.info("reCAPTCHA detected - switching to manual resolution")
                    if self.handle_manual_captcha(url):
                        # Use the temporarily stored resolved content
                        if hasattr(self, '_resolved_content'):
                            page_source = self._resolved_content
                            delattr(self, '_resolved_content')
                            self.logger.info("Resuming after manual CAPTCHA resolution")
                        else:
                            return None
                    else:
                        self.logger.warning("Manual resolution failed, abandoning this URL")
                        return None
                
                elif 'recaptcha' in block_info['block_type'] and not self.manual_captcha:
                    # Automatic mode only - wait and retry
                    self.logger.info("reCAPTCHA detected - automatic mode: waiting and retrying")
                    time.sleep(random.uniform(60, 120))
                    try:
                        self.driver.refresh()
                        time.sleep(10)
                        page_source = self.driver.page_source
                    except Exception as e:
                        self.logger.error(f"Erreur retry reCAPTCHA: {e}")
                        return None
                
                else:
                    # Generic block
                    if self.manual_captcha:
                        self.logger.info("Generic block detected - attempting manual resolution")
                        if self.handle_manual_captcha(url):
                            if hasattr(self, '_resolved_content'):
                                page_source = self._resolved_content
                                delattr(self, '_resolved_content')
                            else:
                                return None
                        else:
                            return None
                    else:
                        self.logger.info("Blocage générique - attente longue")
                        time.sleep(random.uniform(30, 60))
                        return None
            
            # Simulate reading if no block was hit
            try:
                total_height = self.driver.execute_script("return document.body.scrollHeight")
                viewport_height = self.driver.execute_script("return window.innerHeight")
                
                if total_height > viewport_height:
                    scroll_steps = random.randint(2, 4)
                    for i in range(scroll_steps):
                        scroll_position = (total_height / scroll_steps) * (i + 1)
                        self.driver.execute_script(f"window.scrollTo(0, {scroll_position});")
                        time.sleep(random.uniform(1, 3))
            except Exception as e:
                self.logger.debug(f"Erreur simulation lecture: {e}")
            
            return page_source
            
        except Exception as e:
            self.logger.error(f"Erreur Selenium pour {url}: {e}")
            # Essayer de recréer le driver en cas d'erreur critique
            if self.driver:
                try:
                    self.driver.quit()
                except:
                    pass
                self.driver = None
            return None

    def download_page(self, url: str) -> Optional[Path]:
        """Télécharge une page avec fallback multi-méthodes"""
        try:
            self.logger.info(f"Téléchargement: {url}")
            
            content = None
            method_used = None
            
            # Method 1: Selenium (recommended)
            if self.use_selenium:
                content = self.handle_selenium_download(url)
                method_used = "selenium"
            
            # Method 2: CloudScraper fallback
            if not content:
                try:
                    self.logger.info("Trying CloudScraper...")
                    response = self.cloudscraper_session.get(url, timeout=30)
                    if response.status_code == 200:
                        content = response.text
                        method_used = "cloudscraper"
                    else:
                        self.logger.warning(f"CloudScraper status: {response.status_code}")
                except Exception as e:
                    self.logger.debug(f"CloudScraper failed: {e}")
            
            # Method 3: Classic requests with rotating headers
            if not content:
                try:
                    self.logger.info("Trying classic requests...")
                    self.update_session_headers()
                    response = self.requests_session.get(url, timeout=30)
                    if response.status_code == 200:
                        content = response.text
                        method_used = "requests"
                except Exception as e:
                    self.logger.debug(f"Classic requests failed: {e}")
            
            if not content:
                self.logger.error(f"All methods failed for: {url}")
                self.stats['errors'] += 1
                return None
            
            # Final block check
            block_info = self.detect_captcha_or_block(content)
            if block_info['has_block']:
                self.logger.warning(f"Blocked content detected with {method_used}: {block_info['block_type']}")
                return None
            
            # Save the file
            filename = self.get_safe_filename(url)
            filepath = self.temp_dir / filename
            
            with open(filepath, 'w', encoding='utf-8') as f:
                f.write(content)
            
            self.stats['successful_downloads'] += 1
            self.logger.info(f"Téléchargement réussi avec {method_used}: {len(content)} caractères")
            return filepath
            
        except Exception as e:
            self.logger.error(f"Erreur téléchargement {url}: {e}")
            self.stats['errors'] += 1
            return None

    def get_safe_filename(self, url):
        """Génère un nom de fichier temporaire sécurisé"""
        url_hash = hashlib.md5(url.encode()).hexdigest()[:12]
        return f"temp_{url_hash}.html"

    def clean_text(self, text):
        """Nettoie et normalise le texte"""
        if not text:
            return ""
        
        text = re.sub(r'\s+', ' ', text.strip())
        text = text.replace('&nbsp;', ' ')
        text = text.replace('\n', ' ')
        text = text.replace('\t', ' ')
        
        return text

    def clean_html_preserve_lines(self, html_content: str) -> str:
        """Nettoie le HTML en préservant les retours à la ligne"""
        if not html_content:
            return ""
        
        soup = BeautifulSoup(html_content, "html.parser")
        
        block_tags = ['p', 'div', 'br', 'h1', 'h2', 'h3', 'h4', 'h5', 'h6', 
                     'li', 'ul', 'ol', 'blockquote', 'pre', 'hr']
        
        for tag in soup.find_all(block_tags):
            if tag.name == 'br':
                tag.replace_with('\n')
            elif tag.name in ['li']:
                tag.insert(0, '• ')
                tag.append('\n')
            elif tag.name in ['ul', 'ol']:
                tag.append('\n')
            else:
                if not tag.get_text(strip=True):
                    tag.replace_with('\n')
                else:
                    tag.append('\n')
        
        text = soup.get_text(separator='', strip=False)
        lines = text.split('\n')
        cleaned_lines = []
        
        for line in lines:
            line = line.strip()
            if line:
                cleaned_lines.append(line)
            elif cleaned_lines and cleaned_lines[-1]:
                cleaned_lines.append('')
        
        result = '\n'.join(cleaned_lines)
        result = re.sub(r'\n{3,}', '\n\n', result)
        
        return result.strip()

    def extract_job_id_from_url(self, url):
        """Extrait l'ID du job depuis l'URL"""
        # Méthode 1: UUID pattern
        uuid_pattern = r'([a-f0-9]{8}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{12})'
        match = re.search(uuid_pattern, url)
        if match:
            return match.group(1)
        
        # Méthode 2: /detail/ID/ pattern
        detail_pattern = r'/detail/([^/]+)/'
        match = re.search(detail_pattern, url)
        if match:
            return match.group(1)
        
        # Fallback: hash de l'URL
        return hashlib.md5(url.encode()).hexdigest()[:12]
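    # e.g. a URL shaped like .../vacancies/detail/<uuid>/ yields the UUID,
    # .../detail/12345/ yields '12345', and anything else falls back to a
    # 12-character MD5 prefix (the URL shapes above are illustrative).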

    def extract_json_ld_data(self, soup: BeautifulSoup) -> Optional[Dict]:
        """Extrait les données JSON-LD JobPosting"""
        scripts = soup.find_all("script", type="application/ld+json")
        
        for script in scripts:
            try:
                data_json = json.loads(script.string)
                items = data_json if isinstance(data_json, list) else [data_json]
                
                for item in items:
                    if item.get("@type") == "JobPosting":
                        return item
                        
            except (json.JSONDecodeError, KeyError, AttributeError):
                continue
        
        return None
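    # The JSON-LD payload follows the schema.org JobPosting vocabulary;
    # extract_job_data below reads title, description, hiringOrganization,
    # jobLocation, employmentType, datePosted, industry, validThrough,
    # qualifications and skills from it.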

    def extract_job_data(self, html_file, original_url):
        """Extrait les données d'une offre d'emploi avec méthodes combinées"""
        try:
            with open(html_file, 'r', encoding='utf-8') as f:
                content = f.read()
            
            soup = BeautifulSoup(content, 'html.parser')
            
            job_data = {}
            job_data['id_job'] = self.extract_job_id_from_url(original_url)
            job_data['url_original'] = original_url
            job_data['date_extraction'] = datetime.now().strftime('%Y-%m-%d %H:%M:%S')
            
            # Priority 1: JSON-LD extraction (most reliable)
            json_data = self.extract_json_ld_data(soup)
            
            if json_data:
                self.logger.info("Using JSON-LD data")
                
                job_data['titre'] = json_data.get("title", "").strip()
                
                # Description with HTML cleanup
                description_html = json_data.get("description", "")
                job_data['description_poste'] = self.clean_html_preserve_lines(description_html)
                
                # Employer information
                employer_info = json_data.get("hiringOrganization", {})
                job_data['entreprise'] = employer_info.get("name", "").strip()
                
                # Location
                job_location = json_data.get("jobLocation", {})
                address_info = job_location.get("address", {})
                job_data['lieu_travail'] = address_info.get("addressLocality", "")
                
                # Employment type
                employment_type = json_data.get("employmentType", [])
                if isinstance(employment_type, list):
                    employment_type = ', '.join(employment_type)
                job_data['type_contrat'] = employment_type
                
                # Publication date
                job_data['date_publication'] = json_data.get("datePosted", "")
                
                # Industry
                job_data['secteur_activite'] = json_data.get("industry", "")
                
                # Other available JSON-LD fields
                job_data['date_expiration'] = json_data.get("validThrough", "")
                job_data['qualifications'] = json_data.get("qualifications", "")
                job_data['competences'] = json_data.get("skills", "")
                
                # Remaining fields left empty or defaulted
                job_data['taux_activite'] = ""
                job_data['salaire'] = ""
                job_data['description_entreprise'] = ""
                job_data['informations_supplementaires'] = ""
                job_data['contact'] = ""
                job_data['url_candidature'] = ""
                job_data['logo_entreprise'] = ""
                job_data['type_emploi'] = ""
                job_data['avantages'] = ""
                
            else:
                # Priority 2: classic HTML extraction (the original script's method)
                self.logger.info("Using classic HTML extraction")
                
                # Job title
                title_elem = (
                    soup.find('h1', {'data-cy': 'vacancy-title'}) or
                    soup.find('h1', class_=re.compile('textStyle_h')) or
                    soup.find('h1') or
                    soup.find('title')
                )
                job_data['titre'] = self.clean_text(title_elem.text if title_elem else "")
                
                # Company from the page title ('Annonce auprès de' is the
                # literal prefix jobs.ch uses in its French <title> tags)
                job_data['entreprise'] = ""
                page_title = soup.find('title')
                if page_title and ' - ' in page_title.text:
                    title_parts = page_title.text.split(' - ')
                    if len(title_parts) > 1 and 'jobs.ch' not in title_parts[1]:
                        job_data['entreprise'] = self.clean_text(title_parts[1].replace('Annonce auprès de', '').strip())
                
                # Logo and location
                logo_elem = soup.find('div', {'data-cy': 'vacancy-logo'})
                job_data['logo_entreprise'] = ""
                job_data['lieu_travail'] = ""
                
                if logo_elem:
                    img = logo_elem.find('img')
                    if img and img.get('src'):
                        job_data['logo_entreprise'] = img.get('src')
                    
                    location_elem = (
                        logo_elem.find('p', class_='textStyle_p1') or
                        logo_elem.find('p', class_=re.compile('textStyle')) or
                        logo_elem.find('p')
                    )
                    if location_elem:
                        job_data['lieu_travail'] = self.clean_text(location_elem.text)
                
                # Job information
                info_section = soup.find('div', {'data-cy': 'vacancy-info'})
                job_data['type_contrat'] = ""
                job_data['taux_activite'] = ""
                job_data['date_publication'] = ""
                job_data['salaire'] = ""
                
                if info_section:
                    info_items = info_section.find_all('li')
                    for item in info_items:
                        text = self.clean_text(item.text)
                        
                        if any(keyword in text.lower() for keyword in ['publication', 'publié', 'date']):
                            job_data['date_publication'] = text.split(':', 1)[1].strip() if ':' in text else ""
                        elif any(keyword in text.lower() for keyword in ['activité', 'taux', '%']):
                            job_data['taux_activite'] = text.split(':', 1)[1].strip() if ':' in text else ""
                        elif any(keyword in text.lower() for keyword in ['contrat', 'durée', 'type']):
                            job_data['type_contrat'] = text.split(':', 1)[1].strip() if ':' in text else ""
                        elif any(keyword in text.lower() for keyword in ['salaire', 'rémunération', 'chf']):
                            job_data['salaire'] = text.split(':', 1)[1].strip() if ':' in text else ""
                
                # Description
                description_elem = soup.find('div', {'data-cy': 'vacancy-description'})
                job_data['description_entreprise'] = ""
                job_data['description_poste'] = ""
                job_data['qualifications'] = ""
                job_data['informations_supplementaires'] = ""
                
                if description_elem:
                    # Section-by-section extraction
                    sections = {}
                    current_section = None
                    current_content = []
                    
                    for elem in description_elem.find_all(['h1', 'h2', 'h3', 'h4', 'h5', 'h6', 'p', 'ul', 'li', 'div']):
                        if elem.name in ['h1', 'h2', 'h3', 'h4', 'h5', 'h6']:
                            if current_section and current_content:
                                sections[current_section] = '\n'.join(current_content)
                            
                            current_section = self.clean_text(elem.text).lower()
                            current_content = []
                        
                        elif elem.name in ['p', 'li'] or (elem.name == 'div' and elem.text and len(elem.text.strip()) > 20):
                            text = self.clean_text(elem.text)
                            if text and len(text) > 10:
                                current_content.append(text)
                    
                    if current_section and current_content:
                        sections[current_section] = '\n'.join(current_content)
                    
                    # Map the sections to fields
                    for section_title, content in sections.items():
                        if any(keyword in section_title for keyword in ['entreprise', 'société', 'compagnie', 'about', 'company']):
                            job_data['description_entreprise'] = content
                        elif any(keyword in section_title for keyword in ['poste', 'mission', 'tâches', 'responsabilités', 'description', 'job', 'position']):
                            job_data['description_poste'] = content
                        elif any(keyword in section_title for keyword in ['qualifications', 'profil', 'exigences', 'requis', 'requirement', 'qualification']):
                            job_data['qualifications'] = content
                        elif any(keyword in section_title for keyword in ['informations supplémentaires', 'conditions', 'avantages', 'additional', 'benefits']):
                            job_data['informations_supplementaires'] = content
                    
                    # If no sections were found, take all the content
                    if not any([job_data['description_entreprise'], job_data['description_poste'], job_data['qualifications']]):
                        all_text = []
                        for elem in description_elem.find_all(['p', 'li', 'div']):
                            text = self.clean_text(elem.text)
                            if text and len(text) > 20:
                                all_text.append(text)
                        
                        if all_text:
                            combined_text = '\n'.join(all_text)
                            job_data['description_poste'] = combined_text[:1000]
                
                # Industry
                meta_section = soup.find('div', {'data-cy': 'vacancy-meta'})
                job_data['secteur_activite'] = ""
                
                if meta_section:
                    category_links = meta_section.find_all('a')
                    categories = [self.clean_text(link.text) for link in category_links if link.text.strip()]
                    job_data['secteur_activite'] = ' | '.join(categories)
                
                # Application URL
                apply_buttons = soup.find_all('button', {'data-cy': re.compile('apply-button')})
                job_data['url_candidature'] = "See the apply button on the page" if apply_buttons else ""
                
                # Remaining fields
                job_data['date_expiration'] = ""
                job_data['contact'] = ""
                job_data['type_emploi'] = ""
                job_data['competences'] = ""
                job_data['avantages'] = ""
            
            # Validate the extraction
            extracted_fields = sum(1 for field in ['titre', 'lieu_travail', 'type_contrat', 'taux_activite'] 
                                 if job_data.get(field, '').strip())
            
            if extracted_fields >= 3:
                self.stats['successful_extractions'] += 1
            elif extracted_fields >= 1:
                self.stats['partial_extractions'] += 1
                self.logger.warning(f"Partial extraction: {extracted_fields}/4 main fields")
            else:
                self.stats['failed_extractions'] += 1
                self.logger.error("Extraction failed: no main field was extracted")
            
            return job_data
            
        except Exception as e:
            self.logger.error(f"Extraction error for {html_file}: {e}")
            self.stats['errors'] += 1
            return None

    def cleanup_temp_file(self, filepath):
        """Supprime le fichier temporaire"""
        try:
            if filepath and filepath.exists():
                filepath.unlink()
        except Exception as e:
            self.logger.warning(f"Impossible de supprimer {filepath}: {e}")

    def save_progress(self):
        """Sauvegarde le progrès"""
        progress_data = {
            'processed_urls': list(self.processed_urls),
            'stats': self.stats,
            'timestamp': datetime.now().isoformat()
        }
        
        try:
            with open(self.progress_file, 'w', encoding='utf-8') as f:
                json.dump(progress_data, f, indent=2, ensure_ascii=False)
        except Exception as e:
            self.logger.error(f"Erreur sauvegarde: {e}")

    def load_progress(self):
        """Charge le progrès précédent"""
        if not os.path.exists(self.progress_file):
            return
        
        try:
            with open(self.progress_file, 'r', encoding='utf-8') as f:
                progress_data = json.load(f)
            
            self.processed_urls = set(progress_data.get('processed_urls', []))
            saved_stats = progress_data.get('stats', {})
            
            # Merge stats
            for key in self.stats:
                if key in saved_stats:
                    self.stats[key] = saved_stats[key]
            
            self.logger.info(f"Progrès chargé: {len(self.processed_urls)} URLs traitées")
        except Exception as e:
            self.logger.error(f"Erreur chargement: {e}")

    def process_csv_jobs(self, csv_file):
        """Traite toutes les URLs du CSV avec techniques stealth"""
        today = datetime.now().strftime('%Y%m%d')
        output_file = f"jobsch{today}.csv"
        
        self.logger.info(f"Début du traitement stealth: {csv_file} -> {output_file}")
        
        try:
            # Read the URLs
            with open(csv_file, 'r', encoding='utf-8') as infile:
                reader = csv.DictReader(infile)
                urls = [(row.get('Lien', '').strip(), row) for row in reader if row.get('Lien', '').strip()]
            
            # Skip URLs that have already been processed
            urls_to_process = [(url, row) for url, row in urls if url not in self.processed_urls]
            
            if not urls_to_process:
                self.logger.info("All URLs have already been processed")
                return True
            
            # Shuffle to avoid request patterns
            random.shuffle(urls_to_process)
            
            self.logger.info(f"{len(urls_to_process)} new URLs to process")
            
            # Create the output file
            file_exists = os.path.exists(output_file)
            mode = 'a' if file_exists else 'w'
            
            with open(output_file, mode=mode, newline='', encoding='utf-8') as outfile:
                writer = csv.DictWriter(outfile, fieldnames=self.csv_fields)
                
                if not file_exists:
                    writer.writeheader()
                
                for i, (url, original_row) in enumerate(urls_to_process):
                    self.stats['total_processed'] += 1
                    
                    job_id = original_row.get('ID_Job', f"JOB_{i}")
                    job_type = original_row.get('Type', 'job')
                    
                    self.logger.info(f"Traitement [{i+1}/{len(urls_to_process)}]: {job_type}_{job_id}")
                    
                    # Download using stealth techniques
                    temp_file = self.download_page(url)
                    
                    if temp_file:
                        job_data = self.extract_job_data(temp_file, url)
                        
                        if job_data:
                            row_data = {field: job_data.get(field, "") for field in self.csv_fields}
                            writer.writerow(row_data)
                            outfile.flush()
                            self.processed_urls.add(url)
                            self.logger.info(f"Données extraites et sauvegardées pour {job_id}")
                        
                        self.cleanup_temp_file(temp_file)
                    
                    # Sauvegarde de progression régulière
                    if (i + 1) % 10 == 0:
                        self.save_progress()
                        self.logger.info(f"Progression sauvegardée: {i+1} pages traitées")
                    
                    # Délai aléatoire entre requêtes
                    delay = random.uniform(*self.delay_range)
                    self.logger.info(f"Pause: {delay:.1f}s")
                    time.sleep(delay)
        
        except FileNotFoundError:
            self.logger.error(f"CSV file not found: {csv_file}")
            return False
        except Exception as e:
            self.logger.error(f"Error during processing: {e}")
            return False
        finally:
            # Final cleanup
            if self.driver:
                self.driver.quit()
            self.cleanup_temp_directory()
            self.save_progress()
        
        self.print_final_statistics(output_file)
        return True

    def cleanup_temp_directory(self):
        """Nettoie le répertoire temporaire"""
        try:
            for file in self.temp_dir.glob("*"):
                file.unlink()
            if self.temp_dir.exists():
                self.temp_dir.rmdir()
        except Exception as e:
            self.logger.warning(f"Erreur nettoyage: {e}")

    def print_final_statistics(self, output_file):
        """Affiche les statistiques finales avec métriques stealth"""
        print("\n" + "="*70)
        print("RAPPORT DE TRAITEMENT JOBS.CH - VERSION STEALTH")
        print("="*70)
        print(f"Fichier de sortie: {output_file}")
        print(f"Total traité: {self.stats['total_processed']}")
        print(f"Téléchargements réussis: {self.stats['successful_downloads']}")
        print(f"Extractions complètes: {self.stats['successful_extractions']}")
        print(f"Extractions partielles: {self.stats['partial_extractions']}")
        print(f"Extractions échouées: {self.stats['failed_extractions']}")
        print(f"Captchas rencontrés: {self.stats['captcha_encounters']}")
        print(f"Captchas résolus: {self.stats['captcha_resolved']}")
        print(f"Erreurs totales: {self.stats['errors']}")
        
        if self.stats['total_processed'] > 0:
            success_rate = ((self.stats['successful_extractions'] + self.stats['partial_extractions']) / self.stats['total_processed']) * 100
            complete_rate = (self.stats['successful_extractions'] / self.stats['total_processed']) * 100
            print(f"Taux de réussite global: {success_rate:.1f}%")
            print(f"Taux d'extraction complète: {complete_rate:.1f}%")
            
            if self.stats['captcha_encounters'] > 0:
                captcha_success_rate = (self.stats['captcha_resolved'] / self.stats['captcha_encounters']) * 100
                print(f"Taux de résolution captcha: {captcha_success_rate:.1f}%")
        
        print("="*70)


def main():
    """Fonction principale améliorée"""
    print("Scraper intégré Jobs.ch - Version Stealth Enhanced (Selenium Standard)")
    print("Téléchargement, Extraction, Anti-détection + Résolution manuelle captchas")
    print("-" * 70)
    
    # Configuration
    CSV_INPUT = "jobs_ch_all_links_20250808_135526.csv"
    DELAY_RANGE = (15, 30)  # Délais aléatoires entre requêtes
    TEMP_DIR = "temp_html"
    DEBUG_MODE = False
    USE_SELENIUM = True  # Recommandé pour contourner les protections
    HEADLESS = True  # Mode headless pour la performance
    MANUAL_CAPTCHA = True  # Permet résolution manuelle des captchas
    
    if not os.path.exists(CSV_INPUT):
        print(f"Erreur: Fichier CSV '{CSV_INPUT}' non trouvé")
        return
    
    scraper = EnhancedJobsScraper(
        temp_dir=TEMP_DIR,
        delay_range=DELAY_RANGE,
        debug_mode=DEBUG_MODE,
        use_selenium=USE_SELENIUM,
        headless=HEADLESS,
        manual_captcha=MANUAL_CAPTCHA
    )
    
    try:
        print(f"Configuration:")
        print(f"- Fichier d'entrée: {CSV_INPUT}")
        print(f"- Délais: {DELAY_RANGE[0]}-{DELAY_RANGE[1]} secondes")
        print(f"- Mode Selenium standard avec stealth manuel: {USE_SELENIUM}")
        print(f"- Mode headless: {HEADLESS}")
        print(f"- Résolution manuelle captchas: {MANUAL_CAPTCHA}")
        print(f"- Mode debug: {DEBUG_MODE}")
        
        if MANUAL_CAPTCHA:
            print("\n" + "="*60)
            print("MODE RÉSOLUTION MANUELLE ACTIVÉ")
            print("="*60)
            print("⚠️  IMPORTANT: Quand un CAPTCHA sera détecté:")
            print("   • Un navigateur Chrome visible s'ouvrira automatiquement")
            print("   • Vous devrez résoudre le CAPTCHA manuellement")
            print("   • Le script attendra votre confirmation avant de continuer")
            print("   • Vous pouvez choisir d'ignorer certaines URLs si nécessaire")
            print("="*60)
            input("\nAppuyez sur ENTRÉE pour commencer le scraping...")
        
        print("-" * 50)
        
        success = scraper.process_csv_jobs(CSV_INPUT)
        
        if success:
            today = datetime.now().strftime('%Y%m%d')
            print("\nProcessing finished successfully!")
            print(f"Results in: jobsch{today}.csv")
        else:
            print("\nProcessing failed. Check the logs.")
    
    except KeyboardInterrupt:
        print("\nProcessing interrupted by the user")
        if scraper.driver:
            scraper.driver.quit()
        scraper.cleanup_temp_directory()
        scraper.save_progress()
    
    except Exception as e:
        print(f"\nErreur inattendue: {e}")
        if scraper.driver:
            scraper.driver.quit()
        scraper.cleanup_temp_directory()
        scraper.save_progress()


if __name__ == "__main__":
    main()