#!/usr/bin/env python3
"""
Scraper for jobs.ch with captcha handling via Firefox
"""

import time
import json
from datetime import datetime
from selenium import webdriver
from selenium.webdriver.firefox.options import Options
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.common.exceptions import TimeoutException, NoSuchElementException
from bs4 import BeautifulSoup
import threading
from flask import Flask, jsonify, request
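# Third-party dependencies (assumed install): pip install selenium beautifulsoup4 flask
# Selenium's Firefox driver also needs a Firefox binary and geckodriver available on PATH.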

class JobsChScraper:
    def __init__(self, headless=False):
        self.headless = headless
        self.driver = None
        self.results = []
        self.status = "Stopped"
        self.current_url = ""
        self.setup_driver()
        
    def setup_driver(self):
        """Configure the Firefox driver"""
        firefox_options = Options()
        
        if self.headless:
            firefox_options.add_argument("--headless")
            
        # Preferences to reduce automation fingerprinting
        # (the Chromium-only "--disable-blink-features" flag is dropped; it has no effect on Firefox)
        firefox_options.set_preference("dom.webdriver.enabled", False)
        firefox_options.set_preference("useAutomationExtension", False)
        
        # Preferences to avoid confirmation popups
        firefox_options.set_preference("dom.disable_beforeunload", True)
        firefox_options.set_preference("browser.tabs.warnOnClose", False)
        
        try:
            self.driver = webdriver.Firefox(options=firefox_options)
            # This override only applies to the initial browsing context; it is not
            # automatically re-applied after navigation.
            self.driver.execute_script("Object.defineProperty(navigator, 'webdriver', {get: () => undefined})")
            print("✅ Firefox driver initialized")
        except Exception as e:
            print(f"❌ Failed to initialize the driver: {e}")
            
    def wait_for_manual_action(self, message="Manual action required"):
        """Pause the script to allow manual intervention"""
        self.status = f"⏸️ PAUSED: {message}"
        print(f"\n🔴 {message}")
        print("Handle the captcha/problem in Firefox, then type 'c' to continue or 'q' to quit:")
        
        while True:
            try:
                user_input = input().strip().lower()
                if user_input == 'c':
                    print("▶️ Resuming scraping...")
                    self.status = "🔄 Running"
                    break
                elif user_input == 'q':
                    print("🛑 Stopping the scraper")
                    self.status = "🛑 Stopped by user"
                    return False
                else:
                    print("Type 'c' to continue or 'q' to quit:")
            except KeyboardInterrupt:
                return False
        return True
        
    def scrape_job_listings(self, search_terms, pages=5):
        """Scrape the job listings"""
        self.status = "🔄 Starting scrape"
        self.results = []
        
        try:
            base_url = "https://www.jobs.ch/fr/emplois/"
            search_url = f"{base_url}?term={'+'.join(search_terms.split())}"
            
            print(f"🔍 Search: {search_terms}")
            print(f"📍 URL: {search_url}")
            
            self.driver.get(search_url)
            self.current_url = search_url
            time.sleep(3)
            
            # Check for a captcha or bot protection
            if self.check_for_captcha():
                if not self.wait_for_manual_action("Captcha detected - solve it manually"):
                    return self.results
                    
            for page in range(1, pages + 1):
                self.status = f"🔄 Scraping page {page}/{pages}"
                print(f"\n📄 Page {page}")
                
                # Wait for the results to load
                try:
                    WebDriverWait(self.driver, 10).until(
                        EC.presence_of_element_located((By.CSS_SELECTOR, "[data-cy='serp-jobad']"))
                    )
                except TimeoutException:
                    print("⚠️ Timeout - manual check needed")
                    if not self.wait_for_manual_action("Check the page and resolve any issues"):
                        break
                
                # Extract the data from the page
                page_jobs = self.extract_jobs_from_page()
                self.results.extend(page_jobs)
                print(f"✅ {len(page_jobs)} jobs found on this page")

                # Go to the next page
                if page < pages:
                    next_button = self.find_next_button()
                    if next_button:
                        self.driver.execute_script("arguments[0].click();", next_button)
                        time.sleep(3)
                    else:
                        print("🔚 No next page found")
                        break
                        
        except Exception as e:
            print(f"❌ Error during scraping: {e}")
            self.status = f"❌ Error: {e}"
        else:
            self.status = f"✅ Done - {len(self.results)} jobs found"

        return self.results
        
    def check_for_captcha(self):
        """Check for a captcha or bot-protection challenge"""
        captcha_selectors = [
            "iframe[src*='captcha']",
            ".captcha",
            "#captcha",
            "[data-cy='captcha']",
            ".cf-browser-verification",
            "#challenge-form"
        ]
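        # NOTE: these selectors are heuristics; challenge markup (e.g. Cloudflare, reCAPTCHA)
        # changes often, so a page matching none of them may still be blocked and need a manual look.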
        
        for selector in captcha_selectors:
            try:
                self.driver.find_element(By.CSS_SELECTOR, selector)
                return True
            except NoSuchElementException:
                continue
                
        return False
        
    def extract_jobs_from_page(self):
        """Extract job information from the current page"""
        jobs = []
        
        try:
            # Give the page a moment to finish loading
            time.sleep(2)
            
            # Grab the page HTML and parse it with BeautifulSoup
            html = self.driver.page_source
            soup = BeautifulSoup(html, 'html.parser')
            
            # Selectors for jobs.ch result cards, with broader fallbacks
            job_cards = soup.find_all(['div', 'article'], {'data-cy': 'serp-jobad'}) or \
                       soup.find_all('div', class_=lambda x: x and 'job' in x.lower()) or \
                       soup.find_all('article')
            
            for card in job_cards:
                try:
                    job_data = self.extract_job_data(card)
                    if job_data:
                        jobs.append(job_data)
                except Exception as e:
                    print(f"⚠️ Error extracting a job: {e}")
                    continue
                    
        except Exception as e:
            print(f"❌ Error extracting the page: {e}")
            
        return jobs
        
    def extract_job_data(self, card):
        """Extract the data for a single job posting"""
        try:
            # Title
            title_elem = card.find(['h2', 'h3', 'h4']) or card.find('a')
            title = title_elem.get_text(strip=True) if title_elem else "Title not found"

            # Company
            company_elem = card.find('span', class_=lambda x: x and 'company' in str(x).lower()) or \
                          card.find('div', class_=lambda x: x and 'company' in str(x).lower())
            company = company_elem.get_text(strip=True) if company_elem else "Company not found"

            # Location
            location_elem = card.find('span', class_=lambda x: x and ('location' in str(x).lower() or 'place' in str(x).lower()))
            location = location_elem.get_text(strip=True) if location_elem else "Location not found"
            
            # Link
            link_elem = card.find('a', href=True)
            link = link_elem['href'] if link_elem else ""
            if link and not link.startswith('http'):
                link = f"https://www.jobs.ch{link}"

            # Publication date
            date_elem = card.find('time') or card.find('span', class_=lambda x: x and 'date' in str(x).lower())
            pub_date = date_elem.get_text(strip=True) if date_elem else "Date not found"
            
            return {
                'titre': title,
                'entreprise': company,
                'lieu': location,
                'lien': link,
                'date_publication': pub_date,
                'date_scraping': datetime.now().strftime("%Y-%m-%d %H:%M:%S")
            }
            
        except Exception as e:
            print(f"⚠️ Error extracting job data: {e}")
            return None
            
    def find_next_button(self):
        """Find the next-page button"""
        next_selectors = [
            "a[aria-label*='suivant']",
            "a[data-cy*='next']",
            ".pagination a:last-child",
            "a[title*='Next']"
        ]

        for selector in next_selectors:
            try:
                button = self.driver.find_element(By.CSS_SELECTOR, selector)
                if button.is_enabled():
                    return button
            except NoSuchElementException:
                continue

        # ':contains()' is not valid CSS, so the text match is done with XPath instead
        try:
            return self.driver.find_element(By.XPATH, "//a[contains(., 'Suivant')]")
        except NoSuchElementException:
            return None
        
    def save_results(self, filename=None):
        """Save the results to a JSON file"""
        if not filename:
            timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
            filename = f"jobs_scraping_{timestamp}.json"
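        # Each record carries the keys built in extract_job_data():
        # titre, entreprise, lieu, lien, date_publication, date_scraping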
            
        with open(filename, 'w', encoding='utf-8') as f:
            json.dump(self.results, f, indent=2, ensure_ascii=False)
            
        print(f"💾 Results saved to: {filename}")
        return filename
        
    def close(self):
        """Close the browser"""
        if self.driver:
            self.driver.quit()
            print("🔚 Browser closed")
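
# Minimal standalone usage sketch (assumes geckodriver is on PATH; the file name is illustrative):
#
#     scraper = JobsChScraper(headless=False)
#     try:
#         scraper.scrape_job_listings("python developer", pages=2)
#         scraper.save_results("example_run.json")
#     finally:
#         scraper.close()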

# Flask interface to control the scraper
app = Flask(__name__)
scraper = None

@app.route('/')
def index():
    return '''
    <!DOCTYPE html>
    <html>
    <head>
        <title>Jobs.ch Scraper Controller</title>
        <meta charset="UTF-8">
        <style>
            body { font-family: Arial; margin: 20px; }
            .status { padding: 10px; margin: 10px 0; border-radius: 4px; }
            .running { background: #e8f5e8; }
            .stopped { background: #ffe8e8; }
            input, button { padding: 8px; margin: 5px; }
        </style>
    </head>
    <body>
        <h1>🔍 Jobs.ch Scraper Controller</h1>
        <div id="status" class="status stopped">Status: Stopped</div>
        
        <div>
            <input type="text" id="searchTerm" placeholder="Search terms" value="developer python">
            <input type="number" id="pages" placeholder="Number of pages" value="3" min="1" max="10">
            <button onclick="startScraping()">🚀 Start</button>
            <button onclick="stopScraping()">🛑 Stop</button>
        </div>
        
        <div id="results"></div>
        
        <script>
            function updateStatus() {
                fetch('/status')
                .then(r => r.json())
                .then(data => {
                    document.getElementById('status').textContent = 'Status: ' + data.status;
                    document.getElementById('status').className = 'status ' +
                        (data.status.includes('🔄') ? 'running' : 'stopped');
                });
            }
            
            function startScraping() {
                const term = document.getElementById('searchTerm').value;
                const pages = document.getElementById('pages').value;
                
                fetch('/start', {
                    method: 'POST',
                    headers: {'Content-Type': 'application/json'},
                    body: JSON.stringify({search_term: term, pages: parseInt(pages)})
                });
            }
            
            function stopScraping() {
                fetch('/stop', {method: 'POST'});
            }
            
            setInterval(updateStatus, 2000);
        </script>
    </body>
    </html>
    '''

@app.route('/status')
def get_status():
    global scraper
    status = scraper.status if scraper else "Stopped"
    return jsonify({"status": status})

@app.route('/start', methods=['POST'])
def start_scraping():
    global scraper
    data = request.json
    
    # Close any previous driver before starting a new run
    if scraper:
        scraper.close()
    scraper = JobsChScraper(headless=False)
    
    def run_scraping():
        try:
            results = scraper.scrape_job_listings(
                data.get('search_term', 'python developer'),
                data.get('pages', 3)
            )
            filename = scraper.save_results()
            print(f"✅ Scraping finished: {len(results)} results in {filename}")
        except Exception as e:
            print(f"❌ Scraping error: {e}")
    
    thread = threading.Thread(target=run_scraping)
    thread.daemon = True
    thread.start()
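    # Note: wait_for_manual_action() reads from stdin, so captcha pauses still have to be
    # answered in the terminal that launched this server, not from the web UI.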
    
    return jsonify({"message": "Scraping started"})

@app.route('/stop', methods=['POST'])
def stop_scraping():
    global scraper
    if scraper:
        scraper.close()
        scraper = None
    return jsonify({"message": "Scraping stopped"})

if __name__ == "__main__":
    print("🚀 Starting the Jobs.ch scraper")
    print("📝 Usage:")
    print("   python jobs_scraper.py")
    print("   Then open: http://localhost:5001 (only when the Flask interface below is enabled)")
    
    try:
        # Simple command-line mode
        scraper = JobsChScraper(headless=False)
        results = scraper.scrape_job_listings("python developer", 3)
        filename = scraper.save_results()
        print(f"\n✅ Done! {len(results)} jobs saved to {filename}")
        
        # Start the web interface (optional)
        # app.run(debug=True, port=5001)
        
    except KeyboardInterrupt:
        print("\n🛑 Stopping the program")
    finally:
        if scraper:
            scraper.close()
