#!/usr/bin/env python3
# -*- coding: utf-8 -*-

import requests
from bs4 import BeautifulSoup
import pandas as pd
import time
import random
import os
from flask import Flask, jsonify, request, send_file
from flask_cors import CORS
from threading import Thread, Lock
import csv

app = Flask(__name__)
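# Enable CORS so a browser-based frontend served from another origin can call these endpoints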
CORS(app)

# Global state for the scraping run
scraping_status = {
    "is_scraping": False,
    "status": "Waiting to start",
    "total_urls": 0,
    "processed": 0,
    "remaining": 0,
    "total_extracted": 0,
    "success_rate": 0,
    "recent_urls": []
}

scraped_data = []
scraping_thread = None
lock = Lock()
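# scraping_status and scraped_data are shared between the Flask request handlers
# and the background scraping thread; updates to this state go through `lock`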

def load_jobs_from_csv():
    """Load job URLs from the jobs.csv file."""
    jobs = []
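    # Expected jobs.csv layout: a header row followed by one URL per row, e.g.
    #   URL
    #   https://www.jobs.ch/fr/vacancies/detail/12345/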
    try:
        if os.path.exists('jobs.csv'):
            with open('jobs.csv', 'r', encoding='utf-8') as f:
                reader = csv.reader(f)
                next(reader, None)  # Skip the header row (None avoids StopIteration on an empty file)
                for row in reader:
                    if row and row[0].strip():
                        jobs.append(row[0].strip())
        else:
            print("Fichier jobs.csv non trouvé. Création d'un exemple...")
            # Créer un fichier d'exemple avec quelques URLs de test
            sample_urls = [
                "https://www.jobs.ch/fr/vacancies/detail/12345/",
                "https://www.jobs.ch/fr/vacancies/detail/67890/",
                "https://www.jobs.ch/fr/vacancies/detail/54321/"
            ]
            with open('jobs.csv', 'w', encoding='utf-8', newline='') as f:
                writer = csv.writer(f)
                writer.writerow(['URL'])
                for url in sample_urls:
                    writer.writerow([url])
            jobs = sample_urls
    except Exception as e:
        print(f"Error reading jobs.csv: {e}")
        jobs = []
    
    return jobs

def scrape_job(url):
    """Scrape a single job posting; returns (job_data, success)."""
    try:
        headers = {
            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36'
        }
        
        response = requests.get(url, headers=headers, timeout=10)
        response.raise_for_status()
        
        soup = BeautifulSoup(response.content, 'html.parser')
        
        # Data extraction (selectors must be adapted to the site's real structure)
        title_elem = soup.find('h1')
        title = title_elem.get_text(strip=True) if title_elem else "Title not found"
        
        # These selectors are examples and must be adapted to the real site
        company_elem = soup.select_one('.company-name')
        company = company_elem.get_text(strip=True) if company_elem else "Company not specified"

        location_elem = soup.select_one('.job-location')
        location = location_elem.get_text(strip=True) if location_elem else "Location not specified"

        job_type_elem = soup.select_one('.employment-type')
        job_type = job_type_elem.get_text(strip=True) if job_type_elem else "Type not specified"

        salary_elem = soup.select_one('.salary')
        salary = salary_elem.get_text(strip=True) if salary_elem else "Salary not specified"

        date_elem = soup.select_one('.publication-date')
        publication_date = date_elem.get_text(strip=True) if date_elem else "Date not specified"

        description_elem = soup.select_one('.job-description')
        description = description_elem.get_text(strip=True) if description_elem else "Description not available"

        requirements_elem = soup.select_one('.job-requirements')
        requirements = requirements_elem.get_text(strip=True) if requirements_elem else "Requirements not specified"
        
        job_data = {
            'id': str(random.randint(10000, 99999)),
            'url': url,
            'title': title,
            'company': company,
            'location': location,
            'job_type': job_type,
            'salary': salary,
            'publication_date': publication_date,
            'description': description,
            'requirements': requirements,
            'timestamp': time.strftime('%H:%M:%S', time.localtime())
        }
        
        return job_data, True
        
    except Exception as e:
        print(f"Error scraping {url}: {e}")
        return None, False

def scraping_process():
    """Main scraping loop, run in a background thread."""
    global scraping_status, scraped_data
    
    with lock:
        scraping_status["is_scraping"] = True
        scraping_status["status"] = "Scraping in progress"
    
    job_urls = load_jobs_from_csv()
    
    with lock:
        scraping_status["total_urls"] = len(job_urls)
        scraping_status["remaining"] = len(job_urls)
        scraping_status["processed"] = 0
        scraping_status["total_extracted"] = 0
    
    successful_extractions = 0
    
    for i, url in enumerate(job_urls):
        # Stop early if a stop was requested via /stop-scraping
        with lock:
            if not scraping_status["is_scraping"]:
                break
            
        print(f"Scraping URL {i+1}/{len(job_urls)}: {url}")
        
        job_data, success = scrape_job(url)
        
        with lock:
            scraping_status["processed"] = i + 1
            scraping_status["remaining"] = len(job_urls) - i - 1
            
            if success:
                successful_extractions += 1
                scraped_data.append(job_data)
                scraping_status["total_extracted"] = successful_extractions
                scraping_status["success_rate"] = round((successful_extractions / (i + 1)) * 100)
                
                # Add to the recent URLs (capped at 10)
                scraping_status["recent_urls"].insert(0, job_data)
                if len(scraping_status["recent_urls"]) > 10:
                    scraping_status["recent_urls"].pop()
            
        # Random pause between requests to avoid getting blocked
        time.sleep(random.uniform(1, 3))
    
    with lock:
        # is_scraping is still True only if the loop ran to completion; a stop
        # request has already flipped it to False before we get here
        completed = scraping_status["is_scraping"]
        scraping_status["is_scraping"] = False
        scraping_status["status"] = "Scraping finished" if completed else "Scraping stopped"

@app.route('/scraping-status', methods=['GET'])
def get_scraping_status():
    """Endpoint returning the current scraping status."""
    with lock:
        return jsonify(scraping_status)

@app.route('/start-scraping', methods=['POST'])
def start_scraping():
    """Endpoint that starts the scraping run."""
    global scraping_thread, scraping_status
    
    with lock:
        if scraping_status["is_scraping"]:
            return jsonify({"error": "Scraping is already in progress"}), 400
        
        # Reset the statistics
        scraping_status["is_scraping"] = True
        scraping_status["status"] = "Starting scraping"
        scraping_status["processed"] = 0
        scraping_status["remaining"] = 0
        scraping_status["total_extracted"] = 0
        scraping_status["success_rate"] = 0
        scraping_status["recent_urls"] = []
    
    # Start the scraping in a separate thread
    scraping_thread = Thread(target=scraping_process)
    scraping_thread.daemon = True
    scraping_thread.start()
    
    return jsonify({"message": "Scraping started"})

@app.route('/stop-scraping', methods=['POST'])
def stop_scraping():
    """Endpoint that requests the scraping run to stop."""
    global scraping_status
    
    with lock:
        if not scraping_status["is_scraping"]:
            return jsonify({"error": "No scraping is currently running"}), 400
        
        scraping_status["is_scraping"] = False
        scraping_status["status"] = "Stopping scraping"
    
    return jsonify({"message": "Scraping stop requested"})

@app.route('/export-data', methods=['POST'])
def export_data():
    """Endpoint that exports the scraped data as CSV."""
    global scraped_data
    
    data = request.get_json(silent=True) or {}
    # basename() guards against path-like filenames sent by the client
    filename = os.path.basename(data.get('filename', 'jobs_extracted.csv')) or 'jobs_extracted.csv'
    
    if not scraped_data:
        return jsonify({"error": "No data to export"}), 400
    
    try:
        # Build a pandas DataFrame and export it to CSV
        df = pd.DataFrame(scraped_data)
        df.to_csv(filename, index=False, encoding='utf-8')
        
        # Send the file back in the response
        return send_file(
            filename,
            as_attachment=True,
            download_name=filename,
            mimetype='text/csv'
        )
    except Exception as e:
        return jsonify({"error": f"Export error: {str(e)}"}), 500

if __name__ == '__main__':
    print("Starting the Jobs.ch Scraper server...")
    print("Open the web interface at http://localhost:5000")
    app.run(debug=True, port=5000)
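
# Example usage once the server is running locally on port 5000 (endpoints as defined above):
#   curl -X POST http://localhost:5000/start-scraping
#   curl http://localhost:5000/scraping-status
#   curl -X POST http://localhost:5000/stop-scraping
#   curl -X POST -H "Content-Type: application/json" \
#        -d '{"filename": "jobs_extracted.csv"}' \
#        http://localhost:5000/export-data -o jobs_extracted.csv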