import os
import threading
import logging
import uuid
from typing import List, Tuple, Dict, Any
from dotenv import load_dotenv
from datetime import datetime
from .cv_storage import CVStorageService
from .resume_parser import ResumeParserService
from .job_matching_service import JobMatchingService
from .job_api_service import JobAPIService

# Load environment variables
load_dotenv()

# Setup logging
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)


class ResumeUploadService:
    """Service for handling resume uploads with background AI analysis"""
    
    def __init__(self):
        self.resume_parser = ResumeParserService()
        self.job_matching_service = JobMatchingService()
        self.job_api_service = JobAPIService()
        
    def validate_job_emp_id(self, job_emp_id: str) -> Dict[str, Any]:
        """
        Validate job_emp_id by checking whether the job exists via JobAPIService
        
        Args:
            job_emp_id: The employer job ID to validate
            
        Returns:
            Dict with validation result
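
        Example (illustrative; "JOB123" is a placeholder ID and a live
        JobAPIService backend is required):
            >>> service.validate_job_emp_id("JOB123")  # doctest: +SKIP
            {'valid': True}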
        """
        try:
            # Use JobAPIService to get job details
            result = self.job_api_service.get_employer_job(job_emp_id)
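            # Assumed response shape (inferred from the checks below), e.g.:
            #   {"success": True, "data": {"job": {"job_title": "..."}}}
            # or {"success": False, "error": "..."} on failure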
            
            if not result.get('success'):
                return {
                    "valid": False,
                    "error": f"Invalid job_emp_id: {job_emp_id} - {result.get('error', 'Job not found')}"
                }
            
            # Check if the data is a list (invalid ID case)
            job_data = result.get('data', {})
            if isinstance(job_data, list):
                return {
                    "valid": False,
                    "error": f"Invalid job_emp_id: {job_emp_id} - Job ID not found or does not exist"
                }
            
            # Check if we have essential job data
            if isinstance(job_data, dict):
                # Check if data is nested under 'job' key
                if 'job' in job_data:
                    job_data = job_data['job']
                
                # Check for essential fields
                job_title = (job_data.get('job_title', '') or
                             job_data.get('title', '') or
                             job_data.get('position', '') or
                             job_data.get('job_position', ''))
                
                if not job_title:
                    return {
                        "valid": False,
                        "error": f"Invalid job_emp_id: {job_emp_id} - Job data incomplete or corrupted"
                    }
            
            return {"valid": True}
            
        except Exception as e:
            return {
                "valid": False,
                "error": f"Error validating job_emp_id: {str(e)}"
            }
    
    def save_uploaded_file(self, file_content: bytes, filename: str, job_emp_id: str) -> str:
        """
        Save uploaded file to local storage and return file path
        
        Args:
            file_content: File content bytes
            filename: Original filename
            job_emp_id: Job employer ID for organizing files
            
        Returns:
            str: Relative path of the saved file, or an empty string on failure
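
        Example (illustrative placeholders; the saved name carries a timestamp
        and a short random suffix):
            >>> service.save_uploaded_file(pdf_bytes, "My Resume.pdf", "JOB123")  # doctest: +SKIP
            'uploads/resumes/JOB123/20240101_120000_ab12cd34_My_Resume.pdf'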
        """
        try:
            # Create uploads directory structure
            base_upload_dir = os.path.join(os.getcwd(), "uploads", "resumes", job_emp_id)
            os.makedirs(base_upload_dir, exist_ok=True)
            
            # Generate a unique, sanitized filename; a timestamp alone can
            # collide when the same file is uploaded twice within one second,
            # so a short random suffix is appended as well
            timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
            unique_suffix = uuid.uuid4().hex[:8]
            base_name = os.path.basename(filename).replace(' ', '_')  # guard against path traversal
            safe_filename = f"{timestamp}_{unique_suffix}_{base_name}"
            
            file_path = os.path.join(base_upload_dir, safe_filename)
            
            # Save file
            with open(file_path, 'wb') as f:
                f.write(file_content)
            
            # Return relative path for database storage
            relative_path = os.path.join("uploads", "resumes", job_emp_id, safe_filename)
            
            logger.info(f"File saved successfully: {relative_path}")
            return relative_path
            
        except Exception as e:
            logger.error(f"Error saving file {filename}: {str(e)}")
            return ""
    
    def validate_file_types(self, files_data: List[Tuple[bytes, str]]) -> Dict[str, Any]:
        """
        Validate uploaded file types
        
        Args:
            files_data: List of (file_content, filename) tuples
            
        Returns:
            Dict with validation result
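
        Example (illustrative):
            >>> service.validate_file_types([(b"...", "cv.txt")])  # doctest: +SKIP
            {'valid': False, 'error': 'File cv.txt is not supported. Only PDF and Word documents are allowed.'}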
        """
        allowed_extensions = {'.pdf', '.docx', '.doc'}
        
        for _file_content, filename in files_data:
            if os.path.splitext(filename)[1].lower() not in allowed_extensions:
                return {
                    "valid": False,
                    "error": f"File {filename} is not supported. Only PDF and Word documents are allowed."
                }
        
        return {"valid": True}
    
    def process_resume_uploads_async(self, files_data: List[Tuple[bytes, str]], job_emp_id: str) -> Dict[str, Any]:
        """
        Start background processing for resume uploads
        
        Args:
            files_data: List of (file_content, filename) tuples
            job_emp_id: The employer job ID
            
        Returns:
            Dict with processing start confirmation
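
        Example (illustrative; returns immediately while work continues on a
        background thread):
            >>> service.process_resume_uploads_async([(pdf_bytes, "cv.pdf")], "JOB123")  # doctest: +SKIP
            {'success': True, 'message': 'Resume processing started for job ID: JOB123',
             'total_files': 1, 'status': 'processing_started'}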
        """
        try:
            # Start background processing in a separate thread
            thread = threading.Thread(
                target=self._background_resume_processing,
                args=(files_data, job_emp_id),
                daemon=True
            )
            thread.start()
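            # Note: daemon threads are terminated abruptly when the main process
            # exits, so any uploads still in flight at shutdown can be lost.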
            
            return {
                "success": True,
                "message": f"Resume processing started for job ID: {job_emp_id}",
                "total_files": len(files_data),
                "status": "processing_started"
            }
            
        except Exception as e:
            logger.error(f"Error starting resume processing: {str(e)}")
            return {
                "success": False,
                "error": str(e)
            }
    
    def _background_resume_processing(self, files_data: List[Tuple[bytes, str]], job_emp_id: str):
        """
        Background function to:
        1. Parse and store resume files
        2. Generate AI analysis (strengths, weaknesses, matching_percentage) 
        3. Store results in screenings collection with type=3
        
        Args:
            files_data: List of (file_content, filename) tuples
            job_emp_id: The employer job ID
        """
        try:
            logger.info(f"Starting background resume processing for job ID: {job_emp_id}, files: {len(files_data)}")
            
            cv_storage = CVStorageService()
            processed_count = 0
            
            for file_content, filename in files_data:
                try:
                    logger.info(f"Processing file: {filename}")
                    
                    # Save file to local storage first
                    saved_file_path = self.save_uploaded_file(file_content, filename, job_emp_id)
                    if not saved_file_path:
                        logger.error(f"Failed to save file: {filename}")
                        continue
                    
                    logger.info(f"File saved successfully: {saved_file_path}")
                    
                    # Extract text from file
                    try:
                        extracted_text = self.resume_parser.extract_text_from_file(file_content, filename)
                        logger.info(f"Text extraction successful for {filename}, length: {len(extracted_text) if extracted_text else 0}")
                    except Exception as extract_error:
                        logger.error(f"Text extraction failed for {filename}: {str(extract_error)}")
                        continue
                    
                    if not extracted_text or not extracted_text.strip():
                        logger.error(f"Could not extract text from file: {filename} - empty content")
                        continue
                    
                    logger.info(f"Starting resume parsing for {filename}")
                    
                    # Parse resume
                    try:
                        parsed_data = self.resume_parser.parse_resume(extracted_text)
                        logger.info(f"Resume parsing successful for {filename}")
                    except Exception as parse_error:
                        logger.error(f"Resume parsing failed for {filename}: {str(parse_error)}")
                        continue
                    
                    if parsed_data:
                        logger.info(f"Starting database storage for {filename}")
                        
                        # Store in database with file path
                        try:
                            storage_result = cv_storage.store_complete_cv_data(
                                filename=filename,
                                raw_text=extracted_text,
                                parsed_data=parsed_data,
                                job_emp_id=job_emp_id,
                                file_path=saved_file_path
                            )
                            logger.info(f"Database storage completed for {filename}: {storage_result.get('success')}")
                        except Exception as storage_error:
                            logger.error(f"Database storage failed for {filename}: {str(storage_error)}")
                            continue
                        
                        if storage_result.get("success"):
                            jobseeker_id = storage_result["jobseeker_id"]
                            logger.info(f"Successfully stored resume data for jobseeker ID: {jobseeker_id}")
                            
                            # Generate AI analysis for this candidate with type=3
                            logger.info(f"Starting AI analysis for jobseeker ID: {jobseeker_id}")
                            
                            try:
                                ai_analysis_result = self.job_matching_service.analyze_single_candidate_for_upload(
                                    job_emp_id=job_emp_id,
                                    jobseeker_id=jobseeker_id,
                                    parsed_data=parsed_data,
                                    analysis_type=3  # Type 3 for upload analysis
                                )
                                logger.info(f"AI analysis completed for jobseeker ID: {jobseeker_id}: {ai_analysis_result.get('success')}")
                            except Exception as ai_error:
                                logger.error(f"AI analysis exception for jobseeker ID {jobseeker_id}: {str(ai_error)}")
                                # The resume is already stored, so record the failure
                                # rather than leaving ai_analysis_result unbound below
                                ai_analysis_result = {"success": False, "error": str(ai_error)}
                            
                            if ai_analysis_result.get("success"):
                                logger.info(f"Successfully completed AI analysis for jobseeker ID: {jobseeker_id}")
                                processed_count += 1
                            else:
                                logger.error(f"AI analysis failed for jobseeker ID: {jobseeker_id}: {ai_analysis_result.get('error')}")
                        else:
                            logger.error(f"Storage failed for file {filename}: {storage_result.get('error')}")
                    else:
                        logger.error(f"Parsing failed for file {filename}: No data returned")
                        
                except Exception as e:
                    logger.error(f"Error processing file {filename}: {str(e)}")
                    continue
            
            logger.info(f"Resume processing completed for job ID: {job_emp_id}. Successfully processed {processed_count}/{len(files_data)} files.")
            
        except Exception as e:
            logger.error(f"Error in background resume processing for job ID {job_emp_id}: {str(e)}")