from fastapi import FastAPI, UploadFile, File, HTTPException, Form
from pydantic import BaseModel
from schemas.request_response import JobQueryRequest, SearchResponse, ChatRequest, ChatResponse, ResumeParseResponse, ResumeParseRequest, OfferLetterRequest, OfferLetterResponse
from services.embeddings import Embedder
from services.qdrant_service import QdrantSearchService
from services.opensearch_service import OpenSearchService
from services.mongo_service import MongoResumeService
from services.ranking import HybridRanker
from services.explain import ExplainService
from services.ai_service import AIService
from services.resume_parser import ResumeParserService
from services.job_matching_service import JobMatchingService
from services.resume_upload_service import ResumeUploadService
from services.offer_letter_service import OfferLetterService
from typing import List, Optional
from bson import ObjectId
from datetime import datetime
import threading
import os
import requests
import logging

# Setup logging
logger = logging.getLogger(__name__)

def serialize_document(obj):
    """Convert MongoDB ObjectIds and datetime objects to JSON serializable format"""
    if isinstance(obj, ObjectId):
        return str(obj)
    elif isinstance(obj, datetime):
        return obj.isoformat()
    elif isinstance(obj, dict):
        return {k: serialize_document(v) for k, v in obj.items()}
    elif isinstance(obj, list):
        return [serialize_document(item) for item in obj]
    else:
        return obj
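
# Illustrative example of the helper above (values are placeholders):
#   serialize_document({"_id": ObjectId("65ab0c..."), "created": datetime(2024, 1, 1)})
#   -> {"_id": "65ab0c...", "created": "2024-01-01T00:00:00"}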

class SpecificCandidatesRequest(BaseModel):
    jobseeker_ids: List[int]

app = FastAPI()

ai_service = AIService()
resume_parser = ResumeParserService()
embedder = Embedder()
qdrant_service = QdrantSearchService()
opensearch_service = OpenSearchService()
mongo_service = MongoResumeService()
ranker = HybridRanker()
explain = ExplainService()
job_matching_service = JobMatchingService()
resume_upload_service = ResumeUploadService()
offer_letter_service = OfferLetterService()


@app.post("/search", response_model=SearchResponse)
def search_resumes(request: JobQueryRequest):
    # 1. Create embedding of job description
    query_vector = embedder.embed(request.job_description)

    # 2. Vector Search (semantic)
    vector_results = qdrant_service.search(query_vector, top_k=50)

    # 3. Keyword Search (OpenSearch)
    keyword_results = opensearch_service.search(request.job_description, top_k=50)

    # 4. Combine results (hybrid scoring)
    ranked = ranker.combine(vector_results, keyword_results)

    # 5. Fetch resume details
    resumes = mongo_service.get_resumes([r.resume_id for r in ranked])

    # 6. Add % match + strengths + weaknesses
    enriched = explain.add_explanations(resumes, ranked, request.job_description)

    return SearchResponse(results=enriched)
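
# Example request (a sketch; JobQueryRequest is assumed to expose at least the
# job_description field used above):
#   POST /search
#   {"job_description": "Senior Python developer with AWS and Docker experience"}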


@app.post("/ai-chat", response_model=ChatResponse)
def chat_with_ai(request: ChatRequest):
    max_tokens = 1000
    response = ai_service.ai_response(request.question, request.system_prompt, max_tokens)
    return ChatResponse(
        question=request.question, 
        system_prompt=request.system_prompt, 
        answer=response
    )
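
# Example request (illustrative; field names follow the ChatRequest usage above):
#   POST /ai-chat
#   {"question": "Summarize this candidate's strengths",
#    "system_prompt": "You are a recruiting assistant"}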


@app.post("/parse-resume", response_model=ResumeParseResponse)
async def parse_resume_files(files: List[UploadFile] = File(...), job_emp_id: str = Form(...)):
    """
    Upload, parse and store resume files with background AI analysis
    
    This endpoint:
    1. Validates job_emp_id immediately
    2. Validates file types immediately 
    3. Returns success immediately if validation passes
    4. Processes AI analysis in background (strengths, weaknesses, matching_percentage)
    5. Stores results in screenings collection with type=3
    
    Args:
        files: List of uploaded files (PDF or Word documents)
        job_emp_id: The employer job ID (form field)
    
    Returns:
        ResumeParseResponse: Immediate success response - AI analysis happens in background
    """
    if not files:
        raise HTTPException(status_code=400, detail="No files uploaded")
    
    if not job_emp_id or job_emp_id.strip() == "":
        raise HTTPException(status_code=400, detail="job_emp_id is required and cannot be empty")
    
    # Validate job_emp_id immediately
    job_validation = resume_upload_service.validate_job_emp_id(job_emp_id.strip())
    if not job_validation.get("valid"):
        raise HTTPException(status_code=400, detail=job_validation.get("error"))
    
    # Read file contents and validate file types
    files_data = []
    for file in files:
        file_content = await file.read()
        files_data.append((file_content, file.filename))
    
    file_validation = resume_upload_service.validate_file_types(files_data)
    if not file_validation.get("valid"):
        raise HTTPException(status_code=400, detail=file_validation.get("error"))
    
    # Start background processing
    processing_result = resume_upload_service.process_resume_uploads_async(files_data, job_emp_id.strip())
    
    if not processing_result.get("success"):
        raise HTTPException(status_code=500, detail=f"Error starting processing: {processing_result.get('error')}")
    
    # Return immediate success response
    return ResumeParseResponse(
        success=True,
        total_files=len(files),
        results=[
            {
                "filename": filename,
                "status": "processing_started",
                "message": "File validation passed. AI analysis started in background."
            }
            for _, filename in files_data
        ]
    )
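
# Example request (a sketch; host and file paths are placeholders):
#   curl -X POST http://localhost:8000/parse-resume \
#        -F "files=@resume1.pdf" -F "files=@resume2.docx" \
#        -F "job_emp_id=12345"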


@app.post("/index/reset-for-testing")
async def reset_indexed_status_for_testing(count: int = 10):
    """
    Reset indexed status for a few jobseekers for testing purposes
    
    Args:
        count: Number of jobseekers to reset (default: 10)
    
    Returns:
        dict: Reset operation results
    """
    try:
        from services.mongo_service import mongo_service

        jobseeker_collection = mongo_service.get_collection('jobseekers')

        # update_many cannot be limited directly, so select the target ids first
        cursor = (
            jobseeker_collection
            .find({"status": 1, "indexed": 1}, {"_id": 1})  # Indexed active jobseekers
            .limit(count)
        )
        ids_to_reset = [doc["_id"] for doc in cursor]

        result = jobseeker_collection.update_many(
            {"_id": {"$in": ids_to_reset}},  # Only the selected jobseekers
            {"$set": {"indexed": 0}}         # Reset to unindexed
        )

        return {
            "success": True,
            "message": "Reset indexed status for testing",
            "reset_count": result.modified_count,
            "requested_count": count
        }
        
    except Exception as e:
        return {
            "success": False,
            "error": str(e)
        }


@app.get("/index/bulk")
async def bulk_index_resumes(max_total: Optional[int] = None):
    """
    Bulk index all unindexed jobseekers (status=1, indexed=0) to OpenSearch
    Runs in background to avoid blocking other API calls
    
    Args:
        max_total: Maximum number of jobseekers to index 
                  - If None: Process ALL unindexed jobseekers (suitable for cron)
                  - If number: Limit processing for testing
    
    Returns:
        dict: Immediate response - indexing runs in background
    """
    try:
        # Start bulk indexing in background thread to avoid blocking
        def background_bulk_indexing():
            try:
                logger.info(f"Starting background bulk indexing with max_total={max_total}")
                result = opensearch_service.bulk_index_jobseekers(max_total=max_total)
                logger.info(f"Background bulk indexing completed: {result}")
            except Exception as e:
                logger.error(f"Background bulk indexing failed: {str(e)}")
        
        # Start in background thread
        threading.Thread(target=background_bulk_indexing, daemon=True).start()
        
        return {
            "success": True,
            "message": "Bulk indexing started in background",
            "processed_all": max_total is None,
            "note": "Check logs for progress and completion status"
        }
    except Exception as e:
        return {
            "success": False,
            "error": str(e)
        }
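
# Example calls (illustrative):
#   GET /index/bulk              -> index every unindexed jobseeker (cron-friendly)
#   GET /index/bulk?max_total=25 -> cap the run at 25 jobseekers for testing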


@app.post("/index/single/{jobseeker_id}")
async def index_single_resume(jobseeker_id: str):
    """
    Index a single jobseeker resume to OpenSearch
    
    Args:
        jobseeker_id: The jobseeker ID to index
        
    Returns:
        dict: Success status and message
    """
    try:
        success = opensearch_service.add_resume(jobseeker_id)
        if success:
            return {
                "success": True,
                "message": f"Successfully indexed jobseeker: {jobseeker_id}"
            }
        else:
            return {
                "success": False,
                "error": f"Failed to index jobseeker: {jobseeker_id}"
            }
    except Exception as e:
        return {
            "success": False,
            "error": str(e)
        }


@app.put("/index/reindex/{jobseeker_id}")
async def reindex_resume(jobseeker_id: str):
    """
    Reindex a specific jobseeker resume (reset indexed status and re-index)
    
    Args:
        jobseeker_id: The jobseeker ID to reindex
        
    Returns:
        dict: Success status and message
    """
    try:
        success = opensearch_service.reindex_jobseeker(jobseeker_id)
        if success:
            return {
                "success": True,
                "message": f"Successfully reindexed jobseeker: {jobseeker_id}"
            }
        else:
            return {
                "success": False,
                "error": f"Failed to reindex jobseeker: {jobseeker_id}"
            }
    except Exception as e:
        return {
            "success": False,
            "error": str(e)
        }


@app.delete("/index/{jobseeker_id}")
async def delete_from_index(jobseeker_id: str):
    """
    Delete a jobseeker resume from OpenSearch index
    
    Args:
        jobseeker_id: The jobseeker ID to delete from index
        
    Returns:
        dict: Success status and message
    """
    try:
        success = opensearch_service.delete_resume(jobseeker_id)
        if success:
            return {
                "success": True,
                "message": f"Successfully deleted from index: {jobseeker_id}"
            }
        else:
            return {
                "success": False,
                "error": f"Failed to delete from index: {jobseeker_id}"
            }
    except Exception as e:
        return {
            "success": False,
            "error": str(e)
        }
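
# Example lifecycle calls for a single jobseeker (illustrative id):
#   POST   /index/single/98765   -> index once
#   PUT    /index/reindex/98765  -> reset the indexed flag and re-index
#   DELETE /index/98765          -> remove from the OpenSearch index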


@app.get("/index/status")
async def get_indexing_status():
    """
    Get current OpenSearch indexing status
    
    Returns:
        dict: Indexing status with counts
    """
    try:
        from services.mongo_service import mongo_service
        
        jobseeker_collection = mongo_service.get_collection('jobseekers')
        
        total_active = jobseeker_collection.count_documents({"status": 1})
        indexed_count = jobseeker_collection.count_documents({"status": 1, "indexed": 1})
        
        # Count unindexed (indexed=0, null, or missing)
        unindexed_count = jobseeker_collection.count_documents({
            "status": 1, 
            "$or": [
                {"indexed": {"$exists": False}},  # Field doesn't exist
                {"indexed": None},                # Field is null
                {"indexed": 0}                    # Field is 0
            ]
        })
        
        completion_percentage = (indexed_count / total_active * 100) if total_active > 0 else 0
        
        return {
            "total_active_jobseekers": total_active,
            "indexed_count": indexed_count,
            "unindexed_count": unindexed_count,
            "completion_percentage": round(completion_percentage, 2)
        }
        
    except Exception as e:
        return {
            "error": str(e)
        }
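
# Example response shape (values illustrative):
#   {"total_active_jobseekers": 1000, "indexed_count": 750,
#    "unindexed_count": 250, "completion_percentage": 75.0}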


@app.post("/search/advanced")
async def search_resumes_advanced(
    query: str = Form(...),
    country_code: Optional[str] = Form(None),
    skills: Optional[str] = Form(None),
    education: Optional[str] = Form(None),
    min_experience: Optional[int] = Form(None),
    top_k: int = Form(50)
):
    """
    Advanced resume search with filtering by country, skills, education, experience
    
    Args:
        query: Job description or search query
        country_code: Country header code to filter by (e.g. 'ae', 'bd', 'in')
        skills: Comma-separated list of skills to filter by
        education: Education level to filter by
        min_experience: Minimum years of experience
        top_k: Number of results to return (default: 50)
    
    Returns:
        SearchResponse: Filtered search results with detailed candidate info
    """
    try:
        # Build filters
        filters = {}
        if country_code:
            filters['country_header_code'] = country_code.strip().lower()
        if skills:
            filters['skills'] = [skill.strip() for skill in skills.split(',') if skill.strip()]
        if education:
            filters['education'] = education.strip()
        if min_experience and min_experience > 0:
            filters['min_experience_years'] = min_experience
        
        # Search with filters using the module-level embedder (avoids re-initializing per request)
        results = embedder.search_with_filters(query, filters, top_k)
        
        # Format response
        formatted_results = []
        for result in results.get('result', []):
            payload = result.get('payload', {})
            summary = payload.get('profile_summary', '')
            formatted_results.append({
                'resume_id': payload.get('resume_id'),
                'score': result.get('score', 0),
                'first_name': payload.get('first_name', ''),
                'last_name': payload.get('last_name', ''),
                'email': payload.get('email', ''),
                'country_name': payload.get('country_name', ''),
                'country_header_code': payload.get('country_header_code', ''),
                'skills': payload.get('skills', []),
                'current_designation': payload.get('current_designation', ''),
                'highest_education': payload.get('highest_education', ''),
                'experience_years': payload.get('experience_years', 0),
                'profile_summary': summary[:200] + '...' if len(summary) > 200 else summary
            })
        
        return {
            "success": True,
            "total_results": len(formatted_results),
            "filters_applied": filters,
            "results": formatted_results
        }
        
    except Exception as e:
        return {
            "success": False,
            "error": str(e),
            "results": []
        }
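
# Example request (a sketch; host is a placeholder and all filters are optional form fields):
#   curl -X POST http://localhost:8000/search/advanced \
#        -F "query=backend engineer" -F "country_code=ae" \
#        -F "skills=python,django" -F "min_experience=3"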


# @app.post("/resume-index-and-search")
# async def index_and_search_resumes(
#     search_query: str = None, 
#     force_reindex: bool = False,
#     top_k: int = 50
# ):
#     """
#     Simple API to index resumes and search them
    
#     Args:
#         search_query: Search query string (optional)
#         force_reindex: Force reindexing all resumes (default: False)
#         top_k: Number of results to return (default: 50)
    
#     Returns:
#         dict: Indexing results and search results if query provided
#     """
#     try:
#         result = {
#             "indexing_completed": False,
#             "indexing_results": {},
#             "search_results": [],
#             "message": ""
#         }
        
#         # Step 1: Index resumes if needed
#         from services.mongo_service import mongo_service
#         jobseeker_collection = mongo_service.get_collection('jobseekers')
        
#         # Check if we need to index
#         unindexed_count = jobseeker_collection.count_documents({"status": 1, "indexed": 0})
        
#         if unindexed_count > 0 or force_reindex:
#             if force_reindex:
#                 # Reset all indexed flags if force reindex
#                 jobseeker_collection.update_many(
#                     {"status": 1}, 
#                     {"$set": {"indexed": 0}}
#                 )
#                 result["message"] = "Force reindexing initiated. "
            
#             # Perform bulk indexing
#             indexing_results = opensearch_service.bulk_index_jobseekers()
#             result["indexing_results"] = indexing_results
#             result["indexing_completed"] = True
#             result["message"] += f"Indexed {indexing_results.get('indexed', 0)} resumes. "
#         else:
#             result["message"] = "All resumes already indexed. "
#             result["indexing_completed"] = True
        
#         # Step 2: Search if query provided
#         if search_query and search_query.strip():
#             search_results = opensearch_service.search(search_query.strip(), top_k=top_k)
#             result["search_results"] = search_results
#             result["message"] += f"Found {len(search_results)} matching resumes."
        
#         return {
#             "success": True,
#             "data": result
#         }
        
#     except Exception as e:
#         return {
#             "success": False,
#             "error": str(e)
#         }

@app.get("/quick-search/{search_text}")
async def quick_search(search_text: str, limit: int = 10, min_threshold: float = 0.5):
    """
    Quick search endpoint - just provide search text in URL with minimum matching threshold
    
    Args:
        search_text: Text to search for
        limit: Number of results (default: 10)
        min_threshold: Minimum match threshold (default: 0.5 = 50%)
    
    Returns:
        dict: Simple search results with match percentages
    """
    try:
        results = opensearch_service.search(search_text, top_k=limit)
        
        if not results:
            return {
                "success": True,
                "query": search_text,
                "total_found": 0,
                "results": []
            }
        
        # Set absolute minimum score threshold to filter out random matches
        absolute_min_score = 0.3  # Minimum score required for any match
        
        # First filter by absolute minimum score
        relevant_results = [r for r in results if r["keyword_score"] >= absolute_min_score]
        
        if not relevant_results:
            return {
                "success": True,
                "query": search_text,
                "message": "No relevant matches found. Search terms too generic.",
                "total_found": 0,
                "results": []
            }
        
        # Calculate max score from relevant results only
        max_score = max([r["keyword_score"] for r in relevant_results])
        
        # Filter by threshold and add percentages
        filtered_results = []
        for r in relevant_results:
            match_percentage = (r["keyword_score"] / max_score) * 100
            
            if match_percentage >= (min_threshold * 100):
                filtered_results.append({
                    "resume_id": r["resume_id"],
                    "score": round(r["keyword_score"], 4),
                    "match_percentage": round(match_percentage, 2)
                })
        
        return {
            "success": True,
            "query": search_text,
            "threshold_applied": f"{min_threshold*100}%",
            "total_found": len(filtered_results),
            "results": filtered_results
        }
        
    except Exception as e:
        return {
            "success": False,
            "error": str(e)
        }
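
# Example call (illustrative): top 5 keyword matches at a 60% relative threshold
#   GET /quick-search/python%20developer?limit=5&min_threshold=0.6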



@app.get("/vector-search-simple/{search_text}")
async def vector_search_simple(search_text: str, limit: int = 10, min_threshold: float = 0.5):
    """
    Simple vector search endpoint - provide search text in URL for semantic search with minimum matching threshold
    
    Args:
        search_text: Text to search semantically
        limit: Number of results (default: 10)
        min_threshold: Minimum similarity threshold (default: 0.5 = 50%)
    
    Returns:
        dict: Simple vector search results with match percentages
    """
    try:
        vector_results = embedder.search_with_qdrant(search_text, top_k=limit)
        
        if not vector_results.get("result"):
            return {
                "success": True,
                "query": search_text,
                "search_type": "vector_semantic",
                "total_found": 0,
                "results": []
            }
        
        # Get raw results from vector search
        raw_results = vector_results["result"]
        
        # Set absolute minimum similarity threshold to filter out random matches
        absolute_min_similarity = 0.3  # Minimum similarity score required (30%)
        
        # Filter by absolute minimum similarity first
        relevant_results = [
            result for result in raw_results 
            if result.get("score", 0) >= absolute_min_similarity
        ]
        
        if not relevant_results:
            return {
                "success": True,
                "query": search_text,
                "search_type": "vector_semantic",
                "message": "No semantically relevant matches found. Query too generic.",
                "total_found": 0,
                "results": []
            }
        
        # Calculate max similarity from relevant results only
        max_similarity = max([result.get("score", 0) for result in relevant_results])
        
        results = []
        for result in relevant_results:
            payload = result.get("payload", {})
            resume_id = payload.get("resume_id")
            similarity_score = result.get("score", 0)
            matched_text = payload.get("text", "")

            # Calculate match percentage
            match_percentage = (similarity_score / max_similarity) * 100

            if resume_id and match_percentage >= (min_threshold * 100):
                results.append({
                    "resume_id": resume_id,
                    "similarity_score": round(similarity_score, 4),
                    "match_percentage": round(match_percentage, 2),
                    # Only truncate (and append an ellipsis) when the text is actually long
                    "matched_text": matched_text[:200] + "..." if len(matched_text) > 200 else matched_text
                })
        
        return {
            "success": True,
            "query": search_text,
            "search_type": "vector_semantic",
            "threshold_applied": f"{min_threshold*100}%",
            "total_found": len(results),
            "results": results
        }
        
    except Exception as e:
        return {
            "success": False,
            "error": str(e)
        }
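
# Example call (illustrative): semantic matches for a phrase that may share no
# exact keywords with the resumes it should surface
#   GET /vector-search-simple/machine%20learning%20engineer?limit=10&min_threshold=0.5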


@app.post("/job-matching/{job_emp_id}")
async def start_job_matching(job_emp_id: str):
    """
    Start job matching process for a specific job ID
    
    This endpoint immediately returns success and processes job matching in background:
    1. Fetches job details from third-party API
    2. Searches candidates using both OpenSearch and Vector search
    3. Filters candidates with 50%+ match threshold
    4. Generates AI analysis (strengths, weaknesses, match justification)
    5. Stores results in screenings collection
    
    Args:
        job_emp_id: The employer job ID to process
        
    Returns:
        dict: Immediate success response - actual processing happens in background
    """
    try:
        if not job_emp_id or not job_emp_id.strip():
            return {
                "success": False,
                "error": "job_emp_id is required and cannot be empty"
            }
        
        # Start background job matching process
        result = job_matching_service.process_job_matching_async(job_emp_id.strip())
        
        return result
        
    except Exception as e:
        return {
            "success": False,
            "error": str(e),
            "job_id": job_emp_id
        }
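
# Example call (illustrative job id):
#   POST /job-matching/12345   -> returns immediately; matching runs in background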


@app.post("/job-matching/specific-candidates/{job_emp_id}")
async def analyze_specific_candidates(job_emp_id: str, request: SpecificCandidatesRequest):
    """
    Analyze specific jobseeker IDs for a job position with background processing
    
    This endpoint:
    1. Takes a job ID and array of specific jobseeker IDs 
    2. Checks if jobseekers are indexed, indexes them if needed
    3. Generates AI analysis (strengths, weaknesses, match justification)
    4. Stores results in screenings collection with type=1
    5. Processes in background and returns immediately
    
    Args:
        job_emp_id: The employer job ID to process
        jobseeker_ids: Array of jobseeker IDs to analyze
        
    Returns:
        dict: Immediate success response - actual processing happens in background
    """
    try:
        if not job_emp_id or not job_emp_id.strip():
            return {
                "success": False,
                "error": "job_emp_id is required and cannot be empty"
            }
            
        if not request.jobseeker_ids:
            return {
                "success": False,
                "error": "jobseeker_ids array is required and cannot be empty"
            }
            
        # Validate jobseeker IDs
        valid_ids = []
        for jid in request.jobseeker_ids:
            try:
                valid_id = int(jid)
                if valid_id > 0:
                    valid_ids.append(valid_id)
            except (ValueError, TypeError):
                pass
                
        if not valid_ids:
            return {
                "success": False,
                "error": "No valid jobseeker IDs provided"
            }
            
        # Start background processing (the service call returns immediately)
        job_matching_service.process_specific_candidates_async(job_emp_id.strip(), valid_ids)

        # Return an immediate summary response
        return {
            "success": True,
            "message": f"Specific candidate analysis started for job ID: {job_emp_id}",
            "job_id": job_emp_id,
            "jobseeker_count": len(valid_ids),
            "jobseeker_ids": valid_ids,
            "status": "processing_started"
        }
        
    except Exception as e:
        return {
            "success": False,
            "error": str(e),
            "job_id": job_emp_id
        }
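
# Example request (illustrative ids; body follows SpecificCandidatesRequest):
#   POST /job-matching/specific-candidates/12345
#   {"jobseeker_ids": [101, 102, 103]}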



@app.post("/generate-offer-letter", response_model=OfferLetterResponse)
async def generate_offer_letter(request: OfferLetterRequest):
    """
    Generate an offer letter using AI based on provided candidate and job information
    
    This endpoint:
    1. Takes candidate information (name, job title, department, etc.)
    2. Uses OfferLetterService to generate a professional offer letter
    3. Returns the generated offer letter text
    
    Args:
        request: OfferLetterRequest containing all required candidate and job details
        
    Returns:
        OfferLetterResponse: Generated offer letter or error message
    """
    try:
        # Convert request to dictionary for service
        candidate_data = {
            "candidate_name": request.candidate_name,
            "job_title": request.job_title,
            "department": request.department,
            "company_name": request.company_name,
            "start_date": request.start_date,
            "salary": request.salary,
            "employment_type": request.employment_type,
            "benefits": request.benefits
        }
        
        # Generate offer letter using service
        result = offer_letter_service.generate_offer_letter(candidate_data)
        
        return OfferLetterResponse(
            success=result["success"],
            offer_letter=result.get("offer_letter"),
            error=result.get("error")
        )
        
    except Exception as e:
        return OfferLetterResponse(
            success=False,
            error=f"Error generating offer letter: {str(e)}"
        )
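
# Example request body (a sketch; fields mirror the OfferLetterRequest usage above):
#   {"candidate_name": "Jane Doe", "job_title": "Backend Engineer",
#    "department": "Engineering", "company_name": "Acme Corp",
#    "start_date": "2025-01-15", "salary": "AED 25,000/month",
#    "employment_type": "Full-time", "benefits": "Health insurance, annual flights"}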

@app.get("/test-api-keys")
async def test_all_api_keys():
    """
    Test all API keys and configurations for Qdrant, OpenSearch, and MongoDB
    
    Returns:
        dict: Comprehensive test results for all services
    """
    from dotenv import load_dotenv
    
    load_dotenv()
    
    test_results = {
        "timestamp": "2024-12-02T10:00:00",
        "overall_status": "checking...",
        "services": {}
    }
    
    # Test MongoDB Connection
    try:
        from services.database import is_database_connected
        if is_database_connected():
            test_results["services"]["mongodb"] = {
                "status": "✅ Connected",
                "details": "Database connection successful"
            }
        else:
            test_results["services"]["mongodb"] = {
                "status": "❌ Connection Failed",
                "details": "Cannot connect to MongoDB"
            }
    except Exception as e:
        test_results["services"]["mongodb"] = {
            "status": "❌ Error",
            "details": str(e)
        }
    
    # Test OpenSearch Connection
    try:
        opensearch_host = os.getenv("OPENSEARCH_HOST")
        opensearch_user = os.getenv("OPENSEARCH_USER")
        opensearch_password = os.getenv("OPENSEARCH_PASSWORD")
        
        if not all([opensearch_host, opensearch_user, opensearch_password]):
            test_results["services"]["opensearch"] = {
                "status": "❌ Configuration Missing",
                "details": "OpenSearch credentials not complete in environment"
            }
        else:
            # Test actual OpenSearch connection
            from opensearchpy import OpenSearch
            
            client = OpenSearch(
                hosts=[{"host": opensearch_host, "port": 443}],
                http_auth=(opensearch_user, opensearch_password),
                use_ssl=True,
            )
            
            # Try a simple cluster health check
            health = client.cluster.health()
            
            test_results["services"]["opensearch"] = {
                "status": "✅ Connected",
                "details": f"Cluster status: {health.get('status', 'unknown')}"
            }
            
    except Exception as e:
        test_results["services"]["opensearch"] = {
            "status": "❌ Connection Failed",
            "details": str(e)
        }
    
    # Test Qdrant API Configuration
    QDRANT_API_URL = os.getenv("QDRANT_API_URL")
    QDRANT_SEARCH_URL = os.getenv("QDRANT_SEARCH_URL") 
    QDRANT_API_KEY = os.getenv("QDRANT_API_KEY")
    QDRANT_COLLECTION_NAME = os.getenv("QDRANT_COLLECTION_NAME")
    
    # Check if Qdrant environment variables are set
    if not all([QDRANT_API_URL, QDRANT_SEARCH_URL, QDRANT_API_KEY, QDRANT_COLLECTION_NAME]):
        test_results["services"]["qdrant"] = {
            "status": "❌ Configuration Missing",
            "details": "Qdrant environment variables not complete",
            "config": {
                "QDRANT_API_URL": "✅ Set" if QDRANT_API_URL else "❌ Missing",
                "QDRANT_SEARCH_URL": "✅ Set" if QDRANT_SEARCH_URL else "❌ Missing", 
                "QDRANT_API_KEY": "✅ Set" if QDRANT_API_KEY else "❌ Missing",
                "QDRANT_COLLECTION_NAME": "✅ Set" if QDRANT_COLLECTION_NAME else "❌ Missing"
            }
        }
    else:
        # Test Qdrant API endpoints
        headers = {
            'api-key': QDRANT_API_KEY,
            'Content-Type': 'application/json'
        }
        
        qdrant_tests = {}
        
        # Test 1: Health check
        try:
            base_url = QDRANT_API_URL  # Direct URL without modification
            health_response = requests.get(base_url, headers=headers, timeout=10)
            if health_response.status_code == 200:
                qdrant_tests["health"] = "✅ Healthy"
            else:
                qdrant_tests["health"] = f"❌ Status {health_response.status_code}"
        except Exception as e:
            qdrant_tests["health"] = f"❌ Error: {str(e)}"
        
        # Test 2: Collection access  
        try:
            collection_url = f'{QDRANT_SEARCH_URL}/{QDRANT_COLLECTION_NAME}'
            collection_response = requests.get(collection_url, headers=headers, timeout=10)
            if collection_response.status_code == 200:
                collection_info = collection_response.json()
                if 'result' in collection_info:
                    points_count = collection_info['result'].get('points_count', 0)
                    qdrant_tests["collection"] = f"✅ Accessible ({points_count} points)"
                else:
                    qdrant_tests["collection"] = "✅ Accessible (structure unknown)"
            elif collection_response.status_code == 404:
                qdrant_tests["collection"] = "❌ Collection 'resumes' not found - needs to be created"
            elif collection_response.status_code == 403:
                qdrant_tests["collection"] = "❌ Access forbidden"
            else:
                qdrant_tests["collection"] = f"❌ Status {collection_response.status_code}"
        except Exception as e:
            qdrant_tests["collection"] = f"❌ Error: {str(e)}"
        
        # Test 3: Collections List
        try:
            # str.rstrip strips a character set, not a suffix, so trim "/collections" explicitly
            base = QDRANT_SEARCH_URL[:-len("/collections")] if QDRANT_SEARCH_URL.endswith("/collections") else QDRANT_SEARCH_URL
            collections_url = f'{base}/collections'
            collections_response = requests.get(collections_url, headers=headers, timeout=10)
            if collections_response.status_code == 200:
                collections_data = collections_response.json()
                collections_list = collections_data.get('result', {}).get('collections', [])
                qdrant_tests["collections_list"] = f"✅ Accessible ({len(collections_list)} collections)"
            else:
                qdrant_tests["collections_list"] = f"❌ Status {collections_response.status_code}"
        except Exception as e:
            qdrant_tests["collections_list"] = f"❌ Error: {str(e)}"
            
        # Determine overall Qdrant status
        if all("✅" in status for status in qdrant_tests.values()):
            qdrant_status = "✅ Fully Functional"
        elif any("✅" in status for status in qdrant_tests.values()):
            qdrant_status = "⚠️ Partial Access"
        else:
            qdrant_status = "❌ No Access"
            
        test_results["services"]["qdrant"] = {
            "status": qdrant_status,
            "tests": qdrant_tests,
            "config": {
                "api_url": QDRANT_API_URL,
                "search_url": QDRANT_SEARCH_URL,
                "collection": QDRANT_COLLECTION_NAME,
                "api_key_preview": f"{QDRANT_API_KEY[:20]}..." if QDRANT_API_KEY else "Not set"
            }
        }
    
    # Determine overall status
    all_services_status = [service["status"] for service in test_results["services"].values()]
    
    if all("✅" in status for status in all_services_status):
        test_results["overall_status"] = "✅ All Systems Operational"
    elif any("❌" in status for status in all_services_status):
        test_results["overall_status"] = "❌ Critical Issues Detected"
    else:
        test_results["overall_status"] = "⚠️ Some Issues Detected"
    
    # Add recommendations
    recommendations = []
    
    if "❌" in test_results["services"].get("qdrant", {}).get("status", ""):
        recommendations.append("🔧 Update Qdrant API key with proper permissions")
        
    if "❌" in test_results["services"].get("opensearch", {}).get("status", ""):
        recommendations.append("🔧 Check OpenSearch credentials and connectivity")
        
    if "❌" in test_results["services"].get("mongodb", {}).get("status", ""):
        recommendations.append("🔧 Verify MongoDB connection settings")
    
    if not recommendations:
        recommendations.append("✨ All services are working correctly!")
        
    test_results["recommendations"] = recommendations
    
    return test_results

