""" Application factory for creating and configuring the Flask app. This approach allows for easier testing and management of application state. """ import logging import os from typing import Dict from dotenv import load_dotenv from flask import Flask, jsonify, render_template, request logger = logging.getLogger(__name__) # Load environment variables from .env file load_dotenv() def ensure_embeddings_on_startup(): """ Ensure embeddings exist and have the correct dimension on app startup. This is critical for Render deployments where the vector store is ephemeral. """ from src.config import ( COLLECTION_NAME, CORPUS_DIRECTORY, DEFAULT_CHUNK_SIZE, DEFAULT_OVERLAP, EMBEDDING_DIMENSION, EMBEDDING_MODEL_NAME, RANDOM_SEED, VECTOR_DB_PERSIST_PATH, ) from src.ingestion.ingestion_pipeline import IngestionPipeline from src.vector_store.vector_db import VectorDatabase try: logging.info("Checking vector store on startup...") # Initialize vector database to check its state vector_db = VectorDatabase(VECTOR_DB_PERSIST_PATH, COLLECTION_NAME) # Check if embeddings exist and have correct dimension if not vector_db.has_valid_embeddings(EMBEDDING_DIMENSION): logging.warning( f"Vector store is empty or has wrong dimension. " f"Expected: {EMBEDDING_DIMENSION}, " f"Current: {vector_db.get_embedding_dimension()}" ) logging.info( f"Running ingestion pipeline with model: {EMBEDDING_MODEL_NAME}" ) # Run ingestion pipeline to rebuild embeddings ingestion_pipeline = IngestionPipeline( chunk_size=DEFAULT_CHUNK_SIZE, overlap=DEFAULT_OVERLAP, seed=RANDOM_SEED, store_embeddings=True, ) # Process the corpus directory results = ingestion_pipeline.process_directory(CORPUS_DIRECTORY) if not results or len(results) == 0: logging.error( "Ingestion failed or processed 0 chunks. " "Please check the corpus directory and " "ingestion pipeline for errors." ) else: logging.info(f"Ingestion completed: {len(results)} chunks processed") else: logging.info( f"Vector store is valid with {vector_db.get_count()} embeddings " f"of dimension {vector_db.get_embedding_dimension()}" ) except Exception as e: logging.error(f"Failed to ensure embeddings on startup: {e}") # Don't crash the app, but log the error # The app will still start but searches may fail def create_app(): """Create and configure the Flask application.""" from src.utils.memory_utils import clean_memory, log_memory_usage # Clean memory at start clean_memory("App startup") # Proactively disable ChromaDB telemetry os.environ.setdefault("ANONYMIZED_TELEMETRY", "False") os.environ.setdefault("CHROMA_TELEMETRY", "False") # Attempt to configure chromadb and monkeypatch telemetry try: import chromadb try: chromadb.configure(anonymized_telemetry=False) except Exception: pass # Non-fatal try: from chromadb.telemetry.product import posthog as _posthog if hasattr(_posthog, "capture"): setattr(_posthog, "capture", lambda *args, **kwargs: None) if hasattr(_posthog, "Posthog") and hasattr(_posthog.Posthog, "capture"): setattr(_posthog.Posthog, "capture", lambda *args, **kwargs: None) except Exception: pass # Non-fatal except Exception: pass # chromadb not installed # Get the absolute path to the project root directory (parent of src) project_root = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) template_dir = os.path.join(project_root, "templates") static_dir = os.path.join(project_root, "static") app = Flask(__name__, template_folder=template_dir, static_folder=static_dir) # Force garbage collection after initialization clean_memory("Post-initialization") # Add memory circuit breaker @app.before_request def check_memory(): try: memory_mb = log_memory_usage("Before request") if memory_mb and memory_mb > 450: # Critical threshold for 512MB limit clean_memory("Emergency cleanup") if memory_mb > 480: # Near crash return jsonify({"error": "Server too busy, try again later"}), 503 except Exception as e: # Don't let memory monitoring crash the app logger.debug(f"Memory monitoring failed: {e}") pass # Lazy-load services to avoid high memory usage at startup # These will be initialized on the first request to a relevant endpoint app.config["RAG_PIPELINE"] = None app.config["INGESTION_PIPELINE"] = None app.config["SEARCH_SERVICE"] = None def get_rag_pipeline(): """Initialize and cache the RAG pipeline.""" # Always check if we have valid LLM configuration before using cache from src.llm.llm_service import LLMService # Quick check for API keys - don't use cache if no keys available has_api_keys = bool( os.getenv("OPENROUTER_API_KEY") or os.getenv("GROQ_API_KEY") ) if not has_api_keys: # Don't cache when no API keys - always raise ValueError LLMService.from_environment() # This will raise ValueError if app.config.get("RAG_PIPELINE") is None: logging.info("Initializing RAG pipeline for the first time...") from src.config import ( COLLECTION_NAME, EMBEDDING_BATCH_SIZE, EMBEDDING_DEVICE, EMBEDDING_MODEL_NAME, VECTOR_DB_PERSIST_PATH, ) from src.embedding.embedding_service import EmbeddingService from src.rag.rag_pipeline import RAGPipeline from src.search.search_service import SearchService from src.vector_store.vector_db import VectorDatabase vector_db = VectorDatabase(VECTOR_DB_PERSIST_PATH, COLLECTION_NAME) embedding_service = EmbeddingService( model_name=EMBEDDING_MODEL_NAME, device=EMBEDDING_DEVICE, batch_size=EMBEDDING_BATCH_SIZE, ) search_service = SearchService(vector_db, embedding_service) # This will raise ValueError if no LLM API keys are configured llm_service = LLMService.from_environment() app.config["RAG_PIPELINE"] = RAGPipeline(search_service, llm_service) logging.info("RAG pipeline initialized.") return app.config["RAG_PIPELINE"] def get_ingestion_pipeline(store_embeddings=True): """Initialize the ingestion pipeline.""" # Ingestion is request-specific, so we don't cache it from src.config import ( DEFAULT_CHUNK_SIZE, DEFAULT_OVERLAP, EMBEDDING_BATCH_SIZE, EMBEDDING_DEVICE, EMBEDDING_MODEL_NAME, RANDOM_SEED, ) from src.embedding.embedding_service import EmbeddingService from src.ingestion.ingestion_pipeline import IngestionPipeline embedding_service = None if store_embeddings: embedding_service = EmbeddingService( model_name=EMBEDDING_MODEL_NAME, device=EMBEDDING_DEVICE, batch_size=EMBEDDING_BATCH_SIZE, ) return IngestionPipeline( chunk_size=DEFAULT_CHUNK_SIZE, overlap=DEFAULT_OVERLAP, seed=RANDOM_SEED, store_embeddings=store_embeddings, embedding_service=embedding_service, ) def get_search_service(): """Initialize and cache the search service.""" if app.config.get("SEARCH_SERVICE") is None: logging.info("Initializing search service for the first time...") from src.config import ( COLLECTION_NAME, EMBEDDING_BATCH_SIZE, EMBEDDING_DEVICE, EMBEDDING_MODEL_NAME, VECTOR_DB_PERSIST_PATH, ) from src.embedding.embedding_service import EmbeddingService from src.search.search_service import SearchService from src.utils.memory_utils import MemoryManager from src.vector_store.vector_db import VectorDatabase # Use memory manager for this expensive operation with MemoryManager("search_service_initialization"): vector_db = VectorDatabase(VECTOR_DB_PERSIST_PATH, COLLECTION_NAME) embedding_service = EmbeddingService( model_name=EMBEDDING_MODEL_NAME, device=EMBEDDING_DEVICE, batch_size=EMBEDDING_BATCH_SIZE, ) app.config["SEARCH_SERVICE"] = SearchService( vector_db, embedding_service ) logging.info("Search service initialized.") return app.config["SEARCH_SERVICE"] @app.route("/") def index(): return render_template("chat.html") @app.route("/health") def health(): from src.utils.memory_utils import get_memory_usage memory_mb = get_memory_usage() status = "ok" # Add warning if memory usage is high if memory_mb > 400: # Warning threshold for 512MB limit status = "warning" elif memory_mb > 450: # Critical threshold status = "critical" return ( jsonify( { "status": status, "memory_mb": round(memory_mb, 1), "timestamp": __import__("datetime").datetime.utcnow().isoformat(), } ), 200, ) @app.route("/ingest", methods=["POST"]) def ingest(): try: from src.config import CORPUS_DIRECTORY data = request.get_json() if request.is_json else {} store_embeddings = bool(data.get("store_embeddings", True)) pipeline = get_ingestion_pipeline(store_embeddings) result = pipeline.process_directory_with_embeddings(CORPUS_DIRECTORY) # Create response with enhanced information response = { "status": result["status"], "chunks_processed": result["chunks_processed"], "files_processed": result["files_processed"], "embeddings_stored": result["embeddings_stored"], "store_embeddings": result["store_embeddings"], "message": ( f"Successfully processed {result['chunks_processed']} chunks " f"from {result['files_processed']} files" ), } # Include failed files info if any if result["failed_files"]: response["failed_files"] = result["failed_files"] failed_count = len(result["failed_files"]) response["warnings"] = f"{failed_count} files failed to process" return jsonify(response) except Exception as e: logging.error(f"Ingestion failed: {e}", exc_info=True) return jsonify({"status": "error", "message": str(e)}), 500 @app.route("/search", methods=["POST"]) def search(): from src.utils.memory_utils import log_memory_usage try: log_memory_usage("search_request_start") # Validate request contains JSON data if not request.is_json: return ( jsonify( { "status": "error", "message": "Content-Type must be application/json", } ), 400, ) data = request.get_json() # Validate required query parameter query = data.get("query") if query is None: return ( jsonify( {"status": "error", "message": "Query parameter is required"} ), 400, ) if not isinstance(query, str) or not query.strip(): return ( jsonify( { "status": "error", "message": "Query must be a non-empty string", } ), 400, ) # Extract optional parameters with defaults top_k = data.get("top_k", 5) threshold = data.get("threshold", 0.3) # Validate parameters if not isinstance(top_k, int) or top_k <= 0: return ( jsonify( { "status": "error", "message": "top_k must be a positive integer", } ), 400, ) if not isinstance(threshold, (int, float)) or not (0.0 <= threshold <= 1.0): return ( jsonify( { "status": "error", "message": "threshold must be a number between 0 and 1", } ), 400, ) search_service = get_search_service() results = search_service.search( query=query.strip(), top_k=top_k, threshold=threshold ) # Format response response = { "status": "success", "query": query.strip(), "results_count": len(results), "results": results, } return jsonify(response) except ValueError as e: return jsonify({"status": "error", "message": str(e)}), 400 except Exception as e: logging.error(f"Search failed: {e}", exc_info=True) return ( jsonify({"status": "error", "message": f"Search failed: {str(e)}"}), 500, ) @app.route("/chat", methods=["POST"]) def chat(): try: # Validate request contains JSON data if not request.is_json: return ( jsonify( { "status": "error", "message": "Content-Type must be application/json", } ), 400, ) data = request.get_json() # Validate required message parameter message = data.get("message") if message is None: return ( jsonify( {"status": "error", "message": "message parameter is required"} ), 400, ) if not isinstance(message, str) or not message.strip(): return ( jsonify( { "status": "error", "message": "message must be a non-empty string", } ), 400, ) # Extract optional parameters conversation_id = data.get("conversation_id") include_sources = data.get("include_sources", True) include_debug = data.get("include_debug", False) try: rag_pipeline = get_rag_pipeline() rag_response = rag_pipeline.generate_answer(message.strip()) from src.rag.response_formatter import ResponseFormatter formatter = ResponseFormatter() # Format response for API if include_sources: formatted_response = formatter.format_api_response( rag_response, include_debug ) else: formatted_response = formatter.format_chat_response( rag_response, conversation_id, include_sources=False ) return jsonify(formatted_response) except ValueError as e: # LLM configuration error - return 503 Service Unavailable return ( jsonify( { "status": "error", "message": f"LLM service configuration error: {str(e)}", "details": ( "Please ensure OPENROUTER_API_KEY or GROQ_API_KEY " "environment variables are set" ), } ), 503, ) except Exception as e: logging.error(f"Chat failed: {e}", exc_info=True) return ( jsonify( {"status": "error", "message": f"Chat request failed: {str(e)}"} ), 500, ) @app.route("/chat/health") def chat_health(): try: rag_pipeline = get_rag_pipeline() health_data = rag_pipeline.health_check() from src.rag.response_formatter import ResponseFormatter formatter = ResponseFormatter() health_response = formatter.create_health_response(health_data) # Determine HTTP status based on health if health_data.get("pipeline") == "healthy": return jsonify(health_response), 200 elif health_data.get("pipeline") == "degraded": return jsonify(health_response), 200 # Still functional else: return jsonify(health_response), 503 # Service unavailable except ValueError as e: return ( jsonify( { "status": "error", "message": f"LLM configuration error: {str(e)}", "health": { "pipeline_status": "unhealthy", "components": { "llm_service": { "status": "unconfigured", "error": str(e), } }, }, } ), 503, ) except Exception as e: logging.error(f"Chat health check failed: {e}", exc_info=True) return ( jsonify( {"status": "error", "message": f"Health check failed: {str(e)}"} ), 500, ) # Add other non-ML routes directly @app.route("/chat/suggestions") def get_query_suggestions(): suggestions = [ "What is our remote work policy?", "How do I request time off?", "What are our information security guidelines?", "How does our expense reimbursement work?", "Tell me about our diversity and inclusion policy", "What's the process for employee performance reviews?", "How do I report an emergency at work?", "What professional development opportunities are available?", ] return jsonify({"status": "success", "suggestions": suggestions}) @app.route("/chat/feedback", methods=["POST"]) def submit_feedback(): try: feedback_data = request.json if not feedback_data: return ( jsonify( {"status": "error", "message": "No feedback data provided"} ), 400, ) required_fields = ["conversation_id", "message_id", "feedback_type"] for field in required_fields: if field not in feedback_data: return ( jsonify( { "status": "error", "message": f"Missing required field: {field}", } ), 400, ) print(f"Received feedback: {feedback_data}") return jsonify( { "status": "success", "message": "Feedback received", "feedback": feedback_data, } ) except Exception as e: print(f"Error processing feedback: {str(e)}") return ( jsonify( { "status": "error", "message": f"Error processing feedback: {str(e)}", } ), 500, ) @app.route("/chat/source/") def get_source_document(source_id: str): try: from typing import Union source_map: Dict[str, Dict[str, Union[str, Dict[str, str]]]] = { "remote_work": { "content": ( "# Remote Work Policy\n\n" "Employees may work remotely up to 3 days per week" " with manager approval." ), "metadata": { "filename": "remote_work_policy.md", "last_updated": "2025-09-15", }, }, "pto": { "content": ( "# PTO Policy\n\n" "Full-time employees receive 20 days of PTO annually, " "accrued monthly." ), "metadata": { "filename": "pto_policy.md", "last_updated": "2025-08-20", }, }, "security": { "content": ( "# Information Security Policy\n\n" "All employees must use company-approved devices and " "software for work tasks." ), "metadata": { "filename": "information_security_policy.md", "last_updated": "2025-10-01", }, }, "expense": { "content": ( "# Expense Reimbursement\n\n" "Submit all expense reports within 30 days of incurring " "the expense." ), "metadata": { "filename": "expense_reimbursement_policy.md", "last_updated": "2025-07-10", }, }, } if source_id in source_map: source_data = source_map[source_id] return jsonify( { "status": "success", "source_id": source_id, "content": source_data["content"], "metadata": source_data["metadata"], } ) else: return ( jsonify( { "status": "error", "message": ( f"Source document with ID {source_id} not found" ), } ), 404, ) except Exception as e: return ( jsonify( { "status": "error", "message": f"Failed to retrieve source document: {str(e)}", } ), 500, ) @app.route("/conversations", methods=["GET"]) def get_conversations(): conversations = [ { "id": "conv-123456", "title": "HR Policy Questions", "timestamp": "2025-10-15T14:30:00Z", "preview": "What is our remote work policy?", }, { "id": "conv-789012", "title": "Project Planning Queries", "timestamp": "2025-10-14T09:15:00Z", "preview": "How do we handle project kickoffs?", }, { "id": "conv-345678", "title": "Security Compliance", "timestamp": "2025-10-12T16:45:00Z", "preview": "What are our password requirements?", }, ] return jsonify({"status": "success", "conversations": conversations}) @app.route("/conversations/", methods=["GET"]) def get_conversation(conversation_id: str): try: from typing import List, Union if conversation_id == "conv-123456": messages: List[Dict[str, Union[str, List[Dict[str, str]]]]] = [ { "id": "msg-111", "role": "user", "content": "What is our remote work policy?", "timestamp": "2025-10-15T14:30:00Z", }, { "id": "msg-112", "role": "assistant", "content": ( "According to our remote work policy, employees may " "work up to 3 days per week with manager approval." ), "timestamp": "2025-10-15T14:30:15Z", "sources": [ {"id": "remote_work", "title": "Remote Work Policy"} ], }, ] else: return ( jsonify( { "status": "error", "message": f"Conversation {conversation_id} not found", } ), 404, ) return jsonify( { "status": "success", "conversation_id": conversation_id, "messages": messages, } ) except Exception as e: app.logger.error(f"An unexpected error occurred: {e}") # noqa: E501 return ( jsonify({"status": "error", "message": "An internal error occurred."}), 500, ) # noqa: E501 # Register memory-aware error handlers from src.utils.error_handlers import register_error_handlers register_error_handlers(app) # Ensure embeddings on app startup. # Embeddings are checked and rebuilt before the app starts serving requests. # Disabled: Using pre-built embeddings to avoid memory spikes during deployment. # ensure_embeddings_on_startup() return app