# ==============================================================================
# Aura Mind Glow - Main Application (Refactored)
# ==============================================================================
"""This script launches the Aura Mind Glow application, now with multiple modes."""

# --- Step 0: Essential Imports ---
import gradio as gr
from PIL import Image
import os
import warnings
import socket
import tempfile

# Silence noisy library warnings for a cleaner console, and keep
# torch.compile disabled before any torch-dependent module is imported.
warnings.filterwarnings("ignore")
os.environ["TORCH_COMPILE_DISABLE"] = "1"  # Ensure torch compile is off

# --- Step 1: Import Core Components from Modules ---
# NOTE(review): these project imports deliberately come after the warning
# filters are installed, so import-time warnings from the heavy modules
# are suppressed — do not regroup them above.
from vision_model import load_vision_model
from knowledge_base import get_retriever
from agent_setup import initialize_adk
from google.genai import types
from story_generator import create_story_prompt_from_pdf, generate_video_from_prompt
from langchain_huggingface import HuggingFaceEndpoint

print("✅ All libraries imported successfully.")

# --- Step 2: Global Initialization ---
# This expensive setup runs only ONCE when the application starts.
print("Performing initial setup...")

VISION_MODEL, PROCESSOR = load_vision_model()
RETRIEVER = get_retriever()

# Initialize ADK components for Connected Mode.  initialize_adk may return
# None (e.g. missing credentials), in which case every tool stays None and
# Connected Mode is disabled at launch time.
adk_components = initialize_adk(VISION_MODEL, PROCESSOR, RETRIEVER)
ADK_RUNNER = adk_components["runner"] if adk_components else None
DIAGNOSIS_TOOL = adk_components["diagnosis_tool"] if adk_components else None
REMEDY_TOOL = adk_components["remedy_tool"] if adk_components else None
SESSION_SERVICE = adk_components["session_service"] if adk_components else None

# Initialize a separate LLM for the Story Generator (requires HF_TOKEN).
STORY_LLM = None
if os.environ.get("HF_TOKEN"):
    try:
        STORY_LLM = HuggingFaceEndpoint(
            repo_id="HuggingFaceH4/zephyr-7b-beta",
            huggingfacehub_api_token=os.environ.get("HF_TOKEN"),
            max_new_tokens=150,
            temperature=0.4,
        )
        print("✅ Story Generator LLM initialized successfully.")
    except Exception as e:
        print(f"❌ Could not initialize Story Generator LLM: {e}")
else:
    print("❌ HF_TOKEN not found. Story Generator Mode will be disabled.")


# --- Step 3: Define Gradio UIs ---
def create_field_mode_ui():
    """Creates the Gradio UI for the offline Field Mode."""

    def get_diagnosis_and_remedy(uploaded_image: Image.Image) -> str:
        """Diagnose a maize plant image and retrieve a remedy from the local KB.

        Returns a Markdown report string, or raises gr.Error on failure.
        """
        if uploaded_image is None:
            return "Please upload an image of a maize plant first."
        if RETRIEVER is None:
            raise gr.Error("Knowledge base is not loaded. Cannot find remedy. Check logs.")
        # FIX: the tools are None when ADK initialization failed; the original
        # crashed with "'NoneType' object is not callable" instead of a clear error.
        if DIAGNOSIS_TOOL is None or REMEDY_TOOL is None:
            raise gr.Error("Diagnosis tools are not initialized. Check logs.")
        temp_file_path = None
        try:
            # The diagnosis tool expects a file path, so persist the PIL image.
            with tempfile.NamedTemporaryFile(delete=False, suffix=".png") as temp_file:
                uploaded_image.save(temp_file.name)
                temp_file_path = temp_file.name
            diagnosis = DIAGNOSIS_TOOL(temp_file_path)
            print(f"Diagnosis received: {diagnosis}")
            if "Could not parse" in diagnosis:
                return f"Sorry, I couldn't identify the condition from the image. Raw output: {diagnosis}"
            remedy = REMEDY_TOOL(diagnosis)
            final_response = f"""
## Diagnosis Report

**Condition Identified:**
### {diagnosis}

---

## Suggested Remedy

{remedy}
"""
            print("Workflow complete. Returning response.")
            return final_response
        except Exception as e:
            print(f"An error occurred during the analysis workflow: {e}")
            raise gr.Error(f"An unexpected error occurred: {e}")
        finally:
            # Always remove the temporary image file, even on failure.
            if temp_file_path and os.path.exists(temp_file_path):
                os.remove(temp_file_path)

    css = """
    footer {visibility: hidden !important;}
    .gradio-container {font-family: 'IBM Plex Sans', sans-serif;}
    """
    return gr.Interface(
        fn=get_diagnosis_and_remedy,
        inputs=gr.Image(type="pil", label="Upload Maize Plant Image", sources=["upload", "webcam"]),
        outputs=gr.Markdown(label="Diagnosis and Remedy Report", value="The report will appear here..."),
        title="🌽 Aura Mind Glow: Field Mode (Offline)",
        description="**A 100% Offline-Capable Farming Assistant.** Upload an image of a maize plant. The AI will diagnose its condition and retrieve a treatment plan from its local knowledge base.",
        article="Built with Unsloth, LangChain, and Gradio. Version 2.1",
        allow_flagging="never",
        theme=gr.themes.Soft(primary_hue="teal", secondary_hue="orange"),
        css=css,
    )


def create_connected_mode_ui():
    """Creates the Gradio UI for the online Connected Mode."""
    with gr.Blocks(theme=gr.themes.Soft(primary_hue="green", secondary_hue="lime")) as demo:
        gr.Markdown("# 🌽 Aura Mind Glow: Connected Mode 🤖")
        gr.Markdown("I am an AI farming assistant. Upload an image and ask for a diagnosis and remedy.")
        chatbot = gr.Chatbot(height=600)
        msg = gr.MultimodalTextbox(file_types=["image"], label="Ask a question and/or upload an image...")

        async def respond(chat_input, history):
            """Stream the ADK agent's reply for a multimodal chat turn."""
            user_id = "default_user"  # In a real app, this would be unique per user
            if not SESSION_SERVICE or not ADK_RUNNER:
                # FIX: use a list like every other history entry (Gradio chat
                # history entries are mutable pairs; the original mixed tuples
                # and lists).
                history.append([chat_input["text"], "Connected mode is not available. Check logs."])
                yield history, gr.MultimodalTextbox(value=None)
                return
            session = await SESSION_SERVICE.create_session(user_id=user_id, app_name="AuraMindGlow")
            files = chat_input.get("files", [])
            text = chat_input.get("text", "")
            if not files:
                history.append([text, "Please upload an image for diagnosis."])
                yield history, gr.MultimodalTextbox(value=None)
                return
            # Create the prompt for the ADK agent.
            with open(files[0], 'rb') as f:
                image_data = f.read()
            image_part = types.Part(
                inline_data=types.Blob(
                    mime_type='image/png',
                    data=image_data,
                )
            )
            text_part = types.Part(text=text or "Diagnose this plant and provide a remedy.")
            prompt = types.Content(parts=[image_part, text_part], role="user")
            # Stream the response from the agent, updating the last history
            # entry in place on every chunk.
            bot_message = ""
            history.append([(files[0],), bot_message])
            try:
                async for event in ADK_RUNNER.run_async(
                    session_id=session.id, user_id=user_id, new_message=prompt
                ):
                    if event.is_llm_response_chunk() and event.content.parts:
                        bot_message += event.content.parts[0].text
                        history[-1] = ((files[0],), bot_message)
                        yield history, gr.MultimodalTextbox(value=None)
                    elif event.is_final_response() and event.content.parts:
                        # Final response replaces (not appends to) the streamed text.
                        bot_message = event.content.parts[0].text
                        history[-1] = ((files[0],), bot_message)
                        yield history, gr.MultimodalTextbox(value=None)
            except Exception as e:
                print(f"Error during agent execution: {e}")
                history[-1] = ((files[0],), f"An error occurred: {e}")
                yield history, gr.MultimodalTextbox(value=None)

        msg.submit(respond, [msg, chatbot], [chatbot, msg])
    return demo


def create_story_mode_ui():
    """Creates the Gradio UI for the Farmer's Story Mode."""
    with gr.Blocks(theme=gr.themes.Soft(primary_hue="blue", secondary_hue="yellow")) as demo:
        gr.Markdown("# 🌽 Aura Mind Glow: Farmer's Story Mode 🎬")
        gr.Markdown("Create a short video story from your farm documents. Upload a PDF, describe the mood, and let the AI create a visual story.")
        with gr.Row():
            with gr.Column(scale=1):
                pdf_input = gr.File(label="Upload Farm PDF", file_types=[".pdf"])
                image_input = gr.Image(type="filepath", label="Optional: Upload a Starting Image")
                user_prompt_input = gr.Textbox(
                    label="Describe the video's tone or theme",
                    placeholder="e.g., hopeful, a look back at a tough season, etc.",
                )
                submit_btn = gr.Button("Generate Video Story")
            with gr.Column(scale=2):
                video_output = gr.Video(label="Generated Video Story")
                status_output = gr.Textbox(label="Status", interactive=False, lines=3)

        def story_generation_process(pdf, image, user_prompt):
            """Generator yielding (video_path, status) updates through the pipeline."""
            if pdf is None:
                yield None, "Please upload a PDF document to begin."
                return
            yield None, "Step 1: Reading PDF and generating creative prompt..."
            creative_prompt = create_story_prompt_from_pdf(pdf.name, user_prompt, STORY_LLM)
            # NOTE(review): helpers signal failure by returning a string
            # containing "Error" — presumably an error message; verify in
            # story_generator.
            if "Error" in creative_prompt:
                yield None, creative_prompt
                return
            yield None, f"Step 2: Generating video with prompt: '{creative_prompt[:100]}...' (This may take several minutes)"
            video_path = generate_video_from_prompt(creative_prompt, image)
            if "Error" in video_path:
                yield None, video_path
                return
            yield video_path, "Video generation complete!"

        submit_btn.click(
            story_generation_process,
            inputs=[pdf_input, image_input, user_prompt_input],
            outputs=[video_output, status_output],
        )
    return demo


# --- Step 4: App Launcher ---
def check_internet_connection(host="8.8.8.8", port=53, timeout=3):
    """Check for internet connectivity by opening a TCP connection to a DNS host."""
    try:
        socket.setdefaulttimeout(timeout)
        socket.socket(socket.AF_INET, socket.SOCK_STREAM).connect((host, port))
        return True
    except socket.error:
        return False


if __name__ == "__main__":
    field_mode_ui = create_field_mode_ui()
    interface_list = [field_mode_ui]
    tab_titles = ["Field Mode (Offline)"]

    # Conditionally add modes that require an internet connection.
    if check_internet_connection():
        if ADK_RUNNER:
            connected_mode_ui = create_connected_mode_ui()
            interface_list.append(connected_mode_ui)
            tab_titles.append("Connected Mode")
        else:
            print("⚠️ Connected Mode disabled: ADK components not initialized.")
        # FIX: the original called an undefined create_document_analysis_ui()
        # and left a dangling else referring to the Story Mode; the defined UI
        # is create_story_mode_ui(), which depends on STORY_LLM.
        if STORY_LLM:
            story_mode_ui = create_story_mode_ui()
            interface_list.append(story_mode_ui)
            tab_titles.append("Farmer's Story Mode")
        else:
            print("⚠️ Farmer's Story Mode disabled: Story LLM not initialized.")
    else:
        print("❌ No internet connection. Launching in Offline Mode only.")

    # Launch the appropriate UI: tabbed when more than one mode is available.
    if len(interface_list) > 1:
        ui = gr.TabbedInterface(interface_list, tab_titles)
    else:
        ui = field_mode_ui

    ui.launch(share=True, debug=True)