Commit 2a632a7 · Parent: 590a472 · nice
app.py CHANGED

@@ -117,7 +117,7 @@ try:
 
     ADK_AGENT = Agent(
         name="AuraMindGlowAgent",
-        model="gemini-
+        model="gemini-2.5-flash",
         description="A farming assistant that can diagnose plant health and suggest remedies.",
         instruction="You are a friendly farming assistant. Your goal is to help users identify plant health issues and find solutions. Use your tools to diagnose the plant from an image and then find a remedy.",
         tools=[DIAGNOSIS_TOOL, REMEDY_TOOL]
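
For context, DIAGNOSIS_TOOL and REMEDY_TOOL are defined elsewhere in the repo and not shown in this diff. In Google ADK, a tool can be a plain Python function whose type hints and docstring become the schema the model sees. A minimal sketch of what a remedy tool could look like; the name find_remedy and the lookup table are illustrative assumptions, not code from this Space:

# Hypothetical sketch: ADK wraps a plain function as a tool, so the
# docstring and type hints below would form the tool schema the agent sees.
def find_remedy(condition: str) -> dict:
    """Look up a treatment for a diagnosed maize condition.

    Args:
        condition: The condition string produced by the diagnosis tool,
            e.g. "maize leaf blight".

    Returns:
        A dict with a short remedy description.
    """
    # Illustrative table only; a real tool would query a knowledge base.
    remedies = {
        "maize leaf blight": "Apply a foliar fungicide and rotate crops next season.",
        "healthy": "No treatment needed; keep up regular watering.",
    }
    return {"remedy": remedies.get(condition.lower(), "Consult a local agronomist.")}

# e.g. REMEDY_TOOL = find_remedy, then Agent(..., tools=[DIAGNOSIS_TOOL, REMEDY_TOOL])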
tools.py CHANGED

@@ -25,13 +25,14 @@ def create_plant_diagnosis_tool(model: FastVisionModel, processor: AutoProcessor
 
     image = image.convert("RGB")
     messages = [
-        {"role": "user", "content": [{"type": "text", "text": "What is the condition of this maize plant?
+        {"role": "user", "content": [{"type": "text", "text": "What is the condition of this maize plant?"}, {"type": "image", "image": image}]}
     ]
     text_prompt = processor.tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
     inputs = processor(text=text_prompt, images=image, return_tensors="pt").to(model.device)
 
     with torch.inference_mode():
         outputs = model.generate(**inputs, max_new_tokens=48, use_cache=True)
+        print(f"Model outputs: {outputs}")
 
     response = processor.batch_decode(outputs, skip_special_tokens=True)[0]
 
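
One note on the decode step this diff leaves unchanged: with Hugging Face generate(), outputs contains the prompt tokens followed by the newly generated ones, so batch_decode over the full tensor echoes the prompt back into response. A minimal trimming sketch, assuming the inputs and outputs variables from the hunk above; this is a common pattern, not a change the commit makes:

# Decode only the tokens generated after the prompt.
prompt_len = inputs["input_ids"].shape[1]   # number of prompt tokens
new_tokens = outputs[:, prompt_len:]        # generated continuation only
answer = processor.batch_decode(new_tokens, skip_special_tokens=True)[0]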