Jeff28 committed
Commit 0d452fb · verified · Parent: 57f86c0

Update app.py

Files changed (1): app.py +51 -30
app.py CHANGED
@@ -6,7 +6,8 @@ from tensorflow.keras.models import load_model
 from PIL import Image
 
 # ===== Hugging Face API Token =====
-HF_API_TOKEN = os.getenv("HF_API_TOKEN")  # Ensure this is set in your environment
+# Set HF_API_TOKEN in your environment. For local testing, you can create a .env file.
+HF_API_TOKEN = os.getenv("HF_API_TOKEN")  # e.g., "hf_xxx..."
 
 # ===== Load Trained Models =====
 model_a = load_model("Tomato_Leaf_Disease_Model.h5")
@@ -24,27 +25,39 @@ def preprocess_image(image, target_size=(224, 224)):
     img_array = np.expand_dims(img_array, axis=0)
     return img_array
 
+# ===== Disease Label Mappings =====
+# Update these labels to match your model training.
+disease_labels = {
+    0: "No disease",
+    1: "Early Blight",
+    2: "Late Blight",
+    3: "Septoria Leaf Spot",
+    4: "Bacterial Spot",
+    5: "Mosaic Virus"
+}
+
 # ===== Prediction Functions =====
 def predict_model_a(image):
     img = preprocess_image(image)
     pred = model_a.predict(img)
-    # Replace with your own mapping from predictions to disease names.
-    return "Disease A detected" if np.argmax(pred) == 1 else "No disease"
+    predicted_class = np.argmax(pred)
+    return disease_labels.get(predicted_class, "Unknown result")
 
 def predict_model_b(image):
     img = preprocess_image(image)
     pred = model_b.predict(img)
-    return "Disease B detected" if np.argmax(pred) == 1 else "No disease"
+    predicted_class = np.argmax(pred)
+    return disease_labels.get(predicted_class, "Unknown result")
 
 def predict_classifier(image):
     img = preprocess_image(image)
     pred = classifier_model.predict(img)
-    # Assume classifier returns class 1 for a tomato leaf.
+    # Here we assume the classifier returns class 1 for "Tomato Leaf"
     return "Tomato Leaf" if np.argmax(pred) == 1 else "Not Tomato Leaf"
 
 # ===== Hugging Face Inference API Calls =====
 def call_llama2(prompt):
-    """Call the Llama 2-7B Chat model on Hugging Face for conversational advice."""
+    """Call Llama 2-7B Chat model on Hugging Face for conversational advice."""
     headers = {"Authorization": f"Bearer {HF_API_TOKEN}"}
     payload = {"inputs": prompt, "parameters": {"max_new_tokens": 100}}
     url = "https://api-inference.huggingface.co/models/meta-llama/Llama-2-7b-chat-hf"
@@ -75,23 +88,31 @@ def call_openassistant(prompt):
 
 # ===== AI Assistant Functions =====
 def ai_assistant_v1(image, prediction):
-    # Use Llama 2-7B Chat (Model A version)
+    # Use Llama 2-7B Chat (Model A versions)
     if "No disease" in prediction:
-        prompt = ("You are an agricultural advisor. The tomato crop appears healthy. "
-                  "Provide additional preventive tips and good practices for maintaining crop health.")
+        prompt = (
+            "You are an agricultural advisor. The tomato crop appears healthy. "
+            "Provide additional preventive tips and best practices for maintaining crop health."
+        )
     else:
-        prompt = (f"You are an agricultural advisor. A disease has been detected: {prediction}. "
-                  "Provide detailed advice on how to manage and curb this disease, and explain more about it in simple terms.")
+        prompt = (
+            f"You are an agricultural advisor. A disease has been detected: {prediction}. "
+            "Provide detailed advice on how to manage and curb this disease, explaining it in simple terms."
+        )
     return call_llama2(prompt)
 
 def ai_assistant_v2(image, prediction):
-    # Use OpenAssistant (Model B version)
+    # Use OpenAssistant (Model B versions)
    if "No disease" in prediction:
-        prompt = ("You are an agricultural advisor. The tomato crop appears healthy. "
-                  "Offer additional preventive tips and guidelines for maintaining a healthy crop.")
+        prompt = (
+            "You are an agricultural advisor. The tomato crop appears healthy. "
+            "Offer additional preventive tips and guidelines for maintaining a healthy crop."
+        )
     else:
-        prompt = (f"You are an agricultural advisor. A disease has been detected: {prediction}. "
-                  "Provide actionable steps and detailed advice on how to control and manage this disease in tomato crops.")
+        prompt = (
+            f"You are an agricultural advisor. A disease has been detected: {prediction}. "
+            "Provide actionable steps and detailed advice on how to control and manage this disease in tomato crops."
+        )
     return call_openassistant(prompt)
 
 # ===== Process Function Based on Version =====
@@ -115,8 +136,10 @@ def process_version(image, version):
             return "Classifier: The image is not a tomato leaf. Please try again."
         result = predict_model_a(image)
         advice = ai_assistant_v1(image, result)
-        return (f"Classifier: {cls_result}\nModel A Prediction: {result}\nAdvice: {advice}\n\n"
-                f"[View Model A & Classifier Training Notebook](https://huggingface.co/your-model-a-classifier-notebook)")
+        return (
+            f"Classifier: {cls_result}\nModel A Prediction: {result}\nAdvice: {advice}\n\n"
+            f"[View Model A & Classifier Training Notebook](https://huggingface.co/your-model-a-classifier-notebook)"
+        )
 
     # --- Version 2.x (Model B) ---
     elif version == "2.1":
@@ -134,8 +157,10 @@ def process_version(image, version):
            return "Classifier: The image is not a tomato leaf. Please try again."
         result = predict_model_b(image)
         advice = ai_assistant_v2(image, result)
-        return (f"Classifier: {cls_result}\nModel B Prediction: {result}\nAdvice: {advice}\n\n"
-                f"[View Model B & Classifier Training Notebook](https://huggingface.co/your-model-b-classifier-notebook)")
+        return (
+            f"Classifier: {cls_result}\nModel B Prediction: {result}\nAdvice: {advice}\n\n"
+            f"[View Model B & Classifier Training Notebook](https://huggingface.co/your-model-b-classifier-notebook)"
+        )
 
     else:
         return "Invalid version selected."
@@ -149,13 +174,15 @@ light_css = """
 <style>
 body { background-color: white; color: black; }
 .gr-button { background-color: #4CAF50; color: white; }
+.gr-input, .gr-textbox, .gr-dropdown, .gr-radio, .gr-markdown, .gr-container { background-color: white; color: black; }
 </style>
 """
 
 dark_css = """
 <style>
-body { background-color: #333; color: white; }
-.gr-button { background-color: #555; color: white; }
+body { background-color: #121212 !important; color: #e0e0e0 !important; }
+.gr-button { background-color: #555 !important; color: white !important; }
+.gr-input, .gr-textbox, .gr-dropdown, .gr-radio, .gr-markdown, .gr-container { background-color: #333 !important; color: #e0e0e0 !important; }
 </style>
 """
 
@@ -201,14 +228,8 @@ with gr.Blocks() as demo:
            )
        # ----- Right Column (≈70%) -----
        with gr.Column(scale=2):
-           image_input = gr.Image(
-               label="📂 Upload Tomato Leaf Image",
-               type="pil"
-           )
-           camera_input = gr.Image(
-               label="📸 Use Camera (Live Preview)",
-               type="pil"
-           )
+           image_input = gr.Image(label="📂 Upload Tomato Leaf Image", type="pil")
+           camera_input = gr.Image(label="📸 Use Camera (Live Preview)", type="pil")
            submit = gr.Button("🔍 Analyze")
 
            output = gr.Textbox(label="📝 Diagnosis & Advice", lines=8)
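
The updated HF_API_TOKEN comment mentions a .env file for local testing. A minimal sketch of that setup, assuming the python-dotenv package (not used in this commit), could look like:

# .env (keep out of version control, e.g. via .gitignore):
# HF_API_TOKEN=hf_xxx...

import os
from dotenv import load_dotenv  # pip install python-dotenv

load_dotenv()  # loads variables from a local .env file into the process environment
HF_API_TOKEN = os.getenv("HF_API_TOKEN")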
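
The call_llama2 hunk only shows the request setup; the rest of the function is unchanged and not part of this diff. As a rough sketch (not the file's actual code), a typical completion of such a Hugging Face Inference API call with the requests library would be:

import requests

def call_llama2(prompt):
    # Sketch only: the committed function's error handling and parsing may differ.
    headers = {"Authorization": f"Bearer {HF_API_TOKEN}"}
    payload = {"inputs": prompt, "parameters": {"max_new_tokens": 100}}
    url = "https://api-inference.huggingface.co/models/meta-llama/Llama-2-7b-chat-hf"
    response = requests.post(url, headers=headers, json=payload)
    if response.status_code != 200:
        # Surface auth or model-loading errors instead of failing silently.
        return f"API error {response.status_code}: {response.text}"
    data = response.json()
    # Text-generation endpoints usually return [{"generated_text": "..."}].
    return data[0].get("generated_text", "") if isinstance(data, list) else str(data)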