eyupipler committed on
Commit
aaa9386
·
verified ·
1 Parent(s): 822cb72

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +5 -21
app.py CHANGED
@@ -9,20 +9,16 @@ import numpy as np
9
  from thop import profile
10
  import io
11
 
12
- # Device selection
13
  device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
14
 
15
- # Cache models to avoid repeated downloads
16
  models_cache = {}
17
 
18
- # Preprocess transform for 224x224 input
19
  transform = transforms.Compose([
20
  transforms.Resize((224,224)),
21
  transforms.ToTensor(),
22
  transforms.Normalize(mean=[0.485,0.456,0.406], std=[0.229,0.224,0.225])
23
  ])
24
 
25
- # Class names
26
  class_names = [
27
  'Alzheimer Disease',
28
  'Mild Alzheimer Risk',
@@ -32,19 +28,16 @@ class_names = [
32
  'Parkinson Disease'
33
  ]
34
 
35
- # Performance metrics calculation outside predict to not block UI
36
  def calculate_performance(model):
37
  model.eval()
38
  dummy = torch.randn(1,3,224,224).to(device)
39
  flops, params = profile(model, inputs=(dummy,), verbose=False)
40
  params_m = round(params/1e6,2)
41
  flops_b = round(flops/1e9,2)
42
- # inference timing on CPU
43
  import time
44
  start = time.time()
45
  _ = model(dummy.cpu())
46
  cpu_ms = round((time.time() - start)*1000,2)
47
- # inference timing on GPU if available
48
  if device.type == 'cuda':
49
  start_event = torch.cuda.Event(enable_timing=True)
50
  end_event = torch.cuda.Event(enable_timing=True)
@@ -57,30 +50,24 @@ def calculate_performance(model):
57
  gpu_ms = None
58
  return {'params_million':params_m, 'flops_billion':flops_b, 'cpu_ms':cpu_ms, 'gpu_ms':gpu_ms}
59
 
60
- # Prediction function
61
  def predict_and_monitor(version, image):
62
  try:
63
- # load or get cached model
64
  if version not in models_cache:
65
  models_cache[version] = load_model(version, device)
66
  model = models_cache[version]
67
 
68
- # preprocess
69
  if image is None:
70
  raise gr.Error("Görsel yüklenmedi.")
71
  img = image.convert("RGB")
72
  tensor = transform(img).unsqueeze(0).to(device)
73
 
74
- # inference
75
  with torch.no_grad():
76
  logits = model(tensor)
77
  probs = F.softmax(logits, dim=1)[0]
78
 
79
- # prepare outputs
80
  pred_dict = {class_names[i]: round(float(probs[i]),4) for i in range(len(class_names))}
81
  metrics = calculate_performance(model)
82
 
83
- # plot image with top1 label
84
  top1 = max(pred_dict, key=pred_dict.get)
85
  buf = io.BytesIO()
86
  plt.figure(figsize=(3,3))
@@ -90,23 +77,20 @@ def predict_and_monitor(version, image):
90
  plt.savefig(buf, format='png')
91
  plt.close()
92
  buf.seek(0)
93
- # Convert buffer to PIL Image for Gradio
94
  buf_image = Image.open(buf)
95
  return pred_dict, metrics, buf_image
96
  except Exception as e:
97
- # show exception message
98
- raise gr.Error(f"Tahmin hatası: {e}")
99
 
100
- # Gradio interface
101
  with gr.Blocks() as demo:
102
- gr.Markdown("# Vbai-DPA Risk Classification & Monitoring")
103
  with gr.Row():
104
- version = gr.Radio(['f','c','q'], value='c', label="Model Version")
105
- image_in = gr.Image(type="pil", label="Brain Slice (224x224)")
106
  with gr.Row():
107
  preds = gr.JSON(label="Prediction Probabilities")
108
  stats = gr.JSON(label="Performance Metrics")
109
- plot = gr.Image(label="Input & Top1")
110
  btn = gr.Button("Run")
111
  btn.click(fn=predict_and_monitor, inputs=[version, image_in], outputs=[preds, stats, plot])
112
 
 
9
  from thop import profile
10
  import io
11
 
 
12
  device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
13
 
 
14
  models_cache = {}
15
 
 
16
  transform = transforms.Compose([
17
  transforms.Resize((224,224)),
18
  transforms.ToTensor(),
19
  transforms.Normalize(mean=[0.485,0.456,0.406], std=[0.229,0.224,0.225])
20
  ])
21
 
 
22
  class_names = [
23
  'Alzheimer Disease',
24
  'Mild Alzheimer Risk',
 
28
  'Parkinson Disease'
29
  ]
30
 
 
31
  def calculate_performance(model):
32
  model.eval()
33
  dummy = torch.randn(1,3,224,224).to(device)
34
  flops, params = profile(model, inputs=(dummy,), verbose=False)
35
  params_m = round(params/1e6,2)
36
  flops_b = round(flops/1e9,2)
 
37
  import time
38
  start = time.time()
39
  _ = model(dummy.cpu())
40
  cpu_ms = round((time.time() - start)*1000,2)
 
41
  if device.type == 'cuda':
42
  start_event = torch.cuda.Event(enable_timing=True)
43
  end_event = torch.cuda.Event(enable_timing=True)
 
50
  gpu_ms = None
51
  return {'params_million':params_m, 'flops_billion':flops_b, 'cpu_ms':cpu_ms, 'gpu_ms':gpu_ms}
52
 
 
53
  def predict_and_monitor(version, image):
54
  try:
 
55
  if version not in models_cache:
56
  models_cache[version] = load_model(version, device)
57
  model = models_cache[version]
58
 
 
59
  if image is None:
60
  raise gr.Error("Görsel yüklenmedi.")
61
  img = image.convert("RGB")
62
  tensor = transform(img).unsqueeze(0).to(device)
63
 
 
64
  with torch.no_grad():
65
  logits = model(tensor)
66
  probs = F.softmax(logits, dim=1)[0]
67
 
 
68
  pred_dict = {class_names[i]: round(float(probs[i]),4) for i in range(len(class_names))}
69
  metrics = calculate_performance(model)
70
 
 
71
  top1 = max(pred_dict, key=pred_dict.get)
72
  buf = io.BytesIO()
73
  plt.figure(figsize=(3,3))
 
77
  plt.savefig(buf, format='png')
78
  plt.close()
79
  buf.seek(0)
 
80
  buf_image = Image.open(buf)
81
  return pred_dict, metrics, buf_image
82
  except Exception as e:
83
+ raise gr.Error(f"Prediction Error: {e}")
 
84
 
 
85
  with gr.Blocks() as demo:
86
+ gr.Markdown("Dementia and Parkinson Diagnosis with Vbai-DPA 2.1(f,c,q)")
87
  with gr.Row():
88
+ version = gr.Radio(['f | Fastest Model','c | Classic Model','q | Quality Model'], value='c', label="Model Version")
89
+ image_in = gr.Image(type="pil", label="MRI or fMRI Image")
90
  with gr.Row():
91
  preds = gr.JSON(label="Prediction Probabilities")
92
  stats = gr.JSON(label="Performance Metrics")
93
+ plot = gr.Image(label="Prediction")
94
  btn = gr.Button("Run")
95
  btn.click(fn=predict_and_monitor, inputs=[version, image_in], outputs=[preds, stats, plot])
96