Spaces:
Running
Running
Update app.py (#1)
Browse files- Update app.py (b844c641be99fe6f50cf25cb57e0a5f76900a5dd)
Co-authored-by: Hatef <[email protected]>
app.py
CHANGED
|
@@ -289,6 +289,13 @@ def badge(text: str, colour: str) -> str:
|
|
| 289 |
# Face comparison
|
| 290 |
# ───────────────────────────────
|
| 291 |
def compare(img_left, img_right, variant):
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 292 |
img_left = Image.fromarray(img_left).convert('RGB')
|
| 293 |
img_right = Image.fromarray(img_right).convert('RGB')
|
| 294 |
|
|
@@ -296,9 +303,9 @@ def compare(img_left, img_right, variant):
|
|
| 296 |
if crop_a is None and crop_b is None:
|
| 297 |
return None, None, badge("No face detected", "#DC2626")
|
| 298 |
if crop_a is None:
|
| 299 |
-
return None, None, badge("No face in A", "#DC2626")
|
| 300 |
if crop_b is None:
|
| 301 |
-
return None, None, badge("No face in B", "#DC2626")
|
| 302 |
mdl = get_face_rec_model(variant)
|
| 303 |
dev = next(mdl.parameters()).device
|
| 304 |
with torch.no_grad():
|
|
@@ -326,7 +333,7 @@ HERO_HTML = f"""
|
|
| 326 |
<a href="https://arxiv.org/abs/2411.08470v2">arXiv</a> •
|
| 327 |
<a href="https://gitlab.idiap.ch/biometric/code.iclr2025_hyperface">Code</a> •
|
| 328 |
<a href="https://huggingface.co/collections/Idiap/hyperface-682485119ccbd3ba5c42bde1">Models</a> •
|
| 329 |
-
<a href="https://zenodo.org/records/15087238">Dataset</a
|
| 330 |
</div>
|
| 331 |
</div>
|
| 332 |
"""
|
|
|
|
| 289 |
# Face comparison
|
| 290 |
# ───────────────────────────────
|
| 291 |
def compare(img_left, img_right, variant):
|
| 292 |
+
if img_left is None and img_right is None:
|
| 293 |
+
return None, None, badge("Please upload/select two face images", "#DC2626")
|
| 294 |
+
if img_left is None:
|
| 295 |
+
return None, None, badge("Please upload/select a face image for Image A (left)", "#DC2626")
|
| 296 |
+
if img_right is None:
|
| 297 |
+
return None, None, badge("Please upload/select a face image for Image B (right)", "#DC2626")
|
| 298 |
+
|
| 299 |
img_left = Image.fromarray(img_left).convert('RGB')
|
| 300 |
img_right = Image.fromarray(img_right).convert('RGB')
|
| 301 |
|
|
|
|
| 303 |
if crop_a is None and crop_b is None:
|
| 304 |
return None, None, badge("No face detected", "#DC2626")
|
| 305 |
if crop_a is None:
|
| 306 |
+
return None, None, badge("No face was detected in Image A (left)", "#DC2626")
|
| 307 |
if crop_b is None:
|
| 308 |
+
return None, None, badge("No face was detected in Image B (right)", "#DC2626")
|
| 309 |
mdl = get_face_rec_model(variant)
|
| 310 |
dev = next(mdl.parameters()).device
|
| 311 |
with torch.no_grad():
|
|
|
|
| 333 |
<a href="https://arxiv.org/abs/2411.08470v2">arXiv</a> •
|
| 334 |
<a href="https://gitlab.idiap.ch/biometric/code.iclr2025_hyperface">Code</a> •
|
| 335 |
<a href="https://huggingface.co/collections/Idiap/hyperface-682485119ccbd3ba5c42bde1">Models</a> •
|
| 336 |
+
<a href="https://zenodo.org/records/15087238">Dataset</a>
|
| 337 |
</div>
|
| 338 |
</div>
|
| 339 |
"""
|