Spaces:
Runtime error
Runtime error
Update app.py (#2)
Browse files- Update app.py (5e57aecfad1d45f33c08aecba87a338e1f9ae566)
Co-authored-by: Moulon <[email protected]>
app.py
CHANGED
|
@@ -56,7 +56,7 @@ def process_image(image: PIL.Image.Image) -> tuple[PIL.Image.Image, list[dict]]:
|
|
| 56 |
inputs = pose_image_processor(image, boxes=[person_boxes], return_tensors="pt").to(device)
|
| 57 |
|
| 58 |
# for vitpose-plus-base checkpoint we should additionally provide dataset_index
|
| 59 |
-
# to
|
| 60 |
if pose_model.config.backbone_config.num_experts > 1:
|
| 61 |
dataset_index = torch.tensor([0] * len(inputs["pixel_values"]))
|
| 62 |
dataset_index = dataset_index.to(inputs["pixel_values"].device)
|
|
@@ -99,10 +99,10 @@ def process_image(image: PIL.Image.Image) -> tuple[PIL.Image.Image, list[dict]]:
|
|
| 99 |
|
| 100 |
annotated_frame = image.copy()
|
| 101 |
|
| 102 |
-
# annotate
|
| 103 |
annotated_frame = bounding_box_annotator.annotate(scene=image.copy(), detections=detections)
|
| 104 |
|
| 105 |
-
# annotate edges and
|
| 106 |
annotated_frame = edge_annotator.annotate(scene=annotated_frame, key_points=keypoints)
|
| 107 |
return vertex_annotator.annotate(scene=annotated_frame, key_points=keypoints), human_readable_results
|
| 108 |
|
|
|
|
| 56 |
inputs = pose_image_processor(image, boxes=[person_boxes], return_tensors="pt").to(device)
|
| 57 |
|
| 58 |
# for vitpose-plus-base checkpoint we should additionally provide dataset_index
|
| 59 |
+
# to specify which MOE experts to use for inference
|
| 60 |
if pose_model.config.backbone_config.num_experts > 1:
|
| 61 |
dataset_index = torch.tensor([0] * len(inputs["pixel_values"]))
|
| 62 |
dataset_index = dataset_index.to(inputs["pixel_values"].device)
|
|
|
|
| 99 |
|
| 100 |
annotated_frame = image.copy()
|
| 101 |
|
| 102 |
+
# annotate bounding boxes
|
| 103 |
annotated_frame = bounding_box_annotator.annotate(scene=image.copy(), detections=detections)
|
| 104 |
|
| 105 |
+
# annotate edges and vertices
|
| 106 |
annotated_frame = edge_annotator.annotate(scene=annotated_frame, key_points=keypoints)
|
| 107 |
return vertex_annotator.annotate(scene=annotated_frame, key_points=keypoints), human_readable_results
|
| 108 |
|