Update
app.py CHANGED

@@ -34,7 +34,7 @@ pose_image_processor = AutoProcessor.from_pretrained(pose_model_name)
 pose_model = VitPoseForPoseEstimation.from_pretrained(pose_model_name, device_map=device)
 
 
-@spaces.GPU
+@spaces.GPU(duration=5)
 @torch.inference_mode()
 def process_image(image: PIL.Image.Image) -> tuple[PIL.Image.Image, list[dict]]:
     inputs = person_image_processor(images=image, return_tensors="pt").to(device)
@@ -107,6 +107,7 @@ def process_image(image: PIL.Image.Image) -> tuple[PIL.Image.Image, list[dict]]:
     return vertex_annotator.annotate(scene=annotated_frame, key_points=keypoints), human_readable_results
 
 
+@spaces.GPU(duration=60)
 def process_video(
     video_path: str,
     progress: gr.Progress = gr.Progress(track_tqdm=True),  # noqa: ARG001, B008
@@ -133,9 +134,6 @@ def process_video(
     return out_file.name
 
 
-process_video.zerogpu = True  # type: ignore
-
-
 with gr.Blocks(css_paths="style.css") as demo:
     gr.Markdown(DESCRIPTION)
 
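Context for the change: @spaces.GPU comes from the spaces package used on Hugging Face ZeroGPU Spaces, and its duration argument caps how long a single call may hold the GPU (5 s for image inference, 60 s for video here), replacing the earlier process_video.zerogpu = True attribute. Below is a minimal sketch of how such a decorated function is typically wired into a Gradio app; it assumes the spaces, gradio, and torch packages are installed, and the function and component names are illustrative placeholders, not taken from this Space.

# Minimal ZeroGPU sketch (assumption: running on a Hugging Face ZeroGPU Space;
# names below are placeholders for illustration only).
import gradio as gr
import spaces
import torch


@spaces.GPU(duration=10)  # request a GPU for at most ~10 s per call
@torch.inference_mode()
def run_inference(text: str) -> str:
    # A real app would move a model to CUDA and run it here; this stub only
    # reports which device is visible inside the GPU context.
    device = "cuda" if torch.cuda.is_available() else "cpu"
    return f"ran on {device}: {text[::-1]}"


with gr.Blocks() as demo:
    inp = gr.Textbox(label="Input")
    out = gr.Textbox(label="Output")
    gr.Button("Run").click(run_inference, inputs=inp, outputs=out)

demo.launch()

Outside a ZeroGPU environment the decorator is, to my understanding, effectively a no-op, so the same file can still be run locally on CPU or a regular GPU.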