Update README.md
--- a/README.md
+++ b/README.md
@@ -4,6 +4,8 @@ pipeline_tag: video-classification
 tags:
 - video
 library_name: transformers
+datasets:
+- bkprocovid19/diving48
 ---
 
 # V-JEPA 2
@@ -44,7 +46,7 @@ model = AutoModelForVideoClassification.from_pretrained(hf_repo).to(device)
 processor = AutoVideoProcessor.from_pretrained(hf_repo)
 
 # To load a video, sample the number of frames according to the model.
-video_url = "
+video_url = "https://huggingface.co/qubvel-hf/vjepa2-vitl-fpc32-256-diving48/resolve/main/sample/diving.mp4"
 vr = VideoDecoder(video_url)
 frame_idx = np.arange(0, model.config.frames_per_clip, 8) # you can define more complex sampling strategy
 video = vr.get_frames_at(indices=frame_idx).data # frames x channels x height x width
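For context, the lines touched by this diff sit inside the README's quick-start snippet. Below is a minimal sketch of how the new sample video URL would be used end to end, assuming torchcodec's `VideoDecoder` and the standard `transformers` video-classification API (`AutoVideoProcessor` / `AutoModelForVideoClassification`). The repo id is inferred from the sample URL, and everything after the frame sampling is illustrative and not part of this diff.

```python
# Minimal sketch (not part of this diff): running the updated quick-start snippet.
# Assumes torch, numpy, torchcodec, and a recent transformers are installed.
import numpy as np
import torch
from torchcodec.decoders import VideoDecoder
from transformers import AutoModelForVideoClassification, AutoVideoProcessor

device = "cuda" if torch.cuda.is_available() else "cpu"

# Repo id inferred from the sample video URL added in this diff.
hf_repo = "qubvel-hf/vjepa2-vitl-fpc32-256-diving48"

model = AutoModelForVideoClassification.from_pretrained(hf_repo).to(device)
processor = AutoVideoProcessor.from_pretrained(hf_repo)

# Sample video added by this change.
video_url = "https://huggingface.co/qubvel-hf/vjepa2-vitl-fpc32-256-diving48/resolve/main/sample/diving.mp4"
vr = VideoDecoder(video_url)
frame_idx = np.arange(0, model.config.frames_per_clip, 8)  # simple uniform sampling
video = vr.get_frames_at(indices=frame_idx).data  # frames x channels x height x width

# Typical transformers inference pattern (illustrative, not shown in the diff).
inputs = processor(video, return_tensors="pt").to(device)
with torch.no_grad():
    logits = model(**inputs).logits

predicted_label = logits.argmax(-1).item()
print(model.config.id2label[predicted_label])
```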