Clean debug strings.
VideoLLaMA2/videollama2/model/videollama2_arch.py
CHANGED
@@ -117,7 +117,6 @@ class Videollama2MetaForCausalLM(ABC):
 
         data_batch = []
         for i, (data, modal) in enumerate(images):
-            print(data.shape, modal)
             if modal == 'image':
                 data = data.expand(num_frames, -1, -1, -1)
             else:
@@ -126,8 +125,6 @@ class Videollama2MetaForCausalLM(ABC):
 
         data_batch = torch.stack(data_batch, dim=0)
 
-        print(data_batch.shape)
-
         assert len(data_batch.size()) == 5
         batch_size = data_batch.size(0)
 
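For context, here is a minimal standalone sketch of the batching step these hunks touch, reconstructed only from the lines visible in the diff. The example shapes, the data_batch.append(data) call, and the pass-through in the else branch are assumptions; num_frames and the (data, modal) pairs come from the surrounding method, which the diff does not show.

import torch

# Assumed setup: num_frames and the (data, modal) pairs come from the
# enclosing method, which lies outside the visible hunks.
num_frames = 8
images = [
    (torch.randn(3, 224, 224), 'image'),              # single image: (C, H, W)
    (torch.randn(num_frames, 3, 224, 224), 'video'),  # video clip: (T, C, H, W)
]

data_batch = []
for i, (data, modal) in enumerate(images):
    if modal == 'image':
        # Broadcast the single image across a new time axis so images and
        # videos share one (T, C, H, W) layout.
        data = data.expand(num_frames, -1, -1, -1)
    else:
        pass  # video handling continues here in the original; assumed pass-through
    data_batch.append(data)  # assumed; this line falls outside the visible hunks

data_batch = torch.stack(data_batch, dim=0)  # -> (B, T, C, H, W)

assert len(data_batch.size()) == 5
batch_size = data_batch.size(0)  # 2 in this example

The removed print calls reported exactly these shapes (per-item shape and modality, then the stacked 5-D batch), which is why the assert on data_batch remains sufficient once the debug output is gone.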