demalenk committed · Commit 7bae93a · 1 Parent(s): 20f92fc

ego4d dataset download
.gitignore CHANGED
@@ -1,5 +1,5 @@
  lvos/frames/
  ego4d/frames/
- lvos/lvos_temp/
- lvos/lvos_train/
- lvos/train
+
+ *.pyc
+ __pycache__/
README.md CHANGED
@@ -26,10 +26,15 @@ bash lvos/install_lvos.sh
  
  3. Next, install the Ego4D portion of the dataset. Note that you need access to the Ego4D data; license requests can take a few hours to a few days, and access can be requested here: https://ego4d-data.org/docs/start-here/
  
+ Make sure that you have the ego4d CLI installed by running:
  ```
- bash ego4d/install_ego4d.sh
+ pip install ego4d
  ```
  
+ ```
+ bash ego4d/install_ego4d.sh
+ ```
+ ## Loading data
  We provide a dataloader script:
  ```
  python check_dataloader.py
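
For readers following these steps, a small sanity check like the one below (a sketch, not part of the repository) can confirm the ego4d CLI is actually on your PATH before running the install script:

```python
# Sanity-check sketch: confirm the ego4d CLI from `pip install ego4d`
# is available before running bash ego4d/install_ego4d.sh.
import shutil

cli_path = shutil.which("ego4d")
if cli_path is None:
    raise SystemExit("ego4d CLI not found - run `pip install ego4d` first")
print("ego4d CLI found at:", cli_path)
```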
check_dataloader.py CHANGED
@@ -2,44 +2,38 @@ import os
  
  from dataloaders.mosedataloader import MoseTrackDataLoader
  from dataloaders.lvosdataloader import LVOSTrackDataLoader
- 
- # from dataloaders.ego4ddataloader import Ego4DTrackDataLoader
+ from dataloaders.egoloader import Ego4DTrackDataLoader
  
  data_root = "/data/ilona/datasets/itto_release/itto"
  
- for dataset_type in ["mose", "lvos"]:
+ for dataset_type in ["ego4d", "mose", "lvos"]:
+     annotation_dir = os.path.join(data_root, dataset_type, "annotations")
+     video_id_list = os.listdir(os.path.join(data_root, dataset_type, "frames"))
+     video_dir = os.path.join(data_root, dataset_type, "frames")
+ 
      if dataset_type == "mose":
          print("checking MOSE dataset portion")
-         video_id_list = os.listdir(
-             os.path.join(data_root, dataset_type, "frames")
-         )
          loader = MoseTrackDataLoader(
              video_ids=video_id_list,
-             annotation_dir=os.path.join(data_root, dataset_type, "annotations"),
-             video_dir=os.path.join(data_root, dataset_type, "frames"),
+             annotation_dir=annotation_dir,
+             video_dir=video_dir,
              device="cpu",
          )
      elif dataset_type == "lvos":
          print("checking LVOS dataset portion")
-         video_id_list = os.listdir(
-             os.path.join(data_root, dataset_type, "frames")
-         )
          loader = LVOSTrackDataLoader(
              video_ids=video_id_list,
-             annotation_dir=os.path.join(data_root, dataset_type, "annotations"),
-             video_dir=os.path.join(data_root, dataset_type, "frames"),
+             annotation_dir=annotation_dir,
+             video_dir=video_dir,
              device="cpu",
          )
  
      elif dataset_type == "ego4d":
          print("checking EGO4D dataset portion")
-         video_id_list = os.listdir(
-             "/data/ilona/code/tracking-benchmark-dataset/model_eval_scripts/tool_annotations/ego4d_tool_annotations/"
-         )
          loader = Ego4DTrackDataLoader(
              video_ids=video_id_list,
-             annotation_dir="/data/ilona/code/tracking-benchmark-dataset/model_eval_scripts/video_tool_annots/ego4d_npz/",
-             video_dir="/data/ilona/datasets/ego4d/ego4d_chosen_videos/",
+             annotation_dir=annotation_dir,
+             video_dir=video_dir,
              device="cpu",
          )
  
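
Since the check script only constructs the loaders, a natural extension (hypothetical, not in this commit) is to pull one sample per dataset inside the loop and print the shapes the dataloaders document:

```python
# Hypothetical extension of the loop above: fetch one cached sample
# via get() and print the shapes documented in the dataloaders.
sample = loader.get(video_id_list[0])
print(sample["video"].shape)      # [T, 3, H, W]
print(sample["gt_tracks"].shape)  # [1, N, T, 2]
print(sample["gt_vis"].shape)     # [1, N, T]
print(sample["queries"].shape)    # [1, N, 3]
```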
dataloaders/egoloader.py ADDED
@@ -0,0 +1,137 @@
+ from .mosedataloader import MoseTrackDataLoader, load_mose_video
+ import os
+ import torch
+ import numpy as np
+ from typing import List
+ from PIL import Image
+ import sys
+ import pdb
+ 
+ 
+ class Ego4DTrackDataLoader(MoseTrackDataLoader):
+     def __init__(
+         self,
+         video_ids: List[str],
+         annotation_dir: str,
+         video_dir: str,
+         device: str = "cuda:0",
+     ):
+         super().__init__(video_ids, annotation_dir, video_dir, device)
+ 
+     def _create_queries(
+         self, tracks: torch.Tensor, gt_vis: torch.Tensor
+     ) -> torch.Tensor:
+         B, N, T, _ = tracks.shape
+ 
+         # Find the first frame where visibility is True
+         first_visible_mask = gt_vis.bool()
+ 
+         query_frames = torch.zeros((B, N), dtype=torch.long, device=self.device)
+ 
+         for b in range(B):
+             for n in range(N):
+                 visible_indices = torch.nonzero(
+                     first_visible_mask[b, n], as_tuple=False
+                 )
+                 if visible_indices.numel() > 0:
+                     query_frames[b, n] = visible_indices[0].item()
+                 else:
+                     query_frames[b, n] = 0  # fallback
+ 
+         query_coords = tracks[
+             torch.arange(B)[:, None], torch.arange(N)[None, :], query_frames
+         ]
+ 
+         return torch.stack(
+             [
+                 query_frames.float(),
+                 query_coords[:, :, 0],
+                 query_coords[:, :, 1],
+             ],
+             dim=2,
+         )
+ 
+     def get(self, video_id: str):
+         if video_id not in self.videos:
+             grad_file = os.path.join(
+                 self.annotation_dir, f"{video_id}/{video_id}_gradient.npy"
+             )
+             rand_file = os.path.join(
+                 self.annotation_dir, f"{video_id}/{video_id}_random.npy"
+             )
+             bcknd_file = os.path.join(
+                 self.annotation_dir, f"{video_id}/{video_id}_background.npy"
+             )
+ 
+             use_bcknd, use_rand, use_grad = True, True, True
+             if os.path.exists(bcknd_file):
+                 bcknd_annots = self._load_annotations(bcknd_file)  # N1, T, 4
+                 _, T, _ = bcknd_annots.shape
+             else:
+                 use_bcknd = False
+                 print(f"no background file for {video_id} found !!")
+ 
+             if os.path.exists(rand_file):
+                 rand_annots = self._load_annotations(rand_file)  # N2, T, 4
+                 _, T, _ = rand_annots.shape
+             else:
+                 print(f"no random queries file for {video_id} found !!")
+                 use_rand = False
+ 
+             if os.path.exists(grad_file):
+                 grad_annots = self._load_annotations(grad_file)  # N3, T, 4
+                 _, T, _ = grad_annots.shape
+             else:
+                 use_grad = False
+ 
+             # creates empty tensors if those files aren't used
+             if not use_bcknd:
+                 bcknd_annots = torch.empty((0, T, 4), dtype=torch.float32)
+             if not use_rand:
+                 rand_annots = torch.empty((0, T, 4), dtype=torch.float32)
+             if not use_grad:
+                 grad_annots = torch.empty((0, T, 4), dtype=torch.float32)
+ 
+             grad_tracks = grad_annots[:, :, :2].unsqueeze(0)
+             rand_tracks = rand_annots[:, :, :2].unsqueeze(0)
+             bcknd_tracks = bcknd_annots[:, :, :2].unsqueeze(0)
+ 
+             grad_vis = grad_annots[:, :, 2].unsqueeze(0).to(self.device)
+             rand_vis = rand_annots[:, :, 2].unsqueeze(0).to(self.device)
+             bcknd_vis = bcknd_annots[:, :, 2].unsqueeze(0).to(self.device)
+ 
+             gt_tracks = torch.cat(
+                 [grad_tracks, rand_tracks, bcknd_tracks],
+                 dim=1,
+             )  # 1, (N1+N2+N3), T, 2
+             gt_vis = torch.cat(
+                 [grad_vis, rand_vis, bcknd_vis],
+                 dim=1,
+             )  # 1, (N1+N2+N3), T
+             queries = self._create_queries(
+                 gt_tracks, gt_vis
+             )  # 1, (N1+N2+N3), 3
+ 
+             video_folder = os.path.join(self.video_dir, video_id)
+             video_tensor = load_mose_video(video_folder)  # keep on CPU
+ 
+             self.grad_tracks[video_id] = grad_tracks
+             self.rand_tracks[video_id] = rand_tracks
+             self.bcknd_tracks[video_id] = bcknd_tracks
+             self.gt_tracks[video_id] = gt_tracks
+ 
+             self.grad_vis[video_id] = grad_vis
+             self.rand_vis[video_id] = rand_vis
+             self.bcknd_vis[video_id] = bcknd_vis
+             self.gt_vis[video_id] = gt_vis
+ 
+             self.queries[video_id] = queries
+             self.videos[video_id] = video_tensor
+ 
+         return {
+             "video_id": video_id,
+             "video": self.videos[video_id],
+             "gt_tracks": self.gt_tracks[video_id],
+             "gt_vis": self.gt_vis[video_id],
+             "queries": self.queries[video_id],
+         }
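
One note on `_create_queries` above: the nested Python loop just finds the first visible frame per track. If it ever becomes a bottleneck, an equivalent vectorized form is possible (a sketch, not part of this commit; it relies on `torch.argmax` returning the first maximal index, which recent PyTorch documents):

```python
import torch

def first_visible_queries(tracks: torch.Tensor, gt_vis: torch.Tensor) -> torch.Tensor:
    """Vectorized sketch of Ego4DTrackDataLoader._create_queries.

    tracks: [B, N, T, 2], gt_vis: [B, N, T] -> queries: [B, N, 3] as (t, x, y).
    """
    vis = gt_vis.bool().int()
    # argmax over T gives the first frame where vis == 1; all-zero rows
    # return index 0, matching the loop's fallback to frame 0.
    query_frames = vis.argmax(dim=2)  # [B, N]
    B, N = query_frames.shape
    query_coords = tracks[
        torch.arange(B)[:, None], torch.arange(N)[None, :], query_frames
    ]  # [B, N, 2]
    return torch.cat([query_frames[..., None].float(), query_coords], dim=2)
```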
dataloaders/lvosdataloader.py CHANGED
@@ -1,4 +1,3 @@
- # from .mosedataloader import MoseTrackDataLoader, load_mose_video
  from .mosedataloader import MoseTrackDataLoader, load_mose_video
  import os
  import torch
@@ -8,60 +7,16 @@ from PIL import Image
  import sys
  import pdb
  
- import torch
- from typing import List
- from .mosedataloader import MoseTrackDataLoader
- 
  
  class LVOSTrackDataLoader(MoseTrackDataLoader):
- 
      def __init__(
          self,
          video_ids: List[str],
          annotation_dir: str,
          video_dir: str,
          device: str = "cuda:0",
-         load_video: bool = True,
      ):
-         # Forward load_video flag to base class
-         super().__init__(
-             video_ids, annotation_dir, video_dir, device, load_video
-         )
- 
-         # Initialize caches for get()
-         self.videos = {}
-         self.gt_tracks = {}
-         self.gt_vis = {}
-         self.queries = {}
-         self.grad_tracks = {}
-         self.rand_tracks = {}
-         self.bcknd_tracks = {}
-         self.grad_vis = {}
-         self.rand_vis = {}
-         self.bcknd_vis = {}
- 
-         self.load_video = load_video
- 
-     def _load_annotations(self, path: str) -> torch.Tensor:
-         """
-         Load a .npy annotation and return a Tensor of shape [N, T, 4]:
-           - [:,:,:2] = (x,y) track coords
-           - [:,:,2]  = visibility mask (0/1)
-           - [:,:,3]  = zeros (unused)
-         """
-         a = np.load(path)
-         if a.ndim == 3 and a.shape[-1] >= 4:
-             arr = a[..., :4]
-         elif a.ndim == 3 and a.shape[-1] == 2:
-             tracks = a.astype(np.float32)
-             vis = np.ones(tracks.shape[:2], dtype=np.float32)[..., None]
-             zeros = np.zeros(tracks.shape[:2] + (1,), dtype=np.float32)
-             arr = np.concatenate([tracks, vis, zeros], axis=2)
-         else:
-             raise ValueError(
-                 f"[Ego4DLoader] Unexpected annotation shape {a.shape}"
-             )
-         return torch.from_numpy(arr).to(self.device)
+         super().__init__(video_ids, annotation_dir, video_dir, device)
  
      def _create_queries(
          self, tracks: torch.Tensor, gt_vis: torch.Tensor
@@ -97,100 +52,69 @@ class LVOSTrackDataLoader(MoseTrackDataLoader):
          )
  
      def get(self, video_id: str):
-         """
-         Returns dict with:
-           - video_id:  str
-           - video:     Tensor [T,3,H,W] or None
-           - gt_tracks: Tensor [1, N, T, 2]
-           - gt_vis:    Tensor [1, N, T]
-           - queries:   Tensor [1, N, 3]
-         Caches results to avoid reloading on subsequent calls.
-         """
          if video_id not in self.videos:
-             # Paths for each query type
              grad_file = os.path.join(
-                 self.annotation_dir, video_id, f"{video_id}_gradient.npy"
+                 self.annotation_dir, f"{video_id}/{video_id}_gradient.npy"
              )
              rand_file = os.path.join(
-                 self.annotation_dir, video_id, f"{video_id}_random.npy"
+                 self.annotation_dir, f"{video_id}/{video_id}_random.npy"
              )
              bcknd_file = os.path.join(
-                 self.annotation_dir, video_id, f"{video_id}_background.npy"
+                 self.annotation_dir, f"{video_id}/{video_id}_background.npy"
              )
  
-             # Load any existing annotation arrays and record their length T
-             Ts, data = [], {}
-             for key, path in [
-                 ("grad", grad_file),
-                 ("rand", rand_file),
-                 ("bcknd", bcknd_file),
-             ]:
-                 if os.path.exists(path):
-                     arr = self._load_annotations(path)  # [N, T, 4]
-                     Ts.append(arr.shape[1])
-                     data[key] = arr
-                 else:
-                     data[key] = None
+             use_bcknd, use_rand, use_grad = True, True, True
+             if os.path.exists(bcknd_file):
+                 bcknd_annots = self._load_annotations(bcknd_file)  # N1, T, 4
+                 _, T, _ = bcknd_annots.shape
+             else:
+                 use_bcknd = False
+                 print(f"no background file for {video_id} found !!")
+ 
+             if os.path.exists(rand_file):
+                 rand_annots = self._load_annotations(rand_file)  # N2, T, 4
+                 _, T, _ = rand_annots.shape
+             else:
+                 print(f"no random queries file for {video_id} found !!")
+                 use_rand = False
+ 
+             if os.path.exists(grad_file):
+                 grad_annots = self._load_annotations(grad_file)  # N3, T, 4
+                 _, T, _ = grad_annots.shape
+             else:
+                 use_grad = False
+ 
+             # creates empty tensors if those files aren't used
+             if not use_bcknd:
+                 bcknd_annots = torch.empty((0, T, 4), dtype=torch.float32)
+             if not use_rand:
+                 rand_annots = torch.empty((0, T, 4), dtype=torch.float32)
+             if not use_grad:
+                 grad_annots = torch.empty((0, T, 4), dtype=torch.float32)
+ 
+             grad_tracks = grad_annots[:, :, :2].unsqueeze(0)
+             rand_tracks = rand_annots[:, :, :2].unsqueeze(0)
+             bcknd_tracks = bcknd_annots[:, :, :2].unsqueeze(0)
+ 
+             grad_vis = grad_annots[:, :, 2].unsqueeze(0).to(self.device)
+             rand_vis = rand_annots[:, :, 2].unsqueeze(0).to(self.device)
+             bcknd_vis = bcknd_annots[:, :, 2].unsqueeze(0).to(self.device)
  
-             if not Ts:
-                 raise FileNotFoundError(
-                     f"No annotation files found for {video_id}"
-                 )
-             T = Ts[0]  # assume consistent length
- 
-             # Prepare placeholders for missing types
-             grad_annots = (
-                 data["grad"]
-                 if data["grad"] is not None
-                 else torch.empty(
-                     (0, T, 4), dtype=torch.float32, device=self.device
-                 )
-             )
-             rand_annots = (
-                 data["rand"]
-                 if data["rand"] is not None
-                 else torch.empty(
-                     (0, T, 4), dtype=torch.float32, device=self.device
-                 )
-             )
-             bcknd_annots = (
-                 data["bcknd"]
-                 if data["bcknd"] is not None
-                 else torch.empty(
-                     (0, T, 4), dtype=torch.float32, device=self.device
-                 )
-             )
- 
-             # Split coords vs. vis
-             grad_tracks = grad_annots[..., :2].unsqueeze(0)  # [1, N1, T, 2]
-             rand_tracks = rand_annots[..., :2].unsqueeze(0)  # [1, N2, T, 2]
-             bcknd_tracks = bcknd_annots[..., :2].unsqueeze(0)  # [1, N3, T, 2]
- 
-             grad_vis = grad_annots[..., 2].unsqueeze(0)  # [1, N1, T]
-             rand_vis = rand_annots[..., 2].unsqueeze(0)  # [1, N2, T]
-             bcknd_vis = bcknd_annots[..., 2].unsqueeze(0)  # [1, N3, T]
- 
-             # Concatenate all queries
              gt_tracks = torch.cat(
-                 [grad_tracks, rand_tracks, bcknd_tracks], dim=1
-             )  # [1, N, T, 2]
+                 [grad_tracks, rand_tracks, bcknd_tracks],
+                 dim=1,
+             )  # 1, (N1+N2+N3), T, 2
              gt_vis = torch.cat(
-                 [grad_vis, rand_vis, bcknd_vis], dim=1
-             )  # [1, N, T]
- 
+                 [grad_vis, rand_vis, bcknd_vis],
+                 dim=1,
+             )  # 1, (N1+N2+N3), T
              queries = self._create_queries(
                  gt_tracks, gt_vis
              )  # 1, (N1+N2+N3), 3
  
-             # Load video frames if requested
-             video_tensor = None
-             if self.load_video:
-                 vid_folder = os.path.join(self.video_dir, video_id)
-                 video_tensor = load_mose_video(vid_folder).to(
-                     self.device
-                 )  # [T,3,H,W]
+             video_folder = os.path.join(self.video_dir, video_id)
+             video_tensor = load_mose_video(video_folder)  # keep on CPU
  
-             # Cache for future calls
              self.grad_tracks[video_id] = grad_tracks
              self.rand_tracks[video_id] = rand_tracks
              self.bcknd_tracks[video_id] = bcknd_tracks
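
The net effect of this rewrite is that LVOSTrackDataLoader now mirrors the Ego4D loader and leans on the base class's `_load_annotations`, which assumes a clean `[N, T, 4]` array rather than the defensive branching that was removed. For reference, the layout described in the removed docstring still applies; a small sketch (with a hypothetical file name) of how it decomposes:

```python
import numpy as np
import torch

# Hypothetical annotation file; layout per the removed docstring:
# [N, T, 4] with [..., :2] = (x, y) coords, [..., 2] = visibility, [..., 3] unused.
annots = torch.tensor(np.load("0a2f2bd2/0a2f2bd2_gradient.npy"), dtype=torch.float32)
tracks = annots[:, :, :2].unsqueeze(0)  # [1, N, T, 2]
vis = annots[:, :, 2].unsqueeze(0)      # [1, N, T]
```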
dataloaders/mosedataloader.py CHANGED
@@ -1,24 +1,37 @@
- #!/usr/bin/env python3
- """
- MoseTrackDataLoader - loads GT, visibility, queries for MOSE; skip video if desired.
- """
  import os
  import torch
  import numpy as np
  from typing import List
  from PIL import Image
+ import sys
  
+ # sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), "..")))
+ # from utils import load_mose_video
  
- def load_mose_video(folder: str) -> torch.Tensor:
-     files = sorted(
-         f for f in os.listdir(folder) if f.lower().endswith((".png", ".jpg"))
-     )
-     imgs = []
-     for fn in files:
-         im = Image.open(os.path.join(folder, fn)).convert("RGB")
-         arr = np.array(im).transpose(2, 0, 1).astype(np.float32)
-         imgs.append(arr)
-     return torch.from_numpy(np.stack(imgs, 0))
+ 
+ # eek redundant... TODO(ilona) figure out importing of from utils here
+ def load_mose_video(video_folder):
+     files = [f for f in os.listdir(video_folder) if f.endswith(".jpg")]
+     files = sorted(files, key=lambda x: int(os.path.splitext(x)[0]))
+ 
+     frames = []  # list to collect each image tensor
+ 
+     # print(files)
+     for filename in files:
+         filepath = os.path.join(video_folder, filename)
+         # Open image and ensure it's in RGB mode
+         img = Image.open(filepath).convert("RGB")
+         # Convert the image to a NumPy array of shape (H, W, 3)
+         img_np = np.array(img)
+         # Convert to (3, H, W) by reordering the axes
+         img_np = np.transpose(img_np, (2, 0, 1))
+         frames.append(img_np)
+ 
+     video_np = np.stack(frames, axis=0)
+     video_tensor = torch.from_numpy(video_np).float()
+ 
+     # [T, 3, H, W]
+     return video_tensor
  
  
  class MoseTrackDataLoader:
@@ -28,13 +41,29 @@ class MoseTrackDataLoader:
          annotation_dir: str,
          video_dir: str,
          device: str = "cuda:0",
-         load_video: bool = True,
      ):
          self.video_ids = video_ids
          self.annotation_dir = annotation_dir
          self.video_dir = video_dir
          self.device = torch.device(device)
-         self.load_video = load_video
+ 
+         self.grad_tracks = {}
+         self.rand_tracks = {}
+         self.bcknd_tracks = {}
+         self.gt_tracks = {}
+ 
+         self.grad_vis = {}
+         self.rand_vis = {}
+         self.bcknd_vis = {}
+         self.gt_vis = {}
+ 
+         self.queries = {}
+         self.videos = {}
+ 
+     def _load_annotations(self, file_path: str) -> torch.Tensor:
+         return torch.tensor(
+             np.load(file_path), dtype=torch.float32
+         )  # [N, T, 4]
  
      def _create_queries(self, tracks: torch.Tensor) -> torch.Tensor:
          B, N, T, _ = tracks.shape
@@ -46,49 +75,81 @@
          )
  
      def get(self, video_id: str):
-         types = ["gradient", "random", "background"]
-         tracks_list, vis_list = [], []
-         for t in types:
-             path = os.path.join(
-                 self.annotation_dir, video_id, f"{video_id}_{t}.npy"
+ 
+         if video_id not in self.videos:
+             # Load annotations
+             grad_file = os.path.join(
+                 self.annotation_dir, f"{video_id}/{video_id}_gradient.npy"
+             )
+             rand_file = os.path.join(
+                 self.annotation_dir, f"{video_id}/{video_id}_random.npy"
              )
-             if not os.path.exists(path):
-                 continue
-             arr = np.load(path)
-             if arr.ndim == 3 and arr.shape[-1] >= 4:
-                 tr = arr[..., :2].astype(np.float32)
-                 vi = arr[..., 2].astype(bool)
+             bcknd_file = os.path.join(
+                 self.annotation_dir, f"{video_id}/{video_id}_background.npy"
+             )
+ 
+             grad_annots = self._load_annotations(grad_file)
+             rand_annots = self._load_annotations(rand_file)
+             if os.path.exists(bcknd_file):
+                 bcknd_annots = self._load_annotations(bcknd_file)  # N1, T, 4
              else:
-                 tr = arr.astype(np.float32)
-                 vi = np.ones(arr.shape[:2], bool)
- 
-             # now tr: [N, T, 2], vi: [N, T]
-             tr_ten = (
-                 torch.from_numpy(tr).to(self.device).unsqueeze(0)
-             )  # [1, N, T, 2]
-             vi_ten = (
-                 torch.from_numpy(vi).to(self.device).unsqueeze(0)
-             )  # [1, N, T]
-             tracks_list.append(tr_ten)
-             vis_list.append(vi_ten)
- 
-         # concatenate along track-dimension
-         gt_tracks = torch.cat(tracks_list, dim=1)  # [1, sum_N, T, 2]
-         gt_vis = torch.cat(vis_list, dim=1)  # [1, sum_N, T]
- 
-         # queries: [1, sum_N, 3]
-         queries = self._create_queries(gt_tracks)
- 
-         # optionally load video frames
-         video_tensor = None
-         if self.load_video:
-             folder = os.path.join(self.video_dir, video_id)
-             video_tensor = load_mose_video(folder).to(self.device)  # [T,3,H,W]
+                 _, T, _ = grad_annots.shape
+                 bcknd_annots = torch.empty((0, T, 4), dtype=torch.float32)
+ 
+             grad_tracks = grad_annots[:, :, :2].unsqueeze(0)
+             rand_tracks = rand_annots[:, :, :2].unsqueeze(0)
+             bcknd_tracks = bcknd_annots[:, :, :2].unsqueeze(0)
+ 
+             grad_vis = grad_annots[:, :, 2].unsqueeze(0)
+             rand_vis = rand_annots[:, :, 2].unsqueeze(0)
+             bcknd_vis = bcknd_annots[:, :, 2].unsqueeze(0)
+ 
+             gt_tracks = torch.cat(
+                 [grad_tracks, rand_tracks, bcknd_tracks], dim=1
+             )
+             gt_vis = torch.cat([grad_vis, rand_vis, bcknd_vis], dim=1)
+             queries = self._create_queries(gt_tracks)
+ 
+             video_folder = os.path.join(self.video_dir, video_id)
+             video_tensor = load_mose_video(video_folder)
+ 
+             # Store in memory if needed
+             self.grad_tracks[video_id] = grad_tracks
+             self.rand_tracks[video_id] = rand_tracks
+             self.bcknd_tracks[video_id] = bcknd_tracks
+             self.gt_tracks[video_id] = gt_tracks
+ 
+             self.grad_vis[video_id] = grad_vis
+             self.rand_vis[video_id] = rand_vis
+             self.bcknd_vis[video_id] = bcknd_vis
+             self.gt_vis[video_id] = gt_vis
+ 
+             self.queries[video_id] = queries
+             self.videos[video_id] = video_tensor
  
          return {
              "video_id": video_id,
-             "video": video_tensor,
-             "gt_tracks": gt_tracks,
-             "gt_vis": gt_vis,
-             "queries": queries,
+             "video": self.videos[video_id],
+             "gt_tracks": self.gt_tracks[video_id],
+             "gt_vis": self.gt_vis[video_id],
+             "queries": self.queries[video_id],
          }
+ 
+ 
+ # debugging
+ if __name__ == "__main__":
+ 
+     video_id_list = os.listdir(f"video_tool_annots/mose_npz")
+     video_id_list = list(set(v.split("_")[0] for v in video_id_list))
+ 
+     loader = MoseTrackDataLoader(
+         video_ids=video_id_list,
+         annotation_dir=f"video_tool_annots/mose_npz",
+         video_dir="/data/ilona/datasets/mose/mose_train/JPEGImages",
+         device="cpu",
+     )
+ 
+     data = loader.get("03e5f069")
+     print(data["video"].shape)  # [T, 3, H, W]
+     print(data["gt_tracks"].shape)  # [1, N, T, 2]
+     print(data["queries"].shape)  # [1, N, 3]
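
One detail worth flagging in the new `load_mose_video`: frames are sorted numerically by file stem rather than lexicographically, and the sort key assumes purely numeric names like `000123.jpg`. The difference matters once frame indices span digit counts:

```python
import os

# Why load_mose_video sorts numerically: a plain string sort misorders
# multi-digit frame indices.
files = ["10.jpg", "2.jpg", "1.jpg"]
print(sorted(files))  # ['1.jpg', '10.jpg', '2.jpg'] - wrong playback order
print(sorted(files, key=lambda x: int(os.path.splitext(x)[0])))
# ['1.jpg', '2.jpg', '10.jpg'] - correct
```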
ego4d/install_ego4d.sh ADDED
@@ -0,0 +1,30 @@
+ ################## ilona command line history ##################
+ 
+ conda create -n ego4d-py312 python=3.12.7 -y
+ 
+ conda activate ego4d-py312
+ 
+ python -m pip install --upgrade pip setuptools wheel
+ 
+ pip install ego4d
+ pip install torch torchvision
+ pip install av
+ 
+ cd ego4d
+ 
+ # TODO: this part doesn't work at the moment
+ ego4d \
+     --datasets full_scale \
+     --video_uid_file video_uids.txt \
+     --aws_profile_name ego4d \
+     --output_directory ego4d_full_videos/ \
+     -y
+ 
+ # loads the metadata
+ python process_ego4d_videos.py
+ 
+ rm -rf ego4d_full_videos
+ 
+ 
+ ################################################################
+ 
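
Since the CLI step is flagged as not working at the moment, a small check like the following can report which uids actually landed on disk (a sketch, not part of the repo; the path assumes the `--output_directory` above and the CLI's `v2/full_scale` layout):

```python
# Sketch: report which video uids from video_uids.txt were downloaded.
# Assumes the ego4d CLI wrote into ego4d_full_videos/v2/full_scale/.
import os

with open("video_uids.txt") as f:
    uids = [line.strip() for line in f if line.strip()]

missing = [
    u
    for u in uids
    if not os.path.exists(f"ego4d_full_videos/v2/full_scale/{u}.mp4")
]
print(f"downloaded {len(uids) - len(missing)}/{len(uids)} videos")
for u in missing:
    print("missing:", u)
```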
ego4d/process_ego4d_videos.py CHANGED
@@ -33,7 +33,7 @@ for json_file in json_list:
  
      # save paths
      video_path = f"ego4d_videos/v2/full_scale/{video_id}.mp4"
-     save_dir = f"ego4d_chosen_videos/{video_name}"
+     save_dir = f"frames/{video_name}"
  
      print("\n PROCESSING VIDEO ", video_name)
  
@@ -56,13 +56,13 @@ for json_file in json_list:
  
      print(f"Saved {len(cropped_vid)} frames to {save_dir}")
  
-     # TODO(ilona) - remove this later
-     # checks that current dataset aligns with the files that we have !!
+     # # TODO(ilona) - remove this later
+     # # checks that current dataset aligns with the files that we have !!
      # for frame_number in [f"{i:06d}" for i in range(0, num_output_frames)]:
      #     paths = [
-     #         # f"/data/ilona/datasets/ego4d/ego4d_chosen_videos/{video_name}/rgb_frames/{frame_number}.jpg",
      #         f"/data/ilona/datasets/ego4d/ego4d_chosen_videos/{video_name}/rgb_frames/{frame_number}.jpg",
-     #         f"/data/ilona/datasets/itto_release/itto/ego4d/ego4d_chosen_videos/{video_name}/{frame_number}.jpg",
+     #         # f"/data/ilona/datasets/go4d/frames/{video_name}/rgb_frames/{frame_number}.jpg",
+     #         f"/data/ilona/datasets/itto_release/itto/ego4d/frames/{video_name}/{frame_number}.jpg",
      #     ]
  
      #     # Load images
ego4d/video_uids.txt CHANGED
@@ -1,3 +1,4 @@
+ f42ae9c2-d43e-45af-8adb-02d0b16b2ef7
  c6047868-51a3-4bb8-b833-5453c1fa563c
  d40be91b-85bb-4c47-ac18-52c4d044e3fc
  3ae87694-c71b-4564-9aea-8d65ba9cc19d
@@ -18,7 +19,6 @@ be462bcf-13ad-446a-8dfb-248cb2e3417a
  426eb9d9-d4cf-4acc-9efb-979f61a5be91
  fa6386e1-82ba-4c67-b217-e11640546582
  366b71ad-3f35-4704-b6ef-e793c7e73ac6
- f42ae9c2-d43e-45af-8adb-02d0b16b2ef7
  04046863-98c0-42a8-90f9-4191013cc252
  127392a3-036a-4af6-9db9-ca00141229db
  4675859e-620c-493a-b1e7-27c347074783