ianhajra committed on
Commit
60be171
·
verified ·
1 Parent(s): 8c1a7f7

Update revisitop.py

Browse files
Files changed (1) hide show
  1. revisitop.py +199 -45
revisitop.py CHANGED
@@ -1,25 +1,40 @@
1
  import os
2
  import tarfile
3
  import urllib.request
 
4
  import datasets
5
 
6
  _VERSION = datasets.Version("1.0.0")
7
 
8
  _URLS = {
9
- "roxford5k": [
10
- "https://www.robots.ox.ac.uk/~vgg/data/oxbuildings/oxbuild_images-v1.tgz"
11
- ],
12
- "rparis6k": [
13
- "https://www.robots.ox.ac.uk/~vgg/data/parisbuildings/paris_1-v1.tgz",
14
- "https://www.robots.ox.ac.uk/~vgg/data/parisbuildings/paris_2-v1.tgz"
15
- ],
16
- "revisitop1m": [
17
- f"http://ptak.felk.cvut.cz/revisitop/revisitop1m/jpg/revisitop1m.{i+1}.tar.gz"
18
- for i in range(100)
19
- ]
 
 
 
 
 
 
 
 
 
 
 
 
20
  }
21
 
22
- _DESCRIPTION = "Oxford5k, Paris6k, and RevisitOP1M benchmark datasets for image retrieval."
 
 
23
 
24
  _CITATION = """\
25
  @inproceedings{Radenovic2018RevisitingOP,
@@ -30,10 +45,26 @@ _CITATION = """\
30
  """
31
 
32
  BUILDER_CONFIGS = [
33
- datasets.BuilderConfig(name="roxford5k", version=_VERSION, description="Oxford 5k image retrieval dataset."),
34
- datasets.BuilderConfig(name="rparis6k", version=_VERSION, description="Paris 6k image retrieval dataset."),
35
- datasets.BuilderConfig(name="revisitop1m", version=_VERSION, description="RevisitOP 1M distractor images."),
36
- datasets.BuilderConfig(name="oxfordparis", version=_VERSION, description="Oxford + Paris combined dataset.")
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
37
  ]
38
 
39
 
@@ -44,11 +75,24 @@ class RevisitOP(datasets.GeneratorBasedBuilder):
44
  def _info(self):
45
  return datasets.DatasetInfo(
46
  description=_DESCRIPTION,
47
- features=datasets.Features({
48
- "image": datasets.Image(),
49
- "filename": datasets.Value("string"),
50
- "dataset": datasets.Value("string")
51
- }),
 
 
 
 
 
 
 
 
 
 
 
 
 
52
  supervised_keys=None,
53
  homepage="http://cmp.felk.cvut.cz/revisitop/",
54
  citation=_CITATION,
@@ -56,38 +100,148 @@ class RevisitOP(datasets.GeneratorBasedBuilder):
56
 
57
  def _split_generators(self, dl_manager):
58
  cfg_name = self.config.name
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
59
  if cfg_name == "oxfordparis":
60
- urls = _URLS["roxford5k"] + _URLS["rparis6k"]
61
- datasets_to_extract = ["roxford5k", "rparis6k"]
 
 
 
62
  else:
63
- urls = _URLS[cfg_name]
64
- datasets_to_extract = [cfg_name]
65
 
66
- # Download and extract archives
67
- archive_paths = dl_manager.download(urls)
68
  extracted_paths = dl_manager.extract(archive_paths)
69
 
70
- # Normalize list if single item
 
 
 
71
  if not isinstance(extracted_paths, list):
72
  extracted_paths = [extracted_paths]
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
73
 
74
- data_dirs = {ds_name: [] for ds_name in datasets_to_extract}
75
- for path, ds_name in zip(extracted_paths, datasets_to_extract):
76
- data_dirs[ds_name].append(path)
 
 
 
 
 
 
 
 
 
 
77
 
78
- return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"data_dirs": data_dirs})]
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
79
 
80
- def _generate_examples(self, data_dirs):
81
  key = 0
82
- for ds_name, paths in data_dirs.items():
83
- for path in paths:
84
- for root, _, files in os.walk(path):
85
- for fname in files:
86
- if fname.lower().endswith(('.jpg', '.jpeg', '.png')):
87
- fpath = os.path.join(root, fname)
88
- yield key, {
89
- "image": fpath,
90
- "filename": fname,
91
- "dataset": ds_name,
92
- }
93
- key += 1
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
  import os
2
  import tarfile
3
  import urllib.request
4
+ import pickle
5
  import datasets
6
 
7
# Version tag attached to every builder config below.
_VERSION = datasets.Version("1.0.0")

# Download sources, keyed by config name.
# - "images": tar/tgz archives containing the raw pictures.
# - "ground_truth": pickled annotation files (query list, image list and
#   per-query relevance judgements) hosted by the revisitop authors.
# "revisitop1m" is distractor-only, so it has no "ground_truth" entry and
# its images are shipped as 100 numbered shards.
_URLS = {
    "roxford5k": {
        "images": [
            "https://www.robots.ox.ac.uk/~vgg/data/oxbuildings/oxbuild_images-v1.tgz"
        ],
        "ground_truth": [
            "http://cmp.felk.cvut.cz/revisitop/data/datasets/roxford5k/gnd_roxford5k.pkl"
        ],
    },
    "rparis6k": {
        "images": [
            "https://www.robots.ox.ac.uk/~vgg/data/parisbuildings/paris_1-v1.tgz",
            "https://www.robots.ox.ac.uk/~vgg/data/parisbuildings/paris_2-v1.tgz",
        ],
        "ground_truth": [
            "http://cmp.felk.cvut.cz/revisitop/data/datasets/rparis6k/gnd_rparis6k.pkl"
        ],
    },
    "revisitop1m": {
        "images": [
            f"http://ptak.felk.cvut.cz/revisitop/revisitop1m/jpg/revisitop1m.{i+1}.tar.gz"
            for i in range(100)
        ]
    },
}

# Short human-readable summary used in DatasetInfo.description.
_DESCRIPTION = (
    "Oxford5k, Paris6k, and RevisitOP1M benchmark datasets for image retrieval."
)
38
 
39
  _CITATION = """\
40
  @inproceedings{Radenovic2018RevisitingOP,
 
45
  """
46
 
47
# One BuilderConfig per selectable dataset variant; "oxfordparis" combines
# the Oxford and Paris downloads into a single config (see _split_generators).
# NOTE(review): this list is defined at module level; HF `datasets` builders
# conventionally declare BUILDER_CONFIGS as a class attribute — confirm the
# builder class (header not visible in this chunk) actually picks it up.
BUILDER_CONFIGS = [
    datasets.BuilderConfig(
        name="roxford5k",
        version=_VERSION,
        description="Oxford 5k image retrieval dataset.",
    ),
    datasets.BuilderConfig(
        name="rparis6k",
        version=_VERSION,
        description="Paris 6k image retrieval dataset.",
    ),
    datasets.BuilderConfig(
        name="revisitop1m",
        version=_VERSION,
        description="RevisitOP 1M distractor images.",
    ),
    datasets.BuilderConfig(
        name="oxfordparis",
        version=_VERSION,
        description="Oxford + Paris combined dataset.",
    ),
]
69
 
70
 
 
75
  def _info(self):
76
  return datasets.DatasetInfo(
77
  description=_DESCRIPTION,
78
+ features=datasets.Features(
79
+ {
80
+ "image": datasets.Image(),
81
+ "filename": datasets.Value("string"),
82
+ "dataset": datasets.Value("string"),
83
+ "query_id": datasets.Value("int32"),
84
+ "bbx": datasets.Sequence(
85
+ datasets.Value("float32")
86
+ ), # bounding box [x1, y1, x2, y2]
87
+ "easy": datasets.Sequence(
88
+ datasets.Value("int32")
89
+ ), # easy relevant images
90
+ "hard": datasets.Sequence(
91
+ datasets.Value("int32")
92
+ ), # hard relevant images
93
+ "junk": datasets.Sequence(datasets.Value("int32")), # junk images
94
+ }
95
+ ),
96
  supervised_keys=None,
97
  homepage="http://cmp.felk.cvut.cz/revisitop/",
98
  citation=_CITATION,
 
100
 
101
  def _split_generators(self, dl_manager):
102
  cfg_name = self.config.name
103
+
104
+ if cfg_name == "revisitop1m":
105
+ urls = _URLS[cfg_name]["images"]
106
+ archive_paths = dl_manager.download(urls)
107
+ extracted_paths = dl_manager.extract(archive_paths)
108
+
109
+ return [
110
+ datasets.SplitGenerator(
111
+ name="imlist",
112
+ gen_kwargs={
113
+ "image_dirs": (
114
+ extracted_paths
115
+ if isinstance(extracted_paths, list)
116
+ else [extracted_paths]
117
+ ),
118
+ "ground_truth_file": None,
119
+ "split_type": "imlist",
120
+ "dataset_name": cfg_name,
121
+ },
122
+ )
123
+ ]
124
+
125
  if cfg_name == "oxfordparis":
126
+ # Handle combined dataset
127
+ image_urls = _URLS["roxford5k"]["images"] + _URLS["rparis6k"]["images"]
128
+ gt_urls = (
129
+ _URLS["roxford5k"]["ground_truth"] + _URLS["rparis6k"]["ground_truth"]
130
+ )
131
  else:
132
+ image_urls = _URLS[cfg_name]["images"]
133
+ gt_urls = _URLS[cfg_name]["ground_truth"]
134
 
135
+ # Download and extract image archives
136
+ archive_paths = dl_manager.download(image_urls)
137
  extracted_paths = dl_manager.extract(archive_paths)
138
 
139
+ # Download ground truth files
140
+ gt_paths = dl_manager.download(gt_urls)
141
+
142
+ # Normalize lists if single items
143
  if not isinstance(extracted_paths, list):
144
  extracted_paths = [extracted_paths]
145
+ if not isinstance(gt_paths, list):
146
+ gt_paths = [gt_paths]
147
+
148
+ return [
149
+ datasets.SplitGenerator(
150
+ name="qimlist",
151
+ gen_kwargs={
152
+ "image_dirs": extracted_paths,
153
+ "ground_truth_files": gt_paths,
154
+ "split_type": "qimlist",
155
+ "dataset_name": cfg_name,
156
+ },
157
+ ),
158
+ datasets.SplitGenerator(
159
+ name="imlist",
160
+ gen_kwargs={
161
+ "image_dirs": extracted_paths,
162
+ "ground_truth_files": gt_paths,
163
+ "split_type": "imlist",
164
+ "dataset_name": cfg_name,
165
+ },
166
+ ),
167
+ ]
168
 
169
+ def _generate_examples(
170
+ self, image_dirs, ground_truth_files, split_type, dataset_name
171
+ ):
172
+ # Build image path mapping
173
+ image_path_mapping = {}
174
+ for image_dir in image_dirs:
175
+ for root, _, files in os.walk(image_dir):
176
+ for fname in files:
177
+ if fname.lower().endswith((".jpg", ".jpeg", ".png")):
178
+ fpath = os.path.join(root, fname)
179
+ # Remove extension for mapping
180
+ fname_no_ext = os.path.splitext(fname)[0]
181
+ image_path_mapping[fname_no_ext] = fpath
182
 
183
+ # Handle revisitop1m case (no ground truth)
184
+ if ground_truth_files is None:
185
+ key = 0
186
+ for fname_no_ext, fpath in image_path_mapping.items():
187
+ yield key, {
188
+ "image": fpath,
189
+ "filename": fname_no_ext + ".jpg",
190
+ "dataset": dataset_name,
191
+ "query_id": -1,
192
+ "bbx": [],
193
+ "easy": [],
194
+ "hard": [],
195
+ "junk": [],
196
+ }
197
+ key += 1
198
+ return
199
+
200
+ # Load ground truth files
201
+ ground_truth_data = []
202
+ for gt_file in ground_truth_files:
203
+ with open(gt_file, "rb") as f:
204
+ gt_data = pickle.load(f)
205
+ ground_truth_data.append(gt_data)
206
 
 
207
  key = 0
208
+
209
+ for gt_data in ground_truth_data:
210
+ imlist = gt_data["imlist"]
211
+ qimlist = gt_data["qimlist"]
212
+ gnd = gt_data["gnd"]
213
+
214
+ if split_type == "qimlist":
215
+ # Generate query examples
216
+ for i, query_name in enumerate(qimlist):
217
+ query_name_no_ext = os.path.splitext(query_name)[0]
218
+ if query_name_no_ext in image_path_mapping:
219
+ query_gnd = gnd[i]
220
+ yield key, {
221
+ "image": image_path_mapping[query_name_no_ext],
222
+ "filename": query_name,
223
+ "dataset": dataset_name,
224
+ "query_id": i,
225
+ "bbx": query_gnd.get("bbx", []),
226
+ "easy": query_gnd.get("easy", []),
227
+ "hard": query_gnd.get("hard", []),
228
+ "junk": query_gnd.get("junk", []),
229
+ }
230
+ key += 1
231
+
232
+ elif split_type == "imlist":
233
+ # Generate image pool examples
234
+ for i, image_name in enumerate(imlist):
235
+ image_name_no_ext = os.path.splitext(image_name)[0]
236
+ if image_name_no_ext in image_path_mapping:
237
+ yield key, {
238
+ "image": image_path_mapping[image_name_no_ext],
239
+ "filename": image_name,
240
+ "dataset": dataset_name,
241
+ "query_id": -1, # Not a query image
242
+ "bbx": [],
243
+ "easy": [],
244
+ "hard": [],
245
+ "junk": [],
246
+ }
247
+ key += 1