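"""Loading script for the Revisited Oxford and Paris image-retrieval benchmarks.

Minimal usage sketch (the repo id below is assumed from this repository's path,
and `trust_remote_code=True` is required for script-based datasets in recent
versions of `datasets`):

    from datasets import load_dataset

    ds = load_dataset("ianhajra/revisitop", "roxford5k", trust_remote_code=True)
    queries, database = ds["qimlist"], ds["imlist"]
"""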
import os
import pickle

import datasets

_VERSION = datasets.Version("1.0.0")

_URLS = {
"roxford5k": {
"images": [
"https://www.robots.ox.ac.uk/~vgg/data/oxbuildings/oxbuild_images-v1.tgz"
],
"ground_truth": [
"http://cmp.felk.cvut.cz/revisitop/data/datasets/roxford5k/gnd_roxford5k.pkl"
],
},
"rparis6k": {
"images": [
"https://www.robots.ox.ac.uk/~vgg/data/parisbuildings/paris_1-v1.tgz",
"https://www.robots.ox.ac.uk/~vgg/data/parisbuildings/paris_2-v1.tgz",
],
"ground_truth": [
"http://cmp.felk.cvut.cz/revisitop/data/datasets/rparis6k/gnd_rparis6k.pkl"
],
},
"revisitop1m": {
"images": [
f"http://ptak.felk.cvut.cz/revisitop/revisitop1m/jpg/revisitop1m.{i+1}.tar.gz"
for i in range(100)
]
},
}
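
# rparis6k ships its images as two archives and revisitop1m as 100 tar.gz
# shards; every URL in these lists is downloaded and extracted separately.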
_DESCRIPTION = (
    "Revisited Oxford (ROxford5k) and Paris (RParis6k) image-retrieval "
    "benchmarks, plus the revisitop1m distractor set of one million images."
)
_CITATION = """\
@inproceedings{Radenovic2018RevisitingOP,
  title={Revisiting Oxford and Paris: Large-Scale Image Retrieval Benchmarking},
  author={Filip Radenovic and Ahmet Iscen and Giorgos Tolias and Yannis Avrithis and Ondrej Chum},
  booktitle={CVPR},
  year={2018}
}
"""

_BUILDER_CONFIGS = [
datasets.BuilderConfig(
name="roxford5k",
version=_VERSION,
description="Oxford 5k image retrieval dataset.",
),
datasets.BuilderConfig(
name="rparis6k",
version=_VERSION,
description="Paris 6k image retrieval dataset.",
),
datasets.BuilderConfig(
name="revisitop1m",
version=_VERSION,
description="RevisitOP 1M distractor images.",
),
datasets.BuilderConfig(
name="oxfordparis",
version=_VERSION,
description="Oxford + Paris combined dataset.",
),
]


class RevisitOP(datasets.GeneratorBasedBuilder):
    BUILDER_CONFIGS = _BUILDER_CONFIGS
    DEFAULT_CONFIG_NAME = "roxford5k"

def _info(self):
return datasets.DatasetInfo(
description=_DESCRIPTION,
features=datasets.Features(
{
"image": datasets.Image(),
"filename": datasets.Value("string"),
"dataset": datasets.Value("string"),
"query_id": datasets.Value("int32"),
"bbx": datasets.Sequence(
datasets.Value("float32")
), # bounding box [x1, y1, x2, y2]
"easy": datasets.Sequence(
datasets.Value("int32")
), # easy relevant images
"hard": datasets.Sequence(
datasets.Value("int32")
), # hard relevant images
"junk": datasets.Sequence(datasets.Value("int32")), # junk images
}
),
supervised_keys=None,
homepage="http://cmp.felk.cvut.cz/revisitop/",
citation=_CITATION,
)

    def _split_generators(self, dl_manager):
cfg_name = self.config.name
if cfg_name == "revisitop1m":
urls = _URLS[cfg_name]["images"]
archive_paths = dl_manager.download(urls)
extracted_paths = dl_manager.extract(archive_paths)
return [
datasets.SplitGenerator(
name="imlist",
gen_kwargs={
"image_dirs": (
extracted_paths
if isinstance(extracted_paths, list)
else [extracted_paths]
),
"ground_truth_file": None,
"split_type": "imlist",
"dataset_name": cfg_name,
},
)
]
if cfg_name == "oxfordparis":
# Handle combined dataset
image_urls = _URLS["roxford5k"]["images"] + _URLS["rparis6k"]["images"]
gt_urls = (
_URLS["roxford5k"]["ground_truth"] + _URLS["rparis6k"]["ground_truth"]
)
else:
image_urls = _URLS[cfg_name]["images"]
gt_urls = _URLS[cfg_name]["ground_truth"]
# Download and extract image archives
archive_paths = dl_manager.download(image_urls)
extracted_paths = dl_manager.extract(archive_paths)
# Download ground truth files
gt_paths = dl_manager.download(gt_urls)
# Normalize lists if single items
if not isinstance(extracted_paths, list):
extracted_paths = [extracted_paths]
if not isinstance(gt_paths, list):
gt_paths = [gt_paths]
return [
datasets.SplitGenerator(
name="qimlist",
gen_kwargs={
"image_dirs": extracted_paths,
"ground_truth_files": gt_paths,
"split_type": "qimlist",
"dataset_name": cfg_name,
},
),
datasets.SplitGenerator(
name="imlist",
gen_kwargs={
"image_dirs": extracted_paths,
"ground_truth_files": gt_paths,
"split_type": "imlist",
"dataset_name": cfg_name,
},
),
]
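
    # The ground-truth pickles follow the revisitop `gnd_*.pkl` format: a dict
    # with `imlist` (database image names), `qimlist` (query image names), and
    # `gnd`, a per-query list of dicts holding `bbx` ([x1, y1, x2, y2]) and
    # `easy`/`hard`/`junk` index lists into `imlist`. For the combined
    # "oxfordparis" config both pickles are iterated in turn, so `query_id`
    # restarts at 0 for each constituent dataset.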
def _generate_examples(
self, image_dirs, ground_truth_files, split_type, dataset_name
):
# Build image path mapping
image_path_mapping = {}
for image_dir in image_dirs:
for root, _, files in os.walk(image_dir):
for fname in files:
if fname.lower().endswith((".jpg", ".jpeg", ".png")):
fpath = os.path.join(root, fname)
# Remove extension for mapping
fname_no_ext = os.path.splitext(fname)[0]
image_path_mapping[fname_no_ext] = fpath
# Handle revisitop1m case (no ground truth)
if ground_truth_files is None:
key = 0
for fname_no_ext, fpath in image_path_mapping.items():
yield key, {
"image": fpath,
"filename": fname_no_ext + ".jpg",
"dataset": dataset_name,
"query_id": -1,
"bbx": [],
"easy": [],
"hard": [],
"junk": [],
}
key += 1
return
# Load ground truth files
ground_truth_data = []
for gt_file in ground_truth_files:
with open(gt_file, "rb") as f:
gt_data = pickle.load(f)
ground_truth_data.append(gt_data)
key = 0
for gt_data in ground_truth_data:
imlist = gt_data["imlist"]
qimlist = gt_data["qimlist"]
gnd = gt_data["gnd"]
if split_type == "qimlist":
# Generate query examples
for i, query_name in enumerate(qimlist):
query_name_no_ext = os.path.splitext(query_name)[0]
if query_name_no_ext in image_path_mapping:
query_gnd = gnd[i]
yield key, {
"image": image_path_mapping[query_name_no_ext],
"filename": query_name,
"dataset": dataset_name,
"query_id": i,
"bbx": query_gnd.get("bbx", []),
"easy": query_gnd.get("easy", []),
"hard": query_gnd.get("hard", []),
"junk": query_gnd.get("junk", []),
}
key += 1
elif split_type == "imlist":
# Generate image pool examples
for i, image_name in enumerate(imlist):
image_name_no_ext = os.path.splitext(image_name)[0]
if image_name_no_ext in image_path_mapping:
yield key, {
"image": image_path_mapping[image_name_no_ext],
"filename": image_name,
"dataset": dataset_name,
"query_id": -1, # Not a query image
"bbx": [],
"easy": [],
"hard": [],
"junk": [],
}
key += 1
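

# Evaluation note (from the Revisiting Oxford and Paris protocol): the Easy
# setup scores `easy` as positive and ignores `hard` + `junk`; Medium scores
# `easy` + `hard` as positive and ignores `junk`; Hard scores `hard` as
# positive and ignores `easy` + `junk`.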