create loading script for NLVR2
NLVR2.py
ADDED
@@ -0,0 +1,113 @@
+"""NLVR2 loading script."""
+
+
+import json
+import datasets
+
+
+_CITATION = """\
+@inproceedings{suhr-etal-2019-corpus,
+    title = "A Corpus for Reasoning about Natural Language Grounded in Photographs",
+    author = "Suhr, Alane and Zhou, Stephanie and Zhang, Ally and Zhang, Iris and Bai, Huajun and Artzi, Yoav",
+    booktitle = "Proceedings of the 57th Annual Meeting of the Association for Computational Linguistics",
+    year = "2019",
+    url = "https://aclanthology.org/P19-1644",
+}
+"""
+
+_DESCRIPTION = """\
+The Natural Language for Visual Reasoning corpora are two language grounding datasets containing natural language sentences grounded in images. The task is to determine whether a sentence is true about a visual input. The data was collected through crowdsourcing, and solving the task requires reasoning about sets of objects, comparisons, and spatial relations. The release includes two corpora: NLVR, with synthetically generated images, and NLVR2, with natural photographs.
+"""
+
+_HOMEPAGE = "https://lil.nlp.cornell.edu/nlvr/"
+
+_LICENSE = "CC BY 4.0"  # TODO: need to credit both MS COCO and VQA authors!
+
+# Raw-content URL: the github.com .../tree/... page serves HTML, not the JSON files.
+_URL_BASE = "https://raw.githubusercontent.com/lil-lab/nlvr/master/nlvr2/data"
+_SPLITS = {
+    "train": "train.json",
+    "validation": "dev.json",
+    "test1": "test1.json",
+    "test2": "test2.json",
+}
+
+
+class NLVR2Dataset(datasets.GeneratorBasedBuilder):
+    """Builder for NLVR2: natural language sentences paired with two photographs each."""
+
+    VERSION = datasets.Version("1.0.0")
+
+    # Register the config explicitly so that self.config.name is "Default";
+    # with no BUILDER_CONFIGS the library falls back to a config named "default".
+    BUILDER_CONFIGS = [datasets.BuilderConfig(name="Default", version=VERSION)]
+    DEFAULT_CONFIG_NAME = "Default"
+
+    def _info(self):
+        return datasets.DatasetInfo(
+            description=_DESCRIPTION,
+            features=datasets.Features(
+                {
+                    "identifier": datasets.Value("string"),
+                    "sentence": datasets.Value("string"),
+                    "left_image": datasets.Image(),
+                    "right_image": datasets.Image(),
+                    "label": datasets.Value("string"),
+                }
+            ),
+            homepage=_HOMEPAGE,
+            license=_LICENSE,
+            citation=_CITATION,
+        )
+
+    def _split_generators(self, dl_manager):
+        # Join with "/" rather than os.path.join: these are URLs, and
+        # os.path.join would produce backslashes on Windows.
+        urls = {
+            "Default": {
+                "train": f"{_URL_BASE}/{_SPLITS['train']}",
+                "validation": f"{_URL_BASE}/{_SPLITS['validation']}",
+                "test1": f"{_URL_BASE}/{_SPLITS['test1']}",
+                "test2": f"{_URL_BASE}/{_SPLITS['test2']}",
+            },
+        }
+        files_path = dl_manager.download_and_extract(urls)
+        return [
+            datasets.SplitGenerator(
+                name=datasets.Split.TRAIN,
+                gen_kwargs={"files_path": files_path[self.config.name]["train"]},
+            ),
+            datasets.SplitGenerator(
+                name=datasets.Split.VALIDATION,
+                gen_kwargs={"files_path": files_path[self.config.name]["validation"]},
+            ),
+            datasets.SplitGenerator(
+                # datasets.Split has no TEST1/TEST2 attributes; custom split
+                # names are created with datasets.Split("name").
+                name=datasets.Split("test1"),
+                gen_kwargs={"files_path": files_path[self.config.name]["test1"]},
+            ),
+            datasets.SplitGenerator(
+                name=datasets.Split("test2"),
+                gen_kwargs={"files_path": files_path[self.config.name]["test2"]},
+            ),
+        ]
+
+    def _generate_examples(self, files_path):
+        # The NLVR2 data files are JSON Lines (one JSON object per line),
+        # so json.load on the whole file would fail.
+        with open(files_path, "r", encoding="utf-8") as f:
+            for i, line in enumerate(f):
+                ex = json.loads(line)
+                record = {
+                    "identifier": ex["identifier"],
+                    "sentence": ex["sentence"],
+                    # The image features hold the source URLs; datasets.Image()
+                    # resolves them when an example is decoded.
+                    "left_image": ex["left_url"],
+                    "right_image": ex["right_url"],
+                    "label": ex["label"],
+                }
+                yield i, record
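
For completeness, a minimal usage sketch (not part of the commit): it assumes the script is saved locally as NLVR2.py and a recent datasets release, where loading script-based builders requires trust_remote_code.

import datasets

# Build all four splits from the local loading script.
dset = datasets.load_dataset("NLVR2.py", trust_remote_code=True)
print(dset)  # DatasetDict with train / validation / test1 / test2

example = dset["train"][0]
print(example["identifier"], example["sentence"], example["label"])

# Image fields are decoded on access; since the script stores URLs,
# this should fetch the photograph and return a PIL image.
example["left_image"].show()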