Modalities: Text
Formats: parquet
Languages: English
ArXiv: 1704.04683
Libraries: Datasets, pandas
baber committed (verified)
Commit 7378970 · 1 parent: 30419ee

Delete loading script

Files changed (1)
  1. race.py +0 -66
race.py DELETED
@@ -1,66 +0,0 @@
-import json
-
-import datasets
-
-
-_CITATION = """\
-@article{lai2017large,
-    title={RACE: Large-scale ReAding Comprehension Dataset From Examinations},
-    author={Lai, Guokun and Xie, Qizhe and Liu, Hanxiao and Yang, Yiming and Hovy, Eduard},
-    journal={arXiv preprint arXiv:1704.04683},
-    year={2017}
-}
-"""
-
-_DESCRIPTION = """\
-Race is a large-scale reading comprehension dataset with more than 28,000 passages and nearly 100,000 questions. The
-dataset is collected from English examinations in China, which are designed for middle school and high school students.
-The dataset can be served as the training and test sets for machine comprehension.
-"""
-
-_BASE_URL = "https://huggingface.co/datasets/bfattori/race/raw/main"
-_URLS = {
-    "high": f"{_BASE_URL}/race_high_test.jsonl",
-}
-
-class Race(datasets.GeneratorBasedBuilder):
-    """ReAding Comprehension Dataset From Examination dataset from CMU"""
-
-    VERSION = datasets.Version("0.1.0")
-
-    BUILDER_CONFIGS = [
-        datasets.BuilderConfig(name="high", description="Exams designed for high school students", version=VERSION),
-    ]
-
-    def _info(self):
-        features = datasets.Features(
-            {
-                "article": datasets.Value("string"),
-                "problems": datasets.Value("string"),
-            }
-        )
-        return datasets.DatasetInfo(
-            description=f"{_DESCRIPTION}\n{self.config.description}",
-            features=features,
-            citation=_CITATION,
-        )
-
-    def _split_generators(self, dl_manager):
-        urls = _URLS[self.config.name]
-        data_dir = dl_manager.download_and_extract(urls)
-        return [
-            datasets.SplitGenerator(
-                name=datasets.Split.TEST,
-                # These kwargs will be passed to _generate_examples
-                gen_kwargs={
-                    "filepath": data_dir,
-                    "split": datasets.Split.TEST,
-                },
-            ),
-        ]
-
-    def _generate_examples(self, filepath, split):
-        with open(filepath, encoding="utf-8") as f:
-            for key, row in enumerate(f):
-                data = json.loads(row)
-                yield key, {"article": data["article"], "problems": data["problems"]}
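
For context, the deleted file was a dataset loading script, so using it required passing trust_remote_code=True; after this commit the built-in loader reads the dataset's parquet files directly (per the card metadata above). Below is a minimal sketch of both calls, not part of the commit: the repo id "bfattori/race" is taken from _BASE_URL in the deleted script, the "high" config from its BUILDER_CONFIGS, and it assumes the parquet conversion keeps that config name.

from datasets import load_dataset

# Before this commit, race.py above ran as remote code
# (only works while the script is present in the repo):
# ds = load_dataset("bfattori/race", "high", trust_remote_code=True, split="test")

# After this commit, the generic loader reads the parquet files directly,
# with no remote code involved:
ds = load_dataset("bfattori/race", "high", split="test")

row = ds[0]
print(row["article"][:200])  # the passage text
print(row["problems"])       # per the deleted schema, a single string value

The rows keep the same two fields ("article", "problems") that the deleted _info() declared, since the parquet files are a straight conversion of the same records.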