|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
import json |
|
|
from collections import defaultdict |
|
|
|
|
|
import datasets |
|
|
import csv |
|
|
|
|
|
|
|
|
_CITATION = """\ |
|
|
@misc{dalton2020trec, |
|
|
title={TREC CAsT 2019: The Conversational Assistance Track Overview}, |
|
|
author={Jeffrey Dalton and Chenyan Xiong and Jamie Callan}, |
|
|
year={2020}, |
|
|
eprint={2003.13624}, |
|
|
archivePrefix={arXiv}, |
|
|
primaryClass={cs.IR} |
|
|
} |
|
|
""" |
|
|
|
|
|
|
|
|
_DESCRIPTION = """\ |
|
|
The Conversational Assistance Track (CAsT) is a new track for TREC 2019 to facilitate Conversational Information |
|
|
Seeking (CIS) research and to create a large-scale reusable test collection for conversational search systems. |
|
|
The document corpus is 38,426,252 passages from the TREC Complex Answer Retrieval (CAR) and Microsoft MAchine |
|
|
Reading COmprehension (MARCO) datasets. |
|
|
""" |
|
|
|
|
|
_HOMEPAGE = "http://www.treccast.ai" |
|
|
|
|
|
_LICENSE = "" |
|
|
|
|
|
|
|
|
|
|
|
_URL = "https://huggingface.co/datasets/uva-irlab/trec-cast-2019-multi-turn/resolve/main/" |
|
|
_URLs = { |
|
|
'topics': _URL+"cast2019_test_annotated.tsv", |
|
|
'qrels': _URL+"2019qrels.txt", |
|
|
'test_collection': { |
|
|
'msmarco': 'https://msmarco.blob.core.windows.net/msmarcoranking/collection.tar.gz', |
|
|
'car': "http://trec-car.cs.unh.edu/datareleases/v2.0/paragraphCorpus.v2.0.tar.xz", |
|
|
} |
|
|
} |
|
|
|
|
|
|
|
|
class TrecCast2019MultiTurn(datasets.GeneratorBasedBuilder):
    """TREC CAsT 2019 multi-turn conversational search dataset.

    Three configurations are exposed:

    * ``topics`` -- the annotated test queries. Each example carries the query
      text, its query id (``<conversation_id>_<turn_number>``) and the earlier
      queries of the same conversation as ``history``.
    * ``qrels`` -- TREC-format relevance judgements, grouped per query id.
    * ``test_collection`` -- the MS MARCO / TREC CAR passage corpus. Only the
      download is wired up; example generation is not implemented yet.
    """

    VERSION = datasets.Version("1.0.0")

    BUILDER_CONFIGS = [
        datasets.BuilderConfig(name="qrels",
                               version=VERSION,
                               description=""),
        datasets.BuilderConfig(name="topics",
                               version=VERSION,
                               description="The topics contain the queries, query IDs and their history."),
        datasets.BuilderConfig(name="test_collection",
                               version=VERSION,
                               description="The test collection will provide the passages of TREC CAR and MSMARCO"),
    ]

    # NOTE(review): the default config is the only one whose example
    # generation raises NotImplementedError (see _generate_examples); users
    # must select "topics" or "qrels" explicitly to actually load data.
    DEFAULT_CONFIG_NAME = "test_collection"

    def _info(self):
        """Return the :class:`datasets.DatasetInfo` (feature schema) for the
        selected configuration."""
        if self.config.name == "topics":
            features = datasets.Features({
                "qid": datasets.Value("string"),
                # Earlier queries of the same conversation, oldest first.
                "history": datasets.features.Sequence(feature=datasets.Value('string')),
                "query": datasets.Value("string"),
            })
        elif self.config.name == "qrels":
            features = datasets.Features({
                "qid": datasets.Value("string"),
                # All judged documents for this query id.
                "qrels": datasets.features.Sequence(feature=datasets.Features({
                    'docno': datasets.Value("string"),
                    # Relevance grade from the qrels file (kept as a string).
                    'rank': datasets.Value("string"),
                })),
            })
        elif self.config.name == 'test_collection':
            features = datasets.Features({
                "docid": datasets.Value("string"),
            })
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            supervised_keys=None,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        """Returns SplitGenerators.

        Downloads the file(s) for the active configuration and exposes a
        single TEST split; the config name doubles as the ``split`` argument
        of :meth:`_generate_examples`.
        """
        my_urls = _URLs[self.config.name]
        downloaded_files = dl_manager.download_and_extract(my_urls)
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={
                    "file": downloaded_files,
                    "split": self.config.name,
                },
            ),
        ]

    def _generate_examples(self, file, split):
        """Yields examples as (key, example) tuples.

        Args:
            file: local path of the downloaded file for this configuration.
            split: the active configuration name ("qrels" or "topics";
                "test_collection" is not implemented yet).

        Raises:
            NotImplementedError: for any split other than "qrels"/"topics".
        """
        if split == 'qrels':
            # TREC qrels rows are space separated:
            #   <qid> <iteration> <docno> <relevance>
            # Group all judgements belonging to the same query id.
            qrels = defaultdict(list)
            # `with` guarantees the handle is closed (the original leaked it).
            with open(file, encoding="utf-8") as fh:
                for row in csv.reader(fh, delimiter=" "):
                    qrels[row[0]].append({'docno': row[2], 'rank': row[3]})

            for qid, judgements in qrels.items():
                yield qid, {'qid': qid, 'qrels': judgements}

        elif split == 'topics':
            # Collect (turn_number, query) pairs per conversation so that the
            # history is correct even if the TSV rows arrive out of order.
            topics = defaultdict(list)
            with open(file, encoding="utf-8") as fh:
                for qid, query in csv.reader(fh, delimiter="\t"):
                    conversation_id, question_number = qid.split('_')
                    topics[conversation_id].append((int(question_number), query))

            for conversation_id, turns in topics.items():
                turns.sort()
                queries = [query for _, query in turns]
                for idx, query in enumerate(queries):
                    qid = f"{conversation_id}_{idx + 1}"
                    # History = all queries asked before this turn, oldest first.
                    yield qid, {'query': query, 'history': queries[:idx], 'qid': qid}

        else:
            raise NotImplementedError(f"'{split}' is not yet implemented")
|
|
|