Create coqa.py
coqa.py
ADDED
@@ -0,0 +1,178 @@
"""
CoQA: A Conversational Question Answering Challenge
https://arxiv.org/pdf/1808.07042.pdf

CoQA is a large-scale dataset for building Conversational Question Answering
systems. The goal of the CoQA challenge is to measure the ability of machines to
understand a text passage and answer a series of interconnected questions that
appear in a conversation.

Homepage: https://stanfordnlp.github.io/coqa/
"""
import inspect
from itertools import zip_longest

import transformers.data.metrics.squad_metrics as squad_metrics

import lm_eval.datasets.coqa.coqa
from lm_eval.base import Task, rf, mean


_CITATION = """
@misc{reddy2018coqa,
    title={CoQA: A Conversational Question Answering Challenge},
    author={Siva Reddy and Danqi Chen and Christopher D. Manning},
    year={2018},
    eprint={1808.07042},
    archivePrefix={arXiv},
    primaryClass={cs.CL}
}
"""


class CoQA(Task):
    VERSION = 1
    DATASET_PATH = inspect.getfile(lm_eval.datasets.coqa.coqa)
    DATASET_NAME = None

    def has_training_docs(self):
        return True

    def has_validation_docs(self):
        return True

    def has_test_docs(self):
        return False

    def training_docs(self):
        return self.dataset["train"]

    def validation_docs(self):
        return self.dataset["validation"]

    def test_docs(self):
        pass

    def doc_to_text(self, doc):
        # Given a passage p, the conversation history {q_1, a_1, ..., q_{i-1}, a_{i-1}}
        # and a question q_i, the task is to predict the answer a_i.
        doc_text = doc["story"] + "\n\n"
        for (q, a) in zip_longest(
            doc["questions"]["input_text"], doc["answers"]["input_text"][:-1]
        ):  # omit the target answer a_i
            question = f"Q: {q}\n\n"
            answer = f"A: {a}\n\n" if a is not None else "A:"
            doc_text += question + answer
        return doc_text

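    # For illustration only (hypothetical passage and turns), doc_to_text above
    # produces a prompt shaped like:
    #
    #   <story>
    #
    #   Q: <question 1>
    #
    #   A: <answer 1>
    #
    #   Q: <final question>
    #
    #   A:
    #
    # leaving the final answer for the model to complete.
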
    def should_decontaminate(self):
        return True

    def doc_to_decontamination_query(self, doc):
        return doc["story"] + " " + "\n".join(doc["questions"]["input_text"])

    @classmethod
    def get_answers(cls, doc, turn_id):
        # Returns the gold answer for this turn plus any unique valid
        # alternatives (some questions in CoQA have multiple valid answers).
        answers = []
        answer_for_turn = doc["answers"]["input_text"][turn_id - 1]
        answers.append(answer_for_turn)

        additional_answers = doc.get("additional_answers")
        if additional_answers:
            for key in additional_answers:
                additional_answer_for_turn = additional_answers[key]["input_text"][
                    turn_id - 1
                ]
                if additional_answer_for_turn.lower() not in map(str.lower, answers):
                    answers.append(additional_answer_for_turn)
        return answers

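    # Note: "additional_answers" are typically only present on the CoQA dev
    # set, where each turn was re-annotated by several human annotators.
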
+
@classmethod
|
| 90 |
+
def get_answer_choice(self, raw_text):
|
| 91 |
+
# Function maps answers to CoQA answer categories
|
| 92 |
+
# ~ 1/5 of the CoQA answers are Yes/No
|
| 93 |
+
# ~ 2/3 of the CoQA answers are span-based
|
| 94 |
+
# (answers overlap with the passage ignoring punctuation and case mismatch)
|
| 95 |
+
if raw_text == "unknown":
|
| 96 |
+
return "0"
|
| 97 |
+
if squad_metrics.normalize_answer(raw_text) == "yes":
|
| 98 |
+
return "1"
|
| 99 |
+
if squad_metrics.normalize_answer(raw_text) == "no":
|
| 100 |
+
return "2"
|
| 101 |
+
return "3" # Not a yes/no question
|
| 102 |
+
|
    @staticmethod
    def compute_scores(gold_list, pred):
        # Tests for an exact match on the normalized answer (compute_exact)
        # and for token overlap (compute_f1).
        f1_sum = 0.0
        em_sum = 0.0
        if len(gold_list) > 1:
            for i in range(len(gold_list)):
                gold_answers = gold_list[0:i] + gold_list[i + 1 :]
                # The prediction is compared against the n - 1 remaining golds
                # and the maximum score is taken.
                em_sum += max(
                    squad_metrics.compute_exact(a, pred) for a in gold_answers
                )
                f1_sum += max(squad_metrics.compute_f1(a, pred) for a in gold_answers)
        else:
            em_sum += max(squad_metrics.compute_exact(a, pred) for a in gold_list)
            f1_sum += max(squad_metrics.compute_f1(a, pred) for a in gold_list)

        return {
            "em": em_sum / max(1, len(gold_list)),
            "f1": f1_sum / max(1, len(gold_list)),
        }

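    # Worked example (illustrative): for gold_list = ["yes", "yup"] and
    # pred = "yes", each gold is held out in turn and the prediction is scored
    # against the remaining one, giving em = (0 + 1) / 2 = 0.5 and
    # f1 = (0.0 + 1.0) / 2 = 0.5.
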
    def doc_to_target(self, doc, turnid=None):
        # Default to predicting the last turn.
        if turnid is None:
            turnid = len(doc["questions"]["input_text"])
        raw_text = doc["answers"]["input_text"][turnid - 1]
        return " " + raw_text

    def construct_requests(self, doc, ctx):
        """Uses RequestFactory to construct Requests and returns an iterable of
        Requests which will be sent to the LM.

        :param doc:
            The document as returned from training_docs, validation_docs, or test_docs.
        :param ctx: str
            The context string, generated by fewshot_context. This includes the natural
            language description, as well as the few shot examples, and the question
            part of the document for `doc`.
        """
        cont_request = rf.greedy_until(ctx, {"until": ["\nQ:"]})
        return cont_request

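    # Generation is cut off at "\nQ:", i.e. at the point where the model would
    # otherwise begin writing the next question of the conversation.
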
    def process_results(self, doc, results):
        """Take a single document and the LM results and evaluates, returning a
        dict where keys are the names of submetrics and values are the values of
        the metric for that one document

        :param doc:
            The document as returned from training_docs, validation_docs, or test_docs.
        :param results:
            The results of the requests created in construct_requests.
        """
        turn_id = len(doc["questions"]["input_text"])
        gold_list = self.get_answers(doc, turn_id)
        pred = results[0].strip().split("\n")[0]

        scores = self.compute_scores(gold_list, pred)

        return {
            "f1": scores["f1"],
            "em": scores["em"],
        }

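    # Note that only the final turn of each conversation is scored here; all
    # earlier turns are shown to the model as gold history in the prompt.
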
    def higher_is_better(self):
        return {
            "f1": True,
            "em": True,
        }

    def aggregation(self):
        return {
            "f1": mean,
            "em": mean,
        }