Delete loading script auxiliary file
Browse files
raw_data/merge_clue_cluewsc2020.py
DELETED
|
@@ -1,136 +0,0 @@
|
|
| 1 |
-
"""
|
| 2 |
-
Adds clue/cluewsc2020 samples to xwinograd
|
| 3 |
-
From: https://gist.github.com/jordiclive/26506ea7e897ad8270f9e793bdc285b5
|
| 4 |
-
"""
|
| 5 |
-
|
| 6 |
-
import json
|
| 7 |
-
|
| 8 |
-
import datasets
|
| 9 |
-
import pandas as pd
|
| 10 |
-
from datasets import load_dataset
|
| 11 |
-
|
| 12 |
-
|
| 13 |
-
def find_pronoun(x):
    """Extract the pronoun span from a cluewsc2020 example row.

    Returns [pronoun text, [start, end) character offsets, pronoun as a
    list of characters].
    """
    target = x["target"]
    text = target["span2_text"]
    start = target["span2_index"]
    return [text, [start, start + len(text)], list(text)]
|
| 17 |
-
|
| 18 |
-
|
| 19 |
-
def find_switch(x):
    """Extract the candidate-antecedent span plus an inverted label flag.

    Returns [span text, [start, end) character offsets, span as a list of
    characters, flag] where flag is False when the original label is 1 and
    True otherwise.
    """
    target = x["target"]
    span = target["span1_text"]
    begin = target["span1_index"]
    offsets = [begin, begin + len(span)]
    # The switch flag is simply the negation of "label == 1".
    return [span, offsets, list(span), x["label"] != 1]
|
| 27 |
-
|
| 28 |
-
|
| 29 |
-
def convert_to_format(df):
    """Convert cluewsc2020 rows into the 7-column xwinograd TSV layout.

    Groups candidate rows sharing the same (text, pronoun) pair, keeps the
    first two rows whose labels disagree, and emits one output row per pair
    with columns: 0 lang, 1 "original", 2 placeholder "?", 3 sentence,
    4 tokens (JSON), 5 pronoun info (JSON), 6 both switch candidates (JSON).

    Args:
        df: DataFrame with at least "text", "target" (dict) and "label"
            columns; mutated in place (helper columns are added).

    Returns:
        (total_df, count): the formatted DataFrame and a Series holding the
        character length of each pronoun (computed before JSON encoding).
    """
    df["pronoun"] = df.apply(find_pronoun, axis=1)
    # Chinese text is tokenized per character.
    df["toks"] = df["text"].apply(lambda x: list(x))
    df["switch"] = df.apply(find_switch, axis=1)
    df.reset_index(inplace=True, drop=True)

    lang = []
    original = []
    o_text = []
    sent = []
    toks = []
    pronoun = []
    switch = []
    df["pronoun_to_replace"] = df["target"].apply(lambda x: x["span2_text"])
    for i, df_text in df.groupby(["text", "pronoun_to_replace"]):
        if len(df_text) == 1:
            # A lone candidate cannot form a contrastive pair.
            continue
        df_text.reset_index(inplace=True, drop=True)
        try:
            # Keep the first two rows whose labels disagree.
            if df_text["label"][0] != df_text["label"][1]:
                df_text = df_text[:2]
            elif df_text["label"][0] != df_text["label"][2] and len(df_text) > 2:
                df_text = df_text.iloc[[0, 2], :]
            df_text.reset_index(inplace=True, drop=True)
            # Fix: take an explicit copy so the column assignment below does
            # not write through a slice view (chained-assignment warning /
            # potential silent no-op under copy-on-write).
            df_new = df_text[:1].copy()
            df_new["switch"] = df_new["switch"].apply(
                lambda x: [df_text["switch"][0], df_text["switch"][1]]
            )

            lang.append("zh")
            original.append("original")
            o_text.append("?")
            sent.append(df_new.iloc[0]["text"])
            toks.append(df_new.iloc[0]["toks"])
            pronoun.append(df_new.iloc[0]["pronoun"])
            switch.append(df_new.iloc[0]["switch"])
        except Exception:
            # Best-effort: groups without a usable contrastive pair are
            # skipped (e.g. two rows with equal labels raise KeyError on
            # index 2). Narrowed from a bare `except:` so that
            # KeyboardInterrupt/SystemExit still propagate.
            continue

    total_df = pd.DataFrame(
        {0: lang, 1: original, 2: o_text, 3: sent, 4: toks, 5: pronoun, 6: switch}
    )
    # Pronoun length per row, taken BEFORE the columns are JSON-encoded.
    count = total_df[5].apply(lambda x: len(x[0]))
    total_df[4] = total_df[4].apply(lambda x: json.dumps(x))
    total_df[5] = total_df[5].apply(lambda x: json.dumps(x))
    total_df[6] = total_df[6].apply(lambda x: json.dumps(x))
    return total_df, count
|
| 80 |
-
|
| 81 |
-
|
| 82 |
-
def remove_at(i, s):
    """Return *s* with the character at index *i* deleted."""
    head, tail = s[:i], s[i + 1:]
    return head + tail
|
| 84 |
-
|
| 85 |
-
|
| 86 |
-
def remove_to(x):
    """Return the row's sentence, dropping the character immediately after
    the first "_" when the row's count equals 2; otherwise unchanged."""
    sentence = x["sentence"]
    if x["count"] != 2:
        return sentence
    cut = sentence.index("_") + 1
    return sentence[:cut] + sentence[cut + 1:]
|
| 91 |
-
|
| 92 |
-
|
| 93 |
-
def get_original_splits():
    """Load the xwinograd "test" split for every language and concatenate
    them into a single DataFrame with a "lang" column."""
    # xwinograd directory is https://huggingface.co/datasets/Muennighoff/xwinograd/tree/9dbcc59f86f9e53e0b36480d806982499d877edc
    frames = []
    for code in ["en", "jp", "ru", "pt", "fr", "zh"]:
        frame = datasets.load_dataset("xwinograd", code)["test"].to_pandas()
        frame["lang"] = code
        frames.append(frame)
    return pd.concat(frames)
|
| 105 |
-
|
| 106 |
-
|
| 107 |
-
def get_examples_from_clue():
    """Build new zh examples from clue/cluewsc2020 and post-process them.

    Converts the clue train+validation rows to xwinograd format, writes them
    to xwinograd/data/xwinograd.tsv, reloads the rebuilt zh test split,
    applies remove_to row-wise, and returns the
    sentence/option1/option2/answer columns.
    """
    # xwinograd directory is commit: https://huggingface.co/datasets/Muennighoff/xwinograd/tree/9dbcc59f86f9e53e0b36480d806982499d877edc
    clue = load_dataset("clue", "cluewsc2020")
    combined = pd.concat(
        [clue["train"].to_pandas(), clue["validation"].to_pandas()]
    )
    converted, pronoun_len = convert_to_format(combined)
    converted.reset_index(inplace=True, drop=True)
    converted.to_csv(
        "xwinograd/data/xwinograd.tsv", sep="\t", header=None, index=False
    )
    rebuilt = datasets.load_dataset("xwinograd", "zh")["test"].to_pandas()
    rebuilt["count"] = pronoun_len
    rebuilt["sentence"] = rebuilt.apply(remove_to, axis=1)
    return rebuilt[["sentence", "option1", "option2", "answer"]]
|
| 125 |
-
|
| 126 |
-
|
| 127 |
-
if __name__ == "__main__":
    # xwinograd directory is commit: https://huggingface.co/datasets/Muennighoff/xwinograd/tree/9dbcc59f86f9e53e0b36480d806982499d877edc
    dfx = get_original_splits()
    df_post = get_examples_from_clue()
    df_post.to_json("new_examples_updated.json", orient="split")
    df_post["lang"] = "zh"
    dfx = pd.concat([dfx, df_post])
    # Fix: drop_duplicates() returns a new frame; the original call discarded
    # its result, so duplicates were never actually removed.
    dfx = dfx.drop_duplicates()
    dfx.reset_index(inplace=True, drop=True)
    dfx.to_json("all_examples_updated.json", orient="split")
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|