| from collections import Counter |
| import json |
| import re |
|
|
| import datasets |
| import pandas as pd |
| from huggingface_hub import create_repo, upload_file, hf_hub_download |
| from huggingface_hub.utils._errors import HfHubHTTPError |
|
|
| |
| |
| |
|
|
| |
# Download the raw Alloprof queries CSV from the Hub. The revision is pinned
# to a specific commit so the build stays reproducible even if the upstream
# dataset changes.
dl_path = hf_hub_download(
    repo_id="antoinelb7/alloprof",
    filename="data/alloprof.csv",
    repo_type="dataset",
    revision="0faa90fee1ad1a6e3e461d7be49abf71488e6687"
)
alloprof_queries = pd.read_csv(dl_path)
|
|
| |
# Keep only the rows flagged as actual user queries.
alloprof_queries = alloprof_queries[alloprof_queries["is_query"]]

# Drop queries with no text at all.
alloprof_queries = alloprof_queries[~alloprof_queries["text"].isna()]

# Drop English-looking queries (the published dataset is French-only).
# Series.str.startswith accepts a tuple of prefixes, so one vectorized call
# replaces the four chained boolean masks.
alloprof_queries = alloprof_queries[
    ~alloprof_queries["text"].str.lower().str.startswith(("hi", "hello", "how", "i "))
]
|
|
| |
# Keep only queries that have a non-empty relevance list ending in a French
# document id.
alloprof_queries = alloprof_queries[
    (~alloprof_queries["relevant"].isna()) & (alloprof_queries["relevant"].str.endswith("-fr"))
]

# Drop queries that merely link back to the Alloprof site. regex=False makes
# this a literal substring match: with the default regex=True the dots in the
# URL are wildcards and could filter unrelated rows.
alloprof_queries = alloprof_queries[
    ~alloprof_queries["text"].str.contains("https://www.alloprof.qc.ca", regex=False)
]
|
|
|
|
| |
def parse_relevant_ids(row):
    """Split a ';'-joined relevant-id string, keep the French ids, and strip
    their '-fr' suffix."""
    suffix = "-fr"
    return [chunk[: -len(suffix)] for chunk in row.split(";") if chunk.endswith(suffix)]
|
|
|
|
# Replace the raw ';'-joined string with the list of French document ids.
alloprof_queries["relevant"] = alloprof_queries["relevant"].apply(parse_relevant_ids)
|
|
|
|
| |
def parse_answer(row):
    """Flatten a rich-text (Quill-delta-style) JSON answer into plain text.

    Keeps only string inserts; dict inserts are embeds (images, formulas).
    Values that are not valid JSON (or not strings at all) are kept as-is.
    Invisible characters (NBSP, zero-width space) are normalized out.
    """
    try:
        ops = json.loads(row)
        text = "".join(op["insert"] for op in ops if not isinstance(op["insert"], dict))
    # JSONDecodeError is a ValueError; TypeError covers non-string input and
    # non-list JSON; KeyError covers ops missing an "insert" key. The original
    # bare `except:` also swallowed KeyboardInterrupt/SystemExit.
    except (ValueError, TypeError, KeyError):
        text = row
    return text.replace("\xa0", " ").replace("\u200b", "").replace("\xa0", "")
|
|
|
|
# Flatten each answer's rich-text JSON into plain text.
alloprof_queries["answer"] = alloprof_queries["answer"].apply(parse_answer)

# Keep only the columns published in the final dataset.
alloprof_queries = alloprof_queries[["id", "text", "answer", "relevant", "subject"]]

# Several queries share the exact same text; keep only the first occurrence.
alloprof_queries = alloprof_queries.drop_duplicates(subset=["text"], keep="first")
|
|
| |
| |
| |
|
|
| |
# Download the French page contents (the retrieval corpus), pinned to the
# same commit as the queries for reproducibility.
dl_path = hf_hub_download(
    repo_id="antoinelb7/alloprof",
    filename="data/pages/page-content-fr.json",
    repo_type="dataset",
    revision="0faa90fee1ad1a6e3e461d7be49abf71488e6687"
)
alloprof_docs = pd.read_json(dl_path)

# Drop documents with no payload.
alloprof_docs = alloprof_docs[~alloprof_docs["data"].isna()]
|
|
| |
def parse_row(row):
    """Extract the uuid, title and topic of a raw document record."""
    file_info = row["file"]
    return [file_info[key] for key in ("uuid", "title", "topic")]
|
|
|
|
def get_text(row):
    """Concatenate a document's module texts and strip the HTML markup.

    Keeps plain 'text' modules plus the text submodules of 'definition' and
    'exemple' special blocks; every other module type is ignored. NBSP and
    zero-width spaces are normalized and runs of whitespace collapsed.
    """
    parts = []
    for section in row["file"]["sections"]:
        for module in section["modules"]:
            if module["type"] == "blocSpecial":
                if module["subtype"] in ["definition", "exemple"]:
                    parts.extend(
                        sub["text"] for sub in module["submodules"] if sub["type"] == "text"
                    )
            elif module["type"] == "text":
                parts.append(module["text"])
    text = " ".join(parts)
    # Crude HTML tag stripper. Raw strings fix the invalid escape sequence
    # ("\s" in a non-raw string is a SyntaxWarning on Python 3.12+).
    text = re.sub(r"<[^<]+?>", "", text)
    text = text.replace("\xa0", " ").replace("\u200b", "")
    text = re.sub(r"\s{2,}", " ", text)
    return text
|
|
|
|
# Extract uuid/title/topic and the cleaned text from each raw record.
parsed_df = alloprof_docs["data"].apply(parse_row)
alloprof_docs[["uuid", "title", "topic"]] = parsed_df.tolist()
alloprof_docs["text"] = alloprof_docs["data"].apply(get_text)

# Keep only the columns published in the final dataset.
alloprof_docs = alloprof_docs[["uuid", "title", "topic", "text"]]
|
|
| |
| |
| |
|
|
| |
# Sanity check: every document id referenced by a query must exist in the
# corpus.
relevants = alloprof_queries["relevant"].tolist()
relevants = {i for j in relevants for i in j}
# A set gives O(1) membership instead of issubset scanning a list, and an
# explicit raise survives `python -O` (assert statements are stripped).
missing = relevants - set(alloprof_docs["uuid"])
if missing:
    raise ValueError("Some relevant document of queries are not present in the corpus")
|
|
| |
# Convert to HF datasets for the dedup filtering/mapping below and for the
# train/test split.
alloprof_queries = datasets.Dataset.from_pandas(alloprof_queries)
alloprof_docs = datasets.Dataset.from_pandas(alloprof_docs)
|
|
| |
| |
| |
# Find document texts that appear more than once in the corpus.
duplicate_docs = Counter(alloprof_docs["text"])
duplicate_texts = {text for text, count in duplicate_docs.items() if count > 1}

# Collect every uuid carrying a duplicated text in a single pass. The original
# loop re-filtered the whole docs dataset and re-mapped every query once per
# duplicated text, rebuilding both datasets each iteration.
duplicate_ids = {d["uuid"] for d in alloprof_docs if d["text"] in duplicate_texts}

# Drop all duplicated documents (every copy, including the first) and scrub
# their ids from the queries' relevance lists — exactly what the per-text loop
# did, in one filter and one map.
alloprof_docs = alloprof_docs.filter(lambda x: x["uuid"] not in duplicate_ids)
alloprof_queries = alloprof_queries.map(
    lambda x: {"relevant": [i for i in x["relevant"] if i not in duplicate_ids]}
)

# Queries left without any relevant document are useless for retrieval.
alloprof_queries = alloprof_queries.filter(lambda x: len(x["relevant"]) > 0)
|
|
| |
# 80/20 split of the queries into train/test.
# NOTE(review): no seed is passed, so the split differs on every run —
# consider train_test_split(test_size=.2, seed=...) if reproducibility matters.
alloprof_queries = alloprof_queries.train_test_split(test_size=.2)
|
|
| |
| |
| |
|
|
| |
repo_id = "lyon-nlp/alloprof"
# exist_ok=True makes repo creation idempotent across re-runs while still
# surfacing genuine failures (auth, quota, network). The previous blanket
# `except HfHubHTTPError: print(...)` silently mislabeled every HTTP error
# as "repo already exists".
create_repo(repo_id, repo_type="dataset", exist_ok=True)
|
|
| |
# Serialize each split to JSON records on disk, then push every file to the
# Hub repo under the same name.
exports = [
    ("queries-train.json", alloprof_queries["train"]),
    ("queries-test.json", alloprof_queries["test"]),
    ("documents.json", alloprof_docs),
]
for filename, split in exports:
    split.to_pandas().to_json(filename, orient="records")
for filename, _ in exports:
    upload_file(path_or_fileobj=filename, path_in_repo=filename, repo_id=repo_id, repo_type="dataset")
|
|