aapot
committed on
Commit 3db1b8a · 1 Parent(s): 87bf288
Add dataset deduplication
- deduplicate.py +63 -0
- train.csv +2 -2
- valid.csv +2 -2
deduplicate.py
ADDED
@@ -0,0 +1,63 @@
# adapted from: https://github.com/huggingface/transformers/blob/master/examples/research_projects/codeparrot/scripts/preprocessing.py

import datasets

def get_hash(example):
    """Get hash of text field."""
    return {"hash": hash(example["text"])}

def check_uniques(example, uniques):
    """Check if current hash is still in set of unique hashes and remove if true."""
    if example["hash"] in uniques:
        uniques.remove(example["hash"])
        return True
    else:
        return False

def filter(example, uniques):
    """Filter dataset with unique values."""
    if not check_uniques(example, uniques):
        return False
    else:
        return True

dataset = datasets.load_dataset("csv", data_files={"train": "train.csv", "validation": "valid.csv"})

# TRAIN SPLIT DEDUPLICATION

len_train = len(dataset["train"])
print(f"Size of original dataset train: {len_train}")

dataset["train"] = dataset["train"].map(get_hash, num_proc=64, writer_batch_size=100000)

# Deduplicate hashes
uniques = set(dataset["train"].unique("hash"))
frac = len(uniques) / len(dataset["train"])
print(f"Fraction of duplicates: {1-frac:.2%}")

# Deduplicate data
dataset_train_deduplicated = dataset["train"].filter(filter, fn_kwargs={"uniques": uniques})
print(f"Size of filtered dataset train: {len(dataset_train_deduplicated)}")

# VALIDATION SPLIT DEDUPLICATION

len_val = len(dataset["validation"])
print(f"Size of original dataset valid: {len_val}")

dataset["validation"] = dataset["validation"].map(get_hash, num_proc=64, writer_batch_size=100000)

# Deduplicate hashes
uniques = set(dataset["validation"].unique("hash"))
frac = len(uniques) / len(dataset["validation"])
print(f"Fraction of duplicates: {1-frac:.2%}")

# Deduplicate data
dataset_valid_deduplicated = dataset["validation"].filter(filter, fn_kwargs={"uniques": uniques})
print(f"Size of filtered dataset valid: {len(dataset_valid_deduplicated)}")

# SAVE DEDUPLICATED DATASET
dataset_train_deduplicated = dataset_train_deduplicated.remove_columns(["hash"])
dataset_valid_deduplicated = dataset_valid_deduplicated.remove_columns(["hash"])

dataset_train_deduplicated.to_csv("train.csv", num_proc=64, index=False)
dataset_valid_deduplicated.to_csv("valid.csv", num_proc=64, index=False)
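For a quick sanity check of the hash-and-filter pattern used above, the same flow can be run on a tiny in-memory split before touching the full CSVs. The sketch below is illustrative only: the toy strings and the demo / keep_first names are made up here, while the datasets calls (from_dict, map, unique, filter with fn_kwargs) mirror the script.

import datasets

# Toy split with one exact duplicate (illustrative data only).
demo = datasets.Dataset.from_dict({"text": ["kissa istuu", "koira juoksee", "kissa istuu"]})

# Same idea as get_hash: attach a hash of the text field to every row.
demo = demo.map(lambda example: {"hash": hash(example["text"])})

# One entry per distinct text; each row "spends" its hash the first time it is seen.
uniques = set(demo.unique("hash"))

def keep_first(example, uniques):
    # Keep a row only while its hash is still unseen, then mark it as seen.
    if example["hash"] in uniques:
        uniques.remove(example["hash"])
        return True
    return False

demo_dedup = demo.filter(keep_first, fn_kwargs={"uniques": uniques})
print(len(demo), "->", len(demo_dedup))  # expected: 3 -> 2

Removing each hash from the set as soon as it matches is what keeps only the first occurrence of every duplicated text; this relies on the filter running in a single process so the shared set is mutated in order, which is how the script above calls it.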
train.csv
CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
-size
+oid sha256:857db09dc9d10a5606768ff8885d6fe1162f5dbbbb2aa82977278499530a3fd9
+size 64553318366
valid.csv
CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
-size
+oid sha256:816cb816f2f002e158bba01e19eee6cec58d18a37919f1284de45decbe331289
+size 63928522