import gzip
import json

import numpy as np
import pandas as pd
from transformers import AutoTokenizer
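
# Pack aligned Dutch (nl) / English (en) subtitle translation pairs into
# longer "collated" training examples, written out as gzipped JSON lines.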

# Token budget used both for the collation buffers and as the cutoff for
# emitting a pair as a single-line example.
COLLATE_LENGTH = 370


def emit(line_id, nl_str, en_str, nl_l, en_l):
    """Write one translation pair as a JSON line.

    Note: relies on the module-level ``writer`` opened in the ``__main__`` block.
    """
    obj = {
        "id": line_id,
        "translation": {
            "nl": nl_str.strip(),
            "en": en_str.strip(),
        },
        "nl_len": nl_l,
        "en_len": en_l,
    }
    writer.write(json.dumps(obj).encode("utf-8"))
    writer.write(b"\n")
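
# A written line looks like this (hypothetical values):
# {"id": "tt0012345-b1-3", "translation": {"nl": "...", "en": "..."},
#  "nl_len": 57, "en_len": 61}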


class TokenLength:
    """Callable that returns the number of tokens in a text under the given tokenizer."""

    def __init__(self, tokenizer):
        self.tokenizer = AutoTokenizer.from_pretrained(
            tokenizer, max_length=4096, truncation=False, use_fast=False
        )

    def __call__(self, text: str):
        return len(self.tokenizer.encode(text, max_length=4096, truncation=False))
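
# Example (assumed usage, mirroring the __main__ block below):
#   token_length = TokenLength("yhavinga/ul2-base-dutch")
#   token_length("Hallo wereld")  # -> token count, including special tokens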


class Counter:
    """Callable that returns an incrementing count, used to build unique example ids."""

    def __init__(self, start=0):
        self.count = start

    def __call__(self):
        self.count += 1
        return self.count


class Buffer:
    """Accumulate sentence pairs until a randomly drawn token budget is
    reached, then emit the concatenation as a single collated example."""

    def __init__(
        self,
        id: int,
        emit_lines: bool,
        max_length: int,
        en_prefix="",
    ):
        self.id = id
        self.emit_lines = emit_lines
        self.max_length = max_length
        self.en_prefix = en_prefix
        self.counter = Counter()
        self.nl_l = None
        self.en_l = None
        self.nl_buf = None
        self.en_buf = None
        self.cur_max_length = None
        self.reset()

    def set_cur_max_length(self):
        """Draw the current token budget: ``max_length`` scaled by a
        Beta(20, 8) sample (mean ~0.71), so collated examples vary in length.

        You can check the distribution with the following code:

            %matplotlib notebook
            import numpy as np
            import matplotlib.pyplot as plt

            plt.rcParams['figure.figsize'] = [9.5, 6]
            fig, ax = plt.subplots(1, 1)

            r = np.random.beta(20, 8, 102000)
            ax.hist(r, density=True, histtype='stepfilled', alpha=0.2, bins=200)
            ax.legend(loc='best', frameon=False)
            plt.show()
        """
        self.cur_max_length = int(self.max_length * np.random.beta(20, 8))

    def reset(self):
        self.nl_l = None
        self.en_l = None
        self.nl_buf = None
        self.en_buf = None
        self.set_cur_max_length()

    def add_ok(self, nl_str, en_str, separator="\n"):
        """If the new text fits within the current token budget (cur_max_length),
        add it and return True, else return False."""
        nl_new = self.nl_buf + f"{separator}{nl_str}" if self.nl_buf else nl_str
        en_new = self.en_buf + f"{separator}{en_str}" if self.en_buf else en_str
        nl_new_l = token_length(nl_new)
        en_new_l = token_length(en_new)

        if (
            nl_new_l > self.cur_max_length
            or token_length(self.en_prefix + en_new) > self.cur_max_length
        ):
            return False
        else:
            self.nl_buf = nl_new
            self.en_buf = en_new
            self.nl_l = nl_new_l
            self.en_l = en_new_l
            return True

    def emit(self, row, separator):
        nl_str = row.translation["nl"]
        en_str = row.translation["en"]
        nl_id = row.meta["sentenceIds"]["nl"]
        en_id = row.meta["sentenceIds"]["en"]

        # If only one side ends with a period, append one to the other side
        # so both sides end consistently.
        if nl_str.endswith(".") and not en_str.endswith("."):
            en_str += "."
        elif en_str.endswith(".") and not nl_str.endswith("."):
            nl_str += "."

        # Strip leading dialogue dashes.
        nl_str = nl_str.lstrip("- ")
        en_str = en_str.lstrip("- ")

        nl_len = token_length(nl_str)
        en_len = token_length(en_str)
        # Optionally emit the pair as a single-line example as well.
        if self.emit_lines and nl_len <= COLLATE_LENGTH and en_len <= COLLATE_LENGTH:
            emit(
                line_id=f"{row.tconst}-nl{nl_id}-en{en_id}-l-",
                nl_str=nl_str,
                en_str=en_str,
                nl_l=nl_len,
                en_l=en_len,
            )
        if self.add_ok(nl_str.strip(), en_str.strip(), separator):
            return

        # The pair did not fit: flush the current buffer as one collated
        # example, reset, and retry with a freshly drawn token budget.
        if self.nl_buf:
            emit(
                line_id=f"{row.tconst}-b{self.id}-{self.counter()}",
                nl_str=self.nl_buf,
                en_str=self.en_buf,
                nl_l=self.nl_l,
                en_l=self.en_l,
            )
        self.reset()

        result = self.add_ok(nl_str.strip(), en_str.strip())
        if not result:
            # The pair alone exceeds the fresh budget; drop it and reset.
            self.reset()
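
# The input rows are assumed to carry a "translation" dict with "nl"/"en"
# strings, a "meta" dict with per-language "sentenceIds", and a "tconst"
# episode id; these field names are inferred from the attribute accesses in
# Buffer.emit above.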


if __name__ == "__main__":
    token_length = TokenLength(tokenizer="yhavinga/ul2-base-dutch")
    line_counter = Counter()

    # Two buffers: buffer 0 also emits single-line examples and collates up to
    # ~60% of COLLATE_LENGTH tokens; buffer 1 collates up to the full
    # COLLATE_LENGTH, so the output mixes shorter and longer examples.
    buffers = [
        Buffer(
            id=index, emit_lines=(index == 0), max_length=buf_max_length, en_prefix=""
        )
        for index, buf_max_length in enumerate(
            [int(0.6 * COLLATE_LENGTH), COLLATE_LENGTH]
        )
    ]

    df = pd.read_json("episode_opensubtitles.json.gz", lines=True)

    with gzip.open("outfile", mode="wb") as writer:
        for row in df.itertuples():
            for buffer in buffers:
                buffer.emit(row, separator="\n")
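
# The gzipped JSON-lines output can be read back with, for example:
#   pd.read_json("outfile", lines=True, compression="gzip")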