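"""Collate aligned Dutch-English OpenSubtitles sentence pairs into longer
translation examples.

Reads a JSON-lines dump of aligned subtitle sentences, packs consecutive pairs
into buffers up to a randomised token budget (measured with a Hugging Face
tokenizer), and writes the resulting records as gzipped JSON lines.

The helper functions and classes below rely on two module-level globals that
are bound in the __main__ block: ``token_length`` (a TokenLength instance) and
``writer`` (the gzip output handle).
"""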
import gzip
import json

import numpy as np
import pandas as pd
from transformers import AutoTokenizer

# Token budget for emitting individual sentence pairs; it also matches the
# largest buffer size configured in __main__.
COLLATE_LENGTH = 370


def emit(line_id, nl_str, en_str, nl_l, en_l):
    """Write one translation record as a JSON line to the module-level gzip writer."""
    obj = {
        "id": line_id,
        "translation": {
            "nl": nl_str.strip(),
            "en": en_str.strip(),
        },
        "nl_len": nl_l,
        "en_len": en_l,
    }
    writer.write(json.dumps(obj).encode("utf-8"))
    writer.write(b"\n")


class TokenLength:
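    """Callable that returns the number of tokens a text encodes to.

    The tokenizer is loaded and used without truncation so that over-length
    texts still report their full token count.
    """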
    def __init__(self, tokenizer):
        self.tokenizer = AutoTokenizer.from_pretrained(
            tokenizer, max_length=4096, truncation=False, use_fast=False
        )

    def __call__(self, text: str):
        return len(self.tokenizer.encode(text, max_length=4096, truncation=False))


class Counter:
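    """Minimal incrementing counter; each call returns the next value."""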
    def __init__(self, start=0):
        self.count = start

    def __call__(self):
        self.count += 1
        return self.count


class Buffer:
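    """Collates consecutive sentence pairs into longer translation examples.

    Pairs are appended (joined with a separator) until either the nl or the en
    side would exceed a randomly drawn token budget; the accumulated pair is
    then written out via emit() and a fresh buffer is started. When emit_lines
    is True, each individual pair is also emitted on its own, provided both
    sides fit within COLLATE_LENGTH tokens. en_prefix only counts towards the
    en-side budget check.
    """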
    def __init__(
        self,
        id: int,
        emit_lines: bool,
        max_length: int,
        en_prefix="",
    ):
        self.id = id
        self.emit_lines = emit_lines
        self.max_length = max_length
        self.en_prefix = en_prefix
        self.counter = Counter()
        self.nl_l = None
        self.en_l = None
        self.nl_buf = None
        self.en_buf = None
        self.cur_max_length = None
        self.reset()

    def set_cur_max_length(self):
        """Draw a fresh token budget: max_length scaled by a Beta(20, 8) sample
        (mean of roughly 0.71). You can check the distribution with the following code:
        %matplotlib notebook
        import numpy as np
        import matplotlib.pyplot as plt

        plt.rcParams['figure.figsize'] = [9.5,6]
        fig, ax = plt.subplots(1, 1)

        r = np.random.beta(20,8,102000)
        ax.hist(r, density=True, histtype='stepfilled', alpha=0.2, bins=200)
        ax.legend(loc='best', frameon=False)
        plt.show()
        """
        self.cur_max_length = int(self.max_length * np.random.beta(20, 8))

    def reset(self):
        self.nl_l = None
        self.en_l = None
        self.nl_buf = None
        self.en_buf = None
        self.set_cur_max_length()

    def add_ok(self, nl_str, en_str, separator="\n"):
        """If the new text fits within the max_length tokens, add it, else return False"""
        nl_new = self.nl_buf + f"{separator}{nl_str}" if self.nl_buf else nl_str
        en_new = self.en_buf + f"{separator}{en_str}" if self.en_buf else en_str
        nl_new_l = token_length(nl_new)
        en_new_l = token_length(en_new)
        # Check if we can add it or if the result would be too long
        if (
            nl_new_l > self.cur_max_length
            or token_length(self.en_prefix + en_new) > self.cur_max_length
        ):
            return False
        else:
            self.nl_buf = nl_new
            self.en_buf = en_new
            self.nl_l = nl_new_l
            self.en_l = en_new_l
            return True

    def emit(self, row, separator):
        nl_str = row.translation["nl"]
        en_str = row.translation["en"]
        nl_id = row.meta["sentenceIds"]["nl"]
        en_id = row.meta["sentenceIds"]["en"]

        # If one sentence ends with a period but the other doesn't, add one to the other
        if nl_str.endswith(".") and not en_str.endswith("."):
            en_str += "."
        elif en_str.endswith(".") and not nl_str.endswith("."):
            nl_str += "."
        # Strip any leading "-" and space characters (dialogue dashes) from both sentences
        nl_str = nl_str.lstrip("- ")
        en_str = en_str.lstrip("- ")

        nl_len = token_length(nl_str)
        en_len = token_length(en_str)
        if self.emit_lines and nl_len <= COLLATE_LENGTH and en_len <= COLLATE_LENGTH:
            emit(
                line_id=f"{row.tconst}-nl{nl_id}-en{en_id}-l-",
                nl_str=nl_str,
                en_str=en_str,
                nl_l=nl_len,
                en_l=en_len,
            )
        if self.add_ok(nl_str.strip(), en_str.strip(), separator):
            return

        # add_ok returned False: the pair no longer fits, so emit the current buffer if it is not empty
        if self.nl_buf:
            emit(
                line_id=f"{row.tconst}-b{self.id}-{self.counter()}",
                nl_str=self.nl_buf,
                en_str=self.en_buf,
                nl_l=self.nl_l,
                en_l=self.en_l,
            )
            # After emit of the buffer, we reset the buffer
            self.reset()

        # Start a new buffer with this sentence pair; if it does not fit the
        # current budget on its own, it is dropped and the buffer is reset.
        if not self.add_ok(nl_str.strip(), en_str.strip()):
            self.reset()


if __name__ == "__main__":
    token_length = TokenLength(tokenizer="yhavinga/ul2-base-dutch")
    line_counter = Counter()

    # Two collation buffers: the first (index 0) also emits individual sentence
    # pairs and packs up to 0.6 * 370 tokens; the second packs up to 370 tokens.
    buffers = [
        Buffer(
            id=index, emit_lines=(index == 0), max_length=buf_max_length, en_prefix=""
        )
        for index, buf_max_length in enumerate([0.6 * 370, 370])
    ]

    # The input is a JSON-lines dump of aligned nl/en subtitle sentences.
    df = pd.read_json("episode_opensubtitles.json.gz", lines=True)

    # Offer every sentence pair to each buffer; the buffers emit collated
    # records to the gzipped output as they fill up. Note that pairs still
    # sitting in a buffer when the loop ends are not flushed.
    with gzip.open("outfile", mode="wb") as writer:
        for row in df.itertuples():
            for buffer in buffers:
                buffer.emit(row, separator="\n")
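
# Illustrative shape of one output record (values are made up; tconst and the
# sentence ids come from the input data):
# {"id": "tt0000001-b1-3",
#  "translation": {"nl": "...", "en": "..."},
#  "nl_len": 312, "en_len": 298}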