flyingbugs committed on
Commit
3404cf6
·
verified ·
1 Parent(s): 2d1f71c

Model save

Browse files
README.md ADDED
@@ -0,0 +1,58 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ ---
2
+ base_model: Qwen/Qwen2.5-1.5B-Instruct
3
+ library_name: transformers
4
+ model_name: Qwen2.5-1.5B-Open-R1-Distill-eos
5
+ tags:
6
+ - generated_from_trainer
7
+ - trl
8
+ - sft
9
+ license: license
10
+ ---
11
+
12
+ # Model Card for Qwen2.5-1.5B-Open-R1-Distill-eos
13
+
14
+ This model is a fine-tuned version of [Qwen/Qwen2.5-1.5B-Instruct](https://huggingface.co/Qwen/Qwen2.5-1.5B-Instruct).
15
+ It has been trained using [TRL](https://github.com/huggingface/trl).
16
+
17
+ ## Quick start
18
+
19
+ ```python
20
+ from transformers import pipeline
21
+
22
+ question = "If you had a time machine, but could only go to the past or the future once and never return, which would you choose and why?"
23
+ generator = pipeline("text-generation", model="flyingbugs/Qwen2.5-1.5B-Open-R1-Distill-eos", device="cuda")
24
+ output = generator([{"role": "user", "content": question}], max_new_tokens=128, return_full_text=False)[0]
25
+ print(output["generated_text"])
26
+ ```
27
+
28
+ ## Training procedure
29
+
30
+ [<img src="https://raw.githubusercontent.com/wandb/assets/main/wandb-github-badge-28.svg" alt="Visualize in Weights & Biases" width="150" height="24"/>](https://wandb.ai/jjh233/huggingface/runs/jahc2e17)
31
+
32
+
33
+ This model was trained with SFT.
34
+
35
+ ### Framework versions
36
+
37
+ - TRL: 0.16.0.dev0
38
+ - Transformers: 4.54.0
39
+ - Pytorch: 2.5.1
40
+ - Datasets: 4.0.0
41
+ - Tokenizers: 0.21.2
42
+
43
+ ## Citations
44
+
45
+
46
+
47
+ Cite TRL as:
48
+
49
+ ```bibtex
50
+ @misc{vonwerra2022trl,
51
+ title = {{TRL: Transformer Reinforcement Learning}},
52
+ author = {Leandro von Werra and Younes Belkada and Lewis Tunstall and Edward Beeching and Tristan Thrush and Nathan Lambert and Shengyi Huang and Kashif Rasul and Quentin Gallouédec},
53
+ year = 2020,
54
+ journal = {GitHub repository},
55
+ publisher = {GitHub},
56
+ howpublished = {\url{https://github.com/huggingface/trl}}
57
+ }
58
+ ```
all_results.json ADDED
@@ -0,0 +1,8 @@
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "total_flos": 1465863748190208.0,
3
+ "train_loss": 0.52242068924121,
4
+ "train_runtime": 15658.3478,
5
+ "train_samples": 93733,
6
+ "train_samples_per_second": 6.572,
7
+ "train_steps_per_second": 0.051
8
+ }
generation_config.json ADDED
@@ -0,0 +1,14 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "bos_token_id": 151643,
3
+ "do_sample": true,
4
+ "eos_token_id": [
5
+ 151645,
6
+ 151643
7
+ ],
8
+ "pad_token_id": 151643,
9
+ "repetition_penalty": 1.1,
10
+ "temperature": 0.7,
11
+ "top_k": 20,
12
+ "top_p": 0.8,
13
+ "transformers_version": "4.54.0"
14
+ }
model.safetensors CHANGED
@@ -1,3 +1,3 @@
1
  version https://git-lfs.github.com/spec/v1
2
- oid sha256:dd42647bdfc81a8de0520590650c6d121d89ab39ba173e65cc7e037039c3fcef
3
  size 3087467144
 
1
  version https://git-lfs.github.com/spec/v1
2
+ oid sha256:7afe98e27bed91a0d4f4fe77fb25060fa8a97d2b5e2f69361c14ef5d9baa091b
3
  size 3087467144
train_results.json ADDED
@@ -0,0 +1,8 @@
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "total_flos": 1465863748190208.0,
3
+ "train_loss": 0.52242068924121,
4
+ "train_runtime": 15658.3478,
5
+ "train_samples": 93733,
6
+ "train_samples_per_second": 6.572,
7
+ "train_steps_per_second": 0.051
8
+ }
trainer_state.json ADDED
@@ -0,0 +1,1163 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "best_global_step": null,
3
+ "best_metric": null,
4
+ "best_model_checkpoint": null,
5
+ "epoch": 3.0,
6
+ "eval_steps": 500,
7
+ "global_step": 804,
8
+ "is_hyper_param_search": false,
9
+ "is_local_process_zero": true,
10
+ "is_world_process_zero": true,
11
+ "log_history": [
12
+ {
13
+ "epoch": 0.018656716417910446,
14
+ "grad_norm": 1.9214146781432249,
15
+ "learning_rate": 4.8780487804878055e-06,
16
+ "loss": 0.857,
17
+ "step": 5
18
+ },
19
+ {
20
+ "epoch": 0.03731343283582089,
21
+ "grad_norm": 1.7625595046451557,
22
+ "learning_rate": 1.0975609756097562e-05,
23
+ "loss": 0.8038,
24
+ "step": 10
25
+ },
26
+ {
27
+ "epoch": 0.055970149253731345,
28
+ "grad_norm": 0.6948661270341572,
29
+ "learning_rate": 1.707317073170732e-05,
30
+ "loss": 0.7448,
31
+ "step": 15
32
+ },
33
+ {
34
+ "epoch": 0.07462686567164178,
35
+ "grad_norm": 0.5136824696543728,
36
+ "learning_rate": 2.3170731707317075e-05,
37
+ "loss": 0.7055,
38
+ "step": 20
39
+ },
40
+ {
41
+ "epoch": 0.09328358208955224,
42
+ "grad_norm": 0.4018338347498522,
43
+ "learning_rate": 2.926829268292683e-05,
44
+ "loss": 0.6712,
45
+ "step": 25
46
+ },
47
+ {
48
+ "epoch": 0.11194029850746269,
49
+ "grad_norm": 0.28711501314318655,
50
+ "learning_rate": 3.5365853658536584e-05,
51
+ "loss": 0.6599,
52
+ "step": 30
53
+ },
54
+ {
55
+ "epoch": 0.13059701492537312,
56
+ "grad_norm": 0.2894771986907634,
57
+ "learning_rate": 4.146341463414634e-05,
58
+ "loss": 0.6395,
59
+ "step": 35
60
+ },
61
+ {
62
+ "epoch": 0.14925373134328357,
63
+ "grad_norm": 0.2821436189327092,
64
+ "learning_rate": 4.75609756097561e-05,
65
+ "loss": 0.627,
66
+ "step": 40
67
+ },
68
+ {
69
+ "epoch": 0.16791044776119404,
70
+ "grad_norm": 0.3279211667096163,
71
+ "learning_rate": 4.999828351434079e-05,
72
+ "loss": 0.6245,
73
+ "step": 45
74
+ },
75
+ {
76
+ "epoch": 0.1865671641791045,
77
+ "grad_norm": 0.2537209910093848,
78
+ "learning_rate": 4.998779482816942e-05,
79
+ "loss": 0.6048,
80
+ "step": 50
81
+ },
82
+ {
83
+ "epoch": 0.20522388059701493,
84
+ "grad_norm": 0.30352094230828874,
85
+ "learning_rate": 4.996777549883426e-05,
86
+ "loss": 0.6056,
87
+ "step": 55
88
+ },
89
+ {
90
+ "epoch": 0.22388059701492538,
91
+ "grad_norm": 0.35358905732065365,
92
+ "learning_rate": 4.9938234010808136e-05,
93
+ "loss": 0.5995,
94
+ "step": 60
95
+ },
96
+ {
97
+ "epoch": 0.24253731343283583,
98
+ "grad_norm": 0.5881353923808288,
99
+ "learning_rate": 4.989918288418841e-05,
100
+ "loss": 0.5898,
101
+ "step": 65
102
+ },
103
+ {
104
+ "epoch": 0.26119402985074625,
105
+ "grad_norm": 0.4365964873532165,
106
+ "learning_rate": 4.9850638669390816e-05,
107
+ "loss": 0.5856,
108
+ "step": 70
109
+ },
110
+ {
111
+ "epoch": 0.2798507462686567,
112
+ "grad_norm": 0.3310629283404945,
113
+ "learning_rate": 4.97926219401351e-05,
114
+ "loss": 0.583,
115
+ "step": 75
116
+ },
117
+ {
118
+ "epoch": 0.29850746268656714,
119
+ "grad_norm": 0.39095858353376106,
120
+ "learning_rate": 4.9725157284725665e-05,
121
+ "loss": 0.5818,
122
+ "step": 80
123
+ },
124
+ {
125
+ "epoch": 0.31716417910447764,
126
+ "grad_norm": 0.4130266981460891,
127
+ "learning_rate": 4.964827329563061e-05,
128
+ "loss": 0.5777,
129
+ "step": 85
130
+ },
131
+ {
132
+ "epoch": 0.3358208955223881,
133
+ "grad_norm": 0.3251791282337107,
134
+ "learning_rate": 4.956200255736394e-05,
135
+ "loss": 0.5802,
136
+ "step": 90
137
+ },
138
+ {
139
+ "epoch": 0.35447761194029853,
140
+ "grad_norm": 0.2800299594598508,
141
+ "learning_rate": 4.9466381632675714e-05,
142
+ "loss": 0.5777,
143
+ "step": 95
144
+ },
145
+ {
146
+ "epoch": 0.373134328358209,
147
+ "grad_norm": 0.30351899813854083,
148
+ "learning_rate": 4.936145104705629e-05,
149
+ "loss": 0.5732,
150
+ "step": 100
151
+ },
152
+ {
153
+ "epoch": 0.3917910447761194,
154
+ "grad_norm": 0.3184280675590268,
155
+ "learning_rate": 4.9247255271560994e-05,
156
+ "loss": 0.5693,
157
+ "step": 105
158
+ },
159
+ {
160
+ "epoch": 0.41044776119402987,
161
+ "grad_norm": 0.35433866607808917,
162
+ "learning_rate": 4.9123842703962754e-05,
163
+ "loss": 0.5762,
164
+ "step": 110
165
+ },
166
+ {
167
+ "epoch": 0.4291044776119403,
168
+ "grad_norm": 0.28639350303291805,
169
+ "learning_rate": 4.899126564824033e-05,
170
+ "loss": 0.5666,
171
+ "step": 115
172
+ },
173
+ {
174
+ "epoch": 0.44776119402985076,
175
+ "grad_norm": 0.3274186200975153,
176
+ "learning_rate": 4.884958029241127e-05,
177
+ "loss": 0.5674,
178
+ "step": 120
179
+ },
180
+ {
181
+ "epoch": 0.4664179104477612,
182
+ "grad_norm": 0.336381141981706,
183
+ "learning_rate": 4.869884668471853e-05,
184
+ "loss": 0.5641,
185
+ "step": 125
186
+ },
187
+ {
188
+ "epoch": 0.48507462686567165,
189
+ "grad_norm": 0.4812221145192079,
190
+ "learning_rate": 4.8539128708181276e-05,
191
+ "loss": 0.563,
192
+ "step": 130
193
+ },
194
+ {
195
+ "epoch": 0.503731343283582,
196
+ "grad_norm": 0.2985625122934817,
197
+ "learning_rate": 4.8370494053520316e-05,
198
+ "loss": 0.5599,
199
+ "step": 135
200
+ },
201
+ {
202
+ "epoch": 0.5223880597014925,
203
+ "grad_norm": 0.40904102656943087,
204
+ "learning_rate": 4.8193014190469815e-05,
205
+ "loss": 0.5651,
206
+ "step": 140
207
+ },
208
+ {
209
+ "epoch": 0.5410447761194029,
210
+ "grad_norm": 0.30462346527690765,
211
+ "learning_rate": 4.800676433748746e-05,
212
+ "loss": 0.5571,
213
+ "step": 145
214
+ },
215
+ {
216
+ "epoch": 0.5597014925373134,
217
+ "grad_norm": 0.29542772666907074,
218
+ "learning_rate": 4.781182342987577e-05,
219
+ "loss": 0.5548,
220
+ "step": 150
221
+ },
222
+ {
223
+ "epoch": 0.5783582089552238,
224
+ "grad_norm": 0.3051473053785931,
225
+ "learning_rate": 4.7608274086328275e-05,
226
+ "loss": 0.555,
227
+ "step": 155
228
+ },
229
+ {
230
+ "epoch": 0.5970149253731343,
231
+ "grad_norm": 0.27345084859980273,
232
+ "learning_rate": 4.739620257391446e-05,
233
+ "loss": 0.5525,
234
+ "step": 160
235
+ },
236
+ {
237
+ "epoch": 0.6156716417910447,
238
+ "grad_norm": 0.289181458801349,
239
+ "learning_rate": 4.7175698771518656e-05,
240
+ "loss": 0.551,
241
+ "step": 165
242
+ },
243
+ {
244
+ "epoch": 0.6343283582089553,
245
+ "grad_norm": 0.41307993000424736,
246
+ "learning_rate": 4.6946856131748076e-05,
247
+ "loss": 0.5517,
248
+ "step": 170
249
+ },
250
+ {
251
+ "epoch": 0.6529850746268657,
252
+ "grad_norm": 0.5340748917517905,
253
+ "learning_rate": 4.6709771641326244e-05,
254
+ "loss": 0.5509,
255
+ "step": 175
256
+ },
257
+ {
258
+ "epoch": 0.6716417910447762,
259
+ "grad_norm": 0.3291048059480862,
260
+ "learning_rate": 4.6464545779988757e-05,
261
+ "loss": 0.5428,
262
+ "step": 180
263
+ },
264
+ {
265
+ "epoch": 0.6902985074626866,
266
+ "grad_norm": 0.28986748904908194,
267
+ "learning_rate": 4.621128247789846e-05,
268
+ "loss": 0.5456,
269
+ "step": 185
270
+ },
271
+ {
272
+ "epoch": 0.7089552238805971,
273
+ "grad_norm": 0.25439473449314687,
274
+ "learning_rate": 4.595008907159847e-05,
275
+ "loss": 0.5485,
276
+ "step": 190
277
+ },
278
+ {
279
+ "epoch": 0.7276119402985075,
280
+ "grad_norm": 0.22286184197901884,
281
+ "learning_rate": 4.568107625852136e-05,
282
+ "loss": 0.5508,
283
+ "step": 195
284
+ },
285
+ {
286
+ "epoch": 0.746268656716418,
287
+ "grad_norm": 0.2642945823026863,
288
+ "learning_rate": 4.5404358050074115e-05,
289
+ "loss": 0.5448,
290
+ "step": 200
291
+ },
292
+ {
293
+ "epoch": 0.7649253731343284,
294
+ "grad_norm": 0.27448365288562576,
295
+ "learning_rate": 4.512005172331842e-05,
296
+ "loss": 0.5424,
297
+ "step": 205
298
+ },
299
+ {
300
+ "epoch": 0.7835820895522388,
301
+ "grad_norm": 0.2971424901345131,
302
+ "learning_rate": 4.482827777126706e-05,
303
+ "loss": 0.5471,
304
+ "step": 210
305
+ },
306
+ {
307
+ "epoch": 0.8022388059701493,
308
+ "grad_norm": 0.35310755203104544,
309
+ "learning_rate": 4.4529159851817255e-05,
310
+ "loss": 0.541,
311
+ "step": 215
312
+ },
313
+ {
314
+ "epoch": 0.8208955223880597,
315
+ "grad_norm": 0.308625434473984,
316
+ "learning_rate": 4.422282473534271e-05,
317
+ "loss": 0.5405,
318
+ "step": 220
319
+ },
320
+ {
321
+ "epoch": 0.8395522388059702,
322
+ "grad_norm": 0.31816940842512614,
323
+ "learning_rate": 4.3909402250966534e-05,
324
+ "loss": 0.546,
325
+ "step": 225
326
+ },
327
+ {
328
+ "epoch": 0.8582089552238806,
329
+ "grad_norm": 0.22827227012944756,
330
+ "learning_rate": 4.358902523153791e-05,
331
+ "loss": 0.542,
332
+ "step": 230
333
+ },
334
+ {
335
+ "epoch": 0.8768656716417911,
336
+ "grad_norm": 0.24079886565005823,
337
+ "learning_rate": 4.326182945733555e-05,
338
+ "loss": 0.5416,
339
+ "step": 235
340
+ },
341
+ {
342
+ "epoch": 0.8955223880597015,
343
+ "grad_norm": 0.3638342950483874,
344
+ "learning_rate": 4.292795359852221e-05,
345
+ "loss": 0.5372,
346
+ "step": 240
347
+ },
348
+ {
349
+ "epoch": 0.914179104477612,
350
+ "grad_norm": 0.3256070901197167,
351
+ "learning_rate": 4.2587539156374295e-05,
352
+ "loss": 0.5405,
353
+ "step": 245
354
+ },
355
+ {
356
+ "epoch": 0.9328358208955224,
357
+ "grad_norm": 0.3165450063227312,
358
+ "learning_rate": 4.2240730403311586e-05,
359
+ "loss": 0.5364,
360
+ "step": 250
361
+ },
362
+ {
363
+ "epoch": 0.9514925373134329,
364
+ "grad_norm": 0.30669208285819877,
365
+ "learning_rate": 4.188767432175263e-05,
366
+ "loss": 0.5322,
367
+ "step": 255
368
+ },
369
+ {
370
+ "epoch": 0.9701492537313433,
371
+ "grad_norm": 0.257272912960744,
372
+ "learning_rate": 4.1528520541821506e-05,
373
+ "loss": 0.5353,
374
+ "step": 260
375
+ },
376
+ {
377
+ "epoch": 0.9888059701492538,
378
+ "grad_norm": 0.30370874605896786,
379
+ "learning_rate": 4.116342127793245e-05,
380
+ "loss": 0.5385,
381
+ "step": 265
382
+ },
383
+ {
384
+ "epoch": 1.007462686567164,
385
+ "grad_norm": 0.23367419347190874,
386
+ "learning_rate": 4.0792531264279285e-05,
387
+ "loss": 0.5246,
388
+ "step": 270
389
+ },
390
+ {
391
+ "epoch": 1.0261194029850746,
392
+ "grad_norm": 0.28960025979101867,
393
+ "learning_rate": 4.041600768925687e-05,
394
+ "loss": 0.5175,
395
+ "step": 275
396
+ },
397
+ {
398
+ "epoch": 1.044776119402985,
399
+ "grad_norm": 0.3544154485769114,
400
+ "learning_rate": 4.0034010128842484e-05,
401
+ "loss": 0.5183,
402
+ "step": 280
403
+ },
404
+ {
405
+ "epoch": 1.0634328358208955,
406
+ "grad_norm": 0.2830781283593527,
407
+ "learning_rate": 3.964670047896525e-05,
408
+ "loss": 0.5181,
409
+ "step": 285
410
+ },
411
+ {
412
+ "epoch": 1.0820895522388059,
413
+ "grad_norm": 0.26140038642658264,
414
+ "learning_rate": 3.925424288689239e-05,
415
+ "loss": 0.5087,
416
+ "step": 290
417
+ },
418
+ {
419
+ "epoch": 1.1007462686567164,
420
+ "grad_norm": 0.2617191990469776,
421
+ "learning_rate": 3.8856803681661296e-05,
422
+ "loss": 0.516,
423
+ "step": 295
424
+ },
425
+ {
426
+ "epoch": 1.1194029850746268,
427
+ "grad_norm": 0.26589479989678716,
428
+ "learning_rate": 3.8454551303586964e-05,
429
+ "loss": 0.5111,
430
+ "step": 300
431
+ },
432
+ {
433
+ "epoch": 1.1380597014925373,
434
+ "grad_norm": 0.2406368600470144,
435
+ "learning_rate": 3.8047656232874624e-05,
436
+ "loss": 0.5139,
437
+ "step": 305
438
+ },
439
+ {
440
+ "epoch": 1.1567164179104479,
441
+ "grad_norm": 0.25755826798857223,
442
+ "learning_rate": 3.763629091736781e-05,
443
+ "loss": 0.5116,
444
+ "step": 310
445
+ },
446
+ {
447
+ "epoch": 1.1753731343283582,
448
+ "grad_norm": 0.33892425739707405,
449
+ "learning_rate": 3.722062969946254e-05,
450
+ "loss": 0.5118,
451
+ "step": 315
452
+ },
453
+ {
454
+ "epoch": 1.1940298507462686,
455
+ "grad_norm": 0.23443314308723368,
456
+ "learning_rate": 3.6800848742218644e-05,
457
+ "loss": 0.5133,
458
+ "step": 320
459
+ },
460
+ {
461
+ "epoch": 1.212686567164179,
462
+ "grad_norm": 0.23884530654624148,
463
+ "learning_rate": 3.6377125954699254e-05,
464
+ "loss": 0.5113,
465
+ "step": 325
466
+ },
467
+ {
468
+ "epoch": 1.2313432835820897,
469
+ "grad_norm": 0.2090085180372967,
470
+ "learning_rate": 3.5949640916570566e-05,
471
+ "loss": 0.5085,
472
+ "step": 330
473
+ },
474
+ {
475
+ "epoch": 1.25,
476
+ "grad_norm": 0.23291376449474052,
477
+ "learning_rate": 3.551857480199336e-05,
478
+ "loss": 0.5135,
479
+ "step": 335
480
+ },
481
+ {
482
+ "epoch": 1.2686567164179103,
483
+ "grad_norm": 0.24011296587040026,
484
+ "learning_rate": 3.5084110302838916e-05,
485
+ "loss": 0.505,
486
+ "step": 340
487
+ },
488
+ {
489
+ "epoch": 1.287313432835821,
490
+ "grad_norm": 0.21935005312866615,
491
+ "learning_rate": 3.464643155126162e-05,
492
+ "loss": 0.5104,
493
+ "step": 345
494
+ },
495
+ {
496
+ "epoch": 1.3059701492537314,
497
+ "grad_norm": 0.2261621862735514,
498
+ "learning_rate": 3.4205724041661135e-05,
499
+ "loss": 0.5081,
500
+ "step": 350
501
+ },
502
+ {
503
+ "epoch": 1.3246268656716418,
504
+ "grad_norm": 0.2574439184051825,
505
+ "learning_rate": 3.376217455206732e-05,
506
+ "loss": 0.5115,
507
+ "step": 355
508
+ },
509
+ {
510
+ "epoch": 1.3432835820895521,
511
+ "grad_norm": 0.2129876722579311,
512
+ "learning_rate": 3.3315971064981025e-05,
513
+ "loss": 0.506,
514
+ "step": 360
515
+ },
516
+ {
517
+ "epoch": 1.3619402985074627,
518
+ "grad_norm": 0.20753843021299664,
519
+ "learning_rate": 3.286730268770452e-05,
520
+ "loss": 0.5038,
521
+ "step": 365
522
+ },
523
+ {
524
+ "epoch": 1.3805970149253732,
525
+ "grad_norm": 0.3217141035376458,
526
+ "learning_rate": 3.2416359572195155e-05,
527
+ "loss": 0.5132,
528
+ "step": 370
529
+ },
530
+ {
531
+ "epoch": 1.3992537313432836,
532
+ "grad_norm": 0.21539693831207604,
533
+ "learning_rate": 3.1963332834476247e-05,
534
+ "loss": 0.5081,
535
+ "step": 375
536
+ },
537
+ {
538
+ "epoch": 1.417910447761194,
539
+ "grad_norm": 0.27467469680387524,
540
+ "learning_rate": 3.150841447363948e-05,
541
+ "loss": 0.5067,
542
+ "step": 380
543
+ },
544
+ {
545
+ "epoch": 1.4365671641791045,
546
+ "grad_norm": 0.23691311945378055,
547
+ "learning_rate": 3.1051797290472966e-05,
548
+ "loss": 0.5018,
549
+ "step": 385
550
+ },
551
+ {
552
+ "epoch": 1.455223880597015,
553
+ "grad_norm": 0.30787370613511683,
554
+ "learning_rate": 3.059367480574958e-05,
555
+ "loss": 0.5035,
556
+ "step": 390
557
+ },
558
+ {
559
+ "epoch": 1.4738805970149254,
560
+ "grad_norm": 0.21214521272783723,
561
+ "learning_rate": 3.0134241178210103e-05,
562
+ "loss": 0.5004,
563
+ "step": 395
564
+ },
565
+ {
566
+ "epoch": 1.4925373134328357,
567
+ "grad_norm": 0.2567066053575565,
568
+ "learning_rate": 2.9673691122276086e-05,
569
+ "loss": 0.5041,
570
+ "step": 400
571
+ },
572
+ {
573
+ "epoch": 1.5111940298507462,
574
+ "grad_norm": 0.23880722667167875,
575
+ "learning_rate": 2.9212219825527075e-05,
576
+ "loss": 0.4981,
577
+ "step": 405
578
+ },
579
+ {
580
+ "epoch": 1.5298507462686568,
581
+ "grad_norm": 0.19930339353323281,
582
+ "learning_rate": 2.8750022865977443e-05,
583
+ "loss": 0.502,
584
+ "step": 410
585
+ },
586
+ {
587
+ "epoch": 1.5485074626865671,
588
+ "grad_norm": 0.24784475664597616,
589
+ "learning_rate": 2.82872961291876e-05,
590
+ "loss": 0.5068,
591
+ "step": 415
592
+ },
593
+ {
594
+ "epoch": 1.5671641791044775,
595
+ "grad_norm": 0.2637097189189239,
596
+ "learning_rate": 2.7824235725245042e-05,
597
+ "loss": 0.5031,
598
+ "step": 420
599
+ },
600
+ {
601
+ "epoch": 1.585820895522388,
602
+ "grad_norm": 0.20097295957360145,
603
+ "learning_rate": 2.7361037905650032e-05,
604
+ "loss": 0.5051,
605
+ "step": 425
606
+ },
607
+ {
608
+ "epoch": 1.6044776119402986,
609
+ "grad_norm": 0.1855097808926682,
610
+ "learning_rate": 2.689789898014155e-05,
611
+ "loss": 0.5043,
612
+ "step": 430
613
+ },
614
+ {
615
+ "epoch": 1.623134328358209,
616
+ "grad_norm": 0.19387268984078115,
617
+ "learning_rate": 2.6435015233498443e-05,
618
+ "loss": 0.503,
619
+ "step": 435
620
+ },
621
+ {
622
+ "epoch": 1.6417910447761193,
623
+ "grad_norm": 0.1959979856590692,
624
+ "learning_rate": 2.5972582842351156e-05,
625
+ "loss": 0.5031,
626
+ "step": 440
627
+ },
628
+ {
629
+ "epoch": 1.6604477611940298,
630
+ "grad_norm": 0.2130968015995863,
631
+ "learning_rate": 2.551079779203932e-05,
632
+ "loss": 0.5056,
633
+ "step": 445
634
+ },
635
+ {
636
+ "epoch": 1.6791044776119404,
637
+ "grad_norm": 0.19562804614219742,
638
+ "learning_rate": 2.504985579355047e-05,
639
+ "loss": 0.4971,
640
+ "step": 450
641
+ },
642
+ {
643
+ "epoch": 1.6977611940298507,
644
+ "grad_norm": 0.20132877928156498,
645
+ "learning_rate": 2.458995220057491e-05,
646
+ "loss": 0.5048,
647
+ "step": 455
648
+ },
649
+ {
650
+ "epoch": 1.716417910447761,
651
+ "grad_norm": 0.19111009428414005,
652
+ "learning_rate": 2.4131281926712146e-05,
653
+ "loss": 0.5051,
654
+ "step": 460
655
+ },
656
+ {
657
+ "epoch": 1.7350746268656716,
658
+ "grad_norm": 0.19713565236023642,
659
+ "learning_rate": 2.3674039362863687e-05,
660
+ "loss": 0.5052,
661
+ "step": 465
662
+ },
663
+ {
664
+ "epoch": 1.7537313432835822,
665
+ "grad_norm": 0.2278886953059889,
666
+ "learning_rate": 2.3218418294847517e-05,
667
+ "loss": 0.503,
668
+ "step": 470
669
+ },
670
+ {
671
+ "epoch": 1.7723880597014925,
672
+ "grad_norm": 0.21076902236368567,
673
+ "learning_rate": 2.2764611821268918e-05,
674
+ "loss": 0.5004,
675
+ "step": 475
676
+ },
677
+ {
678
+ "epoch": 1.7910447761194028,
679
+ "grad_norm": 0.2018936557220544,
680
+ "learning_rate": 2.231281227168257e-05,
681
+ "loss": 0.4972,
682
+ "step": 480
683
+ },
684
+ {
685
+ "epoch": 1.8097014925373134,
686
+ "grad_norm": 0.1900377713824044,
687
+ "learning_rate": 2.18632111250806e-05,
688
+ "loss": 0.4987,
689
+ "step": 485
690
+ },
691
+ {
692
+ "epoch": 1.828358208955224,
693
+ "grad_norm": 0.1818444743971112,
694
+ "learning_rate": 2.141599892874107e-05,
695
+ "loss": 0.4988,
696
+ "step": 490
697
+ },
698
+ {
699
+ "epoch": 1.8470149253731343,
700
+ "grad_norm": 0.1843911681060789,
701
+ "learning_rate": 2.09713652174714e-05,
702
+ "loss": 0.5045,
703
+ "step": 495
704
+ },
705
+ {
706
+ "epoch": 1.8656716417910446,
707
+ "grad_norm": 0.1756356146626833,
708
+ "learning_rate": 2.0529498433280807e-05,
709
+ "loss": 0.5062,
710
+ "step": 500
711
+ },
712
+ {
713
+ "epoch": 1.8843283582089554,
714
+ "grad_norm": 0.17919268852808168,
715
+ "learning_rate": 2.0090585845516012e-05,
716
+ "loss": 0.5011,
717
+ "step": 505
718
+ },
719
+ {
720
+ "epoch": 1.9029850746268657,
721
+ "grad_norm": 0.1861488051548517,
722
+ "learning_rate": 1.965481347149376e-05,
723
+ "loss": 0.4964,
724
+ "step": 510
725
+ },
726
+ {
727
+ "epoch": 1.921641791044776,
728
+ "grad_norm": 0.1808039014665401,
729
+ "learning_rate": 1.9222365997664165e-05,
730
+ "loss": 0.5038,
731
+ "step": 515
732
+ },
733
+ {
734
+ "epoch": 1.9402985074626866,
735
+ "grad_norm": 0.18185075285557611,
736
+ "learning_rate": 1.8793426701337947e-05,
737
+ "loss": 0.5033,
738
+ "step": 520
739
+ },
740
+ {
741
+ "epoch": 1.9589552238805972,
742
+ "grad_norm": 0.1802698151327418,
743
+ "learning_rate": 1.8368177373010954e-05,
744
+ "loss": 0.498,
745
+ "step": 525
746
+ },
747
+ {
748
+ "epoch": 1.9776119402985075,
749
+ "grad_norm": 0.1839298648566157,
750
+ "learning_rate": 1.7946798239318775e-05,
751
+ "loss": 0.5033,
752
+ "step": 530
753
+ },
754
+ {
755
+ "epoch": 1.9962686567164178,
756
+ "grad_norm": 0.1787192449198445,
757
+ "learning_rate": 1.75294678866542e-05,
758
+ "loss": 0.4937,
759
+ "step": 535
760
+ },
761
+ {
762
+ "epoch": 2.014925373134328,
763
+ "grad_norm": 0.19908015949978794,
764
+ "learning_rate": 1.7116363185479754e-05,
765
+ "loss": 0.4861,
766
+ "step": 540
767
+ },
768
+ {
769
+ "epoch": 2.033582089552239,
770
+ "grad_norm": 0.17330047242909324,
771
+ "learning_rate": 1.670765921536755e-05,
772
+ "loss": 0.4807,
773
+ "step": 545
774
+ },
775
+ {
776
+ "epoch": 2.0522388059701493,
777
+ "grad_norm": 0.1887761738499672,
778
+ "learning_rate": 1.6303529190798088e-05,
779
+ "loss": 0.4807,
780
+ "step": 550
781
+ },
782
+ {
783
+ "epoch": 2.0708955223880596,
784
+ "grad_norm": 0.18086274895322357,
785
+ "learning_rate": 1.590414438774954e-05,
786
+ "loss": 0.478,
787
+ "step": 555
788
+ },
789
+ {
790
+ "epoch": 2.08955223880597,
791
+ "grad_norm": 0.1666312790289245,
792
+ "learning_rate": 1.550967407110856e-05,
793
+ "loss": 0.4801,
794
+ "step": 560
795
+ },
796
+ {
797
+ "epoch": 2.1082089552238807,
798
+ "grad_norm": 0.16804347810729678,
799
+ "learning_rate": 1.5120285422933478e-05,
800
+ "loss": 0.4814,
801
+ "step": 565
802
+ },
803
+ {
804
+ "epoch": 2.126865671641791,
805
+ "grad_norm": 0.1741311542160614,
806
+ "learning_rate": 1.4736143471600173e-05,
807
+ "loss": 0.4841,
808
+ "step": 570
809
+ },
810
+ {
811
+ "epoch": 2.1455223880597014,
812
+ "grad_norm": 0.16178175453073618,
813
+ "learning_rate": 1.4357411021860773e-05,
814
+ "loss": 0.4768,
815
+ "step": 575
816
+ },
817
+ {
818
+ "epoch": 2.1641791044776117,
819
+ "grad_norm": 0.16621312000057425,
820
+ "learning_rate": 1.3984248585844645e-05,
821
+ "loss": 0.4804,
822
+ "step": 580
823
+ },
824
+ {
825
+ "epoch": 2.1828358208955225,
826
+ "grad_norm": 0.16578503722378288,
827
+ "learning_rate": 1.3616814315031146e-05,
828
+ "loss": 0.4806,
829
+ "step": 585
830
+ },
831
+ {
832
+ "epoch": 2.201492537313433,
833
+ "grad_norm": 0.17421634398822453,
834
+ "learning_rate": 1.3255263933222833e-05,
835
+ "loss": 0.477,
836
+ "step": 590
837
+ },
838
+ {
839
+ "epoch": 2.220149253731343,
840
+ "grad_norm": 0.1584162460344385,
841
+ "learning_rate": 1.2899750670547473e-05,
842
+ "loss": 0.4761,
843
+ "step": 595
844
+ },
845
+ {
846
+ "epoch": 2.2388059701492535,
847
+ "grad_norm": 0.1669406945768043,
848
+ "learning_rate": 1.2550425198516973e-05,
849
+ "loss": 0.4807,
850
+ "step": 600
851
+ },
852
+ {
853
+ "epoch": 2.2574626865671643,
854
+ "grad_norm": 0.1755571617029938,
855
+ "learning_rate": 1.2207435566170722e-05,
856
+ "loss": 0.4836,
857
+ "step": 605
858
+ },
859
+ {
860
+ "epoch": 2.2761194029850746,
861
+ "grad_norm": 0.16049791652334905,
862
+ "learning_rate": 1.1870927137330267e-05,
863
+ "loss": 0.48,
864
+ "step": 610
865
+ },
866
+ {
867
+ "epoch": 2.294776119402985,
868
+ "grad_norm": 0.14841988134789108,
869
+ "learning_rate": 1.1541042528992152e-05,
870
+ "loss": 0.4797,
871
+ "step": 615
872
+ },
873
+ {
874
+ "epoch": 2.3134328358208958,
875
+ "grad_norm": 0.14669420324350554,
876
+ "learning_rate": 1.1217921550884774e-05,
877
+ "loss": 0.4781,
878
+ "step": 620
879
+ },
880
+ {
881
+ "epoch": 2.332089552238806,
882
+ "grad_norm": 0.16275970644355697,
883
+ "learning_rate": 1.0901701146215085e-05,
884
+ "loss": 0.4677,
885
+ "step": 625
886
+ },
887
+ {
888
+ "epoch": 2.3507462686567164,
889
+ "grad_norm": 0.15639224942974775,
890
+ "learning_rate": 1.0592515333630128e-05,
891
+ "loss": 0.4795,
892
+ "step": 630
893
+ },
894
+ {
895
+ "epoch": 2.3694029850746268,
896
+ "grad_norm": 0.17837315275471302,
897
+ "learning_rate": 1.029049515041808e-05,
898
+ "loss": 0.4757,
899
+ "step": 635
900
+ },
901
+ {
902
+ "epoch": 2.388059701492537,
903
+ "grad_norm": 0.15999076690352537,
904
+ "learning_rate": 9.99576859697277e-06,
905
+ "loss": 0.483,
906
+ "step": 640
907
+ },
908
+ {
909
+ "epoch": 2.406716417910448,
910
+ "grad_norm": 0.14623430655091096,
911
+ "learning_rate": 9.708460582545337e-06,
912
+ "loss": 0.479,
913
+ "step": 645
914
+ },
915
+ {
916
+ "epoch": 2.425373134328358,
917
+ "grad_norm": 0.14446705361316536,
918
+ "learning_rate": 9.428692872305925e-06,
919
+ "loss": 0.4782,
920
+ "step": 650
921
+ },
922
+ {
923
+ "epoch": 2.4440298507462686,
924
+ "grad_norm": 0.1499556136205738,
925
+ "learning_rate": 9.15658403573792e-06,
926
+ "loss": 0.4771,
927
+ "step": 655
928
+ },
929
+ {
930
+ "epoch": 2.4626865671641793,
931
+ "grad_norm": 0.13688966492338145,
932
+ "learning_rate": 8.892249396386513e-06,
933
+ "loss": 0.477,
934
+ "step": 660
935
+ },
936
+ {
937
+ "epoch": 2.4813432835820897,
938
+ "grad_norm": 0.1405824099115518,
939
+ "learning_rate": 8.635800982982958e-06,
940
+ "loss": 0.4801,
941
+ "step": 665
942
+ },
943
+ {
944
+ "epoch": 2.5,
945
+ "grad_norm": 0.1556802805035451,
946
+ "learning_rate": 8.387347481965244e-06,
947
+ "loss": 0.4767,
948
+ "step": 670
949
+ },
950
+ {
951
+ "epoch": 2.5186567164179103,
952
+ "grad_norm": 0.14957588217337456,
953
+ "learning_rate": 8.14699419141525e-06,
954
+ "loss": 0.4796,
955
+ "step": 675
956
+ },
957
+ {
958
+ "epoch": 2.5373134328358207,
959
+ "grad_norm": 0.15266928757717518,
960
+ "learning_rate": 7.914842976431932e-06,
961
+ "loss": 0.4752,
962
+ "step": 680
963
+ },
964
+ {
965
+ "epoch": 2.5559701492537314,
966
+ "grad_norm": 0.15337919631830368,
967
+ "learning_rate": 7.690992225959465e-06,
968
+ "loss": 0.4805,
969
+ "step": 685
970
+ },
971
+ {
972
+ "epoch": 2.574626865671642,
973
+ "grad_norm": 0.14478875983801728,
974
+ "learning_rate": 7.4755368110886366e-06,
975
+ "loss": 0.4768,
976
+ "step": 690
977
+ },
978
+ {
979
+ "epoch": 2.593283582089552,
980
+ "grad_norm": 0.15398254303144288,
981
+ "learning_rate": 7.268568044849132e-06,
982
+ "loss": 0.4765,
983
+ "step": 695
984
+ },
985
+ {
986
+ "epoch": 2.611940298507463,
987
+ "grad_norm": 0.15439199576307303,
988
+ "learning_rate": 7.0701736435098155e-06,
989
+ "loss": 0.4782,
990
+ "step": 700
991
+ },
992
+ {
993
+ "epoch": 2.6305970149253732,
994
+ "grad_norm": 0.14978173171132997,
995
+ "learning_rate": 6.880437689403316e-06,
996
+ "loss": 0.4741,
997
+ "step": 705
998
+ },
999
+ {
1000
+ "epoch": 2.6492537313432836,
1001
+ "grad_norm": 0.1412522116274614,
1002
+ "learning_rate": 6.699440595290754e-06,
1003
+ "loss": 0.4778,
1004
+ "step": 710
1005
+ },
1006
+ {
1007
+ "epoch": 2.667910447761194,
1008
+ "grad_norm": 0.15039092893602207,
1009
+ "learning_rate": 6.527259070281722e-06,
1010
+ "loss": 0.4816,
1011
+ "step": 715
1012
+ },
1013
+ {
1014
+ "epoch": 2.6865671641791042,
1015
+ "grad_norm": 0.14048749084616627,
1016
+ "learning_rate": 6.363966087323844e-06,
1017
+ "loss": 0.48,
1018
+ "step": 720
1019
+ },
1020
+ {
1021
+ "epoch": 2.705223880597015,
1022
+ "grad_norm": 0.14837195306929193,
1023
+ "learning_rate": 6.209630852275836e-06,
1024
+ "loss": 0.4715,
1025
+ "step": 725
1026
+ },
1027
+ {
1028
+ "epoch": 2.7238805970149254,
1029
+ "grad_norm": 0.14480369337511617,
1030
+ "learning_rate": 6.06431877457709e-06,
1031
+ "loss": 0.4752,
1032
+ "step": 730
1033
+ },
1034
+ {
1035
+ "epoch": 2.7425373134328357,
1036
+ "grad_norm": 0.14464453043475206,
1037
+ "learning_rate": 5.928091439526226e-06,
1038
+ "loss": 0.4758,
1039
+ "step": 735
1040
+ },
1041
+ {
1042
+ "epoch": 2.7611940298507465,
1043
+ "grad_norm": 0.14318518863987956,
1044
+ "learning_rate": 5.801006582180398e-06,
1045
+ "loss": 0.4795,
1046
+ "step": 740
1047
+ },
1048
+ {
1049
+ "epoch": 2.779850746268657,
1050
+ "grad_norm": 0.1378858672961454,
1051
+ "learning_rate": 5.683118062886346e-06,
1052
+ "loss": 0.478,
1053
+ "step": 745
1054
+ },
1055
+ {
1056
+ "epoch": 2.798507462686567,
1057
+ "grad_norm": 0.1473158474351985,
1058
+ "learning_rate": 5.574475844453634e-06,
1059
+ "loss": 0.477,
1060
+ "step": 750
1061
+ },
1062
+ {
1063
+ "epoch": 2.8171641791044775,
1064
+ "grad_norm": 0.13519605107478774,
1065
+ "learning_rate": 5.475125970979702e-06,
1066
+ "loss": 0.4777,
1067
+ "step": 755
1068
+ },
1069
+ {
1070
+ "epoch": 2.835820895522388,
1071
+ "grad_norm": 0.1469374974433458,
1072
+ "learning_rate": 5.385110548335753e-06,
1073
+ "loss": 0.4746,
1074
+ "step": 760
1075
+ },
1076
+ {
1077
+ "epoch": 2.8544776119402986,
1078
+ "grad_norm": 0.13682187728455042,
1079
+ "learning_rate": 5.30446772632166e-06,
1080
+ "loss": 0.4778,
1081
+ "step": 765
1082
+ },
1083
+ {
1084
+ "epoch": 2.873134328358209,
1085
+ "grad_norm": 0.1468534460053792,
1086
+ "learning_rate": 5.233231682497572e-06,
1087
+ "loss": 0.4791,
1088
+ "step": 770
1089
+ },
1090
+ {
1091
+ "epoch": 2.8917910447761193,
1092
+ "grad_norm": 0.1381366037912041,
1093
+ "learning_rate": 5.171432607698975e-06,
1094
+ "loss": 0.4737,
1095
+ "step": 775
1096
+ },
1097
+ {
1098
+ "epoch": 2.91044776119403,
1099
+ "grad_norm": 0.13873604296833256,
1100
+ "learning_rate": 5.119096693241395e-06,
1101
+ "loss": 0.4785,
1102
+ "step": 780
1103
+ },
1104
+ {
1105
+ "epoch": 2.9291044776119404,
1106
+ "grad_norm": 0.1354800953615682,
1107
+ "learning_rate": 5.07624611982014e-06,
1108
+ "loss": 0.4719,
1109
+ "step": 785
1110
+ },
1111
+ {
1112
+ "epoch": 2.9477611940298507,
1113
+ "grad_norm": 0.1412434132198824,
1114
+ "learning_rate": 5.0428990481098275e-06,
1115
+ "loss": 0.481,
1116
+ "step": 790
1117
+ },
1118
+ {
1119
+ "epoch": 2.966417910447761,
1120
+ "grad_norm": 0.14472205562980991,
1121
+ "learning_rate": 5.01906961106762e-06,
1122
+ "loss": 0.4742,
1123
+ "step": 795
1124
+ },
1125
+ {
1126
+ "epoch": 2.9850746268656714,
1127
+ "grad_norm": 0.1389514632202056,
1128
+ "learning_rate": 5.004767907943488e-06,
1129
+ "loss": 0.4824,
1130
+ "step": 800
1131
+ },
1132
+ {
1133
+ "epoch": 3.0,
1134
+ "step": 804,
1135
+ "total_flos": 1465863748190208.0,
1136
+ "train_loss": 0.52242068924121,
1137
+ "train_runtime": 15658.3478,
1138
+ "train_samples_per_second": 6.572,
1139
+ "train_steps_per_second": 0.051
1140
+ }
1141
+ ],
1142
+ "logging_steps": 5,
1143
+ "max_steps": 804,
1144
+ "num_input_tokens_seen": 0,
1145
+ "num_train_epochs": 3,
1146
+ "save_steps": 100,
1147
+ "stateful_callbacks": {
1148
+ "TrainerControl": {
1149
+ "args": {
1150
+ "should_epoch_stop": false,
1151
+ "should_evaluate": false,
1152
+ "should_log": false,
1153
+ "should_save": true,
1154
+ "should_training_stop": true
1155
+ },
1156
+ "attributes": {}
1157
+ }
1158
+ },
1159
+ "total_flos": 1465863748190208.0,
1160
+ "train_batch_size": 16,
1161
+ "trial_name": null,
1162
+ "trial_params": null
1163
+ }