{
  "best_global_step": 3400,
  "best_metric": 0.8500096117258309,
  "best_model_checkpoint": "output/PKOBP-polish-roberta-8k_lr1e-5_tb32_w0.1_ep4_wd0.03/checkpoint-3400",
  "epoch": 3.78619153674833,
  "eval_steps": 200,
  "global_step": 3400,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.111358574610245,
      "grad_norm": 0.6548389792442322,
      "learning_rate": 2.7500000000000004e-06,
      "loss": 0.6139,
      "step": 100
    },
    {
      "epoch": 0.22271714922049,
      "grad_norm": 1.2604601383209229,
      "learning_rate": 5.527777777777779e-06,
      "loss": 0.4821,
      "step": 200
    },
    {
      "epoch": 0.22271714922049,
      "eval_accuracy": 0.35198329853862215,
      "eval_f1_label_gniew": 0.6845324788814448,
      "eval_f1_label_negatywny": 0.7949462182004439,
      "eval_f1_label_neutralny": 0.7492940069030436,
      "eval_f1_label_pozytywny": 0.42994241842610365,
      "eval_f1_label_przeczuwanie": 0.0,
      "eval_f1_label_rado\u015b\u0107": 0.19078947368421054,
      "eval_f1_label_sarkazm": 0.0,
      "eval_f1_label_wstr\u0119t": 0.5752023860247124,
      "eval_f1_macro": 0.7509803650720916,
      "eval_f1_micro": 0.8395441892832289,
      "eval_f1_weighted": 0.8265873475311692,
      "eval_loss": 0.41080573201179504,
      "eval_runtime": 13.553,
      "eval_samples_per_second": 530.14,
      "eval_steps_per_second": 16.601,
      "eval_subset_accuracy": 0.8395441892832289,
      "step": 200
    },
    {
      "epoch": 0.33407572383073497,
      "grad_norm": 1.7567846775054932,
      "learning_rate": 8.305555555555557e-06,
      "loss": 0.4129,
      "step": 300
    },
    {
      "epoch": 0.44543429844098,
      "grad_norm": 2.582125186920166,
      "learning_rate": 9.99640768631301e-06,
      "loss": 0.3769,
      "step": 400
    },
    {
      "epoch": 0.44543429844098,
      "eval_accuracy": 0.4321503131524008,
      "eval_f1_label_gniew": 0.7182017543859649,
      "eval_f1_label_negatywny": 0.8120326308633583,
      "eval_f1_label_neutralny": 0.7886710239651417,
      "eval_f1_label_pozytywny": 0.7161493477282951,
      "eval_f1_label_przeczuwanie": 0.534637326813366,
      "eval_f1_label_rado\u015b\u0107": 0.6868131868131868,
      "eval_f1_label_sarkazm": 0.005263157894736842,
      "eval_f1_label_wstr\u0119t": 0.6907111262334127,
      "eval_f1_macro": 0.8079475306110289,
      "eval_f1_micro": 0.8666144745998609,
      "eval_f1_weighted": 0.8619885595665887,
      "eval_loss": 0.33686527609825134,
      "eval_runtime": 13.4667,
      "eval_samples_per_second": 533.539,
      "eval_steps_per_second": 16.708,
      "eval_subset_accuracy": 0.8666144745998609,
      "step": 400
    },
    {
      "epoch": 0.5567928730512249,
      "grad_norm": 2.2375481128692627,
      "learning_rate": 9.954431380479526e-06,
      "loss": 0.3415,
      "step": 500
    },
    {
      "epoch": 0.6681514476614699,
      "grad_norm": 2.377732515335083,
      "learning_rate": 9.86568060370307e-06,
      "loss": 0.3359,
      "step": 600
    },
    {
      "epoch": 0.6681514476614699,
      "eval_accuracy": 0.44231036882393876,
      "eval_f1_label_gniew": 0.7346007604562738,
      "eval_f1_label_negatywny": 0.8233744855967078,
      "eval_f1_label_neutralny": 0.7887421820708825,
      "eval_f1_label_pozytywny": 0.7572641931157801,
      "eval_f1_label_przeczuwanie": 0.6198969480776853,
      "eval_f1_label_rado\u015b\u0107": 0.7128842380640942,
      "eval_f1_label_sarkazm": 0.18295543393275998,
      "eval_f1_label_wstr\u0119t": 0.6842447916666666,
      "eval_f1_macro": 0.8208003215407338,
      "eval_f1_micro": 0.8731384829505915,
      "eval_f1_weighted": 0.8701057540509732,
      "eval_loss": 0.307725727558136,
      "eval_runtime": 13.3682,
      "eval_samples_per_second": 537.472,
      "eval_steps_per_second": 16.831,
      "eval_subset_accuracy": 0.8731384829505915,
      "step": 600
    },
    {
      "epoch": 0.779510022271715,
      "grad_norm": 2.0692615509033203,
      "learning_rate": 9.730993246411562e-06,
      "loss": 0.3179,
      "step": 700
    },
    {
      "epoch": 0.89086859688196,
      "grad_norm": 3.814361095428467,
      "learning_rate": 9.551640883362268e-06,
      "loss": 0.3184,
      "step": 800
    },
    {
      "epoch": 0.89086859688196,
      "eval_accuracy": 0.4701461377870564,
      "eval_f1_label_gniew": 0.7435356200527704,
      "eval_f1_label_negatywny": 0.8380327868852459,
      "eval_f1_label_neutralny": 0.8036529680365296,
      "eval_f1_label_pozytywny": 0.7029945999018163,
      "eval_f1_label_przeczuwanie": 0.6724700761697497,
      "eval_f1_label_rado\u015b\u0107": 0.6844694307800422,
      "eval_f1_label_sarkazm": 0.3567289117836446,
      "eval_f1_label_wstr\u0119t": 0.7084769124741558,
      "eval_f1_macro": 0.8300609729014572,
      "eval_f1_micro": 0.8788100208768267,
      "eval_f1_weighted": 0.8763999627004312,
      "eval_loss": 0.29314953088760376,
      "eval_runtime": 13.6772,
      "eval_samples_per_second": 525.325,
      "eval_steps_per_second": 16.451,
      "eval_subset_accuracy": 0.8788100208768267,
      "step": 800
    },
    {
      "epoch": 1.0022271714922049,
      "grad_norm": 2.8335301876068115,
      "learning_rate": 9.329316768785544e-06,
      "loss": 0.3097,
      "step": 900
    },
    {
      "epoch": 1.1135857461024499,
      "grad_norm": 1.909611701965332,
      "learning_rate": 9.066119850479551e-06,
      "loss": 0.2756,
      "step": 1000
    },
    {
      "epoch": 1.1135857461024499,
      "eval_accuracy": 0.4810020876826722,
      "eval_f1_label_gniew": 0.7485624673288029,
      "eval_f1_label_negatywny": 0.8409730266423961,
      "eval_f1_label_neutralny": 0.8091783364265227,
      "eval_f1_label_pozytywny": 0.7727272727272727,
      "eval_f1_label_przeczuwanie": 0.681491618200479,
      "eval_f1_label_rado\u015b\u0107": 0.7310513447432763,
      "eval_f1_label_sarkazm": 0.3149171270718232,
      "eval_f1_label_wstr\u0119t": 0.7109237794169301,
      "eval_f1_macro": 0.8361271998835582,
      "eval_f1_micro": 0.8823938761308281,
      "eval_f1_weighted": 0.8804578924937377,
      "eval_loss": 0.28486499190330505,
      "eval_runtime": 14.7594,
      "eval_samples_per_second": 486.807,
      "eval_steps_per_second": 15.244,
      "eval_subset_accuracy": 0.8823938761308281,
      "step": 1000
    },
    {
      "epoch": 1.2249443207126949,
      "grad_norm": 4.029219150543213,
      "learning_rate": 8.764534953777842e-06,
      "loss": 0.2685,
      "step": 1100
    },
    {
      "epoch": 1.3363028953229399,
      "grad_norm": 1.7298495769500732,
      "learning_rate": 8.427409322471743e-06,
      "loss": 0.2622,
      "step": 1200
    },
    {
      "epoch": 1.3363028953229399,
      "eval_accuracy": 0.4683368128044537,
      "eval_f1_label_gniew": 0.7479035639412998,
      "eval_f1_label_negatywny": 0.8449914316871787,
      "eval_f1_label_neutralny": 0.7788461538461539,
      "eval_f1_label_pozytywny": 0.7732656514382402,
      "eval_f1_label_przeczuwanie": 0.7060367454068242,
      "eval_f1_label_rado\u015b\u0107": 0.7399756986634265,
      "eval_f1_label_sarkazm": 0.4,
      "eval_f1_label_wstr\u0119t": 0.7199725463280714,
      "eval_f1_macro": 0.8361219089623048,
      "eval_f1_micro": 0.881419624217119,
      "eval_f1_weighted": 0.8799866455346702,
      "eval_loss": 0.28199732303619385,
      "eval_runtime": 13.3959,
      "eval_samples_per_second": 536.358,
      "eval_steps_per_second": 16.796,
      "eval_subset_accuracy": 0.881419624217119,
      "step": 1200
    },
    {
      "epoch": 1.447661469933185,
      "grad_norm": 2.291908025741577,
      "learning_rate": 8.057925738163153e-06,
      "loss": 0.2689,
      "step": 1300
    },
    {
      "epoch": 1.5590200445434297,
      "grad_norm": 2.64717960357666,
      "learning_rate": 7.659572471826265e-06,
      "loss": 0.2653,
      "step": 1400
    },
    {
      "epoch": 1.5590200445434297,
      "eval_accuracy": 0.48434237995824636,
      "eval_f1_label_gniew": 0.7580278128950695,
      "eval_f1_label_negatywny": 0.8478637868516475,
      "eval_f1_label_neutralny": 0.8011984490659148,
      "eval_f1_label_pozytywny": 0.777292576419214,
      "eval_f1_label_przeczuwanie": 0.6951871657754011,
      "eval_f1_label_rado\u015b\u0107": 0.7375621890547264,
      "eval_f1_label_sarkazm": 0.47809206877426513,
      "eval_f1_label_wstr\u0119t": 0.7300771208226221,
      "eval_f1_macro": 0.8415650899916123,
      "eval_f1_micro": 0.8838726513569938,
      "eval_f1_weighted": 0.8832473603278248,
      "eval_loss": 0.27665975689888,
      "eval_runtime": 13.39,
      "eval_samples_per_second": 536.594,
      "eval_steps_per_second": 16.804,
      "eval_subset_accuracy": 0.8838726513569938,
      "step": 1400
    },
    {
      "epoch": 1.670378619153675,
      "grad_norm": 2.742314100265503,
      "learning_rate": 7.236110351263578e-06,
      "loss": 0.2551,
      "step": 1500
    },
    {
      "epoch": 1.7817371937639197,
      "grad_norm": 2.0853464603424072,
      "learning_rate": 6.7915372553702926e-06,
      "loss": 0.253,
      "step": 1600
    },
    {
      "epoch": 1.7817371937639197,
      "eval_accuracy": 0.49255393180236606,
      "eval_f1_label_gniew": 0.745141588006663,
      "eval_f1_label_negatywny": 0.8427041499330656,
      "eval_f1_label_neutralny": 0.8148646376324197,
      "eval_f1_label_pozytywny": 0.7725178047758693,
      "eval_f1_label_przeczuwanie": 0.7135483870967742,
      "eval_f1_label_rado\u015b\u0107": 0.7449786975045648,
      "eval_f1_label_sarkazm": 0.45395894428152495,
      "eval_f1_label_wstr\u0119t": 0.7020958083832335,
      "eval_f1_macro": 0.8419394716369004,
      "eval_f1_micro": 0.8862908837856646,
      "eval_f1_weighted": 0.8845662361245985,
      "eval_loss": 0.27193114161491394,
      "eval_runtime": 13.4458,
      "eval_samples_per_second": 534.368,
      "eval_steps_per_second": 16.734,
      "eval_subset_accuracy": 0.8862908837856646,
      "step": 1600
    },
    {
      "epoch": 1.893095768374165,
      "grad_norm": 2.6639328002929688,
      "learning_rate": 6.330050370414403e-06,
      "loss": 0.2502,
      "step": 1700
    },
    {
      "epoch": 2.0044543429844097,
      "grad_norm": 2.5934195518493652,
      "learning_rate": 5.856006564668624e-06,
      "loss": 0.2511,
      "step": 1800
    },
    {
      "epoch": 2.0044543429844097,
      "eval_accuracy": 0.46583159359777315,
      "eval_f1_label_gniew": 0.7512172501738928,
      "eval_f1_label_negatywny": 0.8434200743494423,
      "eval_f1_label_neutralny": 0.7797117724125023,
      "eval_f1_label_pozytywny": 0.7771623348956114,
      "eval_f1_label_przeczuwanie": 0.7179814755669115,
      "eval_f1_label_rado\u015b\u0107": 0.7299448867115738,
      "eval_f1_label_sarkazm": 0.5232903865213082,
      "eval_f1_label_wstr\u0119t": 0.7176891006551519,
      "eval_f1_macro": 0.8367166256068449,
      "eval_f1_micro": 0.8771746694502436,
      "eval_f1_weighted": 0.8780966128511206,
      "eval_loss": 0.2889347970485687,
      "eval_runtime": 13.5639,
      "eval_samples_per_second": 529.716,
      "eval_steps_per_second": 16.588,
      "eval_subset_accuracy": 0.8771746694502436,
      "step": 1800
    },
    {
      "epoch": 2.115812917594655,
      "grad_norm": 2.778862714767456,
      "learning_rate": 5.373881255494652e-06,
      "loss": 0.2128,
      "step": 1900
    },
    {
      "epoch": 2.2271714922048997,
      "grad_norm": 2.1749610900878906,
      "learning_rate": 4.888226157213061e-06,
      "loss": 0.2068,
      "step": 2000
    },
    {
      "epoch": 2.2271714922048997,
      "eval_accuracy": 0.5082811412665275,
      "eval_f1_label_gniew": 0.7617009895694036,
      "eval_f1_label_negatywny": 0.8477131847880717,
      "eval_f1_label_neutralny": 0.8223467369808833,
      "eval_f1_label_pozytywny": 0.7810858143607706,
      "eval_f1_label_przeczuwanie": 0.7022071307300509,
      "eval_f1_label_rado\u015b\u0107": 0.7382636655948553,
      "eval_f1_label_sarkazm": 0.46941176470588236,
      "eval_f1_label_wstr\u0119t": 0.7270175438596491,
      "eval_f1_macro": 0.8469364739492785,
      "eval_f1_micro": 0.8896659707724426,
      "eval_f1_weighted": 0.8881098685832289,
      "eval_loss": 0.2712438106536865,
      "eval_runtime": 13.4837,
      "eval_samples_per_second": 532.864,
      "eval_steps_per_second": 16.687,
      "eval_subset_accuracy": 0.8896659707724426,
      "step": 2000
    },
    {
      "epoch": 2.338530066815145,
      "grad_norm": 3.2917253971099854,
      "learning_rate": 4.403626308658459e-06,
      "loss": 0.2042,
      "step": 2100
    },
    {
      "epoch": 2.4498886414253898,
      "grad_norm": 2.2220280170440674,
      "learning_rate": 3.9246567861200166e-06,
      "loss": 0.2097,
      "step": 2200
    },
    {
      "epoch": 2.4498886414253898,
      "eval_accuracy": 0.4989561586638831,
      "eval_f1_label_gniew": 0.75792897804283,
      "eval_f1_label_negatywny": 0.8532080362929358,
      "eval_f1_label_neutralny": 0.8129346314325452,
      "eval_f1_label_pozytywny": 0.7774578362813657,
      "eval_f1_label_przeczuwanie": 0.7263442475217783,
      "eval_f1_label_rado\u015b\u0107": 0.7484811664641555,
      "eval_f1_label_sarkazm": 0.5211047420531527,
      "eval_f1_label_wstr\u0119t": 0.7377866400797607,
      "eval_f1_macro": 0.8475632085047498,
      "eval_f1_micro": 0.8878044537230341,
      "eval_f1_weighted": 0.8874379181538837,
      "eval_loss": 0.2748867869377136,
      "eval_runtime": 13.5472,
      "eval_samples_per_second": 530.369,
      "eval_steps_per_second": 16.609,
      "eval_subset_accuracy": 0.8878044537230341,
      "step": 2200
    },
    {
      "epoch": 2.5612472160356345,
      "grad_norm": 4.778714179992676,
      "learning_rate": 3.4558395103377882e-06,
      "loss": 0.202,
      "step": 2300
    },
    {
      "epoch": 2.6726057906458798,
      "grad_norm": 2.9350364208221436,
      "learning_rate": 3.0016005553371464e-06,
      "loss": 0.2042,
      "step": 2400
    },
    {
      "epoch": 2.6726057906458798,
      "eval_accuracy": 0.5029923451635352,
      "eval_f1_label_gniew": 0.7653221581980094,
      "eval_f1_label_negatywny": 0.8546814044213265,
      "eval_f1_label_neutralny": 0.8173642338291248,
      "eval_f1_label_pozytywny": 0.7812627291242362,
      "eval_f1_label_przeczuwanie": 0.7192023158571889,
      "eval_f1_label_rado\u015b\u0107": 0.7540394973070018,
      "eval_f1_label_sarkazm": 0.5525328330206379,
      "eval_f1_label_wstr\u0119t": 0.7323117052494295,
      "eval_f1_macro": 0.8486039951352532,
      "eval_f1_micro": 0.8879262352122478,
      "eval_f1_weighted": 0.8878859652865869,
      "eval_loss": 0.27490347623825073,
      "eval_runtime": 13.5014,
      "eval_samples_per_second": 532.167,
      "eval_steps_per_second": 16.665,
      "eval_subset_accuracy": 0.8879262352122478,
      "step": 2400
    },
    {
      "epoch": 2.7839643652561246,
      "grad_norm": 3.124579906463623,
      "learning_rate": 2.566228362145968e-06,
      "loss": 0.1985,
      "step": 2500
    },
    {
      "epoch": 2.89532293986637,
      "grad_norm": 2.5567288398742676,
      "learning_rate": 2.1538332518961635e-06,
      "loss": 0.2071,
      "step": 2600
    },
    {
      "epoch": 2.89532293986637,
      "eval_accuracy": 0.5102296450939457,
      "eval_f1_label_gniew": 0.7664253150978815,
      "eval_f1_label_negatywny": 0.8534298404342819,
      "eval_f1_label_neutralny": 0.8198167628096369,
      "eval_f1_label_pozytywny": 0.7838400666389005,
      "eval_f1_label_przeczuwanie": 0.7165740438051651,
      "eval_f1_label_rado\u015b\u0107": 0.7517985611510791,
      "eval_f1_label_sarkazm": 0.4584795321637427,
      "eval_f1_label_wstr\u0119t": 0.7267462902642056,
      "eval_f1_macro": 0.8485724561583636,
      "eval_f1_micro": 0.8903096729297146,
      "eval_f1_weighted": 0.8890469205384153,
      "eval_loss": 0.2728336453437805,
      "eval_runtime": 13.4,
      "eval_samples_per_second": 536.194,
      "eval_steps_per_second": 16.791,
      "eval_subset_accuracy": 0.8903096729297146,
      "step": 2600
    },
    {
      "epoch": 3.0066815144766146,
      "grad_norm": 3.9624898433685303,
      "learning_rate": 1.76830862054381e-06,
      "loss": 0.2014,
      "step": 2700
    },
    {
      "epoch": 3.11804008908686,
      "grad_norm": 1.9180693626403809,
      "learning_rate": 1.4132941815660222e-06,
      "loss": 0.1767,
      "step": 2800
    },
    {
      "epoch": 3.11804008908686,
      "eval_accuracy": 0.5039665970772442,
      "eval_f1_label_gniew": 0.7623196297304655,
      "eval_f1_label_negatywny": 0.8542283645936163,
      "eval_f1_label_neutralny": 0.8082359099633571,
      "eval_f1_label_pozytywny": 0.7777777777777778,
      "eval_f1_label_przeczuwanie": 0.7178329571106095,
      "eval_f1_label_rado\u015b\u0107": 0.745920745920746,
      "eval_f1_label_sarkazm": 0.5346432264736298,
      "eval_f1_label_wstr\u0119t": 0.7262289199856476,
      "eval_f1_macro": 0.8473642910777622,
      "eval_f1_micro": 0.8887265135699374,
      "eval_f1_weighted": 0.8878169372581985,
      "eval_loss": 0.2742355167865753,
      "eval_runtime": 16.0479,
      "eval_samples_per_second": 447.723,
      "eval_steps_per_second": 14.021,
      "eval_subset_accuracy": 0.8887265135699374,
      "step": 2800
    },
    {
      "epoch": 3.2293986636971046,
      "grad_norm": 2.348484992980957,
      "learning_rate": 1.092141603657972e-06,
      "loss": 0.1773,
      "step": 2900
    },
    {
      "epoch": 3.34075723830735,
      "grad_norm": 2.2864317893981934,
      "learning_rate": 8.078828678422995e-07,
      "loss": 0.173,
      "step": 3000
    },
    {
      "epoch": 3.34075723830735,
      "eval_accuracy": 0.508141962421712,
      "eval_f1_label_gniew": 0.7665431445209105,
      "eval_f1_label_negatywny": 0.8550937245313773,
      "eval_f1_label_neutralny": 0.8155373032169747,
      "eval_f1_label_pozytywny": 0.781314592806945,
      "eval_f1_label_przeczuwanie": 0.7226675216415518,
      "eval_f1_label_rado\u015b\u0107": 0.7475728155339806,
      "eval_f1_label_sarkazm": 0.5287713841368584,
      "eval_f1_label_wstr\u0119t": 0.7339011925042589,
      "eval_f1_macro": 0.8494136493629865,
      "eval_f1_micro": 0.889544189283229,
      "eval_f1_weighted": 0.8889910470048148,
      "eval_loss": 0.27491486072540283,
      "eval_runtime": 13.5687,
      "eval_samples_per_second": 529.527,
      "eval_steps_per_second": 16.582,
      "eval_subset_accuracy": 0.889544189283229,
      "step": 3000
    },
    {
      "epoch": 3.4521158129175946,
      "grad_norm": 2.671910524368286,
      "learning_rate": 5.632016427295134e-07,
      "loss": 0.1756,
      "step": 3100
    },
    {
      "epoch": 3.5634743875278394,
      "grad_norm": 3.2793824672698975,
      "learning_rate": 3.6040794817364335e-07,
      "loss": 0.1695,
      "step": 3200
    },
    {
      "epoch": 3.5634743875278394,
      "eval_accuracy": 0.511482254697286,
      "eval_f1_label_gniew": 0.7687056970333421,
      "eval_f1_label_negatywny": 0.8545868575146389,
      "eval_f1_label_neutralny": 0.8166494668042655,
      "eval_f1_label_pozytywny": 0.7816091954022989,
      "eval_f1_label_przeczuwanie": 0.7237728585178056,
      "eval_f1_label_rado\u015b\u0107": 0.7479091995221028,
      "eval_f1_label_sarkazm": 0.527850078084331,
      "eval_f1_label_wstr\u0119t": 0.731740614334471,
      "eval_f1_macro": 0.8495682589006989,
      "eval_f1_micro": 0.889544189283229,
      "eval_f1_weighted": 0.8890490603152431,
      "eval_loss": 0.27663180232048035,
      "eval_runtime": 13.4661,
      "eval_samples_per_second": 533.56,
      "eval_steps_per_second": 16.709,
      "eval_subset_accuracy": 0.889544189283229,
      "step": 3200
    },
    {
      "epoch": 3.6748329621380846,
      "grad_norm": 1.78013277053833,
      "learning_rate": 2.0141634652200205e-07,
      "loss": 0.167,
      "step": 3300
    },
    {
      "epoch": 3.78619153674833,
      "grad_norm": 2.706618070602417,
      "learning_rate": 8.772786735407601e-08,
      "loss": 0.172,
      "step": 3400
    },
    {
      "epoch": 3.78619153674833,
      "eval_accuracy": 0.5124565066109952,
      "eval_f1_label_gniew": 0.7693124502256438,
      "eval_f1_label_negatywny": 0.8553212358999509,
      "eval_f1_label_neutralny": 0.8172006167551824,
      "eval_f1_label_pozytywny": 0.7813649366571311,
      "eval_f1_label_przeczuwanie": 0.7220240025948751,
      "eval_f1_label_rado\u015b\u0107": 0.7476190476190476,
      "eval_f1_label_sarkazm": 0.5337423312883436,
      "eval_f1_label_wstr\u0119t": 0.7336787564766839,
      "eval_f1_macro": 0.8500096117258309,
      "eval_f1_micro": 0.8900487125956854,
      "eval_f1_weighted": 0.8894635900360007,
      "eval_loss": 0.2760503590106964,
      "eval_runtime": 13.4196,
      "eval_samples_per_second": 535.411,
      "eval_steps_per_second": 16.767,
      "eval_subset_accuracy": 0.8900487125956854,
      "step": 3400
    }
  ],
  "logging_steps": 100,
  "max_steps": 3592,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 4,
  "save_steps": 200,
  "stateful_callbacks": {
    "EarlyStoppingCallback": {
      "args": {
        "early_stopping_patience": 3,
        "early_stopping_threshold": 0.0001
      },
      "attributes": {
        "early_stopping_patience_counter": 0
      }
    },
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": false
      },
      "attributes": {}
    }
  },
  "total_flos": 1.9578702215192064e+16,
  "train_batch_size": 32,
  "trial_name": null,
  "trial_params": null
}