{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 2.9956108266276518,
"eval_steps": 500,
"global_step": 1023,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.029261155815654718,
"grad_norm": 9.084855756887139,
"learning_rate": 9.615384615384617e-07,
"loss": 0.886,
"step": 10
},
{
"epoch": 0.058522311631309436,
"grad_norm": 2.1280915791190536,
"learning_rate": 1.9230769230769234e-06,
"loss": 0.8328,
"step": 20
},
{
"epoch": 0.08778346744696415,
"grad_norm": 1.4864870064749038,
"learning_rate": 2.8846153846153845e-06,
"loss": 0.7582,
"step": 30
},
{
"epoch": 0.11704462326261887,
"grad_norm": 1.2093234450532022,
"learning_rate": 3.846153846153847e-06,
"loss": 0.7319,
"step": 40
},
{
"epoch": 0.14630577907827358,
"grad_norm": 1.226249948265427,
"learning_rate": 4.807692307692308e-06,
"loss": 0.715,
"step": 50
},
{
"epoch": 0.1755669348939283,
"grad_norm": 1.1538654672324984,
"learning_rate": 4.999179359206061e-06,
"loss": 0.6994,
"step": 60
},
{
"epoch": 0.20482809070958302,
"grad_norm": 1.0705237000585746,
"learning_rate": 4.9958464481673945e-06,
"loss": 0.6873,
"step": 70
},
{
"epoch": 0.23408924652523774,
"grad_norm": 0.7949212281230073,
"learning_rate": 4.989953462717219e-06,
"loss": 0.6767,
"step": 80
},
{
"epoch": 0.26335040234089246,
"grad_norm": 0.5573809976671956,
"learning_rate": 4.981506571060115e-06,
"loss": 0.669,
"step": 90
},
{
"epoch": 0.29261155815654716,
"grad_norm": 0.5067683313185354,
"learning_rate": 4.970514614581525e-06,
"loss": 0.6592,
"step": 100
},
{
"epoch": 0.3218727139722019,
"grad_norm": 0.7197115966513962,
"learning_rate": 4.95698909859346e-06,
"loss": 0.6672,
"step": 110
},
{
"epoch": 0.3511338697878566,
"grad_norm": 0.423347227360164,
"learning_rate": 4.9409441802918504e-06,
"loss": 0.6717,
"step": 120
},
{
"epoch": 0.38039502560351135,
"grad_norm": 0.4057413591370665,
"learning_rate": 4.922396653938172e-06,
"loss": 0.6551,
"step": 130
},
{
"epoch": 0.40965618141916604,
"grad_norm": 0.40545236615866387,
"learning_rate": 4.9013659332808424e-06,
"loss": 0.6554,
"step": 140
},
{
"epoch": 0.4389173372348208,
"grad_norm": 0.34430634744049543,
"learning_rate": 4.877874031234797e-06,
"loss": 0.6506,
"step": 150
},
{
"epoch": 0.4681784930504755,
"grad_norm": 0.3947471159362995,
"learning_rate": 4.851945536840513e-06,
"loss": 0.6523,
"step": 160
},
{
"epoch": 0.49743964886613024,
"grad_norm": 0.3607675988690859,
"learning_rate": 4.823607589526577e-06,
"loss": 0.6572,
"step": 170
},
{
"epoch": 0.5267008046817849,
"grad_norm": 0.3388740063419326,
"learning_rate": 4.792889850702788e-06,
"loss": 0.6567,
"step": 180
},
{
"epoch": 0.5559619604974396,
"grad_norm": 0.3657639111088194,
"learning_rate": 4.759824472713458e-06,
"loss": 0.6451,
"step": 190
},
{
"epoch": 0.5852231163130943,
"grad_norm": 0.3385115737777815,
"learning_rate": 4.724446065183473e-06,
"loss": 0.6462,
"step": 200
},
{
"epoch": 0.6144842721287491,
"grad_norm": 0.34158523130244933,
"learning_rate": 4.686791658792296e-06,
"loss": 0.6527,
"step": 210
},
{
"epoch": 0.6437454279444038,
"grad_norm": 0.36017500334376507,
"learning_rate": 4.64690066651385e-06,
"loss": 0.6405,
"step": 220
},
{
"epoch": 0.6730065837600585,
"grad_norm": 0.34353043662176397,
"learning_rate": 4.6048148423628475e-06,
"loss": 0.6404,
"step": 230
},
{
"epoch": 0.7022677395757132,
"grad_norm": 0.3226863732614126,
"learning_rate": 4.560578237690746e-06,
"loss": 0.6473,
"step": 240
},
{
"epoch": 0.731528895391368,
"grad_norm": 0.34804446118771093,
"learning_rate": 4.514237155077068e-06,
"loss": 0.6456,
"step": 250
},
{
"epoch": 0.7607900512070227,
"grad_norm": 0.36196891649357604,
"learning_rate": 4.465840099864371e-06,
"loss": 0.6482,
"step": 260
},
{
"epoch": 0.7900512070226774,
"grad_norm": 0.3648762882092531,
"learning_rate": 4.415437729387553e-06,
"loss": 0.6504,
"step": 270
},
{
"epoch": 0.8193123628383321,
"grad_norm": 0.34075124061174383,
"learning_rate": 4.363082799950688e-06,
"loss": 0.6492,
"step": 280
},
{
"epoch": 0.8485735186539868,
"grad_norm": 0.34077798093344264,
"learning_rate": 4.3088301116068515e-06,
"loss": 0.6414,
"step": 290
},
{
"epoch": 0.8778346744696416,
"grad_norm": 0.35911830019231555,
"learning_rate": 4.252736450798742e-06,
"loss": 0.6332,
"step": 300
},
{
"epoch": 0.9070958302852963,
"grad_norm": 0.3691524853261293,
"learning_rate": 4.194860530920158e-06,
"loss": 0.6463,
"step": 310
},
{
"epoch": 0.936356986100951,
"grad_norm": 0.3237790683882795,
"learning_rate": 4.135262930860523e-06,
"loss": 0.6382,
"step": 320
},
{
"epoch": 0.9656181419166057,
"grad_norm": 0.3741964897155993,
"learning_rate": 4.074006031596782e-06,
"loss": 0.6415,
"step": 330
},
{
"epoch": 0.9948792977322605,
"grad_norm": 0.35814476324219435,
"learning_rate": 4.0111539508990635e-06,
"loss": 0.6375,
"step": 340
},
{
"epoch": 0.9978054133138259,
"eval_loss": 0.642014741897583,
"eval_runtime": 345.6219,
"eval_samples_per_second": 26.645,
"eval_steps_per_second": 0.417,
"step": 341
},
{
"epoch": 1.025237746891002,
"grad_norm": 0.3629604915183517,
"learning_rate": 3.946772476218427e-06,
"loss": 0.6554,
"step": 350
},
{
"epoch": 1.054498902706657,
"grad_norm": 0.37086856038495986,
"learning_rate": 3.880928995826948e-06,
"loss": 0.61,
"step": 360
},
{
"epoch": 1.0837600585223117,
"grad_norm": 0.3627351529973698,
"learning_rate": 3.813692428282223e-06,
"loss": 0.6134,
"step": 370
},
{
"epoch": 1.1130212143379663,
"grad_norm": 0.3243490766587339,
"learning_rate": 3.7451331502901254e-06,
"loss": 0.6107,
"step": 380
},
{
"epoch": 1.142282370153621,
"grad_norm": 0.3850326873012922,
"learning_rate": 3.675322923041302e-06,
"loss": 0.616,
"step": 390
},
{
"epoch": 1.1715435259692757,
"grad_norm": 0.34816033713569355,
"learning_rate": 3.6043348170985315e-06,
"loss": 0.6174,
"step": 400
},
{
"epoch": 1.2008046817849305,
"grad_norm": 0.35518585728200275,
"learning_rate": 3.532243135913563e-06,
"loss": 0.617,
"step": 410
},
{
"epoch": 1.2300658376005853,
"grad_norm": 0.3316993583684839,
"learning_rate": 3.4591233380534793e-06,
"loss": 0.605,
"step": 420
},
{
"epoch": 1.2593269934162399,
"grad_norm": 0.3199920196074056,
"learning_rate": 3.3850519582180026e-06,
"loss": 0.6151,
"step": 430
},
{
"epoch": 1.2885881492318947,
"grad_norm": 0.3924257030583339,
"learning_rate": 3.3101065271304066e-06,
"loss": 0.6076,
"step": 440
},
{
"epoch": 1.3178493050475493,
"grad_norm": 0.3435542310163033,
"learning_rate": 3.2343654903858873e-06,
"loss": 0.6115,
"step": 450
},
{
"epoch": 1.347110460863204,
"grad_norm": 0.3266482902770932,
"learning_rate": 3.157908126342339e-06,
"loss": 0.609,
"step": 460
},
{
"epoch": 1.3763716166788589,
"grad_norm": 0.32160594265418013,
"learning_rate": 3.0808144631394693e-06,
"loss": 0.6063,
"step": 470
},
{
"epoch": 1.4056327724945135,
"grad_norm": 0.32601950132705126,
"learning_rate": 3.0031651949331216e-06,
"loss": 0.607,
"step": 480
},
{
"epoch": 1.4348939283101683,
"grad_norm": 0.3446989520890391,
"learning_rate": 2.9250415974324696e-06,
"loss": 0.6132,
"step": 490
},
{
"epoch": 1.464155084125823,
"grad_norm": 0.32948545631939724,
"learning_rate": 2.8465254428285066e-06,
"loss": 0.6017,
"step": 500
},
{
"epoch": 1.4934162399414777,
"grad_norm": 0.33195140305843396,
"learning_rate": 2.767698914202857e-06,
"loss": 0.6097,
"step": 510
},
{
"epoch": 1.5226773957571325,
"grad_norm": 0.34392530557314854,
"learning_rate": 2.688644519506513e-06,
"loss": 0.612,
"step": 520
},
{
"epoch": 1.5519385515727873,
"grad_norm": 0.33949149781758015,
"learning_rate": 2.6094450051985314e-06,
"loss": 0.6106,
"step": 530
},
{
"epoch": 1.5811997073884418,
"grad_norm": 0.327303003129616,
"learning_rate": 2.5301832696350766e-06,
"loss": 0.6172,
"step": 540
},
{
"epoch": 1.6104608632040964,
"grad_norm": 0.33943660199630793,
"learning_rate": 2.4509422762994763e-06,
"loss": 0.608,
"step": 550
},
{
"epoch": 1.6397220190197512,
"grad_norm": 0.3410312861107126,
"learning_rate": 2.371804966964112e-06,
"loss": 0.6098,
"step": 560
},
{
"epoch": 1.668983174835406,
"grad_norm": 0.3281165569806639,
"learning_rate": 2.292854174875026e-06,
"loss": 0.6003,
"step": 570
},
{
"epoch": 1.6982443306510606,
"grad_norm": 0.33097626626038495,
"learning_rate": 2.214172538050132e-06,
"loss": 0.6098,
"step": 580
},
{
"epoch": 1.7275054864667154,
"grad_norm": 0.32188678043068986,
"learning_rate": 2.1358424127817636e-06,
"loss": 0.6096,
"step": 590
},
{
"epoch": 1.7567666422823702,
"grad_norm": 0.3345570749109643,
"learning_rate": 2.0579457874341145e-06,
"loss": 0.6067,
"step": 600
},
{
"epoch": 1.7860277980980248,
"grad_norm": 0.3458674892135895,
"learning_rate": 1.980564196625778e-06,
"loss": 0.6091,
"step": 610
},
{
"epoch": 1.8152889539136796,
"grad_norm": 0.3096604787123586,
"learning_rate": 1.9037786358872403e-06,
"loss": 0.6094,
"step": 620
},
{
"epoch": 1.8445501097293344,
"grad_norm": 0.3209091351001541,
"learning_rate": 1.8276694768826186e-06,
"loss": 0.6087,
"step": 630
},
{
"epoch": 1.873811265544989,
"grad_norm": 0.3137761488457552,
"learning_rate": 1.752316383284421e-06,
"loss": 0.6034,
"step": 640
},
{
"epoch": 1.9030724213606436,
"grad_norm": 0.33774818566408105,
"learning_rate": 1.6777982273893492e-06,
"loss": 0.6091,
"step": 650
},
{
"epoch": 1.9323335771762986,
"grad_norm": 0.30571808842565584,
"learning_rate": 1.6041930075624462e-06,
"loss": 0.6097,
"step": 660
},
{
"epoch": 1.9615947329919532,
"grad_norm": 0.32428741284613727,
"learning_rate": 1.531577766595981e-06,
"loss": 0.5985,
"step": 670
},
{
"epoch": 1.9908558888076078,
"grad_norm": 0.31122632241768317,
"learning_rate": 1.4600285110685461e-06,
"loss": 0.613,
"step": 680
},
{
"epoch": 1.9967081199707388,
"eval_loss": 0.6351438164710999,
"eval_runtime": 347.2019,
"eval_samples_per_second": 26.523,
"eval_steps_per_second": 0.415,
"step": 682
},
{
"epoch": 2.0212143379663496,
"grad_norm": 0.3173744883131915,
"learning_rate": 1.3896201317887491e-06,
"loss": 0.6328,
"step": 690
},
{
"epoch": 2.050475493782004,
"grad_norm": 0.3199871258087607,
"learning_rate": 1.3204263254067992e-06,
"loss": 0.5842,
"step": 700
},
{
"epoch": 2.0797366495976592,
"grad_norm": 0.30735021392437534,
"learning_rate": 1.2525195172760049e-06,
"loss": 0.5864,
"step": 710
},
{
"epoch": 2.108997805413314,
"grad_norm": 0.33451707159645516,
"learning_rate": 1.1859707856449616e-06,
"loss": 0.5874,
"step": 720
},
{
"epoch": 2.1382589612289684,
"grad_norm": 0.31423591154099745,
"learning_rate": 1.1208497872597376e-06,
"loss": 0.589,
"step": 730
},
{
"epoch": 2.1675201170446234,
"grad_norm": 0.33377630723760604,
"learning_rate": 1.0572246844539681e-06,
"loss": 0.5937,
"step": 740
},
{
"epoch": 2.196781272860278,
"grad_norm": 0.326073303326455,
"learning_rate": 9.951620738031426e-07,
"loss": 0.5925,
"step": 750
},
{
"epoch": 2.2260424286759326,
"grad_norm": 0.36828938707275,
"learning_rate": 9.34726916417784e-07,
"loss": 0.5896,
"step": 760
},
{
"epoch": 2.255303584491587,
"grad_norm": 0.3129069532358376,
"learning_rate": 8.759824699484654e-07,
"loss": 0.5878,
"step": 770
},
{
"epoch": 2.284564740307242,
"grad_norm": 0.31809132851390914,
"learning_rate": 8.18990222373854e-07,
"loss": 0.5873,
"step": 780
},
{
"epoch": 2.313825896122897,
"grad_norm": 0.3005492376139684,
"learning_rate": 7.638098276410629e-07,
"loss": 0.5853,
"step": 790
},
{
"epoch": 2.3430870519385514,
"grad_norm": 0.31088436898911936,
"learning_rate": 7.104990432257027e-07,
"loss": 0.5929,
"step": 800
},
{
"epoch": 2.3723482077542064,
"grad_norm": 0.3054167834067301,
"learning_rate": 6.591136696769605e-07,
"loss": 0.5879,
"step": 810
},
{
"epoch": 2.401609363569861,
"grad_norm": 0.3180072069259439,
"learning_rate": 6.097074922110135e-07,
"loss": 0.5941,
"step": 820
},
{
"epoch": 2.4308705193855156,
"grad_norm": 0.294678201722322,
"learning_rate": 5.623322244138855e-07,
"loss": 0.5864,
"step": 830
},
{
"epoch": 2.4601316752011706,
"grad_norm": 0.30213154178383383,
"learning_rate": 5.170374541126944e-07,
"loss": 0.5867,
"step": 840
},
{
"epoch": 2.489392831016825,
"grad_norm": 0.3104650229121875,
"learning_rate": 4.73870591471928e-07,
"loss": 0.5884,
"step": 850
},
{
"epoch": 2.5186539868324798,
"grad_norm": 0.31369258551616863,
"learning_rate": 4.328768193690963e-07,
"loss": 0.5817,
"step": 860
},
{
"epoch": 2.547915142648135,
"grad_norm": 0.3063951802164903,
"learning_rate": 3.9409904610167937e-07,
"loss": 0.584,
"step": 870
},
{
"epoch": 2.5771762984637894,
"grad_norm": 0.2941603017955741,
"learning_rate": 3.575778604748955e-07,
"loss": 0.588,
"step": 880
},
{
"epoch": 2.606437454279444,
"grad_norm": 0.30173603577561026,
"learning_rate": 3.2335148931728154e-07,
"loss": 0.5873,
"step": 890
},
{
"epoch": 2.6356986100950985,
"grad_norm": 0.3091421484646903,
"learning_rate": 2.914557574685625e-07,
"loss": 0.5919,
"step": 900
},
{
"epoch": 2.6649597659107536,
"grad_norm": 0.30385884437853805,
"learning_rate": 2.6192405028169264e-07,
"loss": 0.5922,
"step": 910
},
{
"epoch": 2.694220921726408,
"grad_norm": 0.31174481931495496,
"learning_rate": 2.3478727867830904e-07,
"loss": 0.5862,
"step": 920
},
{
"epoch": 2.723482077542063,
"grad_norm": 0.3163300064243285,
"learning_rate": 2.1007384679418537e-07,
"loss": 0.5896,
"step": 930
},
{
"epoch": 2.7527432333577178,
"grad_norm": 0.3128550441662863,
"learning_rate": 1.8780962224853865e-07,
"loss": 0.5887,
"step": 940
},
{
"epoch": 2.7820043891733723,
"grad_norm": 0.2956602109611889,
"learning_rate": 1.6801790906832232e-07,
"loss": 0.5873,
"step": 950
},
{
"epoch": 2.811265544989027,
"grad_norm": 0.3063122290845978,
"learning_rate": 1.5071942329583381e-07,
"loss": 0.5912,
"step": 960
},
{
"epoch": 2.840526700804682,
"grad_norm": 0.29690799928240513,
"learning_rate": 1.3593227130517372e-07,
"loss": 0.5869,
"step": 970
},
{
"epoch": 2.8697878566203365,
"grad_norm": 0.29850143627079,
"learning_rate": 1.2367193085025406e-07,
"loss": 0.5868,
"step": 980
},
{
"epoch": 2.899049012435991,
"grad_norm": 0.30319295397334783,
"learning_rate": 1.1395123486418795e-07,
"loss": 0.5794,
"step": 990
},
{
"epoch": 2.928310168251646,
"grad_norm": 0.298852220351779,
"learning_rate": 1.0678035802702309e-07,
"loss": 0.5874,
"step": 1000
},
{
"epoch": 2.9575713240673007,
"grad_norm": 0.3016454436069009,
"learning_rate": 1.0216680611587525e-07,
"loss": 0.5787,
"step": 1010
},
{
"epoch": 2.9868324798829553,
"grad_norm": 0.3043887312834752,
"learning_rate": 1.0011540814860988e-07,
"loss": 0.5907,
"step": 1020
},
{
"epoch": 2.9956108266276518,
"eval_loss": 0.6356203556060791,
"eval_runtime": 347.401,
"eval_samples_per_second": 26.508,
"eval_steps_per_second": 0.415,
"step": 1023
},
{
"epoch": 2.9956108266276518,
"step": 1023,
"total_flos": 2144987064041472.0,
"train_loss": 0.6243335795192775,
"train_runtime": 55244.514,
"train_samples_per_second": 9.501,
"train_steps_per_second": 0.019
}
],
"logging_steps": 10,
"max_steps": 1023,
"num_input_tokens_seen": 0,
"num_train_epochs": 3,
"save_steps": 500,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 2144987064041472.0,
"train_batch_size": 8,
"trial_name": null,
"trial_params": null
}