java_and_text_gpt2 / trainer_state.json
{
"best_metric": 1.7454155683517456,
"best_model_checkpoint": "/data/user_data/gonilude/java_and_text_gpt2/checkpoint-200",
"epoch": 3.0,
"eval_steps": 50,
"global_step": 243,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0,
"eval_accuracy": 0.19444444444444445,
"eval_loss": 11.014116287231445,
"eval_runtime": 1.1343,
"eval_samples_per_second": 63.475,
"eval_steps_per_second": 7.934,
"num_input_tokens_seen": 0,
"step": 0
},
{
"epoch": 0.012345679012345678,
"grad_norm": NaN,
"learning_rate": 0.0,
"loss": 7.2768,
"num_input_tokens_seen": 8192,
"step": 1
},
{
"epoch": 0.06172839506172839,
"grad_norm": NaN,
"learning_rate": 0.0,
"loss": 7.576,
"num_input_tokens_seen": 40960,
"step": 5
},
{
"epoch": 0.12345679012345678,
"grad_norm": 143.01515197753906,
"learning_rate": 5e-06,
"loss": 8.9704,
"num_input_tokens_seen": 81920,
"step": 10
},
{
"epoch": 0.18518518518518517,
"grad_norm": 357.00616455078125,
"learning_rate": 1.7500000000000002e-05,
"loss": 9.5348,
"num_input_tokens_seen": 122880,
"step": 15
},
{
"epoch": 0.24691358024691357,
"grad_norm": 436.4880676269531,
"learning_rate": 1.9991958847061786e-05,
"loss": 6.4758,
"num_input_tokens_seen": 163840,
"step": 20
},
{
"epoch": 0.30864197530864196,
"grad_norm": 103.16507720947266,
"learning_rate": 1.9942865292219837e-05,
"loss": 5.3616,
"num_input_tokens_seen": 204800,
"step": 25
},
{
"epoch": 0.37037037037037035,
"grad_norm": 69.18275451660156,
"learning_rate": 1.984936448731556e-05,
"loss": 5.0694,
"num_input_tokens_seen": 245760,
"step": 30
},
{
"epoch": 0.43209876543209874,
"grad_norm": 85.65583038330078,
"learning_rate": 1.971187402964132e-05,
"loss": 4.3876,
"num_input_tokens_seen": 286720,
"step": 35
},
{
"epoch": 0.49382716049382713,
"grad_norm": 23.326828002929688,
"learning_rate": 1.9531007984957408e-05,
"loss": 3.5825,
"num_input_tokens_seen": 327680,
"step": 40
},
{
"epoch": 0.5555555555555556,
"grad_norm": 31.104318618774414,
"learning_rate": 1.9307574144925288e-05,
"loss": 1.927,
"num_input_tokens_seen": 368640,
"step": 45
},
{
"epoch": 0.6172839506172839,
"grad_norm": 46.547122955322266,
"learning_rate": 1.9042570419313927e-05,
"loss": 2.3634,
"num_input_tokens_seen": 409600,
"step": 50
},
{
"epoch": 0.6172839506172839,
"eval_accuracy": 0.19444444444444445,
"eval_loss": 2.2269253730773926,
"eval_runtime": 0.9055,
"eval_samples_per_second": 79.514,
"eval_steps_per_second": 9.939,
"num_input_tokens_seen": 409600,
"step": 50
},
{
"epoch": 0.6790123456790124,
"grad_norm": 14.275629043579102,
"learning_rate": 1.8737180379092536e-05,
"loss": 1.8308,
"num_input_tokens_seen": 450560,
"step": 55
},
{
"epoch": 0.7407407407407407,
"grad_norm": 20.500579833984375,
"learning_rate": 1.8392767970315314e-05,
"loss": 1.7708,
"num_input_tokens_seen": 491520,
"step": 60
},
{
"epoch": 0.8024691358024691,
"grad_norm": 28.264705657958984,
"learning_rate": 1.8010871422407238e-05,
"loss": 1.8038,
"num_input_tokens_seen": 532480,
"step": 65
},
{
"epoch": 0.8641975308641975,
"grad_norm": 17.55794334411621,
"learning_rate": 1.759319637805806e-05,
"loss": 1.7319,
"num_input_tokens_seen": 573440,
"step": 70
},
{
"epoch": 0.9259259259259259,
"grad_norm": 16.978687286376953,
"learning_rate": 1.714160827540801e-05,
"loss": 1.6038,
"num_input_tokens_seen": 614400,
"step": 75
},
{
"epoch": 0.9876543209876543,
"grad_norm": 18.727697372436523,
"learning_rate": 1.66581240165482e-05,
"loss": 1.8072,
"num_input_tokens_seen": 655360,
"step": 80
},
{
"epoch": 1.0493827160493827,
"grad_norm": 18.814525604248047,
"learning_rate": 1.6144902959546286e-05,
"loss": 1.8374,
"num_input_tokens_seen": 696320,
"step": 85
},
{
"epoch": 1.1111111111111112,
"grad_norm": 10.187082290649414,
"learning_rate": 1.560423727422915e-05,
"loss": 1.5674,
"num_input_tokens_seen": 737280,
"step": 90
},
{
"epoch": 1.1728395061728394,
"grad_norm": 24.2742862701416,
"learning_rate": 1.5038541704796004e-05,
"loss": 2.266,
"num_input_tokens_seen": 778240,
"step": 95
},
{
"epoch": 1.2345679012345678,
"grad_norm": 22.410181045532227,
"learning_rate": 1.4450342784984632e-05,
"loss": 2.003,
"num_input_tokens_seen": 819200,
"step": 100
},
{
"epoch": 1.2345679012345678,
"eval_accuracy": 0.2222222222222222,
"eval_loss": 1.7976888418197632,
"eval_runtime": 0.9046,
"eval_samples_per_second": 79.592,
"eval_steps_per_second": 9.949,
"num_input_tokens_seen": 819200,
"step": 100
},
{
"epoch": 1.2962962962962963,
"grad_norm": 21.811655044555664,
"learning_rate": 1.3842267553958373e-05,
"loss": 1.9257,
"num_input_tokens_seen": 860160,
"step": 105
},
{
"epoch": 1.3580246913580247,
"grad_norm": 13.7644624710083,
"learning_rate": 1.3217031823311488e-05,
"loss": 1.7949,
"num_input_tokens_seen": 901120,
"step": 110
},
{
"epoch": 1.4197530864197532,
"grad_norm": 16.891998291015625,
"learning_rate": 1.2577428047595343e-05,
"loss": 1.7427,
"num_input_tokens_seen": 942080,
"step": 115
},
{
"epoch": 1.4814814814814814,
"grad_norm": 9.613762855529785,
"learning_rate": 1.1926312852538456e-05,
"loss": 1.5529,
"num_input_tokens_seen": 983040,
"step": 120
},
{
"epoch": 1.5432098765432098,
"grad_norm": 11.176910400390625,
"learning_rate": 1.126659427666257e-05,
"loss": 1.7114,
"num_input_tokens_seen": 1024000,
"step": 125
},
{
"epoch": 1.6049382716049383,
"grad_norm": 15.059788703918457,
"learning_rate": 1.0601218783276673e-05,
"loss": 1.7562,
"num_input_tokens_seen": 1064960,
"step": 130
},
{
"epoch": 1.6666666666666665,
"grad_norm": 14.87260913848877,
"learning_rate": 9.93315810085658e-06,
"loss": 1.6257,
"num_input_tokens_seen": 1105920,
"step": 135
},
{
"epoch": 1.7283950617283952,
"grad_norm": 11.261963844299316,
"learning_rate": 9.265395950584181e-06,
"loss": 1.574,
"num_input_tokens_seen": 1146880,
"step": 140
},
{
"epoch": 1.7901234567901234,
"grad_norm": 12.72499942779541,
"learning_rate": 8.600914720324315e-06,
"loss": 1.5799,
"num_input_tokens_seen": 1187840,
"step": 145
},
{
"epoch": 1.8518518518518519,
"grad_norm": 13.012349128723145,
"learning_rate": 7.942682144556605e-06,
"loss": 1.7323,
"num_input_tokens_seen": 1228800,
"step": 150
},
{
"epoch": 1.8518518518518519,
"eval_accuracy": 0.2222222222222222,
"eval_loss": 1.7827554941177368,
"eval_runtime": 0.9045,
"eval_samples_per_second": 79.598,
"eval_steps_per_second": 9.95,
"num_input_tokens_seen": 1228800,
"step": 150
},
{
"epoch": 1.9135802469135803,
"grad_norm": 14.650065422058105,
"learning_rate": 7.293638049752813e-06,
"loss": 1.6441,
"num_input_tokens_seen": 1269760,
"step": 155
},
{
"epoch": 1.9753086419753085,
"grad_norm": 14.940625190734863,
"learning_rate": 6.656681224398182e-06,
"loss": 1.8624,
"num_input_tokens_seen": 1310720,
"step": 160
},
{
"epoch": 2.037037037037037,
"grad_norm": 18.302019119262695,
"learning_rate": 6.034656472298374e-06,
"loss": 1.624,
"num_input_tokens_seen": 1351680,
"step": 165
},
{
"epoch": 2.0987654320987654,
"grad_norm": 13.879521369934082,
"learning_rate": 5.430341906995064e-06,
"loss": 1.5843,
"num_input_tokens_seen": 1392640,
"step": 170
},
{
"epoch": 2.1604938271604937,
"grad_norm": 11.124442100524902,
"learning_rate": 4.846436544036505e-06,
"loss": 1.6387,
"num_input_tokens_seen": 1433600,
"step": 175
},
{
"epoch": 2.2222222222222223,
"grad_norm": 9.738484382629395,
"learning_rate": 4.285548246518837e-06,
"loss": 1.5708,
"num_input_tokens_seen": 1474560,
"step": 180
},
{
"epoch": 2.2839506172839505,
"grad_norm": 12.508370399475098,
"learning_rate": 3.750182077736486e-06,
"loss": 1.6141,
"num_input_tokens_seen": 1515520,
"step": 185
},
{
"epoch": 2.3456790123456788,
"grad_norm": 12.206828117370605,
"learning_rate": 3.2427291129613502e-06,
"loss": 1.6146,
"num_input_tokens_seen": 1556480,
"step": 190
},
{
"epoch": 2.4074074074074074,
"grad_norm": 7.063596248626709,
"learning_rate": 2.765455760320196e-06,
"loss": 1.6204,
"num_input_tokens_seen": 1597440,
"step": 195
},
{
"epoch": 2.4691358024691357,
"grad_norm": 12.646849632263184,
"learning_rate": 2.3204936384657873e-06,
"loss": 1.6991,
"num_input_tokens_seen": 1638400,
"step": 200
},
{
"epoch": 2.4691358024691357,
"eval_accuracy": 0.18055555555555555,
"eval_loss": 1.7454155683517456,
"eval_runtime": 0.9063,
"eval_samples_per_second": 79.445,
"eval_steps_per_second": 9.931,
"num_input_tokens_seen": 1638400,
"step": 200
},
{
"epoch": 2.5308641975308643,
"grad_norm": 14.059237480163574,
"learning_rate": 1.9098300562505266e-06,
"loss": 1.5533,
"num_input_tokens_seen": 1679360,
"step": 205
},
{
"epoch": 2.5925925925925926,
"grad_norm": 18.09408950805664,
"learning_rate": 1.5352991369226865e-06,
"loss": 1.684,
"num_input_tokens_seen": 1720320,
"step": 210
},
{
"epoch": 2.6543209876543212,
"grad_norm": 15.153328895568848,
"learning_rate": 1.198573626486751e-06,
"loss": 1.5512,
"num_input_tokens_seen": 1761280,
"step": 215
},
{
"epoch": 2.7160493827160495,
"grad_norm": 18.182266235351562,
"learning_rate": 9.011574228136866e-07,
"loss": 1.632,
"num_input_tokens_seen": 1802240,
"step": 220
},
{
"epoch": 2.7777777777777777,
"grad_norm": 18.970129013061523,
"learning_rate": 6.443788588679823e-07,
"loss": 1.5761,
"num_input_tokens_seen": 1843200,
"step": 225
},
{
"epoch": 2.8395061728395063,
"grad_norm": 16.78023910522461,
"learning_rate": 4.2938477005015853e-07,
"loss": 1.6058,
"num_input_tokens_seen": 1884160,
"step": 230
},
{
"epoch": 2.9012345679012346,
"grad_norm": 14.316130638122559,
"learning_rate": 2.571353721514913e-07,
"loss": 1.645,
"num_input_tokens_seen": 1925120,
"step": 235
},
{
"epoch": 2.962962962962963,
"grad_norm": 17.25787925720215,
"learning_rate": 1.2839997279717075e-07,
"loss": 1.6722,
"num_input_tokens_seen": 1966080,
"step": 240
},
{
"epoch": 3.0,
"num_input_tokens_seen": 1990656,
"step": 243,
"total_flos": 3610849453277184.0,
"train_loss": 2.4843275125134627,
"train_runtime": 181.6704,
"train_samples_per_second": 10.651,
"train_steps_per_second": 1.338
}
],
"logging_steps": 5,
"max_steps": 243,
"num_input_tokens_seen": 1990656,
"num_train_epochs": 3,
"save_steps": 50,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 3610849453277184.0,
"train_batch_size": 4,
"trial_name": null,
"trial_params": null
}
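
Note: the JSON above follows the standard Hugging Face `Trainer` state layout. `log_history` interleaves training entries (keyed by `loss`, `grad_norm`, `learning_rate`) with evaluation entries (keyed by `eval_loss`, `eval_accuracy`) logged every `eval_steps` (50) steps, while `best_metric` and `best_model_checkpoint` record the lowest eval loss (1.7454 at step 200). A minimal sketch of reading these fields with the Python standard library, assuming a local copy of the file named `trainer_state.json` (the path is an assumption, not part of the original):

import json

# Hypothetical local path; adjust to wherever this trainer_state.json was downloaded.
STATE_PATH = "trainer_state.json"

with open(STATE_PATH) as f:
    state = json.load(f)

# Top-level summary fields present in this file.
print("best_metric (eval_loss):", state["best_metric"])
print("best_model_checkpoint:", state["best_model_checkpoint"])
print("total steps:", state["global_step"])

# log_history mixes training logs (have "loss") and eval logs (have "eval_loss").
train_curve = [(e["step"], e["loss"]) for e in state["log_history"] if "loss" in e]
eval_curve = [(e["step"], e["eval_loss"]) for e in state["log_history"] if "eval_loss" in e]

print("eval_loss by step:", eval_curve)

With this file, eval_curve would contain the five evaluation points at steps 0, 50, 100, 150, and 200, which is enough to see the eval loss dropping from 11.01 to 1.75 over the run.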