{
"best_metric": 10.336307525634766,
"best_model_checkpoint": "miner_id_24/checkpoint-50",
"epoch": 0.1146640826873385,
"eval_steps": 25,
"global_step": 71,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.001614987080103359,
"grad_norm": 0.060144778341054916,
"learning_rate": 0.00015,
"loss": 10.3721,
"step": 1
},
{
"epoch": 0.001614987080103359,
"eval_loss": 10.374292373657227,
"eval_runtime": 0.0553,
"eval_samples_per_second": 903.711,
"eval_steps_per_second": 54.223,
"step": 1
},
{
"epoch": 0.003229974160206718,
"grad_norm": 0.05838553234934807,
"learning_rate": 0.0003,
"loss": 10.3717,
"step": 2
},
{
"epoch": 0.0048449612403100775,
"grad_norm": 0.06211625039577484,
"learning_rate": 0.0002998600959423082,
"loss": 10.3706,
"step": 3
},
{
"epoch": 0.006459948320413436,
"grad_norm": 0.06348743289709091,
"learning_rate": 0.0002994406737417567,
"loss": 10.3702,
"step": 4
},
{
"epoch": 0.008074935400516795,
"grad_norm": 0.0670604556798935,
"learning_rate": 0.00029874260271490463,
"loss": 10.3687,
"step": 5
},
{
"epoch": 0.009689922480620155,
"grad_norm": 0.069881372153759,
"learning_rate": 0.00029776732972055516,
"loss": 10.3693,
"step": 6
},
{
"epoch": 0.011304909560723515,
"grad_norm": 0.08161191642284393,
"learning_rate": 0.0002965168761609197,
"loss": 10.3677,
"step": 7
},
{
"epoch": 0.012919896640826873,
"grad_norm": 0.08520465344190598,
"learning_rate": 0.0002949938337919529,
"loss": 10.3677,
"step": 8
},
{
"epoch": 0.014534883720930232,
"grad_norm": 0.09009241312742233,
"learning_rate": 0.0002932013593515431,
"loss": 10.365,
"step": 9
},
{
"epoch": 0.01614987080103359,
"grad_norm": 0.09783443063497543,
"learning_rate": 0.00029114316801669057,
"loss": 10.3648,
"step": 10
},
{
"epoch": 0.01776485788113695,
"grad_norm": 0.10501207411289215,
"learning_rate": 0.00028882352570323616,
"loss": 10.363,
"step": 11
},
{
"epoch": 0.01937984496124031,
"grad_norm": 0.11170078814029694,
"learning_rate": 0.00028624724022409897,
"loss": 10.3631,
"step": 12
},
{
"epoch": 0.02099483204134367,
"grad_norm": 0.1278601884841919,
"learning_rate": 0.0002834196513243502,
"loss": 10.3582,
"step": 13
},
{
"epoch": 0.02260981912144703,
"grad_norm": 0.13118207454681396,
"learning_rate": 0.0002803466196137759,
"loss": 10.3578,
"step": 14
},
{
"epoch": 0.02422480620155039,
"grad_norm": 0.13799789547920227,
"learning_rate": 0.00027703451441986836,
"loss": 10.3548,
"step": 15
},
{
"epoch": 0.025839793281653745,
"grad_norm": 0.13469068706035614,
"learning_rate": 0.000273490200586422,
"loss": 10.3531,
"step": 16
},
{
"epoch": 0.027454780361757105,
"grad_norm": 0.12834832072257996,
"learning_rate": 0.00026972102424509665,
"loss": 10.3529,
"step": 17
},
{
"epoch": 0.029069767441860465,
"grad_norm": 0.12865591049194336,
"learning_rate": 0.00026573479758943753,
"loss": 10.3515,
"step": 18
},
{
"epoch": 0.030684754521963824,
"grad_norm": 0.12713314592838287,
"learning_rate": 0.0002615397826829114,
"loss": 10.3504,
"step": 19
},
{
"epoch": 0.03229974160206718,
"grad_norm": 0.11311966925859451,
"learning_rate": 0.0002571446743345183,
"loss": 10.3474,
"step": 20
},
{
"epoch": 0.03391472868217054,
"grad_norm": 0.10431112349033356,
"learning_rate": 0.00025255858207747205,
"loss": 10.3458,
"step": 21
},
{
"epoch": 0.0355297157622739,
"grad_norm": 0.08799866586923599,
"learning_rate": 0.0002477910112883017,
"loss": 10.3447,
"step": 22
},
{
"epoch": 0.03714470284237726,
"grad_norm": 0.08381524682044983,
"learning_rate": 0.00024285184348550706,
"loss": 10.3429,
"step": 23
},
{
"epoch": 0.03875968992248062,
"grad_norm": 0.07663460075855255,
"learning_rate": 0.0002377513158486027,
"loss": 10.3418,
"step": 24
},
{
"epoch": 0.04037467700258398,
"grad_norm": 0.08272749930620193,
"learning_rate": 0.00023249999999999999,
"loss": 10.3427,
"step": 25
},
{
"epoch": 0.04037467700258398,
"eval_loss": 10.34329605102539,
"eval_runtime": 0.0528,
"eval_samples_per_second": 946.304,
"eval_steps_per_second": 56.778,
"step": 25
},
{
"epoch": 0.04198966408268734,
"grad_norm": 0.058701954782009125,
"learning_rate": 0.00022710878009370554,
"loss": 10.3429,
"step": 26
},
{
"epoch": 0.0436046511627907,
"grad_norm": 0.06109394133090973,
"learning_rate": 0.00022158883025624965,
"loss": 10.3406,
"step": 27
},
{
"epoch": 0.04521963824289406,
"grad_norm": 0.06125456467270851,
"learning_rate": 0.0002159515914266029,
"loss": 10.3389,
"step": 28
},
{
"epoch": 0.04683462532299742,
"grad_norm": 0.05576397478580475,
"learning_rate": 0.0002102087476430831,
"loss": 10.3395,
"step": 29
},
{
"epoch": 0.04844961240310078,
"grad_norm": 0.05186418071389198,
"learning_rate": 0.00020437220182640135,
"loss": 10.3398,
"step": 30
},
{
"epoch": 0.05006459948320414,
"grad_norm": 0.05386773869395256,
"learning_rate": 0.00019845405110904146,
"loss": 10.3392,
"step": 31
},
{
"epoch": 0.05167958656330749,
"grad_norm": 0.04602474346756935,
"learning_rate": 0.00019246656176210558,
"loss": 10.3386,
"step": 32
},
{
"epoch": 0.05329457364341085,
"grad_norm": 0.05505594238638878,
"learning_rate": 0.0001864221437715939,
"loss": 10.3384,
"step": 33
},
{
"epoch": 0.05490956072351421,
"grad_norm": 0.04987030103802681,
"learning_rate": 0.0001803333251168141,
"loss": 10.336,
"step": 34
},
{
"epoch": 0.05652454780361757,
"grad_norm": 0.043966956436634064,
"learning_rate": 0.00017421272580423058,
"loss": 10.338,
"step": 35
},
{
"epoch": 0.05813953488372093,
"grad_norm": 0.04973195120692253,
"learning_rate": 0.00016807303171057425,
"loss": 10.3365,
"step": 36
},
{
"epoch": 0.05975452196382429,
"grad_norm": 0.04590706154704094,
"learning_rate": 0.00016192696828942573,
"loss": 10.3366,
"step": 37
},
{
"epoch": 0.06136950904392765,
"grad_norm": 0.05704723298549652,
"learning_rate": 0.00015578727419576942,
"loss": 10.3354,
"step": 38
},
{
"epoch": 0.06298449612403101,
"grad_norm": 0.04313088580965996,
"learning_rate": 0.00014966667488318586,
"loss": 10.3373,
"step": 39
},
{
"epoch": 0.06459948320413436,
"grad_norm": 0.05099692568182945,
"learning_rate": 0.00014357785622840606,
"loss": 10.3366,
"step": 40
},
{
"epoch": 0.06621447028423773,
"grad_norm": 0.04435805603861809,
"learning_rate": 0.00013753343823789445,
"loss": 10.3366,
"step": 41
},
{
"epoch": 0.06782945736434108,
"grad_norm": 0.052016858011484146,
"learning_rate": 0.00013154594889095854,
"loss": 10.335,
"step": 42
},
{
"epoch": 0.06944444444444445,
"grad_norm": 0.05064675211906433,
"learning_rate": 0.00012562779817359865,
"loss": 10.3359,
"step": 43
},
{
"epoch": 0.0710594315245478,
"grad_norm": 0.05927170813083649,
"learning_rate": 0.00011979125235691685,
"loss": 10.3343,
"step": 44
},
{
"epoch": 0.07267441860465117,
"grad_norm": 0.049980998039245605,
"learning_rate": 0.00011404840857339706,
"loss": 10.3346,
"step": 45
},
{
"epoch": 0.07428940568475452,
"grad_norm": 0.04975796490907669,
"learning_rate": 0.0001084111697437504,
"loss": 10.334,
"step": 46
},
{
"epoch": 0.07590439276485789,
"grad_norm": 0.05702081695199013,
"learning_rate": 0.00010289121990629447,
"loss": 10.3338,
"step": 47
},
{
"epoch": 0.07751937984496124,
"grad_norm": 0.06157432496547699,
"learning_rate": 9.750000000000003e-05,
"loss": 10.3325,
"step": 48
},
{
"epoch": 0.0791343669250646,
"grad_norm": 0.06715147942304611,
"learning_rate": 9.22486841513973e-05,
"loss": 10.3317,
"step": 49
},
{
"epoch": 0.08074935400516796,
"grad_norm": 0.05393562093377113,
"learning_rate": 8.714815651449293e-05,
"loss": 10.332,
"step": 50
},
{
"epoch": 0.08074935400516796,
"eval_loss": 10.336307525634766,
"eval_runtime": 0.0531,
"eval_samples_per_second": 942.328,
"eval_steps_per_second": 56.54,
"step": 50
},
{
"epoch": 0.08236434108527131,
"grad_norm": 0.06667087227106094,
"learning_rate": 8.220898871169827e-05,
"loss": 10.3355,
"step": 51
},
{
"epoch": 0.08397932816537468,
"grad_norm": 0.06240332871675491,
"learning_rate": 7.744141792252794e-05,
"loss": 10.3335,
"step": 52
},
{
"epoch": 0.08559431524547803,
"grad_norm": 0.061197634786367416,
"learning_rate": 7.285532566548172e-05,
"loss": 10.3336,
"step": 53
},
{
"epoch": 0.0872093023255814,
"grad_norm": 0.06306872516870499,
"learning_rate": 6.846021731708856e-05,
"loss": 10.3345,
"step": 54
},
{
"epoch": 0.08882428940568475,
"grad_norm": 0.0639386773109436,
"learning_rate": 6.426520241056245e-05,
"loss": 10.3335,
"step": 55
},
{
"epoch": 0.09043927648578812,
"grad_norm": 0.06657017767429352,
"learning_rate": 6.0278975754903317e-05,
"loss": 10.3319,
"step": 56
},
{
"epoch": 0.09205426356589147,
"grad_norm": 0.06972195208072662,
"learning_rate": 5.6509799413577934e-05,
"loss": 10.3338,
"step": 57
},
{
"epoch": 0.09366925064599484,
"grad_norm": 0.06879620254039764,
"learning_rate": 5.296548558013161e-05,
"loss": 10.3332,
"step": 58
},
{
"epoch": 0.09528423772609819,
"grad_norm": 0.07011278718709946,
"learning_rate": 4.9653380386224046e-05,
"loss": 10.3303,
"step": 59
},
{
"epoch": 0.09689922480620156,
"grad_norm": 0.07088246941566467,
"learning_rate": 4.658034867564977e-05,
"loss": 10.3298,
"step": 60
},
{
"epoch": 0.09851421188630491,
"grad_norm": 0.07106033712625504,
"learning_rate": 4.375275977590104e-05,
"loss": 10.3291,
"step": 61
},
{
"epoch": 0.10012919896640828,
"grad_norm": 0.07251573354005814,
"learning_rate": 4.117647429676387e-05,
"loss": 10.3304,
"step": 62
},
{
"epoch": 0.10174418604651163,
"grad_norm": 0.07086535543203354,
"learning_rate": 3.885683198330941e-05,
"loss": 10.3327,
"step": 63
},
{
"epoch": 0.10335917312661498,
"grad_norm": 0.07405930012464523,
"learning_rate": 3.679864064845691e-05,
"loss": 10.3308,
"step": 64
},
{
"epoch": 0.10497416020671835,
"grad_norm": 0.07449465990066528,
"learning_rate": 3.500616620804712e-05,
"loss": 10.3295,
"step": 65
},
{
"epoch": 0.1065891472868217,
"grad_norm": 0.06913011521100998,
"learning_rate": 3.348312383908033e-05,
"loss": 10.3298,
"step": 66
},
{
"epoch": 0.10820413436692507,
"grad_norm": 0.06790976226329803,
"learning_rate": 3.223267027944483e-05,
"loss": 10.3306,
"step": 67
},
{
"epoch": 0.10981912144702842,
"grad_norm": 0.06618337333202362,
"learning_rate": 3.125739728509535e-05,
"loss": 10.3305,
"step": 68
},
{
"epoch": 0.11143410852713179,
"grad_norm": 0.06993363052606583,
"learning_rate": 3.055932625824328e-05,
"loss": 10.3292,
"step": 69
},
{
"epoch": 0.11304909560723514,
"grad_norm": 0.07217420637607574,
"learning_rate": 3.0139904057691777e-05,
"loss": 10.3311,
"step": 70
},
{
"epoch": 0.1146640826873385,
"grad_norm": 0.06785973906517029,
"learning_rate": 2.9999999999999997e-05,
"loss": 10.3298,
"step": 71
}
],
"logging_steps": 1,
"max_steps": 71,
"num_input_tokens_seen": 0,
"num_train_epochs": 1,
"save_steps": 50,
"stateful_callbacks": {
"EarlyStoppingCallback": {
"args": {
"early_stopping_patience": 1,
"early_stopping_threshold": 0.0
},
"attributes": {
"early_stopping_patience_counter": 0
}
},
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 57095359561728.0,
"train_batch_size": 6,
"trial_name": null,
"trial_params": null
}