{
  "best_global_step": null,
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 10.0,
  "eval_steps": 500,
  "global_step": 40,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.32,
      "grad_norm": 0.06941480969665301,
      "learning_rate": 0.0,
      "loss": 1.855,
      "step": 1
    },
    {
      "epoch": 0.64,
      "grad_norm": 0.07164680607161446,
      "learning_rate": 0.0001,
      "loss": 1.9926,
      "step": 2
    },
    {
      "epoch": 0.96,
      "grad_norm": 0.21122759413879916,
      "learning_rate": 9.983786540671051e-05,
      "loss": 1.9784,
      "step": 3
    },
    {
      "epoch": 1.0,
      "grad_norm": 0.21122759413879916,
      "learning_rate": 9.935251313189564e-05,
      "loss": 1.9088,
      "step": 4
    },
    {
      "epoch": 1.32,
      "grad_norm": 0.2074284636466837,
      "learning_rate": 9.85470908713026e-05,
      "loss": 1.8429,
      "step": 5
    },
    {
      "epoch": 1.6400000000000001,
      "grad_norm": 0.07007210993624198,
      "learning_rate": 9.742682209735727e-05,
      "loss": 1.9524,
      "step": 6
    },
    {
      "epoch": 1.96,
      "grad_norm": 0.0905879468709372,
      "learning_rate": 9.599897218294122e-05,
      "loss": 1.9214,
      "step": 7
    },
    {
      "epoch": 2.0,
      "grad_norm": 0.0905879468709372,
      "learning_rate": 9.42728012826605e-05,
      "loss": 1.8367,
      "step": 8
    },
    {
      "epoch": 2.32,
      "grad_norm": 0.20234933136371788,
      "learning_rate": 9.225950427718975e-05,
      "loss": 1.8091,
      "step": 9
    },
    {
      "epoch": 2.64,
      "grad_norm": 0.09941803319220524,
      "learning_rate": 8.997213817017507e-05,
      "loss": 1.9017,
      "step": 10
    },
    {
      "epoch": 2.96,
      "grad_norm": 0.10491043484744071,
      "learning_rate": 8.742553740855506e-05,
      "loss": 1.8573,
      "step": 11
    },
    {
      "epoch": 3.0,
      "grad_norm": 0.10491043484744071,
      "learning_rate": 8.463621767547998e-05,
      "loss": 1.7248,
      "step": 12
    },
    {
      "epoch": 3.32,
      "grad_norm": 0.24596408794252209,
      "learning_rate": 8.162226877976887e-05,
      "loss": 1.768,
      "step": 13
    },
    {
      "epoch": 3.64,
      "grad_norm": 0.09982039968048108,
      "learning_rate": 7.840323733655778e-05,
      "loss": 1.851,
      "step": 14
    },
    {
      "epoch": 3.96,
      "grad_norm": 0.13610071898301376,
      "learning_rate": 7.500000000000001e-05,
      "loss": 1.81,
      "step": 15
    },
    {
      "epoch": 4.0,
      "grad_norm": 0.13610071898301376,
      "learning_rate": 7.143462807015271e-05,
      "loss": 1.6049,
      "step": 16
    },
    {
      "epoch": 4.32,
      "grad_norm": 0.28854515290408267,
      "learning_rate": 6.773024435212678e-05,
      "loss": 1.7365,
      "step": 17
    },
    {
      "epoch": 4.64,
      "grad_norm": 0.1187014455632186,
      "learning_rate": 6.391087319582264e-05,
      "loss": 1.8143,
      "step": 18
    },
    {
      "epoch": 4.96,
      "grad_norm": 0.4012363328781708,
      "learning_rate": 6.0001284688802226e-05,
      "loss": 1.7726,
      "step": 19
    },
    {
      "epoch": 5.0,
      "grad_norm": 0.4012363328781708,
      "learning_rate": 5.602683401276615e-05,
      "loss": 1.4847,
      "step": 20
    },
    {
      "epoch": 5.32,
      "grad_norm": 0.34778222725864993,
      "learning_rate": 5.201329700547076e-05,
      "loss": 1.7088,
      "step": 21
    },
    {
      "epoch": 5.64,
      "grad_norm": 0.30708016946700367,
      "learning_rate": 4.798670299452926e-05,
      "loss": 1.7791,
      "step": 22
    },
    {
      "epoch": 5.96,
      "grad_norm": 0.14929092908721275,
      "learning_rate": 4.397316598723385e-05,
      "loss": 1.7415,
      "step": 23
    },
    {
      "epoch": 6.0,
      "grad_norm": 0.14929092908721275,
      "learning_rate": 3.9998715311197785e-05,
      "loss": 1.3783,
      "step": 24
    },
    {
      "epoch": 6.32,
      "grad_norm": 0.48640577832211485,
      "learning_rate": 3.608912680417737e-05,
      "loss": 1.6875,
      "step": 25
    },
    {
      "epoch": 6.64,
      "grad_norm": 0.15793676210096758,
      "learning_rate": 3.226975564787322e-05,
      "loss": 1.7524,
      "step": 26
    },
    {
      "epoch": 6.96,
      "grad_norm": 0.18039868067732304,
      "learning_rate": 2.8565371929847284e-05,
      "loss": 1.7237,
      "step": 27
    },
    {
      "epoch": 7.0,
      "grad_norm": 0.18039868067732304,
      "learning_rate": 2.500000000000001e-05,
      "loss": 1.2939,
      "step": 28
    },
    {
      "epoch": 7.32,
      "grad_norm": 0.8701746840768638,
      "learning_rate": 2.1596762663442218e-05,
      "loss": 1.6718,
      "step": 29
    },
    {
      "epoch": 7.64,
      "grad_norm": 0.16144962267720986,
      "learning_rate": 1.837773122023114e-05,
      "loss": 1.7342,
      "step": 30
    },
    {
      "epoch": 7.96,
      "grad_norm": 0.16647167505073196,
      "learning_rate": 1.536378232452003e-05,
      "loss": 1.7079,
      "step": 31
    },
    {
      "epoch": 8.0,
      "grad_norm": 0.6247704422969321,
      "learning_rate": 1.257446259144494e-05,
      "loss": 1.236,
      "step": 32
    },
    {
      "epoch": 8.32,
      "grad_norm": 0.15864392217040782,
      "learning_rate": 1.0027861829824952e-05,
      "loss": 1.6617,
      "step": 33
    },
    {
      "epoch": 8.64,
      "grad_norm": 0.1636361041162895,
      "learning_rate": 7.740495722810271e-06,
      "loss": 1.7242,
      "step": 34
    },
    {
      "epoch": 8.96,
      "grad_norm": 0.2624031260134286,
      "learning_rate": 5.727198717339511e-06,
      "loss": 1.7007,
      "step": 35
    },
    {
      "epoch": 9.0,
      "grad_norm": 0.2624031260134286,
      "learning_rate": 4.001027817058789e-06,
      "loss": 1.1971,
      "step": 36
    },
    {
      "epoch": 9.32,
      "grad_norm": 0.6740139043193593,
      "learning_rate": 2.573177902642726e-06,
      "loss": 1.6565,
      "step": 37
    },
    {
      "epoch": 9.64,
      "grad_norm": 0.16695365270308676,
      "learning_rate": 1.4529091286973995e-06,
      "loss": 1.7198,
      "step": 38
    },
    {
      "epoch": 9.96,
      "grad_norm": 0.17715172064003862,
      "learning_rate": 6.474868681043578e-07,
      "loss": 1.6983,
      "step": 39
    },
    {
      "epoch": 10.0,
      "grad_norm": 0.17715172064003862,
      "learning_rate": 1.6213459328950352e-07,
      "loss": 1.1898,
      "step": 40
    },
    {
      "epoch": 10.0,
      "step": 40,
      "total_flos": 48225369194496.0,
      "train_loss": 1.7096602380275727,
      "train_runtime": 1613.4938,
      "train_samples_per_second": 0.614,
      "train_steps_per_second": 0.025
    }
  ],
  "logging_steps": 1,
  "max_steps": 40,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 10,
  "save_steps": 100,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 48225369194496.0,
  "train_batch_size": 1,
  "trial_name": null,
  "trial_params": null
}