{
  "best_global_step": 400,
  "best_metric": 2.613794549688464e-06,
  "best_model_checkpoint": "qwen2-7b-instruct-trl-sft-newick_extraction_1/checkpoint-400",
  "epoch": 1.4195555555555557,
  "eval_steps": 10,
  "global_step": 400,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.035555555555555556,
      "grad_norm": 4.419339179992676,
      "learning_rate": 0.0002,
      "loss": 14.4699,
      "mean_token_accuracy": 0.5754725567996501,
      "num_tokens": 570240.0,
      "step": 10
    },
    {
      "epoch": 0.035555555555555556,
      "eval_loss": 1.5543439388275146,
      "eval_mean_token_accuracy": 0.6078731870651245,
      "eval_num_tokens": 570240.0,
      "eval_runtime": 648.0159,
      "eval_samples_per_second": 1.543,
      "eval_steps_per_second": 0.386,
      "step": 10
    },
    {
      "epoch": 0.07111111111111111,
      "grad_norm": 9.82264232635498,
      "learning_rate": 0.0002,
      "loss": 10.3302,
      "mean_token_accuracy": 0.6614481717348099,
      "num_tokens": 1140480.0,
      "step": 20
    },
    {
      "epoch": 0.07111111111111111,
      "eval_loss": 0.9738740921020508,
      "eval_mean_token_accuracy": 0.7448731756210327,
      "eval_num_tokens": 1140480.0,
      "eval_runtime": 630.2188,
      "eval_samples_per_second": 1.587,
      "eval_steps_per_second": 0.397,
      "step": 20
    },
    {
      "epoch": 0.10666666666666667,
      "grad_norm": 11.91273307800293,
      "learning_rate": 0.0002,
      "loss": 5.6105,
      "mean_token_accuracy": 0.8060442075133324,
      "num_tokens": 1710720.0,
      "step": 30
    },
    {
      "epoch": 0.10666666666666667,
      "eval_loss": 0.3900993764400482,
      "eval_mean_token_accuracy": 0.8829268217086792,
      "eval_num_tokens": 1710720.0,
      "eval_runtime": 631.6971,
      "eval_samples_per_second": 1.583,
      "eval_steps_per_second": 0.396,
      "step": 30
    },
    {
      "epoch": 0.14222222222222222,
      "grad_norm": 6.84392786026001,
      "learning_rate": 0.0002,
      "loss": 1.7204,
      "mean_token_accuracy": 0.943353658914566,
      "num_tokens": 2280960.0,
      "step": 40
    },
    {
      "epoch": 0.14222222222222222,
      "eval_loss": 0.06896545737981796,
      "eval_mean_token_accuracy": 0.9878048896789551,
      "eval_num_tokens": 2280960.0,
      "eval_runtime": 627.3952,
      "eval_samples_per_second": 1.594,
      "eval_steps_per_second": 0.398,
      "step": 40
    },
    {
      "epoch": 0.17777777777777778,
      "grad_norm": 0.21437789499759674,
      "learning_rate": 0.0002,
      "loss": 0.3396,
      "mean_token_accuracy": 0.9958536624908447,
      "num_tokens": 2851200.0,
      "step": 50
    },
    {
      "epoch": 0.17777777777777778,
      "eval_loss": 0.03179474174976349,
      "eval_mean_token_accuracy": 0.997560977935791,
      "eval_num_tokens": 2851200.0,
      "eval_runtime": 631.7748,
      "eval_samples_per_second": 1.583,
      "eval_steps_per_second": 0.396,
      "step": 50
    },
    {
      "epoch": 0.21333333333333335,
      "grad_norm": 1.2222466468811035,
      "learning_rate": 0.0002,
      "loss": 0.2231,
      "mean_token_accuracy": 0.997560977935791,
      "num_tokens": 3421440.0,
      "step": 60
    },
    {
      "epoch": 0.21333333333333335,
      "eval_loss": 0.017346156761050224,
      "eval_mean_token_accuracy": 0.997560977935791,
      "eval_num_tokens": 3421440.0,
      "eval_runtime": 627.3871,
      "eval_samples_per_second": 1.594,
      "eval_steps_per_second": 0.398,
      "step": 60
    },
    {
      "epoch": 0.24888888888888888,
      "grad_norm": 1.549639105796814,
      "learning_rate": 0.0002,
      "loss": 0.0636,
      "mean_token_accuracy": 0.9984908550977707,
      "num_tokens": 3991680.0,
      "step": 70
    },
    {
      "epoch": 0.24888888888888888,
      "eval_loss": 0.0006659275386482477,
      "eval_mean_token_accuracy": 1.0,
      "eval_num_tokens": 3991680.0,
      "eval_runtime": 627.7647,
      "eval_samples_per_second": 1.593,
      "eval_steps_per_second": 0.398,
      "step": 70
    },
    {
      "epoch": 0.28444444444444444,
      "grad_norm": 0.034244876354932785,
      "learning_rate": 0.0002,
      "loss": 0.0017,
      "mean_token_accuracy": 1.0,
      "num_tokens": 4561920.0,
      "step": 80
    },
    {
      "epoch": 0.28444444444444444,
      "eval_loss": 4.906835238216445e-05,
      "eval_mean_token_accuracy": 1.0,
      "eval_num_tokens": 4561920.0,
      "eval_runtime": 627.866,
      "eval_samples_per_second": 1.593,
      "eval_steps_per_second": 0.398,
      "step": 80
    },
    {
      "epoch": 0.32,
      "grad_norm": 0.004514188971370459,
      "learning_rate": 0.0002,
      "loss": 0.0003,
      "mean_token_accuracy": 1.0,
      "num_tokens": 5132160.0,
      "step": 90
    },
    {
      "epoch": 0.32,
      "eval_loss": 2.5992721930379048e-05,
      "eval_mean_token_accuracy": 1.0,
      "eval_num_tokens": 5132160.0,
      "eval_runtime": 630.504,
      "eval_samples_per_second": 1.586,
      "eval_steps_per_second": 0.397,
      "step": 90
    },
    {
      "epoch": 0.35555555555555557,
      "grad_norm": 0.0026128594763576984,
      "learning_rate": 0.0002,
      "loss": 0.0002,
      "mean_token_accuracy": 1.0,
      "num_tokens": 5702400.0,
      "step": 100
    },
    {
      "epoch": 0.35555555555555557,
      "eval_loss": 1.878049260994885e-05,
      "eval_mean_token_accuracy": 1.0,
      "eval_num_tokens": 5702400.0,
      "eval_runtime": 628.0227,
      "eval_samples_per_second": 1.592,
      "eval_steps_per_second": 0.398,
      "step": 100
    },
    {
      "epoch": 0.39111111111111113,
      "grad_norm": 0.0018496178090572357,
      "learning_rate": 0.0002,
      "loss": 0.0001,
      "mean_token_accuracy": 1.0,
      "num_tokens": 6272640.0,
      "step": 110
    },
    {
      "epoch": 0.39111111111111113,
      "eval_loss": 1.4819584976066835e-05,
      "eval_mean_token_accuracy": 1.0,
      "eval_num_tokens": 6272640.0,
      "eval_runtime": 632.337,
      "eval_samples_per_second": 1.581,
      "eval_steps_per_second": 0.395,
      "step": 110
    },
    {
      "epoch": 0.4266666666666667,
      "grad_norm": 0.001428118092007935,
      "learning_rate": 0.0002,
      "loss": 0.0001,
      "mean_token_accuracy": 1.0,
      "num_tokens": 6842880.0,
      "step": 120
    },
    {
      "epoch": 0.4266666666666667,
      "eval_loss": 1.2422192412486766e-05,
      "eval_mean_token_accuracy": 1.0,
      "eval_num_tokens": 6842880.0,
      "eval_runtime": 627.3485,
      "eval_samples_per_second": 1.594,
      "eval_steps_per_second": 0.399,
      "step": 120
    },
    {
      "epoch": 0.4622222222222222,
      "grad_norm": 0.0011839906219393015,
      "learning_rate": 0.0002,
      "loss": 0.0001,
      "mean_token_accuracy": 1.0,
      "num_tokens": 7413120.0,
      "step": 130
    },
    {
      "epoch": 0.4622222222222222,
      "eval_loss": 1.0789405678224284e-05,
      "eval_mean_token_accuracy": 1.0,
      "eval_num_tokens": 7413120.0,
      "eval_runtime": 630.5032,
      "eval_samples_per_second": 1.586,
      "eval_steps_per_second": 0.397,
      "step": 130
    },
    {
      "epoch": 0.49777777777777776,
      "grad_norm": 0.001027945545502007,
      "learning_rate": 0.0002,
      "loss": 0.0001,
      "mean_token_accuracy": 1.0,
      "num_tokens": 7983360.0,
      "step": 140
    },
    {
      "epoch": 0.49777777777777776,
      "eval_loss": 9.804618457565084e-06,
      "eval_mean_token_accuracy": 1.0,
      "eval_num_tokens": 7983360.0,
      "eval_runtime": 626.6713,
      "eval_samples_per_second": 1.596,
      "eval_steps_per_second": 0.399,
      "step": 140
    },
    {
      "epoch": 0.5333333333333333,
      "grad_norm": 0.0009170864359475672,
      "learning_rate": 0.0002,
      "loss": 0.0001,
      "mean_token_accuracy": 1.0,
      "num_tokens": 8553600.0,
      "step": 150
    },
    {
      "epoch": 0.5333333333333333,
      "eval_loss": 8.696494660398457e-06,
      "eval_mean_token_accuracy": 1.0,
      "eval_num_tokens": 8553600.0,
      "eval_runtime": 630.4604,
      "eval_samples_per_second": 1.586,
      "eval_steps_per_second": 0.397,
      "step": 150
    },
    {
      "epoch": 0.5688888888888889,
      "grad_norm": 0.0008168203639797866,
      "learning_rate": 0.0002,
      "loss": 0.0001,
      "mean_token_accuracy": 1.0,
      "num_tokens": 9123840.0,
      "step": 160
    },
    {
      "epoch": 0.5688888888888889,
      "eval_loss": 7.992691280378494e-06,
      "eval_mean_token_accuracy": 1.0,
      "eval_num_tokens": 9123840.0,
      "eval_runtime": 628.5297,
      "eval_samples_per_second": 1.591,
      "eval_steps_per_second": 0.398,
      "step": 160
    },
    {
      "epoch": 0.6044444444444445,
      "grad_norm": 0.0007576971547678113,
      "learning_rate": 0.0002,
      "loss": 0.0001,
      "mean_token_accuracy": 1.0,
      "num_tokens": 9694080.0,
      "step": 170
    },
    {
      "epoch": 0.6044444444444445,
      "eval_loss": 7.28438317310065e-06,
      "eval_mean_token_accuracy": 1.0,
      "eval_num_tokens": 9694080.0,
      "eval_runtime": 626.7184,
      "eval_samples_per_second": 1.596,
      "eval_steps_per_second": 0.399,
      "step": 170
    },
    {
      "epoch": 0.64,
      "grad_norm": 0.0006879908032715321,
      "learning_rate": 0.0002,
      "loss": 0.0001,
      "mean_token_accuracy": 1.0,
      "num_tokens": 10264320.0,
      "step": 180
    },
    {
      "epoch": 0.64,
      "eval_loss": 6.8836661739624105e-06,
      "eval_mean_token_accuracy": 1.0,
      "eval_num_tokens": 10264320.0,
      "eval_runtime": 756.3063,
      "eval_samples_per_second": 1.322,
      "eval_steps_per_second": 0.331,
      "step": 180
    },
    {
      "epoch": 0.6755555555555556,
      "grad_norm": 0.0006444182363338768,
      "learning_rate": 0.0002,
      "loss": 0.0001,
      "mean_token_accuracy": 1.0,
      "num_tokens": 10834560.0,
      "step": 190
    },
    {
      "epoch": 0.6755555555555556,
      "eval_loss": 6.4065607148222625e-06,
      "eval_mean_token_accuracy": 1.0,
      "eval_num_tokens": 10834560.0,
      "eval_runtime": 628.8265,
      "eval_samples_per_second": 1.59,
      "eval_steps_per_second": 0.398,
      "step": 190
    },
    {
      "epoch": 0.7111111111111111,
      "grad_norm": 0.0005997396074235439,
      "learning_rate": 0.0002,
      "loss": 0.0001,
      "mean_token_accuracy": 1.0,
      "num_tokens": 11404800.0,
      "step": 200
    },
    {
      "epoch": 0.7111111111111111,
      "eval_loss": 6.104866770328954e-06,
      "eval_mean_token_accuracy": 1.0,
      "eval_num_tokens": 11404800.0,
      "eval_runtime": 688.2367,
      "eval_samples_per_second": 1.453,
      "eval_steps_per_second": 0.363,
      "step": 200
    },
    {
      "epoch": 0.7466666666666667,
      "grad_norm": 0.0005526828463189304,
      "learning_rate": 0.0002,
      "loss": 0.0,
      "mean_token_accuracy": 1.0,
      "num_tokens": 11975040.0,
      "step": 210
    },
    {
      "epoch": 0.7466666666666667,
      "eval_loss": 5.683682047674665e-06,
      "eval_mean_token_accuracy": 1.0,
      "eval_num_tokens": 11975040.0,
      "eval_runtime": 754.3494,
      "eval_samples_per_second": 1.326,
      "eval_steps_per_second": 0.331,
      "step": 210
    },
    {
      "epoch": 0.7822222222222223,
      "grad_norm": 0.0005140851717442274,
      "learning_rate": 0.0002,
      "loss": 0.0,
      "mean_token_accuracy": 1.0,
      "num_tokens": 12545280.0,
      "step": 220
    },
    {
      "epoch": 0.7822222222222223,
      "eval_loss": 5.339933522918727e-06,
      "eval_mean_token_accuracy": 1.0,
      "eval_num_tokens": 12545280.0,
      "eval_runtime": 647.6999,
      "eval_samples_per_second": 1.544,
      "eval_steps_per_second": 0.386,
      "step": 220
    },
    {
      "epoch": 0.8177777777777778,
      "grad_norm": 0.0004886530223302543,
      "learning_rate": 0.0002,
      "loss": 0.0,
      "mean_token_accuracy": 1.0,
      "num_tokens": 13115520.0,
      "step": 230
    },
    {
      "epoch": 0.8177777777777778,
      "eval_loss": 5.0328567340329755e-06,
      "eval_mean_token_accuracy": 1.0,
      "eval_num_tokens": 13115520.0,
      "eval_runtime": 631.1227,
      "eval_samples_per_second": 1.584,
      "eval_steps_per_second": 0.396,
      "step": 230
    },
    {
      "epoch": 0.8533333333333334,
      "grad_norm": 0.0004592030309140682,
      "learning_rate": 0.0002,
      "loss": 0.0,
      "mean_token_accuracy": 1.0,
      "num_tokens": 13685760.0,
      "step": 240
    },
    {
      "epoch": 0.8533333333333334,
      "eval_loss": 4.829912541026715e-06,
      "eval_mean_token_accuracy": 1.0,
      "eval_num_tokens": 13685760.0,
      "eval_runtime": 631.5138,
      "eval_samples_per_second": 1.583,
      "eval_steps_per_second": 0.396,
      "step": 240
    },
    {
      "epoch": 0.8888888888888888,
      "grad_norm": 0.0004404531209729612,
      "learning_rate": 0.0002,
      "loss": 0.0,
      "mean_token_accuracy": 1.0,
      "num_tokens": 14256000.0,
      "step": 250
    },
    {
      "epoch": 0.8888888888888888,
      "eval_loss": 4.590192020259565e-06,
      "eval_mean_token_accuracy": 1.0,
      "eval_num_tokens": 14256000.0,
      "eval_runtime": 631.121,
      "eval_samples_per_second": 1.584,
      "eval_steps_per_second": 0.396,
      "step": 250
    },
    {
      "epoch": 0.9244444444444444,
      "grad_norm": 0.00041581166442483664,
      "learning_rate": 0.0002,
      "loss": 0.0,
      "mean_token_accuracy": 1.0,
      "num_tokens": 14826240.0,
      "step": 260
    },
    {
      "epoch": 0.9244444444444444,
      "eval_loss": 4.422760412126081e-06,
      "eval_mean_token_accuracy": 1.0,
      "eval_num_tokens": 14826240.0,
      "eval_runtime": 626.5972,
      "eval_samples_per_second": 1.596,
      "eval_steps_per_second": 0.399,
      "step": 260
    },
    {
      "epoch": 0.96,
      "grad_norm": 0.00039238750468939543,
      "learning_rate": 0.0002,
      "loss": 0.0,
      "mean_token_accuracy": 1.0,
      "num_tokens": 15396480.0,
      "step": 270
    },
    {
      "epoch": 0.96,
      "eval_loss": 4.159514901402872e-06,
      "eval_mean_token_accuracy": 1.0,
      "eval_num_tokens": 15396480.0,
      "eval_runtime": 626.6804,
      "eval_samples_per_second": 1.596,
      "eval_steps_per_second": 0.399,
      "step": 270
    },
    {
      "epoch": 0.9955555555555555,
      "grad_norm": 0.0003751284384634346,
      "learning_rate": 0.0002,
      "loss": 0.0,
      "mean_token_accuracy": 1.0,
      "num_tokens": 15966720.0,
      "step": 280
    },
    {
      "epoch": 0.9955555555555555,
      "eval_loss": 4.007378265669104e-06,
      "eval_mean_token_accuracy": 1.0,
      "eval_num_tokens": 15966720.0,
      "eval_runtime": 626.6082,
      "eval_samples_per_second": 1.596,
      "eval_steps_per_second": 0.399,
      "step": 280
    },
    {
      "epoch": 1.0284444444444445,
      "grad_norm": 0.0003600440395530313,
      "learning_rate": 0.0002,
      "loss": 0.0,
      "mean_token_accuracy": 1.0,
      "num_tokens": 16492410.0,
      "step": 290
    },
    {
      "epoch": 1.0284444444444445,
      "eval_loss": 3.86876126867719e-06,
      "eval_mean_token_accuracy": 1.0,
      "eval_num_tokens": 16492410.0,
      "eval_runtime": 630.0967,
      "eval_samples_per_second": 1.587,
      "eval_steps_per_second": 0.397,
      "step": 290
    },
    {
      "epoch": 1.064,
      "grad_norm": 0.00035800470504909754,
      "learning_rate": 0.0002,
      "loss": 0.0,
      "mean_token_accuracy": 1.0,
      "num_tokens": 17062650.0,
      "step": 300
    },
    {
      "epoch": 1.064,
      "eval_loss": 3.728666797542246e-06,
      "eval_mean_token_accuracy": 1.0,
      "eval_num_tokens": 17062650.0,
      "eval_runtime": 630.6538,
      "eval_samples_per_second": 1.586,
      "eval_steps_per_second": 0.396,
      "step": 300
    },
    {
      "epoch": 1.0995555555555556,
      "grad_norm": 0.0003293608024250716,
      "learning_rate": 0.0002,
      "loss": 0.0,
      "mean_token_accuracy": 1.0,
      "num_tokens": 17632890.0,
      "step": 310
    },
    {
      "epoch": 1.0995555555555556,
      "eval_loss": 3.5069003843091195e-06,
      "eval_mean_token_accuracy": 1.0,
      "eval_num_tokens": 17632890.0,
      "eval_runtime": 626.3062,
      "eval_samples_per_second": 1.597,
      "eval_steps_per_second": 0.399,
      "step": 310
    },
    {
      "epoch": 1.1351111111111112,
      "grad_norm": 0.000320440623909235,
      "learning_rate": 0.0002,
      "loss": 0.0,
      "mean_token_accuracy": 1.0,
      "num_tokens": 18203130.0,
      "step": 320
    },
    {
      "epoch": 1.1351111111111112,
      "eval_loss": 3.4313563901378075e-06,
      "eval_mean_token_accuracy": 1.0,
      "eval_num_tokens": 18203130.0,
      "eval_runtime": 626.8382,
      "eval_samples_per_second": 1.595,
      "eval_steps_per_second": 0.399,
      "step": 320
    },
    {
      "epoch": 1.1706666666666667,
      "grad_norm": 0.00031058763852342963,
      "learning_rate": 0.0002,
      "loss": 0.0,
      "mean_token_accuracy": 1.0,
      "num_tokens": 18773370.0,
      "step": 330
    },
    {
      "epoch": 1.1706666666666667,
      "eval_loss": 3.2939326501946198e-06,
      "eval_mean_token_accuracy": 1.0,
      "eval_num_tokens": 18773370.0,
      "eval_runtime": 631.1457,
      "eval_samples_per_second": 1.584,
      "eval_steps_per_second": 0.396,
      "step": 330
    },
    {
      "epoch": 1.2062222222222223,
      "grad_norm": 0.0002935119846370071,
      "learning_rate": 0.0002,
      "loss": 0.0,
      "mean_token_accuracy": 1.0,
      "num_tokens": 19343610.0,
      "step": 340
    },
    {
      "epoch": 1.2062222222222223,
      "eval_loss": 3.1630906960344873e-06,
      "eval_mean_token_accuracy": 1.0,
      "eval_num_tokens": 19343610.0,
      "eval_runtime": 631.1257,
      "eval_samples_per_second": 1.584,
      "eval_steps_per_second": 0.396,
      "step": 340
    },
    {
      "epoch": 1.2417777777777779,
      "grad_norm": 0.00028090347768738866,
      "learning_rate": 0.0002,
      "loss": 0.0,
      "mean_token_accuracy": 1.0,
      "num_tokens": 19913850.0,
      "step": 350
    },
    {
      "epoch": 1.2417777777777779,
      "eval_loss": 3.1076663162821205e-06,
      "eval_mean_token_accuracy": 1.0,
      "eval_num_tokens": 19913850.0,
      "eval_runtime": 631.1307,
      "eval_samples_per_second": 1.584,
      "eval_steps_per_second": 0.396,
      "step": 350
    },
    {
      "epoch": 1.2773333333333334,
      "grad_norm": 0.000271222204901278,
      "learning_rate": 0.0002,
      "loss": 0.0,
      "mean_token_accuracy": 1.0,
      "num_tokens": 20484090.0,
      "step": 360
    },
    {
      "epoch": 1.2773333333333334,
      "eval_loss": 2.9442144295899197e-06,
      "eval_mean_token_accuracy": 1.0,
      "eval_num_tokens": 20484090.0,
      "eval_runtime": 631.362,
      "eval_samples_per_second": 1.584,
      "eval_steps_per_second": 0.396,
      "step": 360
    },
    {
      "epoch": 1.3128888888888888,
      "grad_norm": 0.0002641026512719691,
      "learning_rate": 0.0002,
      "loss": 0.0,
      "mean_token_accuracy": 1.0,
      "num_tokens": 21054330.0,
      "step": 370
    },
    {
      "epoch": 1.3128888888888888,
      "eval_loss": 2.8731856218655594e-06,
      "eval_mean_token_accuracy": 1.0,
      "eval_num_tokens": 21054330.0,
      "eval_runtime": 626.4927,
      "eval_samples_per_second": 1.596,
      "eval_steps_per_second": 0.399,
      "step": 370
    },
    {
      "epoch": 1.3484444444444446,
      "grad_norm": 0.0002599551226012409,
      "learning_rate": 0.0002,
      "loss": 0.0,
      "mean_token_accuracy": 1.0,
      "num_tokens": 21624570.0,
      "step": 380
    },
    {
      "epoch": 1.3484444444444446,
      "eval_loss": 2.7712894734577276e-06,
      "eval_mean_token_accuracy": 1.0,
      "eval_num_tokens": 21624570.0,
      "eval_runtime": 626.4481,
      "eval_samples_per_second": 1.596,
      "eval_steps_per_second": 0.399,
      "step": 380
    },
    {
      "epoch": 1.384,
      "grad_norm": 0.0002494712534826249,
      "learning_rate": 0.0002,
      "loss": 0.0,
      "mean_token_accuracy": 1.0,
      "num_tokens": 22194810.0,
      "step": 390
    },
    {
      "epoch": 1.384,
      "eval_loss": 2.666238515303121e-06,
      "eval_mean_token_accuracy": 1.0,
      "eval_num_tokens": 22194810.0,
      "eval_runtime": 626.3873,
      "eval_samples_per_second": 1.596,
      "eval_steps_per_second": 0.399,
      "step": 390
    },
    {
      "epoch": 1.4195555555555557,
      "grad_norm": 0.00024175533326342702,
      "learning_rate": 0.0002,
      "loss": 0.0,
      "mean_token_accuracy": 1.0,
      "num_tokens": 22765050.0,
      "step": 400
    },
    {
      "epoch": 1.4195555555555557,
      "eval_loss": 2.613794549688464e-06,
      "eval_mean_token_accuracy": 1.0,
      "eval_num_tokens": 22765050.0,
      "eval_runtime": 625.5663,
      "eval_samples_per_second": 1.599,
      "eval_steps_per_second": 0.4,
      "step": 400
    }
  ],
  "logging_steps": 10,
  "max_steps": 846,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 3,
  "save_steps": 20,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": false
      },
      "attributes": {}
    }
  },
  "total_flos": 1.0584247636182528e+18,
  "train_batch_size": 4,
  "trial_name": null,
  "trial_params": null
}