{
  "best_global_step": 360,
  "best_metric": 0.018723424524068832,
  "best_model_checkpoint": "saves_multiple/lora/llama-3-8b-instruct/train_copa_101112_1760637989/checkpoint-360",
  "epoch": 20.0,
  "eval_steps": 90,
  "global_step": 1800,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.05555555555555555,
      "grad_norm": 6.969470024108887,
      "learning_rate": 1.1111111111111112e-06,
      "loss": 0.5167,
      "num_input_tokens_seen": 1600,
      "step": 5
    },
    {
      "epoch": 0.1111111111111111,
      "grad_norm": 6.765899181365967,
      "learning_rate": 2.5e-06,
      "loss": 0.7311,
      "num_input_tokens_seen": 3200,
      "step": 10
    },
    {
      "epoch": 0.16666666666666666,
      "grad_norm": 7.176608562469482,
      "learning_rate": 3.888888888888889e-06,
      "loss": 0.5149,
      "num_input_tokens_seen": 4736,
      "step": 15
    },
    {
      "epoch": 0.2222222222222222,
      "grad_norm": 7.202151775360107,
      "learning_rate": 5.277777777777778e-06,
      "loss": 0.5083,
      "num_input_tokens_seen": 6368,
      "step": 20
    },
    {
      "epoch": 0.2777777777777778,
      "grad_norm": 7.118894577026367,
      "learning_rate": 6.666666666666667e-06,
      "loss": 0.5981,
      "num_input_tokens_seen": 7872,
      "step": 25
    },
    {
      "epoch": 0.3333333333333333,
      "grad_norm": 5.019943714141846,
      "learning_rate": 8.055555555555557e-06,
      "loss": 0.3807,
      "num_input_tokens_seen": 9440,
      "step": 30
    },
    {
      "epoch": 0.3888888888888889,
      "grad_norm": 3.9722704887390137,
      "learning_rate": 9.444444444444445e-06,
      "loss": 0.1535,
      "num_input_tokens_seen": 11008,
      "step": 35
    },
    {
      "epoch": 0.4444444444444444,
      "grad_norm": 0.09416825324296951,
      "learning_rate": 1.0833333333333334e-05,
      "loss": 0.0363,
      "num_input_tokens_seen": 12608,
      "step": 40
    },
    {
      "epoch": 0.5,
      "grad_norm": 1.3904372453689575,
      "learning_rate": 1.2222222222222222e-05,
      "loss": 0.1202,
      "num_input_tokens_seen": 14144,
      "step": 45
    },
    {
      "epoch": 0.5555555555555556,
      "grad_norm": 0.2530200779438019,
      "learning_rate": 1.3611111111111111e-05,
      "loss": 0.3211,
      "num_input_tokens_seen": 15744,
      "step": 50
    },
    {
      "epoch": 0.6111111111111112,
      "grad_norm": 0.8808571100234985,
      "learning_rate": 1.5e-05,
      "loss": 0.1725,
      "num_input_tokens_seen": 17312,
      "step": 55
    },
    {
      "epoch": 0.6666666666666666,
      "grad_norm": 0.05842273309826851,
      "learning_rate": 1.638888888888889e-05,
      "loss": 0.0036,
      "num_input_tokens_seen": 18880,
      "step": 60
    },
    {
      "epoch": 0.7222222222222222,
      "grad_norm": 6.380139350891113,
      "learning_rate": 1.777777777777778e-05,
      "loss": 0.0469,
      "num_input_tokens_seen": 20416,
      "step": 65
    },
    {
      "epoch": 0.7777777777777778,
      "grad_norm": 0.608525276184082,
      "learning_rate": 1.9166666666666667e-05,
      "loss": 0.0511,
      "num_input_tokens_seen": 21920,
      "step": 70
    },
    {
      "epoch": 0.8333333333333334,
      "grad_norm": 0.10613561421632767,
      "learning_rate": 2.0555555555555555e-05,
      "loss": 0.0814,
      "num_input_tokens_seen": 23488,
      "step": 75
    },
    {
      "epoch": 0.8888888888888888,
      "grad_norm": 3.7538535594940186,
      "learning_rate": 2.1944444444444445e-05,
      "loss": 0.0793,
      "num_input_tokens_seen": 25056,
      "step": 80
    },
    {
      "epoch": 0.9444444444444444,
      "grad_norm": 6.2909722328186035,
      "learning_rate": 2.3333333333333336e-05,
      "loss": 0.2267,
      "num_input_tokens_seen": 26592,
      "step": 85
    },
    {
      "epoch": 1.0,
      "grad_norm": 5.723038673400879,
      "learning_rate": 2.4722222222222223e-05,
      "loss": 0.3004,
      "num_input_tokens_seen": 28192,
      "step": 90
    },
    {
      "epoch": 1.0,
      "eval_loss": 0.03621668741106987,
      "eval_runtime": 0.5606,
      "eval_samples_per_second": 71.357,
      "eval_steps_per_second": 17.839,
      "num_input_tokens_seen": 28192,
      "step": 90
    },
    {
      "epoch": 1.0555555555555556,
      "grad_norm": 4.0318193435668945,
      "learning_rate": 2.6111111111111114e-05,
      "loss": 0.0941,
      "num_input_tokens_seen": 29792,
      "step": 95
    },
    {
      "epoch": 1.1111111111111112,
      "grad_norm": 0.7587825655937195,
      "learning_rate": 2.7500000000000004e-05,
      "loss": 0.0461,
      "num_input_tokens_seen": 31360,
      "step": 100
    },
    {
      "epoch": 1.1666666666666667,
      "grad_norm": 0.19478577375411987,
      "learning_rate": 2.8888888888888888e-05,
      "loss": 0.0026,
      "num_input_tokens_seen": 32832,
      "step": 105
    },
    {
      "epoch": 1.2222222222222223,
      "grad_norm": 0.031167134642601013,
      "learning_rate": 3.0277777777777776e-05,
      "loss": 0.032,
      "num_input_tokens_seen": 34336,
      "step": 110
    },
    {
      "epoch": 1.2777777777777777,
      "grad_norm": 4.281861782073975,
      "learning_rate": 3.1666666666666666e-05,
      "loss": 0.1357,
      "num_input_tokens_seen": 35936,
      "step": 115
    },
    {
      "epoch": 1.3333333333333333,
      "grad_norm": 2.4672231674194336,
      "learning_rate": 3.3055555555555553e-05,
      "loss": 0.0215,
      "num_input_tokens_seen": 37536,
      "step": 120
    },
    {
      "epoch": 1.3888888888888888,
      "grad_norm": 0.5595906376838684,
      "learning_rate": 3.444444444444445e-05,
      "loss": 0.012,
      "num_input_tokens_seen": 39040,
      "step": 125
    },
    {
      "epoch": 1.4444444444444444,
      "grad_norm": 0.013518299907445908,
      "learning_rate": 3.5833333333333335e-05,
      "loss": 0.1782,
      "num_input_tokens_seen": 40576,
      "step": 130
    },
    {
      "epoch": 1.5,
      "grad_norm": 6.465619087219238,
      "learning_rate": 3.722222222222222e-05,
      "loss": 0.052,
      "num_input_tokens_seen": 42208,
      "step": 135
    },
    {
      "epoch": 1.5555555555555556,
      "grad_norm": 6.538039207458496,
      "learning_rate": 3.8611111111111116e-05,
      "loss": 0.1245,
      "num_input_tokens_seen": 43776,
      "step": 140
    },
    {
      "epoch": 1.6111111111111112,
      "grad_norm": 0.006285431794822216,
      "learning_rate": 4e-05,
      "loss": 0.0052,
      "num_input_tokens_seen": 45376,
      "step": 145
    },
    {
      "epoch": 1.6666666666666665,
      "grad_norm": 0.0060833171010017395,
      "learning_rate": 4.138888888888889e-05,
      "loss": 0.0882,
      "num_input_tokens_seen": 46944,
      "step": 150
    },
    {
      "epoch": 1.7222222222222223,
      "grad_norm": 0.4324057698249817,
      "learning_rate": 4.277777777777778e-05,
      "loss": 0.0882,
      "num_input_tokens_seen": 48512,
      "step": 155
    },
    {
      "epoch": 1.7777777777777777,
      "grad_norm": 0.002744528232142329,
      "learning_rate": 4.4166666666666665e-05,
      "loss": 0.0334,
      "num_input_tokens_seen": 50112,
      "step": 160
    },
    {
      "epoch": 1.8333333333333335,
      "grad_norm": 1.714436650276184,
      "learning_rate": 4.555555555555556e-05,
      "loss": 0.119,
      "num_input_tokens_seen": 51680,
      "step": 165
    },
    {
      "epoch": 1.8888888888888888,
      "grad_norm": 0.32935142517089844,
      "learning_rate": 4.6944444444444446e-05,
      "loss": 0.0039,
      "num_input_tokens_seen": 53216,
      "step": 170
    },
    {
      "epoch": 1.9444444444444444,
      "grad_norm": 0.10427265614271164,
      "learning_rate": 4.8333333333333334e-05,
      "loss": 0.2664,
      "num_input_tokens_seen": 54720,
      "step": 175
    },
    {
      "epoch": 2.0,
      "grad_norm": 4.9518842697143555,
      "learning_rate": 4.972222222222223e-05,
      "loss": 0.0223,
      "num_input_tokens_seen": 56256,
      "step": 180
    },
    {
      "epoch": 2.0,
      "eval_loss": 0.019590985029935837,
      "eval_runtime": 0.5639,
      "eval_samples_per_second": 70.938,
      "eval_steps_per_second": 17.734,
      "num_input_tokens_seen": 56256,
      "step": 180
    },
    {
      "epoch": 2.0555555555555554,
      "grad_norm": 1.6625897884368896,
      "learning_rate": 4.9999247861994194e-05,
      "loss": 0.0047,
      "num_input_tokens_seen": 57856,
      "step": 185
    },
    {
      "epoch": 2.111111111111111,
      "grad_norm": 0.20017452538013458,
      "learning_rate": 4.9996192378909786e-05,
      "loss": 0.0013,
      "num_input_tokens_seen": 59456,
      "step": 190
    },
    {
      "epoch": 2.1666666666666665,
      "grad_norm": 0.23358377814292908,
      "learning_rate": 4.999078682916774e-05,
      "loss": 0.046,
      "num_input_tokens_seen": 61024,
      "step": 195
    },
    {
      "epoch": 2.2222222222222223,
      "grad_norm": 0.006113876588642597,
      "learning_rate": 4.998303172098155e-05,
      "loss": 0.0031,
      "num_input_tokens_seen": 62592,
      "step": 200
    },
    {
      "epoch": 2.2777777777777777,
      "grad_norm": 0.0304940864443779,
      "learning_rate": 4.997292778346312e-05,
      "loss": 0.0004,
      "num_input_tokens_seen": 64128,
      "step": 205
    },
    {
      "epoch": 2.3333333333333335,
      "grad_norm": 0.006477740593254566,
      "learning_rate": 4.996047596655418e-05,
      "loss": 0.1357,
      "num_input_tokens_seen": 65696,
      "step": 210
    },
    {
      "epoch": 2.388888888888889,
      "grad_norm": 0.22924263775348663,
      "learning_rate": 4.994567744093703e-05,
      "loss": 0.0792,
      "num_input_tokens_seen": 67200,
      "step": 215
    },
    {
      "epoch": 2.4444444444444446,
      "grad_norm": 4.572148323059082,
      "learning_rate": 4.992853359792444e-05,
      "loss": 0.0934,
      "num_input_tokens_seen": 68800,
      "step": 220
    },
    {
      "epoch": 2.5,
      "grad_norm": 0.06531395763158798,
      "learning_rate": 4.9909046049328846e-05,
      "loss": 0.0038,
      "num_input_tokens_seen": 70368,
      "step": 225
    },
    {
      "epoch": 2.5555555555555554,
      "grad_norm": 0.1085778996348381,
      "learning_rate": 4.988721662731083e-05,
      "loss": 0.001,
      "num_input_tokens_seen": 71872,
      "step": 230
    },
    {
      "epoch": 2.611111111111111,
      "grad_norm": 0.06615499407052994,
      "learning_rate": 4.9863047384206835e-05,
      "loss": 0.0004,
      "num_input_tokens_seen": 73472,
      "step": 235
    },
    {
      "epoch": 2.6666666666666665,
      "grad_norm": 0.002233164617791772,
      "learning_rate": 4.983654059233626e-05,
      "loss": 0.0009,
      "num_input_tokens_seen": 75040,
      "step": 240
    },
    {
      "epoch": 2.7222222222222223,
      "grad_norm": 0.0015631787246093154,
      "learning_rate": 4.9807698743787744e-05,
      "loss": 0.0001,
      "num_input_tokens_seen": 76544,
      "step": 245
    },
    {
      "epoch": 2.7777777777777777,
      "grad_norm": 0.7781164050102234,
      "learning_rate": 4.9776524550184965e-05,
      "loss": 0.0172,
      "num_input_tokens_seen": 78048,
      "step": 250
    },
    {
      "epoch": 2.8333333333333335,
      "grad_norm": 0.001406413852237165,
      "learning_rate": 4.974302094243164e-05,
      "loss": 0.002,
      "num_input_tokens_seen": 79680,
      "step": 255
    },
    {
      "epoch": 2.888888888888889,
      "grad_norm": 0.00064898154232651,
      "learning_rate": 4.970719107043595e-05,
      "loss": 0.0002,
      "num_input_tokens_seen": 81184,
      "step": 260
    },
    {
      "epoch": 2.9444444444444446,
      "grad_norm": 13.987990379333496,
      "learning_rate": 4.966903830281449e-05,
      "loss": 0.0508,
      "num_input_tokens_seen": 82784,
      "step": 265
    },
    {
      "epoch": 3.0,
      "grad_norm": 0.0021102940663695335,
      "learning_rate": 4.962856622657541e-05,
      "loss": 0.0,
      "num_input_tokens_seen": 84320,
      "step": 270
    },
    {
      "epoch": 3.0,
      "eval_loss": 0.027023976668715477,
      "eval_runtime": 0.5674,
      "eval_samples_per_second": 70.503,
      "eval_steps_per_second": 17.626,
      "num_input_tokens_seen": 84320,
      "step": 270
    },
    {
      "epoch": 3.0555555555555554,
      "grad_norm": 0.006488146726042032,
      "learning_rate": 4.9585778646781364e-05,
      "loss": 0.0002,
      "num_input_tokens_seen": 85920,
      "step": 275
    },
    {
      "epoch": 3.111111111111111,
      "grad_norm": 0.007048919331282377,
      "learning_rate": 4.9540679586191605e-05,
      "loss": 0.0001,
      "num_input_tokens_seen": 87456,
      "step": 280
    },
    {
      "epoch": 3.1666666666666665,
      "grad_norm": 0.0009962964104488492,
      "learning_rate": 4.9493273284883854e-05,
      "loss": 0.0096,
      "num_input_tokens_seen": 89056,
      "step": 285
    },
    {
      "epoch": 3.2222222222222223,
      "grad_norm": 0.004784135147929192,
      "learning_rate": 4.9443564199855666e-05,
      "loss": 0.0011,
      "num_input_tokens_seen": 90560,
      "step": 290
    },
    {
      "epoch": 3.2777777777777777,
      "grad_norm": 0.010379524901509285,
      "learning_rate": 4.939155700460536e-05,
      "loss": 0.0,
      "num_input_tokens_seen": 92128,
      "step": 295
    },
    {
      "epoch": 3.3333333333333335,
      "grad_norm": 0.0009571870905347168,
      "learning_rate": 4.933725658869267e-05,
      "loss": 0.0,
      "num_input_tokens_seen": 93728,
      "step": 300
    },
    {
      "epoch": 3.388888888888889,
      "grad_norm": 0.0011284482898190618,
      "learning_rate": 4.9280668057279014e-05,
      "loss": 0.0,
      "num_input_tokens_seen": 95264,
      "step": 305
    },
    {
      "epoch": 3.4444444444444446,
      "grad_norm": 0.0012076611164957285,
      "learning_rate": 4.9221796730647516e-05,
      "loss": 0.0116,
      "num_input_tokens_seen": 96768,
      "step": 310
    },
    {
      "epoch": 3.5,
      "grad_norm": 0.001389971817843616,
      "learning_rate": 4.916064814370287e-05,
      "loss": 0.0,
      "num_input_tokens_seen": 98400,
      "step": 315
    },
    {
      "epoch": 3.5555555555555554,
      "grad_norm": 0.005829916335642338,
      "learning_rate": 4.9097228045450864e-05,
      "loss": 0.0001,
      "num_input_tokens_seen": 99968,
      "step": 320
    },
    {
      "epoch": 3.611111111111111,
      "grad_norm": 0.0029490182641893625,
      "learning_rate": 4.9031542398457974e-05,
      "loss": 0.0,
      "num_input_tokens_seen": 101504,
      "step": 325
    },
    {
      "epoch": 3.6666666666666665,
      "grad_norm": 0.001763415988534689,
      "learning_rate": 4.896359737829071e-05,
      "loss": 0.0002,
      "num_input_tokens_seen": 103008,
      "step": 330
    },
    {
      "epoch": 3.7222222222222223,
      "grad_norm": 0.0006385208107531071,
      "learning_rate": 4.889339937293508e-05,
      "loss": 0.0,
      "num_input_tokens_seen": 104608,
      "step": 335
    },
    {
      "epoch": 3.7777777777777777,
      "grad_norm": 0.002814389066770673,
      "learning_rate": 4.8820954982195905e-05,
      "loss": 0.0,
      "num_input_tokens_seen": 106144,
      "step": 340
    },
    {
      "epoch": 3.8333333333333335,
      "grad_norm": 0.0006206316174939275,
      "learning_rate": 4.874627101707644e-05,
      "loss": 0.0,
      "num_input_tokens_seen": 107744,
      "step": 345
    },
    {
      "epoch": 3.888888888888889,
      "grad_norm": 0.00754989730194211,
      "learning_rate": 4.8669354499137955e-05,
      "loss": 0.0,
      "num_input_tokens_seen": 109280,
      "step": 350
    },
    {
      "epoch": 3.9444444444444446,
      "grad_norm": 0.0007078059134073555,
      "learning_rate": 4.859021265983959e-05,
      "loss": 0.0001,
      "num_input_tokens_seen": 110848,
      "step": 355
    },
    {
      "epoch": 4.0,
      "grad_norm": 0.0009143942734226584,
      "learning_rate": 4.850885293985853e-05,
      "loss": 0.0,
      "num_input_tokens_seen": 112416,
      "step": 360
    },
    {
      "epoch": 4.0,
      "eval_loss": 0.018723424524068832,
      "eval_runtime": 0.5654,
      "eval_samples_per_second": 70.742,
      "eval_steps_per_second": 17.686,
      "num_input_tokens_seen": 112416,
      "step": 360
    },
    {
      "epoch": 4.055555555555555,
      "grad_norm": 0.0007080311770550907,
      "learning_rate": 4.8425282988390376e-05,
      "loss": 0.0,
      "num_input_tokens_seen": 113984,
      "step": 365
    },
    {
      "epoch": 4.111111111111111,
      "grad_norm": 0.00044843670912086964,
      "learning_rate": 4.8339510662430046e-05,
      "loss": 0.0,
      "num_input_tokens_seen": 115552,
      "step": 370
    },
    {
      "epoch": 4.166666666666667,
      "grad_norm": 0.0005601881421171129,
      "learning_rate": 4.825154402603308e-05,
      "loss": 0.0,
      "num_input_tokens_seen": 117152,
      "step": 375
    },
    {
      "epoch": 4.222222222222222,
      "grad_norm": 0.0014347453834488988,
      "learning_rate": 4.816139134955746e-05,
      "loss": 0.0,
      "num_input_tokens_seen": 118784,
      "step": 380
    },
    {
      "epoch": 4.277777777777778,
      "grad_norm": 0.10178469866514206,
      "learning_rate": 4.806906110888606e-05,
      "loss": 0.0001,
      "num_input_tokens_seen": 120256,
      "step": 385
    },
    {
      "epoch": 4.333333333333333,
      "grad_norm": 0.017962772399187088,
      "learning_rate": 4.797456198462979e-05,
      "loss": 0.0,
      "num_input_tokens_seen": 121824,
      "step": 390
    },
    {
      "epoch": 4.388888888888889,
      "grad_norm": 0.0006753335474058986,
      "learning_rate": 4.7877902861311446e-05,
      "loss": 0.0,
      "num_input_tokens_seen": 123392,
      "step": 395
    },
    {
      "epoch": 4.444444444444445,
      "grad_norm": 0.0006766640581190586,
      "learning_rate": 4.777909282653042e-05,
      "loss": 0.0,
      "num_input_tokens_seen": 124928,
      "step": 400
    },
    {
      "epoch": 4.5,
      "grad_norm": 0.0005540185375139117,
      "learning_rate": 4.7678141170108345e-05,
      "loss": 0.0,
      "num_input_tokens_seen": 126496,
      "step": 405
    },
    {
      "epoch": 4.555555555555555,
      "grad_norm": 0.0004221517301630229,
      "learning_rate": 4.757505738321563e-05,
      "loss": 0.0,
      "num_input_tokens_seen": 128064,
      "step": 410
    },
    {
      "epoch": 4.611111111111111,
      "grad_norm": 0.0003651464357972145,
      "learning_rate": 4.7469851157479177e-05,
      "loss": 0.0,
      "num_input_tokens_seen": 129632,
      "step": 415
    },
    {
      "epoch": 4.666666666666667,
      "grad_norm": 0.009821389801800251,
      "learning_rate": 4.736253238407119e-05,
      "loss": 0.0,
      "num_input_tokens_seen": 131200,
      "step": 420
    },
    {
      "epoch": 4.722222222222222,
      "grad_norm": 0.000310019648168236,
      "learning_rate": 4.725311115277924e-05,
      "loss": 0.0,
      "num_input_tokens_seen": 132800,
      "step": 425
    },
    {
      "epoch": 4.777777777777778,
      "grad_norm": 0.0003390127676539123,
      "learning_rate": 4.714159775105765e-05,
      "loss": 0.0,
      "num_input_tokens_seen": 134336,
      "step": 430
    },
    {
      "epoch": 4.833333333333333,
      "grad_norm": 0.00039185571949929,
      "learning_rate": 4.70280026630603e-05,
      "loss": 0.0,
      "num_input_tokens_seen": 135904,
      "step": 435
    },
    {
      "epoch": 4.888888888888889,
      "grad_norm": 0.0007595600327476859,
      "learning_rate": 4.6912336568654925e-05,
      "loss": 0.0,
      "num_input_tokens_seen": 137504,
      "step": 440
    },
    {
      "epoch": 4.944444444444445,
      "grad_norm": 0.0005244743661023676,
      "learning_rate": 4.679461034241906e-05,
      "loss": 0.0,
      "num_input_tokens_seen": 139040,
      "step": 445
    },
    {
      "epoch": 5.0,
      "grad_norm": 0.00047000046470202506,
      "learning_rate": 4.667483505261762e-05,
      "loss": 0.0,
      "num_input_tokens_seen": 140544,
      "step": 450
    },
    {
      "epoch": 5.0,
      "eval_loss": 0.023480204865336418,
      "eval_runtime": 0.5647,
      "eval_samples_per_second": 70.836,
      "eval_steps_per_second": 17.709,
      "num_input_tokens_seen": 140544,
      "step": 450
    },
    {
      "epoch": 5.055555555555555,
      "grad_norm": 0.0004230744962114841,
      "learning_rate": 4.655302196016228e-05,
      "loss": 0.0,
      "num_input_tokens_seen": 142176,
      "step": 455
    },
    {
      "epoch": 5.111111111111111,
      "grad_norm": 0.000272398378001526,
      "learning_rate": 4.642918251755281e-05,
      "loss": 0.0,
      "num_input_tokens_seen": 143776,
      "step": 460
    },
    {
      "epoch": 5.166666666666667,
      "grad_norm": 0.0004483854863792658,
      "learning_rate": 4.6303328367800284e-05,
      "loss": 0.0,
      "num_input_tokens_seen": 145312,
      "step": 465
    },
    {
      "epoch": 5.222222222222222,
      "grad_norm": 0.001920283422805369,
      "learning_rate": 4.6175471343332485e-05,
      "loss": 0.0,
      "num_input_tokens_seen": 146880,
      "step": 470
    },
    {
      "epoch": 5.277777777777778,
      "grad_norm": 0.009260191582143307,
      "learning_rate": 4.604562346488144e-05,
      "loss": 0.0,
      "num_input_tokens_seen": 148416,
      "step": 475
    },
    {
      "epoch": 5.333333333333333,
      "grad_norm": 0.0003528697998262942,
      "learning_rate": 4.591379694035325e-05,
      "loss": 0.0,
      "num_input_tokens_seen": 149952,
      "step": 480
    },
    {
      "epoch": 5.388888888888889,
      "grad_norm": 0.0005055447691120207,
      "learning_rate": 4.5780004163680365e-05,
      "loss": 0.0,
      "num_input_tokens_seen": 151488,
      "step": 485
    },
    {
      "epoch": 5.444444444444445,
      "grad_norm": 0.0004068550479132682,
      "learning_rate": 4.5644257713656356e-05,
      "loss": 0.0,
      "num_input_tokens_seen": 153024,
      "step": 490
    },
    {
      "epoch": 5.5,
      "grad_norm": 0.0002740756899584085,
      "learning_rate": 4.550657035275323e-05,
      "loss": 0.0,
      "num_input_tokens_seen": 154592,
      "step": 495
    },
    {
      "epoch": 5.555555555555555,
      "grad_norm": 0.000244280876358971,
      "learning_rate": 4.536695502592162e-05,
      "loss": 0.0,
      "num_input_tokens_seen": 156128,
      "step": 500
    },
    {
      "epoch": 5.611111111111111,
      "grad_norm": 0.0007038679905235767,
      "learning_rate": 4.522542485937369e-05,
      "loss": 0.0,
      "num_input_tokens_seen": 157728,
      "step": 505
    },
    {
      "epoch": 5.666666666666667,
      "grad_norm": 0.010191836394369602,
      "learning_rate": 4.5081993159349056e-05,
      "loss": 0.0,
      "num_input_tokens_seen": 159296,
      "step": 510
    },
    {
      "epoch": 5.722222222222222,
      "grad_norm": 0.0002481580595485866,
      "learning_rate": 4.493667341086379e-05,
      "loss": 0.0,
      "num_input_tokens_seen": 160896,
      "step": 515
    },
    {
      "epoch": 5.777777777777778,
      "grad_norm": 0.0004509767168201506,
      "learning_rate": 4.478947927644258e-05,
      "loss": 0.0,
      "num_input_tokens_seen": 162400,
      "step": 520
    },
    {
      "epoch": 5.833333333333333,
      "grad_norm": 0.00023253183462657034,
      "learning_rate": 4.464042459483425e-05,
      "loss": 0.0,
      "num_input_tokens_seen": 163968,
      "step": 525
    },
    {
      "epoch": 5.888888888888889,
      "grad_norm": 0.0011648385552689433,
      "learning_rate": 4.448952337971064e-05,
      "loss": 0.0,
      "num_input_tokens_seen": 165536,
      "step": 530
    },
    {
      "epoch": 5.944444444444445,
      "grad_norm": 0.00022991045261733234,
      "learning_rate": 4.43367898183491e-05,
      "loss": 0.0,
      "num_input_tokens_seen": 167168,
      "step": 535
    },
    {
      "epoch": 6.0,
      "grad_norm": 0.0005931927589699626,
      "learning_rate": 4.418223827029867e-05,
      "loss": 0.0,
      "num_input_tokens_seen": 168768,
      "step": 540
    },
    {
      "epoch": 6.0,
      "eval_loss": 0.021528184413909912,
      "eval_runtime": 0.5668,
      "eval_samples_per_second": 70.573,
      "eval_steps_per_second": 17.643,
      "num_input_tokens_seen": 168768,
      "step": 540
    },
    {
      "epoch": 6.055555555555555,
      "grad_norm": 0.00021295134501997381,
      "learning_rate": 4.402588326603002e-05,
      "loss": 0.0,
      "num_input_tokens_seen": 170336,
      "step": 545
    },
    {
      "epoch": 6.111111111111111,
      "grad_norm": 0.0002571464574430138,
      "learning_rate": 4.386773950556931e-05,
      "loss": 0.0,
      "num_input_tokens_seen": 171904,
      "step": 550
    },
    {
      "epoch": 6.166666666666667,
      "grad_norm": 0.00032813395955599844,
      "learning_rate": 4.3707821857116176e-05,
      "loss": 0.0,
      "num_input_tokens_seen": 173440,
      "step": 555
    },
    {
      "epoch": 6.222222222222222,
      "grad_norm": 0.00020974560175091028,
      "learning_rate": 4.354614535564588e-05,
      "loss": 0.0,
      "num_input_tokens_seen": 174976,
      "step": 560
    },
    {
      "epoch": 6.277777777777778,
      "grad_norm": 0.0002499670663382858,
      "learning_rate": 4.3382725201495723e-05,
      "loss": 0.0,
      "num_input_tokens_seen": 176608,
      "step": 565
    },
    {
      "epoch": 6.333333333333333,
      "grad_norm": 0.00023183479788713157,
      "learning_rate": 4.321757675893596e-05,
      "loss": 0.0,
      "num_input_tokens_seen": 178144,
      "step": 570
    },
    {
      "epoch": 6.388888888888889,
      "grad_norm": 0.00033398810774087906,
      "learning_rate": 4.305071555472534e-05,
      "loss": 0.0,
      "num_input_tokens_seen": 179680,
      "step": 575
    },
    {
      "epoch": 6.444444444444445,
      "grad_norm": 0.004918704275041819,
      "learning_rate": 4.288215727665129e-05,
      "loss": 0.0,
      "num_input_tokens_seen": 181248,
      "step": 580
    },
    {
      "epoch": 6.5,
      "grad_norm": 0.00017940586258191615,
      "learning_rate": 4.2711917772055e-05,
      "loss": 0.0,
      "num_input_tokens_seen": 182848,
      "step": 585
    },
    {
      "epoch": 6.555555555555555,
      "grad_norm": 0.0002138900017598644,
      "learning_rate": 4.254001304634151e-05,
      "loss": 0.0,
      "num_input_tokens_seen": 184352,
      "step": 590
    },
    {
      "epoch": 6.611111111111111,
      "grad_norm": 0.00017553276848047972,
      "learning_rate": 4.2366459261474933e-05,
      "loss": 0.0,
      "num_input_tokens_seen": 185952,
      "step": 595
    },
    {
      "epoch": 6.666666666666667,
      "grad_norm": 0.0005434207268990576,
      "learning_rate": 4.2191272734458955e-05,
      "loss": 0.0,
      "num_input_tokens_seen": 187488,
      "step": 600
    },
    {
      "epoch": 6.722222222222222,
      "grad_norm": 0.00020542769925668836,
      "learning_rate": 4.201446993580276e-05,
      "loss": 0.0,
      "num_input_tokens_seen": 189056,
      "step": 605
    },
    {
      "epoch": 6.777777777777778,
      "grad_norm": 0.00019911080016754568,
      "learning_rate": 4.183606748797251e-05,
      "loss": 0.0,
      "num_input_tokens_seen": 190592,
      "step": 610
    },
    {
      "epoch": 6.833333333333333,
      "grad_norm": 0.00028096616733819246,
      "learning_rate": 4.1656082163828566e-05,
      "loss": 0.0,
      "num_input_tokens_seen": 192192,
      "step": 615
    },
    {
      "epoch": 6.888888888888889,
      "grad_norm": 0.00032200716668739915,
      "learning_rate": 4.147453088504854e-05,
      "loss": 0.0,
      "num_input_tokens_seen": 193824,
      "step": 620
    },
    {
      "epoch": 6.944444444444445,
      "grad_norm": 0.0001844267826527357,
      "learning_rate": 4.129143072053638e-05,
      "loss": 0.0,
      "num_input_tokens_seen": 195392,
      "step": 625
    },
    {
      "epoch": 7.0,
      "grad_norm": 0.0008159065037034452,
      "learning_rate": 4.110679888481763e-05,
      "loss": 0.0,
      "num_input_tokens_seen": 196896,
      "step": 630
    },
    {
      "epoch": 7.0,
      "eval_loss": 0.02152571827173233,
      "eval_runtime": 0.5735,
      "eval_samples_per_second": 69.748,
      "eval_steps_per_second": 17.437,
      "num_input_tokens_seen": 196896,
      "step": 630
    },
    {
      "epoch": 7.055555555555555,
      "grad_norm": 0.00033227234962396324,
      "learning_rate": 4.09206527364209e-05,
      "loss": 0.0,
      "num_input_tokens_seen": 198496,
      "step": 635
    },
    {
      "epoch": 7.111111111111111,
      "grad_norm": 0.00015425332821905613,
      "learning_rate": 4.073300977624594e-05,
      "loss": 0.0,
      "num_input_tokens_seen": 200064,
      "step": 640
    },
    {
      "epoch": 7.166666666666667,
      "grad_norm": 0.00037242184043861926,
      "learning_rate": 4.054388764591822e-05,
      "loss": 0.0,
      "num_input_tokens_seen": 201600,
      "step": 645
    },
    {
      "epoch": 7.222222222222222,
      "grad_norm": 0.00020593318913597614,
      "learning_rate": 4.035330412613035e-05,
      "loss": 0.0,
      "num_input_tokens_seen": 203168,
      "step": 650
    },
    {
      "epoch": 7.277777777777778,
      "grad_norm": 0.0009990110993385315,
      "learning_rate": 4.0161277134970345e-05,
      "loss": 0.0,
      "num_input_tokens_seen": 204704,
      "step": 655
    },
    {
      "epoch": 7.333333333333333,
      "grad_norm": 0.0030291674192994833,
      "learning_rate": 3.996782472623705e-05,
      "loss": 0.0,
      "num_input_tokens_seen": 206272,
      "step": 660
    },
    {
      "epoch": 7.388888888888889,
      "grad_norm": 0.0003464369510766119,
      "learning_rate": 3.977296508774278e-05,
      "loss": 0.0,
      "num_input_tokens_seen": 207872,
      "step": 665
    },
    {
      "epoch": 7.444444444444445,
      "grad_norm": 0.00023210722429212183,
      "learning_rate": 3.957671653960337e-05,
      "loss": 0.0,
      "num_input_tokens_seen": 209408,
      "step": 670
    },
    {
      "epoch": 7.5,
      "grad_norm": 0.00019133520254399627,
      "learning_rate": 3.9379097532515725e-05,
      "loss": 0.0,
      "num_input_tokens_seen": 210912,
      "step": 675
    },
    {
      "epoch": 7.555555555555555,
      "grad_norm": 0.0030801831744611263,
      "learning_rate": 3.918012664602317e-05,
      "loss": 0.0,
      "num_input_tokens_seen": 212480,
      "step": 680
    },
    {
      "epoch": 7.611111111111111,
      "grad_norm": 0.00017106349696405232,
      "learning_rate": 3.897982258676867e-05,
      "loss": 0.0,
      "num_input_tokens_seen": 214016,
      "step": 685
    },
    {
      "epoch": 7.666666666666667,
      "grad_norm": 0.0015756104839965701,
      "learning_rate": 3.8778204186736076e-05,
      "loss": 0.0,
      "num_input_tokens_seen": 215584,
      "step": 690
    },
    {
      "epoch": 7.722222222222222,
      "grad_norm": 0.00027148317894898355,
      "learning_rate": 3.8575290401479586e-05,
      "loss": 0.0,
      "num_input_tokens_seen": 217216,
      "step": 695
    },
    {
      "epoch": 7.777777777777778,
      "grad_norm": 0.005118280649185181,
      "learning_rate": 3.837110030834161e-05,
      "loss": 0.0,
      "num_input_tokens_seen": 218752,
      "step": 700
    },
    {
      "epoch": 7.833333333333333,
      "grad_norm": 0.0006711665191687644,
      "learning_rate": 3.8165653104659185e-05,
      "loss": 0.0,
      "num_input_tokens_seen": 220256,
      "step": 705
    },
    {
      "epoch": 7.888888888888889,
      "grad_norm": 0.00015742301184218377,
      "learning_rate": 3.79589681059591e-05,
      "loss": 0.0,
      "num_input_tokens_seen": 221792,
      "step": 710
    },
    {
      "epoch": 7.944444444444445,
      "grad_norm": 0.00018871431529987603,
      "learning_rate": 3.775106474414188e-05,
      "loss": 0.0,
      "num_input_tokens_seen": 223424,
      "step": 715
    },
    {
      "epoch": 8.0,
      "grad_norm": 0.00014996244863141328,
      "learning_rate": 3.75419625656549e-05,
      "loss": 0.0,
      "num_input_tokens_seen": 225024,
      "step": 720
    },
    {
      "epoch": 8.0,
      "eval_loss": 0.022488148882985115,
      "eval_runtime": 0.5746,
      "eval_samples_per_second": 69.61,
      "eval_steps_per_second": 17.402,
      "num_input_tokens_seen": 225024,
      "step": 720
    },
    {
      "epoch": 8.055555555555555,
      "grad_norm": 0.0001428200484951958,
      "learning_rate": 3.7331681229654635e-05,
      "loss": 0.0,
      "num_input_tokens_seen": 226560,
      "step": 725
    },
    {
      "epoch": 8.11111111111111,
      "grad_norm": 0.000497679051477462,
      "learning_rate": 3.712024050615843e-05,
      "loss": 0.0,
      "num_input_tokens_seen": 228128,
      "step": 730
    },
    {
      "epoch": 8.166666666666666,
      "grad_norm": 0.0007231302442960441,
      "learning_rate": 3.690766027418573e-05,
      "loss": 0.0,
      "num_input_tokens_seen": 229728,
      "step": 735
    },
    {
      "epoch": 8.222222222222221,
      "grad_norm": 0.0003029635699931532,
      "learning_rate": 3.6693960519889106e-05,
      "loss": 0.0,
      "num_input_tokens_seen": 231328,
      "step": 740
    },
    {
      "epoch": 8.277777777777779,
      "grad_norm": 0.0023842931259423494,
      "learning_rate": 3.6479161334675296e-05,
      "loss": 0.0,
      "num_input_tokens_seen": 232896,
      "step": 745
    },
    {
      "epoch": 8.333333333333334,
      "grad_norm": 0.00014705506328027695,
      "learning_rate": 3.626328291331618e-05,
      "loss": 0.0,
      "num_input_tokens_seen": 234496,
      "step": 750
    },
    {
      "epoch": 8.38888888888889,
      "grad_norm": 0.0001251144421985373,
      "learning_rate": 3.60463455520502e-05,
      "loss": 0.0,
      "num_input_tokens_seen": 236064,
      "step": 755
    },
    {
      "epoch": 8.444444444444445,
      "grad_norm": 0.00010756115079857409,
      "learning_rate": 3.582836964667408e-05,
      "loss": 0.0,
      "num_input_tokens_seen": 237632,
      "step": 760
    },
    {
      "epoch": 8.5,
      "grad_norm": 0.0011450349120423198,
      "learning_rate": 3.560937569062538e-05,
      "loss": 0.0,
      "num_input_tokens_seen": 239200,
      "step": 765
    },
    {
      "epoch": 8.555555555555555,
      "grad_norm": 0.00011714852007571608,
      "learning_rate": 3.538938427305573e-05,
      "loss": 0.0,
      "num_input_tokens_seen": 240832,
      "step": 770
    },
    {
      "epoch": 8.61111111111111,
      "grad_norm": 0.0009339886019006371,
      "learning_rate": 3.516841607689501e-05,
      "loss": 0.0,
      "num_input_tokens_seen": 242304,
      "step": 775
    },
    {
      "epoch": 8.666666666666666,
      "grad_norm": 0.00015842786524444818,
      "learning_rate": 3.494649187690695e-05,
      "loss": 0.0,
      "num_input_tokens_seen": 243840,
      "step": 780
    },
    {
      "epoch": 8.722222222222221,
      "grad_norm": 0.00011813634046120569,
      "learning_rate": 3.4723632537735846e-05,
      "loss": 0.0,
      "num_input_tokens_seen": 245376,
      "step": 785
    },
    {
      "epoch": 8.777777777777779,
      "grad_norm": 0.00016957512707449496,
      "learning_rate": 3.449985901194498e-05,
      "loss": 0.0,
      "num_input_tokens_seen": 246880,
      "step": 790
    },
    {
      "epoch": 8.833333333333334,
      "grad_norm": 0.00015034520765766501,
      "learning_rate": 3.427519233804667e-05,
      "loss": 0.0,
      "num_input_tokens_seen": 248448,
      "step": 795
    },
    {
      "epoch": 8.88888888888889,
      "grad_norm": 0.00013828229566570371,
      "learning_rate": 3.404965363852437e-05,
      "loss": 0.0,
      "num_input_tokens_seen": 250016,
      "step": 800
    },
    {
      "epoch": 8.944444444444445,
      "grad_norm": 0.00019199920643586665,
      "learning_rate": 3.382326411784672e-05,
      "loss": 0.0,
      "num_input_tokens_seen": 251584,
      "step": 805
    },
    {
      "epoch": 9.0,
      "grad_norm": 0.00021137826843187213,
      "learning_rate": 3.359604506047403e-05,
      "loss": 0.0,
      "num_input_tokens_seen": 253152,
      "step": 810
    },
    {
      "epoch": 9.0,
      "eval_loss": 0.02345685288310051,
      "eval_runtime": 0.5813,
      "eval_samples_per_second": 68.812,
      "eval_steps_per_second": 17.203,
      "num_input_tokens_seen": 253152,
      "step": 810
    },
    {
      "epoch": 9.055555555555555,
      "grad_norm": 0.00016638640954624861,
      "learning_rate": 3.336801782885712e-05,
      "loss": 0.0,
      "num_input_tokens_seen": 254720,
      "step": 815
    },
    {
      "epoch": 9.11111111111111,
      "grad_norm": 0.003974846564233303,
      "learning_rate": 3.313920386142892e-05,
      "loss": 0.0,
      "num_input_tokens_seen": 256288,
      "step": 820
    },
    {
      "epoch": 9.166666666666666,
      "grad_norm": 0.00317560532130301,
      "learning_rate": 3.290962467058891e-05,
      "loss": 0.0,
      "num_input_tokens_seen": 257824,
      "step": 825
    },
    {
      "epoch": 9.222222222222221,
      "grad_norm": 0.00011459377856226638,
      "learning_rate": 3.267930184068057e-05,
      "loss": 0.0,
      "num_input_tokens_seen": 259392,
      "step": 830
    },
    {
      "epoch": 9.277777777777779,
      "grad_norm": 0.004041124135255814,
      "learning_rate": 3.244825702596205e-05,
      "loss": 0.0,
      "num_input_tokens_seen": 260896,
      "step": 835
    },
    {
      "epoch": 9.333333333333334,
      "grad_norm": 0.0001293610839638859,
      "learning_rate": 3.2216511948570374e-05,
      "loss": 0.0,
      "num_input_tokens_seen": 262464,
      "step": 840
    },
    {
      "epoch": 9.38888888888889,
      "grad_norm": 0.0007165624410845339,
      "learning_rate": 3.198408839647911e-05,
      "loss": 0.0,
      "num_input_tokens_seen": 264000,
      "step": 845
    },
    {
      "epoch": 9.444444444444445,
      "grad_norm": 0.0016301969299092889,
      "learning_rate": 3.1751008221450025e-05,
      "loss": 0.0,
      "num_input_tokens_seen": 265536,
      "step": 850
    },
    {
      "epoch": 9.5,
      "grad_norm": 0.0004204019205644727,
      "learning_rate": 3.151729333697854e-05,
      "loss": 0.0,
      "num_input_tokens_seen": 267040,
      "step": 855
    },
    {
      "epoch": 9.555555555555555,
      "grad_norm": 0.0007552845636382699,
      "learning_rate": 3.1282965716233594e-05,
      "loss": 0.0,
      "num_input_tokens_seen": 268576,
      "step": 860
    },
    {
      "epoch": 9.61111111111111,
      "grad_norm": 9.118793968809769e-05,
      "learning_rate": 3.104804738999169e-05,
      "loss": 0.0,
      "num_input_tokens_seen": 270144,
      "step": 865
    },
    {
      "epoch": 9.666666666666666,
      "grad_norm": 0.00013944484817329794,
      "learning_rate": 3.0812560444565745e-05,
      "loss": 0.0,
      "num_input_tokens_seen": 271648,
      "step": 870
    },
    {
      "epoch": 9.722222222222221,
      "grad_norm": 0.00012299751688260585,
      "learning_rate": 3.057652701972848e-05,
      "loss": 0.0,
      "num_input_tokens_seen": 273280,
      "step": 875
    },
    {
      "epoch": 9.777777777777779,
      "grad_norm": 0.0010309257777407765,
      "learning_rate": 3.0339969306631005e-05,
      "loss": 0.0,
      "num_input_tokens_seen": 274880,
      "step": 880
    },
    {
      "epoch": 9.833333333333334,
      "grad_norm": 0.001862911507487297,
      "learning_rate": 3.0102909545716396e-05,
      "loss": 0.0,
      "num_input_tokens_seen": 276480,
      "step": 885
    },
    {
      "epoch": 9.88888888888889,
      "grad_norm": 0.00013120118819642812,
      "learning_rate": 2.9865370024628775e-05,
      "loss": 0.0,
      "num_input_tokens_seen": 278080,
      "step": 890
    },
    {
      "epoch": 9.944444444444445,
      "grad_norm": 0.0001308199280174449,
      "learning_rate": 2.9627373076117863e-05,
      "loss": 0.0,
      "num_input_tokens_seen": 279680,
      "step": 895
    },
    {
      "epoch": 10.0,
      "grad_norm": 0.002608845243230462,
      "learning_rate": 2.9388941075939334e-05,
      "loss": 0.0,
      "num_input_tokens_seen": 281312,
      "step": 900
    },
    {
      "epoch": 10.0,
      "eval_loss": 0.020556733012199402,
      "eval_runtime": 0.5799,
      "eval_samples_per_second": 68.976,
      "eval_steps_per_second": 17.244,
      "num_input_tokens_seen": 281312,
      "step": 900
    },
    {
      "epoch": 10.055555555555555,
      "grad_norm": 0.00010262915748171508,
      "learning_rate": 2.9150096440751107e-05,
      "loss": 0.0,
      "num_input_tokens_seen": 282848,
      "step": 905
    },
    {
      "epoch": 10.11111111111111,
      "grad_norm": 0.000142398159368895,
      "learning_rate": 2.8910861626005776e-05,
      "loss": 0.0,
      "num_input_tokens_seen": 284448,
      "step": 910
    },
    {
      "epoch": 10.166666666666666,
      "grad_norm": 0.00014525721780955791,
      "learning_rate": 2.8671259123839472e-05,
      "loss": 0.0,
      "num_input_tokens_seen": 285984,
      "step": 915
    },
    {
      "epoch": 10.222222222222221,
      "grad_norm": 0.00012520988821052015,
      "learning_rate": 2.843131146095719e-05,
      "loss": 0.0,
      "num_input_tokens_seen": 287488,
      "step": 920
    },
    {
      "epoch": 10.277777777777779,
      "grad_norm": 0.0001061570001184009,
      "learning_rate": 2.8191041196514873e-05,
      "loss": 0.0,
      "num_input_tokens_seen": 289024,
      "step": 925
    },
    {
      "epoch": 10.333333333333334,
      "grad_norm": 0.0001443429646315053,
      "learning_rate": 2.795047091999849e-05,
      "loss": 0.0,
      "num_input_tokens_seen": 290528,
      "step": 930
    },
    {
      "epoch": 10.38888888888889,
      "grad_norm": 0.00010769419168354943,
      "learning_rate": 2.770962324910027e-05,
      "loss": 0.0,
      "num_input_tokens_seen": 292128,
      "step": 935
    },
    {
      "epoch": 10.444444444444445,
      "grad_norm": 9.104691707761958e-05,
      "learning_rate": 2.7468520827592197e-05,
      "loss": 0.0,
      "num_input_tokens_seen": 293728,
      "step": 940
    },
    {
      "epoch": 10.5,
      "grad_norm": 0.0021691350266337395,
      "learning_rate": 2.7227186323197162e-05,
      "loss": 0.0,
      "num_input_tokens_seen": 295264,
      "step": 945
    },
    {
      "epoch": 10.555555555555555,
      "grad_norm": 0.0001162191474577412,
      "learning_rate": 2.6985642425457757e-05,
      "loss": 0.0,
      "num_input_tokens_seen": 296896,
      "step": 950
    },
    {
      "epoch": 10.61111111111111,
      "grad_norm": 0.00016877209418453276,
      "learning_rate": 2.674391184360313e-05,
      "loss": 0.0,
      "num_input_tokens_seen": 298400,
      "step": 955
    },
    {
      "epoch": 10.666666666666666,
      "grad_norm": 0.0001411411358276382,
      "learning_rate": 2.650201730441392e-05,
      "loss": 0.0,
      "num_input_tokens_seen": 299936,
      "step": 960
    },
    {
      "epoch": 10.722222222222221,
      "grad_norm": 0.000309384660795331,
      "learning_rate": 2.6259981550085504e-05,
      "loss": 0.0,
      "num_input_tokens_seen": 301536,
      "step": 965
    },
    {
      "epoch": 10.777777777777779,
      "grad_norm": 8.149706991389394e-05,
      "learning_rate": 2.60178273360899e-05,
      "loss": 0.0,
      "num_input_tokens_seen": 303104,
      "step": 970
    },
    {
      "epoch": 10.833333333333334,
      "grad_norm": 0.0001164573259302415,
      "learning_rate": 2.5775577429036345e-05,
      "loss": 0.0,
      "num_input_tokens_seen": 304672,
      "step": 975
    },
    {
      "epoch": 10.88888888888889,
      "grad_norm": 0.0011377756018191576,
      "learning_rate": 2.553325460453086e-05,
      "loss": 0.0,
      "num_input_tokens_seen": 306272,
      "step": 980
    },
    {
      "epoch": 10.944444444444445,
      "grad_norm": 0.00016332183440681547,
      "learning_rate": 2.5290881645034932e-05,
      "loss": 0.0,
      "num_input_tokens_seen": 307808,
      "step": 985
    },
    {
      "epoch": 11.0,
      "grad_norm": 0.0003309775493107736,
      "learning_rate": 2.504848133772358e-05,
      "loss": 0.0,
      "num_input_tokens_seen": 309280,
      "step": 990
    },
    {
      "epoch": 11.0,
      "eval_loss": 0.02247992344200611,
      "eval_runtime": 0.5699,
      "eval_samples_per_second": 70.182,
      "eval_steps_per_second": 17.546,
      "num_input_tokens_seen": 309280,
      "step": 990
    },
    {
      "epoch": 11.055555555555555,
      "grad_norm": 0.0004851088742725551,
      "learning_rate": 2.4806076472342997e-05,
      "loss": 0.0,
      "num_input_tokens_seen": 310816,
      "step": 995
    },
    {
      "epoch": 11.11111111111111,
      "grad_norm": 0.0001135764759965241,
      "learning_rate": 2.4563689839067913e-05,
      "loss": 0.0,
      "num_input_tokens_seen": 312416,
      "step": 1000
    },
    {
      "epoch": 11.166666666666666,
      "grad_norm": 0.00010679913975764066,
      "learning_rate": 2.432134422635893e-05,
      "loss": 0.0,
      "num_input_tokens_seen": 314048,
      "step": 1005
    },
    {
      "epoch": 11.222222222222221,
      "grad_norm": 0.00016918167239055037,
      "learning_rate": 2.4079062418820002e-05,
      "loss": 0.0,
      "num_input_tokens_seen": 315616,
      "step": 1010
    },
    {
      "epoch": 11.277777777777779,
      "grad_norm": 0.0020280464086681604,
      "learning_rate": 2.3836867195056335e-05,
      "loss": 0.0,
      "num_input_tokens_seen": 317120,
      "step": 1015
    },
    {
      "epoch": 11.333333333333334,
      "grad_norm": 7.544541585957631e-05,
      "learning_rate": 2.3594781325532784e-05,
      "loss": 0.0,
      "num_input_tokens_seen": 318688,
      "step": 1020
    },
    {
      "epoch": 11.38888888888889,
      "grad_norm": 0.001113244565203786,
      "learning_rate": 2.3352827570433036e-05,
      "loss": 0.0,
      "num_input_tokens_seen": 320224,
      "step": 1025
    },
    {
      "epoch": 11.444444444444445,
      "grad_norm": 0.0001098668944905512,
      "learning_rate": 2.3111028677519804e-05,
      "loss": 0.0,
      "num_input_tokens_seen": 321792,
      "step": 1030
    },
    {
      "epoch": 11.5,
      "grad_norm": 0.0002856272622011602,
      "learning_rate": 2.2869407379996088e-05,
      "loss": 0.0,
      "num_input_tokens_seen": 323360,
      "step": 1035
    },
    {
      "epoch": 11.555555555555555,
      "grad_norm": 0.0001686100586084649,
      "learning_rate": 2.2627986394367938e-05,
      "loss": 0.0,
      "num_input_tokens_seen": 324896,
      "step": 1040
    },
    {
      "epoch": 11.61111111111111,
      "grad_norm": 0.00012486428022384644,
      "learning_rate": 2.238678841830867e-05,
      "loss": 0.0,
      "num_input_tokens_seen": 326496,
      "step": 1045
    },
    {
      "epoch": 11.666666666666666,
      "grad_norm": 0.00014248206571210176,
      "learning_rate": 2.2145836128524902e-05,
      "loss": 0.0,
      "num_input_tokens_seen": 328064,
      "step": 1050
    },
    {
      "epoch": 11.722222222222221,
      "grad_norm": 0.00021148154337424785,
      "learning_rate": 2.1905152178624595e-05,
      "loss": 0.0,
      "num_input_tokens_seen": 329664,
      "step": 1055
    },
    {
      "epoch": 11.777777777777779,
      "grad_norm": 0.0001073022503987886,
      "learning_rate": 2.1664759196987182e-05,
      "loss": 0.0,
      "num_input_tokens_seen": 331296,
      "step": 1060
    },
    {
      "epoch": 11.833333333333334,
      "grad_norm": 9.826263703871518e-05,
      "learning_rate": 2.1424679784636144e-05,
      "loss": 0.0,
      "num_input_tokens_seen": 332832,
      "step": 1065
    },
    {
      "epoch": 11.88888888888889,
      "grad_norm": 0.00021975382696837187,
      "learning_rate": 2.118493651311413e-05,
      "loss": 0.0,
      "num_input_tokens_seen": 334400,
      "step": 1070
    },
    {
      "epoch": 11.944444444444445,
      "grad_norm": 0.00015810529293958098,
      "learning_rate": 2.0945551922360818e-05,
      "loss": 0.0,
      "num_input_tokens_seen": 335968,
      "step": 1075
    },
    {
      "epoch": 12.0,
      "grad_norm": 0.00011251640535192564,
      "learning_rate": 2.070654851859383e-05,
      "loss": 0.0,
      "num_input_tokens_seen": 337536,
      "step": 1080
    },
    {
      "epoch": 12.0,
      "eval_loss": 0.021509621292352676,
      "eval_runtime": 0.5677,
      "eval_samples_per_second": 70.456,
      "eval_steps_per_second": 17.614,
      "num_input_tokens_seen": 337536,
      "step": 1080
    },
    {
      "epoch": 12.055555555555555,
      "grad_norm": 0.00011595851538004354,
      "learning_rate": 2.0467948772192713e-05,
      "loss": 0.0,
      "num_input_tokens_seen": 339072,
      "step": 1085
    },
    {
      "epoch": 12.11111111111111,
      "grad_norm": 0.0001870399428298697,
      "learning_rate": 2.022977511558638e-05,
      "loss": 0.0,
      "num_input_tokens_seen": 340640,
      "step": 1090
    },
    {
      "epoch": 12.166666666666666,
      "grad_norm": 0.00011985289165750146,
      "learning_rate": 1.9992049941144066e-05,
      "loss": 0.0,
      "num_input_tokens_seen": 342208,
      "step": 1095
    },
    {
      "epoch": 12.222222222222221,
      "grad_norm": 0.00034283590503036976,
      "learning_rate": 1.9754795599070068e-05,
      "loss": 0.0,
      "num_input_tokens_seen": 343776,
      "step": 1100
    },
    {
      "epoch": 12.277777777777779,
      "grad_norm": 0.0002817510103341192,
      "learning_rate": 1.9518034395302414e-05,
      "loss": 0.0,
      "num_input_tokens_seen": 345376,
      "step": 1105
    },
    {
      "epoch": 12.333333333333334,
      "grad_norm": 0.0008850801968947053,
      "learning_rate": 1.9281788589415804e-05,
      "loss": 0.0,
      "num_input_tokens_seen": 347008,
      "step": 1110
    },
    {
      "epoch": 12.38888888888889,
      "grad_norm": 9.697239147499204e-05,
      "learning_rate": 1.9046080392528735e-05,
      "loss": 0.0,
      "num_input_tokens_seen": 348576,
      "step": 1115
    },
    {
      "epoch": 12.444444444444445,
      "grad_norm": 9.033724199980497e-05,
      "learning_rate": 1.8810931965215356e-05,
      "loss": 0.0,
      "num_input_tokens_seen": 350112,
      "step": 1120
    },
    {
      "epoch": 12.5,
      "grad_norm": 0.0004941816441714764,
      "learning_rate": 1.857636541542195e-05,
      "loss": 0.0,
      "num_input_tokens_seen": 351648,
      "step": 1125
    },
    {
      "epoch": 12.555555555555555,
      "grad_norm": 0.00014010470476932824,
      "learning_rate": 1.8342402796388445e-05,
      "loss": 0.0,
      "num_input_tokens_seen": 353216,
      "step": 1130
    },
    {
      "epoch": 12.61111111111111,
      "grad_norm": 0.00010255679808324203,
      "learning_rate": 1.8109066104575023e-05,
      "loss": 0.0,
      "num_input_tokens_seen": 354784,
      "step": 1135
    },
    {
      "epoch": 12.666666666666666,
      "grad_norm": 9.848680201685056e-05,
      "learning_rate": 1.7876377277594053e-05,
      "loss": 0.0,
      "num_input_tokens_seen": 356352,
      "step": 1140
    },
    {
      "epoch": 12.722222222222221,
      "grad_norm": 0.0011804875684902072,
      "learning_rate": 1.764435819214762e-05,
      "loss": 0.0,
      "num_input_tokens_seen": 357920,
      "step": 1145
    },
    {
      "epoch": 12.777777777777779,
      "grad_norm": 9.178365144180134e-05,
      "learning_rate": 1.7413030661970742e-05,
      "loss": 0.0,
      "num_input_tokens_seen": 359456,
      "step": 1150
    },
    {
      "epoch": 12.833333333333334,
      "grad_norm": 8.829342550598085e-05,
      "learning_rate": 1.7182416435780454e-05,
      "loss": 0.0,
      "num_input_tokens_seen": 361024,
      "step": 1155
    },
    {
      "epoch": 12.88888888888889,
      "grad_norm": 7.05787242623046e-05,
      "learning_rate": 1.695253719523115e-05,
      "loss": 0.0,
      "num_input_tokens_seen": 362528,
      "step": 1160
    },
    {
      "epoch": 12.944444444444445,
      "grad_norm": 8.048354357015342e-05,
      "learning_rate": 1.672341455287605e-05,
      "loss": 0.0,
      "num_input_tokens_seen": 364064,
      "step": 1165
    },
    {
      "epoch": 13.0,
      "grad_norm": 0.0007057728944346309,
      "learning_rate": 1.649507005013532e-05,
      "loss": 0.0,
      "num_input_tokens_seen": 365632,
      "step": 1170
    },
    {
      "epoch": 13.0,
      "eval_loss": 0.02345089241862297,
      "eval_runtime": 0.5691,
      "eval_samples_per_second": 70.289,
      "eval_steps_per_second": 17.572,
      "num_input_tokens_seen": 365632,
      "step": 1170
    },
    {
      "epoch": 13.055555555555555,
      "grad_norm": 0.00040255390922538936,
      "learning_rate": 1.6267525155270773e-05,
      "loss": 0.0,
      "num_input_tokens_seen": 367232,
      "step": 1175
    },
    {
      "epoch": 13.11111111111111,
      "grad_norm": 0.00015434890519827604,
      "learning_rate": 1.6040801261367493e-05,
      "loss": 0.0,
      "num_input_tokens_seen": 368800,
      "step": 1180
    },
    {
      "epoch": 13.166666666666666,
      "grad_norm": 0.001940408954396844,
      "learning_rate": 1.5814919684322545e-05,
      "loss": 0.0,
      "num_input_tokens_seen": 370336,
      "step": 1185
    },
    {
      "epoch": 13.222222222222221,
      "grad_norm": 0.00046628896961919963,
      "learning_rate": 1.5589901660840896e-05,
      "loss": 0.0,
      "num_input_tokens_seen": 371872,
      "step": 1190
    },
    {
      "epoch": 13.277777777777779,
      "grad_norm": 9.820911509450525e-05,
      "learning_rate": 1.5365768346438797e-05,
      "loss": 0.0,
      "num_input_tokens_seen": 373472,
      "step": 1195
    },
    {
      "epoch": 13.333333333333334,
      "grad_norm": 0.00020039896480739117,
      "learning_rate": 1.5142540813454836e-05,
      "loss": 0.0,
      "num_input_tokens_seen": 375040,
      "step": 1200
    },
    {
      "epoch": 13.38888888888889,
      "grad_norm": 0.0017673159018158913,
      "learning_rate": 1.4920240049068748e-05,
      "loss": 0.0,
      "num_input_tokens_seen": 376512,
      "step": 1205
    },
    {
      "epoch": 13.444444444444445,
      "grad_norm": 9.511098323855549e-05,
      "learning_rate": 1.4698886953328292e-05,
      "loss": 0.0,
      "num_input_tokens_seen": 378112,
      "step": 1210
    },
    {
      "epoch": 13.5,
      "grad_norm": 0.0001298331335419789,
      "learning_rate": 1.4478502337184274e-05,
      "loss": 0.0,
      "num_input_tokens_seen": 379648,
      "step": 1215
    },
    {
      "epoch": 13.555555555555555,
      "grad_norm": 7.689925405429676e-05,
      "learning_rate": 1.4259106920533955e-05,
      "loss": 0.0,
      "num_input_tokens_seen": 381184,
      "step": 1220
    },
    {
      "epoch": 13.61111111111111,
      "grad_norm": 7.704249583184719e-05,
      "learning_rate": 1.4040721330273062e-05,
      "loss": 0.0,
      "num_input_tokens_seen": 382752,
      "step": 1225
    },
    {
      "epoch": 13.666666666666666,
      "grad_norm": 9.969378879759461e-05,
      "learning_rate": 1.3823366098356487e-05,
      "loss": 0.0,
      "num_input_tokens_seen": 384320,
      "step": 1230
    },
    {
      "epoch": 13.722222222222221,
      "grad_norm": 7.952869054861367e-05,
      "learning_rate": 1.3607061659867892e-05,
      "loss": 0.0,
      "num_input_tokens_seen": 385888,
      "step": 1235
    },
    {
      "epoch": 13.777777777777779,
      "grad_norm": 9.024488099385053e-05,
      "learning_rate": 1.3391828351098578e-05,
      "loss": 0.0,
      "num_input_tokens_seen": 387488,
      "step": 1240
    },
    {
      "epoch": 13.833333333333334,
      "grad_norm": 8.695020369486883e-05,
      "learning_rate": 1.3177686407635417e-05,
      "loss": 0.0,
      "num_input_tokens_seen": 389024,
      "step": 1245
    },
    {
      "epoch": 13.88888888888889,
      "grad_norm": 7.220877159852535e-05,
      "learning_rate": 1.29646559624584e-05,
      "loss": 0.0,
      "num_input_tokens_seen": 390528,
      "step": 1250
    },
    {
      "epoch": 13.944444444444445,
      "grad_norm": 0.00012539667659439147,
      "learning_rate": 1.2752757044047827e-05,
      "loss": 0.0,
      "num_input_tokens_seen": 392096,
      "step": 1255
    },
    {
      "epoch": 14.0,
      "grad_norm": 0.00011905840074177831,
      "learning_rate": 1.2542009574501246e-05,
      "loss": 0.0,
      "num_input_tokens_seen": 393632,
      "step": 1260
    },
    {
      "epoch": 14.0,
      "eval_loss": 0.023450110107660294,
      "eval_runtime": 0.5665,
      "eval_samples_per_second": 70.608,
      "eval_steps_per_second": 17.652,
      "num_input_tokens_seen": 393632,
      "step": 1260
    },
    {
      "epoch": 14.055555555555555,
      "grad_norm": 0.00017190420476254076,
      "learning_rate": 1.2332433367660442e-05,
      "loss": 0.0,
      "num_input_tokens_seen": 395136,
      "step": 1265
    },
    {
      "epoch": 14.11111111111111,
      "grad_norm": 0.00016550054715480655,
      "learning_rate": 1.2124048127248644e-05,
      "loss": 0.0,
      "num_input_tokens_seen": 396672,
      "step": 1270
    },
    {
      "epoch": 14.166666666666666,
      "grad_norm": 8.005576819414273e-05,
      "learning_rate": 1.1916873445017982e-05,
      "loss": 0.0,
      "num_input_tokens_seen": 398272,
      "step": 1275
    },
    {
      "epoch": 14.222222222222221,
      "grad_norm": 0.0002504843578208238,
      "learning_rate": 1.1710928798907556e-05,
      "loss": 0.0,
      "num_input_tokens_seen": 399808,
      "step": 1280
    },
    {
      "epoch": 14.277777777777779,
      "grad_norm": 7.951293082442135e-05,
      "learning_rate": 1.1506233551212186e-05,
      "loss": 0.0,
      "num_input_tokens_seen": 401344,
      "step": 1285
    },
    {
      "epoch": 14.333333333333334,
      "grad_norm": 7.455885497620329e-05,
      "learning_rate": 1.1302806946762004e-05,
      "loss": 0.0,
      "num_input_tokens_seen": 402912,
      "step": 1290
    },
    {
      "epoch": 14.38888888888889,
      "grad_norm": 0.00011508566967677325,
      "learning_rate": 1.1100668111113166e-05,
      "loss": 0.0,
      "num_input_tokens_seen": 404512,
      "step": 1295
    },
    {
      "epoch": 14.444444444444445,
      "grad_norm": 0.0017771677812561393,
      "learning_rate": 1.0899836048749645e-05,
      "loss": 0.0,
      "num_input_tokens_seen": 406048,
      "step": 1300
    },
    {
      "epoch": 14.5,
      "grad_norm": 0.00012742944818455726,
      "learning_rate": 1.0700329641296541e-05,
      "loss": 0.0,
      "num_input_tokens_seen": 407648,
      "step": 1305
    },
    {
      "epoch": 14.555555555555555,
      "grad_norm": 6.800785195082426e-05,
      "learning_rate": 1.0502167645744895e-05,
      "loss": 0.0,
      "num_input_tokens_seen": 409216,
      "step": 1310
    },
    {
      "epoch": 14.61111111111111,
      "grad_norm": 7.308148633455858e-05,
      "learning_rate": 1.0305368692688174e-05,
      "loss": 0.0,
      "num_input_tokens_seen": 410720,
      "step": 1315
    },
    {
      "epoch": 14.666666666666666,
      "grad_norm": 0.0004988294676877558,
      "learning_rate": 1.01099512845707e-05,
      "loss": 0.0,
      "num_input_tokens_seen": 412256,
      "step": 1320
    },
    {
      "epoch": 14.722222222222221,
| "grad_norm": 6.726184801664203e-05, | |
| "learning_rate": 9.91593379394811e-06, | |
| "loss": 0.0, | |
| "num_input_tokens_seen": 413824, | |
| "step": 1325 | |
| }, | |
| { | |
| "epoch": 14.777777777777779, | |
| "grad_norm": 0.000125155333080329, | |
| "learning_rate": 9.723334461760006e-06, | |
| "loss": 0.0, | |
| "num_input_tokens_seen": 415392, | |
| "step": 1330 | |
| }, | |
| { | |
| "epoch": 14.833333333333334, | |
| "grad_norm": 0.00019686762243509293, | |
| "learning_rate": 9.532171395615036e-06, | |
| "loss": 0.0, | |
| "num_input_tokens_seen": 417024, | |
| "step": 1335 | |
| }, | |
| { | |
| "epoch": 14.88888888888889, | |
| "grad_norm": 0.0017825914546847343, | |
| "learning_rate": 9.342462568088416e-06, | |
| "loss": 0.0, | |
| "num_input_tokens_seen": 418592, | |
| "step": 1340 | |
| }, | |
| { | |
| "epoch": 14.944444444444445, | |
| "grad_norm": 8.396849443670362e-05, | |
| "learning_rate": 9.154225815032242e-06, | |
| "loss": 0.0, | |
| "num_input_tokens_seen": 420128, | |
| "step": 1345 | |
| }, | |
| { | |
| "epoch": 15.0, | |
| "grad_norm": 6.699386722175404e-05, | |
| "learning_rate": 8.967478833898612e-06, | |
| "loss": 0.0, | |
| "num_input_tokens_seen": 421696, | |
| "step": 1350 | |
| }, | |
| { | |
| "epoch": 15.0, | |
| "eval_loss": 0.02150687947869301, | |
| "eval_runtime": 0.5699, | |
| "eval_samples_per_second": 70.187, | |
| "eval_steps_per_second": 17.547, | |
| "num_input_tokens_seen": 421696, | |
| "step": 1350 | |
| }, | |
| { | |
| "epoch": 15.055555555555555, | |
| "grad_norm": 0.00013656025112140924, | |
| "learning_rate": 8.78223918207575e-06, | |
| "loss": 0.0, | |
| "num_input_tokens_seen": 423264, | |
| "step": 1355 | |
| }, | |
| { | |
| "epoch": 15.11111111111111, | |
| "grad_norm": 0.00011587158951442689, | |
| "learning_rate": 8.598524275237322e-06, | |
| "loss": 0.0, | |
| "num_input_tokens_seen": 424864, | |
| "step": 1360 | |
| }, | |
| { | |
| "epoch": 15.166666666666666, | |
| "grad_norm": 0.00098511204123497, | |
| "learning_rate": 8.41635138570507e-06, | |
| "loss": 0.0, | |
| "num_input_tokens_seen": 426464, | |
| "step": 1365 | |
| }, | |
| { | |
| "epoch": 15.222222222222221, | |
| "grad_norm": 9.587294334778562e-05, | |
| "learning_rate": 8.235737640824908e-06, | |
| "loss": 0.0, | |
| "num_input_tokens_seen": 428032, | |
| "step": 1370 | |
| }, | |
| { | |
| "epoch": 15.277777777777779, | |
| "grad_norm": 7.411834667436779e-05, | |
| "learning_rate": 8.056700021356694e-06, | |
| "loss": 0.0, | |
| "num_input_tokens_seen": 429632, | |
| "step": 1375 | |
| }, | |
| { | |
| "epoch": 15.333333333333334, | |
| "grad_norm": 0.00037025214987806976, | |
| "learning_rate": 7.879255359877705e-06, | |
| "loss": 0.0, | |
| "num_input_tokens_seen": 431232, | |
| "step": 1380 | |
| }, | |
| { | |
| "epoch": 15.38888888888889, | |
| "grad_norm": 7.872714195400476e-05, | |
| "learning_rate": 7.703420339200101e-06, | |
| "loss": 0.0, | |
| "num_input_tokens_seen": 432832, | |
| "step": 1385 | |
| }, | |
| { | |
| "epoch": 15.444444444444445, | |
| "grad_norm": 6.71739035169594e-05, | |
| "learning_rate": 7.529211490802498e-06, | |
| "loss": 0.0, | |
| "num_input_tokens_seen": 434400, | |
| "step": 1390 | |
| }, | |
| { | |
| "epoch": 15.5, | |
| "grad_norm": 8.575282845413312e-05, | |
| "learning_rate": 7.3566451932756744e-06, | |
| "loss": 0.0, | |
| "num_input_tokens_seen": 435968, | |
| "step": 1395 | |
| }, | |
| { | |
| "epoch": 15.555555555555555, | |
| "grad_norm": 0.0003745568392332643, | |
| "learning_rate": 7.185737670782727e-06, | |
| "loss": 0.0, | |
| "num_input_tokens_seen": 437504, | |
| "step": 1400 | |
| }, | |
| { | |
| "epoch": 15.61111111111111, | |
| "grad_norm": 0.0001251415378646925, | |
| "learning_rate": 7.016504991533726e-06, | |
| "loss": 0.0, | |
| "num_input_tokens_seen": 439072, | |
| "step": 1405 | |
| }, | |
| { | |
| "epoch": 15.666666666666666, | |
| "grad_norm": 0.00015347173030022532, | |
| "learning_rate": 6.848963066275027e-06, | |
| "loss": 0.0, | |
| "num_input_tokens_seen": 440608, | |
| "step": 1410 | |
| }, | |
| { | |
| "epoch": 15.722222222222221, | |
| "grad_norm": 0.0001295220135943964, | |
| "learning_rate": 6.683127646793411e-06, | |
| "loss": 0.0, | |
| "num_input_tokens_seen": 442144, | |
| "step": 1415 | |
| }, | |
| { | |
| "epoch": 15.777777777777779, | |
| "grad_norm": 6.701109668938443e-05, | |
| "learning_rate": 6.519014324435102e-06, | |
| "loss": 0.0, | |
| "num_input_tokens_seen": 443680, | |
| "step": 1420 | |
| }, | |
| { | |
| "epoch": 15.833333333333334, | |
| "grad_norm": 0.0008761701174080372, | |
| "learning_rate": 6.356638528639955e-06, | |
| "loss": 0.0, | |
| "num_input_tokens_seen": 445248, | |
| "step": 1425 | |
| }, | |
| { | |
| "epoch": 15.88888888888889, | |
| "grad_norm": 0.0007946295663714409, | |
| "learning_rate": 6.196015525490825e-06, | |
| "loss": 0.0, | |
| "num_input_tokens_seen": 446816, | |
| "step": 1430 | |
| }, | |
| { | |
| "epoch": 15.944444444444445, | |
| "grad_norm": 6.699462392134592e-05, | |
| "learning_rate": 6.037160416278278e-06, | |
| "loss": 0.0, | |
| "num_input_tokens_seen": 448384, | |
| "step": 1435 | |
| }, | |
| { | |
| "epoch": 16.0, | |
| "grad_norm": 0.000385272316634655, | |
| "learning_rate": 5.880088136080814e-06, | |
| "loss": 0.0, | |
| "num_input_tokens_seen": 449984, | |
| "step": 1440 | |
| }, | |
| { | |
| "epoch": 16.0, | |
| "eval_loss": 0.026416266337037086, | |
| "eval_runtime": 0.5684, | |
| "eval_samples_per_second": 70.37, | |
| "eval_steps_per_second": 17.593, | |
| "num_input_tokens_seen": 449984, | |
| "step": 1440 | |
| }, | |
| { | |
| "epoch": 16.055555555555557, | |
| "grad_norm": 7.982327224453911e-05, | |
| "learning_rate": 5.724813452360736e-06, | |
| "loss": 0.0, | |
| "num_input_tokens_seen": 451520, | |
| "step": 1445 | |
| }, | |
| { | |
| "epoch": 16.11111111111111, | |
| "grad_norm": 9.408822370460257e-05, | |
| "learning_rate": 5.571350963575728e-06, | |
| "loss": 0.0, | |
| "num_input_tokens_seen": 453088, | |
| "step": 1450 | |
| }, | |
| { | |
| "epoch": 16.166666666666668, | |
| "grad_norm": 7.70150581956841e-05, | |
| "learning_rate": 5.4197150978063965e-06, | |
| "loss": 0.0, | |
| "num_input_tokens_seen": 454592, | |
| "step": 1455 | |
| }, | |
| { | |
| "epoch": 16.22222222222222, | |
| "grad_norm": 0.00023611770302522928, | |
| "learning_rate": 5.269920111399732e-06, | |
| "loss": 0.0, | |
| "num_input_tokens_seen": 456128, | |
| "step": 1460 | |
| }, | |
| { | |
| "epoch": 16.27777777777778, | |
| "grad_norm": 0.0006894396501593292, | |
| "learning_rate": 5.121980087628803e-06, | |
| "loss": 0.0, | |
| "num_input_tokens_seen": 457760, | |
| "step": 1465 | |
| }, | |
| { | |
| "epoch": 16.333333333333332, | |
| "grad_norm": 6.712992762913927e-05, | |
| "learning_rate": 4.975908935368701e-06, | |
| "loss": 0.0, | |
| "num_input_tokens_seen": 459328, | |
| "step": 1470 | |
| }, | |
| { | |
| "epoch": 16.38888888888889, | |
| "grad_norm": 6.62754537188448e-05, | |
| "learning_rate": 4.831720387788827e-06, | |
| "loss": 0.0, | |
| "num_input_tokens_seen": 460960, | |
| "step": 1475 | |
| }, | |
| { | |
| "epoch": 16.444444444444443, | |
| "grad_norm": 6.946622306713834e-05, | |
| "learning_rate": 4.689428001061774e-06, | |
| "loss": 0.0, | |
| "num_input_tokens_seen": 462528, | |
| "step": 1480 | |
| }, | |
| { | |
| "epoch": 16.5, | |
| "grad_norm": 9.66685765888542e-05, | |
| "learning_rate": 4.549045153088813e-06, | |
| "loss": 0.0, | |
| "num_input_tokens_seen": 464064, | |
| "step": 1485 | |
| }, | |
| { | |
| "epoch": 16.555555555555557, | |
| "grad_norm": 7.496264879591763e-05, | |
| "learning_rate": 4.410585042242124e-06, | |
| "loss": 0.0, | |
| "num_input_tokens_seen": 465600, | |
| "step": 1490 | |
| }, | |
| { | |
| "epoch": 16.61111111111111, | |
| "grad_norm": 0.0016269378829747438, | |
| "learning_rate": 4.274060686123959e-06, | |
| "loss": 0.0, | |
| "num_input_tokens_seen": 467168, | |
| "step": 1495 | |
| }, | |
| { | |
| "epoch": 16.666666666666668, | |
| "grad_norm": 6.927455251570791e-05, | |
| "learning_rate": 4.1394849203427284e-06, | |
| "loss": 0.0, | |
| "num_input_tokens_seen": 468768, | |
| "step": 1500 | |
| }, | |
| { | |
| "epoch": 16.72222222222222, | |
| "grad_norm": 8.212022657971829e-05, | |
| "learning_rate": 4.006870397306256e-06, | |
| "loss": 0.0, | |
| "num_input_tokens_seen": 470336, | |
| "step": 1505 | |
| }, | |
| { | |
| "epoch": 16.77777777777778, | |
| "grad_norm": 0.0001231308124260977, | |
| "learning_rate": 3.876229585032245e-06, | |
| "loss": 0.0, | |
| "num_input_tokens_seen": 471872, | |
| "step": 1510 | |
| }, | |
| { | |
| "epoch": 16.833333333333332, | |
| "grad_norm": 0.00010360088344896212, | |
| "learning_rate": 3.7475747659760502e-06, | |
| "loss": 0.0, | |
| "num_input_tokens_seen": 473408, | |
| "step": 1515 | |
| }, | |
| { | |
| "epoch": 16.88888888888889, | |
| "grad_norm": 7.802182517480105e-05, | |
| "learning_rate": 3.6209180358759394e-06, | |
| "loss": 0.0, | |
| "num_input_tokens_seen": 474880, | |
| "step": 1520 | |
| }, | |
| { | |
| "epoch": 16.944444444444443, | |
| "grad_norm": 8.598937711212784e-05, | |
| "learning_rate": 3.4962713026158694e-06, | |
| "loss": 0.0, | |
| "num_input_tokens_seen": 476448, | |
| "step": 1525 | |
| }, | |
| { | |
| "epoch": 17.0, | |
| "grad_norm": 0.0014108957257121801, | |
| "learning_rate": 3.373646285105958e-06, | |
| "loss": 0.0, | |
| "num_input_tokens_seen": 478016, | |
| "step": 1530 | |
| }, | |
| { | |
| "epoch": 17.0, | |
| "eval_loss": 0.02150651253759861, | |
| "eval_runtime": 0.5686, | |
| "eval_samples_per_second": 70.345, | |
| "eval_steps_per_second": 17.586, | |
| "num_input_tokens_seen": 478016, | |
| "step": 1530 | |
| }, | |
| { | |
| "epoch": 17.055555555555557, | |
| "grad_norm": 8.879324741428718e-05, | |
| "learning_rate": 3.2530545121807145e-06, | |
| "loss": 0.0, | |
| "num_input_tokens_seen": 479584, | |
| "step": 1535 | |
| }, | |
| { | |
| "epoch": 17.11111111111111, | |
| "grad_norm": 6.240269431145862e-05, | |
| "learning_rate": 3.1345073215151066e-06, | |
| "loss": 0.0, | |
| "num_input_tokens_seen": 481184, | |
| "step": 1540 | |
| }, | |
| { | |
| "epoch": 17.166666666666668, | |
| "grad_norm": 0.0010509274434298277, | |
| "learning_rate": 3.0180158585586397e-06, | |
| "loss": 0.0, | |
| "num_input_tokens_seen": 482752, | |
| "step": 1545 | |
| }, | |
| { | |
| "epoch": 17.22222222222222, | |
| "grad_norm": 0.00016104978567454964, | |
| "learning_rate": 2.9035910754875136e-06, | |
| "loss": 0.0, | |
| "num_input_tokens_seen": 484288, | |
| "step": 1550 | |
| }, | |
| { | |
| "epoch": 17.27777777777778, | |
| "grad_norm": 7.16880604159087e-05, | |
| "learning_rate": 2.7912437301749026e-06, | |
| "loss": 0.0, | |
| "num_input_tokens_seen": 485824, | |
| "step": 1555 | |
| }, | |
| { | |
| "epoch": 17.333333333333332, | |
| "grad_norm": 6.778180977562442e-05, | |
| "learning_rate": 2.6809843851795357e-06, | |
| "loss": 0.0, | |
| "num_input_tokens_seen": 487328, | |
| "step": 1560 | |
| }, | |
| { | |
| "epoch": 17.38888888888889, | |
| "grad_norm": 9.489655349170789e-05, | |
| "learning_rate": 2.57282340675267e-06, | |
| "loss": 0.0, | |
| "num_input_tokens_seen": 488896, | |
| "step": 1565 | |
| }, | |
| { | |
| "epoch": 17.444444444444443, | |
| "grad_norm": 7.715079846093431e-05, | |
| "learning_rate": 2.4667709638634434e-06, | |
| "loss": 0.0, | |
| "num_input_tokens_seen": 490432, | |
| "step": 1570 | |
| }, | |
| { | |
| "epoch": 17.5, | |
| "grad_norm": 0.00025187325081788003, | |
| "learning_rate": 2.3628370272428564e-06, | |
| "loss": 0.0, | |
| "num_input_tokens_seen": 492032, | |
| "step": 1575 | |
| }, | |
| { | |
| "epoch": 17.555555555555557, | |
| "grad_norm": 0.0001285983162233606, | |
| "learning_rate": 2.2610313684463177e-06, | |
| "loss": 0.0, | |
| "num_input_tokens_seen": 493600, | |
| "step": 1580 | |
| }, | |
| { | |
| "epoch": 17.61111111111111, | |
| "grad_norm": 7.69734542700462e-05, | |
| "learning_rate": 2.1613635589349756e-06, | |
| "loss": 0.0, | |
| "num_input_tokens_seen": 495200, | |
| "step": 1585 | |
| }, | |
| { | |
| "epoch": 17.666666666666668, | |
| "grad_norm": 0.00019795860862359405, | |
| "learning_rate": 2.063842969175847e-06, | |
| "loss": 0.0, | |
| "num_input_tokens_seen": 496768, | |
| "step": 1590 | |
| }, | |
| { | |
| "epoch": 17.72222222222222, | |
| "grad_norm": 0.0011514039942994714, | |
| "learning_rate": 1.968478767760812e-06, | |
| "loss": 0.0, | |
| "num_input_tokens_seen": 498336, | |
| "step": 1595 | |
| }, | |
| { | |
| "epoch": 17.77777777777778, | |
| "grad_norm": 0.00010896333697019145, | |
| "learning_rate": 1.8752799205445982e-06, | |
| "loss": 0.0, | |
| "num_input_tokens_seen": 499936, | |
| "step": 1600 | |
| }, | |
| { | |
| "epoch": 17.833333333333332, | |
| "grad_norm": 8.104776497930288e-05, | |
| "learning_rate": 1.784255189801895e-06, | |
| "loss": 0.0, | |
| "num_input_tokens_seen": 501504, | |
| "step": 1605 | |
| }, | |
| { | |
| "epoch": 17.88888888888889, | |
| "grad_norm": 6.83706603012979e-05, | |
| "learning_rate": 1.6954131334034922e-06, | |
| "loss": 0.0, | |
| "num_input_tokens_seen": 503040, | |
| "step": 1610 | |
| }, | |
| { | |
| "epoch": 17.944444444444443, | |
| "grad_norm": 7.833163545001298e-05, | |
| "learning_rate": 1.6087621040117157e-06, | |
| "loss": 0.0, | |
| "num_input_tokens_seen": 504672, | |
| "step": 1615 | |
| }, | |
| { | |
| "epoch": 18.0, | |
| "grad_norm": 8.828964200802147e-05, | |
| "learning_rate": 1.524310248295152e-06, | |
| "loss": 0.0, | |
| "num_input_tokens_seen": 506272, | |
| "step": 1620 | |
| }, | |
| { | |
| "epoch": 18.0, | |
| "eval_loss": 0.024432525038719177, | |
| "eval_runtime": 0.5687, | |
| "eval_samples_per_second": 70.333, | |
| "eval_steps_per_second": 17.583, | |
| "num_input_tokens_seen": 506272, | |
| "step": 1620 | |
| }, | |
| { | |
| "epoch": 18.055555555555557, | |
| "grad_norm": 9.823336586123332e-05, | |
| "learning_rate": 1.4420655061626932e-06, | |
| "loss": 0.0, | |
| "num_input_tokens_seen": 507904, | |
| "step": 1625 | |
| }, | |
| { | |
| "epoch": 18.11111111111111, | |
| "grad_norm": 7.27442602510564e-05, | |
| "learning_rate": 1.362035610017079e-06, | |
| "loss": 0.0, | |
| "num_input_tokens_seen": 509504, | |
| "step": 1630 | |
| }, | |
| { | |
| "epoch": 18.166666666666668, | |
| "grad_norm": 8.469615568174049e-05, | |
| "learning_rate": 1.2842280840278997e-06, | |
| "loss": 0.0, | |
| "num_input_tokens_seen": 511040, | |
| "step": 1635 | |
| }, | |
| { | |
| "epoch": 18.22222222222222, | |
| "grad_norm": 0.00015341391554102302, | |
| "learning_rate": 1.2086502434241865e-06, | |
| "loss": 0.0, | |
| "num_input_tokens_seen": 512576, | |
| "step": 1640 | |
| }, | |
| { | |
| "epoch": 18.27777777777778, | |
| "grad_norm": 0.00024517407291568816, | |
| "learning_rate": 1.1353091938067023e-06, | |
| "loss": 0.0, | |
| "num_input_tokens_seen": 514144, | |
| "step": 1645 | |
| }, | |
| { | |
| "epoch": 18.333333333333332, | |
| "grad_norm": 0.00015534732665400952, | |
| "learning_rate": 1.0642118304798442e-06, | |
| "loss": 0.0, | |
| "num_input_tokens_seen": 515744, | |
| "step": 1650 | |
| }, | |
| { | |
| "epoch": 18.38888888888889, | |
| "grad_norm": 7.502298103645444e-05, | |
| "learning_rate": 9.95364837803392e-07, | |
| "loss": 0.0, | |
| "num_input_tokens_seen": 517312, | |
| "step": 1655 | |
| }, | |
| { | |
| "epoch": 18.444444444444443, | |
| "grad_norm": 7.646170706721023e-05, | |
| "learning_rate": 9.287746885640603e-07, | |
| "loss": 0.0, | |
| "num_input_tokens_seen": 518880, | |
| "step": 1660 | |
| }, | |
| { | |
| "epoch": 18.5, | |
| "grad_norm": 0.0012430755887180567, | |
| "learning_rate": 8.64447643366953e-07, | |
| "loss": 0.0, | |
| "num_input_tokens_seen": 520480, | |
| "step": 1665 | |
| }, | |
| { | |
| "epoch": 18.555555555555557, | |
| "grad_norm": 5.92623146076221e-05, | |
| "learning_rate": 8.023897500469391e-07, | |
| "loss": 0.0, | |
| "num_input_tokens_seen": 522016, | |
| "step": 1670 | |
| }, | |
| { | |
| "epoch": 18.61111111111111, | |
| "grad_norm": 9.752674668561667e-05, | |
| "learning_rate": 7.426068431000882e-07, | |
| "loss": 0.0, | |
| "num_input_tokens_seen": 523552, | |
| "step": 1675 | |
| }, | |
| { | |
| "epoch": 18.666666666666668, | |
| "grad_norm": 0.0018209958216175437, | |
| "learning_rate": 6.851045431350927e-07, | |
| "loss": 0.0, | |
| "num_input_tokens_seen": 525088, | |
| "step": 1680 | |
| }, | |
| { | |
| "epoch": 18.72222222222222, | |
| "grad_norm": 0.0001121691384469159, | |
| "learning_rate": 6.298882563448599e-07, | |
| "loss": 0.0, | |
| "num_input_tokens_seen": 526688, | |
| "step": 1685 | |
| }, | |
| { | |
| "epoch": 18.77777777777778, | |
| "grad_norm": 0.00012427111505530775, | |
| "learning_rate": 5.769631739982267e-07, | |
| "loss": 0.0, | |
| "num_input_tokens_seen": 528288, | |
| "step": 1690 | |
| }, | |
| { | |
| "epoch": 18.833333333333332, | |
| "grad_norm": 0.0004747202910948545, | |
| "learning_rate": 5.263342719518921e-07, | |
| "loss": 0.0, | |
| "num_input_tokens_seen": 529888, | |
| "step": 1695 | |
| }, | |
| { | |
| "epoch": 18.88888888888889, | |
| "grad_norm": 0.00043994878069497645, | |
| "learning_rate": 4.780063101826132e-07, | |
| "loss": 0.0, | |
| "num_input_tokens_seen": 531360, | |
| "step": 1700 | |
| }, | |
| { | |
| "epoch": 18.944444444444443, | |
| "grad_norm": 0.00011101184645667672, | |
| "learning_rate": 4.319838323396691e-07, | |
| "loss": 0.0, | |
| "num_input_tokens_seen": 532896, | |
| "step": 1705 | |
| }, | |
| { | |
| "epoch": 19.0, | |
| "grad_norm": 0.0002416494971839711, | |
| "learning_rate": 3.88271165317694e-07, | |
| "loss": 0.0, | |
| "num_input_tokens_seen": 534432, | |
| "step": 1710 | |
| }, | |
| { | |
| "epoch": 19.0, | |
| "eval_loss": 0.02247370220720768, | |
| "eval_runtime": 0.5689, | |
| "eval_samples_per_second": 70.313, | |
| "eval_steps_per_second": 17.578, | |
| "num_input_tokens_seen": 534432, | |
| "step": 1710 | |
| }, | |
| { | |
| "epoch": 19.055555555555557, | |
| "grad_norm": 8.658957085572183e-05, | |
| "learning_rate": 3.468724188498751e-07, | |
| "loss": 0.0, | |
| "num_input_tokens_seen": 536000, | |
| "step": 1715 | |
| }, | |
| { | |
| "epoch": 19.11111111111111, | |
| "grad_norm": 7.289883797056973e-05, | |
| "learning_rate": 3.077914851215585e-07, | |
| "loss": 0.0, | |
| "num_input_tokens_seen": 537568, | |
| "step": 1720 | |
| }, | |
| { | |
| "epoch": 19.166666666666668, | |
| "grad_norm": 0.0005327937542460859, | |
| "learning_rate": 2.71032038404323e-07, | |
| "loss": 0.0, | |
| "num_input_tokens_seen": 539104, | |
| "step": 1725 | |
| }, | |
| { | |
| "epoch": 19.22222222222222, | |
| "grad_norm": 0.0009618631447665393, | |
| "learning_rate": 2.365975347105448e-07, | |
| "loss": 0.0, | |
| "num_input_tokens_seen": 540672, | |
| "step": 1730 | |
| }, | |
| { | |
| "epoch": 19.27777777777778, | |
| "grad_norm": 5.3735868277726695e-05, | |
| "learning_rate": 2.0449121146845774e-07, | |
| "loss": 0.0, | |
| "num_input_tokens_seen": 542208, | |
| "step": 1735 | |
| }, | |
| { | |
| "epoch": 19.333333333333332, | |
| "grad_norm": 9.372737258672714e-05, | |
| "learning_rate": 1.747160872177883e-07, | |
| "loss": 0.0, | |
| "num_input_tokens_seen": 543744, | |
| "step": 1740 | |
| }, | |
| { | |
| "epoch": 19.38888888888889, | |
| "grad_norm": 0.0001242196885868907, | |
| "learning_rate": 1.472749613259661e-07, | |
| "loss": 0.0, | |
| "num_input_tokens_seen": 545312, | |
| "step": 1745 | |
| }, | |
| { | |
| "epoch": 19.444444444444443, | |
| "grad_norm": 0.00012030125799356028, | |
| "learning_rate": 1.22170413724923e-07, | |
| "loss": 0.0, | |
| "num_input_tokens_seen": 546912, | |
| "step": 1750 | |
| }, | |
| { | |
| "epoch": 19.5, | |
| "grad_norm": 0.00018067783094011247, | |
| "learning_rate": 9.940480466855417e-08, | |
| "loss": 0.0, | |
| "num_input_tokens_seen": 548512, | |
| "step": 1755 | |
| }, | |
| { | |
| "epoch": 19.555555555555557, | |
| "grad_norm": 0.00014770979760214686, | |
| "learning_rate": 7.898027451078982e-08, | |
| "loss": 0.0, | |
| "num_input_tokens_seen": 550112, | |
| "step": 1760 | |
| }, | |
| { | |
| "epoch": 19.61111111111111, | |
| "grad_norm": 9.749268792802468e-05, | |
| "learning_rate": 6.089874350439506e-08, | |
| "loss": 0.0, | |
| "num_input_tokens_seen": 551680, | |
| "step": 1765 | |
| }, | |
| { | |
| "epoch": 19.666666666666668, | |
| "grad_norm": 0.00019528920529410243, | |
| "learning_rate": 4.516191162040051e-08, | |
| "loss": 0.0, | |
| "num_input_tokens_seen": 553280, | |
| "step": 1770 | |
| }, | |
| { | |
| "epoch": 19.72222222222222, | |
| "grad_norm": 7.867484237067401e-05, | |
| "learning_rate": 3.177125838830786e-08, | |
| "loss": 0.0, | |
| "num_input_tokens_seen": 554848, | |
| "step": 1775 | |
| }, | |
| { | |
| "epoch": 19.77777777777778, | |
| "grad_norm": 0.0006988178938627243, | |
| "learning_rate": 2.0728042756967824e-08, | |
| "loss": 0.0, | |
| "num_input_tokens_seen": 556512, | |
| "step": 1780 | |
| }, | |
| { | |
| "epoch": 19.833333333333332, | |
| "grad_norm": 9.091465472010896e-05, | |
| "learning_rate": 1.2033302976222071e-08, | |
| "loss": 0.0, | |
| "num_input_tokens_seen": 558112, | |
| "step": 1785 | |
| }, | |
| { | |
| "epoch": 19.88888888888889, | |
| "grad_norm": 0.0007256029639393091, | |
| "learning_rate": 5.687856499297928e-09, | |
| "loss": 0.0, | |
| "num_input_tokens_seen": 559712, | |
| "step": 1790 | |
| }, | |
| { | |
| "epoch": 19.944444444444443, | |
| "grad_norm": 8.747593528823927e-05, | |
| "learning_rate": 1.692299905944883e-09, | |
| "loss": 0.0, | |
| "num_input_tokens_seen": 561280, | |
| "step": 1795 | |
| }, | |
| { | |
| "epoch": 20.0, | |
| "grad_norm": 0.00032079112133942544, | |
| "learning_rate": 4.700884634611935e-11, | |
| "loss": 0.0, | |
| "num_input_tokens_seen": 562848, | |
| "step": 1800 | |
| }, | |
| { | |
| "epoch": 20.0, | |
| "eval_loss": 0.02443142607808113, | |
| "eval_runtime": 0.5676, | |
| "eval_samples_per_second": 70.475, | |
| "eval_steps_per_second": 17.619, | |
| "num_input_tokens_seen": 562848, | |
| "step": 1800 | |
| }, | |
| { | |
| "epoch": 20.0, | |
| "num_input_tokens_seen": 562848, | |
| "step": 1800, | |
| "total_flos": 2.539436691868877e+16, | |
| "train_loss": 0.01842850733881278, | |
| "train_runtime": 377.7415, | |
| "train_samples_per_second": 19.061, | |
| "train_steps_per_second": 4.765 | |
| } | |
| ], | |
| "logging_steps": 5, | |
| "max_steps": 1800, | |
| "num_input_tokens_seen": 562848, | |
| "num_train_epochs": 20, | |
| "save_steps": 90, | |
| "stateful_callbacks": { | |
| "TrainerControl": { | |
| "args": { | |
| "should_epoch_stop": false, | |
| "should_evaluate": false, | |
| "should_log": false, | |
| "should_save": true, | |
| "should_training_stop": true | |
| }, | |
| "attributes": {} | |
| } | |
| }, | |
| "total_flos": 2.539436691868877e+16, | |
| "train_batch_size": 4, | |
| "trial_name": null, | |
| "trial_params": null | |
| } | |
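
The object above is the complete `trainer_state.json` that the Hugging Face `Trainer` writes alongside each checkpoint. As a minimal sketch (not part of the original log), the snippet below shows how one might load this file and summarize its `log_history`, which interleaves training entries (keyed by `loss`/`learning_rate`) with evaluation entries (keyed by `eval_loss`) plus a final aggregate entry. The path `trainer_state.json` is an assumption; point it at wherever the state was saved.

```python
# Sketch: parse a Hugging Face trainer_state.json and summarize it.
# Assumes the file sits in the working directory; adjust the path as needed.
import json

with open("trainer_state.json") as f:
    state = json.load(f)

# log_history mixes three kinds of entries; separate them by key presence.
logs = state["log_history"]
train_logs = [e for e in logs if "loss" in e]          # per-logging-step
eval_logs = [e for e in logs if "eval_loss" in e]      # per-evaluation
summary = logs[-1]                                     # final aggregate entry

# Best checkpoint as recorded by the Trainer itself.
print("best_global_step:", state.get("best_global_step"))
print("best_metric:     ", state.get("best_metric"))

# Recompute the best eval loss from the log as a sanity check; it should
# match the fields above, since every evaluation is logged here.
best = min(eval_logs, key=lambda e: e["eval_loss"])
print(f"min eval_loss {best['eval_loss']:.6f} "
      f"at step {best['step']} (epoch {best['epoch']})")

# Final training summary.
print("train_loss:       ", summary.get("train_loss"))
print("train_runtime (s):", summary.get("train_runtime"))
```

Running this against the state shown here would report the Trainer's recorded best checkpoint and the recomputed minimum `eval_loss`; a mismatch between the two would indicate the state file was edited or truncated after training.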