{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 1.0,
  "eval_steps": 500,
  "global_step": 625,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.0016,
      "grad_norm": 4.720448017120361,
      "learning_rate": 3.1746031746031746e-06,
      "loss": 1.8429,
      "mean_token_accuracy": 0.6849687695503235,
      "step": 1
    },
    {
      "epoch": 0.008,
      "grad_norm": 4.610220909118652,
      "learning_rate": 1.5873015873015872e-05,
      "loss": 1.8484,
      "mean_token_accuracy": 0.6779065430164337,
      "step": 5
    },
    {
      "epoch": 0.016,
      "grad_norm": 2.8285515308380127,
      "learning_rate": 3.1746031746031745e-05,
      "loss": 1.7723,
      "mean_token_accuracy": 0.6799486577510834,
      "step": 10
    },
    {
      "epoch": 0.024,
      "grad_norm": 1.520603895187378,
      "learning_rate": 4.761904761904762e-05,
      "loss": 1.5141,
      "mean_token_accuracy": 0.7008779108524322,
      "step": 15
    },
    {
      "epoch": 0.032,
      "grad_norm": 1.0638386011123657,
      "learning_rate": 6.349206349206349e-05,
      "loss": 1.2605,
      "mean_token_accuracy": 0.7299223065376281,
      "step": 20
    },
    {
      "epoch": 0.04,
      "grad_norm": 0.9480394721031189,
      "learning_rate": 7.936507936507937e-05,
      "loss": 0.9797,
      "mean_token_accuracy": 0.7793598055839539,
      "step": 25
    },
    {
      "epoch": 0.048,
      "grad_norm": 0.8817314505577087,
      "learning_rate": 9.523809523809524e-05,
      "loss": 0.6795,
      "mean_token_accuracy": 0.8407030582427979,
      "step": 30
    },
    {
      "epoch": 0.056,
      "grad_norm": 0.8544878363609314,
      "learning_rate": 0.00011111111111111112,
      "loss": 0.3682,
      "mean_token_accuracy": 0.9016113102436065,
      "step": 35
    },
    {
      "epoch": 0.064,
      "grad_norm": 0.5889844298362732,
      "learning_rate": 0.00012698412698412698,
      "loss": 0.1517,
      "mean_token_accuracy": 0.9596078157424927,
      "step": 40
    },
    {
      "epoch": 0.072,
      "grad_norm": 0.2203538715839386,
      "learning_rate": 0.00014285714285714287,
      "loss": 0.0955,
      "mean_token_accuracy": 0.9677912712097168,
      "step": 45
    },
    {
      "epoch": 0.08,
      "grad_norm": 0.1825166940689087,
      "learning_rate": 0.00015873015873015873,
      "loss": 0.0826,
      "mean_token_accuracy": 0.9692689836025238,
      "step": 50
    },
    {
      "epoch": 0.088,
      "grad_norm": 0.15790687501430511,
      "learning_rate": 0.00017460317460317462,
      "loss": 0.0851,
      "mean_token_accuracy": 0.9700896739959717,
      "step": 55
    },
    {
      "epoch": 0.096,
      "grad_norm": 0.1395537108182907,
      "learning_rate": 0.00019047619047619048,
      "loss": 0.0786,
      "mean_token_accuracy": 0.9702458798885345,
      "step": 60
    },
    {
      "epoch": 0.104,
      "grad_norm": 0.14210280776023865,
      "learning_rate": 0.00019999375039475277,
      "loss": 0.0774,
      "mean_token_accuracy": 0.9697415053844451,
      "step": 65
    },
    {
      "epoch": 0.112,
      "grad_norm": 0.12832307815551758,
      "learning_rate": 0.0001999234513064475,
      "loss": 0.0734,
      "mean_token_accuracy": 0.9710267901420593,
      "step": 70
    },
    {
      "epoch": 0.12,
      "grad_norm": 0.10479996353387833,
      "learning_rate": 0.00019977509622105233,
      "loss": 0.0745,
      "mean_token_accuracy": 0.9724782824516296,
      "step": 75
    },
    {
      "epoch": 0.128,
      "grad_norm": 0.1173420250415802,
      "learning_rate": 0.0001995488010273198,
      "loss": 0.0729,
      "mean_token_accuracy": 0.9719331085681915,
      "step": 80
    },
    {
      "epoch": 0.136,
      "grad_norm": 0.08444946259260178,
      "learning_rate": 0.00019924474249753655,
      "loss": 0.0695,
      "mean_token_accuracy": 0.9733094036579132,
      "step": 85
    },
    {
      "epoch": 0.144,
      "grad_norm": 0.13057316839694977,
      "learning_rate": 0.00019886315814943647,
      "loss": 0.0739,
      "mean_token_accuracy": 0.9714005470275879,
      "step": 90
    },
    {
      "epoch": 0.152,
      "grad_norm": 0.2025141566991806,
      "learning_rate": 0.0001984043460606618,
      "loss": 0.0736,
      "mean_token_accuracy": 0.9701524496078491,
      "step": 95
    },
    {
      "epoch": 0.16,
      "grad_norm": 0.1009383425116539,
      "learning_rate": 0.0001978686646359173,
      "loss": 0.0728,
      "mean_token_accuracy": 0.9720687448978425,
      "step": 100
    },
    {
      "epoch": 0.168,
      "grad_norm": 0.10014893114566803,
      "learning_rate": 0.0001972565323269996,
      "loss": 0.0715,
      "mean_token_accuracy": 0.9728178203105926,
      "step": 105
    },
    {
      "epoch": 0.176,
      "grad_norm": 0.116126149892807,
      "learning_rate": 0.00019656842730592046,
      "loss": 0.0711,
      "mean_token_accuracy": 0.9717484176158905,
      "step": 110
    },
    {
      "epoch": 0.184,
      "grad_norm": 0.08928077667951584,
      "learning_rate": 0.0001958048870913786,
      "loss": 0.0682,
      "mean_token_accuracy": 0.974213844537735,
      "step": 115
    },
    {
      "epoch": 0.192,
      "grad_norm": 0.11367285251617432,
      "learning_rate": 0.0001949665081288729,
      "loss": 0.0706,
      "mean_token_accuracy": 0.9731513261795044,
      "step": 120
    },
    {
      "epoch": 0.2,
      "grad_norm": 0.08238652348518372,
      "learning_rate": 0.00019405394532478424,
      "loss": 0.0696,
      "mean_token_accuracy": 0.9734784185886383,
      "step": 125
    },
    {
      "epoch": 0.208,
      "grad_norm": 0.11063515394926071,
      "learning_rate": 0.00019306791153479006,
      "loss": 0.0717,
      "mean_token_accuracy": 0.9721537351608276,
      "step": 130
    },
    {
      "epoch": 0.216,
      "grad_norm": 0.15030214190483093,
      "learning_rate": 0.00019200917700701176,
      "loss": 0.0696,
      "mean_token_accuracy": 0.9727127254009247,
      "step": 135
    },
    {
      "epoch": 0.224,
      "grad_norm": 0.07141675055027008,
      "learning_rate": 0.0001908785687803289,
      "loss": 0.0698,
      "mean_token_accuracy": 0.9731925427913666,
      "step": 140
    },
    {
      "epoch": 0.232,
      "grad_norm": 0.07116198539733887,
      "learning_rate": 0.00018967697003833157,
      "loss": 0.0683,
      "mean_token_accuracy": 0.9736765503883362,
      "step": 145
    },
    {
      "epoch": 0.24,
      "grad_norm": 0.08784201741218567,
      "learning_rate": 0.0001884053194194142,
      "loss": 0.0703,
      "mean_token_accuracy": 0.9728135228157043,
      "step": 150
    },
    {
      "epoch": 0.248,
      "grad_norm": 0.09302135556936264,
      "learning_rate": 0.00018706461028355104,
      "loss": 0.0722,
      "mean_token_accuracy": 0.9723609626293183,
      "step": 155
    },
    {
      "epoch": 0.256,
      "grad_norm": 0.10910028219223022,
      "learning_rate": 0.00018565588993632487,
      "loss": 0.0705,
      "mean_token_accuracy": 0.9722101032733917,
      "step": 160
    },
    {
      "epoch": 0.264,
      "grad_norm": 0.04893859848380089,
      "learning_rate": 0.0001841802588108161,
      "loss": 0.0738,
      "mean_token_accuracy": 0.971207445859909,
      "step": 165
    },
    {
      "epoch": 0.272,
      "grad_norm": 0.12638282775878906,
      "learning_rate": 0.00018263886960799062,
      "loss": 0.0712,
      "mean_token_accuracy": 0.973378598690033,
      "step": 170
    },
    {
      "epoch": 0.28,
      "grad_norm": 0.056649304926395416,
      "learning_rate": 0.00018103292639625837,
      "loss": 0.069,
      "mean_token_accuracy": 0.9724430739879608,
      "step": 175
    },
    {
      "epoch": 0.288,
      "grad_norm": 0.12598562240600586,
      "learning_rate": 0.0001793636836709057,
      "loss": 0.0717,
      "mean_token_accuracy": 0.9714934885501861,
      "step": 180
    },
    {
      "epoch": 0.296,
      "grad_norm": 0.07508666068315506,
      "learning_rate": 0.0001776324453741365,
      "loss": 0.072,
      "mean_token_accuracy": 0.9719970643520355,
      "step": 185
    },
    {
      "epoch": 0.304,
      "grad_norm": 0.07222873717546463,
      "learning_rate": 0.00017584056387648727,
      "loss": 0.0685,
      "mean_token_accuracy": 0.9727974116802216,
      "step": 190
    },
    {
      "epoch": 0.312,
      "grad_norm": 0.05881664529442787,
      "learning_rate": 0.0001739894389204122,
      "loss": 0.0719,
      "mean_token_accuracy": 0.9726415634155273,
      "step": 195
    },
    {
      "epoch": 0.32,
      "grad_norm": 0.11033733189105988,
      "learning_rate": 0.00017208051652686335,
      "loss": 0.0702,
      "mean_token_accuracy": 0.97275031208992,
      "step": 200
    },
    {
      "epoch": 0.328,
      "grad_norm": 0.10474064946174622,
      "learning_rate": 0.00017011528786571969,
      "loss": 0.0689,
      "mean_token_accuracy": 0.974085009098053,
      "step": 205
    },
    {
      "epoch": 0.336,
      "grad_norm": 0.0579833947122097,
      "learning_rate": 0.00016809528809094807,
      "loss": 0.0671,
      "mean_token_accuracy": 0.9740655779838562,
      "step": 210
    },
    {
      "epoch": 0.344,
      "grad_norm": 0.1026485487818718,
      "learning_rate": 0.0001660220951414055,
      "loss": 0.0695,
      "mean_token_accuracy": 0.9734614491462708,
      "step": 215
    },
    {
      "epoch": 0.352,
      "grad_norm": 0.07079461216926575,
      "learning_rate": 0.00016389732850821966,
      "loss": 0.0664,
      "mean_token_accuracy": 0.9740577757358551,
      "step": 220
    },
    {
      "epoch": 0.36,
      "grad_norm": 0.07856956869363785,
      "learning_rate": 0.0001617226479697105,
      "loss": 0.0698,
      "mean_token_accuracy": 0.973381620645523,
      "step": 225
    },
    {
      "epoch": 0.368,
      "grad_norm": 0.08537988364696503,
      "learning_rate": 0.00015949975229484134,
      "loss": 0.0687,
      "mean_token_accuracy": 0.9732668340206146,
      "step": 230
    },
    {
      "epoch": 0.376,
      "grad_norm": 0.07466263324022293,
      "learning_rate": 0.00015723037791621193,
      "loss": 0.0695,
      "mean_token_accuracy": 0.9729018092155457,
      "step": 235
    },
    {
      "epoch": 0.384,
      "grad_norm": 0.08313712477684021,
      "learning_rate": 0.00015491629757363032,
      "loss": 0.0688,
      "mean_token_accuracy": 0.9735385298728942,
      "step": 240
    },
    {
      "epoch": 0.392,
      "grad_norm": 0.04696296900510788,
      "learning_rate": 0.00015255931892932333,
      "loss": 0.0688,
      "mean_token_accuracy": 0.9733852624893189,
      "step": 245
    },
    {
      "epoch": 0.4,
      "grad_norm": 0.06557007133960724,
      "learning_rate": 0.0001501612831558664,
      "loss": 0.0688,
      "mean_token_accuracy": 0.9734211206436157,
      "step": 250
    },
    {
      "epoch": 0.408,
      "grad_norm": 0.11457318812608719,
      "learning_rate": 0.00014772406349793744,
      "loss": 0.0692,
      "mean_token_accuracy": 0.9734779357910156,
      "step": 255
    },
    {
      "epoch": 0.416,
      "grad_norm": 0.09975489228963852,
      "learning_rate": 0.0001452495638090167,
      "loss": 0.0696,
      "mean_token_accuracy": 0.9729597091674804,
      "step": 260
    },
    {
      "epoch": 0.424,
      "grad_norm": 0.0677042007446289,
      "learning_rate": 0.00014273971706417647,
      "loss": 0.0691,
      "mean_token_accuracy": 0.9730812609195709,
      "step": 265
    },
    {
      "epoch": 0.432,
      "grad_norm": 0.054182957857847214,
      "learning_rate": 0.00014019648385012244,
      "loss": 0.069,
      "mean_token_accuracy": 0.972840279340744,
      "step": 270
    },
    {
      "epoch": 0.44,
      "grad_norm": 0.07722967118024826,
      "learning_rate": 0.00013762185083366556,
      "loss": 0.0678,
      "mean_token_accuracy": 0.9736016094684601,
      "step": 275
    },
    {
      "epoch": 0.448,
      "grad_norm": 0.07115447521209717,
      "learning_rate": 0.00013501782920982184,
      "loss": 0.0691,
      "mean_token_accuracy": 0.972719419002533,
      "step": 280
    },
    {
      "epoch": 0.456,
      "grad_norm": 0.09139351546764374,
      "learning_rate": 0.00013238645313075104,
      "loss": 0.0694,
      "mean_token_accuracy": 0.9720758616924285,
      "step": 285
    },
    {
      "epoch": 0.464,
      "grad_norm": 0.09019674360752106,
      "learning_rate": 0.00012972977811676287,
      "loss": 0.0674,
      "mean_token_accuracy": 0.974070566892624,
      "step": 290
    },
    {
      "epoch": 0.472,
      "grad_norm": 0.05537855625152588,
      "learning_rate": 0.00012704987945063068,
      "loss": 0.0745,
      "mean_token_accuracy": 0.972056758403778,
      "step": 295
    },
    {
      "epoch": 0.48,
      "grad_norm": 0.08490055799484253,
      "learning_rate": 0.00012434885055646823,
      "loss": 0.0665,
      "mean_token_accuracy": 0.9741802930831909,
      "step": 300
    },
    {
      "epoch": 0.488,
      "grad_norm": 0.06208372861146927,
      "learning_rate": 0.00012162880136443447,
      "loss": 0.0672,
      "mean_token_accuracy": 0.9741501808166504,
      "step": 305
    },
    {
      "epoch": 0.496,
      "grad_norm": 0.06884725391864777,
      "learning_rate": 0.00011889185666254506,
      "loss": 0.0683,
      "mean_token_accuracy": 0.9735860764980316,
      "step": 310
    },
    {
      "epoch": 0.504,
      "grad_norm": 0.0937989354133606,
      "learning_rate": 0.00011614015443687722,
      "loss": 0.068,
      "mean_token_accuracy": 0.973924046754837,
      "step": 315
    },
    {
      "epoch": 0.512,
      "grad_norm": 0.05234411731362343,
      "learning_rate": 0.0001133758442014651,
      "loss": 0.0675,
      "mean_token_accuracy": 0.9733489453792572,
      "step": 320
    },
    {
      "epoch": 0.52,
      "grad_norm": 0.06664064526557922,
      "learning_rate": 0.00011060108531918971,
      "loss": 0.0677,
      "mean_token_accuracy": 0.9736957907676697,
      "step": 325
    },
    {
      "epoch": 0.528,
      "grad_norm": 0.055740658193826675,
      "learning_rate": 0.0001078180453149754,
      "loss": 0.0675,
      "mean_token_accuracy": 0.973907732963562,
      "step": 330
    },
    {
      "epoch": 0.536,
      "grad_norm": 0.05514289066195488,
      "learning_rate": 0.00010502889818261075,
      "loss": 0.0675,
      "mean_token_accuracy": 0.974098140001297,
      "step": 335
    },
    {
      "epoch": 0.544,
      "grad_norm": 0.08672691881656647,
      "learning_rate": 0.00010223582268651586,
      "loss": 0.0677,
      "mean_token_accuracy": 0.9738192439079285,
      "step": 340
    },
    {
      "epoch": 0.552,
      "grad_norm": 0.07068340480327606,
      "learning_rate": 9.94410006597835e-05,
      "loss": 0.0677,
      "mean_token_accuracy": 0.9725731074810028,
      "step": 345
    },
    {
      "epoch": 0.56,
      "grad_norm": 0.09604080766439438,
      "learning_rate": 9.66466152998226e-05,
      "loss": 0.067,
      "mean_token_accuracy": 0.9743229866027832,
      "step": 350
    },
    {
      "epoch": 0.568,
      "grad_norm": 0.07730761170387268,
      "learning_rate": 9.385484946293637e-05,
      "loss": 0.0669,
      "mean_token_accuracy": 0.9742228567600251,
      "step": 355
    },
    {
      "epoch": 0.576,
      "grad_norm": 0.05239711329340935,
      "learning_rate": 9.106788395916678e-05,
      "loss": 0.0672,
      "mean_token_accuracy": 0.9740143895149231,
      "step": 360
    },
    {
      "epoch": 0.584,
      "grad_norm": 0.06987932324409485,
      "learning_rate": 8.828789584873754e-05,
      "loss": 0.0667,
      "mean_token_accuracy": 0.9739366114139557,
      "step": 365
    },
    {
      "epoch": 0.592,
      "grad_norm": 0.07997996360063553,
      "learning_rate": 8.551705674142617e-05,
      "loss": 0.0669,
      "mean_token_accuracy": 0.9741333663463593,
      "step": 370
    },
    {
      "epoch": 0.6,
      "grad_norm": 0.08489294350147247,
      "learning_rate": 8.275753110019367e-05,
      "loss": 0.0668,
      "mean_token_accuracy": 0.9730526447296143,
      "step": 375
    },
    {
      "epoch": 0.608,
      "grad_norm": 0.07549357414245605,
      "learning_rate": 8.001147455039735e-05,
      "loss": 0.0659,
      "mean_token_accuracy": 0.9744420766830444,
      "step": 380
    },
    {
      "epoch": 0.616,
      "grad_norm": 0.06531010568141937,
      "learning_rate": 7.728103219590681e-05,
      "loss": 0.0659,
      "mean_token_accuracy": 0.9741898834705353,
      "step": 385
    },
    {
      "epoch": 0.624,
      "grad_norm": 0.0814213976264,
      "learning_rate": 7.456833694343906e-05,
      "loss": 0.068,
      "mean_token_accuracy": 0.9731835901737214,
      "step": 390
    },
    {
      "epoch": 0.632,
      "grad_norm": 0.06650282442569733,
      "learning_rate": 7.18755078364214e-05,
      "loss": 0.0664,
      "mean_token_accuracy": 0.9736463844776153,
      "step": 395
    },
    {
      "epoch": 0.64,
      "grad_norm": 0.10044140368700027,
      "learning_rate": 6.920464839968405e-05,
      "loss": 0.0676,
      "mean_token_accuracy": 0.9736545443534851,
      "step": 400
    },
    {
      "epoch": 0.648,
      "grad_norm": 0.05335594713687897,
      "learning_rate": 6.65578449962749e-05,
      "loss": 0.0668,
      "mean_token_accuracy": 0.9744662940502167,
      "step": 405
    },
    {
      "epoch": 0.656,
      "grad_norm": 0.06943535059690475,
      "learning_rate": 6.393716519768047e-05,
      "loss": 0.0673,
      "mean_token_accuracy": 0.9734912216663361,
      "step": 410
    },
    {
      "epoch": 0.664,
      "grad_norm": 0.060714706778526306,
      "learning_rate": 6.134465616872598e-05,
      "loss": 0.0665,
      "mean_token_accuracy": 0.9740535855293274,
      "step": 415
    },
    {
      "epoch": 0.672,
      "grad_norm": 0.2205849438905716,
      "learning_rate": 5.878234306841637e-05,
      "loss": 0.0685,
      "mean_token_accuracy": 0.9733618140220642,
      "step": 420
    },
    {
      "epoch": 0.68,
      "grad_norm": 0.07674142718315125,
      "learning_rate": 5.62522274679673e-05,
      "loss": 0.0662,
      "mean_token_accuracy": 0.9741897404193878,
      "step": 425
    },
    {
      "epoch": 0.688,
      "grad_norm": 0.08134385943412781,
      "learning_rate": 5.375628578726181e-05,
      "loss": 0.0662,
      "mean_token_accuracy": 0.9741408348083496,
      "step": 430
    },
    {
      "epoch": 0.696,
      "grad_norm": 0.07496479898691177,
      "learning_rate": 5.1296467750954314e-05,
      "loss": 0.0674,
      "mean_token_accuracy": 0.9741273105144501,
      "step": 435
    },
    {
      "epoch": 0.704,
      "grad_norm": 0.061479806900024414,
      "learning_rate": 4.8874694865427676e-05,
      "loss": 0.0669,
      "mean_token_accuracy": 0.973869401216507,
      "step": 440
    },
    {
      "epoch": 0.712,
      "grad_norm": 0.0543564110994339,
      "learning_rate": 4.649285891779327e-05,
      "loss": 0.0662,
      "mean_token_accuracy": 0.9746919453144074,
      "step": 445
    },
    {
      "epoch": 0.72,
      "grad_norm": 0.07584322243928909,
      "learning_rate": 4.415282049810644e-05,
      "loss": 0.0661,
      "mean_token_accuracy": 0.9739439964294434,
      "step": 450
    },
    {
      "epoch": 0.728,
      "grad_norm": 0.04671603813767433,
      "learning_rate": 4.1856407545951834e-05,
      "loss": 0.067,
      "mean_token_accuracy": 0.973672068119049,
      "step": 455
    },
    {
      "epoch": 0.736,
      "grad_norm": 0.06221921369433403,
      "learning_rate": 3.9605413922533874e-05,
      "loss": 0.0665,
      "mean_token_accuracy": 0.9742021799087525,
      "step": 460
    },
    {
      "epoch": 0.744,
      "grad_norm": 0.045539796352386475,
      "learning_rate": 3.740159800938784e-05,
      "loss": 0.0665,
      "mean_token_accuracy": 0.9738292276859284,
      "step": 465
    },
    {
      "epoch": 0.752,
      "grad_norm": 0.0595044270157814,
      "learning_rate": 3.5246681334806175e-05,
      "loss": 0.0669,
      "mean_token_accuracy": 0.9747107863426209,
      "step": 470
    },
    {
      "epoch": 0.76,
      "grad_norm": 0.06676959246397018,
      "learning_rate": 3.3142347229053015e-05,
      "loss": 0.0666,
      "mean_token_accuracy": 0.9743025422096252,
      "step": 475
    },
    {
      "epoch": 0.768,
      "grad_norm": 0.05794461444020271,
      "learning_rate": 3.109023950941736e-05,
      "loss": 0.0651,
      "mean_token_accuracy": 0.9745893716812134,
      "step": 480
    },
    {
      "epoch": 0.776,
      "grad_norm": 0.06010904908180237,
      "learning_rate": 2.909196119613218e-05,
      "loss": 0.0663,
      "mean_token_accuracy": 0.9746502339839935,
      "step": 485
    },
    {
      "epoch": 0.784,
      "grad_norm": 0.05273396894335747,
      "learning_rate": 2.7149073260162416e-05,
      "loss": 0.0657,
      "mean_token_accuracy": 0.9738177537918091,
      "step": 490
    },
    {
      "epoch": 0.792,
      "grad_norm": 0.07098868489265442,
      "learning_rate": 2.5263093403840142e-05,
      "loss": 0.0657,
      "mean_token_accuracy": 0.9750109136104583,
      "step": 495
    },
    {
      "epoch": 0.8,
      "grad_norm": 0.07257623225450516,
      "learning_rate": 2.3435494875299314e-05,
      "loss": 0.0651,
      "mean_token_accuracy": 0.9739606738090515,
      "step": 500
    },
    {
      "epoch": 0.808,
      "grad_norm": 0.09082420915365219,
      "learning_rate": 2.166770531763633e-05,
      "loss": 0.0672,
      "mean_token_accuracy": 0.9737493813037872,
      "step": 505
    },
    {
      "epoch": 0.816,
      "grad_norm": 0.08171214908361435,
      "learning_rate": 1.9961105653695266e-05,
      "loss": 0.0658,
      "mean_token_accuracy": 0.9747599720954895,
      "step": 510
    },
    {
      "epoch": 0.824,
      "grad_norm": 0.0967075377702713,
      "learning_rate": 1.8317029007349085e-05,
      "loss": 0.0664,
      "mean_token_accuracy": 0.9733656942844391,
      "step": 515
    },
    {
      "epoch": 0.832,
      "grad_norm": 0.04340047016739845,
      "learning_rate": 1.6736759662119183e-05,
      "loss": 0.0662,
      "mean_token_accuracy": 0.9745591640472412,
      "step": 520
    },
    {
      "epoch": 0.84,
      "grad_norm": 0.04629400745034218,
      "learning_rate": 1.5221532057947419e-05,
      "loss": 0.0666,
      "mean_token_accuracy": 0.9746206283569336,
      "step": 525
    },
    {
      "epoch": 0.848,
      "grad_norm": 0.0683581680059433,
      "learning_rate": 1.3772529826903269e-05,
      "loss": 0.0652,
      "mean_token_accuracy": 0.9746199965476989,
      "step": 530
    },
    {
      "epoch": 0.856,
      "grad_norm": 0.06316008418798447,
      "learning_rate": 1.23908848685804e-05,
      "loss": 0.0663,
      "mean_token_accuracy": 0.9744003236293792,
      "step": 535
    },
    {
      "epoch": 0.864,
      "grad_norm": 0.06182578578591347,
      "learning_rate": 1.1077676465904208e-05,
      "loss": 0.0675,
      "mean_token_accuracy": 0.9744610726833344,
      "step": 540
    },
    {
      "epoch": 0.872,
      "grad_norm": 0.07484375685453415,
      "learning_rate": 9.833930442041506e-06,
      "loss": 0.067,
      "mean_token_accuracy": 0.9738114476203918,
      "step": 545
    },
    {
      "epoch": 0.88,
      "grad_norm": 0.05372326448559761,
      "learning_rate": 8.660618359070604e-06,
      "loss": 0.0666,
      "mean_token_accuracy": 0.9736744463443756,
      "step": 550
    },
    {
      "epoch": 0.888,
      "grad_norm": 0.0624370239675045,
      "learning_rate": 7.558656759037797e-06,
      "loss": 0.0668,
      "mean_token_accuracy": 0.9736717522144318,
      "step": 555
    },
    {
      "epoch": 0.896,
      "grad_norm": 0.06488735973834991,
      "learning_rate": 6.528906447993288e-06,
      "loss": 0.0656,
      "mean_token_accuracy": 0.9738507032394409,
      "step": 560
    },
    {
      "epoch": 0.904,
      "grad_norm": 0.06491252779960632,
      "learning_rate": 5.572171823565797e-06,
      "loss": 0.0666,
      "mean_token_accuracy": 0.9741439461708069,
      "step": 565
    },
    {
      "epoch": 0.912,
      "grad_norm": 0.06388004124164581,
      "learning_rate": 4.689200246600867e-06,
      "loss": 0.0661,
      "mean_token_accuracy": 0.9750186562538147,
      "step": 570
    },
    {
      "epoch": 0.92,
      "grad_norm": 0.09783890843391418,
      "learning_rate": 3.880681457354118e-06,
      "loss": 0.067,
      "mean_token_accuracy": 0.9736691117286682,
      "step": 575
    },
    {
      "epoch": 0.928,
      "grad_norm": 0.05274941399693489,
      "learning_rate": 3.1472470366950334e-06,
      "loss": 0.0662,
      "mean_token_accuracy": 0.9749404191970825,
      "step": 580
    },
    {
      "epoch": 0.936,
      "grad_norm": 0.05393306538462639,
      "learning_rate": 2.4894699127426367e-06,
      "loss": 0.0672,
      "mean_token_accuracy": 0.9738192737102509,
      "step": 585
    },
    {
      "epoch": 0.944,
      "grad_norm": 0.057169899344444275,
      "learning_rate": 1.907863913318153e-06,
      "loss": 0.0661,
      "mean_token_accuracy": 0.974576759338379,
      "step": 590
    },
    {
      "epoch": 0.952,
      "grad_norm": 0.06199713051319122,
      "learning_rate": 1.4028833645643113e-06,
      "loss": 0.0671,
      "mean_token_accuracy": 0.9740838885307312,
      "step": 595
    },
    {
      "epoch": 0.96,
      "grad_norm": 0.053632188588380814,
      "learning_rate": 9.749227360448143e-07,
      "loss": 0.0657,
      "mean_token_accuracy": 0.9742229998111724,
      "step": 600
    },
    {
      "epoch": 0.968,
      "grad_norm": 0.0578572042286396,
      "learning_rate": 6.243163326014267e-07,
      "loss": 0.0648,
      "mean_token_accuracy": 0.9743983149528503,
      "step": 605
    },
    {
      "epoch": 0.976,
      "grad_norm": 0.07826316356658936,
      "learning_rate": 3.5133803320896994e-07,
      "loss": 0.0671,
      "mean_token_accuracy": 0.9743796646595001,
      "step": 610
    },
    {
      "epoch": 0.984,
      "grad_norm": 0.2104119062423706,
      "learning_rate": 1.562010770326916e-07,
      "loss": 0.0666,
      "mean_token_accuracy": 0.97383713722229,
      "step": 615
    },
    {
      "epoch": 0.992,
      "grad_norm": 0.04197349026799202,
      "learning_rate": 3.905789685471062e-08,
      "loss": 0.0653,
      "mean_token_accuracy": 0.9747089684009552,
      "step": 620
    },
    {
      "epoch": 1.0,
      "grad_norm": 0.07076921314001083,
      "learning_rate": 0.0,
      "loss": 0.0666,
      "mean_token_accuracy": 0.9736450254917145,
      "step": 625
    },
    {
      "epoch": 1.0,
      "step": 625,
      "total_flos": 4738039144775680.0,
      "train_loss": 0.13301027479171754,
      "train_runtime": 396.9652,
      "train_samples_per_second": 12.596,
      "train_steps_per_second": 1.574
    }
  ],
  "logging_steps": 5,
  "max_steps": 625,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 100,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 4738039144775680.0,
  "train_batch_size": 4,
  "trial_name": null,
  "trial_params": null
}