{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 4.910344827586207,
  "eval_steps": 24,
  "global_step": 240,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.020689655172413793,
      "grad_norm": 1.1915137767791748,
      "learning_rate": 2e-05,
      "loss": 0.4295,
      "step": 1
    },
    {
      "epoch": 0.020689655172413793,
      "eval_loss": 0.8328031301498413,
      "eval_runtime": 63.9443,
      "eval_samples_per_second": 1.204,
      "eval_steps_per_second": 1.204,
      "step": 1
    },
    {
      "epoch": 0.041379310344827586,
      "grad_norm": 1.0899155139923096,
      "learning_rate": 4e-05,
      "loss": 0.3955,
      "step": 2
    },
    {
      "epoch": 0.06206896551724138,
      "grad_norm": 1.8867642879486084,
      "learning_rate": 6e-05,
      "loss": 0.6812,
      "step": 3
    },
    {
      "epoch": 0.08275862068965517,
      "grad_norm": 1.647360920906067,
      "learning_rate": 8e-05,
      "loss": 0.4719,
      "step": 4
    },
    {
      "epoch": 0.10344827586206896,
      "grad_norm": 1.4603902101516724,
      "learning_rate": 0.0001,
      "loss": 0.3768,
      "step": 5
    },
    {
      "epoch": 0.12413793103448276,
      "grad_norm": 1.3657780885696411,
      "learning_rate": 0.00012,
      "loss": 0.6486,
      "step": 6
    },
    {
      "epoch": 0.14482758620689656,
      "grad_norm": 0.6542850732803345,
      "learning_rate": 0.00014,
      "loss": 0.3636,
      "step": 7
    },
    {
      "epoch": 0.16551724137931034,
      "grad_norm": 0.4936724901199341,
      "learning_rate": 0.00016,
      "loss": 0.3768,
      "step": 8
    },
    {
      "epoch": 0.18620689655172415,
      "grad_norm": 0.4900519549846649,
      "learning_rate": 0.00018,
      "loss": 0.2961,
      "step": 9
    },
    {
      "epoch": 0.20689655172413793,
      "grad_norm": 1.428287386894226,
      "learning_rate": 0.0002,
      "loss": 0.4558,
      "step": 10
    },
    {
      "epoch": 0.22758620689655173,
      "grad_norm": 0.8353174924850464,
      "learning_rate": 0.0001999906715964522,
      "loss": 0.3956,
      "step": 11
    },
    {
      "epoch": 0.2482758620689655,
      "grad_norm": 0.39203962683677673,
      "learning_rate": 0.00019996268812619107,
      "loss": 0.3254,
      "step": 12
    },
    {
      "epoch": 0.2689655172413793,
      "grad_norm": 0.6304475665092468,
      "learning_rate": 0.00019991605481003866,
      "loss": 0.4182,
      "step": 13
    },
    {
      "epoch": 0.2896551724137931,
      "grad_norm": 0.4418434500694275,
      "learning_rate": 0.0001998507803482828,
      "loss": 0.3459,
      "step": 14
    },
    {
      "epoch": 0.3103448275862069,
      "grad_norm": 0.2997497022151947,
      "learning_rate": 0.00019976687691905393,
      "loss": 0.2566,
      "step": 15
    },
    {
      "epoch": 0.3310344827586207,
      "grad_norm": 0.5532309412956238,
      "learning_rate": 0.00019966436017605297,
      "loss": 0.2912,
      "step": 16
    },
    {
      "epoch": 0.35172413793103446,
      "grad_norm": 0.31911540031433105,
      "learning_rate": 0.00019954324924563089,
      "loss": 0.2099,
      "step": 17
    },
    {
      "epoch": 0.3724137931034483,
      "grad_norm": 0.33010149002075195,
      "learning_rate": 0.00019940356672322037,
      "loss": 0.2229,
      "step": 18
    },
    {
      "epoch": 0.3931034482758621,
      "grad_norm": 0.28100574016571045,
      "learning_rate": 0.00019924533866912017,
      "loss": 0.1555,
      "step": 19
    },
    {
      "epoch": 0.41379310344827586,
      "grad_norm": 0.3950953781604767,
      "learning_rate": 0.00019906859460363307,
      "loss": 0.2165,
      "step": 20
    },
    {
      "epoch": 0.43448275862068964,
      "grad_norm": 0.4930034577846527,
      "learning_rate": 0.0001988733675015585,
      "loss": 0.3467,
      "step": 21
    },
    {
      "epoch": 0.45517241379310347,
      "grad_norm": 0.21198025345802307,
      "learning_rate": 0.0001986596937860402,
      "loss": 0.1312,
      "step": 22
    },
    {
      "epoch": 0.47586206896551725,
      "grad_norm": 0.3137592077255249,
      "learning_rate": 0.00019842761332177115,
      "loss": 0.2056,
      "step": 23
    },
    {
      "epoch": 0.496551724137931,
      "grad_norm": 0.5739158391952515,
      "learning_rate": 0.00019817716940755586,
      "loss": 0.3379,
      "step": 24
    },
    {
      "epoch": 0.496551724137931,
      "eval_loss": 0.1819331794977188,
      "eval_runtime": 64.0642,
      "eval_samples_per_second": 1.202,
      "eval_steps_per_second": 1.202,
      "step": 24
    },
    {
      "epoch": 0.5172413793103449,
      "grad_norm": 0.3340909481048584,
      "learning_rate": 0.00019790840876823232,
      "loss": 0.1835,
      "step": 25
    },
    {
      "epoch": 0.5379310344827586,
      "grad_norm": 0.39615124464035034,
      "learning_rate": 0.00019762138154595446,
      "loss": 0.2404,
      "step": 26
    },
    {
      "epoch": 0.5586206896551724,
      "grad_norm": 0.3325432538986206,
      "learning_rate": 0.00019731614129083754,
      "loss": 0.2437,
      "step": 27
    },
    {
      "epoch": 0.5793103448275863,
      "grad_norm": 0.25816088914871216,
      "learning_rate": 0.00019699274495096712,
      "loss": 0.1068,
      "step": 28
    },
    {
      "epoch": 0.6,
      "grad_norm": 0.2888956367969513,
      "learning_rate": 0.00019665125286177449,
      "loss": 0.1472,
      "step": 29
    },
    {
      "epoch": 0.6206896551724138,
      "grad_norm": 0.38325801491737366,
      "learning_rate": 0.00019629172873477995,
      "loss": 0.1835,
      "step": 30
    },
    {
      "epoch": 0.6413793103448275,
      "grad_norm": 0.4767705798149109,
      "learning_rate": 0.00019591423964570632,
      "loss": 0.2318,
      "step": 31
    },
    {
      "epoch": 0.6620689655172414,
      "grad_norm": 0.45043617486953735,
      "learning_rate": 0.0001955188560219648,
      "loss": 0.2173,
      "step": 32
    },
    {
      "epoch": 0.6827586206896552,
      "grad_norm": 0.3859993517398834,
      "learning_rate": 0.00019510565162951537,
      "loss": 0.2733,
      "step": 33
    },
    {
      "epoch": 0.7034482758620689,
      "grad_norm": 0.3636537194252014,
      "learning_rate": 0.00019467470355910438,
      "loss": 0.2444,
      "step": 34
    },
    {
      "epoch": 0.7241379310344828,
      "grad_norm": 0.23173992335796356,
      "learning_rate": 0.00019422609221188207,
      "loss": 0.108,
      "step": 35
    },
    {
      "epoch": 0.7448275862068966,
      "grad_norm": 0.486289918422699,
      "learning_rate": 0.00019375990128440204,
      "loss": 0.358,
      "step": 36
    },
    {
      "epoch": 0.7655172413793103,
      "grad_norm": 0.42659395933151245,
      "learning_rate": 0.00019327621775300637,
      "loss": 0.2714,
      "step": 37
    },
    {
      "epoch": 0.7862068965517242,
      "grad_norm": 0.3593175411224365,
      "learning_rate": 0.00019277513185759844,
      "loss": 0.1641,
      "step": 38
    },
    {
      "epoch": 0.8068965517241379,
      "grad_norm": 0.39383402466773987,
      "learning_rate": 0.00019225673708480717,
      "loss": 0.3587,
      "step": 39
    },
    {
      "epoch": 0.8275862068965517,
      "grad_norm": 0.33613619208335876,
      "learning_rate": 0.00019172113015054532,
      "loss": 0.207,
      "step": 40
    },
    {
      "epoch": 0.8482758620689655,
      "grad_norm": 0.3084883689880371,
      "learning_rate": 0.00019116841098196536,
      "loss": 0.1763,
      "step": 41
    },
    {
      "epoch": 0.8689655172413793,
      "grad_norm": 0.304338663816452,
      "learning_rate": 0.0001905986826988164,
      "loss": 0.1649,
      "step": 42
    },
    {
      "epoch": 0.8896551724137931,
      "grad_norm": 0.4351169466972351,
      "learning_rate": 0.00019001205159420513,
      "loss": 0.225,
      "step": 43
    },
    {
      "epoch": 0.9103448275862069,
      "grad_norm": 0.4224107563495636,
      "learning_rate": 0.00018940862711476513,
      "loss": 0.2423,
      "step": 44
    },
    {
      "epoch": 0.9310344827586207,
      "grad_norm": 0.39995938539505005,
      "learning_rate": 0.0001887885218402375,
      "loss": 0.1818,
      "step": 45
    },
    {
      "epoch": 0.9517241379310345,
      "grad_norm": 0.4263107180595398,
      "learning_rate": 0.00018815185146246716,
      "loss": 0.3607,
      "step": 46
    },
    {
      "epoch": 0.9724137931034482,
      "grad_norm": 0.31094980239868164,
      "learning_rate": 0.00018749873476381828,
      "loss": 0.2562,
      "step": 47
    },
    {
      "epoch": 0.993103448275862,
      "grad_norm": 0.2563900649547577,
      "learning_rate": 0.00018682929359501338,
      "loss": 0.2508,
      "step": 48
    },
    {
      "epoch": 0.993103448275862,
      "eval_loss": 0.13709446787834167,
      "eval_runtime": 64.0725,
      "eval_samples_per_second": 1.202,
      "eval_steps_per_second": 1.202,
      "step": 48
    },
    {
      "epoch": 1.0,
      "grad_norm": 0.5756971836090088,
      "learning_rate": 0.0001861436528524,
      "loss": 0.1448,
      "step": 49
    },
    {
      "epoch": 1.0206896551724138,
      "grad_norm": 0.45615053176879883,
      "learning_rate": 0.00018544194045464886,
      "loss": 0.2187,
      "step": 50
    },
    {
      "epoch": 1.0413793103448277,
      "grad_norm": 0.36705297231674194,
      "learning_rate": 0.00018472428731888837,
      "loss": 0.2107,
      "step": 51
    },
    {
      "epoch": 1.0620689655172413,
      "grad_norm": 0.31725841760635376,
      "learning_rate": 0.00018399082733627965,
      "loss": 0.1774,
      "step": 52
    },
    {
      "epoch": 1.0827586206896551,
      "grad_norm": 0.25364992022514343,
      "learning_rate": 0.00018324169734703683,
      "loss": 0.2267,
      "step": 53
    },
    {
      "epoch": 1.103448275862069,
      "grad_norm": 0.25190988183021545,
      "learning_rate": 0.00018247703711489686,
      "loss": 0.2017,
      "step": 54
    },
    {
      "epoch": 1.1241379310344828,
      "grad_norm": 0.33936384320259094,
      "learning_rate": 0.0001816969893010442,
      "loss": 0.1176,
      "step": 55
    },
    {
      "epoch": 1.1448275862068966,
      "grad_norm": 0.5117663741111755,
      "learning_rate": 0.00018090169943749476,
      "loss": 0.307,
      "step": 56
    },
    {
      "epoch": 1.1655172413793102,
      "grad_norm": 0.3256533741950989,
      "learning_rate": 0.00018009131589994418,
      "loss": 0.1494,
      "step": 57
    },
    {
      "epoch": 1.186206896551724,
      "grad_norm": 0.34564223885536194,
      "learning_rate": 0.00017926598988008582,
      "loss": 0.1823,
      "step": 58
    },
    {
      "epoch": 1.206896551724138,
      "grad_norm": 0.27000346779823303,
      "learning_rate": 0.00017842587535740314,
      "loss": 0.1106,
      "step": 59
    },
    {
      "epoch": 1.2275862068965517,
      "grad_norm": 0.2434675246477127,
      "learning_rate": 0.000177571129070442,
      "loss": 0.126,
      "step": 60
    },
    {
      "epoch": 1.2482758620689656,
      "grad_norm": 0.26117900013923645,
      "learning_rate": 0.0001767019104875683,
      "loss": 0.0841,
      "step": 61
    },
    {
      "epoch": 1.2689655172413792,
      "grad_norm": 0.42246413230895996,
      "learning_rate": 0.0001758183817772163,
      "loss": 0.1856,
      "step": 62
    },
    {
      "epoch": 1.2896551724137932,
      "grad_norm": 0.40459299087524414,
      "learning_rate": 0.0001749207077776331,
      "loss": 0.2453,
      "step": 63
    },
    {
      "epoch": 1.3103448275862069,
      "grad_norm": 0.271054744720459,
      "learning_rate": 0.0001740090559661252,
      "loss": 0.1664,
      "step": 64
    },
    {
      "epoch": 1.3310344827586207,
      "grad_norm": 0.34983810782432556,
      "learning_rate": 0.00017308359642781242,
      "loss": 0.2203,
      "step": 65
    },
    {
      "epoch": 1.3517241379310345,
      "grad_norm": 0.3788837790489197,
      "learning_rate": 0.00017214450182389559,
      "loss": 0.2225,
      "step": 66
    },
    {
      "epoch": 1.3724137931034484,
      "grad_norm": 0.4210594594478607,
      "learning_rate": 0.00017119194735944337,
      "loss": 0.136,
      "step": 67
    },
    {
      "epoch": 1.3931034482758622,
      "grad_norm": 0.27931544184684753,
      "learning_rate": 0.00017022611075070474,
      "loss": 0.1502,
      "step": 68
    },
    {
      "epoch": 1.4137931034482758,
      "grad_norm": 0.2842492163181305,
      "learning_rate": 0.0001692471721919526,
      "loss": 0.1912,
      "step": 69
    },
    {
      "epoch": 1.4344827586206896,
      "grad_norm": 0.5421637892723083,
      "learning_rate": 0.00016825531432186543,
      "loss": 0.2176,
      "step": 70
    },
    {
      "epoch": 1.4551724137931035,
      "grad_norm": 0.3176499307155609,
      "learning_rate": 0.00016725072218945272,
      "loss": 0.1657,
      "step": 71
    },
    {
      "epoch": 1.4758620689655173,
      "grad_norm": 0.3928243815898895,
      "learning_rate": 0.00016623358321953078,
      "loss": 0.2137,
      "step": 72
    },
    {
      "epoch": 1.4758620689655173,
      "eval_loss": 0.1118382066488266,
      "eval_runtime": 63.9679,
      "eval_samples_per_second": 1.204,
      "eval_steps_per_second": 1.204,
      "step": 72
    },
    {
      "epoch": 1.4965517241379311,
      "grad_norm": 0.3631574809551239,
      "learning_rate": 0.00016520408717775507,
      "loss": 0.1939,
      "step": 73
    },
    {
      "epoch": 1.5172413793103448,
      "grad_norm": 0.3410647213459015,
      "learning_rate": 0.0001641624261352161,
      "loss": 0.1796,
      "step": 74
    },
    {
      "epoch": 1.5379310344827586,
      "grad_norm": 0.41580328345298767,
      "learning_rate": 0.00016310879443260528,
      "loss": 0.2142,
      "step": 75
    },
    {
      "epoch": 1.5586206896551724,
      "grad_norm": 0.2548270523548126,
      "learning_rate": 0.00016204338864395684,
      "loss": 0.1103,
      "step": 76
    },
    {
      "epoch": 1.5793103448275863,
      "grad_norm": 0.3204116225242615,
      "learning_rate": 0.00016096640753997346,
      "loss": 0.1729,
      "step": 77
    },
    {
      "epoch": 1.6,
      "grad_norm": 0.5006029605865479,
      "learning_rate": 0.00015987805205094227,
      "loss": 0.2593,
      "step": 78
    },
    {
      "epoch": 1.6206896551724137,
      "grad_norm": 0.325984925031662,
      "learning_rate": 0.00015877852522924732,
      "loss": 0.1328,
      "step": 79
    },
    {
      "epoch": 1.6413793103448275,
      "grad_norm": 0.3798094391822815,
      "learning_rate": 0.00015766803221148673,
      "loss": 0.2076,
      "step": 80
    },
    {
      "epoch": 1.6620689655172414,
      "grad_norm": 0.38768818974494934,
      "learning_rate": 0.0001565467801802006,
      "loss": 0.2336,
      "step": 81
    },
    {
      "epoch": 1.6827586206896552,
      "grad_norm": 0.2826750576496124,
      "learning_rate": 0.0001554149783252175,
      "loss": 0.1368,
      "step": 82
    },
    {
      "epoch": 1.703448275862069,
      "grad_norm": 0.3867325186729431,
      "learning_rate": 0.0001542728378046262,
      "loss": 0.3069,
      "step": 83
    },
    {
      "epoch": 1.7241379310344827,
      "grad_norm": 0.3224121034145355,
      "learning_rate": 0.00015312057170538035,
      "loss": 0.1992,
      "step": 84
    },
    {
      "epoch": 1.7448275862068967,
      "grad_norm": 0.46213704347610474,
      "learning_rate": 0.00015195839500354335,
      "loss": 0.2197,
      "step": 85
    },
    {
      "epoch": 1.7655172413793103,
      "grad_norm": 0.3547733426094055,
      "learning_rate": 0.00015078652452418063,
      "loss": 0.1473,
      "step": 86
    },
    {
      "epoch": 1.7862068965517242,
      "grad_norm": 0.24282805621623993,
      "learning_rate": 0.0001496051789009068,
      "loss": 0.1168,
      "step": 87
    },
    {
      "epoch": 1.806896551724138,
      "grad_norm": 0.24835267663002014,
      "learning_rate": 0.00014841457853509606,
      "loss": 0.1237,
      "step": 88
    },
    {
      "epoch": 1.8275862068965516,
      "grad_norm": 0.5328035354614258,
      "learning_rate": 0.00014721494555476188,
      "loss": 0.2102,
      "step": 89
    },
    {
      "epoch": 1.8482758620689657,
      "grad_norm": 0.2665577828884125,
      "learning_rate": 0.00014600650377311522,
      "loss": 0.1455,
      "step": 90
    },
    {
      "epoch": 1.8689655172413793,
      "grad_norm": 0.3814431130886078,
      "learning_rate": 0.0001447894786468082,
      "loss": 0.2461,
      "step": 91
    },
    {
      "epoch": 1.889655172413793,
      "grad_norm": 0.222720667719841,
      "learning_rate": 0.0001435640972338709,
      "loss": 0.1094,
      "step": 92
    },
    {
      "epoch": 1.910344827586207,
      "grad_norm": 0.2874739468097687,
      "learning_rate": 0.00014233058815134978,
      "loss": 0.155,
      "step": 93
    },
    {
      "epoch": 1.9310344827586206,
      "grad_norm": 0.5041664838790894,
      "learning_rate": 0.00014108918153265485,
      "loss": 0.1978,
      "step": 94
    },
    {
      "epoch": 1.9517241379310346,
      "grad_norm": 0.3331792652606964,
      "learning_rate": 0.00013984010898462416,
      "loss": 0.1715,
      "step": 95
    },
    {
      "epoch": 1.9724137931034482,
      "grad_norm": 0.2189134806394577,
      "learning_rate": 0.00013858360354431355,
      "loss": 0.0767,
      "step": 96
    },
    {
      "epoch": 1.9724137931034482,
      "eval_loss": 0.1199033260345459,
      "eval_runtime": 64.1816,
      "eval_samples_per_second": 1.2,
      "eval_steps_per_second": 1.2,
      "step": 96
    },
    {
      "epoch": 1.993103448275862,
      "grad_norm": 0.26571527123451233,
      "learning_rate": 0.00013731989963551913,
      "loss": 0.1464,
      "step": 97
    },
    {
      "epoch": 2.0,
      "grad_norm": 0.8830859065055847,
      "learning_rate": 0.00013604923302504147,
      "loss": 0.2415,
      "step": 98
    },
    {
      "epoch": 2.0206896551724136,
      "grad_norm": 0.510640025138855,
      "learning_rate": 0.00013477184077869892,
      "loss": 0.2022,
      "step": 99
    },
    {
      "epoch": 2.0413793103448277,
      "grad_norm": 0.22672274708747864,
      "learning_rate": 0.00013348796121709862,
      "loss": 0.1267,
      "step": 100
    },
    {
      "epoch": 2.0620689655172413,
      "grad_norm": 0.3994502127170563,
      "learning_rate": 0.00013219783387117385,
      "loss": 0.1608,
      "step": 101
    },
    {
      "epoch": 2.0827586206896553,
      "grad_norm": 0.3596170246601105,
      "learning_rate": 0.00013090169943749476,
      "loss": 0.1791,
      "step": 102
    },
    {
      "epoch": 2.103448275862069,
      "grad_norm": 0.42505013942718506,
      "learning_rate": 0.00012959979973336237,
      "loss": 0.1869,
      "step": 103
    },
    {
      "epoch": 2.1241379310344826,
      "grad_norm": 0.30223432183265686,
      "learning_rate": 0.000128292377651693,
      "loss": 0.1213,
      "step": 104
    },
    {
      "epoch": 2.1448275862068966,
      "grad_norm": 0.29373323917388916,
      "learning_rate": 0.00012697967711570242,
      "loss": 0.1025,
      "step": 105
    },
    {
      "epoch": 2.1655172413793102,
      "grad_norm": 0.3115707337856293,
      "learning_rate": 0.00012566194303339739,
      "loss": 0.163,
      "step": 106
    },
    {
      "epoch": 2.1862068965517243,
      "grad_norm": 0.45359566807746887,
      "learning_rate": 0.00012433942125188359,
      "loss": 0.182,
      "step": 107
    },
    {
      "epoch": 2.206896551724138,
      "grad_norm": 0.3219972252845764,
      "learning_rate": 0.00012301235851149865,
      "loss": 0.125,
      "step": 108
    },
    {
      "epoch": 2.227586206896552,
      "grad_norm": 0.430279940366745,
      "learning_rate": 0.00012168100239977809,
      "loss": 0.1513,
      "step": 109
    },
    {
      "epoch": 2.2482758620689656,
      "grad_norm": 0.6318575143814087,
      "learning_rate": 0.0001203456013052634,
      "loss": 0.1824,
      "step": 110
    },
    {
      "epoch": 2.268965517241379,
      "grad_norm": 0.35667774081230164,
      "learning_rate": 0.00011900640437116073,
      "loss": 0.1439,
      "step": 111
    },
    {
      "epoch": 2.2896551724137932,
      "grad_norm": 0.39643311500549316,
      "learning_rate": 0.00011766366144885877,
      "loss": 0.1477,
      "step": 112
    },
    {
      "epoch": 2.310344827586207,
      "grad_norm": 0.4252413511276245,
      "learning_rate": 0.00011631762305131424,
      "loss": 0.2058,
      "step": 113
    },
    {
      "epoch": 2.3310344827586205,
      "grad_norm": 0.4794275164604187,
      "learning_rate": 0.00011496854030631443,
      "loss": 0.1077,
      "step": 114
    },
    {
      "epoch": 2.3517241379310345,
      "grad_norm": 0.3109055161476135,
      "learning_rate": 0.00011361666490962468,
      "loss": 0.1773,
      "step": 115
    },
    {
      "epoch": 2.372413793103448,
      "grad_norm": 0.27572667598724365,
      "learning_rate": 0.00011226224907802985,
      "loss": 0.0878,
      "step": 116
    },
    {
      "epoch": 2.393103448275862,
      "grad_norm": 0.4329110085964203,
      "learning_rate": 0.00011090554550227899,
      "loss": 0.1487,
      "step": 117
    },
    {
      "epoch": 2.413793103448276,
      "grad_norm": 0.6160383224487305,
      "learning_rate": 0.00010954680729994102,
      "loss": 0.2086,
      "step": 118
    },
    {
      "epoch": 2.43448275862069,
      "grad_norm": 0.7214148640632629,
      "learning_rate": 0.00010818628796818133,
      "loss": 0.1993,
      "step": 119
    },
    {
      "epoch": 2.4551724137931035,
      "grad_norm": 0.23550526797771454,
      "learning_rate": 0.0001068242413364671,
      "loss": 0.0515,
      "step": 120
    },
    {
      "epoch": 2.4551724137931035,
      "eval_loss": 0.11466084420681,
      "eval_runtime": 64.1028,
      "eval_samples_per_second": 1.201,
      "eval_steps_per_second": 1.201,
      "step": 120
    },
    {
      "epoch": 2.475862068965517,
      "grad_norm": 0.40542933344841003,
      "learning_rate": 0.000105460921519211,
      "loss": 0.1723,
      "step": 121
    },
    {
      "epoch": 2.496551724137931,
      "grad_norm": 0.43189290165901184,
      "learning_rate": 0.00010409658286836143,
      "loss": 0.1377,
      "step": 122
    },
    {
      "epoch": 2.5172413793103448,
      "grad_norm": 0.2595944106578827,
      "learning_rate": 0.00010273147992594861,
      "loss": 0.0659,
      "step": 123
    },
    {
      "epoch": 2.5379310344827584,
      "grad_norm": 0.28212693333625793,
      "learning_rate": 0.0001013658673765951,
      "loss": 0.0923,
      "step": 124
    },
    {
      "epoch": 2.5586206896551724,
      "grad_norm": 0.26988205313682556,
      "learning_rate": 0.0001,
      "loss": 0.0932,
      "step": 125
    },
    {
      "epoch": 2.5793103448275865,
      "grad_norm": 0.367355078458786,
      "learning_rate": 9.863413262340491e-05,
      "loss": 0.1645,
      "step": 126
    },
    {
      "epoch": 2.6,
      "grad_norm": 0.5879760980606079,
      "learning_rate": 9.726852007405144e-05,
      "loss": 0.1832,
      "step": 127
    },
    {
      "epoch": 2.6206896551724137,
      "grad_norm": 0.7866286635398865,
      "learning_rate": 9.590341713163858e-05,
      "loss": 0.2375,
      "step": 128
    },
    {
      "epoch": 2.6413793103448278,
      "grad_norm": 0.27723127603530884,
      "learning_rate": 9.453907848078902e-05,
      "loss": 0.0845,
      "step": 129
    },
    {
      "epoch": 2.6620689655172414,
      "grad_norm": 0.3435949683189392,
      "learning_rate": 9.317575866353292e-05,
      "loss": 0.1061,
      "step": 130
    },
    {
      "epoch": 2.682758620689655,
      "grad_norm": 0.2630484700202942,
      "learning_rate": 9.181371203181872e-05,
      "loss": 0.1417,
      "step": 131
    },
    {
      "epoch": 2.703448275862069,
      "grad_norm": 0.3467751145362854,
      "learning_rate": 9.0453192700059e-05,
      "loss": 0.128,
      "step": 132
    },
    {
      "epoch": 2.7241379310344827,
      "grad_norm": 0.3706202208995819,
      "learning_rate": 8.909445449772102e-05,
      "loss": 0.1593,
      "step": 133
    },
    {
      "epoch": 2.7448275862068967,
      "grad_norm": 0.46681544184684753,
      "learning_rate": 8.773775092197017e-05,
      "loss": 0.2293,
      "step": 134
    },
    {
      "epoch": 2.7655172413793103,
      "grad_norm": 0.5786654353141785,
      "learning_rate": 8.638333509037536e-05,
      "loss": 0.1421,
      "step": 135
    },
    {
      "epoch": 2.7862068965517244,
      "grad_norm": 0.25346508622169495,
      "learning_rate": 8.503145969368562e-05,
      "loss": 0.0813,
      "step": 136
    },
    {
      "epoch": 2.806896551724138,
      "grad_norm": 0.30926162004470825,
      "learning_rate": 8.36823769486858e-05,
      "loss": 0.1069,
      "step": 137
    },
    {
      "epoch": 2.8275862068965516,
      "grad_norm": 0.2529960870742798,
      "learning_rate": 8.233633855114127e-05,
      "loss": 0.1393,
      "step": 138
    },
    {
      "epoch": 2.8482758620689657,
      "grad_norm": 0.2747327387332916,
      "learning_rate": 8.09935956288393e-05,
      "loss": 0.1028,
      "step": 139
    },
    {
      "epoch": 2.8689655172413793,
      "grad_norm": 0.23684890568256378,
      "learning_rate": 7.965439869473664e-05,
      "loss": 0.0773,
      "step": 140
    },
    {
      "epoch": 2.889655172413793,
      "grad_norm": 0.6713085770606995,
      "learning_rate": 7.831899760022192e-05,
      "loss": 0.2285,
      "step": 141
    },
    {
      "epoch": 2.910344827586207,
      "grad_norm": 0.46524661779403687,
      "learning_rate": 7.698764148850137e-05,
      "loss": 0.164,
      "step": 142
    },
    {
      "epoch": 2.9310344827586206,
      "grad_norm": 0.38125452399253845,
      "learning_rate": 7.566057874811642e-05,
      "loss": 0.122,
      "step": 143
    },
    {
      "epoch": 2.9517241379310346,
      "grad_norm": 0.25452345609664917,
      "learning_rate": 7.433805696660266e-05,
      "loss": 0.1117,
      "step": 144
    },
    {
      "epoch": 2.9517241379310346,
      "eval_loss": 0.1502632200717926,
      "eval_runtime": 64.1516,
      "eval_samples_per_second": 1.2,
      "eval_steps_per_second": 1.2,
      "step": 144
    },
    {
      "epoch": 2.972413793103448,
      "grad_norm": 0.4600689709186554,
      "learning_rate": 7.302032288429756e-05,
      "loss": 0.1292,
      "step": 145
    },
    {
      "epoch": 2.9931034482758623,
      "grad_norm": 0.26108217239379883,
      "learning_rate": 7.170762234830699e-05,
      "loss": 0.1287,
      "step": 146
    },
    {
      "epoch": 3.0,
      "grad_norm": 0.37030014395713806,
      "learning_rate": 7.040020026663767e-05,
      "loss": 0.0708,
      "step": 147
    },
    {
      "epoch": 3.0206896551724136,
      "grad_norm": 0.2752729654312134,
      "learning_rate": 6.909830056250527e-05,
      "loss": 0.1086,
      "step": 148
    },
    {
      "epoch": 3.0413793103448277,
      "grad_norm": 0.3706147074699402,
      "learning_rate": 6.780216612882619e-05,
      "loss": 0.1045,
      "step": 149
    },
    {
      "epoch": 3.0620689655172413,
      "grad_norm": 0.478118896484375,
      "learning_rate": 6.651203878290139e-05,
      "loss": 0.1291,
      "step": 150
    },
    {
      "epoch": 3.0827586206896553,
      "grad_norm": 0.35050728917121887,
      "learning_rate": 6.522815922130112e-05,
      "loss": 0.0578,
      "step": 151
    },
    {
      "epoch": 3.103448275862069,
      "grad_norm": 0.3276073932647705,
      "learning_rate": 6.395076697495854e-05,
      "loss": 0.0794,
      "step": 152
    },
    {
      "epoch": 3.1241379310344826,
      "grad_norm": 0.3483630418777466,
      "learning_rate": 6.268010036448088e-05,
      "loss": 0.1385,
      "step": 153
    },
    {
      "epoch": 3.1448275862068966,
      "grad_norm": 0.16762271523475647,
      "learning_rate": 6.141639645568646e-05,
      "loss": 0.0477,
      "step": 154
    },
    {
      "epoch": 3.1655172413793102,
      "grad_norm": 0.5462561845779419,
      "learning_rate": 6.015989101537586e-05,
      "loss": 0.1533,
      "step": 155
    },
    {
      "epoch": 3.1862068965517243,
      "grad_norm": 0.3756523132324219,
      "learning_rate": 5.8910818467345185e-05,
      "loss": 0.0991,
      "step": 156
    },
    {
      "epoch": 3.206896551724138,
      "grad_norm": 0.8349863290786743,
      "learning_rate": 5.7669411848650235e-05,
      "loss": 0.1363,
      "step": 157
    },
    {
      "epoch": 3.227586206896552,
      "grad_norm": 0.6413367986679077,
      "learning_rate": 5.643590276612909e-05,
      "loss": 0.161,
      "step": 158
    },
    {
      "epoch": 3.2482758620689656,
      "grad_norm": 0.3455400764942169,
      "learning_rate": 5.521052135319182e-05,
      "loss": 0.1046,
      "step": 159
    },
    {
      "epoch": 3.268965517241379,
      "grad_norm": 0.2701728940010071,
      "learning_rate": 5.399349622688479e-05,
      "loss": 0.06,
      "step": 160
    },
    {
      "epoch": 3.2896551724137932,
      "grad_norm": 0.67363440990448,
      "learning_rate": 5.278505444523816e-05,
      "loss": 0.1387,
      "step": 161
    },
    {
      "epoch": 3.310344827586207,
      "grad_norm": 0.3615413010120392,
      "learning_rate": 5.1585421464903994e-05,
      "loss": 0.0747,
      "step": 162
    },
    {
      "epoch": 3.3310344827586205,
      "grad_norm": 0.4723159074783325,
      "learning_rate": 5.039482109909319e-05,
      "loss": 0.0745,
      "step": 163
    },
    {
      "epoch": 3.3517241379310345,
      "grad_norm": 0.3647497594356537,
      "learning_rate": 4.921347547581939e-05,
      "loss": 0.1017,
      "step": 164
    },
    {
      "epoch": 3.372413793103448,
      "grad_norm": 0.7250043749809265,
      "learning_rate": 4.804160499645667e-05,
      "loss": 0.1826,
      "step": 165
    },
    {
      "epoch": 3.393103448275862,
      "grad_norm": 0.4937655031681061,
      "learning_rate": 4.687942829461969e-05,
      "loss": 0.0817,
      "step": 166
    },
    {
      "epoch": 3.413793103448276,
      "grad_norm": 0.8257989287376404,
      "learning_rate": 4.572716219537385e-05,
      "loss": 0.1081,
      "step": 167
    },
    {
      "epoch": 3.43448275862069,
      "grad_norm": 0.7046942114830017,
      "learning_rate": 4.4585021674782534e-05,
      "loss": 0.1822,
      "step": 168
    },
    {
      "epoch": 3.43448275862069,
      "eval_loss": 0.14357824623584747,
      "eval_runtime": 64.0674,
      "eval_samples_per_second": 1.202,
      "eval_steps_per_second": 1.202,
      "step": 168
    },
    {
      "epoch": 3.4551724137931035,
      "grad_norm": 0.4601941406726837,
      "learning_rate": 4.345321981979942e-05,
      "loss": 0.0894,
      "step": 169
    },
    {
      "epoch": 3.475862068965517,
      "grad_norm": 0.6385725736618042,
      "learning_rate": 4.2331967788513295e-05,
      "loss": 0.1045,
      "step": 170
    },
    {
      "epoch": 3.496551724137931,
      "grad_norm": 0.4329453408718109,
      "learning_rate": 4.12214747707527e-05,
      "loss": 0.0799,
      "step": 171
    },
    {
      "epoch": 3.5172413793103448,
      "grad_norm": 0.29151296615600586,
      "learning_rate": 4.012194794905775e-05,
      "loss": 0.0511,
      "step": 172
    },
    {
      "epoch": 3.5379310344827584,
      "grad_norm": 0.776920735836029,
      "learning_rate": 3.9033592460026545e-05,
      "loss": 0.1755,
      "step": 173
    },
    {
      "epoch": 3.5586206896551724,
      "grad_norm": 0.3291413486003876,
      "learning_rate": 3.795661135604319e-05,
      "loss": 0.0803,
      "step": 174
    },
    {
      "epoch": 3.5793103448275865,
      "grad_norm": 0.37422430515289307,
      "learning_rate": 3.689120556739475e-05,
      "loss": 0.0507,
      "step": 175
    },
    {
      "epoch": 3.6,
      "grad_norm": 0.5673372149467468,
      "learning_rate": 3.583757386478389e-05,
      "loss": 0.0841,
      "step": 176
    },
    {
      "epoch": 3.6206896551724137,
      "grad_norm": 0.40253427624702454,
      "learning_rate": 3.479591282224496e-05,
      "loss": 0.0749,
      "step": 177
    },
    {
      "epoch": 3.6413793103448278,
      "grad_norm": 0.26780134439468384,
      "learning_rate": 3.3766416780469256e-05,
      "loss": 0.0619,
      "step": 178
    },
    {
      "epoch": 3.6620689655172414,
      "grad_norm": 0.4485790431499481,
      "learning_rate": 3.2749277810547286e-05,
      "loss": 0.0631,
      "step": 179
    },
    {
      "epoch": 3.682758620689655,
      "grad_norm": 0.3235170841217041,
      "learning_rate": 3.174468567813461e-05,
      "loss": 0.0714,
      "step": 180
    },
    {
      "epoch": 3.703448275862069,
      "grad_norm": 0.5508309602737427,
      "learning_rate": 3.0752827808047445e-05,
      "loss": 0.1729,
      "step": 181
    },
    {
      "epoch": 3.7241379310344827,
      "grad_norm": 0.4714655578136444,
      "learning_rate": 2.9773889249295294e-05,
      "loss": 0.1412,
      "step": 182
    },
    {
      "epoch": 3.7448275862068967,
      "grad_norm": 0.6512071490287781,
      "learning_rate": 2.8808052640556637e-05,
      "loss": 0.0927,
      "step": 183
    },
    {
      "epoch": 3.7655172413793103,
      "grad_norm": 0.3397248089313507,
      "learning_rate": 2.7855498176104434e-05,
      "loss": 0.0936,
      "step": 184
    },
    {
      "epoch": 3.7862068965517244,
      "grad_norm": 0.38765978813171387,
      "learning_rate": 2.691640357218759e-05,
      "loss": 0.0552,
      "step": 185
    },
    {
      "epoch": 3.806896551724138,
      "grad_norm": 0.5836214423179626,
      "learning_rate": 2.599094403387481e-05,
      "loss": 0.1287,
      "step": 186
    },
    {
      "epoch": 3.8275862068965516,
      "grad_norm": 0.4288087487220764,
      "learning_rate": 2.50792922223669e-05,
      "loss": 0.0781,
      "step": 187
    },
    {
      "epoch": 3.8482758620689657,
      "grad_norm": 0.3482179641723633,
      "learning_rate": 2.418161822278374e-05,
      "loss": 0.1031,
      "step": 188
    },
    {
      "epoch": 3.8689655172413793,
      "grad_norm": 0.6050342321395874,
      "learning_rate": 2.329808951243174e-05,
      "loss": 0.1308,
      "step": 189
    },
    {
      "epoch": 3.889655172413793,
      "grad_norm": 0.43189215660095215,
      "learning_rate": 2.242887092955801e-05,
      "loss": 0.077,
      "step": 190
    },
    {
      "epoch": 3.910344827586207,
      "grad_norm": 0.32741305232048035,
      "learning_rate": 2.1574124642596883e-05,
      "loss": 0.0648,
      "step": 191
    },
    {
      "epoch": 3.9310344827586206,
      "grad_norm": 0.32362306118011475,
      "learning_rate": 2.0734010119914192e-05,
      "loss": 0.0721,
      "step": 192
    },
    {
      "epoch": 3.9310344827586206,
      "eval_loss": 0.14946511387825012,
      "eval_runtime": 63.9848,
      "eval_samples_per_second": 1.203,
      "eval_steps_per_second": 1.203,
      "step": 192
    },
    {
      "epoch": 3.9517241379310346,
      "grad_norm": 0.5802838802337646,
      "learning_rate": 1.9908684100055842e-05,
      "loss": 0.1172,
      "step": 193
    },
    {
      "epoch": 3.972413793103448,
      "grad_norm": 0.3685375154018402,
      "learning_rate": 1.9098300562505266e-05,
      "loss": 0.1092,
      "step": 194
    },
    {
      "epoch": 3.9931034482758623,
      "grad_norm": 0.29930379986763,
      "learning_rate": 1.8303010698955804e-05,
      "loss": 0.097,
      "step": 195
    },
    {
      "epoch": 4.0,
      "grad_norm": 0.7061547636985779,
      "learning_rate": 1.7522962885103145e-05,
      "loss": 0.0744,
      "step": 196
    },
    {
      "epoch": 4.020689655172414,
      "grad_norm": 0.3112177550792694,
      "learning_rate": 1.6758302652963175e-05,
      "loss": 0.0379,
      "step": 197
    },
    {
      "epoch": 4.041379310344827,
      "grad_norm": 0.37092652916908264,
      "learning_rate": 1.600917266372035e-05,
      "loss": 0.0744,
      "step": 198
    },
    {
      "epoch": 4.062068965517241,
      "grad_norm": 0.32302889227867126,
      "learning_rate": 1.5275712681111644e-05,
      "loss": 0.0765,
      "step": 199
    },
    {
      "epoch": 4.082758620689655,
      "grad_norm": 0.2946412265300751,
      "learning_rate": 1.4558059545351143e-05,
      "loss": 0.0829,
      "step": 200
    },
    {
      "epoch": 4.103448275862069,
      "grad_norm": 0.3764098584651947,
      "learning_rate": 1.3856347147600013e-05,
      "loss": 0.0646,
      "step": 201
    },
    {
      "epoch": 4.124137931034483,
      "grad_norm": 0.4025175869464874,
      "learning_rate": 1.3170706404986644e-05,
      "loss": 0.0516,
      "step": 202
    },
    {
      "epoch": 4.144827586206897,
      "grad_norm": 0.3134159743785858,
      "learning_rate": 1.2501265236181737e-05,
      "loss": 0.0698,
      "step": 203
    },
    {
      "epoch": 4.165517241379311,
      "grad_norm": 0.437597393989563,
      "learning_rate": 1.1848148537532843e-05,
      "loss": 0.0691,
      "step": 204
    },
    {
      "epoch": 4.186206896551724,
      "grad_norm": 0.2172604203224182,
      "learning_rate": 1.1211478159762478e-05,
      "loss": 0.0314,
      "step": 205
    },
    {
      "epoch": 4.206896551724138,
      "grad_norm": 0.41428226232528687,
      "learning_rate": 1.0591372885234885e-05,
      "loss": 0.0746,
      "step": 206
    },
    {
      "epoch": 4.227586206896552,
      "grad_norm": 0.31297600269317627,
      "learning_rate": 9.98794840579491e-06,
      "loss": 0.0672,
      "step": 207
    },
    {
      "epoch": 4.248275862068965,
      "grad_norm": 0.3259146809577942,
      "learning_rate": 9.401317301183655e-06,
      "loss": 0.0791,
      "step": 208
    },
    {
      "epoch": 4.268965517241379,
      "grad_norm": 0.3143876791000366,
      "learning_rate": 8.831589018034658e-06,
      "loss": 0.0498,
      "step": 209
    },
    {
      "epoch": 4.289655172413793,
      "grad_norm": 0.3985753655433655,
      "learning_rate": 8.278869849454718e-06,
      "loss": 0.1353,
      "step": 210
    },
    {
      "epoch": 4.310344827586207,
      "grad_norm": 0.4066532850265503,
      "learning_rate": 7.74326291519284e-06,
      "loss": 0.1012,
      "step": 211
    },
    {
      "epoch": 4.3310344827586205,
      "grad_norm": 0.33012518286705017,
      "learning_rate": 7.224868142401542e-06,
      "loss": 0.0509,
      "step": 212
    },
    {
      "epoch": 4.3517241379310345,
      "grad_norm": 0.4188860058784485,
      "learning_rate": 6.7237822469936485e-06,
      "loss": 0.0659,
      "step": 213
    },
    {
      "epoch": 4.372413793103449,
      "grad_norm": 0.5927184820175171,
      "learning_rate": 6.240098715597975e-06,
      "loss": 0.094,
      "step": 214
    },
    {
      "epoch": 4.393103448275862,
      "grad_norm": 0.3828081488609314,
      "learning_rate": 5.77390778811796e-06,
      "loss": 0.068,
      "step": 215
    },
    {
      "epoch": 4.413793103448276,
      "grad_norm": 0.3546643555164337,
      "learning_rate": 5.325296440895622e-06,
      "loss": 0.0816,
      "step": 216
    },
    {
      "epoch": 4.413793103448276,
      "eval_loss": 0.1458887904882431,
      "eval_runtime": 64.0771,
      "eval_samples_per_second": 1.202,
      "eval_steps_per_second": 1.202,
      "step": 216
    },
    {
      "epoch": 4.43448275862069,
      "grad_norm": 0.28252580761909485,
      "learning_rate": 4.8943483704846475e-06,
      "loss": 0.0495,
      "step": 217
    },
    {
      "epoch": 4.455172413793104,
      "grad_norm": 0.42864277958869934,
      "learning_rate": 4.481143978035196e-06,
      "loss": 0.0811,
      "step": 218
    },
    {
      "epoch": 4.475862068965517,
      "grad_norm": 0.3355552554130554,
      "learning_rate": 4.085760354293677e-06,
      "loss": 0.0666,
      "step": 219
    },
    {
      "epoch": 4.496551724137931,
      "grad_norm": 0.5437521934509277,
      "learning_rate": 3.7082712652200867e-06,
      "loss": 0.0899,
      "step": 220
    },
    {
      "epoch": 4.517241379310345,
      "grad_norm": 0.48877090215682983,
      "learning_rate": 3.3487471382255275e-06,
      "loss": 0.0753,
      "step": 221
    },
    {
      "epoch": 4.537931034482758,
      "grad_norm": 0.42101794481277466,
      "learning_rate": 3.0072550490328753e-06,
      "loss": 0.0698,
      "step": 222
    },
    {
      "epoch": 4.558620689655172,
      "grad_norm": 0.48849114775657654,
      "learning_rate": 2.683858709162468e-06,
      "loss": 0.0728,
      "step": 223
    },
    {
      "epoch": 4.5793103448275865,
      "grad_norm": 0.26907044649124146,
      "learning_rate": 2.3786184540455448e-06,
      "loss": 0.0586,
      "step": 224
    },
    {
      "epoch": 4.6,
      "grad_norm": 0.4350108504295349,
      "learning_rate": 2.091591231767709e-06,
      "loss": 0.0548,
      "step": 225
    },
    {
      "epoch": 4.620689655172414,
      "grad_norm": 0.49473094940185547,
      "learning_rate": 1.822830592444147e-06,
      "loss": 0.0633,
      "step": 226
    },
    {
      "epoch": 4.641379310344828,
      "grad_norm": 0.6359331011772156,
      "learning_rate": 1.5723866782288543e-06,
      "loss": 0.087,
      "step": 227
    },
    {
      "epoch": 4.662068965517241,
      "grad_norm": 0.4741656184196472,
      "learning_rate": 1.3403062139598076e-06,
      "loss": 0.0774,
      "step": 228
    },
    {
      "epoch": 4.682758620689655,
      "grad_norm": 0.38687583804130554,
      "learning_rate": 1.1266324984415266e-06,
      "loss": 0.0657,
      "step": 229
    },
    {
      "epoch": 4.703448275862069,
      "grad_norm": 0.40613865852355957,
      "learning_rate": 9.314053963669245e-07,
      "loss": 0.0986,
      "step": 230
    },
    {
      "epoch": 4.724137931034483,
      "grad_norm": 0.5870184302330017,
      "learning_rate": 7.546613308798466e-07,
      "loss": 0.0507,
      "step": 231
    },
    {
      "epoch": 4.744827586206896,
      "grad_norm": 0.46438148617744446,
      "learning_rate": 5.964332767796399e-07,
      "loss": 0.068,
      "step": 232
    },
    {
      "epoch": 4.76551724137931,
      "grad_norm": 0.3601086735725403,
      "learning_rate": 4.567507543691174e-07,
      "loss": 0.0479,
      "step": 233
    },
    {
      "epoch": 4.786206896551724,
      "grad_norm": 0.3085576295852661,
      "learning_rate": 3.3563982394704266e-07,
      "loss": 0.0492,
      "step": 234
    },
    {
      "epoch": 4.8068965517241375,
      "grad_norm": 0.36751386523246765,
      "learning_rate": 2.3312308094607382e-07,
      "loss": 0.0487,
      "step": 235
    },
    {
      "epoch": 4.827586206896552,
      "grad_norm": 0.6906239986419678,
      "learning_rate": 1.4921965171720287e-07,
      "loss": 0.109,
      "step": 236
    },
    {
      "epoch": 4.848275862068966,
      "grad_norm": 0.39117613434791565,
      "learning_rate": 8.394518996135414e-08,
      "loss": 0.0878,
      "step": 237
    },
    {
      "epoch": 4.86896551724138,
      "grad_norm": 0.3347634971141815,
      "learning_rate": 3.731187380893175e-08,
      "loss": 0.0509,
      "step": 238
    },
    {
      "epoch": 4.889655172413793,
      "grad_norm": 0.381293922662735,
      "learning_rate": 9.32840354779252e-09,
      "loss": 0.0836,
      "step": 239
    },
    {
      "epoch": 4.910344827586207,
      "grad_norm": 0.39473918080329895,
      "learning_rate": 0.0,
      "loss": 0.0785,
      "step": 240
    },
    {
      "epoch": 4.910344827586207,
      "eval_loss": 0.14615046977996826,
      "eval_runtime": 64.0233,
      "eval_samples_per_second": 1.203,
      "eval_steps_per_second": 1.203,
      "step": 240
    }
  ],
  "logging_steps": 1,
  "max_steps": 240,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 5,
  "save_steps": 48,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 2.635259140758307e+18,
  "train_batch_size": 1,
  "trial_name": null,
  "trial_params": null
}