{
  "best_global_step": null,
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 3.0,
  "eval_steps": 500,
  "global_step": 1245,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.060350030175015085,
      "grad_norm": 0.3043883144855499,
      "learning_rate": 0.00010909090909090909,
      "loss": 1.8586,
      "mean_token_accuracy": 0.6245462906360626,
      "num_tokens": 157786.0,
      "step": 25
    },
    {
      "epoch": 0.12070006035003017,
      "grad_norm": 0.24695894122123718,
      "learning_rate": 0.00022272727272727272,
      "loss": 0.9383,
      "mean_token_accuracy": 0.7651280963420868,
      "num_tokens": 283437.0,
      "step": 50
    },
    {
      "epoch": 0.18105009052504525,
      "grad_norm": 0.19023272395133972,
      "learning_rate": 0.0003363636363636364,
      "loss": 0.649,
      "mean_token_accuracy": 0.8234076803922653,
      "num_tokens": 442299.0,
      "step": 75
    },
    {
      "epoch": 0.24140012070006034,
      "grad_norm": 0.22401368618011475,
      "learning_rate": 0.0003999849510577617,
      "loss": 0.5125,
      "mean_token_accuracy": 0.8562434083223343,
      "num_tokens": 569621.0,
      "step": 100
    },
    {
      "epoch": 0.30175015087507545,
      "grad_norm": 0.23774276673793793,
      "learning_rate": 0.0003998388342637495,
      "loss": 0.3972,
      "mean_token_accuracy": 0.8866757136583329,
      "num_tokens": 725959.0,
      "step": 125
    },
    {
      "epoch": 0.3621001810500905,
      "grad_norm": 0.3460250496864319,
      "learning_rate": 0.00039953738658223166,
      "loss": 0.3554,
      "mean_token_accuracy": 0.9000710541009903,
      "num_tokens": 851583.0,
      "step": 150
    },
    {
      "epoch": 0.4224502112251056,
      "grad_norm": 0.2395441234111786,
      "learning_rate": 0.00039908084232270096,
      "loss": 0.2503,
      "mean_token_accuracy": 0.9276190227270127,
      "num_tokens": 1010006.0,
      "step": 175
    },
    {
      "epoch": 0.4828002414001207,
      "grad_norm": 0.28712666034698486,
      "learning_rate": 0.00039846955634824144,
      "loss": 0.2602,
      "mean_token_accuracy": 0.9267517280578613,
      "num_tokens": 1137732.0,
      "step": 200
    },
    {
      "epoch": 0.5431502715751357,
      "grad_norm": 0.17056156694889069,
      "learning_rate": 0.00039770400379969973,
      "loss": 0.1851,
      "mean_token_accuracy": 0.9475770330429077,
      "num_tokens": 1294237.0,
      "step": 225
    },
    {
      "epoch": 0.6035003017501509,
      "grad_norm": 0.21380072832107544,
      "learning_rate": 0.00039678477972636774,
      "loss": 0.1622,
      "mean_token_accuracy": 0.9546336072683335,
      "num_tokens": 1419338.0,
      "step": 250
    },
    {
      "epoch": 0.663850331925166,
      "grad_norm": 0.20465914905071259,
      "learning_rate": 0.00039571259862346183,
      "loss": 0.1329,
      "mean_token_accuracy": 0.961662837266922,
      "num_tokens": 1576946.0,
      "step": 275
    },
    {
      "epoch": 0.724200362100181,
      "grad_norm": 0.2113582342863083,
      "learning_rate": 0.00039448829387675954,
      "loss": 0.1287,
      "mean_token_accuracy": 0.9641147536039353,
      "num_tokens": 1703683.0,
      "step": 300
    },
    {
      "epoch": 0.7845503922751962,
      "grad_norm": 0.15074850618839264,
      "learning_rate": 0.0003931128171148249,
      "loss": 0.1232,
      "mean_token_accuracy": 0.9665374368429184,
      "num_tokens": 1860622.0,
      "step": 325
    },
    {
      "epoch": 0.8449004224502112,
      "grad_norm": 0.4272288978099823,
      "learning_rate": 0.00039158723746932566,
      "loss": 0.0974,
      "mean_token_accuracy": 0.9736644911766053,
      "num_tokens": 1987884.0,
      "step": 350
    },
    {
      "epoch": 0.9052504526252263,
      "grad_norm": 0.13680922985076904,
      "learning_rate": 0.00038991274074401806,
      "loss": 0.1007,
      "mean_token_accuracy": 0.9723848593235016,
      "num_tokens": 2145729.0,
      "step": 375
    },
    {
      "epoch": 0.9656004828002414,
      "grad_norm": 0.15923215448856354,
      "learning_rate": 0.00038809062849304407,
      "loss": 0.0747,
      "mean_token_accuracy": 0.9784966939687729,
      "num_tokens": 2273076.0,
      "step": 400
    },
    {
      "epoch": 1.0,
      "eval_loss": 0.09010029584169388,
      "eval_mean_token_accuracy": 0.9750096942927386,
      "eval_num_tokens": 2354180.0,
      "eval_runtime": 15.8229,
      "eval_samples_per_second": 23.321,
      "eval_steps_per_second": 11.692,
      "step": 415
    },
    {
      "epoch": 1.024140012070006,
      "grad_norm": 0.12324528396129608,
      "learning_rate": 0.0003861223170092585,
      "loss": 0.0923,
      "mean_token_accuracy": 0.9740137457847595,
      "num_tokens": 2422803.0,
      "step": 425
    },
    {
      "epoch": 1.0844900422450212,
      "grad_norm": 0.21983321011066437,
      "learning_rate": 0.00038400933622337167,
      "loss": 0.0619,
      "mean_token_accuracy": 0.9822656351327896,
      "num_tokens": 2563793.0,
      "step": 450
    },
    {
      "epoch": 1.1448400724200363,
      "grad_norm": 0.11064327508211136,
      "learning_rate": 0.00038175332851476387,
      "loss": 0.075,
      "mean_token_accuracy": 0.9794844657182693,
      "num_tokens": 2706197.0,
      "step": 475
    },
    {
      "epoch": 1.2051901025950513,
      "grad_norm": 0.1068505346775055,
      "learning_rate": 0.00037935604743489506,
      "loss": 0.0544,
      "mean_token_accuracy": 0.9840904027223587,
      "num_tokens": 2850268.0,
      "step": 500
    },
    {
      "epoch": 1.2655401327700664,
      "grad_norm": 0.15822124481201172,
      "learning_rate": 0.00037681935634430327,
      "loss": 0.078,
      "mean_token_accuracy": 0.9782475352287292,
      "num_tokens": 2992275.0,
      "step": 525
    },
    {
      "epoch": 1.3258901629450814,
      "grad_norm": 0.10555300116539001,
      "learning_rate": 0.0003741452269642502,
      "loss": 0.0542,
      "mean_token_accuracy": 0.9846927672624588,
      "num_tokens": 3135599.0,
      "step": 550
    },
    {
      "epoch": 1.3862401931200965,
      "grad_norm": 0.07533632218837738,
      "learning_rate": 0.0003713357378441402,
      "loss": 0.0641,
      "mean_token_accuracy": 0.9822721928358078,
      "num_tokens": 3278124.0,
      "step": 575
    },
    {
      "epoch": 1.4465902232951118,
      "grad_norm": 0.1403859406709671,
      "learning_rate": 0.00036839307274590355,
      "loss": 0.0491,
      "mean_token_accuracy": 0.985781243443489,
      "num_tokens": 3421679.0,
      "step": 600
    },
    {
      "epoch": 1.5069402534701268,
      "grad_norm": 0.10379917174577713,
      "learning_rate": 0.00036531951894660034,
      "loss": 0.069,
      "mean_token_accuracy": 0.9809466338157654,
      "num_tokens": 3564486.0,
      "step": 625
    },
    {
      "epoch": 1.567290283645142,
      "grad_norm": 0.09850325435400009,
      "learning_rate": 0.00036211746546056415,
      "loss": 0.0516,
      "mean_token_accuracy": 0.9853906160593033,
      "num_tokens": 3707529.0,
      "step": 650
    },
    {
      "epoch": 1.627640313820157,
      "grad_norm": 0.11964337527751923,
      "learning_rate": 0.00035878940118246673,
      "loss": 0.0596,
      "mean_token_accuracy": 0.9836755973100663,
      "num_tokens": 3848570.0,
      "step": 675
    },
    {
      "epoch": 1.687990343995172,
      "grad_norm": 0.05955597385764122,
      "learning_rate": 0.00035533791295274834,
      "loss": 0.0428,
      "mean_token_accuracy": 0.9875983273983002,
      "num_tokens": 3990912.0,
      "step": 700
    },
    {
      "epoch": 1.748340374170187,
      "grad_norm": 0.07114022970199585,
      "learning_rate": 0.0003517656835469161,
      "loss": 0.0627,
      "mean_token_accuracy": 0.9830698877573013,
      "num_tokens": 4131622.0,
      "step": 725
    },
    {
      "epoch": 1.8086904043452021,
      "grad_norm": 0.09389431029558182,
      "learning_rate": 0.0003480754895902742,
      "loss": 0.0459,
      "mean_token_accuracy": 0.9864954763650894,
      "num_tokens": 4273640.0,
      "step": 750
    },
    {
      "epoch": 1.8690404345202172,
      "grad_norm": 0.06497396528720856,
      "learning_rate": 0.0003442701993997064,
      "loss": 0.0577,
      "mean_token_accuracy": 0.9838357955217362,
      "num_tokens": 4414471.0,
      "step": 775
    },
    {
      "epoch": 1.9293904646952322,
      "grad_norm": 0.11333642154932022,
      "learning_rate": 0.00034035277075418854,
      "loss": 0.0431,
      "mean_token_accuracy": 0.9871519947052002,
      "num_tokens": 4555376.0,
      "step": 800
    },
    {
      "epoch": 1.9897404948702473,
      "grad_norm": 0.05740824714303017,
      "learning_rate": 0.0003363262485957633,
      "loss": 0.0418,
      "mean_token_accuracy": 0.9883395010232925,
      "num_tokens": 4688406.0,
      "step": 825
    },
    {
      "epoch": 2.0,
      "eval_loss": 0.053012676537036896,
      "eval_mean_token_accuracy": 0.9854460068651147,
      "eval_num_tokens": 4708360.0,
      "eval_runtime": 15.7874,
      "eval_samples_per_second": 23.373,
      "eval_steps_per_second": 11.718,
      "step": 830
    },
    {
      "epoch": 2.048280024140012,
      "grad_norm": 0.09828540682792664,
      "learning_rate": 0.00033219376266276594,
      "loss": 0.0495,
      "mean_token_accuracy": 0.9852321922164602,
      "num_tokens": 4836946.0,
      "step": 850
    },
    {
      "epoch": 2.1086300543150274,
      "grad_norm": 0.10078238695859909,
      "learning_rate": 0.00032795852505713806,
      "loss": 0.0313,
      "mean_token_accuracy": 0.9908820760250091,
      "num_tokens": 4968203.0,
      "step": 875
    },
    {
      "epoch": 2.1689800844900424,
      "grad_norm": 0.07217393070459366,
      "learning_rate": 0.0003236238277477231,
      "loss": 0.0471,
      "mean_token_accuracy": 0.985995357632637,
      "num_tokens": 5118541.0,
      "step": 900
    },
    {
      "epoch": 2.2293301146650575,
      "grad_norm": 0.09504982829093933,
      "learning_rate": 0.0003191930400114816,
      "loss": 0.0322,
      "mean_token_accuracy": 0.9904332131147384,
      "num_tokens": 5251007.0,
      "step": 925
    },
    {
      "epoch": 2.2896801448400725,
      "grad_norm": 0.04393278807401657,
      "learning_rate": 0.0003146696058146176,
      "loss": 0.0481,
      "mean_token_accuracy": 0.9860882490873337,
      "num_tokens": 5403517.0,
      "step": 950
    },
    {
      "epoch": 2.3500301750150876,
      "grad_norm": 0.12678726017475128,
      "learning_rate": 0.00031005704113564917,
      "loss": 0.0349,
      "mean_token_accuracy": 0.9900296354293823,
      "num_tokens": 5536492.0,
      "step": 975
    },
    {
      "epoch": 2.4103802051901027,
      "grad_norm": 0.06465219706296921,
      "learning_rate": 0.00030535893123250635,
      "loss": 0.0484,
      "mean_token_accuracy": 0.9857117992639541,
      "num_tokens": 5688657.0,
      "step": 1000
    },
    {
      "epoch": 2.4707302353651177,
      "grad_norm": 0.07911183685064316,
      "learning_rate": 0.00030057892785577867,
      "loss": 0.0313,
      "mean_token_accuracy": 0.9907743036746979,
      "num_tokens": 5821424.0,
      "step": 1025
    },
    {
      "epoch": 2.5310802655401328,
      "grad_norm": 0.04166734963655472,
      "learning_rate": 0.00029572074641027996,
      "loss": 0.0448,
      "mean_token_accuracy": 0.9870600712299347,
      "num_tokens": 5970144.0,
      "step": 1050
    },
    {
      "epoch": 2.591430295715148,
      "grad_norm": 0.09666065871715546,
      "learning_rate": 0.0002907881630671351,
      "loss": 0.0301,
      "mean_token_accuracy": 0.9908553779125213,
      "num_tokens": 6103689.0,
      "step": 1075
    },
    {
      "epoch": 2.651780325890163,
      "grad_norm": 0.07375594228506088,
      "learning_rate": 0.00028578501182863507,
      "loss": 0.0425,
      "mean_token_accuracy": 0.9875227802991867,
      "num_tokens": 6256525.0,
      "step": 1100
    },
    {
      "epoch": 2.712130356065178,
      "grad_norm": 0.086298368871212,
      "learning_rate": 0.00028071518154814036,
      "loss": 0.0323,
      "mean_token_accuracy": 0.9902477955818176,
      "num_tokens": 6390348.0,
      "step": 1125
    },
    {
      "epoch": 2.772480386240193,
      "grad_norm": 0.03587740287184715,
      "learning_rate": 0.0002755826129073503,
      "loss": 0.0427,
      "mean_token_accuracy": 0.9872915095090866,
      "num_tokens": 6540900.0,
      "step": 1150
    },
    {
      "epoch": 2.832830416415208,
      "grad_norm": 0.08814697712659836,
      "learning_rate": 0.00027039129535328646,
      "loss": 0.0328,
      "mean_token_accuracy": 0.990228921175003,
      "num_tokens": 6673531.0,
      "step": 1175
    },
    {
      "epoch": 2.8931804465902236,
      "grad_norm": 0.054661527276039124,
      "learning_rate": 0.00026514526399737235,
      "loss": 0.0406,
      "mean_token_accuracy": 0.9875594407320023,
      "num_tokens": 6826045.0,
      "step": 1200
    },
    {
      "epoch": 2.9535304767652386,
      "grad_norm": 0.04500816389918327,
      "learning_rate": 0.00025984859647901865,
      "loss": 0.0282,
      "mean_token_accuracy": 0.9915571695566178,
      "num_tokens": 6959176.0,
      "step": 1225
    },
    {
      "epoch": 3.0,
      "eval_loss": 0.04558952525258064,
      "eval_mean_token_accuracy": 0.9875019208804982,
      "eval_num_tokens": 7062540.0,
      "eval_runtime": 15.8013,
      "eval_samples_per_second": 23.352,
      "eval_steps_per_second": 11.708,
      "step": 1245
    }
  ],
  "logging_steps": 25,
  "max_steps": 2905,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 7,
  "save_steps": 500,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": false
      },
      "attributes": {}
    }
  },
  "total_flos": 3.0683713883526144e+17,
  "train_batch_size": 2,
  "trial_name": null,
  "trial_params": null
}