{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 0.47292732361741147,
  "eval_steps": 100,
  "global_step": 4900,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.01,
      "grad_norm": 5.391254425048828,
      "learning_rate": 2.450490098019604e-05,
      "loss": 1.8358,
      "step": 100
    },
    {
      "epoch": 0.01,
      "eval_loss": 1.450282096862793,
      "eval_runtime": 27.2888,
      "eval_samples_per_second": 18.323,
      "eval_steps_per_second": 2.309,
      "step": 100
    },
    {
      "epoch": 0.02,
      "grad_norm": 5.370517253875732,
      "learning_rate": 2.400480096019204e-05,
      "loss": 1.6054,
      "step": 200
    },
    {
      "epoch": 0.02,
      "eval_loss": 1.4298174381256104,
      "eval_runtime": 27.2786,
      "eval_samples_per_second": 18.329,
      "eval_steps_per_second": 2.31,
      "step": 200
    },
    {
      "epoch": 0.03,
      "grad_norm": 5.93058443069458,
      "learning_rate": 2.3504700940188037e-05,
      "loss": 1.7294,
      "step": 300
    },
    {
      "epoch": 0.03,
      "eval_loss": 1.404078483581543,
      "eval_runtime": 27.2841,
      "eval_samples_per_second": 18.326,
      "eval_steps_per_second": 2.309,
      "step": 300
    },
    {
      "epoch": 0.04,
      "grad_norm": 3.4010226726531982,
      "learning_rate": 2.3004600920184037e-05,
      "loss": 1.591,
      "step": 400
    },
    {
      "epoch": 0.04,
      "eval_loss": 1.3933534622192383,
      "eval_runtime": 27.2614,
      "eval_samples_per_second": 18.341,
      "eval_steps_per_second": 2.311,
      "step": 400
    },
    {
      "epoch": 0.05,
      "grad_norm": 5.741560459136963,
      "learning_rate": 2.2504500900180037e-05,
      "loss": 1.6501,
      "step": 500
    },
    {
      "epoch": 0.05,
      "eval_loss": 1.3843183517456055,
      "eval_runtime": 27.2728,
      "eval_samples_per_second": 18.333,
      "eval_steps_per_second": 2.31,
      "step": 500
    },
    {
      "epoch": 0.06,
      "grad_norm": 3.929501533508301,
      "learning_rate": 2.2004400880176038e-05,
      "loss": 1.5758,
      "step": 600
    },
    {
      "epoch": 0.06,
      "eval_loss": 1.3718712329864502,
      "eval_runtime": 27.2551,
      "eval_samples_per_second": 18.345,
      "eval_steps_per_second": 2.311,
      "step": 600
    },
    {
      "epoch": 0.07,
      "grad_norm": 6.033923149108887,
      "learning_rate": 2.1504300860172034e-05,
      "loss": 1.6159,
      "step": 700
    },
    {
      "epoch": 0.07,
      "eval_loss": 1.377557396888733,
      "eval_runtime": 27.2622,
      "eval_samples_per_second": 18.34,
      "eval_steps_per_second": 2.311,
      "step": 700
    },
    {
      "epoch": 0.08,
      "grad_norm": 7.7004313468933105,
      "learning_rate": 2.1004200840168034e-05,
      "loss": 1.6272,
      "step": 800
    },
    {
      "epoch": 0.08,
      "eval_loss": 1.3639216423034668,
      "eval_runtime": 27.271,
      "eval_samples_per_second": 18.334,
      "eval_steps_per_second": 2.31,
      "step": 800
    },
    {
      "epoch": 0.09,
      "grad_norm": 3.8936071395874023,
      "learning_rate": 2.0504100820164035e-05,
      "loss": 1.5669,
      "step": 900
    },
    {
      "epoch": 0.09,
      "eval_loss": 1.3606789112091064,
      "eval_runtime": 27.2614,
      "eval_samples_per_second": 18.341,
      "eval_steps_per_second": 2.311,
      "step": 900
    },
    {
      "epoch": 0.1,
      "grad_norm": 2.6616313457489014,
      "learning_rate": 2.0004000800160035e-05,
      "loss": 1.5046,
      "step": 1000
    },
    {
      "epoch": 0.1,
      "eval_loss": 1.3455876111984253,
      "eval_runtime": 27.2532,
      "eval_samples_per_second": 18.346,
      "eval_steps_per_second": 2.312,
      "step": 1000
    },
    {
      "epoch": 0.11,
      "grad_norm": 4.210849761962891,
      "learning_rate": 1.950390078015603e-05,
      "loss": 1.5338,
      "step": 1100
    },
    {
      "epoch": 0.11,
      "eval_loss": 1.3470462560653687,
      "eval_runtime": 27.2373,
      "eval_samples_per_second": 18.357,
      "eval_steps_per_second": 2.313,
      "step": 1100
    },
    {
      "epoch": 0.12,
      "grad_norm": 7.99553108215332,
      "learning_rate": 1.900380076015203e-05,
      "loss": 1.5559,
      "step": 1200
    },
    {
      "epoch": 0.12,
      "eval_loss": 1.3312621116638184,
      "eval_runtime": 27.2565,
      "eval_samples_per_second": 18.344,
      "eval_steps_per_second": 2.311,
      "step": 1200
    },
    {
      "epoch": 0.13,
      "grad_norm": 7.216369152069092,
      "learning_rate": 1.8503700740148032e-05,
      "loss": 1.5616,
      "step": 1300
    },
    {
      "epoch": 0.13,
      "eval_loss": 1.3286023139953613,
      "eval_runtime": 27.2461,
      "eval_samples_per_second": 18.351,
      "eval_steps_per_second": 2.312,
      "step": 1300
    },
    {
      "epoch": 0.14,
      "grad_norm": 3.162696361541748,
      "learning_rate": 1.800360072014403e-05,
      "loss": 1.4969,
      "step": 1400
    },
    {
      "epoch": 0.14,
      "eval_loss": 1.3286837339401245,
      "eval_runtime": 27.2552,
      "eval_samples_per_second": 18.345,
      "eval_steps_per_second": 2.311,
      "step": 1400
    },
    {
      "epoch": 0.14,
      "grad_norm": 7.42529821395874,
      "learning_rate": 1.750350070014003e-05,
      "loss": 1.5723,
      "step": 1500
    },
    {
      "epoch": 0.14,
      "eval_loss": 1.3215287923812866,
      "eval_runtime": 27.2475,
      "eval_samples_per_second": 18.35,
      "eval_steps_per_second": 2.312,
      "step": 1500
    },
    {
      "epoch": 0.15,
      "grad_norm": 2.7483532428741455,
      "learning_rate": 1.700340068013603e-05,
      "loss": 1.4844,
      "step": 1600
    },
    {
      "epoch": 0.15,
      "eval_loss": 1.3189783096313477,
      "eval_runtime": 27.2412,
      "eval_samples_per_second": 18.355,
      "eval_steps_per_second": 2.313,
      "step": 1600
    },
    {
      "epoch": 0.16,
      "grad_norm": 11.681495666503906,
      "learning_rate": 1.6503300660132026e-05,
      "loss": 1.5056,
      "step": 1700
    },
    {
      "epoch": 0.16,
      "eval_loss": 1.313588261604309,
      "eval_runtime": 27.2711,
      "eval_samples_per_second": 18.334,
      "eval_steps_per_second": 2.31,
      "step": 1700
    },
    {
      "epoch": 0.17,
      "grad_norm": 6.721160888671875,
      "learning_rate": 1.6003200640128026e-05,
      "loss": 1.5118,
      "step": 1800
    },
    {
      "epoch": 0.17,
      "eval_loss": 1.3094464540481567,
      "eval_runtime": 27.2681,
      "eval_samples_per_second": 18.336,
      "eval_steps_per_second": 2.31,
      "step": 1800
    },
    {
      "epoch": 0.18,
      "grad_norm": 6.978147029876709,
      "learning_rate": 1.5503100620124026e-05,
      "loss": 1.4269,
      "step": 1900
    },
    {
      "epoch": 0.18,
      "eval_loss": 1.3047374486923218,
      "eval_runtime": 27.289,
      "eval_samples_per_second": 18.322,
      "eval_steps_per_second": 2.309,
      "step": 1900
    },
    {
      "epoch": 0.19,
      "grad_norm": 6.569401741027832,
      "learning_rate": 1.5003000600120023e-05,
      "loss": 1.6078,
      "step": 2000
    },
    {
      "epoch": 0.19,
      "eval_loss": 1.2957228422164917,
      "eval_runtime": 27.2723,
      "eval_samples_per_second": 18.334,
      "eval_steps_per_second": 2.31,
      "step": 2000
    },
    {
      "epoch": 0.2,
      "grad_norm": 7.020974159240723,
      "learning_rate": 1.4502900580116025e-05,
      "loss": 1.5223,
      "step": 2100
    },
    {
      "epoch": 0.2,
      "eval_loss": 1.2962332963943481,
      "eval_runtime": 27.2629,
      "eval_samples_per_second": 18.34,
      "eval_steps_per_second": 2.311,
      "step": 2100
    },
    {
      "epoch": 0.21,
      "grad_norm": 11.144767761230469,
      "learning_rate": 1.4002800560112023e-05,
      "loss": 1.5419,
      "step": 2200
    },
    {
      "epoch": 0.21,
      "eval_loss": 1.2900757789611816,
      "eval_runtime": 27.2619,
      "eval_samples_per_second": 18.341,
      "eval_steps_per_second": 2.311,
      "step": 2200
    },
    {
      "epoch": 0.22,
      "grad_norm": 8.033313751220703,
      "learning_rate": 1.3502700540108021e-05,
      "loss": 1.5363,
      "step": 2300
    },
    {
      "epoch": 0.22,
      "eval_loss": 1.2853684425354004,
      "eval_runtime": 27.2673,
      "eval_samples_per_second": 18.337,
      "eval_steps_per_second": 2.31,
      "step": 2300
    },
    {
      "epoch": 0.23,
      "grad_norm": 8.654736518859863,
      "learning_rate": 1.3002600520104022e-05,
      "loss": 1.4545,
      "step": 2400
    },
    {
      "epoch": 0.23,
      "eval_loss": 1.2811810970306396,
      "eval_runtime": 27.2594,
      "eval_samples_per_second": 18.342,
      "eval_steps_per_second": 2.311,
      "step": 2400
    },
    {
      "epoch": 0.24,
      "grad_norm": 9.393715858459473,
      "learning_rate": 1.250250050010002e-05,
      "loss": 1.4054,
      "step": 2500
    },
    {
      "epoch": 0.24,
      "eval_loss": 1.2781436443328857,
      "eval_runtime": 27.2437,
      "eval_samples_per_second": 18.353,
      "eval_steps_per_second": 2.312,
      "step": 2500
    },
    {
      "epoch": 0.25,
      "grad_norm": 3.9280712604522705,
      "learning_rate": 1.200240048009602e-05,
      "loss": 1.5613,
      "step": 2600
    },
    {
      "epoch": 0.25,
      "eval_loss": 1.2793227434158325,
      "eval_runtime": 27.2593,
      "eval_samples_per_second": 18.342,
      "eval_steps_per_second": 2.311,
      "step": 2600
    },
    {
      "epoch": 0.26,
      "grad_norm": 9.87479019165039,
      "learning_rate": 1.1502300460092019e-05,
      "loss": 1.3906,
      "step": 2700
    },
    {
      "epoch": 0.26,
      "eval_loss": 1.2742949724197388,
      "eval_runtime": 27.2694,
      "eval_samples_per_second": 18.336,
      "eval_steps_per_second": 2.31,
      "step": 2700
    },
    {
      "epoch": 0.27,
      "grad_norm": 8.556775093078613,
      "learning_rate": 1.1002200440088019e-05,
      "loss": 1.5082,
      "step": 2800
    },
    {
      "epoch": 0.27,
      "eval_loss": 1.2731441259384155,
      "eval_runtime": 27.2537,
      "eval_samples_per_second": 18.346,
      "eval_steps_per_second": 2.312,
      "step": 2800
    },
    {
      "epoch": 0.28,
      "grad_norm": 4.113871097564697,
      "learning_rate": 1.0502100420084017e-05,
      "loss": 1.4439,
      "step": 2900
    },
    {
      "epoch": 0.28,
      "eval_loss": 1.2708263397216797,
      "eval_runtime": 27.2642,
      "eval_samples_per_second": 18.339,
      "eval_steps_per_second": 2.311,
      "step": 2900
    },
    {
      "epoch": 0.29,
      "grad_norm": 7.74875545501709,
      "learning_rate": 1.0002000400080017e-05,
      "loss": 1.4189,
      "step": 3000
    },
    {
      "epoch": 0.29,
      "eval_loss": 1.2717357873916626,
      "eval_runtime": 27.2628,
      "eval_samples_per_second": 18.34,
      "eval_steps_per_second": 2.311,
      "step": 3000
    },
    {
      "epoch": 0.3,
      "grad_norm": 3.830393075942993,
      "learning_rate": 9.501900380076016e-06,
      "loss": 1.4567,
      "step": 3100
    },
    {
      "epoch": 0.3,
      "eval_loss": 1.2686272859573364,
      "eval_runtime": 27.2577,
      "eval_samples_per_second": 18.343,
      "eval_steps_per_second": 2.311,
      "step": 3100
    },
    {
      "epoch": 0.31,
      "grad_norm": 7.333063125610352,
      "learning_rate": 9.001800360072014e-06,
      "loss": 1.4613,
      "step": 3200
    },
    {
      "epoch": 0.31,
      "eval_loss": 1.266152262687683,
      "eval_runtime": 27.2615,
      "eval_samples_per_second": 18.341,
      "eval_steps_per_second": 2.311,
      "step": 3200
    },
    {
      "epoch": 0.32,
      "grad_norm": 2.688906192779541,
      "learning_rate": 8.501700340068014e-06,
      "loss": 1.4106,
      "step": 3300
    },
    {
      "epoch": 0.32,
      "eval_loss": 1.2621879577636719,
      "eval_runtime": 27.2709,
      "eval_samples_per_second": 18.335,
      "eval_steps_per_second": 2.31,
      "step": 3300
    },
    {
      "epoch": 0.33,
      "grad_norm": 5.106052398681641,
      "learning_rate": 8.001600320064013e-06,
      "loss": 1.4848,
      "step": 3400
    },
    {
      "epoch": 0.33,
      "eval_loss": 1.259779453277588,
      "eval_runtime": 27.2724,
      "eval_samples_per_second": 18.334,
      "eval_steps_per_second": 2.31,
      "step": 3400
    },
    {
      "epoch": 0.34,
      "grad_norm": 7.964311599731445,
      "learning_rate": 7.501500300060011e-06,
      "loss": 1.3907,
      "step": 3500
    },
    {
      "epoch": 0.34,
      "eval_loss": 1.2596435546875,
      "eval_runtime": 27.2626,
      "eval_samples_per_second": 18.34,
      "eval_steps_per_second": 2.311,
      "step": 3500
    },
    {
      "epoch": 0.35,
      "grad_norm": 6.268681526184082,
      "learning_rate": 7.0014002800560115e-06,
      "loss": 1.44,
      "step": 3600
    },
    {
      "epoch": 0.35,
      "eval_loss": 1.2597826719284058,
      "eval_runtime": 27.2599,
      "eval_samples_per_second": 18.342,
      "eval_steps_per_second": 2.311,
      "step": 3600
    },
    {
      "epoch": 0.36,
      "grad_norm": 3.744537115097046,
      "learning_rate": 6.501300260052011e-06,
      "loss": 1.4985,
      "step": 3700
    },
    {
      "epoch": 0.36,
      "eval_loss": 1.2589830160140991,
      "eval_runtime": 27.2604,
      "eval_samples_per_second": 18.342,
      "eval_steps_per_second": 2.311,
      "step": 3700
    },
    {
      "epoch": 0.37,
      "grad_norm": 6.038290500640869,
      "learning_rate": 6.00120024004801e-06,
      "loss": 1.3521,
      "step": 3800
    },
    {
      "epoch": 0.37,
      "eval_loss": 1.255460500717163,
      "eval_runtime": 27.2617,
      "eval_samples_per_second": 18.341,
      "eval_steps_per_second": 2.311,
      "step": 3800
    },
    {
      "epoch": 0.38,
      "grad_norm": 8.274654388427734,
      "learning_rate": 5.501100220044009e-06,
      "loss": 1.4754,
      "step": 3900
    },
    {
      "epoch": 0.38,
      "eval_loss": 1.254140019416809,
      "eval_runtime": 27.2664,
      "eval_samples_per_second": 18.338,
      "eval_steps_per_second": 2.311,
      "step": 3900
    },
    {
      "epoch": 0.39,
      "grad_norm": 6.011349201202393,
      "learning_rate": 5.001000200040009e-06,
      "loss": 1.3927,
      "step": 4000
    },
    {
      "epoch": 0.39,
      "eval_loss": 1.252474069595337,
      "eval_runtime": 27.2815,
      "eval_samples_per_second": 18.327,
      "eval_steps_per_second": 2.309,
      "step": 4000
    },
    {
      "epoch": 0.4,
      "grad_norm": 8.328156471252441,
      "learning_rate": 4.500900180036007e-06,
      "loss": 1.468,
      "step": 4100
    },
    {
      "epoch": 0.4,
      "eval_loss": 1.2514727115631104,
      "eval_runtime": 27.2652,
      "eval_samples_per_second": 18.338,
      "eval_steps_per_second": 2.311,
      "step": 4100
    },
    {
      "epoch": 0.41,
      "grad_norm": 7.536208152770996,
      "learning_rate": 4.0008001600320064e-06,
      "loss": 1.4471,
      "step": 4200
    },
    {
      "epoch": 0.41,
      "eval_loss": 1.2502838373184204,
      "eval_runtime": 27.2698,
      "eval_samples_per_second": 18.335,
      "eval_steps_per_second": 2.31,
      "step": 4200
    },
    {
      "epoch": 0.42,
      "grad_norm": 6.996093273162842,
      "learning_rate": 3.5007001400280057e-06,
      "loss": 1.445,
      "step": 4300
    },
    {
      "epoch": 0.42,
      "eval_loss": 1.2483798265457153,
      "eval_runtime": 27.2509,
      "eval_samples_per_second": 18.348,
      "eval_steps_per_second": 2.312,
      "step": 4300
    },
    {
      "epoch": 0.42,
      "grad_norm": 5.2884063720703125,
      "learning_rate": 3.000600120024005e-06,
      "loss": 1.465,
      "step": 4400
    },
    {
      "epoch": 0.42,
      "eval_loss": 1.2479716539382935,
      "eval_runtime": 27.2406,
      "eval_samples_per_second": 18.355,
      "eval_steps_per_second": 2.313,
      "step": 4400
    },
    {
      "epoch": 0.43,
      "grad_norm": 6.742663383483887,
      "learning_rate": 2.5005001000200043e-06,
      "loss": 1.3964,
      "step": 4500
    },
    {
      "epoch": 0.43,
      "eval_loss": 1.2463291883468628,
      "eval_runtime": 27.2445,
      "eval_samples_per_second": 18.352,
      "eval_steps_per_second": 2.312,
      "step": 4500
    },
    {
      "epoch": 0.44,
      "grad_norm": 5.316954612731934,
      "learning_rate": 2.0004000800160032e-06,
      "loss": 1.4355,
      "step": 4600
    },
    {
      "epoch": 0.44,
      "eval_loss": 1.244441032409668,
      "eval_runtime": 27.2539,
      "eval_samples_per_second": 18.346,
      "eval_steps_per_second": 2.312,
      "step": 4600
    },
    {
      "epoch": 0.45,
      "grad_norm": 7.313477516174316,
      "learning_rate": 1.5003000600120025e-06,
      "loss": 1.4902,
      "step": 4700
    },
    {
      "epoch": 0.45,
      "eval_loss": 1.2442010641098022,
      "eval_runtime": 27.2614,
      "eval_samples_per_second": 18.341,
      "eval_steps_per_second": 2.311,
      "step": 4700
    },
    {
      "epoch": 0.46,
      "grad_norm": 4.464217185974121,
      "learning_rate": 1.0002000400080016e-06,
      "loss": 1.3817,
      "step": 4800
    },
    {
      "epoch": 0.46,
      "eval_loss": 1.2431871891021729,
      "eval_runtime": 27.2574,
      "eval_samples_per_second": 18.344,
      "eval_steps_per_second": 2.311,
      "step": 4800
    },
    {
      "epoch": 0.47,
      "grad_norm": 9.320337295532227,
      "learning_rate": 5.001000200040008e-07,
      "loss": 1.3901,
      "step": 4900
    },
    {
      "epoch": 0.47,
      "eval_loss": 1.2422610521316528,
      "eval_runtime": 27.2557,
      "eval_samples_per_second": 18.345,
      "eval_steps_per_second": 2.311,
      "step": 4900
    }
  ],
  "logging_steps": 100,
  "max_steps": 5000,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 100,
  "total_flos": 1.866856780959744e+17,
  "train_batch_size": 2,
  "trial_name": null,
  "trial_params": null
}