{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 3.0,
  "global_step": 49971,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.03,
      "learning_rate": 1.9799883932680958e-05,
      "loss": 0.7746,
      "step": 500
    },
    {
      "epoch": 0.06,
      "learning_rate": 1.959976786536191e-05,
      "loss": 0.6052,
      "step": 1000
    },
    {
      "epoch": 0.09,
      "learning_rate": 1.9399651798042868e-05,
      "loss": 0.5685,
      "step": 1500
    },
    {
      "epoch": 0.12,
      "learning_rate": 1.919953573072382e-05,
      "loss": 0.5537,
      "step": 2000
    },
    {
      "epoch": 0.15,
      "learning_rate": 1.8999419663404778e-05,
      "loss": 0.5239,
      "step": 2500
    },
    {
      "epoch": 0.18,
      "learning_rate": 1.879930359608573e-05,
      "loss": 0.5159,
      "step": 3000
    },
    {
      "epoch": 0.21,
      "learning_rate": 1.8599187528766688e-05,
      "loss": 0.5019,
      "step": 3500
    },
    {
      "epoch": 0.24,
      "learning_rate": 1.839907146144764e-05,
      "loss": 0.4966,
      "step": 4000
    },
    {
      "epoch": 0.27,
      "learning_rate": 1.8198955394128598e-05,
      "loss": 0.4927,
      "step": 4500
    },
    {
      "epoch": 0.3,
      "learning_rate": 1.799883932680955e-05,
      "loss": 0.4769,
      "step": 5000
    },
    {
      "epoch": 0.33,
      "learning_rate": 1.7798723259490508e-05,
      "loss": 0.4843,
      "step": 5500
    },
    {
      "epoch": 0.36,
      "learning_rate": 1.759860719217146e-05,
      "loss": 0.4739,
      "step": 6000
    },
    {
      "epoch": 0.39,
      "learning_rate": 1.7398491124852417e-05,
      "loss": 0.4597,
      "step": 6500
    },
    {
      "epoch": 0.42,
      "learning_rate": 1.719837505753337e-05,
      "loss": 0.4617,
      "step": 7000
    },
    {
      "epoch": 0.45,
      "learning_rate": 1.6998258990214324e-05,
      "loss": 0.4615,
      "step": 7500
    },
    {
      "epoch": 0.48,
      "learning_rate": 1.679814292289528e-05,
      "loss": 0.4416,
      "step": 8000
    },
    {
      "epoch": 0.51,
      "learning_rate": 1.6598026855576234e-05,
      "loss": 0.4451,
      "step": 8500
    },
    {
      "epoch": 0.54,
      "learning_rate": 1.639791078825719e-05,
      "loss": 0.4566,
      "step": 9000
    },
    {
      "epoch": 0.57,
      "learning_rate": 1.6197794720938144e-05,
      "loss": 0.4418,
      "step": 9500
    },
    {
      "epoch": 0.6,
      "learning_rate": 1.59976786536191e-05,
      "loss": 0.4435,
      "step": 10000
    },
    {
      "epoch": 0.63,
      "learning_rate": 1.5797562586300054e-05,
      "loss": 0.4406,
      "step": 10500
    },
    {
      "epoch": 0.66,
      "learning_rate": 1.559744651898101e-05,
      "loss": 0.4331,
      "step": 11000
    },
    {
      "epoch": 0.69,
      "learning_rate": 1.5397330451661963e-05,
      "loss": 0.4261,
      "step": 11500
    },
    {
      "epoch": 0.72,
      "learning_rate": 1.519721438434292e-05,
      "loss": 0.4262,
      "step": 12000
    },
    {
      "epoch": 0.75,
      "learning_rate": 1.4997098317023875e-05,
      "loss": 0.4206,
      "step": 12500
    },
    {
      "epoch": 0.78,
      "learning_rate": 1.479698224970483e-05,
      "loss": 0.4203,
      "step": 13000
    },
    {
      "epoch": 0.81,
      "learning_rate": 1.4596866182385785e-05,
      "loss": 0.4217,
      "step": 13500
    },
    {
      "epoch": 0.84,
      "learning_rate": 1.439675011506674e-05,
      "loss": 0.4169,
      "step": 14000
    },
    {
      "epoch": 0.87,
      "learning_rate": 1.4196634047747695e-05,
      "loss": 0.413,
      "step": 14500
    },
    {
      "epoch": 0.9,
      "learning_rate": 1.399651798042865e-05,
      "loss": 0.4224,
      "step": 15000
    },
    {
      "epoch": 0.93,
      "learning_rate": 1.3796401913109605e-05,
      "loss": 0.4197,
      "step": 15500
    },
    {
      "epoch": 0.96,
      "learning_rate": 1.359628584579056e-05,
      "loss": 0.4043,
      "step": 16000
    },
    {
      "epoch": 0.99,
      "learning_rate": 1.3396169778471514e-05,
      "loss": 0.4054,
      "step": 16500
    },
    {
      "epoch": 1.0,
      "eval_accuracy": 0.9216036796569824,
      "eval_loss": 0.21408772468566895,
      "eval_runtime": 28.2461,
      "eval_samples_per_second": 138.639,
      "eval_steps_per_second": 4.355,
      "step": 16657
    },
    {
      "epoch": 1.02,
      "learning_rate": 1.319605371115247e-05,
      "loss": 0.3641,
      "step": 17000
    },
    {
      "epoch": 1.05,
      "learning_rate": 1.2995937643833424e-05,
      "loss": 0.3487,
      "step": 17500
    },
    {
      "epoch": 1.08,
      "learning_rate": 1.279582157651438e-05,
      "loss": 0.3472,
      "step": 18000
    },
    {
      "epoch": 1.11,
      "learning_rate": 1.2595705509195336e-05,
      "loss": 0.3443,
      "step": 18500
    },
    {
      "epoch": 1.14,
      "learning_rate": 1.239558944187629e-05,
      "loss": 0.3466,
      "step": 19000
    },
    {
      "epoch": 1.17,
      "learning_rate": 1.2195473374557246e-05,
      "loss": 0.3492,
      "step": 19500
    },
    {
      "epoch": 1.2,
      "learning_rate": 1.19953573072382e-05,
      "loss": 0.3559,
      "step": 20000
    },
    {
      "epoch": 1.23,
      "learning_rate": 1.1795241239919156e-05,
      "loss": 0.3408,
      "step": 20500
    },
    {
      "epoch": 1.26,
      "learning_rate": 1.1595125172600107e-05,
      "loss": 0.3391,
      "step": 21000
    },
    {
      "epoch": 1.29,
      "learning_rate": 1.1395009105281064e-05,
      "loss": 0.3429,
      "step": 21500
    },
    {
      "epoch": 1.32,
      "learning_rate": 1.1194893037962019e-05,
      "loss": 0.3466,
      "step": 22000
    },
    {
      "epoch": 1.35,
      "learning_rate": 1.0994776970642974e-05,
      "loss": 0.3382,
      "step": 22500
    },
    {
      "epoch": 1.38,
      "learning_rate": 1.0794660903323929e-05,
      "loss": 0.3379,
      "step": 23000
    },
    {
      "epoch": 1.41,
      "learning_rate": 1.0594544836004884e-05,
      "loss": 0.3513,
      "step": 23500
    },
    {
      "epoch": 1.44,
      "learning_rate": 1.0394428768685838e-05,
      "loss": 0.3467,
      "step": 24000
    },
    {
      "epoch": 1.47,
      "learning_rate": 1.0194312701366793e-05,
      "loss": 0.3448,
      "step": 24500
    },
    {
      "epoch": 1.5,
      "learning_rate": 9.994196634047748e-06,
      "loss": 0.3482,
      "step": 25000
    },
    {
      "epoch": 1.53,
      "learning_rate": 9.794080566728703e-06,
      "loss": 0.3432,
      "step": 25500
    },
    {
      "epoch": 1.56,
      "learning_rate": 9.593964499409658e-06,
      "loss": 0.3412,
      "step": 26000
    },
    {
      "epoch": 1.59,
      "learning_rate": 9.393848432090613e-06,
      "loss": 0.3449,
      "step": 26500
    },
    {
      "epoch": 1.62,
      "learning_rate": 9.193732364771568e-06,
      "loss": 0.3395,
      "step": 27000
    },
    {
      "epoch": 1.65,
      "learning_rate": 8.993616297452523e-06,
      "loss": 0.3295,
      "step": 27500
    },
    {
      "epoch": 1.68,
      "learning_rate": 8.793500230133478e-06,
      "loss": 0.3376,
      "step": 28000
    },
    {
      "epoch": 1.71,
      "learning_rate": 8.593384162814433e-06,
      "loss": 0.3371,
      "step": 28500
    },
    {
      "epoch": 1.74,
      "learning_rate": 8.393268095495388e-06,
      "loss": 0.3326,
      "step": 29000
    },
    {
      "epoch": 1.77,
      "learning_rate": 8.193152028176343e-06,
      "loss": 0.3382,
      "step": 29500
    },
    {
      "epoch": 1.8,
      "learning_rate": 7.993035960857298e-06,
      "loss": 0.3377,
      "step": 30000
    },
    {
      "epoch": 1.83,
      "learning_rate": 7.792919893538253e-06,
      "loss": 0.3289,
      "step": 30500
    },
    {
      "epoch": 1.86,
      "learning_rate": 7.592803826219208e-06,
      "loss": 0.3256,
      "step": 31000
    },
    {
      "epoch": 1.89,
      "learning_rate": 7.3926877589001625e-06,
      "loss": 0.3275,
      "step": 31500
    },
    {
      "epoch": 1.92,
      "learning_rate": 7.192571691581117e-06,
      "loss": 0.3317,
      "step": 32000
    },
    {
      "epoch": 1.95,
      "learning_rate": 6.992455624262072e-06,
      "loss": 0.3212,
      "step": 32500
    },
    {
      "epoch": 1.98,
      "learning_rate": 6.792339556943027e-06,
      "loss": 0.3297,
      "step": 33000
    },
    {
      "epoch": 2.0,
      "eval_accuracy": 0.9236465692520142,
      "eval_loss": 0.2144681066274643,
      "eval_runtime": 28.2644,
      "eval_samples_per_second": 138.549,
      "eval_steps_per_second": 4.352,
      "step": 33314
    },
    {
      "epoch": 2.01,
      "learning_rate": 6.592223489623982e-06,
      "loss": 0.3145,
      "step": 33500
    },
    {
      "epoch": 2.04,
      "learning_rate": 6.392107422304937e-06,
      "loss": 0.2768,
      "step": 34000
    },
    {
      "epoch": 2.07,
      "learning_rate": 6.191991354985892e-06,
      "loss": 0.269,
      "step": 34500
    },
    {
      "epoch": 2.1,
      "learning_rate": 5.991875287666847e-06,
      "loss": 0.275,
      "step": 35000
    },
    {
      "epoch": 2.13,
      "learning_rate": 5.791759220347802e-06,
      "loss": 0.2764,
      "step": 35500
    },
    {
      "epoch": 2.16,
      "learning_rate": 5.591643153028758e-06,
      "loss": 0.2691,
      "step": 36000
    },
    {
      "epoch": 2.19,
      "learning_rate": 5.391527085709713e-06,
      "loss": 0.2654,
      "step": 36500
    },
    {
      "epoch": 2.22,
      "learning_rate": 5.191411018390668e-06,
      "loss": 0.2668,
      "step": 37000
    },
    {
      "epoch": 2.25,
      "learning_rate": 4.991294951071622e-06,
      "loss": 0.2707,
      "step": 37500
    },
    {
      "epoch": 2.28,
      "learning_rate": 4.791178883752577e-06,
      "loss": 0.2567,
      "step": 38000
    },
    {
      "epoch": 2.31,
      "learning_rate": 4.5910628164335316e-06,
      "loss": 0.2534,
      "step": 38500
    },
    {
      "epoch": 2.34,
      "learning_rate": 4.390946749114487e-06,
      "loss": 0.2708,
      "step": 39000
    },
    {
      "epoch": 2.37,
      "learning_rate": 4.190830681795442e-06,
      "loss": 0.2651,
      "step": 39500
    },
    {
      "epoch": 2.4,
      "learning_rate": 3.990714614476396e-06,
      "loss": 0.2725,
      "step": 40000
    },
    {
      "epoch": 2.43,
      "learning_rate": 3.7905985471573513e-06,
      "loss": 0.2713,
      "step": 40500
    },
    {
      "epoch": 2.46,
      "learning_rate": 3.5904824798383066e-06,
      "loss": 0.2644,
      "step": 41000
    },
    {
      "epoch": 2.49,
      "learning_rate": 3.3903664125192616e-06,
      "loss": 0.2649,
      "step": 41500
    },
    {
      "epoch": 2.52,
      "learning_rate": 3.1902503452002165e-06,
      "loss": 0.2716,
      "step": 42000
    },
    {
      "epoch": 2.55,
      "learning_rate": 2.9901342778811715e-06,
      "loss": 0.2589,
      "step": 42500
    },
    {
      "epoch": 2.58,
      "learning_rate": 2.7900182105621264e-06,
      "loss": 0.2575,
      "step": 43000
    },
    {
      "epoch": 2.61,
      "learning_rate": 2.589902143243081e-06,
      "loss": 0.2623,
      "step": 43500
    },
    {
      "epoch": 2.64,
      "learning_rate": 2.3897860759240363e-06,
      "loss": 0.2668,
      "step": 44000
    },
    {
      "epoch": 2.67,
      "learning_rate": 2.189670008604991e-06,
      "loss": 0.266,
      "step": 44500
    },
    {
      "epoch": 2.7,
      "learning_rate": 1.989553941285946e-06,
      "loss": 0.2631,
      "step": 45000
    },
    {
      "epoch": 2.73,
      "learning_rate": 1.789437873966901e-06,
      "loss": 0.2652,
      "step": 45500
    },
    {
      "epoch": 2.76,
      "learning_rate": 1.5893218066478558e-06,
      "loss": 0.2679,
      "step": 46000
    },
    {
      "epoch": 2.79,
      "learning_rate": 1.3892057393288107e-06,
      "loss": 0.2678,
      "step": 46500
    },
    {
      "epoch": 2.82,
      "learning_rate": 1.1890896720097659e-06,
      "loss": 0.2601,
      "step": 47000
    },
    {
      "epoch": 2.85,
      "learning_rate": 9.889736046907208e-07,
      "loss": 0.2587,
      "step": 47500
    },
    {
      "epoch": 2.88,
      "learning_rate": 7.888575373716756e-07,
      "loss": 0.2664,
      "step": 48000
    },
    {
      "epoch": 2.91,
      "learning_rate": 5.887414700526306e-07,
      "loss": 0.2618,
      "step": 48500
    },
    {
      "epoch": 2.94,
      "learning_rate": 3.886254027335855e-07,
      "loss": 0.2672,
      "step": 49000
    },
    {
      "epoch": 2.97,
      "learning_rate": 1.8850933541454047e-07,
      "loss": 0.2645,
      "step": 49500
    },
    {
      "epoch": 3.0,
      "eval_accuracy": 0.9287538528442383,
      "eval_loss": 0.20848523080348969,
      "eval_runtime": 28.2503,
      "eval_samples_per_second": 138.618,
      "eval_steps_per_second": 4.354,
      "step": 49971
    },
    {
      "epoch": 3.0,
      "step": 49971,
      "total_flos": 4.207223016483379e+17,
      "train_loss": 0.35883062176062114,
      "train_runtime": 31863.0731,
      "train_samples_per_second": 50.184,
      "train_steps_per_second": 1.568
    }
  ],
  "max_steps": 49971,
  "num_train_epochs": 3,
  "total_flos": 4.207223016483379e+17,
  "trial_name": null,
  "trial_params": null
}