{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 4.976076555023924,
  "global_step": 130,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.04,
      "learning_rate": 0.0,
      "loss": 25.0234,
      "step": 1
    },
    {
      "epoch": 0.08,
      "learning_rate": 5e-06,
      "loss": 20.7754,
      "step": 2
    },
    {
      "epoch": 0.11,
      "learning_rate": 7.924812503605782e-06,
      "loss": 20.7559,
      "step": 3
    },
    {
      "epoch": 0.15,
      "learning_rate": 1e-05,
      "loss": 13.4424,
      "step": 4
    },
    {
      "epoch": 0.19,
      "learning_rate": 1e-05,
      "loss": 8.2891,
      "step": 5
    },
    {
      "epoch": 0.23,
      "learning_rate": 9.920634920634922e-06,
      "loss": 6.2437,
      "step": 6
    },
    {
      "epoch": 0.27,
      "learning_rate": 9.841269841269842e-06,
      "loss": 2.3472,
      "step": 7
    },
    {
      "epoch": 0.31,
      "learning_rate": 9.761904761904762e-06,
      "loss": 1.9103,
      "step": 8
    },
    {
      "epoch": 0.34,
      "learning_rate": 9.682539682539683e-06,
      "loss": 1.5619,
      "step": 9
    },
    {
      "epoch": 0.38,
      "learning_rate": 9.603174603174605e-06,
      "loss": 1.3292,
      "step": 10
    },
    {
      "epoch": 0.42,
      "learning_rate": 9.523809523809525e-06,
      "loss": 1.3256,
      "step": 11
    },
    {
      "epoch": 0.46,
      "learning_rate": 9.444444444444445e-06,
      "loss": 0.902,
      "step": 12
    },
    {
      "epoch": 0.5,
      "learning_rate": 9.365079365079366e-06,
      "loss": 1.163,
      "step": 13
    },
    {
      "epoch": 0.54,
      "learning_rate": 9.285714285714288e-06,
      "loss": 1.0538,
      "step": 14
    },
    {
      "epoch": 0.57,
      "learning_rate": 9.206349206349207e-06,
      "loss": 0.944,
      "step": 15
    },
    {
      "epoch": 0.61,
      "learning_rate": 9.126984126984127e-06,
      "loss": 0.8447,
      "step": 16
    },
    {
      "epoch": 0.65,
      "learning_rate": 9.047619047619049e-06,
      "loss": 1.1726,
      "step": 17
    },
    {
      "epoch": 0.69,
      "learning_rate": 8.968253968253968e-06,
      "loss": 1.138,
      "step": 18
    },
    {
      "epoch": 0.73,
      "learning_rate": 8.888888888888888e-06,
      "loss": 1.1524,
      "step": 19
    },
    {
      "epoch": 0.77,
      "learning_rate": 8.80952380952381e-06,
      "loss": 1.026,
      "step": 20
    },
    {
      "epoch": 0.8,
      "learning_rate": 8.730158730158731e-06,
      "loss": 1.0569,
      "step": 21
    },
    {
      "epoch": 0.84,
      "learning_rate": 8.650793650793651e-06,
      "loss": 0.9084,
      "step": 22
    },
    {
      "epoch": 0.88,
      "learning_rate": 8.571428571428571e-06,
      "loss": 0.8767,
      "step": 23
    },
    {
      "epoch": 0.92,
      "learning_rate": 8.492063492063492e-06,
      "loss": 1.101,
      "step": 24
    },
    {
      "epoch": 0.96,
      "learning_rate": 8.412698412698414e-06,
      "loss": 1.0402,
      "step": 25
    },
    {
      "epoch": 1.0,
      "learning_rate": 8.333333333333334e-06,
      "loss": 0.8947,
      "step": 26
    },
    {
      "epoch": 1.03,
      "learning_rate": 8.253968253968254e-06,
      "loss": 0.8985,
      "step": 27
    },
    {
      "epoch": 1.07,
      "learning_rate": 8.174603174603175e-06,
      "loss": 0.8772,
      "step": 28
    },
    {
      "epoch": 1.11,
      "learning_rate": 8.095238095238097e-06,
      "loss": 0.9256,
      "step": 29
    },
    {
      "epoch": 1.15,
      "learning_rate": 8.015873015873016e-06,
      "loss": 0.9597,
      "step": 30
    },
    {
      "epoch": 1.19,
      "learning_rate": 7.936507936507936e-06,
      "loss": 0.9373,
      "step": 31
    },
    {
      "epoch": 1.22,
      "learning_rate": 7.857142857142858e-06,
      "loss": 0.8894,
      "step": 32
    },
    {
      "epoch": 1.26,
      "learning_rate": 7.77777777777778e-06,
      "loss": 0.8194,
      "step": 33
    },
    {
      "epoch": 1.3,
      "learning_rate": 7.698412698412699e-06,
      "loss": 0.9131,
      "step": 34
    },
    {
      "epoch": 1.34,
      "learning_rate": 7.61904761904762e-06,
      "loss": 0.9411,
      "step": 35
    },
    {
      "epoch": 1.38,
      "learning_rate": 7.53968253968254e-06,
      "loss": 0.8598,
      "step": 36
    },
    {
      "epoch": 1.42,
      "learning_rate": 7.460317460317461e-06,
      "loss": 0.959,
      "step": 37
    },
    {
      "epoch": 1.45,
      "learning_rate": 7.380952380952382e-06,
      "loss": 0.8196,
      "step": 38
    },
    {
      "epoch": 1.49,
      "learning_rate": 7.301587301587301e-06,
      "loss": 1.0758,
      "step": 39
    },
    {
      "epoch": 1.53,
      "learning_rate": 7.222222222222223e-06,
      "loss": 0.822,
      "step": 40
    },
    {
      "epoch": 1.57,
      "learning_rate": 7.1428571428571436e-06,
      "loss": 0.889,
      "step": 41
    },
    {
      "epoch": 1.61,
      "learning_rate": 7.063492063492064e-06,
      "loss": 0.8716,
      "step": 42
    },
    {
      "epoch": 1.65,
      "learning_rate": 6.984126984126984e-06,
      "loss": 0.8108,
      "step": 43
    },
    {
      "epoch": 1.68,
      "learning_rate": 6.9047619047619055e-06,
      "loss": 0.7736,
      "step": 44
    },
    {
      "epoch": 1.72,
      "learning_rate": 6.825396825396826e-06,
      "loss": 0.7957,
      "step": 45
    },
    {
      "epoch": 1.76,
      "learning_rate": 6.746031746031747e-06,
      "loss": 0.6562,
      "step": 46
    },
    {
      "epoch": 1.8,
      "learning_rate": 6.666666666666667e-06,
      "loss": 0.9037,
      "step": 47
    },
    {
      "epoch": 1.84,
      "learning_rate": 6.587301587301588e-06,
      "loss": 0.7161,
      "step": 48
    },
    {
      "epoch": 1.88,
      "learning_rate": 6.507936507936509e-06,
      "loss": 0.8124,
      "step": 49
    },
    {
      "epoch": 1.91,
      "learning_rate": 6.4285714285714295e-06,
      "loss": 0.8216,
      "step": 50
    },
    {
      "epoch": 1.95,
      "learning_rate": 6.349206349206349e-06,
      "loss": 0.8413,
      "step": 51
    },
    {
      "epoch": 1.99,
      "learning_rate": 6.26984126984127e-06,
      "loss": 0.8208,
      "step": 52
    },
    {
      "epoch": 2.03,
      "learning_rate": 6.1904761904761914e-06,
      "loss": 0.673,
      "step": 53
    },
    {
      "epoch": 2.07,
      "learning_rate": 6.111111111111112e-06,
      "loss": 0.7171,
      "step": 54
    },
    {
      "epoch": 2.11,
      "learning_rate": 6.031746031746032e-06,
      "loss": 0.6513,
      "step": 55
    },
    {
      "epoch": 2.14,
      "learning_rate": 5.9523809523809525e-06,
      "loss": 0.6463,
      "step": 56
    },
    {
      "epoch": 2.18,
      "learning_rate": 5.873015873015874e-06,
      "loss": 0.6753,
      "step": 57
    },
    {
      "epoch": 2.22,
      "learning_rate": 5.793650793650795e-06,
      "loss": 0.8448,
      "step": 58
    },
    {
      "epoch": 2.26,
      "learning_rate": 5.7142857142857145e-06,
      "loss": 0.7941,
      "step": 59
    },
    {
      "epoch": 2.3,
      "learning_rate": 5.634920634920635e-06,
      "loss": 0.7198,
      "step": 60
    },
    {
      "epoch": 2.33,
      "learning_rate": 5.555555555555557e-06,
      "loss": 0.6262,
      "step": 61
    },
    {
      "epoch": 2.37,
      "learning_rate": 5.476190476190477e-06,
      "loss": 0.7605,
      "step": 62
    },
    {
      "epoch": 2.41,
      "learning_rate": 5.396825396825397e-06,
      "loss": 0.6028,
      "step": 63
    },
    {
      "epoch": 2.45,
      "learning_rate": 5.317460317460318e-06,
      "loss": 0.4278,
      "step": 64
    },
    {
      "epoch": 2.49,
      "learning_rate": 5.2380952380952384e-06,
      "loss": 0.6504,
      "step": 65
    },
    {
      "epoch": 2.53,
      "learning_rate": 5.15873015873016e-06,
      "loss": 0.7392,
      "step": 66
    },
    {
      "epoch": 2.56,
      "learning_rate": 5.07936507936508e-06,
      "loss": 0.5725,
      "step": 67
    },
    {
      "epoch": 2.6,
      "learning_rate": 5e-06,
      "loss": 0.6088,
      "step": 68
    },
    {
      "epoch": 2.64,
      "learning_rate": 4.920634920634921e-06,
      "loss": 0.6365,
      "step": 69
    },
    {
      "epoch": 2.68,
      "learning_rate": 4.841269841269842e-06,
      "loss": 0.6722,
      "step": 70
    },
    {
      "epoch": 2.72,
      "learning_rate": 4.761904761904762e-06,
      "loss": 0.8014,
      "step": 71
    },
    {
      "epoch": 2.76,
      "learning_rate": 4.682539682539683e-06,
      "loss": 0.6103,
      "step": 72
    },
    {
      "epoch": 2.79,
      "learning_rate": 4.603174603174604e-06,
      "loss": 0.8294,
      "step": 73
    },
    {
      "epoch": 2.83,
      "learning_rate": 4.523809523809524e-06,
      "loss": 0.6915,
      "step": 74
    },
    {
      "epoch": 2.87,
      "learning_rate": 4.444444444444444e-06,
      "loss": 0.6409,
      "step": 75
    },
    {
      "epoch": 2.91,
      "learning_rate": 4.365079365079366e-06,
      "loss": 0.8038,
      "step": 76
    },
    {
      "epoch": 2.95,
      "learning_rate": 4.2857142857142855e-06,
      "loss": 0.7051,
      "step": 77
    },
    {
      "epoch": 2.99,
      "learning_rate": 4.206349206349207e-06,
      "loss": 0.6997,
      "step": 78
    },
    {
      "epoch": 3.02,
      "learning_rate": 4.126984126984127e-06,
      "loss": 0.3337,
      "step": 79
    },
    {
      "epoch": 3.06,
      "learning_rate": 4.047619047619048e-06,
      "loss": 0.6256,
      "step": 80
    },
    {
      "epoch": 3.1,
      "learning_rate": 3.968253968253968e-06,
      "loss": 0.4855,
      "step": 81
    },
    {
      "epoch": 3.14,
      "learning_rate": 3.88888888888889e-06,
      "loss": 0.6082,
      "step": 82
    },
    {
      "epoch": 3.18,
      "learning_rate": 3.80952380952381e-06,
      "loss": 0.6208,
      "step": 83
    },
    {
      "epoch": 3.22,
      "learning_rate": 3.7301587301587305e-06,
      "loss": 0.4755,
      "step": 84
    },
    {
      "epoch": 3.25,
      "learning_rate": 3.6507936507936507e-06,
      "loss": 0.4758,
      "step": 85
    },
    {
      "epoch": 3.29,
      "learning_rate": 3.5714285714285718e-06,
      "loss": 0.4895,
      "step": 86
    },
    {
      "epoch": 3.33,
      "learning_rate": 3.492063492063492e-06,
      "loss": 0.5123,
      "step": 87
    },
    {
      "epoch": 3.37,
      "learning_rate": 3.412698412698413e-06,
      "loss": 0.5344,
      "step": 88
    },
    {
      "epoch": 3.41,
      "learning_rate": 3.3333333333333333e-06,
      "loss": 0.5446,
      "step": 89
    },
    {
      "epoch": 3.44,
      "learning_rate": 3.2539682539682544e-06,
      "loss": 0.5323,
      "step": 90
    },
    {
      "epoch": 3.48,
      "learning_rate": 3.1746031746031746e-06,
      "loss": 0.5662,
      "step": 91
    },
    {
      "epoch": 3.52,
      "learning_rate": 3.0952380952380957e-06,
      "loss": 0.5845,
      "step": 92
    },
    {
      "epoch": 3.56,
      "learning_rate": 3.015873015873016e-06,
      "loss": 0.5142,
      "step": 93
    },
    {
      "epoch": 3.6,
      "learning_rate": 2.936507936507937e-06,
      "loss": 0.5691,
      "step": 94
    },
    {
      "epoch": 3.64,
      "learning_rate": 2.8571428571428573e-06,
      "loss": 0.5474,
      "step": 95
    },
    {
      "epoch": 3.67,
      "learning_rate": 2.7777777777777783e-06,
      "loss": 0.7691,
      "step": 96
    },
    {
      "epoch": 3.71,
      "learning_rate": 2.6984126984126986e-06,
      "loss": 0.5452,
      "step": 97
    },
    {
      "epoch": 3.75,
      "learning_rate": 2.6190476190476192e-06,
      "loss": 0.5924,
      "step": 98
    },
    {
      "epoch": 3.79,
      "learning_rate": 2.53968253968254e-06,
      "loss": 0.5127,
      "step": 99
    },
    {
      "epoch": 3.83,
      "learning_rate": 2.4603174603174605e-06,
      "loss": 0.4649,
      "step": 100
    },
    {
      "epoch": 3.87,
      "learning_rate": 2.380952380952381e-06,
      "loss": 0.524,
      "step": 101
    },
    {
      "epoch": 3.9,
      "learning_rate": 2.301587301587302e-06,
      "loss": 0.6315,
      "step": 102
    },
    {
      "epoch": 3.94,
      "learning_rate": 2.222222222222222e-06,
      "loss": 0.6544,
      "step": 103
    },
    {
      "epoch": 3.98,
      "learning_rate": 2.1428571428571427e-06,
      "loss": 0.6733,
      "step": 104
    },
    {
      "epoch": 4.02,
      "learning_rate": 2.0634920634920634e-06,
      "loss": 0.6155,
      "step": 105
    },
    {
      "epoch": 4.06,
      "learning_rate": 1.984126984126984e-06,
      "loss": 0.504,
      "step": 106
    },
    {
      "epoch": 4.1,
      "learning_rate": 1.904761904761905e-06,
      "loss": 0.4849,
      "step": 107
    },
    {
      "epoch": 4.13,
      "learning_rate": 1.8253968253968254e-06,
      "loss": 0.465,
      "step": 108
    },
    {
      "epoch": 4.17,
      "learning_rate": 1.746031746031746e-06,
      "loss": 0.4481,
      "step": 109
    },
    {
      "epoch": 4.21,
      "learning_rate": 1.6666666666666667e-06,
      "loss": 0.6087,
      "step": 110
    },
    {
      "epoch": 4.25,
      "learning_rate": 1.5873015873015873e-06,
      "loss": 0.4352,
      "step": 111
    },
    {
      "epoch": 4.29,
      "learning_rate": 1.507936507936508e-06,
      "loss": 0.4346,
      "step": 112
    },
    {
      "epoch": 4.33,
      "learning_rate": 1.4285714285714286e-06,
      "loss": 0.57,
      "step": 113
    },
    {
      "epoch": 4.36,
      "learning_rate": 1.3492063492063493e-06,
      "loss": 0.4804,
      "step": 114
    },
    {
      "epoch": 4.4,
      "learning_rate": 1.26984126984127e-06,
      "loss": 0.5602,
      "step": 115
    },
    {
      "epoch": 4.44,
      "learning_rate": 1.1904761904761906e-06,
      "loss": 0.5102,
      "step": 116
    },
    {
      "epoch": 4.48,
      "learning_rate": 1.111111111111111e-06,
      "loss": 0.3693,
      "step": 117
    },
    {
      "epoch": 4.52,
      "learning_rate": 1.0317460317460317e-06,
      "loss": 0.5274,
      "step": 118
    },
    {
      "epoch": 4.56,
      "learning_rate": 9.523809523809525e-07,
      "loss": 0.4763,
      "step": 119
    },
    {
      "epoch": 4.59,
      "learning_rate": 8.73015873015873e-07,
      "loss": 0.5746,
      "step": 120
    },
    {
      "epoch": 4.63,
      "learning_rate": 7.936507936507937e-07,
      "loss": 0.4454,
      "step": 121
    },
    {
      "epoch": 4.67,
      "learning_rate": 7.142857142857143e-07,
      "loss": 0.3733,
      "step": 122
    },
    {
      "epoch": 4.71,
      "learning_rate": 6.34920634920635e-07,
      "loss": 0.5295,
      "step": 123
    },
    {
      "epoch": 4.75,
      "learning_rate": 5.555555555555555e-07,
      "loss": 0.4557,
      "step": 124
    },
    {
      "epoch": 4.78,
      "learning_rate": 4.7619047619047623e-07,
      "loss": 0.3931,
      "step": 125
    },
    {
      "epoch": 4.82,
      "learning_rate": 3.9682539682539683e-07,
      "loss": 0.4848,
      "step": 126
    },
    {
      "epoch": 4.86,
      "learning_rate": 3.174603174603175e-07,
      "loss": 0.5132,
      "step": 127
    },
    {
      "epoch": 4.9,
      "learning_rate": 2.3809523809523811e-07,
      "loss": 0.3818,
      "step": 128
    },
    {
      "epoch": 4.94,
      "learning_rate": 1.5873015873015874e-07,
      "loss": 0.5238,
      "step": 129
    },
    {
      "epoch": 4.98,
      "learning_rate": 7.936507936507937e-08,
      "loss": 0.5189,
      "step": 130
    },
    {
      "epoch": 4.98,
      "step": 130,
      "total_flos": 13498620395520.0,
      "train_loss": 1.4273854613304138,
      "train_runtime": 4877.3379,
      "train_samples_per_second": 3.428,
      "train_steps_per_second": 0.027
    }
  ],
  "max_steps": 130,
  "num_train_epochs": 5,
  "total_flos": 13498620395520.0,
  "trial_name": null,
  "trial_params": null
}