{
  "best_global_step": null,
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 10.0,
  "eval_steps": 500,
  "global_step": 670,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.0299625468164794,
      "grad_norm": 3.7318975925445557,
      "learning_rate": 5e-06,
      "loss": 1.6973,
      "step": 2
    },
    {
      "epoch": 0.0599250936329588,
      "grad_norm": 3.4212851524353027,
      "learning_rate": 1.5e-05,
      "loss": 1.6821,
      "step": 4
    },
    {
      "epoch": 0.0898876404494382,
      "grad_norm": 2.5065758228302,
      "learning_rate": 2.5e-05,
      "loss": 1.4689,
      "step": 6
    },
    {
      "epoch": 0.1198501872659176,
      "grad_norm": 2.081493854522705,
      "learning_rate": 3.5e-05,
      "loss": 1.411,
      "step": 8
    },
    {
      "epoch": 0.149812734082397,
      "grad_norm": 2.0012290477752686,
      "learning_rate": 4.5e-05,
      "loss": 1.1681,
      "step": 10
    },
    {
      "epoch": 0.1797752808988764,
      "grad_norm": 1.9056272506713867,
      "learning_rate": 4.992424242424243e-05,
      "loss": 0.9463,
      "step": 12
    },
    {
      "epoch": 0.20973782771535582,
      "grad_norm": 2.0275352001190186,
      "learning_rate": 4.9772727272727275e-05,
      "loss": 0.8338,
      "step": 14
    },
    {
      "epoch": 0.2397003745318352,
      "grad_norm": 1.6332467794418335,
      "learning_rate": 4.962121212121213e-05,
      "loss": 0.6327,
      "step": 16
    },
    {
      "epoch": 0.2696629213483146,
      "grad_norm": 1.3445727825164795,
      "learning_rate": 4.946969696969697e-05,
      "loss": 0.4954,
      "step": 18
    },
    {
      "epoch": 0.299625468164794,
      "grad_norm": 0.9636670351028442,
      "learning_rate": 4.931818181818182e-05,
      "loss": 0.3803,
      "step": 20
    },
    {
      "epoch": 0.3295880149812734,
      "grad_norm": 0.4598312973976135,
      "learning_rate": 4.9166666666666665e-05,
      "loss": 0.3355,
      "step": 22
    },
    {
      "epoch": 0.3595505617977528,
      "grad_norm": 0.6035417318344116,
      "learning_rate": 4.901515151515152e-05,
      "loss": 0.3144,
      "step": 24
    },
    {
      "epoch": 0.3895131086142322,
      "grad_norm": 0.48754820227622986,
      "learning_rate": 4.886363636363637e-05,
      "loss": 0.2577,
      "step": 26
    },
    {
      "epoch": 0.41947565543071164,
      "grad_norm": 0.5186039805412292,
      "learning_rate": 4.8712121212121216e-05,
      "loss": 0.2478,
      "step": 28
    },
    {
      "epoch": 0.449438202247191,
      "grad_norm": 0.6087205410003662,
      "learning_rate": 4.856060606060606e-05,
      "loss": 0.2272,
      "step": 30
    },
    {
      "epoch": 0.4794007490636704,
      "grad_norm": 0.8594069480895996,
      "learning_rate": 4.840909090909091e-05,
      "loss": 0.1992,
      "step": 32
    },
    {
      "epoch": 0.5093632958801498,
      "grad_norm": 0.6205722093582153,
      "learning_rate": 4.825757575757576e-05,
      "loss": 0.1631,
      "step": 34
    },
    {
      "epoch": 0.5393258426966292,
      "grad_norm": 0.4654604196548462,
      "learning_rate": 4.810606060606061e-05,
      "loss": 0.1629,
      "step": 36
    },
    {
      "epoch": 0.5692883895131086,
      "grad_norm": 0.9451954364776611,
      "learning_rate": 4.795454545454546e-05,
      "loss": 0.1814,
      "step": 38
    },
    {
      "epoch": 0.599250936329588,
      "grad_norm": 0.4229886829853058,
      "learning_rate": 4.7803030303030304e-05,
      "loss": 0.14,
      "step": 40
    },
    {
      "epoch": 0.6292134831460674,
      "grad_norm": 0.43104586005210876,
      "learning_rate": 4.765151515151515e-05,
      "loss": 0.1495,
      "step": 42
    },
    {
      "epoch": 0.6591760299625468,
      "grad_norm": 0.48881301283836365,
      "learning_rate": 4.75e-05,
      "loss": 0.0983,
      "step": 44
    },
    {
      "epoch": 0.6891385767790262,
      "grad_norm": 0.3992343544960022,
      "learning_rate": 4.7348484848484855e-05,
      "loss": 0.0893,
      "step": 46
    },
    {
      "epoch": 0.7191011235955056,
      "grad_norm": 0.4501804709434509,
      "learning_rate": 4.71969696969697e-05,
      "loss": 0.092,
      "step": 48
    },
    {
      "epoch": 0.7490636704119851,
      "grad_norm": 0.576210081577301,
      "learning_rate": 4.704545454545455e-05,
      "loss": 0.0741,
      "step": 50
    },
    {
      "epoch": 0.7790262172284644,
      "grad_norm": 0.4083947241306305,
      "learning_rate": 4.689393939393939e-05,
      "loss": 0.0572,
      "step": 52
    },
    {
      "epoch": 0.8089887640449438,
      "grad_norm": 0.5864295363426208,
      "learning_rate": 4.6742424242424245e-05,
      "loss": 0.0769,
      "step": 54
    },
    {
      "epoch": 0.8389513108614233,
      "grad_norm": 0.7252892851829529,
      "learning_rate": 4.659090909090909e-05,
      "loss": 0.0619,
      "step": 56
    },
    {
      "epoch": 0.8689138576779026,
      "grad_norm": 0.42146825790405273,
      "learning_rate": 4.6439393939393944e-05,
      "loss": 0.0384,
      "step": 58
    },
    {
      "epoch": 0.898876404494382,
      "grad_norm": 0.593726634979248,
      "learning_rate": 4.628787878787879e-05,
      "loss": 0.052,
      "step": 60
    },
    {
      "epoch": 0.9288389513108615,
      "grad_norm": 0.5652066469192505,
      "learning_rate": 4.6136363636363635e-05,
      "loss": 0.0468,
      "step": 62
    },
    {
      "epoch": 0.9588014981273408,
      "grad_norm": 0.28684931993484497,
      "learning_rate": 4.598484848484849e-05,
      "loss": 0.0295,
      "step": 64
    },
    {
      "epoch": 0.9887640449438202,
      "grad_norm": 0.2981000244617462,
      "learning_rate": 4.5833333333333334e-05,
      "loss": 0.032,
      "step": 66
    },
    {
      "epoch": 1.0149812734082397,
      "grad_norm": 0.48514777421951294,
      "learning_rate": 4.5681818181818186e-05,
      "loss": 0.0506,
      "step": 68
    },
    {
      "epoch": 1.0449438202247192,
      "grad_norm": 0.32234543561935425,
      "learning_rate": 4.553030303030303e-05,
      "loss": 0.0318,
      "step": 70
    },
    {
      "epoch": 1.0749063670411985,
      "grad_norm": 0.38080576062202454,
      "learning_rate": 4.5378787878787885e-05,
      "loss": 0.0258,
      "step": 72
    },
    {
      "epoch": 1.104868913857678,
      "grad_norm": 0.3635996878147125,
      "learning_rate": 4.522727272727273e-05,
      "loss": 0.037,
      "step": 74
    },
    {
      "epoch": 1.1348314606741572,
      "grad_norm": 0.42449885606765747,
      "learning_rate": 4.5075757575757577e-05,
      "loss": 0.0393,
      "step": 76
    },
    {
      "epoch": 1.1647940074906367,
      "grad_norm": 0.5858486890792847,
      "learning_rate": 4.492424242424242e-05,
      "loss": 0.0207,
      "step": 78
    },
    {
      "epoch": 1.1947565543071161,
      "grad_norm": 0.24009667336940765,
      "learning_rate": 4.4772727272727275e-05,
      "loss": 0.0189,
      "step": 80
    },
    {
      "epoch": 1.2247191011235956,
      "grad_norm": 0.21841216087341309,
      "learning_rate": 4.462121212121213e-05,
      "loss": 0.0173,
      "step": 82
    },
    {
      "epoch": 1.2546816479400749,
      "grad_norm": 0.34650227427482605,
      "learning_rate": 4.4469696969696973e-05,
      "loss": 0.0311,
      "step": 84
    },
    {
      "epoch": 1.2846441947565543,
      "grad_norm": 0.3726528584957123,
      "learning_rate": 4.431818181818182e-05,
      "loss": 0.0272,
      "step": 86
    },
    {
      "epoch": 1.3146067415730336,
      "grad_norm": 0.2907704710960388,
      "learning_rate": 4.4166666666666665e-05,
      "loss": 0.0235,
      "step": 88
    },
    {
      "epoch": 1.344569288389513,
      "grad_norm": 0.21228495240211487,
      "learning_rate": 4.401515151515152e-05,
      "loss": 0.0206,
      "step": 90
    },
    {
      "epoch": 1.3745318352059925,
      "grad_norm": 0.2763824462890625,
      "learning_rate": 4.386363636363637e-05,
      "loss": 0.0222,
      "step": 92
    },
    {
      "epoch": 1.404494382022472,
      "grad_norm": 0.2596808969974518,
      "learning_rate": 4.3712121212121216e-05,
      "loss": 0.0171,
      "step": 94
    },
    {
      "epoch": 1.4344569288389513,
      "grad_norm": 0.36683645844459534,
      "learning_rate": 4.356060606060606e-05,
      "loss": 0.0213,
      "step": 96
    },
    {
      "epoch": 1.4644194756554307,
      "grad_norm": 0.25381800532341003,
      "learning_rate": 4.340909090909091e-05,
      "loss": 0.0149,
      "step": 98
    },
    {
      "epoch": 1.49438202247191,
      "grad_norm": 0.4874545633792877,
      "learning_rate": 4.325757575757576e-05,
      "loss": 0.0235,
      "step": 100
    },
    {
      "epoch": 1.5243445692883895,
      "grad_norm": 0.35136911273002625,
      "learning_rate": 4.3106060606060606e-05,
      "loss": 0.0186,
      "step": 102
    },
    {
      "epoch": 1.554307116104869,
      "grad_norm": 0.3259860873222351,
      "learning_rate": 4.295454545454546e-05,
      "loss": 0.0228,
      "step": 104
    },
    {
      "epoch": 1.5842696629213484,
      "grad_norm": 0.24276824295520782,
      "learning_rate": 4.2803030303030305e-05,
      "loss": 0.0147,
      "step": 106
    },
    {
      "epoch": 1.6142322097378277,
      "grad_norm": 0.4493170976638794,
      "learning_rate": 4.265151515151515e-05,
      "loss": 0.0152,
      "step": 108
    },
    {
      "epoch": 1.6441947565543071,
      "grad_norm": 0.4474326968193054,
      "learning_rate": 4.25e-05,
      "loss": 0.0199,
      "step": 110
    },
    {
      "epoch": 1.6741573033707864,
      "grad_norm": 0.229719340801239,
      "learning_rate": 4.234848484848485e-05,
      "loss": 0.0166,
      "step": 112
    },
    {
      "epoch": 1.7041198501872659,
      "grad_norm": 0.2303067445755005,
      "learning_rate": 4.21969696969697e-05,
      "loss": 0.017,
      "step": 114
    },
    {
      "epoch": 1.7340823970037453,
      "grad_norm": 0.24925805628299713,
      "learning_rate": 4.204545454545455e-05,
      "loss": 0.0147,
      "step": 116
    },
    {
      "epoch": 1.7640449438202248,
      "grad_norm": 0.18361054360866547,
      "learning_rate": 4.189393939393939e-05,
      "loss": 0.0191,
      "step": 118
    },
    {
      "epoch": 1.7940074906367043,
      "grad_norm": 0.2936908006668091,
      "learning_rate": 4.1742424242424246e-05,
      "loss": 0.0189,
      "step": 120
    },
    {
      "epoch": 1.8239700374531835,
      "grad_norm": 0.2073887437582016,
      "learning_rate": 4.159090909090909e-05,
      "loss": 0.0171,
      "step": 122
    },
    {
      "epoch": 1.8539325842696628,
      "grad_norm": 0.3212382197380066,
      "learning_rate": 4.143939393939394e-05,
      "loss": 0.0204,
      "step": 124
    },
    {
      "epoch": 1.8838951310861423,
      "grad_norm": 0.1888497769832611,
      "learning_rate": 4.128787878787879e-05,
      "loss": 0.0201,
      "step": 126
    },
    {
      "epoch": 1.9138576779026217,
      "grad_norm": 0.18828518688678741,
      "learning_rate": 4.113636363636364e-05,
      "loss": 0.0179,
      "step": 128
    },
    {
      "epoch": 1.9438202247191012,
      "grad_norm": 0.22073791921138763,
      "learning_rate": 4.098484848484849e-05,
      "loss": 0.02,
      "step": 130
    },
    {
      "epoch": 1.9737827715355807,
      "grad_norm": 0.23251274228096008,
      "learning_rate": 4.0833333333333334e-05,
      "loss": 0.0184,
      "step": 132
    },
    {
      "epoch": 2.0,
      "grad_norm": 0.2079184651374817,
      "learning_rate": 4.068181818181818e-05,
      "loss": 0.0192,
      "step": 134
    },
    {
      "epoch": 2.0299625468164795,
      "grad_norm": 0.21343930065631866,
      "learning_rate": 4.053030303030303e-05,
      "loss": 0.0166,
      "step": 136
    },
    {
      "epoch": 2.059925093632959,
      "grad_norm": 0.2166517972946167,
      "learning_rate": 4.0378787878787885e-05,
      "loss": 0.0159,
      "step": 138
    },
    {
      "epoch": 2.0898876404494384,
      "grad_norm": 0.17601799964904785,
      "learning_rate": 4.022727272727273e-05,
      "loss": 0.0157,
      "step": 140
    },
    {
      "epoch": 2.1198501872659175,
      "grad_norm": 0.292283296585083,
      "learning_rate": 4.007575757575758e-05,
      "loss": 0.0214,
      "step": 142
    },
    {
      "epoch": 2.149812734082397,
      "grad_norm": 0.2258112132549286,
      "learning_rate": 3.992424242424242e-05,
      "loss": 0.0184,
      "step": 144
    },
    {
      "epoch": 2.1797752808988764,
      "grad_norm": 0.15815454721450806,
      "learning_rate": 3.9772727272727275e-05,
      "loss": 0.017,
      "step": 146
    },
    {
      "epoch": 2.209737827715356,
      "grad_norm": 0.26803699135780334,
      "learning_rate": 3.962121212121213e-05,
      "loss": 0.0182,
      "step": 148
    },
    {
      "epoch": 2.2397003745318353,
      "grad_norm": 0.2198362499475479,
      "learning_rate": 3.9469696969696974e-05,
      "loss": 0.0167,
      "step": 150
    },
    {
      "epoch": 2.2696629213483144,
      "grad_norm": 0.201346293091774,
      "learning_rate": 3.931818181818182e-05,
      "loss": 0.0161,
      "step": 152
    },
    {
      "epoch": 2.299625468164794,
      "grad_norm": 0.22819150984287262,
      "learning_rate": 3.9166666666666665e-05,
      "loss": 0.015,
      "step": 154
    },
    {
      "epoch": 2.3295880149812733,
      "grad_norm": 0.2712112069129944,
      "learning_rate": 3.901515151515152e-05,
      "loss": 0.0163,
      "step": 156
    },
    {
      "epoch": 2.359550561797753,
      "grad_norm": 0.19178956747055054,
      "learning_rate": 3.8863636363636364e-05,
      "loss": 0.0163,
      "step": 158
    },
    {
      "epoch": 2.3895131086142323,
      "grad_norm": 0.1914001852273941,
      "learning_rate": 3.8712121212121217e-05,
      "loss": 0.0168,
      "step": 160
    },
    {
      "epoch": 2.4194756554307117,
      "grad_norm": 0.1301780492067337,
      "learning_rate": 3.856060606060606e-05,
      "loss": 0.0151,
      "step": 162
    },
    {
      "epoch": 2.449438202247191,
      "grad_norm": 0.22975341975688934,
      "learning_rate": 3.840909090909091e-05,
      "loss": 0.0171,
      "step": 164
    },
    {
      "epoch": 2.4794007490636703,
      "grad_norm": 0.2856428623199463,
      "learning_rate": 3.825757575757576e-05,
      "loss": 0.0182,
      "step": 166
    },
    {
      "epoch": 2.5093632958801497,
      "grad_norm": 0.22180147469043732,
      "learning_rate": 3.810606060606061e-05,
      "loss": 0.0164,
      "step": 168
    },
    {
      "epoch": 2.539325842696629,
      "grad_norm": 0.18585917353630066,
      "learning_rate": 3.795454545454545e-05,
      "loss": 0.0175,
      "step": 170
    },
    {
      "epoch": 2.5692883895131087,
      "grad_norm": 0.21161819994449615,
      "learning_rate": 3.7803030303030305e-05,
      "loss": 0.0149,
      "step": 172
    },
    {
      "epoch": 2.599250936329588,
      "grad_norm": 0.14073120057582855,
      "learning_rate": 3.765151515151516e-05,
      "loss": 0.0143,
      "step": 174
    },
    {
      "epoch": 2.629213483146067,
      "grad_norm": 0.1988099217414856,
      "learning_rate": 3.7500000000000003e-05,
      "loss": 0.0155,
      "step": 176
    },
    {
      "epoch": 2.6591760299625467,
      "grad_norm": 0.17829206585884094,
      "learning_rate": 3.734848484848485e-05,
      "loss": 0.0172,
      "step": 178
    },
    {
      "epoch": 2.689138576779026,
      "grad_norm": 0.25729623436927795,
      "learning_rate": 3.7196969696969695e-05,
      "loss": 0.0161,
      "step": 180
    },
    {
      "epoch": 2.7191011235955056,
      "grad_norm": 0.15630777180194855,
      "learning_rate": 3.704545454545455e-05,
      "loss": 0.0161,
      "step": 182
    },
    {
      "epoch": 2.749063670411985,
      "grad_norm": 0.10787078738212585,
      "learning_rate": 3.68939393939394e-05,
      "loss": 0.0139,
      "step": 184
    },
    {
      "epoch": 2.7790262172284645,
      "grad_norm": 0.11183971166610718,
      "learning_rate": 3.6742424242424246e-05,
      "loss": 0.0142,
      "step": 186
    },
    {
      "epoch": 2.808988764044944,
      "grad_norm": 0.3003017008304596,
      "learning_rate": 3.659090909090909e-05,
      "loss": 0.0201,
      "step": 188
    },
    {
      "epoch": 2.8389513108614235,
      "grad_norm": 0.15837068855762482,
      "learning_rate": 3.643939393939394e-05,
      "loss": 0.014,
      "step": 190
    },
    {
      "epoch": 2.8689138576779025,
      "grad_norm": 0.15499652922153473,
      "learning_rate": 3.628787878787879e-05,
      "loss": 0.0166,
      "step": 192
    },
    {
      "epoch": 2.898876404494382,
      "grad_norm": 0.166601300239563,
      "learning_rate": 3.613636363636364e-05,
      "loss": 0.0136,
      "step": 194
    },
    {
      "epoch": 2.9288389513108615,
      "grad_norm": 0.23306916654109955,
      "learning_rate": 3.598484848484849e-05,
      "loss": 0.0145,
      "step": 196
    },
    {
      "epoch": 2.958801498127341,
      "grad_norm": 0.23148281872272491,
      "learning_rate": 3.5833333333333335e-05,
      "loss": 0.0166,
      "step": 198
    },
    {
      "epoch": 2.98876404494382,
      "grad_norm": 0.2514810562133789,
      "learning_rate": 3.568181818181818e-05,
      "loss": 0.0197,
      "step": 200
    },
    {
      "epoch": 3.0149812734082397,
      "grad_norm": 0.15844318270683289,
      "learning_rate": 3.553030303030303e-05,
      "loss": 0.0127,
      "step": 202
    },
    {
      "epoch": 3.044943820224719,
      "grad_norm": 0.15709055960178375,
      "learning_rate": 3.537878787878788e-05,
      "loss": 0.0144,
      "step": 204
    },
    {
      "epoch": 3.0749063670411987,
      "grad_norm": 0.21092790365219116,
      "learning_rate": 3.522727272727273e-05,
      "loss": 0.0174,
      "step": 206
    },
    {
      "epoch": 3.1048689138576777,
      "grad_norm": 0.22406940162181854,
      "learning_rate": 3.507575757575758e-05,
      "loss": 0.0151,
      "step": 208
    },
    {
      "epoch": 3.134831460674157,
      "grad_norm": 0.17414535582065582,
      "learning_rate": 3.492424242424242e-05,
      "loss": 0.0159,
      "step": 210
    },
    {
      "epoch": 3.1647940074906367,
      "grad_norm": 0.1722804456949234,
      "learning_rate": 3.4772727272727276e-05,
      "loss": 0.0146,
      "step": 212
    },
    {
      "epoch": 3.194756554307116,
      "grad_norm": 0.26471957564353943,
      "learning_rate": 3.462121212121212e-05,
      "loss": 0.0157,
      "step": 214
    },
    {
      "epoch": 3.2247191011235956,
      "grad_norm": 0.13862167298793793,
      "learning_rate": 3.4469696969696974e-05,
      "loss": 0.0137,
      "step": 216
    },
    {
      "epoch": 3.254681647940075,
      "grad_norm": 0.24047966301441193,
      "learning_rate": 3.431818181818182e-05,
      "loss": 0.017,
      "step": 218
    },
    {
      "epoch": 3.284644194756554,
      "grad_norm": 0.1237376257777214,
      "learning_rate": 3.4166666666666666e-05,
      "loss": 0.0151,
      "step": 220
    },
    {
      "epoch": 3.3146067415730336,
      "grad_norm": 0.15304620563983917,
      "learning_rate": 3.401515151515152e-05,
      "loss": 0.0155,
      "step": 222
    },
    {
      "epoch": 3.344569288389513,
      "grad_norm": 0.15872317552566528,
      "learning_rate": 3.3863636363636364e-05,
      "loss": 0.0147,
      "step": 224
    },
    {
      "epoch": 3.3745318352059925,
      "grad_norm": 0.18952298164367676,
      "learning_rate": 3.371212121212121e-05,
      "loss": 0.0154,
      "step": 226
    },
    {
      "epoch": 3.404494382022472,
      "grad_norm": 0.21287596225738525,
      "learning_rate": 3.356060606060606e-05,
      "loss": 0.0166,
      "step": 228
    },
    {
      "epoch": 3.4344569288389515,
      "grad_norm": 0.19477762281894684,
      "learning_rate": 3.3409090909090915e-05,
      "loss": 0.0167,
      "step": 230
    },
    {
      "epoch": 3.464419475655431,
      "grad_norm": 0.1801714450120926,
      "learning_rate": 3.325757575757576e-05,
      "loss": 0.0145,
      "step": 232
    },
    {
      "epoch": 3.49438202247191,
      "grad_norm": 0.14180026948451996,
      "learning_rate": 3.310606060606061e-05,
      "loss": 0.0138,
      "step": 234
    },
    {
      "epoch": 3.5243445692883895,
      "grad_norm": 0.21012850105762482,
      "learning_rate": 3.295454545454545e-05,
      "loss": 0.0165,
      "step": 236
    },
    {
      "epoch": 3.554307116104869,
      "grad_norm": 0.1273059993982315,
      "learning_rate": 3.2803030303030305e-05,
      "loss": 0.0148,
      "step": 238
    },
    {
      "epoch": 3.5842696629213484,
      "grad_norm": 0.13745197653770447,
      "learning_rate": 3.265151515151516e-05,
      "loss": 0.0137,
      "step": 240
    },
    {
      "epoch": 3.6142322097378274,
      "grad_norm": 0.19960223138332367,
      "learning_rate": 3.2500000000000004e-05,
      "loss": 0.0178,
      "step": 242
    },
    {
      "epoch": 3.644194756554307,
      "grad_norm": 0.21453413367271423,
      "learning_rate": 3.234848484848485e-05,
      "loss": 0.0156,
      "step": 244
    },
    {
      "epoch": 3.6741573033707864,
      "grad_norm": 0.17091749608516693,
      "learning_rate": 3.2196969696969696e-05,
      "loss": 0.0157,
      "step": 246
    },
    {
      "epoch": 3.704119850187266,
      "grad_norm": 0.15684160590171814,
      "learning_rate": 3.204545454545455e-05,
      "loss": 0.0148,
      "step": 248
    },
    {
      "epoch": 3.7340823970037453,
      "grad_norm": 0.15323583781719208,
      "learning_rate": 3.18939393939394e-05,
      "loss": 0.015,
      "step": 250
    },
    {
      "epoch": 3.764044943820225,
      "grad_norm": 0.13383668661117554,
      "learning_rate": 3.174242424242425e-05,
      "loss": 0.0154,
      "step": 252
    },
    {
      "epoch": 3.7940074906367043,
      "grad_norm": 0.15054528415203094,
      "learning_rate": 3.159090909090909e-05,
      "loss": 0.0153,
      "step": 254
    },
    {
      "epoch": 3.8239700374531838,
      "grad_norm": 0.10618451982736588,
      "learning_rate": 3.143939393939394e-05,
      "loss": 0.0131,
      "step": 256
    },
    {
      "epoch": 3.853932584269663,
      "grad_norm": 0.20882849395275116,
      "learning_rate": 3.128787878787879e-05,
      "loss": 0.019,
      "step": 258
    },
    {
      "epoch": 3.8838951310861423,
      "grad_norm": 0.19676630198955536,
      "learning_rate": 3.113636363636364e-05,
      "loss": 0.0164,
      "step": 260
    },
    {
      "epoch": 3.9138576779026217,
      "grad_norm": 0.10724353790283203,
      "learning_rate": 3.098484848484849e-05,
      "loss": 0.0145,
      "step": 262
    },
    {
      "epoch": 3.943820224719101,
      "grad_norm": 0.11003574728965759,
      "learning_rate": 3.0833333333333335e-05,
      "loss": 0.0153,
      "step": 264
    },
    {
      "epoch": 3.9737827715355807,
      "grad_norm": 0.10973632335662842,
      "learning_rate": 3.068181818181818e-05,
      "loss": 0.0131,
      "step": 266
    },
    {
      "epoch": 4.0,
      "grad_norm": 0.21611331403255463,
      "learning_rate": 3.0530303030303034e-05,
      "loss": 0.0184,
      "step": 268
    },
    {
      "epoch": 4.0299625468164795,
      "grad_norm": 0.11275507509708405,
      "learning_rate": 3.037878787878788e-05,
      "loss": 0.0159,
      "step": 270
    },
    {
      "epoch": 4.059925093632959,
      "grad_norm": 0.182546004652977,
      "learning_rate": 3.0227272727272725e-05,
      "loss": 0.0143,
      "step": 272
    },
    {
      "epoch": 4.089887640449438,
      "grad_norm": 0.11307963728904724,
      "learning_rate": 3.0075757575757578e-05,
      "loss": 0.014,
      "step": 274
    },
    {
      "epoch": 4.119850187265918,
      "grad_norm": 0.13171932101249695,
      "learning_rate": 2.9924242424242427e-05,
      "loss": 0.0159,
      "step": 276
    },
    {
      "epoch": 4.149812734082397,
      "grad_norm": 0.2185821682214737,
      "learning_rate": 2.9772727272727273e-05,
      "loss": 0.0158,
      "step": 278
    },
    {
      "epoch": 4.179775280898877,
      "grad_norm": 0.1575690507888794,
      "learning_rate": 2.9621212121212122e-05,
      "loss": 0.0168,
      "step": 280
    },
    {
      "epoch": 4.209737827715355,
      "grad_norm": 0.1727718561887741,
      "learning_rate": 2.9469696969696968e-05,
      "loss": 0.0154,
      "step": 282
    },
    {
      "epoch": 4.239700374531835,
      "grad_norm": 0.13435468077659607,
      "learning_rate": 2.9318181818181817e-05,
      "loss": 0.0165,
      "step": 284
    },
    {
      "epoch": 4.269662921348314,
      "grad_norm": 0.15063047409057617,
      "learning_rate": 2.916666666666667e-05,
      "loss": 0.0173,
      "step": 286
    },
    {
      "epoch": 4.299625468164794,
      "grad_norm": 0.12659698724746704,
      "learning_rate": 2.901515151515152e-05,
      "loss": 0.0152,
      "step": 288
    },
    {
      "epoch": 4.329588014981273,
      "grad_norm": 0.19473259150981903,
      "learning_rate": 2.8863636363636365e-05,
      "loss": 0.016,
      "step": 290
    },
    {
      "epoch": 4.359550561797753,
      "grad_norm": 0.1952139437198639,
      "learning_rate": 2.8712121212121214e-05,
      "loss": 0.0164,
      "step": 292
    },
    {
      "epoch": 4.389513108614232,
      "grad_norm": 0.15907561779022217,
      "learning_rate": 2.856060606060606e-05,
      "loss": 0.0126,
      "step": 294
    },
    {
      "epoch": 4.419475655430712,
      "grad_norm": 0.197600319981575,
      "learning_rate": 2.8409090909090912e-05,
      "loss": 0.0166,
      "step": 296
    },
    {
      "epoch": 4.449438202247191,
      "grad_norm": 0.09373123943805695,
      "learning_rate": 2.825757575757576e-05,
      "loss": 0.016,
      "step": 298
    },
    {
      "epoch": 4.479400749063671,
      "grad_norm": 0.14290565252304077,
      "learning_rate": 2.8106060606060607e-05,
      "loss": 0.0172,
      "step": 300
    },
    {
      "epoch": 4.50936329588015,
      "grad_norm": 0.1223093792796135,
      "learning_rate": 2.7954545454545457e-05,
      "loss": 0.0157,
      "step": 302
    },
    {
      "epoch": 4.539325842696629,
      "grad_norm": 0.11388926208019257,
      "learning_rate": 2.7803030303030303e-05,
      "loss": 0.015,
      "step": 304
    },
    {
      "epoch": 4.569288389513108,
      "grad_norm": 0.10850539058446884,
      "learning_rate": 2.7651515151515152e-05,
      "loss": 0.0166,
      "step": 306
    },
    {
      "epoch": 4.599250936329588,
      "grad_norm": 0.13963742554187775,
      "learning_rate": 2.7500000000000004e-05,
      "loss": 0.0158,
      "step": 308
    },
    {
      "epoch": 4.629213483146067,
      "grad_norm": 0.15747179090976715,
      "learning_rate": 2.734848484848485e-05,
      "loss": 0.0144,
      "step": 310
    },
    {
      "epoch": 4.659176029962547,
      "grad_norm": 0.1513000726699829,
      "learning_rate": 2.71969696969697e-05,
      "loss": 0.0164,
      "step": 312
    },
    {
      "epoch": 4.689138576779026,
      "grad_norm": 0.14622750878334045,
      "learning_rate": 2.7045454545454545e-05,
      "loss": 0.016,
      "step": 314
    },
    {
      "epoch": 4.719101123595506,
      "grad_norm": 0.1300605684518814,
      "learning_rate": 2.6893939393939394e-05,
      "loss": 0.0158,
      "step": 316
    },
    {
      "epoch": 4.749063670411985,
      "grad_norm": 0.1521446704864502,
      "learning_rate": 2.674242424242424e-05,
      "loss": 0.0139,
      "step": 318
    },
    {
      "epoch": 4.7790262172284645,
      "grad_norm": 0.13343091309070587,
      "learning_rate": 2.6590909090909093e-05,
      "loss": 0.0141,
      "step": 320
    },
    {
      "epoch": 4.808988764044944,
      "grad_norm": 0.1171831339597702,
      "learning_rate": 2.6439393939393942e-05,
      "loss": 0.0129,
      "step": 322
    },
    {
      "epoch": 4.8389513108614235,
      "grad_norm": 0.10482454299926758,
      "learning_rate": 2.6287878787878788e-05,
      "loss": 0.0115,
      "step": 324
    },
    {
      "epoch": 4.868913857677903,
      "grad_norm": 0.13335193693637848,
      "learning_rate": 2.6136363636363637e-05,
      "loss": 0.0155,
      "step": 326
    },
    {
      "epoch": 4.898876404494382,
      "grad_norm": 0.1327083259820938,
      "learning_rate": 2.5984848484848483e-05,
      "loss": 0.0154,
      "step": 328
    },
    {
      "epoch": 4.928838951310862,
      "grad_norm": 0.15616530179977417,
      "learning_rate": 2.5833333333333336e-05,
      "loss": 0.0186,
      "step": 330
    },
    {
      "epoch": 4.9588014981273405,
      "grad_norm": 0.1196502074599266,
      "learning_rate": 2.5681818181818185e-05,
      "loss": 0.0151,
      "step": 332
    },
    {
      "epoch": 4.98876404494382,
      "grad_norm": 0.11737090349197388,
      "learning_rate": 2.553030303030303e-05,
      "loss": 0.013,
      "step": 334
    },
    {
      "epoch": 5.01498127340824,
      "grad_norm": 0.14445465803146362,
      "learning_rate": 2.537878787878788e-05,
      "loss": 0.0134,
      "step": 336
    },
    {
      "epoch": 5.044943820224719,
      "grad_norm": 0.10734406858682632,
      "learning_rate": 2.5227272727272726e-05,
      "loss": 0.0138,
      "step": 338
    },
    {
      "epoch": 5.074906367041199,
      "grad_norm": 0.11528757214546204,
      "learning_rate": 2.5075757575757575e-05,
      "loss": 0.013,
      "step": 340
    },
    {
      "epoch": 5.104868913857678,
      "grad_norm": 0.17952029407024384,
      "learning_rate": 2.4924242424242424e-05,
      "loss": 0.0152,
      "step": 342
    },
    {
      "epoch": 5.134831460674158,
      "grad_norm": 0.12444639205932617,
      "learning_rate": 2.4772727272727277e-05,
      "loss": 0.0148,
      "step": 344
    },
    {
      "epoch": 5.164794007490637,
      "grad_norm": 0.12819650769233704,
      "learning_rate": 2.4621212121212123e-05,
      "loss": 0.0146,
      "step": 346
    },
    {
      "epoch": 5.194756554307116,
      "grad_norm": 0.13396833837032318,
      "learning_rate": 2.4469696969696972e-05,
      "loss": 0.0175,
      "step": 348
    },
    {
      "epoch": 5.224719101123595,
      "grad_norm": 0.1286836415529251,
      "learning_rate": 2.431818181818182e-05,
      "loss": 0.0144,
      "step": 350
    },
    {
      "epoch": 5.254681647940075,
      "grad_norm": 0.12591804563999176,
      "learning_rate": 2.4166666666666667e-05,
      "loss": 0.0167,
      "step": 352
    },
    {
      "epoch": 5.284644194756554,
      "grad_norm": 0.12226350605487823,
      "learning_rate": 2.4015151515151516e-05,
      "loss": 0.0133,
      "step": 354
    },
    {
      "epoch": 5.314606741573034,
      "grad_norm": 0.10504593700170517,
      "learning_rate": 2.3863636363636365e-05,
      "loss": 0.0135,
      "step": 356
    },
    {
      "epoch": 5.344569288389513,
      "grad_norm": 0.1078866645693779,
      "learning_rate": 2.3712121212121214e-05,
      "loss": 0.0147,
      "step": 358
    },
    {
      "epoch": 5.3745318352059925,
      "grad_norm": 0.28363969922065735,
      "learning_rate": 2.356060606060606e-05,
      "loss": 0.0156,
      "step": 360
    },
    {
      "epoch": 5.404494382022472,
      "grad_norm": 0.1177087053656578,
      "learning_rate": 2.340909090909091e-05,
      "loss": 0.0138,
      "step": 362
    },
    {
      "epoch": 5.4344569288389515,
      "grad_norm": 0.16584421694278717,
      "learning_rate": 2.325757575757576e-05,
      "loss": 0.0179,
      "step": 364
    },
    {
      "epoch": 5.464419475655431,
      "grad_norm": 0.10715971887111664,
      "learning_rate": 2.3106060606060605e-05,
      "loss": 0.0144,
      "step": 366
    },
    {
      "epoch": 5.49438202247191,
      "grad_norm": 0.11904123425483704,
      "learning_rate": 2.2954545454545457e-05,
      "loss": 0.0133,
      "step": 368
    },
    {
      "epoch": 5.52434456928839,
      "grad_norm": 0.19020037353038788,
      "learning_rate": 2.2803030303030303e-05,
      "loss": 0.0178,
      "step": 370
    },
    {
      "epoch": 5.554307116104869,
      "grad_norm": 0.1367521733045578,
      "learning_rate": 2.2651515151515152e-05,
      "loss": 0.0155,
      "step": 372
    },
    {
      "epoch": 5.584269662921348,
      "grad_norm": 0.12585178017616272,
      "learning_rate": 2.25e-05,
      "loss": 0.0143,
      "step": 374
    },
    {
      "epoch": 5.614232209737827,
      "grad_norm": 0.20084144175052643,
      "learning_rate": 2.234848484848485e-05,
      "loss": 0.0178,
      "step": 376
    },
    {
      "epoch": 5.644194756554307,
      "grad_norm": 0.12197957187891006,
      "learning_rate": 2.21969696969697e-05,
      "loss": 0.0142,
      "step": 378
    },
    {
      "epoch": 5.674157303370786,
      "grad_norm": 0.11580586433410645,
      "learning_rate": 2.2045454545454546e-05,
      "loss": 0.0158,
      "step": 380
    },
    {
      "epoch": 5.704119850187266,
      "grad_norm": 0.1596754640340805,
      "learning_rate": 2.1893939393939395e-05,
      "loss": 0.0153,
      "step": 382
    },
    {
      "epoch": 5.734082397003745,
      "grad_norm": 0.11820989102125168,
      "learning_rate": 2.1742424242424244e-05,
      "loss": 0.0133,
      "step": 384
    },
    {
      "epoch": 5.764044943820225,
      "grad_norm": 0.1528993546962738,
      "learning_rate": 2.1590909090909093e-05,
      "loss": 0.0131,
      "step": 386
    },
    {
      "epoch": 5.794007490636704,
      "grad_norm": 0.15335053205490112,
      "learning_rate": 2.143939393939394e-05,
      "loss": 0.0188,
      "step": 388
    },
    {
      "epoch": 5.823970037453184,
      "grad_norm": 0.13823996484279633,
      "learning_rate": 2.128787878787879e-05,
      "loss": 0.0156,
      "step": 390
    },
    {
      "epoch": 5.853932584269663,
      "grad_norm": 0.11527912318706512,
      "learning_rate": 2.1136363636363638e-05,
      "loss": 0.0156,
      "step": 392
    },
    {
      "epoch": 5.883895131086143,
      "grad_norm": 0.09302780777215958,
      "learning_rate": 2.0984848484848483e-05,
      "loss": 0.0144,
      "step": 394
    },
    {
      "epoch": 5.913857677902621,
      "grad_norm": 0.1214398592710495,
      "learning_rate": 2.0833333333333336e-05,
      "loss": 0.0137,
      "step": 396
    },
    {
      "epoch": 5.943820224719101,
      "grad_norm": 0.17180432379245758,
      "learning_rate": 2.0681818181818182e-05,
      "loss": 0.0158,
      "step": 398
    },
    {
      "epoch": 5.97378277153558,
      "grad_norm": 0.13470757007598877,
      "learning_rate": 2.053030303030303e-05,
      "loss": 0.016,
      "step": 400
    },
    {
      "epoch": 6.0,
      "grad_norm": 0.12095994502305984,
      "learning_rate": 2.037878787878788e-05,
      "loss": 0.0136,
      "step": 402
    },
    {
      "epoch": 6.0299625468164795,
      "grad_norm": 0.13292968273162842,
      "learning_rate": 2.022727272727273e-05,
      "loss": 0.0146,
      "step": 404
    },
    {
      "epoch": 6.059925093632959,
      "grad_norm": 0.143639475107193,
      "learning_rate": 2.0075757575757575e-05,
      "loss": 0.0159,
      "step": 406
    },
    {
      "epoch": 6.089887640449438,
      "grad_norm": 0.13313822448253632,
      "learning_rate": 1.9924242424242425e-05,
      "loss": 0.017,
      "step": 408
    },
    {
      "epoch": 6.119850187265918,
      "grad_norm": 0.17898374795913696,
      "learning_rate": 1.9772727272727274e-05,
      "loss": 0.0162,
      "step": 410
    },
    {
      "epoch": 6.149812734082397,
      "grad_norm": 0.12149132043123245,
      "learning_rate": 1.962121212121212e-05,
      "loss": 0.0143,
      "step": 412
    },
    {
      "epoch": 6.179775280898877,
      "grad_norm": 0.12255310267210007,
      "learning_rate": 1.9469696969696972e-05,
      "loss": 0.0148,
      "step": 414
    },
    {
      "epoch": 6.209737827715355,
      "grad_norm": 0.17352508008480072,
      "learning_rate": 1.9318181818181818e-05,
      "loss": 0.0153,
      "step": 416
    },
    {
      "epoch": 6.239700374531835,
      "grad_norm": 0.15836010873317719,
      "learning_rate": 1.9166666666666667e-05,
      "loss": 0.0152,
      "step": 418
    },
    {
      "epoch": 6.269662921348314,
      "grad_norm": 0.13117386400699615,
      "learning_rate": 1.9015151515151516e-05,
      "loss": 0.0131,
      "step": 420
    },
    {
      "epoch": 6.299625468164794,
      "grad_norm": 0.09744709730148315,
      "learning_rate": 1.8863636363636362e-05,
      "loss": 0.0133,
      "step": 422
    },
    {
      "epoch": 6.329588014981273,
      "grad_norm": 0.11249203234910965,
      "learning_rate": 1.8712121212121215e-05,
      "loss": 0.0137,
      "step": 424
    },
    {
      "epoch": 6.359550561797753,
      "grad_norm": 0.14073345065116882,
      "learning_rate": 1.856060606060606e-05,
      "loss": 0.0158,
      "step": 426
    },
    {
      "epoch": 6.389513108614232,
      "grad_norm": 0.19942159950733185,
      "learning_rate": 1.840909090909091e-05,
      "loss": 0.0179,
      "step": 428
    },
    {
      "epoch": 6.419475655430712,
      "grad_norm": 0.12104730308055878,
      "learning_rate": 1.825757575757576e-05,
      "loss": 0.0143,
      "step": 430
    },
    {
      "epoch": 6.449438202247191,
      "grad_norm": 0.12874731421470642,
      "learning_rate": 1.810606060606061e-05,
      "loss": 0.0147,
      "step": 432
    },
    {
      "epoch": 6.479400749063671,
      "grad_norm": 0.09665031731128693,
      "learning_rate": 1.7954545454545454e-05,
      "loss": 0.0126,
      "step": 434
    },
    {
      "epoch": 6.50936329588015,
      "grad_norm": 0.22135666012763977,
      "learning_rate": 1.7803030303030303e-05,
      "loss": 0.0162,
      "step": 436
    },
    {
      "epoch": 6.539325842696629,
      "grad_norm": 0.12249334901571274,
      "learning_rate": 1.7651515151515153e-05,
      "loss": 0.013,
      "step": 438
    },
    {
      "epoch": 6.569288389513108,
      "grad_norm": 0.15929952263832092,
      "learning_rate": 1.75e-05,
      "loss": 0.0144,
      "step": 440
    },
    {
      "epoch": 6.599250936329588,
      "grad_norm": 0.116636261343956,
      "learning_rate": 1.734848484848485e-05,
      "loss": 0.0136,
      "step": 442
    },
    {
      "epoch": 6.629213483146067,
      "grad_norm": 0.26052579283714294,
      "learning_rate": 1.7196969696969697e-05,
      "loss": 0.0167,
      "step": 444
    },
    {
      "epoch": 6.659176029962547,
      "grad_norm": 0.16209331154823303,
      "learning_rate": 1.7045454545454546e-05,
      "loss": 0.0152,
      "step": 446
    },
    {
      "epoch": 6.689138576779026,
      "grad_norm": 0.11612333357334137,
      "learning_rate": 1.6893939393939395e-05,
      "loss": 0.0153,
      "step": 448
    },
    {
      "epoch": 6.719101123595506,
      "grad_norm": 0.092486672103405,
      "learning_rate": 1.674242424242424e-05,
      "loss": 0.0133,
      "step": 450
    },
    {
      "epoch": 6.749063670411985,
      "grad_norm": 0.10298772156238556,
      "learning_rate": 1.6590909090909094e-05,
      "loss": 0.0136,
      "step": 452
    },
    {
      "epoch": 6.7790262172284645,
      "grad_norm": 0.13439835608005524,
      "learning_rate": 1.643939393939394e-05,
      "loss": 0.016,
      "step": 454
    },
    {
      "epoch": 6.808988764044944,
      "grad_norm": 0.14403583109378815,
      "learning_rate": 1.628787878787879e-05,
      "loss": 0.014,
      "step": 456
    },
    {
      "epoch": 6.8389513108614235,
      "grad_norm": 0.1841178834438324,
      "learning_rate": 1.6136363636363638e-05,
      "loss": 0.0149,
      "step": 458
    },
    {
      "epoch": 6.868913857677903,
      "grad_norm": 0.09390496462583542,
      "learning_rate": 1.5984848484848487e-05,
      "loss": 0.0141,
      "step": 460
    },
    {
      "epoch": 6.898876404494382,
      "grad_norm": 0.16365155577659607,
      "learning_rate": 1.5833333333333333e-05,
      "loss": 0.0157,
      "step": 462
    },
    {
      "epoch": 6.928838951310862,
      "grad_norm": 0.1577712446451187,
      "learning_rate": 1.5681818181818182e-05,
      "loss": 0.0141,
      "step": 464
    },
    {
      "epoch": 6.9588014981273405,
      "grad_norm": 0.173916295170784,
      "learning_rate": 1.553030303030303e-05,
      "loss": 0.0157,
      "step": 466
    },
    {
      "epoch": 6.98876404494382,
      "grad_norm": 0.16699424386024475,
      "learning_rate": 1.5378787878787877e-05,
      "loss": 0.0169,
      "step": 468
    },
    {
      "epoch": 7.01498127340824,
      "grad_norm": 0.12708672881126404,
      "learning_rate": 1.5227272727272728e-05,
      "loss": 0.0142,
      "step": 470
    },
    {
      "epoch": 7.044943820224719,
      "grad_norm": 0.15440689027309418,
      "learning_rate": 1.5075757575757576e-05,
      "loss": 0.0158,
      "step": 472
    },
    {
      "epoch": 7.074906367041199,
      "grad_norm": 0.1230991929769516,
      "learning_rate": 1.4924242424242423e-05,
      "loss": 0.0136,
      "step": 474
    },
    {
      "epoch": 7.104868913857678,
      "grad_norm": 0.21653351187705994,
      "learning_rate": 1.4772727272727274e-05,
      "loss": 0.0159,
      "step": 476
    },
    {
      "epoch": 7.134831460674158,
      "grad_norm": 0.1883053332567215,
      "learning_rate": 1.4621212121212122e-05,
      "loss": 0.0155,
      "step": 478
    },
    {
      "epoch": 7.164794007490637,
      "grad_norm": 0.22849740087985992,
      "learning_rate": 1.446969696969697e-05,
      "loss": 0.0171,
      "step": 480
    },
    {
      "epoch": 7.194756554307116,
      "grad_norm": 0.12744282186031342,
      "learning_rate": 1.431818181818182e-05,
      "loss": 0.0142,
      "step": 482
    },
    {
      "epoch": 7.224719101123595,
      "grad_norm": 0.09133846312761307,
      "learning_rate": 1.4166666666666668e-05,
      "loss": 0.0133,
      "step": 484
    },
    {
      "epoch": 7.254681647940075,
      "grad_norm": 0.13602511584758759,
      "learning_rate": 1.4015151515151515e-05,
      "loss": 0.014,
      "step": 486
    },
    {
      "epoch": 7.284644194756554,
      "grad_norm": 0.13092727959156036,
      "learning_rate": 1.3863636363636364e-05,
      "loss": 0.0149,
      "step": 488
    },
    {
      "epoch": 7.314606741573034,
      "grad_norm": 0.12762710452079773,
      "learning_rate": 1.3712121212121212e-05,
      "loss": 0.0143,
      "step": 490
    },
    {
      "epoch": 7.344569288389513,
      "grad_norm": 0.12416286021471024,
      "learning_rate": 1.3560606060606063e-05,
      "loss": 0.0145,
      "step": 492
    },
    {
      "epoch": 7.3745318352059925,
      "grad_norm": 0.145538792014122,
      "learning_rate": 1.340909090909091e-05,
      "loss": 0.0145,
      "step": 494
    },
    {
      "epoch": 7.404494382022472,
      "grad_norm": 0.11264869570732117,
      "learning_rate": 1.3257575757575758e-05,
      "loss": 0.0156,
      "step": 496
    },
    {
      "epoch": 7.4344569288389515,
      "grad_norm": 0.10543698072433472,
      "learning_rate": 1.3106060606060607e-05,
      "loss": 0.0141,
      "step": 498
    },
    {
      "epoch": 7.464419475655431,
      "grad_norm": 0.1221228539943695,
      "learning_rate": 1.2954545454545455e-05,
      "loss": 0.0146,
      "step": 500
    },
    {
      "epoch": 7.49438202247191,
      "grad_norm": 0.12155947834253311,
      "learning_rate": 1.2803030303030302e-05,
      "loss": 0.0141,
      "step": 502
    },
    {
      "epoch": 7.52434456928839,
      "grad_norm": 0.12504641711711884,
      "learning_rate": 1.2651515151515153e-05,
      "loss": 0.0128,
      "step": 504
    },
    {
      "epoch": 7.554307116104869,
      "grad_norm": 0.11169607192277908,
      "learning_rate": 1.25e-05,
      "loss": 0.0128,
      "step": 506
    },
    {
      "epoch": 7.584269662921348,
      "grad_norm": 0.1285315901041031,
      "learning_rate": 1.234848484848485e-05,
      "loss": 0.0161,
      "step": 508
    },
    {
      "epoch": 7.614232209737827,
      "grad_norm": 0.10635136067867279,
      "learning_rate": 1.2196969696969697e-05,
      "loss": 0.0149,
      "step": 510
    },
    {
      "epoch": 7.644194756554307,
      "grad_norm": 0.11686955392360687,
      "learning_rate": 1.2045454545454547e-05,
      "loss": 0.0138,
      "step": 512
    },
    {
      "epoch": 7.674157303370786,
      "grad_norm": 0.08601470291614532,
      "learning_rate": 1.1893939393939394e-05,
      "loss": 0.0131,
      "step": 514
    },
    {
      "epoch": 7.704119850187266,
      "grad_norm": 0.0880078673362732,
      "learning_rate": 1.1742424242424243e-05,
      "loss": 0.0131,
      "step": 516
    },
    {
      "epoch": 7.734082397003745,
      "grad_norm": 0.15093624591827393,
      "learning_rate": 1.159090909090909e-05,
      "loss": 0.0162,
      "step": 518
    },
    {
      "epoch": 7.764044943820225,
      "grad_norm": 0.11285064369440079,
      "learning_rate": 1.143939393939394e-05,
      "loss": 0.015,
      "step": 520
    },
    {
      "epoch": 7.794007490636704,
      "grad_norm": 0.10577835887670517,
      "learning_rate": 1.128787878787879e-05,
      "loss": 0.0132,
      "step": 522
    },
    {
      "epoch": 7.823970037453184,
      "grad_norm": 0.19718210399150848,
      "learning_rate": 1.1136363636363637e-05,
      "loss": 0.0143,
      "step": 524
    },
    {
      "epoch": 7.853932584269663,
      "grad_norm": 0.13446953892707825,
      "learning_rate": 1.0984848484848486e-05,
      "loss": 0.0157,
      "step": 526
    },
    {
      "epoch": 7.883895131086143,
      "grad_norm": 0.12463881075382233,
      "learning_rate": 1.0833333333333334e-05,
      "loss": 0.015,
      "step": 528
    },
    {
      "epoch": 7.913857677902621,
      "grad_norm": 0.13918359577655792,
      "learning_rate": 1.0681818181818181e-05,
      "loss": 0.0124,
      "step": 530
    },
    {
      "epoch": 7.943820224719101,
      "grad_norm": 0.1380302906036377,
      "learning_rate": 1.053030303030303e-05,
      "loss": 0.014,
      "step": 532
    },
    {
      "epoch": 7.97378277153558,
      "grad_norm": 0.14899957180023193,
      "learning_rate": 1.037878787878788e-05,
      "loss": 0.0175,
      "step": 534
    },
    {
      "epoch": 8.0,
      "grad_norm": 0.11646629869937897,
      "learning_rate": 1.0227272727272729e-05,
      "loss": 0.0145,
      "step": 536
    },
    {
      "epoch": 8.02996254681648,
      "grad_norm": 0.14022769033908844,
      "learning_rate": 1.0075757575757576e-05,
      "loss": 0.0121,
      "step": 538
    },
    {
      "epoch": 8.059925093632959,
      "grad_norm": 0.15053826570510864,
      "learning_rate": 9.924242424242425e-06,
      "loss": 0.0157,
      "step": 540
    },
    {
      "epoch": 8.089887640449438,
      "grad_norm": 0.11895288527011871,
      "learning_rate": 9.772727272727273e-06,
      "loss": 0.0141,
      "step": 542
    },
    {
      "epoch": 8.119850187265918,
      "grad_norm": 0.11707016080617905,
      "learning_rate": 9.62121212121212e-06,
      "loss": 0.0145,
      "step": 544
    },
    {
      "epoch": 8.149812734082397,
      "grad_norm": 0.10449573397636414,
      "learning_rate": 9.46969696969697e-06,
      "loss": 0.0146,
      "step": 546
    },
    {
      "epoch": 8.179775280898877,
      "grad_norm": 0.13925935328006744,
      "learning_rate": 9.318181818181819e-06,
      "loss": 0.014,
      "step": 548
    },
    {
      "epoch": 8.209737827715356,
      "grad_norm": 0.12868592143058777,
      "learning_rate": 9.166666666666666e-06,
      "loss": 0.0138,
      "step": 550
    },
    {
      "epoch": 8.239700374531836,
      "grad_norm": 0.11764951050281525,
      "learning_rate": 9.015151515151516e-06,
      "loss": 0.0147,
      "step": 552
    },
    {
      "epoch": 8.269662921348315,
      "grad_norm": 0.21383918821811676,
      "learning_rate": 8.863636363636365e-06,
      "loss": 0.0134,
      "step": 554
    },
    {
      "epoch": 8.299625468164795,
      "grad_norm": 0.14592906832695007,
      "learning_rate": 8.712121212121212e-06,
      "loss": 0.0151,
      "step": 556
    },
    {
      "epoch": 8.329588014981274,
      "grad_norm": 0.1377759724855423,
      "learning_rate": 8.56060606060606e-06,
      "loss": 0.0138,
      "step": 558
    },
    {
      "epoch": 8.359550561797754,
      "grad_norm": 0.15097083151340485,
      "learning_rate": 8.409090909090909e-06,
      "loss": 0.013,
      "step": 560
    },
    {
      "epoch": 8.389513108614231,
      "grad_norm": 0.1862707883119583,
      "learning_rate": 8.257575757575758e-06,
      "loss": 0.0119,
      "step": 562
    },
    {
      "epoch": 8.41947565543071,
      "grad_norm": 0.19083456695079803,
      "learning_rate": 8.106060606060606e-06,
      "loss": 0.0175,
      "step": 564
    },
    {
      "epoch": 8.44943820224719,
      "grad_norm": 0.18206746876239777,
      "learning_rate": 7.954545454545455e-06,
      "loss": 0.0153,
      "step": 566
    },
    {
      "epoch": 8.47940074906367,
      "grad_norm": 0.1560777872800827,
      "learning_rate": 7.803030303030304e-06,
      "loss": 0.0142,
      "step": 568
    },
    {
      "epoch": 8.50936329588015,
      "grad_norm": 0.11597474664449692,
      "learning_rate": 7.651515151515152e-06,
      "loss": 0.015,
      "step": 570
    },
    {
      "epoch": 8.539325842696629,
      "grad_norm": 0.14232860505580902,
      "learning_rate": 7.5e-06,
      "loss": 0.0154,
      "step": 572
    },
    {
      "epoch": 8.569288389513108,
      "grad_norm": 0.12590251863002777,
      "learning_rate": 7.3484848484848486e-06,
      "loss": 0.0141,
      "step": 574
    },
    {
      "epoch": 8.599250936329588,
      "grad_norm": 0.161521315574646,
      "learning_rate": 7.196969696969698e-06,
      "loss": 0.0152,
      "step": 576
    },
    {
      "epoch": 8.629213483146067,
      "grad_norm": 0.12818320095539093,
      "learning_rate": 7.045454545454545e-06,
      "loss": 0.0133,
      "step": 578
    },
    {
      "epoch": 8.659176029962547,
      "grad_norm": 0.10711938887834549,
      "learning_rate": 6.8939393939393945e-06,
      "loss": 0.0143,
      "step": 580
    },
    {
      "epoch": 8.689138576779026,
      "grad_norm": 0.11545701324939728,
      "learning_rate": 6.742424242424243e-06,
      "loss": 0.0153,
      "step": 582
    },
    {
      "epoch": 8.719101123595506,
      "grad_norm": 0.15045291185379028,
      "learning_rate": 6.59090909090909e-06,
      "loss": 0.0133,
      "step": 584
    },
    {
      "epoch": 8.749063670411985,
      "grad_norm": 0.1033000648021698,
      "learning_rate": 6.43939393939394e-06,
      "loss": 0.0145,
      "step": 586
    },
    {
      "epoch": 8.779026217228465,
      "grad_norm": 0.16855841875076294,
      "learning_rate": 6.287878787878789e-06,
      "loss": 0.0152,
      "step": 588
    },
    {
      "epoch": 8.808988764044944,
      "grad_norm": 0.11125820875167847,
      "learning_rate": 6.136363636363636e-06,
      "loss": 0.0143,
      "step": 590
    },
    {
      "epoch": 8.838951310861423,
      "grad_norm": 0.11163821816444397,
      "learning_rate": 5.984848484848485e-06,
      "loss": 0.0143,
      "step": 592
    },
    {
      "epoch": 8.868913857677903,
      "grad_norm": 0.11566553264856339,
      "learning_rate": 5.833333333333334e-06,
      "loss": 0.0142,
      "step": 594
    },
    {
      "epoch": 8.898876404494382,
      "grad_norm": 0.11087555438280106,
      "learning_rate": 5.681818181818182e-06,
      "loss": 0.0148,
      "step": 596
    },
    {
      "epoch": 8.928838951310862,
      "grad_norm": 0.1234087273478508,
      "learning_rate": 5.530303030303031e-06,
      "loss": 0.0156,
      "step": 598
    },
    {
      "epoch": 8.958801498127341,
      "grad_norm": 0.18318650126457214,
      "learning_rate": 5.378787878787879e-06,
      "loss": 0.0145,
      "step": 600
    },
    {
      "epoch": 8.98876404494382,
      "grad_norm": 0.09113040566444397,
      "learning_rate": 5.2272727272727274e-06,
      "loss": 0.0132,
      "step": 602
    },
    {
      "epoch": 9.014981273408239,
      "grad_norm": 0.10578633099794388,
      "learning_rate": 5.075757575757576e-06,
      "loss": 0.0143,
      "step": 604
    },
    {
      "epoch": 9.044943820224718,
      "grad_norm": 0.15693627297878265,
      "learning_rate": 4.924242424242424e-06,
      "loss": 0.015,
      "step": 606
    },
    {
      "epoch": 9.074906367041198,
      "grad_norm": 0.1018158420920372,
      "learning_rate": 4.772727272727273e-06,
      "loss": 0.0139,
      "step": 608
    },
    {
      "epoch": 9.104868913857677,
      "grad_norm": 0.17492976784706116,
      "learning_rate": 4.621212121212122e-06,
      "loss": 0.0156,
      "step": 610
    },
    {
      "epoch": 9.134831460674157,
      "grad_norm": 0.11221963167190552,
      "learning_rate": 4.46969696969697e-06,
      "loss": 0.0152,
      "step": 612
    },
    {
      "epoch": 9.164794007490636,
      "grad_norm": 0.12558336555957794,
      "learning_rate": 4.3181818181818185e-06,
      "loss": 0.0143,
      "step": 614
    },
    {
      "epoch": 9.194756554307116,
      "grad_norm": 0.11668603867292404,
      "learning_rate": 4.166666666666667e-06,
      "loss": 0.0127,
      "step": 616
    },
    {
      "epoch": 9.224719101123595,
      "grad_norm": 0.14164716005325317,
      "learning_rate": 4.015151515151515e-06,
      "loss": 0.0155,
      "step": 618
    },
    {
      "epoch": 9.254681647940075,
      "grad_norm": 0.12440814077854156,
      "learning_rate": 3.863636363636364e-06,
      "loss": 0.0134,
      "step": 620
    },
    {
      "epoch": 9.284644194756554,
      "grad_norm": 0.14497360587120056,
      "learning_rate": 3.7121212121212124e-06,
      "loss": 0.0152,
      "step": 622
    },
    {
      "epoch": 9.314606741573034,
      "grad_norm": 0.13429833948612213,
      "learning_rate": 3.5606060606060608e-06,
      "loss": 0.015,
      "step": 624
    },
    {
      "epoch": 9.344569288389513,
      "grad_norm": 0.11680711805820465,
      "learning_rate": 3.409090909090909e-06,
      "loss": 0.0141,
      "step": 626
    },
    {
      "epoch": 9.374531835205993,
      "grad_norm": 0.141137033700943,
      "learning_rate": 3.257575757575758e-06,
      "loss": 0.0137,
      "step": 628
    },
    {
      "epoch": 9.404494382022472,
      "grad_norm": 0.11425523459911346,
      "learning_rate": 3.106060606060606e-06,
      "loss": 0.0153,
      "step": 630
    },
    {
      "epoch": 9.434456928838951,
      "grad_norm": 0.175795778632164,
      "learning_rate": 2.9545454545454547e-06,
      "loss": 0.0137,
      "step": 632
    },
    {
      "epoch": 9.464419475655431,
      "grad_norm": 0.21211205422878265,
      "learning_rate": 2.803030303030303e-06,
      "loss": 0.0145,
      "step": 634
    },
    {
      "epoch": 9.49438202247191,
      "grad_norm": 0.12809644639492035,
      "learning_rate": 2.651515151515152e-06,
      "loss": 0.0137,
      "step": 636
    },
    {
      "epoch": 9.52434456928839,
      "grad_norm": 0.1471203863620758,
      "learning_rate": 2.5e-06,
      "loss": 0.0122,
      "step": 638
    },
    {
      "epoch": 9.55430711610487,
      "grad_norm": 0.11589685082435608,
      "learning_rate": 2.3484848484848486e-06,
      "loss": 0.0137,
      "step": 640
    },
    {
      "epoch": 9.584269662921349,
      "grad_norm": 0.11537714302539825,
      "learning_rate": 2.196969696969697e-06,
      "loss": 0.0147,
      "step": 642
    },
    {
      "epoch": 9.614232209737828,
      "grad_norm": 0.10002223402261734,
      "learning_rate": 2.0454545454545457e-06,
      "loss": 0.0139,
      "step": 644
    },
    {
      "epoch": 9.644194756554308,
      "grad_norm": 0.1463649868965149,
      "learning_rate": 1.8939393939393941e-06,
      "loss": 0.0138,
      "step": 646
    },
    {
      "epoch": 9.674157303370787,
      "grad_norm": 0.15200403332710266,
      "learning_rate": 1.7424242424242427e-06,
      "loss": 0.0127,
      "step": 648
    },
    {
      "epoch": 9.704119850187267,
      "grad_norm": 0.1294555813074112,
      "learning_rate": 1.5909090909090908e-06,
      "loss": 0.013,
      "step": 650
    },
    {
      "epoch": 9.734082397003746,
      "grad_norm": 0.12834538519382477,
      "learning_rate": 1.4393939393939396e-06,
      "loss": 0.014,
      "step": 652
    },
    {
      "epoch": 9.764044943820224,
      "grad_norm": 0.16599954664707184,
      "learning_rate": 1.287878787878788e-06,
      "loss": 0.0153,
      "step": 654
    },
    {
      "epoch": 9.794007490636703,
      "grad_norm": 0.14348356425762177,
      "learning_rate": 1.1363636363636364e-06,
      "loss": 0.0145,
      "step": 656
    },
    {
      "epoch": 9.823970037453183,
      "grad_norm": 0.1504441797733307,
      "learning_rate": 9.848484848484847e-07,
      "loss": 0.0148,
      "step": 658
    },
    {
      "epoch": 9.853932584269662,
      "grad_norm": 0.12484189122915268,
      "learning_rate": 8.333333333333333e-07,
      "loss": 0.0156,
      "step": 660
    },
    {
      "epoch": 9.883895131086142,
      "grad_norm": 0.17609864473342896,
      "learning_rate": 6.818181818181818e-07,
      "loss": 0.0154,
      "step": 662
    },
    {
      "epoch": 9.913857677902621,
      "grad_norm": 0.1843879073858261,
      "learning_rate": 5.303030303030304e-07,
      "loss": 0.0133,
      "step": 664
    },
    {
      "epoch": 9.9438202247191,
      "grad_norm": 0.145470529794693,
      "learning_rate": 3.787878787878788e-07,
      "loss": 0.0142,
      "step": 666
    },
    {
      "epoch": 9.97378277153558,
      "grad_norm": 0.13248123228549957,
      "learning_rate": 2.2727272727272726e-07,
      "loss": 0.0145,
      "step": 668
    },
    {
      "epoch": 10.0,
      "grad_norm": 0.18026700615882874,
      "learning_rate": 7.575757575757576e-08,
      "loss": 0.0139,
      "step": 670
    }
  ],
  "logging_steps": 2,
  "max_steps": 670,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 10,
  "save_steps": 20,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 1.804222263410688e+16,
  "train_batch_size": 1,
  "trial_name": null,
  "trial_params": null
}