{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 2.21606648199446,
"eval_steps": 500,
"global_step": 200,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.0110803324099723,
"grad_norm": 0.9041995406150818,
"learning_rate": 0.0001,
"loss": 2.5533,
"step": 1
},
{
"epoch": 0.0221606648199446,
"grad_norm": 0.9356410503387451,
"learning_rate": 9.949748743718594e-05,
"loss": 2.5542,
"step": 2
},
{
"epoch": 0.0332409972299169,
"grad_norm": 0.9131423830986023,
"learning_rate": 9.899497487437186e-05,
"loss": 2.5028,
"step": 3
},
{
"epoch": 0.0443213296398892,
"grad_norm": 0.9732369780540466,
"learning_rate": 9.84924623115578e-05,
"loss": 2.393,
"step": 4
},
{
"epoch": 0.055401662049861494,
"grad_norm": 0.9332369565963745,
"learning_rate": 9.798994974874372e-05,
"loss": 2.2448,
"step": 5
},
{
"epoch": 0.0664819944598338,
"grad_norm": 1.0083566904067993,
"learning_rate": 9.748743718592965e-05,
"loss": 2.1345,
"step": 6
},
{
"epoch": 0.07756232686980609,
"grad_norm": 0.8932923078536987,
"learning_rate": 9.698492462311559e-05,
"loss": 1.9912,
"step": 7
},
{
"epoch": 0.0886426592797784,
"grad_norm": 1.8232415914535522,
"learning_rate": 9.64824120603015e-05,
"loss": 1.844,
"step": 8
},
{
"epoch": 0.0997229916897507,
"grad_norm": 0.8558672070503235,
"learning_rate": 9.597989949748745e-05,
"loss": 1.7406,
"step": 9
},
{
"epoch": 0.11080332409972299,
"grad_norm": 0.7986319661140442,
"learning_rate": 9.547738693467337e-05,
"loss": 1.636,
"step": 10
},
{
"epoch": 0.12188365650969529,
"grad_norm": 0.8156765699386597,
"learning_rate": 9.49748743718593e-05,
"loss": 1.6183,
"step": 11
},
{
"epoch": 0.1329639889196676,
"grad_norm": 0.7248062491416931,
"learning_rate": 9.447236180904523e-05,
"loss": 1.57,
"step": 12
},
{
"epoch": 0.1440443213296399,
"grad_norm": 0.6793098449707031,
"learning_rate": 9.396984924623115e-05,
"loss": 1.4801,
"step": 13
},
{
"epoch": 0.15512465373961218,
"grad_norm": 0.566728949546814,
"learning_rate": 9.34673366834171e-05,
"loss": 1.4867,
"step": 14
},
{
"epoch": 0.16620498614958448,
"grad_norm": 0.5523749589920044,
"learning_rate": 9.296482412060302e-05,
"loss": 1.36,
"step": 15
},
{
"epoch": 0.1772853185595568,
"grad_norm": 0.5163611173629761,
"learning_rate": 9.246231155778895e-05,
"loss": 1.4472,
"step": 16
},
{
"epoch": 0.1883656509695291,
"grad_norm": 0.5090933442115784,
"learning_rate": 9.195979899497488e-05,
"loss": 1.4181,
"step": 17
},
{
"epoch": 0.1994459833795014,
"grad_norm": 0.5989904999732971,
"learning_rate": 9.14572864321608e-05,
"loss": 1.4414,
"step": 18
},
{
"epoch": 0.21052631578947367,
"grad_norm": 0.5392615795135498,
"learning_rate": 9.095477386934675e-05,
"loss": 1.3723,
"step": 19
},
{
"epoch": 0.22160664819944598,
"grad_norm": 0.6069510579109192,
"learning_rate": 9.045226130653267e-05,
"loss": 1.5178,
"step": 20
},
{
"epoch": 0.23268698060941828,
"grad_norm": 0.5653948187828064,
"learning_rate": 8.99497487437186e-05,
"loss": 1.2593,
"step": 21
},
{
"epoch": 0.24376731301939059,
"grad_norm": 0.5368112325668335,
"learning_rate": 8.944723618090453e-05,
"loss": 1.3471,
"step": 22
},
{
"epoch": 0.2548476454293629,
"grad_norm": 0.5639390349388123,
"learning_rate": 8.894472361809045e-05,
"loss": 1.4432,
"step": 23
},
{
"epoch": 0.2659279778393352,
"grad_norm": 0.5520769953727722,
"learning_rate": 8.84422110552764e-05,
"loss": 1.3426,
"step": 24
},
{
"epoch": 0.2770083102493075,
"grad_norm": 0.6374968886375427,
"learning_rate": 8.793969849246232e-05,
"loss": 1.5108,
"step": 25
},
{
"epoch": 0.2880886426592798,
"grad_norm": 0.6635875701904297,
"learning_rate": 8.743718592964825e-05,
"loss": 1.4302,
"step": 26
},
{
"epoch": 0.29916897506925205,
"grad_norm": 0.6544961929321289,
"learning_rate": 8.693467336683418e-05,
"loss": 1.4542,
"step": 27
},
{
"epoch": 0.31024930747922436,
"grad_norm": 0.5790326595306396,
"learning_rate": 8.64321608040201e-05,
"loss": 1.3176,
"step": 28
},
{
"epoch": 0.32132963988919666,
"grad_norm": 0.6066296696662903,
"learning_rate": 8.592964824120603e-05,
"loss": 1.355,
"step": 29
},
{
"epoch": 0.33240997229916897,
"grad_norm": 0.6547830700874329,
"learning_rate": 8.542713567839196e-05,
"loss": 1.3518,
"step": 30
},
{
"epoch": 0.34349030470914127,
"grad_norm": 0.6181479096412659,
"learning_rate": 8.49246231155779e-05,
"loss": 1.2498,
"step": 31
},
{
"epoch": 0.3545706371191136,
"grad_norm": 0.7298603057861328,
"learning_rate": 8.442211055276383e-05,
"loss": 1.2897,
"step": 32
},
{
"epoch": 0.3656509695290859,
"grad_norm": 0.7295474410057068,
"learning_rate": 8.391959798994975e-05,
"loss": 1.3034,
"step": 33
},
{
"epoch": 0.3767313019390582,
"grad_norm": 0.7686471343040466,
"learning_rate": 8.341708542713568e-05,
"loss": 1.3327,
"step": 34
},
{
"epoch": 0.3878116343490305,
"grad_norm": 0.7613719701766968,
"learning_rate": 8.291457286432161e-05,
"loss": 1.2834,
"step": 35
},
{
"epoch": 0.3988919667590028,
"grad_norm": 0.8543422222137451,
"learning_rate": 8.241206030150754e-05,
"loss": 1.3813,
"step": 36
},
{
"epoch": 0.4099722991689751,
"grad_norm": 0.9008685946464539,
"learning_rate": 8.190954773869348e-05,
"loss": 1.3528,
"step": 37
},
{
"epoch": 0.42105263157894735,
"grad_norm": 0.8236178159713745,
"learning_rate": 8.14070351758794e-05,
"loss": 1.3074,
"step": 38
},
{
"epoch": 0.43213296398891965,
"grad_norm": 0.8271133899688721,
"learning_rate": 8.090452261306533e-05,
"loss": 1.264,
"step": 39
},
{
"epoch": 0.44321329639889195,
"grad_norm": 0.8218770623207092,
"learning_rate": 8.040201005025126e-05,
"loss": 1.2871,
"step": 40
},
{
"epoch": 0.45429362880886426,
"grad_norm": 0.7466350197792053,
"learning_rate": 7.989949748743719e-05,
"loss": 1.2691,
"step": 41
},
{
"epoch": 0.46537396121883656,
"grad_norm": 0.7745970487594604,
"learning_rate": 7.939698492462313e-05,
"loss": 1.2766,
"step": 42
},
{
"epoch": 0.47645429362880887,
"grad_norm": 0.7701446413993835,
"learning_rate": 7.889447236180904e-05,
"loss": 1.294,
"step": 43
},
{
"epoch": 0.48753462603878117,
"grad_norm": 0.6183106899261475,
"learning_rate": 7.839195979899498e-05,
"loss": 1.3002,
"step": 44
},
{
"epoch": 0.4986149584487535,
"grad_norm": 0.5864247679710388,
"learning_rate": 7.788944723618091e-05,
"loss": 1.2178,
"step": 45
},
{
"epoch": 0.5096952908587258,
"grad_norm": 0.5792540907859802,
"learning_rate": 7.738693467336684e-05,
"loss": 1.2126,
"step": 46
},
{
"epoch": 0.5207756232686981,
"grad_norm": 0.6436092853546143,
"learning_rate": 7.688442211055277e-05,
"loss": 1.317,
"step": 47
},
{
"epoch": 0.5318559556786704,
"grad_norm": 0.5778934359550476,
"learning_rate": 7.638190954773869e-05,
"loss": 1.2303,
"step": 48
},
{
"epoch": 0.5429362880886427,
"grad_norm": 0.5667629837989807,
"learning_rate": 7.587939698492463e-05,
"loss": 1.239,
"step": 49
},
{
"epoch": 0.554016620498615,
"grad_norm": 0.579045832157135,
"learning_rate": 7.537688442211056e-05,
"loss": 1.1976,
"step": 50
},
{
"epoch": 0.5650969529085873,
"grad_norm": 0.5645351409912109,
"learning_rate": 7.487437185929649e-05,
"loss": 1.2011,
"step": 51
},
{
"epoch": 0.5761772853185596,
"grad_norm": 0.6186327934265137,
"learning_rate": 7.437185929648241e-05,
"loss": 1.26,
"step": 52
},
{
"epoch": 0.5872576177285319,
"grad_norm": 0.6174798011779785,
"learning_rate": 7.386934673366834e-05,
"loss": 1.2812,
"step": 53
},
{
"epoch": 0.5983379501385041,
"grad_norm": 0.6200773119926453,
"learning_rate": 7.336683417085427e-05,
"loss": 1.2493,
"step": 54
},
{
"epoch": 0.6094182825484764,
"grad_norm": 0.5842644572257996,
"learning_rate": 7.28643216080402e-05,
"loss": 1.2587,
"step": 55
},
{
"epoch": 0.6204986149584487,
"grad_norm": 0.5945526957511902,
"learning_rate": 7.236180904522614e-05,
"loss": 1.2105,
"step": 56
},
{
"epoch": 0.631578947368421,
"grad_norm": 0.6002059578895569,
"learning_rate": 7.185929648241206e-05,
"loss": 1.2404,
"step": 57
},
{
"epoch": 0.6426592797783933,
"grad_norm": 0.5362327098846436,
"learning_rate": 7.135678391959799e-05,
"loss": 1.3024,
"step": 58
},
{
"epoch": 0.6537396121883656,
"grad_norm": 0.5753970146179199,
"learning_rate": 7.085427135678392e-05,
"loss": 1.2076,
"step": 59
},
{
"epoch": 0.6648199445983379,
"grad_norm": 0.6161749958992004,
"learning_rate": 7.035175879396985e-05,
"loss": 1.1563,
"step": 60
},
{
"epoch": 0.6759002770083102,
"grad_norm": 0.578284502029419,
"learning_rate": 6.984924623115579e-05,
"loss": 1.2165,
"step": 61
},
{
"epoch": 0.6869806094182825,
"grad_norm": 0.6425468325614929,
"learning_rate": 6.93467336683417e-05,
"loss": 1.2972,
"step": 62
},
{
"epoch": 0.6980609418282548,
"grad_norm": 0.5888572335243225,
"learning_rate": 6.884422110552764e-05,
"loss": 1.1187,
"step": 63
},
{
"epoch": 0.7091412742382271,
"grad_norm": 0.6597657203674316,
"learning_rate": 6.834170854271357e-05,
"loss": 1.2613,
"step": 64
},
{
"epoch": 0.7202216066481995,
"grad_norm": 0.6418899893760681,
"learning_rate": 6.78391959798995e-05,
"loss": 1.1614,
"step": 65
},
{
"epoch": 0.7313019390581718,
"grad_norm": 0.5714394450187683,
"learning_rate": 6.733668341708544e-05,
"loss": 1.1293,
"step": 66
},
{
"epoch": 0.7423822714681441,
"grad_norm": 0.6186115145683289,
"learning_rate": 6.683417085427135e-05,
"loss": 1.1836,
"step": 67
},
{
"epoch": 0.7534626038781164,
"grad_norm": 0.5801815390586853,
"learning_rate": 6.633165829145729e-05,
"loss": 1.2035,
"step": 68
},
{
"epoch": 0.7645429362880887,
"grad_norm": 0.6500537991523743,
"learning_rate": 6.582914572864322e-05,
"loss": 1.2234,
"step": 69
},
{
"epoch": 0.775623268698061,
"grad_norm": 0.5818614363670349,
"learning_rate": 6.532663316582915e-05,
"loss": 1.2251,
"step": 70
},
{
"epoch": 0.7867036011080333,
"grad_norm": 0.6213693022727966,
"learning_rate": 6.482412060301508e-05,
"loss": 1.2281,
"step": 71
},
{
"epoch": 0.7977839335180056,
"grad_norm": 0.5809823870658875,
"learning_rate": 6.4321608040201e-05,
"loss": 1.18,
"step": 72
},
{
"epoch": 0.8088642659279779,
"grad_norm": 0.5340937376022339,
"learning_rate": 6.381909547738694e-05,
"loss": 1.1571,
"step": 73
},
{
"epoch": 0.8199445983379502,
"grad_norm": 0.5844171047210693,
"learning_rate": 6.331658291457287e-05,
"loss": 1.2212,
"step": 74
},
{
"epoch": 0.8310249307479224,
"grad_norm": 0.5754027366638184,
"learning_rate": 6.28140703517588e-05,
"loss": 1.2023,
"step": 75
},
{
"epoch": 0.8421052631578947,
"grad_norm": 0.6343287229537964,
"learning_rate": 6.231155778894473e-05,
"loss": 1.2817,
"step": 76
},
{
"epoch": 0.853185595567867,
"grad_norm": 0.6712394952774048,
"learning_rate": 6.180904522613065e-05,
"loss": 1.2516,
"step": 77
},
{
"epoch": 0.8642659279778393,
"grad_norm": 0.5741068720817566,
"learning_rate": 6.130653266331658e-05,
"loss": 1.2114,
"step": 78
},
{
"epoch": 0.8753462603878116,
"grad_norm": 0.6043746471405029,
"learning_rate": 6.080402010050251e-05,
"loss": 1.1762,
"step": 79
},
{
"epoch": 0.8864265927977839,
"grad_norm": 0.6717391610145569,
"learning_rate": 6.030150753768844e-05,
"loss": 1.228,
"step": 80
},
{
"epoch": 0.8975069252077562,
"grad_norm": 0.6319631338119507,
"learning_rate": 5.979899497487438e-05,
"loss": 1.2198,
"step": 81
},
{
"epoch": 0.9085872576177285,
"grad_norm": 0.6088309288024902,
"learning_rate": 5.929648241206031e-05,
"loss": 1.211,
"step": 82
},
{
"epoch": 0.9196675900277008,
"grad_norm": 0.6102275252342224,
"learning_rate": 5.879396984924623e-05,
"loss": 1.187,
"step": 83
},
{
"epoch": 0.9307479224376731,
"grad_norm": 0.5819908976554871,
"learning_rate": 5.829145728643216e-05,
"loss": 1.2206,
"step": 84
},
{
"epoch": 0.9418282548476454,
"grad_norm": 0.601245641708374,
"learning_rate": 5.778894472361809e-05,
"loss": 1.2269,
"step": 85
},
{
"epoch": 0.9529085872576177,
"grad_norm": 0.6378527283668518,
"learning_rate": 5.728643216080403e-05,
"loss": 1.2919,
"step": 86
},
{
"epoch": 0.96398891966759,
"grad_norm": 0.6004720330238342,
"learning_rate": 5.6783919597989955e-05,
"loss": 1.204,
"step": 87
},
{
"epoch": 0.9750692520775623,
"grad_norm": 0.6289650797843933,
"learning_rate": 5.628140703517588e-05,
"loss": 1.1608,
"step": 88
},
{
"epoch": 0.9861495844875346,
"grad_norm": 0.6542637944221497,
"learning_rate": 5.577889447236181e-05,
"loss": 1.1592,
"step": 89
},
{
"epoch": 0.997229916897507,
"grad_norm": 0.6260697245597839,
"learning_rate": 5.527638190954774e-05,
"loss": 1.2151,
"step": 90
},
{
"epoch": 1.0083102493074791,
"grad_norm": 1.5121455192565918,
"learning_rate": 5.477386934673368e-05,
"loss": 2.0839,
"step": 91
},
{
"epoch": 1.0193905817174516,
"grad_norm": 0.574207067489624,
"learning_rate": 5.4271356783919604e-05,
"loss": 1.1613,
"step": 92
},
{
"epoch": 1.0304709141274238,
"grad_norm": 0.6616407036781311,
"learning_rate": 5.376884422110553e-05,
"loss": 1.3486,
"step": 93
},
{
"epoch": 1.0415512465373962,
"grad_norm": 0.5284487009048462,
"learning_rate": 5.3266331658291455e-05,
"loss": 1.041,
"step": 94
},
{
"epoch": 1.0526315789473684,
"grad_norm": 0.5977286696434021,
"learning_rate": 5.276381909547739e-05,
"loss": 1.1503,
"step": 95
},
{
"epoch": 1.0637119113573408,
"grad_norm": 0.5718993544578552,
"learning_rate": 5.226130653266332e-05,
"loss": 1.1083,
"step": 96
},
{
"epoch": 1.074792243767313,
"grad_norm": 0.6474930644035339,
"learning_rate": 5.175879396984925e-05,
"loss": 1.2006,
"step": 97
},
{
"epoch": 1.0858725761772854,
"grad_norm": 0.5949592590332031,
"learning_rate": 5.125628140703518e-05,
"loss": 1.1012,
"step": 98
},
{
"epoch": 1.0969529085872576,
"grad_norm": 0.5743479132652283,
"learning_rate": 5.0753768844221104e-05,
"loss": 1.094,
"step": 99
},
{
"epoch": 1.10803324099723,
"grad_norm": 0.7052620053291321,
"learning_rate": 5.0251256281407036e-05,
"loss": 1.2209,
"step": 100
},
{
"epoch": 1.1191135734072022,
"grad_norm": 0.6266711354255676,
"learning_rate": 4.974874371859297e-05,
"loss": 1.0795,
"step": 101
},
{
"epoch": 1.1301939058171746,
"grad_norm": 0.6345641016960144,
"learning_rate": 4.92462311557789e-05,
"loss": 1.1237,
"step": 102
},
{
"epoch": 1.1412742382271468,
"grad_norm": 0.5873332023620605,
"learning_rate": 4.874371859296483e-05,
"loss": 1.062,
"step": 103
},
{
"epoch": 1.1523545706371192,
"grad_norm": 0.6368873119354248,
"learning_rate": 4.824120603015075e-05,
"loss": 0.9626,
"step": 104
},
{
"epoch": 1.1634349030470914,
"grad_norm": 0.6816115975379944,
"learning_rate": 4.7738693467336685e-05,
"loss": 1.0869,
"step": 105
},
{
"epoch": 1.1745152354570636,
"grad_norm": 0.7037101984024048,
"learning_rate": 4.723618090452262e-05,
"loss": 1.2178,
"step": 106
},
{
"epoch": 1.185595567867036,
"grad_norm": 0.6814127564430237,
"learning_rate": 4.673366834170855e-05,
"loss": 1.1456,
"step": 107
},
{
"epoch": 1.1966759002770084,
"grad_norm": 0.6351640224456787,
"learning_rate": 4.6231155778894475e-05,
"loss": 1.0543,
"step": 108
},
{
"epoch": 1.2077562326869806,
"grad_norm": 0.729073703289032,
"learning_rate": 4.57286432160804e-05,
"loss": 1.1579,
"step": 109
},
{
"epoch": 1.2188365650969528,
"grad_norm": 0.7375718355178833,
"learning_rate": 4.522613065326633e-05,
"loss": 1.178,
"step": 110
},
{
"epoch": 1.2299168975069252,
"grad_norm": 0.686299204826355,
"learning_rate": 4.4723618090452266e-05,
"loss": 1.1791,
"step": 111
},
{
"epoch": 1.2409972299168974,
"grad_norm": 0.6791194081306458,
"learning_rate": 4.42211055276382e-05,
"loss": 1.1277,
"step": 112
},
{
"epoch": 1.2520775623268698,
"grad_norm": 0.7525886297225952,
"learning_rate": 4.3718592964824124e-05,
"loss": 1.1455,
"step": 113
},
{
"epoch": 1.263157894736842,
"grad_norm": 0.7229343056678772,
"learning_rate": 4.321608040201005e-05,
"loss": 1.2113,
"step": 114
},
{
"epoch": 1.2742382271468145,
"grad_norm": 0.7222431302070618,
"learning_rate": 4.271356783919598e-05,
"loss": 1.1342,
"step": 115
},
{
"epoch": 1.2853185595567866,
"grad_norm": 0.677331805229187,
"learning_rate": 4.2211055276381914e-05,
"loss": 1.1411,
"step": 116
},
{
"epoch": 1.296398891966759,
"grad_norm": 0.6559180617332458,
"learning_rate": 4.170854271356784e-05,
"loss": 1.1108,
"step": 117
},
{
"epoch": 1.3074792243767313,
"grad_norm": 0.6477547287940979,
"learning_rate": 4.120603015075377e-05,
"loss": 1.079,
"step": 118
},
{
"epoch": 1.3185595567867037,
"grad_norm": 0.710292398929596,
"learning_rate": 4.07035175879397e-05,
"loss": 1.1465,
"step": 119
},
{
"epoch": 1.3296398891966759,
"grad_norm": 0.7174103260040283,
"learning_rate": 4.020100502512563e-05,
"loss": 1.2304,
"step": 120
},
{
"epoch": 1.3407202216066483,
"grad_norm": 0.6680272221565247,
"learning_rate": 3.969849246231156e-05,
"loss": 1.0935,
"step": 121
},
{
"epoch": 1.3518005540166205,
"grad_norm": 0.6414808630943298,
"learning_rate": 3.919597989949749e-05,
"loss": 1.0343,
"step": 122
},
{
"epoch": 1.3628808864265927,
"grad_norm": 0.7363560795783997,
"learning_rate": 3.869346733668342e-05,
"loss": 1.248,
"step": 123
},
{
"epoch": 1.373961218836565,
"grad_norm": 0.6751046776771545,
"learning_rate": 3.8190954773869346e-05,
"loss": 1.109,
"step": 124
},
{
"epoch": 1.3850415512465375,
"grad_norm": 0.6871734261512756,
"learning_rate": 3.768844221105528e-05,
"loss": 1.1761,
"step": 125
},
{
"epoch": 1.3961218836565097,
"grad_norm": 0.7280701398849487,
"learning_rate": 3.7185929648241204e-05,
"loss": 1.1457,
"step": 126
},
{
"epoch": 1.4072022160664819,
"grad_norm": 0.6830523014068604,
"learning_rate": 3.668341708542714e-05,
"loss": 0.985,
"step": 127
},
{
"epoch": 1.4182825484764543,
"grad_norm": 0.732204794883728,
"learning_rate": 3.618090452261307e-05,
"loss": 1.1521,
"step": 128
},
{
"epoch": 1.4293628808864267,
"grad_norm": 0.7047545909881592,
"learning_rate": 3.5678391959798995e-05,
"loss": 1.0978,
"step": 129
},
{
"epoch": 1.440443213296399,
"grad_norm": 0.7437470555305481,
"learning_rate": 3.517587939698493e-05,
"loss": 1.1808,
"step": 130
},
{
"epoch": 1.451523545706371,
"grad_norm": 0.6570298671722412,
"learning_rate": 3.467336683417085e-05,
"loss": 0.9725,
"step": 131
},
{
"epoch": 1.4626038781163435,
"grad_norm": 0.681265115737915,
"learning_rate": 3.4170854271356785e-05,
"loss": 1.1196,
"step": 132
},
{
"epoch": 1.4736842105263157,
"grad_norm": 0.7734697461128235,
"learning_rate": 3.366834170854272e-05,
"loss": 1.0839,
"step": 133
},
{
"epoch": 1.4847645429362881,
"grad_norm": 0.6945009231567383,
"learning_rate": 3.3165829145728643e-05,
"loss": 1.0336,
"step": 134
},
{
"epoch": 1.4958448753462603,
"grad_norm": 0.7448641657829285,
"learning_rate": 3.2663316582914576e-05,
"loss": 1.0924,
"step": 135
},
{
"epoch": 1.5069252077562327,
"grad_norm": 0.7725421190261841,
"learning_rate": 3.21608040201005e-05,
"loss": 1.1409,
"step": 136
},
{
"epoch": 1.5180055401662051,
"grad_norm": 0.8401060700416565,
"learning_rate": 3.1658291457286434e-05,
"loss": 1.1962,
"step": 137
},
{
"epoch": 1.5290858725761773,
"grad_norm": 0.708717405796051,
"learning_rate": 3.1155778894472366e-05,
"loss": 1.1007,
"step": 138
},
{
"epoch": 1.5401662049861495,
"grad_norm": 0.7954943776130676,
"learning_rate": 3.065326633165829e-05,
"loss": 1.1094,
"step": 139
},
{
"epoch": 1.5512465373961217,
"grad_norm": 0.7701108455657959,
"learning_rate": 3.015075376884422e-05,
"loss": 1.152,
"step": 140
},
{
"epoch": 1.5623268698060941,
"grad_norm": 0.7365975379943848,
"learning_rate": 2.9648241206030153e-05,
"loss": 1.0735,
"step": 141
},
{
"epoch": 1.5734072022160666,
"grad_norm": 0.6943490505218506,
"learning_rate": 2.914572864321608e-05,
"loss": 1.0759,
"step": 142
},
{
"epoch": 1.5844875346260388,
"grad_norm": 0.7694918513298035,
"learning_rate": 2.8643216080402015e-05,
"loss": 1.1438,
"step": 143
},
{
"epoch": 1.595567867036011,
"grad_norm": 0.6781268119812012,
"learning_rate": 2.814070351758794e-05,
"loss": 0.9776,
"step": 144
},
{
"epoch": 1.6066481994459834,
"grad_norm": 0.6973868012428284,
"learning_rate": 2.763819095477387e-05,
"loss": 1.0492,
"step": 145
},
{
"epoch": 1.6177285318559558,
"grad_norm": 0.770706295967102,
"learning_rate": 2.7135678391959802e-05,
"loss": 1.0905,
"step": 146
},
{
"epoch": 1.628808864265928,
"grad_norm": 0.7229887247085571,
"learning_rate": 2.6633165829145728e-05,
"loss": 1.1664,
"step": 147
},
{
"epoch": 1.6398891966759002,
"grad_norm": 0.762946367263794,
"learning_rate": 2.613065326633166e-05,
"loss": 1.1373,
"step": 148
},
{
"epoch": 1.6509695290858726,
"grad_norm": 0.7556053996086121,
"learning_rate": 2.562814070351759e-05,
"loss": 1.0613,
"step": 149
},
{
"epoch": 1.662049861495845,
"grad_norm": 0.7181993722915649,
"learning_rate": 2.5125628140703518e-05,
"loss": 1.0686,
"step": 150
},
{
"epoch": 1.6731301939058172,
"grad_norm": 0.7401473522186279,
"learning_rate": 2.462311557788945e-05,
"loss": 1.0952,
"step": 151
},
{
"epoch": 1.6842105263157894,
"grad_norm": 0.7067743539810181,
"learning_rate": 2.4120603015075376e-05,
"loss": 0.9935,
"step": 152
},
{
"epoch": 1.6952908587257618,
"grad_norm": 0.7622341513633728,
"learning_rate": 2.361809045226131e-05,
"loss": 1.1452,
"step": 153
},
{
"epoch": 1.7063711911357342,
"grad_norm": 0.7768684029579163,
"learning_rate": 2.3115577889447238e-05,
"loss": 1.1314,
"step": 154
},
{
"epoch": 1.7174515235457064,
"grad_norm": 0.7184272408485413,
"learning_rate": 2.2613065326633167e-05,
"loss": 1.049,
"step": 155
},
{
"epoch": 1.7285318559556786,
"grad_norm": 0.7589651942253113,
"learning_rate": 2.21105527638191e-05,
"loss": 1.0791,
"step": 156
},
{
"epoch": 1.739612188365651,
"grad_norm": 0.7551384568214417,
"learning_rate": 2.1608040201005025e-05,
"loss": 1.1264,
"step": 157
},
{
"epoch": 1.7506925207756234,
"grad_norm": 0.7738797664642334,
"learning_rate": 2.1105527638190957e-05,
"loss": 1.0658,
"step": 158
},
{
"epoch": 1.7617728531855956,
"grad_norm": 0.8027400374412537,
"learning_rate": 2.0603015075376886e-05,
"loss": 1.0526,
"step": 159
},
{
"epoch": 1.7728531855955678,
"grad_norm": 0.693533718585968,
"learning_rate": 2.0100502512562815e-05,
"loss": 1.0348,
"step": 160
},
{
"epoch": 1.78393351800554,
"grad_norm": 0.7291271686553955,
"learning_rate": 1.9597989949748744e-05,
"loss": 1.0686,
"step": 161
},
{
"epoch": 1.7950138504155124,
"grad_norm": 0.7514436841011047,
"learning_rate": 1.9095477386934673e-05,
"loss": 1.1501,
"step": 162
},
{
"epoch": 1.8060941828254848,
"grad_norm": 0.7121478915214539,
"learning_rate": 1.8592964824120602e-05,
"loss": 1.0153,
"step": 163
},
{
"epoch": 1.817174515235457,
"grad_norm": 0.7232415676116943,
"learning_rate": 1.8090452261306535e-05,
"loss": 1.034,
"step": 164
},
{
"epoch": 1.8282548476454292,
"grad_norm": 0.8162721991539001,
"learning_rate": 1.7587939698492464e-05,
"loss": 1.1254,
"step": 165
},
{
"epoch": 1.8393351800554016,
"grad_norm": 0.7215307354927063,
"learning_rate": 1.7085427135678393e-05,
"loss": 1.0638,
"step": 166
},
{
"epoch": 1.850415512465374,
"grad_norm": 0.7444539666175842,
"learning_rate": 1.6582914572864322e-05,
"loss": 1.0895,
"step": 167
},
{
"epoch": 1.8614958448753463,
"grad_norm": 0.787027895450592,
"learning_rate": 1.608040201005025e-05,
"loss": 0.9595,
"step": 168
},
{
"epoch": 1.8725761772853184,
"grad_norm": 0.7815292477607727,
"learning_rate": 1.5577889447236183e-05,
"loss": 1.1505,
"step": 169
},
{
"epoch": 1.8836565096952909,
"grad_norm": 0.7654871344566345,
"learning_rate": 1.507537688442211e-05,
"loss": 1.1074,
"step": 170
},
{
"epoch": 1.8947368421052633,
"grad_norm": 0.7809323072433472,
"learning_rate": 1.457286432160804e-05,
"loss": 1.0832,
"step": 171
},
{
"epoch": 1.9058171745152355,
"grad_norm": 0.8374095559120178,
"learning_rate": 1.407035175879397e-05,
"loss": 1.1028,
"step": 172
},
{
"epoch": 1.9168975069252077,
"grad_norm": 0.7976056933403015,
"learning_rate": 1.3567839195979901e-05,
"loss": 1.0124,
"step": 173
},
{
"epoch": 1.92797783933518,
"grad_norm": 0.8494656682014465,
"learning_rate": 1.306532663316583e-05,
"loss": 1.1487,
"step": 174
},
{
"epoch": 1.9390581717451525,
"grad_norm": 0.7171152234077454,
"learning_rate": 1.2562814070351759e-05,
"loss": 1.0215,
"step": 175
},
{
"epoch": 1.9501385041551247,
"grad_norm": 0.7844187617301941,
"learning_rate": 1.2060301507537688e-05,
"loss": 1.1526,
"step": 176
},
{
"epoch": 1.9612188365650969,
"grad_norm": 0.7948229908943176,
"learning_rate": 1.1557788944723619e-05,
"loss": 1.1165,
"step": 177
},
{
"epoch": 1.9722991689750693,
"grad_norm": 0.6978301405906677,
"learning_rate": 1.105527638190955e-05,
"loss": 0.9733,
"step": 178
},
{
"epoch": 1.9833795013850417,
"grad_norm": 0.8851218819618225,
"learning_rate": 1.0552763819095479e-05,
"loss": 1.167,
"step": 179
},
{
"epoch": 1.994459833795014,
"grad_norm": 0.7181246876716614,
"learning_rate": 1.0050251256281408e-05,
"loss": 1.0187,
"step": 180
},
{
"epoch": 2.005540166204986,
"grad_norm": 1.8050236701965332,
"learning_rate": 9.547738693467337e-06,
"loss": 1.97,
"step": 181
},
{
"epoch": 2.0166204986149583,
"grad_norm": 0.7432425618171692,
"learning_rate": 9.045226130653267e-06,
"loss": 1.0561,
"step": 182
},
{
"epoch": 2.027700831024931,
"grad_norm": 0.7358068227767944,
"learning_rate": 8.542713567839196e-06,
"loss": 0.9627,
"step": 183
},
{
"epoch": 2.038781163434903,
"grad_norm": 0.7742936015129089,
"learning_rate": 8.040201005025125e-06,
"loss": 1.143,
"step": 184
},
{
"epoch": 2.0498614958448753,
"grad_norm": 0.7715827226638794,
"learning_rate": 7.537688442211055e-06,
"loss": 1.0638,
"step": 185
},
{
"epoch": 2.0609418282548475,
"grad_norm": 0.7287455797195435,
"learning_rate": 7.035175879396985e-06,
"loss": 1.0142,
"step": 186
},
{
"epoch": 2.07202216066482,
"grad_norm": 0.7636083960533142,
"learning_rate": 6.532663316582915e-06,
"loss": 1.0196,
"step": 187
},
{
"epoch": 2.0831024930747923,
"grad_norm": 0.7313657999038696,
"learning_rate": 6.030150753768844e-06,
"loss": 1.0466,
"step": 188
},
{
"epoch": 2.0941828254847645,
"grad_norm": 0.7313005924224854,
"learning_rate": 5.527638190954775e-06,
"loss": 1.0009,
"step": 189
},
{
"epoch": 2.1052631578947367,
"grad_norm": 0.729709267616272,
"learning_rate": 5.025125628140704e-06,
"loss": 1.0601,
"step": 190
},
{
"epoch": 2.1163434903047094,
"grad_norm": 0.7446528077125549,
"learning_rate": 4.522613065326634e-06,
"loss": 0.9965,
"step": 191
},
{
"epoch": 2.1274238227146816,
"grad_norm": 0.7408772110939026,
"learning_rate": 4.020100502512563e-06,
"loss": 0.9824,
"step": 192
},
{
"epoch": 2.1385041551246537,
"grad_norm": 0.7374000549316406,
"learning_rate": 3.5175879396984926e-06,
"loss": 1.13,
"step": 193
},
{
"epoch": 2.149584487534626,
"grad_norm": 0.7011594772338867,
"learning_rate": 3.015075376884422e-06,
"loss": 0.9952,
"step": 194
},
{
"epoch": 2.160664819944598,
"grad_norm": 0.6886879801750183,
"learning_rate": 2.512562814070352e-06,
"loss": 0.9617,
"step": 195
},
{
"epoch": 2.1717451523545708,
"grad_norm": 0.8077765703201294,
"learning_rate": 2.0100502512562813e-06,
"loss": 1.0507,
"step": 196
},
{
"epoch": 2.182825484764543,
"grad_norm": 0.7857282757759094,
"learning_rate": 1.507537688442211e-06,
"loss": 1.1114,
"step": 197
},
{
"epoch": 2.193905817174515,
"grad_norm": 0.7135636806488037,
"learning_rate": 1.0050251256281407e-06,
"loss": 1.0691,
"step": 198
},
{
"epoch": 2.2049861495844874,
"grad_norm": 0.720593273639679,
"learning_rate": 5.025125628140703e-07,
"loss": 0.9977,
"step": 199
},
{
"epoch": 2.21606648199446,
"grad_norm": 0.7399870157241821,
"learning_rate": 0.0,
"loss": 1.0131,
"step": 200
}
],
"logging_steps": 1,
"max_steps": 200,
"num_input_tokens_seen": 0,
"num_train_epochs": 3,
"save_steps": 500,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 2.7777393737908224e+16,
"train_batch_size": 4,
"trial_name": null,
"trial_params": null
}