ViLaSR-cold-start / trainer_state.json
{
"best_global_step": null,
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 3.0,
"eval_steps": 500,
"global_step": 1629,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.01841620626151013,
"grad_norm": 3.54826512795615,
"learning_rate": 5.521472392638038e-07,
"loss": 0.4164,
"step": 10
},
{
"epoch": 0.03683241252302026,
"grad_norm": 2.150976325911803,
"learning_rate": 1.165644171779141e-06,
"loss": 0.3752,
"step": 20
},
{
"epoch": 0.055248618784530384,
"grad_norm": 1.301734345190514,
"learning_rate": 1.7791411042944787e-06,
"loss": 0.3446,
"step": 30
},
{
"epoch": 0.07366482504604052,
"grad_norm": 1.0129957906382288,
"learning_rate": 2.392638036809816e-06,
"loss": 0.3248,
"step": 40
},
{
"epoch": 0.09208103130755065,
"grad_norm": 1.0713399309035538,
"learning_rate": 3.0061349693251535e-06,
"loss": 0.3026,
"step": 50
},
{
"epoch": 0.11049723756906077,
"grad_norm": 1.0507425813581446,
"learning_rate": 3.6196319018404913e-06,
"loss": 0.304,
"step": 60
},
{
"epoch": 0.1289134438305709,
"grad_norm": 0.8381317525552241,
"learning_rate": 4.233128834355829e-06,
"loss": 0.291,
"step": 70
},
{
"epoch": 0.14732965009208104,
"grad_norm": 0.9207152395423905,
"learning_rate": 4.846625766871166e-06,
"loss": 0.2962,
"step": 80
},
{
"epoch": 0.16574585635359115,
"grad_norm": 0.9703285578071639,
"learning_rate": 5.460122699386503e-06,
"loss": 0.2877,
"step": 90
},
{
"epoch": 0.1841620626151013,
"grad_norm": 0.9941438616319775,
"learning_rate": 6.073619631901841e-06,
"loss": 0.2894,
"step": 100
},
{
"epoch": 0.20257826887661143,
"grad_norm": 1.0580118824781048,
"learning_rate": 6.687116564417178e-06,
"loss": 0.2865,
"step": 110
},
{
"epoch": 0.22099447513812154,
"grad_norm": 1.0527321061745536,
"learning_rate": 7.300613496932516e-06,
"loss": 0.2744,
"step": 120
},
{
"epoch": 0.23941068139963168,
"grad_norm": 0.9446407682756467,
"learning_rate": 7.914110429447854e-06,
"loss": 0.2844,
"step": 130
},
{
"epoch": 0.2578268876611418,
"grad_norm": 1.0199363774215442,
"learning_rate": 8.527607361963191e-06,
"loss": 0.2793,
"step": 140
},
{
"epoch": 0.27624309392265195,
"grad_norm": 0.9261191382922327,
"learning_rate": 9.14110429447853e-06,
"loss": 0.2822,
"step": 150
},
{
"epoch": 0.2946593001841621,
"grad_norm": 0.9028861540282759,
"learning_rate": 9.754601226993867e-06,
"loss": 0.2758,
"step": 160
},
{
"epoch": 0.31307550644567217,
"grad_norm": 0.8298039066694322,
"learning_rate": 9.999586697215748e-06,
"loss": 0.2867,
"step": 170
},
{
"epoch": 0.3314917127071823,
"grad_norm": 0.9401043712912456,
"learning_rate": 9.997061205416203e-06,
"loss": 0.2935,
"step": 180
},
{
"epoch": 0.34990791896869244,
"grad_norm": 0.8170942120374912,
"learning_rate": 9.992240992810445e-06,
"loss": 0.276,
"step": 190
},
{
"epoch": 0.3683241252302026,
"grad_norm": 0.7970942467849401,
"learning_rate": 9.985128272907917e-06,
"loss": 0.2797,
"step": 200
},
{
"epoch": 0.3867403314917127,
"grad_norm": 0.8675972760036051,
"learning_rate": 9.975726311969664e-06,
"loss": 0.2782,
"step": 210
},
{
"epoch": 0.40515653775322286,
"grad_norm": 0.8765243656942432,
"learning_rate": 9.964039427508418e-06,
"loss": 0.2815,
"step": 220
},
{
"epoch": 0.42357274401473294,
"grad_norm": 0.8649203720855488,
"learning_rate": 9.950072986305938e-06,
"loss": 0.2757,
"step": 230
},
{
"epoch": 0.4419889502762431,
"grad_norm": 0.8513089510736624,
"learning_rate": 9.933833401948514e-06,
"loss": 0.2728,
"step": 240
},
{
"epoch": 0.4604051565377532,
"grad_norm": 0.775184166522136,
"learning_rate": 9.915328131881745e-06,
"loss": 0.2741,
"step": 250
},
{
"epoch": 0.47882136279926335,
"grad_norm": 0.7978646282069273,
"learning_rate": 9.894565673985986e-06,
"loss": 0.2725,
"step": 260
},
{
"epoch": 0.4972375690607735,
"grad_norm": 0.6613832505637378,
"learning_rate": 9.871555562673996e-06,
"loss": 0.2675,
"step": 270
},
{
"epoch": 0.5156537753222836,
"grad_norm": 0.8757001574454977,
"learning_rate": 9.846308364512607e-06,
"loss": 0.2661,
"step": 280
},
{
"epoch": 0.5340699815837937,
"grad_norm": 0.7547104807299371,
"learning_rate": 9.8188356733704e-06,
"loss": 0.2742,
"step": 290
},
{
"epoch": 0.5524861878453039,
"grad_norm": 0.7251338284529852,
"learning_rate": 9.789150105093647e-06,
"loss": 0.2683,
"step": 300
},
{
"epoch": 0.570902394106814,
"grad_norm": 0.7543457855276412,
"learning_rate": 9.75726529171293e-06,
"loss": 0.2756,
"step": 310
},
{
"epoch": 0.5893186003683242,
"grad_norm": 0.7747270052423587,
"learning_rate": 9.72319587518312e-06,
"loss": 0.2687,
"step": 320
},
{
"epoch": 0.6077348066298343,
"grad_norm": 0.8708864438161477,
"learning_rate": 9.68695750065959e-06,
"loss": 0.2687,
"step": 330
},
{
"epoch": 0.6261510128913443,
"grad_norm": 0.7181496119078857,
"learning_rate": 9.648566809313738e-06,
"loss": 0.2754,
"step": 340
},
{
"epoch": 0.6445672191528545,
"grad_norm": 0.7212363207801823,
"learning_rate": 9.608041430691126e-06,
"loss": 0.2699,
"step": 350
},
{
"epoch": 0.6629834254143646,
"grad_norm": 0.707979023449836,
"learning_rate": 9.565399974615744e-06,
"loss": 0.2688,
"step": 360
},
{
"epoch": 0.6813996316758748,
"grad_norm": 0.8334790893435787,
"learning_rate": 9.52066202264412e-06,
"loss": 0.2776,
"step": 370
},
{
"epoch": 0.6998158379373849,
"grad_norm": 0.7596815946675103,
"learning_rate": 9.473848119073188e-06,
"loss": 0.2786,
"step": 380
},
{
"epoch": 0.7182320441988951,
"grad_norm": 0.7331861291943189,
"learning_rate": 9.42497976150607e-06,
"loss": 0.2622,
"step": 390
},
{
"epoch": 0.7366482504604052,
"grad_norm": 0.6706665335188781,
"learning_rate": 9.374079390980058e-06,
"loss": 0.2695,
"step": 400
},
{
"epoch": 0.7550644567219152,
"grad_norm": 0.7515563648690087,
"learning_rate": 9.321170381661383e-06,
"loss": 0.2671,
"step": 410
},
{
"epoch": 0.7734806629834254,
"grad_norm": 0.6978960493224456,
"learning_rate": 9.266277030111474e-06,
"loss": 0.2721,
"step": 420
},
{
"epoch": 0.7918968692449355,
"grad_norm": 0.6317044519904149,
"learning_rate": 9.209424544129621e-06,
"loss": 0.2611,
"step": 430
},
{
"epoch": 0.8103130755064457,
"grad_norm": 0.6933884011578646,
"learning_rate": 9.150639031177211e-06,
"loss": 0.2679,
"step": 440
},
{
"epoch": 0.8287292817679558,
"grad_norm": 0.6662516396211534,
"learning_rate": 9.08994748638881e-06,
"loss": 0.2718,
"step": 450
},
{
"epoch": 0.8471454880294659,
"grad_norm": 0.7537377588910371,
"learning_rate": 9.02737778017562e-06,
"loss": 0.2625,
"step": 460
},
{
"epoch": 0.8655616942909761,
"grad_norm": 0.6490665535396116,
"learning_rate": 8.962958645426989e-06,
"loss": 0.2601,
"step": 470
},
{
"epoch": 0.8839779005524862,
"grad_norm": 0.6773727676397064,
"learning_rate": 8.896719664315866e-06,
"loss": 0.2651,
"step": 480
},
{
"epoch": 0.9023941068139963,
"grad_norm": 0.6798276722239568,
"learning_rate": 8.828691254714259e-06,
"loss": 0.2625,
"step": 490
},
{
"epoch": 0.9208103130755064,
"grad_norm": 0.6979929906196801,
"learning_rate": 8.758904656224904e-06,
"loss": 0.2749,
"step": 500
},
{
"epoch": 0.9392265193370166,
"grad_norm": 0.7308459913669413,
"learning_rate": 8.687391915835617e-06,
"loss": 0.2666,
"step": 510
},
{
"epoch": 0.9576427255985267,
"grad_norm": 0.8055261702848038,
"learning_rate": 8.614185873202852e-06,
"loss": 0.2705,
"step": 520
},
{
"epoch": 0.9760589318600368,
"grad_norm": 0.7313961658432152,
"learning_rate": 8.539320145571277e-06,
"loss": 0.2619,
"step": 530
},
{
"epoch": 0.994475138121547,
"grad_norm": 0.6331517103040231,
"learning_rate": 8.462829112336266e-06,
"loss": 0.2671,
"step": 540
},
{
"epoch": 1.0128913443830572,
"grad_norm": 0.7085468234966358,
"learning_rate": 8.384747899256386e-06,
"loss": 0.2388,
"step": 550
},
{
"epoch": 1.0313075506445673,
"grad_norm": 0.6748955679522807,
"learning_rate": 8.30511236232316e-06,
"loss": 0.2226,
"step": 560
},
{
"epoch": 1.0497237569060773,
"grad_norm": 0.7100102091480333,
"learning_rate": 8.223959071295492e-06,
"loss": 0.2297,
"step": 570
},
{
"epoch": 1.0681399631675874,
"grad_norm": 0.6668769883519982,
"learning_rate": 8.141325292906325e-06,
"loss": 0.2263,
"step": 580
},
{
"epoch": 1.0865561694290977,
"grad_norm": 0.6720758025127164,
"learning_rate": 8.057248973749216e-06,
"loss": 0.2288,
"step": 590
},
{
"epoch": 1.1049723756906078,
"grad_norm": 0.6411173465966967,
"learning_rate": 7.971768722852741e-06,
"loss": 0.2253,
"step": 600
},
{
"epoch": 1.1233885819521179,
"grad_norm": 0.6724293241376955,
"learning_rate": 7.884923793950684e-06,
"loss": 0.2326,
"step": 610
},
{
"epoch": 1.141804788213628,
"grad_norm": 0.6371467332633137,
"learning_rate": 7.796754067456168e-06,
"loss": 0.2235,
"step": 620
},
{
"epoch": 1.160220994475138,
"grad_norm": 0.6085432732125634,
"learning_rate": 7.707300032148004e-06,
"loss": 0.2289,
"step": 630
},
{
"epoch": 1.1786372007366483,
"grad_norm": 0.7730530541898509,
"learning_rate": 7.616602766577683e-06,
"loss": 0.2263,
"step": 640
},
{
"epoch": 1.1970534069981584,
"grad_norm": 0.7420431722106332,
"learning_rate": 7.524703920205521e-06,
"loss": 0.2331,
"step": 650
},
{
"epoch": 1.2154696132596685,
"grad_norm": 0.6604167477110746,
"learning_rate": 7.43164569427464e-06,
"loss": 0.2278,
"step": 660
},
{
"epoch": 1.2338858195211786,
"grad_norm": 0.7778434196490822,
"learning_rate": 7.3374708224315725e-06,
"loss": 0.2242,
"step": 670
},
{
"epoch": 1.2523020257826887,
"grad_norm": 0.7114711250180328,
"learning_rate": 7.2422225511023555e-06,
"loss": 0.2261,
"step": 680
},
{
"epoch": 1.270718232044199,
"grad_norm": 0.7059769736094257,
"learning_rate": 7.145944619633176e-06,
"loss": 0.2218,
"step": 690
},
{
"epoch": 1.289134438305709,
"grad_norm": 0.6846868707521909,
"learning_rate": 7.048681240204641e-06,
"loss": 0.2305,
"step": 700
},
{
"epoch": 1.3075506445672191,
"grad_norm": 0.636969343049607,
"learning_rate": 6.950477077528927e-06,
"loss": 0.2242,
"step": 710
},
{
"epoch": 1.3259668508287292,
"grad_norm": 0.633812292072214,
"learning_rate": 6.851377228339106e-06,
"loss": 0.2302,
"step": 720
},
{
"epoch": 1.3443830570902393,
"grad_norm": 0.6630293802277623,
"learning_rate": 6.751427200680109e-06,
"loss": 0.2295,
"step": 730
},
{
"epoch": 1.3627992633517496,
"grad_norm": 0.6315090136897826,
"learning_rate": 6.650672893010769e-06,
"loss": 0.2221,
"step": 740
},
{
"epoch": 1.3812154696132597,
"grad_norm": 0.7126623318254198,
"learning_rate": 6.549160573126623e-06,
"loss": 0.2293,
"step": 750
},
{
"epoch": 1.3996316758747698,
"grad_norm": 0.6653513173488009,
"learning_rate": 6.4469368569130786e-06,
"loss": 0.2282,
"step": 760
},
{
"epoch": 1.4180478821362799,
"grad_norm": 0.6343921369098496,
"learning_rate": 6.344048686938745e-06,
"loss": 0.2273,
"step": 770
},
{
"epoch": 1.43646408839779,
"grad_norm": 0.6210167648750854,
"learning_rate": 6.2405433108987456e-06,
"loss": 0.2338,
"step": 780
},
{
"epoch": 1.4548802946593002,
"grad_norm": 0.6784962640818855,
"learning_rate": 6.136468259917917e-06,
"loss": 0.2289,
"step": 790
},
{
"epoch": 1.4732965009208103,
"grad_norm": 0.578659502472278,
"learning_rate": 6.031871326723837e-06,
"loss": 0.2363,
"step": 800
},
{
"epoch": 1.4917127071823204,
"grad_norm": 0.7040713369273666,
"learning_rate": 5.92680054369974e-06,
"loss": 0.2233,
"step": 810
},
{
"epoch": 1.5101289134438307,
"grad_norm": 0.7100429365248154,
"learning_rate": 5.821304160827371e-06,
"loss": 0.2275,
"step": 820
},
{
"epoch": 1.5285451197053406,
"grad_norm": 0.5875774880678092,
"learning_rate": 5.71543062352991e-06,
"loss": 0.225,
"step": 830
},
{
"epoch": 1.5469613259668509,
"grad_norm": 0.5895563949605374,
"learning_rate": 5.609228550425154e-06,
"loss": 0.2347,
"step": 840
},
{
"epoch": 1.565377532228361,
"grad_norm": 0.6923142403050372,
"learning_rate": 5.50274671099917e-06,
"loss": 0.2212,
"step": 850
},
{
"epoch": 1.583793738489871,
"grad_norm": 0.6402043458583734,
"learning_rate": 5.3960340032106515e-06,
"loss": 0.221,
"step": 860
},
{
"epoch": 1.6022099447513813,
"grad_norm": 0.6583270982368973,
"learning_rate": 5.28913943103629e-06,
"loss": 0.2263,
"step": 870
},
{
"epoch": 1.6206261510128912,
"grad_norm": 0.6545061377359965,
"learning_rate": 5.182112081967467e-06,
"loss": 0.2221,
"step": 880
},
{
"epoch": 1.6390423572744015,
"grad_norm": 0.7057814110328757,
"learning_rate": 5.075001104468576e-06,
"loss": 0.2303,
"step": 890
},
{
"epoch": 1.6574585635359116,
"grad_norm": 0.6738126191275001,
"learning_rate": 4.967855685407368e-06,
"loss": 0.2235,
"step": 900
},
{
"epoch": 1.6758747697974217,
"grad_norm": 0.6229975201509437,
"learning_rate": 4.860725027467641e-06,
"loss": 0.2276,
"step": 910
},
{
"epoch": 1.694290976058932,
"grad_norm": 0.6485619857748801,
"learning_rate": 4.7536583265546775e-06,
"loss": 0.2294,
"step": 920
},
{
"epoch": 1.7127071823204418,
"grad_norm": 0.6952097611858238,
"learning_rate": 4.646704749203794e-06,
"loss": 0.22,
"step": 930
},
{
"epoch": 1.7311233885819521,
"grad_norm": 0.6993773129475868,
"learning_rate": 4.539913410002378e-06,
"loss": 0.2253,
"step": 940
},
{
"epoch": 1.7495395948434622,
"grad_norm": 0.7179073268017699,
"learning_rate": 4.433333349035773e-06,
"loss": 0.2262,
"step": 950
},
{
"epoch": 1.7679558011049723,
"grad_norm": 0.6202631830964883,
"learning_rate": 4.327013509367386e-06,
"loss": 0.2212,
"step": 960
},
{
"epoch": 1.7863720073664826,
"grad_norm": 0.6608879257543027,
"learning_rate": 4.221002714563347e-06,
"loss": 0.2205,
"step": 970
},
{
"epoch": 1.8047882136279927,
"grad_norm": 0.59224317673099,
"learning_rate": 4.115349646272029e-06,
"loss": 0.2205,
"step": 980
},
{
"epoch": 1.8232044198895028,
"grad_norm": 0.5878582444161091,
"learning_rate": 4.010102821868762e-06,
"loss": 0.2229,
"step": 990
},
{
"epoch": 1.8416206261510129,
"grad_norm": 0.6461746282587334,
"learning_rate": 3.90531057217597e-06,
"loss": 0.2232,
"step": 1000
},
{
"epoch": 1.860036832412523,
"grad_norm": 0.6205082651968326,
"learning_rate": 3.8010210192689688e-06,
"loss": 0.2277,
"step": 1010
},
{
"epoch": 1.8784530386740332,
"grad_norm": 0.7181661264692984,
"learning_rate": 3.6972820543776404e-06,
"loss": 0.2261,
"step": 1020
},
{
"epoch": 1.8968692449355433,
"grad_norm": 0.6649391919825204,
"learning_rate": 3.5941413158941086e-06,
"loss": 0.2259,
"step": 1030
},
{
"epoch": 1.9152854511970534,
"grad_norm": 0.6044692798894635,
"learning_rate": 3.4916461674965074e-06,
"loss": 0.222,
"step": 1040
},
{
"epoch": 1.9337016574585635,
"grad_norm": 0.615450434298784,
"learning_rate": 3.389843676398925e-06,
"loss": 0.2254,
"step": 1050
},
{
"epoch": 1.9521178637200736,
"grad_norm": 0.6691163050124782,
"learning_rate": 3.2887805917374736e-06,
"loss": 0.2189,
"step": 1060
},
{
"epoch": 1.9705340699815839,
"grad_norm": 0.6022414370557024,
"learning_rate": 3.1885033231024253e-06,
"loss": 0.2227,
"step": 1070
},
{
"epoch": 1.988950276243094,
"grad_norm": 0.6942520950174368,
"learning_rate": 3.089057919226277e-06,
"loss": 0.2254,
"step": 1080
},
{
"epoch": 2.007366482504604,
"grad_norm": 0.5986817795885931,
"learning_rate": 2.9904900468375298e-06,
"loss": 0.2091,
"step": 1090
},
{
"epoch": 2.0257826887661143,
"grad_norm": 0.5655950429797426,
"learning_rate": 2.892844969689876e-06,
"loss": 0.184,
"step": 1100
},
{
"epoch": 2.044198895027624,
"grad_norm": 0.6492187365821179,
"learning_rate": 2.7961675277764498e-06,
"loss": 0.1815,
"step": 1110
},
{
"epoch": 2.0626151012891345,
"grad_norm": 0.5494541259912535,
"learning_rate": 2.7005021167386804e-06,
"loss": 0.1782,
"step": 1120
},
{
"epoch": 2.0810313075506444,
"grad_norm": 0.6018040555497874,
"learning_rate": 2.605892667479173e-06,
"loss": 0.1806,
"step": 1130
},
{
"epoch": 2.0994475138121547,
"grad_norm": 0.609141353634607,
"learning_rate": 2.5123826259880324e-06,
"loss": 0.1827,
"step": 1140
},
{
"epoch": 2.117863720073665,
"grad_norm": 0.6522701323065019,
"learning_rate": 2.420014933391849e-06,
"loss": 0.1834,
"step": 1150
},
{
"epoch": 2.136279926335175,
"grad_norm": 0.6209038557527796,
"learning_rate": 2.3288320062345276e-06,
"loss": 0.1832,
"step": 1160
},
{
"epoch": 2.154696132596685,
"grad_norm": 0.671734526610132,
"learning_rate": 2.238875716999019e-06,
"loss": 0.181,
"step": 1170
},
{
"epoch": 2.1731123388581954,
"grad_norm": 0.6211775861295186,
"learning_rate": 2.1501873748788804e-06,
"loss": 0.1859,
"step": 1180
},
{
"epoch": 2.1915285451197053,
"grad_norm": 0.6161892750894669,
"learning_rate": 2.0628077068085173e-06,
"loss": 0.1833,
"step": 1190
},
{
"epoch": 2.2099447513812156,
"grad_norm": 0.6171060565609299,
"learning_rate": 1.976776838760801e-06,
"loss": 0.1823,
"step": 1200
},
{
"epoch": 2.2283609576427255,
"grad_norm": 0.6627457414621876,
"learning_rate": 1.8921342773206553e-06,
"loss": 0.1802,
"step": 1210
},
{
"epoch": 2.2467771639042358,
"grad_norm": 0.5766657805895978,
"learning_rate": 1.8089188915430794e-06,
"loss": 0.1801,
"step": 1220
},
{
"epoch": 2.265193370165746,
"grad_norm": 0.6528068390986206,
"learning_rate": 1.7271688951039312e-06,
"loss": 0.1817,
"step": 1230
},
{
"epoch": 2.283609576427256,
"grad_norm": 0.6108145113882844,
"learning_rate": 1.6469218287516664e-06,
"loss": 0.1824,
"step": 1240
},
{
"epoch": 2.3020257826887662,
"grad_norm": 0.6223360388944672,
"learning_rate": 1.568214543068103e-06,
"loss": 0.1817,
"step": 1250
},
{
"epoch": 2.320441988950276,
"grad_norm": 0.5730293037393086,
"learning_rate": 1.4910831815461125e-06,
"loss": 0.1783,
"step": 1260
},
{
"epoch": 2.3388581952117864,
"grad_norm": 0.6804328273268474,
"learning_rate": 1.4155631639920208e-06,
"loss": 0.1793,
"step": 1270
},
{
"epoch": 2.3572744014732967,
"grad_norm": 0.5936583186703833,
"learning_rate": 1.3416891702603357e-06,
"loss": 0.1772,
"step": 1280
},
{
"epoch": 2.3756906077348066,
"grad_norm": 0.6646979614560136,
"learning_rate": 1.2694951243282682e-06,
"loss": 0.1805,
"step": 1290
},
{
"epoch": 2.394106813996317,
"grad_norm": 0.5867258571902689,
"learning_rate": 1.1990141787173648e-06,
"loss": 0.1793,
"step": 1300
},
{
"epoch": 2.4125230202578267,
"grad_norm": 0.6356400483556068,
"learning_rate": 1.1302786992694049e-06,
"loss": 0.1793,
"step": 1310
},
{
"epoch": 2.430939226519337,
"grad_norm": 0.6071714234787918,
"learning_rate": 1.0633202502835494e-06,
"loss": 0.1831,
"step": 1320
},
{
"epoch": 2.4493554327808473,
"grad_norm": 0.6031318900784148,
"learning_rate": 9.981695800215701e-07,
"loss": 0.1795,
"step": 1330
},
{
"epoch": 2.467771639042357,
"grad_norm": 0.6195067631818659,
"learning_rate": 9.348566065878218e-07,
"loss": 0.1807,
"step": 1340
},
{
"epoch": 2.4861878453038675,
"grad_norm": 0.640824325612606,
"learning_rate": 8.734104041904129e-07,
"loss": 0.1835,
"step": 1350
},
{
"epoch": 2.5046040515653774,
"grad_norm": 0.5801881807297086,
"learning_rate": 8.138591897899345e-07,
"loss": 0.1792,
"step": 1360
},
{
"epoch": 2.5230202578268877,
"grad_norm": 0.6192805732625175,
"learning_rate": 7.56230310141835e-07,
"loss": 0.179,
"step": 1370
},
{
"epoch": 2.541436464088398,
"grad_norm": 0.6016304803765787,
"learning_rate": 7.005502292383898e-07,
"loss": 0.1799,
"step": 1380
},
{
"epoch": 2.559852670349908,
"grad_norm": 0.6115554388128484,
"learning_rate": 6.46844516156081e-07,
"loss": 0.1758,
"step": 1390
},
{
"epoch": 2.578268876611418,
"grad_norm": 0.5927866192371272,
"learning_rate": 5.951378333139118e-07,
"loss": 0.1796,
"step": 1400
},
{
"epoch": 2.596685082872928,
"grad_norm": 0.6811377078596266,
"learning_rate": 5.454539251480739e-07,
"loss": 0.1808,
"step": 1410
},
{
"epoch": 2.6151012891344383,
"grad_norm": 0.6077558733405245,
"learning_rate": 4.978156072081669e-07,
"loss": 0.1784,
"step": 1420
},
{
"epoch": 2.6335174953959486,
"grad_norm": 0.5643531059218968,
"learning_rate": 4.522447556799875e-07,
"loss": 0.1795,
"step": 1430
},
{
"epoch": 2.6519337016574585,
"grad_norm": 0.5619239404266387,
"learning_rate": 4.0876229733966655e-07,
"loss": 0.1759,
"step": 1440
},
{
"epoch": 2.6703499079189688,
"grad_norm": 0.6328209657239623,
"learning_rate": 3.6738819994379946e-07,
"loss": 0.1808,
"step": 1450
},
{
"epoch": 2.6887661141804786,
"grad_norm": 0.6086714396313364,
"learning_rate": 3.281414630599811e-07,
"loss": 0.1772,
"step": 1460
},
{
"epoch": 2.707182320441989,
"grad_norm": 0.6266202928122402,
"learning_rate": 2.9104010934192795e-07,
"loss": 0.1798,
"step": 1470
},
{
"epoch": 2.7255985267034992,
"grad_norm": 0.61076576372029,
"learning_rate": 2.561011762532212e-07,
"loss": 0.1749,
"step": 1480
},
{
"epoch": 2.744014732965009,
"grad_norm": 0.6615569148493705,
"learning_rate": 2.2334070824347243e-07,
"loss": 0.1805,
"step": 1490
},
{
"epoch": 2.7624309392265194,
"grad_norm": 0.6278844879654595,
"learning_rate": 1.9277374938047989e-07,
"loss": 0.1808,
"step": 1500
},
{
"epoch": 2.7808471454880292,
"grad_norm": 0.57717673931454,
"learning_rate": 1.644143364417794e-07,
"loss": 0.1775,
"step": 1510
},
{
"epoch": 2.7992633517495396,
"grad_norm": 0.5988782652066212,
"learning_rate": 1.3827549246876627e-07,
"loss": 0.1841,
"step": 1520
},
{
"epoch": 2.81767955801105,
"grad_norm": 0.6220611373904551,
"learning_rate": 1.1436922078632395e-07,
"loss": 0.1773,
"step": 1530
},
{
"epoch": 2.8360957642725597,
"grad_norm": 0.6549658009335396,
"learning_rate": 9.270649949073229e-08,
"loss": 0.1796,
"step": 1540
},
{
"epoch": 2.85451197053407,
"grad_norm": 0.5897064017884592,
"learning_rate": 7.329727640837059e-08,
"loss": 0.1781,
"step": 1550
},
{
"epoch": 2.87292817679558,
"grad_norm": 0.6006548076063878,
"learning_rate": 5.615046452753403e-08,
"loss": 0.1789,
"step": 1560
},
{
"epoch": 2.89134438305709,
"grad_norm": 0.5940577802255843,
"learning_rate": 4.127393790546719e-08,
"loss": 0.1801,
"step": 1570
},
{
"epoch": 2.9097605893186005,
"grad_norm": 0.6226132514184528,
"learning_rate": 2.8674528052484162e-08,
"loss": 0.1774,
"step": 1580
},
{
"epoch": 2.9281767955801103,
"grad_norm": 0.6274949409362496,
"learning_rate": 1.8358020794843056e-08,
"loss": 0.1827,
"step": 1590
},
{
"epoch": 2.9465930018416207,
"grad_norm": 0.6038116585141188,
"learning_rate": 1.0329153617812948e-08,
"loss": 0.1781,
"step": 1600
},
{
"epoch": 2.9650092081031305,
"grad_norm": 0.5802100431657281,
"learning_rate": 4.5916134901552445e-09,
"loss": 0.1825,
"step": 1610
},
{
"epoch": 2.983425414364641,
"grad_norm": 0.6005447860892711,
"learning_rate": 1.148035171014139e-09,
"loss": 0.1839,
"step": 1620
},
{
"epoch": 3.0,
"step": 1629,
"total_flos": 2800162131935232.0,
"train_loss": 0.22954999712656582,
"train_runtime": 49788.0682,
"train_samples_per_second": 2.094,
"train_steps_per_second": 0.033
}
],
"logging_steps": 10,
"max_steps": 1629,
"num_input_tokens_seen": 0,
"num_train_epochs": 3,
"save_steps": 500,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 2800162131935232.0,
"train_batch_size": 1,
"trial_name": null,
"trial_params": null
}
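
The JSON above is the Hugging Face Trainer state saved alongside the checkpoint: each entry in "log_history" records loss, grad_norm, and learning_rate every 10 optimizer steps ("logging_steps": 10), and the final entry carries the aggregate run statistics (train_loss, train_runtime, throughput). A minimal sketch of how one might load this file and summarize the loss curve; it assumes the file has been downloaded locally as trainer_state.json and is not part of the checkpoint itself:

# Minimal sketch (assumption: trainer_state.json is in the working directory).
import json

with open("trainer_state.json") as f:
    state = json.load(f)

# Per-step log entries carry a "loss" key; the closing summary entry does not,
# so filtering on that key keeps only the logged training losses.
logged = [e for e in state["log_history"] if "loss" in e]
steps = [e["step"] for e in logged]
losses = [e["loss"] for e in logged]

print(f"global_step={state['global_step']}, epochs={state['epoch']}")
print(f"first logged loss: {losses[0]:.4f} (step {steps[0]})")
print(f"last logged loss:  {losses[-1]:.4f} (step {steps[-1]})")
print(f"reported train_loss: {state['log_history'][-1]['train_loss']:.4f}")

The same lists can be passed to any plotting library to visualize the loss trajectory over the 1629 steps of this cold-start run.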