{
"best_global_step": null,
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 1.5479876160990713,
"eval_steps": 500,
"global_step": 500,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.030959752321981424,
"grad_norm": 9.773098945617676,
"learning_rate": 2.991640866873065e-05,
"loss": 9.245,
"mean_token_accuracy": 0.2123243510723114,
"num_tokens": 5327.0,
"step": 10
},
{
"epoch": 0.06191950464396285,
"grad_norm": 3.128091335296631,
"learning_rate": 2.9823529411764707e-05,
"loss": 6.1602,
"mean_token_accuracy": 0.25044268518686297,
"num_tokens": 10795.0,
"step": 20
},
{
"epoch": 0.09287925696594428,
"grad_norm": 4.203906536102295,
"learning_rate": 2.973065015479876e-05,
"loss": 5.6948,
"mean_token_accuracy": 0.262071692943573,
"num_tokens": 16240.0,
"step": 30
},
{
"epoch": 0.1238390092879257,
"grad_norm": 4.233511924743652,
"learning_rate": 2.9637770897832817e-05,
"loss": 5.2733,
"mean_token_accuracy": 0.27527774721384046,
"num_tokens": 21582.0,
"step": 40
},
{
"epoch": 0.15479876160990713,
"grad_norm": 7.080459117889404,
"learning_rate": 2.9544891640866874e-05,
"loss": 4.9304,
"mean_token_accuracy": 0.2877007365226746,
"num_tokens": 27142.0,
"step": 50
},
{
"epoch": 0.18575851393188855,
"grad_norm": 7.273204326629639,
"learning_rate": 2.945201238390093e-05,
"loss": 4.689,
"mean_token_accuracy": 0.28890604972839357,
"num_tokens": 32801.0,
"step": 60
},
{
"epoch": 0.21671826625386997,
"grad_norm": 2.2185206413269043,
"learning_rate": 2.9359133126934984e-05,
"loss": 4.3965,
"mean_token_accuracy": 0.28117197155952456,
"num_tokens": 38472.0,
"step": 70
},
{
"epoch": 0.2476780185758514,
"grad_norm": 2.0464794635772705,
"learning_rate": 2.926625386996904e-05,
"loss": 4.062,
"mean_token_accuracy": 0.299494668841362,
"num_tokens": 43743.0,
"step": 80
},
{
"epoch": 0.2786377708978328,
"grad_norm": 1.633155345916748,
"learning_rate": 2.9173374613003097e-05,
"loss": 4.0378,
"mean_token_accuracy": 0.30819864571094513,
"num_tokens": 49087.0,
"step": 90
},
{
"epoch": 0.30959752321981426,
"grad_norm": 1.4594247341156006,
"learning_rate": 2.908049535603715e-05,
"loss": 3.8513,
"mean_token_accuracy": 0.3258361428976059,
"num_tokens": 54433.0,
"step": 100
},
{
"epoch": 0.34055727554179566,
"grad_norm": 1.5312635898590088,
"learning_rate": 2.898761609907121e-05,
"loss": 3.9162,
"mean_token_accuracy": 0.32140363454818727,
"num_tokens": 59629.0,
"step": 110
},
{
"epoch": 0.3715170278637771,
"grad_norm": 1.3190491199493408,
"learning_rate": 2.8894736842105263e-05,
"loss": 3.902,
"mean_token_accuracy": 0.3103078156709671,
"num_tokens": 65326.0,
"step": 120
},
{
"epoch": 0.4024767801857585,
"grad_norm": 1.6095689535140991,
"learning_rate": 2.880185758513932e-05,
"loss": 3.7107,
"mean_token_accuracy": 0.3353793561458588,
"num_tokens": 70440.0,
"step": 130
},
{
"epoch": 0.43343653250773995,
"grad_norm": 1.6634972095489502,
"learning_rate": 2.8708978328173377e-05,
"loss": 3.7747,
"mean_token_accuracy": 0.3298566401004791,
"num_tokens": 75712.0,
"step": 140
},
{
"epoch": 0.46439628482972134,
"grad_norm": 1.3906605243682861,
"learning_rate": 2.861609907120743e-05,
"loss": 3.7344,
"mean_token_accuracy": 0.34043932259082793,
"num_tokens": 81272.0,
"step": 150
},
{
"epoch": 0.4953560371517028,
"grad_norm": 1.6273926496505737,
"learning_rate": 2.8523219814241487e-05,
"loss": 3.6722,
"mean_token_accuracy": 0.33802524507045745,
"num_tokens": 86836.0,
"step": 160
},
{
"epoch": 0.5263157894736842,
"grad_norm": 1.595566987991333,
"learning_rate": 2.8430340557275543e-05,
"loss": 3.5486,
"mean_token_accuracy": 0.36929037272930143,
"num_tokens": 91622.0,
"step": 170
},
{
"epoch": 0.5572755417956656,
"grad_norm": 1.9571454524993896,
"learning_rate": 2.83374613003096e-05,
"loss": 3.6849,
"mean_token_accuracy": 0.3387055486440659,
"num_tokens": 97019.0,
"step": 180
},
{
"epoch": 0.5882352941176471,
"grad_norm": 1.6203333139419556,
"learning_rate": 2.8244582043343653e-05,
"loss": 3.5592,
"mean_token_accuracy": 0.36260710954666137,
"num_tokens": 102273.0,
"step": 190
},
{
"epoch": 0.6191950464396285,
"grad_norm": 1.8625439405441284,
"learning_rate": 2.815170278637771e-05,
"loss": 3.4542,
"mean_token_accuracy": 0.3554231733083725,
"num_tokens": 107847.0,
"step": 200
},
{
"epoch": 0.6501547987616099,
"grad_norm": 1.5171610116958618,
"learning_rate": 2.8058823529411766e-05,
"loss": 3.6914,
"mean_token_accuracy": 0.3506886214017868,
"num_tokens": 113499.0,
"step": 210
},
{
"epoch": 0.6811145510835913,
"grad_norm": 1.465408205986023,
"learning_rate": 2.796594427244582e-05,
"loss": 3.6008,
"mean_token_accuracy": 0.3558589071035385,
"num_tokens": 119014.0,
"step": 220
},
{
"epoch": 0.7120743034055728,
"grad_norm": 1.5382874011993408,
"learning_rate": 2.787306501547988e-05,
"loss": 3.5375,
"mean_token_accuracy": 0.3548148155212402,
"num_tokens": 124170.0,
"step": 230
},
{
"epoch": 0.7430340557275542,
"grad_norm": 1.773881196975708,
"learning_rate": 2.7780185758513933e-05,
"loss": 3.573,
"mean_token_accuracy": 0.3465736091136932,
"num_tokens": 129487.0,
"step": 240
},
{
"epoch": 0.7739938080495357,
"grad_norm": 1.7652744054794312,
"learning_rate": 2.7687306501547986e-05,
"loss": 3.6811,
"mean_token_accuracy": 0.33623204231262205,
"num_tokens": 135007.0,
"step": 250
},
{
"epoch": 0.804953560371517,
"grad_norm": 1.7662419080734253,
"learning_rate": 2.7594427244582046e-05,
"loss": 3.505,
"mean_token_accuracy": 0.3567329585552216,
"num_tokens": 140143.0,
"step": 260
},
{
"epoch": 0.8359133126934984,
"grad_norm": 1.9441474676132202,
"learning_rate": 2.75015479876161e-05,
"loss": 3.4804,
"mean_token_accuracy": 0.36218210160732267,
"num_tokens": 145363.0,
"step": 270
},
{
"epoch": 0.8668730650154799,
"grad_norm": 1.745896816253662,
"learning_rate": 2.7408668730650156e-05,
"loss": 3.6519,
"mean_token_accuracy": 0.34941086173057556,
"num_tokens": 150840.0,
"step": 280
},
{
"epoch": 0.8978328173374613,
"grad_norm": 1.928284764289856,
"learning_rate": 2.7315789473684213e-05,
"loss": 3.6138,
"mean_token_accuracy": 0.34826839864254,
"num_tokens": 156077.0,
"step": 290
},
{
"epoch": 0.9287925696594427,
"grad_norm": 2.177100896835327,
"learning_rate": 2.722291021671827e-05,
"loss": 3.4666,
"mean_token_accuracy": 0.36537405848503113,
"num_tokens": 160953.0,
"step": 300
},
{
"epoch": 0.9597523219814241,
"grad_norm": 2.203282594680786,
"learning_rate": 2.7130030959752322e-05,
"loss": 3.5842,
"mean_token_accuracy": 0.34776660799980164,
"num_tokens": 166286.0,
"step": 310
},
{
"epoch": 0.9907120743034056,
"grad_norm": 1.724373459815979,
"learning_rate": 2.7037151702786376e-05,
"loss": 3.4961,
"mean_token_accuracy": 0.35577190220355986,
"num_tokens": 171629.0,
"step": 320
},
{
"epoch": 1.021671826625387,
"grad_norm": 1.7433867454528809,
"learning_rate": 2.6944272445820436e-05,
"loss": 3.4088,
"mean_token_accuracy": 0.36975419521331787,
"num_tokens": 176968.0,
"step": 330
},
{
"epoch": 1.0526315789473684,
"grad_norm": 2.0577471256256104,
"learning_rate": 2.685139318885449e-05,
"loss": 3.5364,
"mean_token_accuracy": 0.35527182221412656,
"num_tokens": 182390.0,
"step": 340
},
{
"epoch": 1.08359133126935,
"grad_norm": 1.7357635498046875,
"learning_rate": 2.6758513931888546e-05,
"loss": 3.4495,
"mean_token_accuracy": 0.3565235286951065,
"num_tokens": 188181.0,
"step": 350
},
{
"epoch": 1.1145510835913313,
"grad_norm": 2.024507761001587,
"learning_rate": 2.6665634674922602e-05,
"loss": 3.3766,
"mean_token_accuracy": 0.3696540713310242,
"num_tokens": 193427.0,
"step": 360
},
{
"epoch": 1.1455108359133126,
"grad_norm": 2.1170592308044434,
"learning_rate": 2.6572755417956655e-05,
"loss": 3.4759,
"mean_token_accuracy": 0.35803447663784027,
"num_tokens": 199046.0,
"step": 370
},
{
"epoch": 1.1764705882352942,
"grad_norm": 2.118878126144409,
"learning_rate": 2.6479876160990712e-05,
"loss": 3.4685,
"mean_token_accuracy": 0.3570233076810837,
"num_tokens": 204089.0,
"step": 380
},
{
"epoch": 1.2074303405572755,
"grad_norm": 2.2280914783477783,
"learning_rate": 2.638699690402477e-05,
"loss": 3.4822,
"mean_token_accuracy": 0.36153341829776764,
"num_tokens": 209656.0,
"step": 390
},
{
"epoch": 1.238390092879257,
"grad_norm": 2.444979667663574,
"learning_rate": 2.6294117647058825e-05,
"loss": 3.3666,
"mean_token_accuracy": 0.3748770415782928,
"num_tokens": 214465.0,
"step": 400
},
{
"epoch": 1.2693498452012384,
"grad_norm": 1.9609659910202026,
"learning_rate": 2.620123839009288e-05,
"loss": 3.4161,
"mean_token_accuracy": 0.3594685852527618,
"num_tokens": 219728.0,
"step": 410
},
{
"epoch": 1.3003095975232197,
"grad_norm": 1.9759095907211304,
"learning_rate": 2.6108359133126935e-05,
"loss": 3.405,
"mean_token_accuracy": 0.3668099522590637,
"num_tokens": 224992.0,
"step": 420
},
{
"epoch": 1.3312693498452013,
"grad_norm": 2.1737940311431885,
"learning_rate": 2.6015479876160992e-05,
"loss": 3.3809,
"mean_token_accuracy": 0.37462269365787504,
"num_tokens": 230431.0,
"step": 430
},
{
"epoch": 1.3622291021671826,
"grad_norm": 2.475351333618164,
"learning_rate": 2.5922600619195045e-05,
"loss": 3.3454,
"mean_token_accuracy": 0.36902076900005343,
"num_tokens": 235868.0,
"step": 440
},
{
"epoch": 1.3931888544891642,
"grad_norm": 2.1027772426605225,
"learning_rate": 2.5829721362229105e-05,
"loss": 3.3967,
"mean_token_accuracy": 0.3779816538095474,
"num_tokens": 241149.0,
"step": 450
},
{
"epoch": 1.4241486068111455,
"grad_norm": 2.613186836242676,
"learning_rate": 2.5736842105263158e-05,
"loss": 3.3383,
"mean_token_accuracy": 0.37455591559410095,
"num_tokens": 246200.0,
"step": 460
},
{
"epoch": 1.4551083591331269,
"grad_norm": 2.1689629554748535,
"learning_rate": 2.5643962848297215e-05,
"loss": 3.4927,
"mean_token_accuracy": 0.3641968876123428,
"num_tokens": 251354.0,
"step": 470
},
{
"epoch": 1.4860681114551084,
"grad_norm": 1.9075849056243896,
"learning_rate": 2.555108359133127e-05,
"loss": 3.4211,
"mean_token_accuracy": 0.3670921057462692,
"num_tokens": 257201.0,
"step": 480
},
{
"epoch": 1.5170278637770898,
"grad_norm": 2.128737211227417,
"learning_rate": 2.5458204334365325e-05,
"loss": 3.3306,
"mean_token_accuracy": 0.3747966349124908,
"num_tokens": 262640.0,
"step": 490
},
{
"epoch": 1.5479876160990713,
"grad_norm": 1.9061874151229858,
"learning_rate": 2.536532507739938e-05,
"loss": 3.3874,
"mean_token_accuracy": 0.3706284284591675,
"num_tokens": 268129.0,
"step": 500
}
],
"logging_steps": 10,
"max_steps": 3230,
"num_input_tokens_seen": 0,
"num_train_epochs": 10,
"save_steps": 100,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": false
},
"attributes": {}
}
},
"total_flos": 1.2696375046569984e+16,
"train_batch_size": 16,
"trial_name": null,
"trial_params": null
}