{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 1.0,
  "eval_steps": 500,
  "global_step": 800,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.0625,
      "grad_norm": 4.144060134887695,
      "learning_rate": 3.1249999999999997e-07,
      "logits/chosen": 3.1186840534210205,
      "logits/rejected": 3.2627792358398438,
      "logps/chosen": -54.367942810058594,
      "logps/rejected": -41.90058517456055,
      "loss": 0.6909,
      "rewards/accuracies": 0.5649999976158142,
      "rewards/chosen": 0.004131872206926346,
      "rewards/margins": 0.004790368489921093,
      "rewards/rejected": -0.0006584958755411208,
      "step": 50
    },
    {
      "epoch": 0.125,
      "grad_norm": 2.6885554790496826,
      "learning_rate": 4.861111111111111e-07,
      "logits/chosen": 3.1031572818756104,
      "logits/rejected": 3.2696139812469482,
      "logps/chosen": -54.12215805053711,
      "logps/rejected": -40.67683029174805,
      "loss": 0.6682,
      "rewards/accuracies": 0.875,
      "rewards/chosen": 0.03350482136011124,
      "rewards/margins": 0.05115475505590439,
      "rewards/rejected": -0.01764993742108345,
      "step": 100
    },
    {
      "epoch": 0.1875,
      "grad_norm": 2.406733751296997,
      "learning_rate": 4.513888888888889e-07,
      "logits/chosen": 3.1083221435546875,
      "logits/rejected": 3.2473440170288086,
      "logps/chosen": -53.839054107666016,
      "logps/rejected": -43.20619583129883,
      "loss": 0.6225,
      "rewards/accuracies": 0.9900000095367432,
      "rewards/chosen": 0.08696311712265015,
      "rewards/margins": 0.1482781618833542,
      "rewards/rejected": -0.06131504476070404,
      "step": 150
    },
    {
      "epoch": 0.25,
      "grad_norm": 2.196671724319458,
      "learning_rate": 4.1666666666666667e-07,
      "logits/chosen": 3.114607810974121,
      "logits/rejected": 3.270479440689087,
      "logps/chosen": -49.034297943115234,
      "logps/rejected": -39.99443817138672,
      "loss": 0.5765,
      "rewards/accuracies": 0.9900000095367432,
      "rewards/chosen": 0.15706641972064972,
      "rewards/margins": 0.2536722421646118,
      "rewards/rejected": -0.09660584479570389,
      "step": 200
    },
    {
      "epoch": 0.3125,
      "grad_norm": 2.1391797065734863,
      "learning_rate": 3.819444444444444e-07,
      "logits/chosen": 3.0528652667999268,
      "logits/rejected": 3.1882102489471436,
      "logps/chosen": -50.195011138916016,
      "logps/rejected": -42.24689865112305,
      "loss": 0.5097,
      "rewards/accuracies": 0.9950000047683716,
      "rewards/chosen": 0.25386032462120056,
      "rewards/margins": 0.4180786609649658,
      "rewards/rejected": -0.16421833634376526,
      "step": 250
    },
    {
      "epoch": 0.375,
      "grad_norm": 2.2767980098724365,
      "learning_rate": 3.472222222222222e-07,
      "logits/chosen": 2.9493257999420166,
      "logits/rejected": 3.099841833114624,
      "logps/chosen": -49.544803619384766,
      "logps/rejected": -41.29837417602539,
      "loss": 0.4578,
      "rewards/accuracies": 1.0,
      "rewards/chosen": 0.2961386442184448,
      "rewards/margins": 0.5633676648139954,
      "rewards/rejected": -0.2672290802001953,
      "step": 300
    },
    {
      "epoch": 0.4375,
      "grad_norm": 1.8134651184082031,
      "learning_rate": 3.1249999999999997e-07,
      "logits/chosen": 2.9306814670562744,
      "logits/rejected": 3.1076226234436035,
      "logps/chosen": -49.26047897338867,
      "logps/rejected": -43.91292953491211,
      "loss": 0.419,
      "rewards/accuracies": 1.0,
      "rewards/chosen": 0.3598053753376007,
      "rewards/margins": 0.677994966506958,
      "rewards/rejected": -0.3181896507740021,
      "step": 350
    },
    {
      "epoch": 0.5,
      "grad_norm": 2.273711919784546,
      "learning_rate": 2.7777777777777776e-07,
      "logits/chosen": 2.9157378673553467,
      "logits/rejected": 3.065505266189575,
      "logps/chosen": -47.57904815673828,
      "logps/rejected": -43.553749084472656,
      "loss": 0.3937,
      "rewards/accuracies": 1.0,
      "rewards/chosen": 0.33699238300323486,
      "rewards/margins": 0.7744642496109009,
      "rewards/rejected": -0.4374719560146332,
      "step": 400
    },
    {
      "epoch": 0.5625,
      "grad_norm": 1.6072301864624023,
      "learning_rate": 2.4305555555555555e-07,
      "logits/chosen": 2.9038708209991455,
      "logits/rejected": 3.0539231300354004,
      "logps/chosen": -47.44837951660156,
      "logps/rejected": -45.976158142089844,
      "loss": 0.3513,
      "rewards/accuracies": 1.0,
      "rewards/chosen": 0.38920170068740845,
      "rewards/margins": 0.9139933586120605,
      "rewards/rejected": -0.5247916579246521,
      "step": 450
    },
    {
      "epoch": 0.625,
      "grad_norm": 2.385540008544922,
      "learning_rate": 2.0833333333333333e-07,
      "logits/chosen": 2.953505754470825,
      "logits/rejected": 3.1365463733673096,
      "logps/chosen": -50.15205001831055,
      "logps/rejected": -49.16044998168945,
      "loss": 0.3337,
      "rewards/accuracies": 1.0,
      "rewards/chosen": 0.3933267295360565,
      "rewards/margins": 0.9976506233215332,
      "rewards/rejected": -0.6043238639831543,
      "step": 500
    },
    {
      "epoch": 0.6875,
      "grad_norm": 2.3325698375701904,
      "learning_rate": 1.736111111111111e-07,
      "logits/chosen": 2.9089577198028564,
      "logits/rejected": 3.068243980407715,
      "logps/chosen": -48.45305633544922,
      "logps/rejected": -48.769771575927734,
      "loss": 0.2935,
      "rewards/accuracies": 1.0,
      "rewards/chosen": 0.41794300079345703,
      "rewards/margins": 1.1487656831741333,
      "rewards/rejected": -0.7308226227760315,
      "step": 550
    },
    {
      "epoch": 0.75,
      "grad_norm": 2.3242642879486084,
      "learning_rate": 1.3888888888888888e-07,
      "logits/chosen": 2.916025400161743,
      "logits/rejected": 3.075576066970825,
      "logps/chosen": -48.39594268798828,
      "logps/rejected": -47.85335922241211,
      "loss": 0.2915,
      "rewards/accuracies": 1.0,
      "rewards/chosen": 0.4126347303390503,
      "rewards/margins": 1.1786757707595825,
      "rewards/rejected": -0.7660410404205322,
      "step": 600
    },
    {
      "epoch": 0.8125,
      "grad_norm": 1.8319804668426514,
      "learning_rate": 1.0416666666666667e-07,
      "logits/chosen": 2.8800783157348633,
      "logits/rejected": 3.0461764335632324,
      "logps/chosen": -49.678184509277344,
      "logps/rejected": -50.534385681152344,
      "loss": 0.2617,
      "rewards/accuracies": 1.0,
      "rewards/chosen": 0.3970913290977478,
      "rewards/margins": 1.3191609382629395,
      "rewards/rejected": -0.9220696091651917,
      "step": 650
    },
    {
      "epoch": 0.875,
      "grad_norm": 1.8796759843826294,
      "learning_rate": 6.944444444444444e-08,
      "logits/chosen": 2.8562612533569336,
      "logits/rejected": 3.0434064865112305,
      "logps/chosen": -50.938533782958984,
      "logps/rejected": -51.9145622253418,
      "loss": 0.2487,
      "rewards/accuracies": 0.9950000047683716,
      "rewards/chosen": 0.44071799516677856,
      "rewards/margins": 1.3961044549942017,
      "rewards/rejected": -0.9553865194320679,
      "step": 700
    },
    {
      "epoch": 0.9375,
      "grad_norm": 1.6690614223480225,
      "learning_rate": 3.472222222222222e-08,
      "logits/chosen": 2.904910087585449,
      "logits/rejected": 3.0448944568634033,
      "logps/chosen": -50.198997497558594,
      "logps/rejected": -52.53700637817383,
      "loss": 0.2723,
      "rewards/accuracies": 1.0,
      "rewards/chosen": 0.3550363779067993,
      "rewards/margins": 1.2905099391937256,
      "rewards/rejected": -0.9354735612869263,
      "step": 750
    },
    {
      "epoch": 1.0,
      "grad_norm": 1.8712382316589355,
      "learning_rate": 0.0,
      "logits/chosen": 2.856696128845215,
      "logits/rejected": 3.0441014766693115,
      "logps/chosen": -49.916290283203125,
      "logps/rejected": -52.326080322265625,
      "loss": 0.2438,
      "rewards/accuracies": 1.0,
      "rewards/chosen": 0.40912488102912903,
      "rewards/margins": 1.3978087902069092,
      "rewards/rejected": -0.9886838793754578,
      "step": 800
    }
  ],
  "logging_steps": 50,
  "max_steps": 800,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 500,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 0.0,
  "train_batch_size": 4,
  "trial_name": null,
  "trial_params": null
}