diff --git "a/logs/training_log.jsonl" "b/logs/training_log.jsonl" new file mode 100644--- /dev/null +++ "b/logs/training_log.jsonl" @@ -0,0 +1,535 @@ +{"timestamp": "2025-06-01T19:29:20.376558", "epoch": 1, "step": 0, "train_loss": 3.524900197982788, "eval_loss": null, "learning_rate": 5e-06} +{"timestamp": "2025-06-01T19:29:48.834210", "epoch": 1, "step": 0, "train_loss": 2.81072735786438, "eval_loss": null, "learning_rate": 5e-06} +{"timestamp": "2025-06-01T19:30:41.154468", "epoch": 1, "step": 50, "train_loss": 0.3133639991283417, "eval_loss": null, "learning_rate": 5e-06} +{"timestamp": "2025-06-01T19:31:32.116446", "epoch": 1, "step": 100, "train_loss": 0.21039174497127533, "eval_loss": null, "learning_rate": 5e-06} +{"timestamp": "2025-06-01T19:32:23.179255", "epoch": 1, "step": 150, "train_loss": 0.17104099690914154, "eval_loss": null, "learning_rate": 5e-06} +{"timestamp": "2025-06-01T19:33:14.259132", "epoch": 1, "step": 200, "train_loss": 0.16185185313224792, "eval_loss": null, "learning_rate": 5e-06} +{"timestamp": "2025-06-01T19:34:05.167897", "epoch": 1, "step": 250, "train_loss": 0.20554713904857635, "eval_loss": null, "learning_rate": 5e-06} +{"timestamp": "2025-06-01T19:34:56.254664", "epoch": 1, "step": 300, "train_loss": 0.1998378038406372, "eval_loss": null, "learning_rate": 5e-06} +{"timestamp": "2025-06-01T19:35:47.356193", "epoch": 1, "step": 350, "train_loss": 0.13730229437351227, "eval_loss": null, "learning_rate": 5e-06} +{"timestamp": "2025-06-01T19:36:38.452550", "epoch": 1, "step": 400, "train_loss": 0.40445923805236816, "eval_loss": null, "learning_rate": 5e-06} +{"timestamp": "2025-06-01T19:37:39.152791", "epoch": 1, "step": 445, "train_loss": 0.20068979742989113, "eval_loss": 0.30075392201542855, "learning_rate": 5e-06} +{"timestamp": "2025-06-01T19:37:43.993522", "epoch": 2, "step": 0, "train_loss": 0.10371153801679611, "eval_loss": null, "learning_rate": 5e-06} +{"timestamp": "2025-06-01T19:38:34.916077", "epoch": 2, "step": 50, "train_loss": 0.1277485489845276, "eval_loss": null, "learning_rate": 5e-06} +{"timestamp": "2025-06-01T19:39:25.921511", "epoch": 2, "step": 100, "train_loss": 0.1855606883764267, "eval_loss": null, "learning_rate": 5e-06} +{"timestamp": "2025-06-01T19:40:16.921424", "epoch": 2, "step": 150, "train_loss": 0.1159445270895958, "eval_loss": null, "learning_rate": 5e-06} +{"timestamp": "2025-06-01T19:41:07.861213", "epoch": 2, "step": 200, "train_loss": 0.19776782393455505, "eval_loss": null, "learning_rate": 5e-06} +{"timestamp": "2025-06-01T19:41:58.861687", "epoch": 2, "step": 250, "train_loss": 0.08470696210861206, "eval_loss": null, "learning_rate": 5e-06} +{"timestamp": "2025-06-01T19:42:49.804838", "epoch": 2, "step": 300, "train_loss": 0.1134047880768776, "eval_loss": null, "learning_rate": 5e-06} +{"timestamp": "2025-06-01T19:43:40.764690", "epoch": 2, "step": 350, "train_loss": 0.18287509679794312, "eval_loss": null, "learning_rate": 5e-06} +{"timestamp": "2025-06-01T19:44:31.823030", "epoch": 2, "step": 400, "train_loss": 0.04173869267106056, "eval_loss": null, "learning_rate": 5e-06} +{"timestamp": "2025-06-01T19:45:32.547953", "epoch": 2, "step": 445, "train_loss": 0.12181951338320636, "eval_loss": 0.3041363428533077, "learning_rate": 5e-06} +{"timestamp": "2025-06-01T19:45:35.805237", "epoch": 3, "step": 0, "train_loss": 0.059496551752090454, "eval_loss": null, "learning_rate": 5e-06} +{"timestamp": "2025-06-01T19:46:26.898851", "epoch": 3, "step": 50, "train_loss": 0.028947368264198303, "eval_loss": null, 
"learning_rate": 5e-06} +{"timestamp": "2025-06-01T19:47:17.930947", "epoch": 3, "step": 100, "train_loss": 0.08451557904481888, "eval_loss": null, "learning_rate": 5e-06} +{"timestamp": "2025-06-01T19:48:08.979778", "epoch": 3, "step": 150, "train_loss": 0.09526653587818146, "eval_loss": null, "learning_rate": 5e-06} +{"timestamp": "2025-06-01T19:49:00.067637", "epoch": 3, "step": 200, "train_loss": 0.1219121441245079, "eval_loss": null, "learning_rate": 5e-06} +{"timestamp": "2025-06-01T19:49:51.128230", "epoch": 3, "step": 250, "train_loss": 0.06129244714975357, "eval_loss": null, "learning_rate": 5e-06} +{"timestamp": "2025-06-01T19:50:42.148474", "epoch": 3, "step": 300, "train_loss": 0.17596393823623657, "eval_loss": null, "learning_rate": 5e-06} +{"timestamp": "2025-06-01T19:51:33.146462", "epoch": 3, "step": 350, "train_loss": 0.05183371901512146, "eval_loss": null, "learning_rate": 5e-06} +{"timestamp": "2025-06-01T19:52:24.148123", "epoch": 3, "step": 400, "train_loss": 0.089112289249897, "eval_loss": null, "learning_rate": 5e-06} +{"timestamp": "2025-06-01T19:53:24.754394", "epoch": 3, "step": 445, "train_loss": 0.08273345859030659, "eval_loss": 0.33536846801638603, "learning_rate": 5e-06} +{"timestamp": "2025-06-01T19:57:05.089163", "epoch": 1, "step": 39, "train_loss": 0.06922850012779236, "eval_loss": null, "learning_rate": 5e-06} +{"timestamp": "2025-06-01T19:57:44.747514", "epoch": 1, "step": 79, "train_loss": 0.04548594728112221, "eval_loss": null, "learning_rate": 5e-06} +{"timestamp": "2025-06-01T19:58:24.429975", "epoch": 1, "step": 119, "train_loss": 0.020628273487091064, "eval_loss": null, "learning_rate": 5e-06} +{"timestamp": "2025-06-01T19:59:04.067181", "epoch": 1, "step": 159, "train_loss": 0.03198147937655449, "eval_loss": null, "learning_rate": 5e-06} +{"timestamp": "2025-06-01T19:59:43.706886", "epoch": 1, "step": 199, "train_loss": 0.07774143666028976, "eval_loss": null, "learning_rate": 5e-06} +{"timestamp": "2025-06-01T20:00:23.424607", "epoch": 1, "step": 239, "train_loss": 0.03321843966841698, "eval_loss": null, "learning_rate": 5e-06} +{"timestamp": "2025-06-01T20:01:03.149727", "epoch": 1, "step": 279, "train_loss": 0.01682046428322792, "eval_loss": null, "learning_rate": 5e-06} +{"timestamp": "2025-06-01T20:01:42.822416", "epoch": 1, "step": 319, "train_loss": 0.06053723022341728, "eval_loss": null, "learning_rate": 5e-06} +{"timestamp": "2025-06-01T20:02:22.562140", "epoch": 1, "step": 359, "train_loss": 0.018926121294498444, "eval_loss": null, "learning_rate": 5e-06} +{"timestamp": "2025-06-01T20:03:02.204325", "epoch": 1, "step": 399, "train_loss": 0.04185406118631363, "eval_loss": null, "learning_rate": 5e-06} +{"timestamp": "2025-06-01T20:03:41.891858", "epoch": 1, "step": 439, "train_loss": 0.017674105241894722, "eval_loss": null, "learning_rate": 5e-06} +{"timestamp": "2025-06-01T20:04:02.595678", "epoch": 1, "step": 445, "train_loss": 1.0, "eval_loss": 0.3030813105404377, "learning_rate": 5e-06} +{"timestamp": "2025-06-01T20:04:52.438475", "epoch": 2, "step": 39, "train_loss": 0.050144582986831665, "eval_loss": null, "learning_rate": 5e-06} +{"timestamp": "2025-06-01T20:05:31.978483", "epoch": 2, "step": 79, "train_loss": 0.028663137927651405, "eval_loss": null, "learning_rate": 5e-06} +{"timestamp": "2025-06-01T20:06:11.566311", "epoch": 2, "step": 119, "train_loss": 0.025636816397309303, "eval_loss": null, "learning_rate": 5e-06} +{"timestamp": "2025-06-01T20:06:51.213690", "epoch": 2, "step": 159, "train_loss": 0.03970843926072121, 
"eval_loss": null, "learning_rate": 5e-06} +{"timestamp": "2025-06-01T20:07:30.746547", "epoch": 2, "step": 199, "train_loss": 0.02714698575437069, "eval_loss": null, "learning_rate": 5e-06} +{"timestamp": "2025-06-01T20:08:10.346184", "epoch": 2, "step": 239, "train_loss": 0.03504960983991623, "eval_loss": null, "learning_rate": 5e-06} +{"timestamp": "2025-06-01T20:08:50.022493", "epoch": 2, "step": 279, "train_loss": 0.034590307623147964, "eval_loss": null, "learning_rate": 5e-06} +{"timestamp": "2025-06-01T20:09:29.726756", "epoch": 2, "step": 319, "train_loss": 0.027135854586958885, "eval_loss": null, "learning_rate": 5e-06} +{"timestamp": "2025-06-01T20:10:09.465031", "epoch": 2, "step": 359, "train_loss": 0.012477712705731392, "eval_loss": null, "learning_rate": 5e-06} +{"timestamp": "2025-06-01T20:10:49.166644", "epoch": 2, "step": 399, "train_loss": 0.0737474262714386, "eval_loss": null, "learning_rate": 5e-06} +{"timestamp": "2025-06-01T20:11:28.948396", "epoch": 2, "step": 439, "train_loss": 0.040819406509399414, "eval_loss": null, "learning_rate": 5e-06} +{"timestamp": "2025-06-01T20:11:49.692735", "epoch": 2, "step": 445, "train_loss": 0.0, "eval_loss": 0.2950605894625187, "learning_rate": 5e-06} +{"timestamp": "2025-06-01T20:12:39.370891", "epoch": 3, "step": 39, "train_loss": 0.010346654802560806, "eval_loss": null, "learning_rate": 5e-06} +{"timestamp": "2025-06-01T20:13:19.092113", "epoch": 3, "step": 79, "train_loss": 0.04593149572610855, "eval_loss": null, "learning_rate": 5e-06} +{"timestamp": "2025-06-01T20:13:58.747143", "epoch": 3, "step": 119, "train_loss": 0.02548792026937008, "eval_loss": null, "learning_rate": 5e-06} +{"timestamp": "2025-06-01T20:14:38.454715", "epoch": 3, "step": 159, "train_loss": 0.0639609694480896, "eval_loss": null, "learning_rate": 5e-06} +{"timestamp": "2025-06-01T20:15:18.084853", "epoch": 3, "step": 199, "train_loss": 0.044488225132226944, "eval_loss": null, "learning_rate": 5e-06} +{"timestamp": "2025-06-01T20:15:57.747591", "epoch": 3, "step": 239, "train_loss": 0.05441173538565636, "eval_loss": null, "learning_rate": 5e-06} +{"timestamp": "2025-06-01T20:16:37.399131", "epoch": 3, "step": 279, "train_loss": 0.017255663871765137, "eval_loss": null, "learning_rate": 5e-06} +{"timestamp": "2025-06-01T20:17:17.045222", "epoch": 3, "step": 319, "train_loss": 0.007170054595917463, "eval_loss": null, "learning_rate": 5e-06} +{"timestamp": "2025-06-01T20:17:56.689284", "epoch": 3, "step": 359, "train_loss": 0.04171650856733322, "eval_loss": null, "learning_rate": 5e-06} +{"timestamp": "2025-06-01T20:18:36.330558", "epoch": 3, "step": 399, "train_loss": 0.024907315149903297, "eval_loss": null, "learning_rate": 5e-06} +{"timestamp": "2025-06-01T20:19:15.978645", "epoch": 3, "step": 439, "train_loss": 0.04432027414441109, "eval_loss": null, "learning_rate": 5e-06} +{"timestamp": "2025-06-01T20:19:36.667634", "epoch": 3, "step": 445, "train_loss": 0.0, "eval_loss": 0.31036733090877533, "learning_rate": 5e-06} +{"timestamp": "2025-06-01T20:32:46.390106", "epoch": 1, "step": 39, "train_loss": 0.08221826702356339, "eval_loss": null, "learning_rate": 5e-06} +{"timestamp": "2025-06-01T20:33:26.074663", "epoch": 1, "step": 79, "train_loss": 0.03235713392496109, "eval_loss": null, "learning_rate": 5e-06} +{"timestamp": "2025-06-01T20:34:05.742676", "epoch": 1, "step": 119, "train_loss": 0.08009503781795502, "eval_loss": null, "learning_rate": 5e-06} +{"timestamp": "2025-06-01T20:34:45.422311", "epoch": 1, "step": 159, "train_loss": 0.017971767112612724, 
"eval_loss": null, "learning_rate": 5e-06} +{"timestamp": "2025-06-01T20:35:25.103304", "epoch": 1, "step": 199, "train_loss": 0.06597942113876343, "eval_loss": null, "learning_rate": 5e-06} +{"timestamp": "2025-06-01T20:36:04.853256", "epoch": 1, "step": 239, "train_loss": 0.07271922379732132, "eval_loss": null, "learning_rate": 5e-06} +{"timestamp": "2025-06-01T20:36:44.603590", "epoch": 1, "step": 279, "train_loss": 0.03367779776453972, "eval_loss": null, "learning_rate": 5e-06} +{"timestamp": "2025-06-01T20:37:24.242397", "epoch": 1, "step": 319, "train_loss": 0.019335070624947548, "eval_loss": null, "learning_rate": 5e-06} +{"timestamp": "2025-06-01T20:38:04.004106", "epoch": 1, "step": 359, "train_loss": 0.01986299268901348, "eval_loss": null, "learning_rate": 5e-06} +{"timestamp": "2025-06-01T20:38:43.701465", "epoch": 1, "step": 399, "train_loss": 0.02587876096367836, "eval_loss": null, "learning_rate": 5e-06} +{"timestamp": "2025-06-01T20:39:23.436295", "epoch": 1, "step": 439, "train_loss": 0.04761381447315216, "eval_loss": null, "learning_rate": 5e-06} +{"timestamp": "2025-06-01T20:39:44.161392", "epoch": 1, "step": 445, "train_loss": 1.0, "eval_loss": 0.3028329715132713, "learning_rate": 5e-06} +{"timestamp": "2025-06-01T20:40:34.179873", "epoch": 2, "step": 39, "train_loss": 0.013632766902446747, "eval_loss": null, "learning_rate": 5e-06} +{"timestamp": "2025-06-01T20:41:13.866794", "epoch": 2, "step": 79, "train_loss": 0.049981217831373215, "eval_loss": null, "learning_rate": 5e-06} +{"timestamp": "2025-06-01T20:41:53.547157", "epoch": 2, "step": 119, "train_loss": 0.013170614838600159, "eval_loss": null, "learning_rate": 5e-06} +{"timestamp": "2025-06-01T20:42:33.233850", "epoch": 2, "step": 159, "train_loss": 0.0693974569439888, "eval_loss": null, "learning_rate": 5e-06} +{"timestamp": "2025-06-01T20:43:12.865739", "epoch": 2, "step": 199, "train_loss": 0.04174257814884186, "eval_loss": null, "learning_rate": 5e-06} +{"timestamp": "2025-06-01T20:43:52.547556", "epoch": 2, "step": 239, "train_loss": 0.015808967873454094, "eval_loss": null, "learning_rate": 5e-06} +{"timestamp": "2025-06-01T20:44:32.187760", "epoch": 2, "step": 279, "train_loss": 0.03163633495569229, "eval_loss": null, "learning_rate": 5e-06} +{"timestamp": "2025-06-01T20:45:11.781795", "epoch": 2, "step": 319, "train_loss": 0.019663220271468163, "eval_loss": null, "learning_rate": 5e-06} +{"timestamp": "2025-06-01T20:45:51.520781", "epoch": 2, "step": 359, "train_loss": 0.020531272515654564, "eval_loss": null, "learning_rate": 5e-06} +{"timestamp": "2025-06-01T20:47:13.105017", "epoch": 1, "step": 39, "train_loss": 0.051606375724077225, "eval_loss": null, "learning_rate": 5e-06} +{"timestamp": "2025-06-01T20:47:52.746360", "epoch": 1, "step": 79, "train_loss": 0.0878080353140831, "eval_loss": null, "learning_rate": 5e-06} +{"timestamp": "2025-06-01T20:48:32.419884", "epoch": 1, "step": 119, "train_loss": 0.0454392209649086, "eval_loss": null, "learning_rate": 5e-06} +{"timestamp": "2025-06-01T20:49:12.112344", "epoch": 1, "step": 159, "train_loss": 0.053957778960466385, "eval_loss": null, "learning_rate": 5e-06} +{"timestamp": "2025-06-01T20:49:51.813361", "epoch": 1, "step": 199, "train_loss": 0.08081021159887314, "eval_loss": null, "learning_rate": 5e-06} +{"timestamp": "2025-06-01T20:50:31.508066", "epoch": 1, "step": 239, "train_loss": 0.04007469490170479, "eval_loss": null, "learning_rate": 5e-06} +{"timestamp": "2025-06-01T20:51:11.228888", "epoch": 1, "step": 279, "train_loss": 0.04472988843917847, 
"eval_loss": null, "learning_rate": 5e-06} +{"timestamp": "2025-06-01T20:51:50.926727", "epoch": 1, "step": 319, "train_loss": 0.02681681141257286, "eval_loss": null, "learning_rate": 5e-06} +{"timestamp": "2025-06-01T20:52:30.552619", "epoch": 1, "step": 359, "train_loss": 0.05535126104950905, "eval_loss": null, "learning_rate": 5e-06} +{"timestamp": "2025-06-01T20:53:10.163040", "epoch": 1, "step": 399, "train_loss": 0.05872930958867073, "eval_loss": null, "learning_rate": 5e-06} +{"timestamp": "2025-06-01T20:53:49.770624", "epoch": 1, "step": 439, "train_loss": 0.042377907782793045, "eval_loss": null, "learning_rate": 5e-06} +{"timestamp": "2025-06-01T20:54:10.417355", "epoch": 1, "step": 445, "train_loss": 0.24801604487505313, "eval_loss": 0.3016563691198826, "learning_rate": 5e-06} +{"timestamp": "2025-06-01T20:54:55.905954", "epoch": 2, "step": 39, "train_loss": 0.04502039775252342, "eval_loss": null, "learning_rate": 5e-06} +{"timestamp": "2025-06-01T20:55:35.515727", "epoch": 2, "step": 79, "train_loss": 0.0230083204805851, "eval_loss": null, "learning_rate": 5e-06} +{"timestamp": "2025-06-01T20:56:15.186486", "epoch": 2, "step": 119, "train_loss": 0.023951154202222824, "eval_loss": null, "learning_rate": 5e-06} +{"timestamp": "2025-06-01T20:56:54.813574", "epoch": 2, "step": 159, "train_loss": 0.05826043710112572, "eval_loss": null, "learning_rate": 5e-06} +{"timestamp": "2025-06-01T20:57:34.532779", "epoch": 2, "step": 199, "train_loss": 0.020296450704336166, "eval_loss": null, "learning_rate": 5e-06} +{"timestamp": "2025-06-01T20:58:14.194052", "epoch": 2, "step": 239, "train_loss": 0.01794370636343956, "eval_loss": null, "learning_rate": 5e-06} +{"timestamp": "2025-06-01T20:58:53.930752", "epoch": 2, "step": 279, "train_loss": 0.05890517309308052, "eval_loss": null, "learning_rate": 5e-06} +{"timestamp": "2025-06-01T20:59:33.685517", "epoch": 2, "step": 319, "train_loss": 0.038709405809640884, "eval_loss": null, "learning_rate": 5e-06} +{"timestamp": "2025-06-01T21:00:13.444876", "epoch": 2, "step": 359, "train_loss": 0.03556858375668526, "eval_loss": null, "learning_rate": 5e-06} +{"timestamp": "2025-06-01T21:00:53.085223", "epoch": 2, "step": 399, "train_loss": 0.0746232271194458, "eval_loss": null, "learning_rate": 5e-06} +{"timestamp": "2025-06-01T21:01:32.837100", "epoch": 2, "step": 439, "train_loss": 0.03451881185173988, "eval_loss": null, "learning_rate": 5e-06} +{"timestamp": "2025-06-01T21:01:53.535090", "epoch": 2, "step": 445, "train_loss": 0.13530343977801568, "eval_loss": 0.2977147123217583, "learning_rate": 5e-06} +{"timestamp": "2025-06-01T21:02:43.996578", "epoch": 3, "step": 39, "train_loss": 0.02297547459602356, "eval_loss": null, "learning_rate": 5e-06} +{"timestamp": "2025-06-01T21:03:23.636297", "epoch": 3, "step": 79, "train_loss": 0.029536208137869835, "eval_loss": null, "learning_rate": 5e-06} +{"timestamp": "2025-06-01T21:04:03.327485", "epoch": 3, "step": 119, "train_loss": 0.033324722200632095, "eval_loss": null, "learning_rate": 5e-06} +{"timestamp": "2025-06-01T21:04:42.995052", "epoch": 3, "step": 159, "train_loss": 0.019266150891780853, "eval_loss": null, "learning_rate": 5e-06} +{"timestamp": "2025-06-01T21:05:22.612132", "epoch": 3, "step": 199, "train_loss": 0.029346229508519173, "eval_loss": null, "learning_rate": 5e-06} +{"timestamp": "2025-06-01T21:06:02.159752", "epoch": 3, "step": 239, "train_loss": 0.015352142974734306, "eval_loss": null, "learning_rate": 5e-06} +{"timestamp": "2025-06-01T21:06:41.772975", "epoch": 3, "step": 279, 
"train_loss": 0.0247634444385767, "eval_loss": null, "learning_rate": 5e-06} +{"timestamp": "2025-06-01T21:07:21.383183", "epoch": 3, "step": 319, "train_loss": 0.04491565749049187, "eval_loss": null, "learning_rate": 5e-06} +{"timestamp": "2025-06-01T21:08:00.938771", "epoch": 3, "step": 359, "train_loss": 0.029564589262008667, "eval_loss": null, "learning_rate": 5e-06} +{"timestamp": "2025-06-01T21:08:40.623419", "epoch": 3, "step": 399, "train_loss": 0.010723487474024296, "eval_loss": null, "learning_rate": 5e-06} +{"timestamp": "2025-06-01T21:09:20.318902", "epoch": 3, "step": 439, "train_loss": 0.02455490082502365, "eval_loss": null, "learning_rate": 5e-06} +{"timestamp": "2025-06-01T21:09:40.990175", "epoch": 3, "step": 445, "train_loss": 0.10217315550507591, "eval_loss": 0.30789587646722794, "learning_rate": 5e-06} +{"timestamp": "2025-06-01T21:28:38.345159", "epoch": 1, "step": 10, "global_step": 10, "train_loss": 0.3654320538043976, "eval_loss": null, "learning_rate": 7.5757575757575764e-06, "grad_norm": 2.772820472717285} +{"timestamp": "2025-06-01T21:28:58.309887", "epoch": 1, "step": 20, "global_step": 20, "train_loss": 0.28977086395025253, "eval_loss": null, "learning_rate": 1.5151515151515153e-05, "grad_norm": 1.516599178314209} +{"timestamp": "2025-06-01T21:29:18.351336", "epoch": 1, "step": 30, "global_step": 30, "train_loss": 0.3377501890063286, "eval_loss": null, "learning_rate": 2.272727272727273e-05, "grad_norm": 1.5503112077713013} +{"timestamp": "2025-06-01T21:29:38.391330", "epoch": 1, "step": 40, "global_step": 40, "train_loss": 0.1996304914355278, "eval_loss": null, "learning_rate": 3.0303030303030306e-05, "grad_norm": 1.155813455581665} +{"timestamp": "2025-06-01T21:30:14.186094", "epoch": 1, "step": 50, "global_step": 50, "train_loss": 0.22122004628181458, "eval_loss": null, "learning_rate": 3.787878787878788e-05, "grad_norm": 1.0997822284698486} +{"timestamp": "2025-06-01T21:30:34.170682", "epoch": 1, "step": 60, "global_step": 60, "train_loss": 0.25290004909038544, "eval_loss": null, "learning_rate": 4.545454545454546e-05, "grad_norm": 1.2135834693908691} +{"timestamp": "2025-06-01T21:30:54.215110", "epoch": 1, "step": 70, "global_step": 70, "train_loss": 0.24107269197702408, "eval_loss": null, "learning_rate": 4.999451708687114e-05, "grad_norm": 0.9687574505805969} +{"timestamp": "2025-06-01T21:31:14.272116", "epoch": 1, "step": 80, "global_step": 80, "train_loss": 0.12881212309002876, "eval_loss": null, "learning_rate": 4.9932861930611454e-05, "grad_norm": 0.6741231679916382} +{"timestamp": "2025-06-01T21:31:34.324325", "epoch": 1, "step": 90, "global_step": 90, "train_loss": 0.13792214542627335, "eval_loss": null, "learning_rate": 4.980286753286195e-05, "grad_norm": 0.6903950572013855} +{"timestamp": "2025-06-01T21:32:10.150901", "epoch": 1, "step": 100, "global_step": 100, "train_loss": 0.1145484633743763, "eval_loss": null, "learning_rate": 4.960489019923105e-05, "grad_norm": 0.5706943869590759} +{"timestamp": "2025-06-01T21:32:30.225373", "epoch": 1, "step": 110, "global_step": 110, "train_loss": 0.25623634457588196, "eval_loss": null, "learning_rate": 4.933947257182901e-05, "grad_norm": 0.8630030155181885} +{"timestamp": "2025-06-01T21:32:50.294602", "epoch": 1, "step": 120, "global_step": 120, "train_loss": 0.235854160040617, "eval_loss": null, "learning_rate": 4.900734214192358e-05, "grad_norm": 0.8920634984970093} +{"timestamp": "2025-06-01T21:33:10.371980", "epoch": 1, "step": 130, "global_step": 130, "train_loss": 0.13601172342896461, "eval_loss": 
null, "learning_rate": 4.860940925593703e-05, "grad_norm": 0.606371283531189} +{"timestamp": "2025-06-01T21:33:30.433103", "epoch": 1, "step": 140, "global_step": 140, "train_loss": 0.28409121185541153, "eval_loss": null, "learning_rate": 4.814676462024988e-05, "grad_norm": 0.9493577480316162} +{"timestamp": "2025-06-01T21:34:06.228542", "epoch": 1, "step": 150, "global_step": 150, "train_loss": 0.15630479343235493, "eval_loss": null, "learning_rate": 4.762067631165049e-05, "grad_norm": 0.6829037070274353} +{"timestamp": "2025-06-01T21:34:26.313918", "epoch": 1, "step": 160, "global_step": 160, "train_loss": 0.11771508678793907, "eval_loss": null, "learning_rate": 4.70325863016248e-05, "grad_norm": 0.5817739963531494} +{"timestamp": "2025-06-01T21:34:46.394953", "epoch": 1, "step": 170, "global_step": 170, "train_loss": 0.19300248846411705, "eval_loss": null, "learning_rate": 4.638410650401267e-05, "grad_norm": 0.6763443946838379} +{"timestamp": "2025-06-01T21:35:06.473979", "epoch": 1, "step": 180, "global_step": 180, "train_loss": 0.22782772034406662, "eval_loss": null, "learning_rate": 4.567701435686404e-05, "grad_norm": 0.7709096670150757} +{"timestamp": "2025-06-01T21:35:26.476050", "epoch": 1, "step": 190, "global_step": 190, "train_loss": 0.1449054293334484, "eval_loss": null, "learning_rate": 4.491324795060491e-05, "grad_norm": 0.616167426109314} +{"timestamp": "2025-06-01T21:36:02.276543", "epoch": 1, "step": 200, "global_step": 200, "train_loss": 0.14596863836050034, "eval_loss": null, "learning_rate": 4.4094900715866064e-05, "grad_norm": 0.5989382266998291} +{"timestamp": "2025-06-01T21:36:22.324115", "epoch": 1, "step": 210, "global_step": 210, "train_loss": 0.16977427899837494, "eval_loss": null, "learning_rate": 4.3224215685535294e-05, "grad_norm": 0.7380771636962891} +{"timestamp": "2025-06-01T21:36:42.372868", "epoch": 1, "step": 220, "global_step": 220, "train_loss": 0.26240008883178234, "eval_loss": null, "learning_rate": 4.230357934676017e-05, "grad_norm": 0.7695736289024353} +{"timestamp": "2025-06-01T21:37:03.104434", "epoch": 1, "step": 445, "global_step": 0.0, "train_loss": 0.3755896493792534, "eval_loss": 4.211367764821722e-05, "learning_rate": null, "grad_norm": null} +{"timestamp": "2025-06-01T21:37:33.536774", "epoch": 2, "step": 10, "global_step": 232, "train_loss": 0.21501633524894714, "eval_loss": null, "learning_rate": 4.113644219309877e-05, "grad_norm": 0.9702261090278625} +{"timestamp": "2025-06-01T21:37:53.597495", "epoch": 2, "step": 20, "global_step": 242, "train_loss": 0.14239238016307354, "eval_loss": null, "learning_rate": 4.011497787155938e-05, "grad_norm": 1.003491759300232} +{"timestamp": "2025-06-01T21:38:29.393834", "epoch": 2, "step": 30, "global_step": 252, "train_loss": 0.2480131760239601, "eval_loss": null, "learning_rate": 3.905208444630327e-05, "grad_norm": 1.1059428453445435} +{"timestamp": "2025-06-01T21:38:49.452342", "epoch": 2, "step": 40, "global_step": 262, "train_loss": 0.0967799685895443, "eval_loss": null, "learning_rate": 3.795067523432826e-05, "grad_norm": 0.5218726396560669} +{"timestamp": "2025-06-01T21:39:09.511004", "epoch": 2, "step": 50, "global_step": 272, "train_loss": 0.2043626606464386, "eval_loss": null, "learning_rate": 3.681376912172636e-05, "grad_norm": 0.9244187474250793} +{"timestamp": "2025-06-01T21:39:29.564498", "epoch": 2, "step": 60, "global_step": 282, "train_loss": 0.1412193588912487, "eval_loss": null, "learning_rate": 3.564448228912682e-05, "grad_norm": 0.734333336353302} +{"timestamp": 
"2025-06-01T21:39:49.612536", "epoch": 2, "step": 70, "global_step": 292, "train_loss": 0.07862233370542526, "eval_loss": null, "learning_rate": 3.444601967046168e-05, "grad_norm": 0.45494452118873596} +{"timestamp": "2025-06-01T21:40:25.357437", "epoch": 2, "step": 80, "global_step": 302, "train_loss": 0.14893431216478348, "eval_loss": null, "learning_rate": 3.322166616846458e-05, "grad_norm": 0.8110602498054504} +{"timestamp": "2025-06-01T21:40:45.396211", "epoch": 2, "step": 90, "global_step": 312, "train_loss": 0.06204509176313877, "eval_loss": null, "learning_rate": 3.1974777650980735e-05, "grad_norm": 0.40773648023605347} +{"timestamp": "2025-06-01T21:41:05.372969", "epoch": 2, "step": 100, "global_step": 322, "train_loss": 0.1343545764684677, "eval_loss": null, "learning_rate": 3.0708771752766394e-05, "grad_norm": 0.6392779350280762} +{"timestamp": "2025-06-01T21:41:25.410825", "epoch": 2, "step": 110, "global_step": 332, "train_loss": 0.16127215884625912, "eval_loss": null, "learning_rate": 2.9427118507989586e-05, "grad_norm": 0.7108875513076782} +{"timestamp": "2025-06-01T21:41:45.458391", "epoch": 2, "step": 120, "global_step": 342, "train_loss": 0.08834659680724144, "eval_loss": null, "learning_rate": 2.8133330839107608e-05, "grad_norm": 0.5206277966499329} +{"timestamp": "2025-06-01T21:42:21.275019", "epoch": 2, "step": 130, "global_step": 352, "train_loss": 0.26349911093711853, "eval_loss": null, "learning_rate": 2.6830954928190794e-05, "grad_norm": 1.0775514841079712} +{"timestamp": "2025-06-01T21:42:41.331798", "epoch": 2, "step": 140, "global_step": 362, "train_loss": 0.057911599054932594, "eval_loss": null, "learning_rate": 2.5523560497083926e-05, "grad_norm": 0.3855135440826416} +{"timestamp": "2025-06-01T21:43:01.395258", "epoch": 2, "step": 150, "global_step": 372, "train_loss": 0.14316857606172562, "eval_loss": null, "learning_rate": 2.4214731023046793e-05, "grad_norm": 0.696833074092865} +{"timestamp": "2025-06-01T21:43:21.393868", "epoch": 2, "step": 160, "global_step": 382, "train_loss": 0.09762179665267467, "eval_loss": null, "learning_rate": 2.2908053916692117e-05, "grad_norm": 0.5986480712890625} +{"timestamp": "2025-06-01T21:43:41.464467", "epoch": 2, "step": 170, "global_step": 392, "train_loss": 0.13424796983599663, "eval_loss": null, "learning_rate": 2.1607110689142393e-05, "grad_norm": 0.8275265693664551} +{"timestamp": "2025-06-01T21:44:17.310538", "epoch": 2, "step": 180, "global_step": 402, "train_loss": 0.07054666429758072, "eval_loss": null, "learning_rate": 2.031546713535688e-05, "grad_norm": 0.4495707154273987} +{"timestamp": "2025-06-01T21:44:37.391615", "epoch": 2, "step": 190, "global_step": 412, "train_loss": 0.14264677464962006, "eval_loss": null, "learning_rate": 1.9036663560535483e-05, "grad_norm": 0.7072724103927612} +{"timestamp": "2025-06-01T21:44:57.460793", "epoch": 2, "step": 200, "global_step": 422, "train_loss": 0.08645220659673214, "eval_loss": null, "learning_rate": 1.7774205076388206e-05, "grad_norm": 0.507695734500885} +{"timestamp": "2025-06-01T21:45:17.536882", "epoch": 2, "step": 210, "global_step": 432, "train_loss": 0.06477563455700874, "eval_loss": null, "learning_rate": 1.6531551993867717e-05, "grad_norm": 0.4929099380970001} +{"timestamp": "2025-06-01T21:45:37.612832", "epoch": 2, "step": 220, "global_step": 442, "train_loss": 0.11159641668200493, "eval_loss": null, "learning_rate": 1.5312110338697426e-05, "grad_norm": 0.6396675109863281} +{"timestamp": "2025-06-01T21:45:58.399654", "epoch": 2, "step": 445, "global_step": 
0.0, "train_loss": 0.35234986141324043, "eval_loss": 1.5071302734130489e-05, "learning_rate": null, "grad_norm": null} +{"timestamp": "2025-06-01T21:46:45.766323", "epoch": 3, "step": 10, "global_step": 454, "train_loss": 0.025412561371922493, "eval_loss": null, "learning_rate": 1.388412052037682e-05, "grad_norm": 0.29059073328971863} +{"timestamp": "2025-06-01T21:47:05.818766", "epoch": 3, "step": 20, "global_step": 464, "train_loss": 0.047711532562971115, "eval_loss": null, "learning_rate": 1.272740615962148e-05, "grad_norm": 0.45620161294937134} +{"timestamp": "2025-06-01T21:47:25.883576", "epoch": 3, "step": 30, "global_step": 474, "train_loss": 0.02798507083207369, "eval_loss": null, "learning_rate": 1.1604330125525079e-05, "grad_norm": 0.4250833988189697} +{"timestamp": "2025-06-01T21:47:45.935422", "epoch": 3, "step": 40, "global_step": 484, "train_loss": 0.05298864468932152, "eval_loss": null, "learning_rate": 1.0517970691433035e-05, "grad_norm": 0.5790302753448486} +{"timestamp": "2025-06-01T21:48:05.991633", "epoch": 3, "step": 50, "global_step": 494, "train_loss": 0.048333559185266495, "eval_loss": null, "learning_rate": 9.471305493042243e-06, "grad_norm": 0.5165087580680847} +{"timestamp": "2025-06-01T21:48:41.773813", "epoch": 3, "step": 60, "global_step": 504, "train_loss": 0.05257786065340042, "eval_loss": null, "learning_rate": 8.467203366908707e-06, "grad_norm": 0.552693784236908} +{"timestamp": "2025-06-01T21:49:01.811589", "epoch": 3, "step": 70, "global_step": 514, "train_loss": 0.07181144878268242, "eval_loss": null, "learning_rate": 7.508416487165862e-06, "grad_norm": 0.7754930257797241} +{"timestamp": "2025-06-01T21:49:21.793845", "epoch": 3, "step": 80, "global_step": 524, "train_loss": 0.07094884291291237, "eval_loss": null, "learning_rate": 6.5975728220066425e-06, "grad_norm": 0.6762060523033142} +{"timestamp": "2025-06-01T21:49:41.816944", "epoch": 3, "step": 90, "global_step": 534, "train_loss": 0.03771686181426048, "eval_loss": null, "learning_rate": 5.737168930605272e-06, "grad_norm": 0.40150219202041626} +{"timestamp": "2025-06-01T21:50:01.854890", "epoch": 3, "step": 100, "global_step": 544, "train_loss": 0.029204631224274635, "eval_loss": null, "learning_rate": 4.929563120222141e-06, "grad_norm": 0.4076344966888428} +{"timestamp": "2025-06-01T21:50:37.572835", "epoch": 3, "step": 110, "global_step": 554, "train_loss": 0.04074634239077568, "eval_loss": null, "learning_rate": 4.176968982247514e-06, "grad_norm": 0.42891767621040344} +{"timestamp": "2025-06-01T21:50:57.601007", "epoch": 3, "step": 120, "global_step": 564, "train_loss": 0.045719388872385025, "eval_loss": null, "learning_rate": 3.4814493249014116e-06, "grad_norm": 0.62813800573349} +{"timestamp": "2025-06-01T21:51:17.627024", "epoch": 3, "step": 130, "global_step": 574, "train_loss": 0.03330159839242697, "eval_loss": null, "learning_rate": 2.8449105192196316e-06, "grad_norm": 0.40329810976982117} +{"timestamp": "2025-06-01T21:51:37.598967", "epoch": 3, "step": 140, "global_step": 584, "train_loss": 0.07887263223528862, "eval_loss": null, "learning_rate": 2.269097273823287e-06, "grad_norm": 0.7101573944091797} +{"timestamp": "2025-06-01T21:51:57.623482", "epoch": 3, "step": 150, "global_step": 594, "train_loss": 0.03782119043171406, "eval_loss": null, "learning_rate": 1.7555878527937164e-06, "grad_norm": 0.4536014497280121} +{"timestamp": "2025-06-01T21:52:33.398884", "epoch": 3, "step": 160, "global_step": 604, "train_loss": 0.06211592070758343, "eval_loss": null, "learning_rate": 
1.305789749760361e-06, "grad_norm": 0.7652478218078613} +{"timestamp": "2025-06-01T21:52:53.375947", "epoch": 3, "step": 170, "global_step": 614, "train_loss": 0.030361107550561428, "eval_loss": null, "learning_rate": 9.209358300585474e-07, "grad_norm": 0.4066828191280365} +{"timestamp": "2025-06-01T21:53:13.415093", "epoch": 3, "step": 180, "global_step": 624, "train_loss": 0.052656447514891624, "eval_loss": null, "learning_rate": 6.020809515313142e-07, "grad_norm": 0.5592647194862366} +{"timestamp": "2025-06-01T21:53:33.459282", "epoch": 3, "step": 190, "global_step": 634, "train_loss": 0.07712651789188385, "eval_loss": null, "learning_rate": 3.5009907323737825e-07, "grad_norm": 0.7295299172401428} +{"timestamp": "2025-06-01T21:53:53.453342", "epoch": 3, "step": 200, "global_step": 644, "train_loss": 0.040785291232168674, "eval_loss": null, "learning_rate": 1.6568085999008888e-07, "grad_norm": 0.4535972774028778} +{"timestamp": "2025-06-01T21:54:29.282510", "epoch": 3, "step": 210, "global_step": 654, "train_loss": 0.03364027012139559, "eval_loss": null, "learning_rate": 4.9331789293211026e-08, "grad_norm": 0.4371737539768219} +{"timestamp": "2025-06-01T21:54:49.341572", "epoch": 3, "step": 220, "global_step": 664, "train_loss": 0.04337713774293661, "eval_loss": null, "learning_rate": 1.3707658621964215e-09, "grad_norm": 0.5807801485061646} +{"timestamp": "2025-06-01T21:55:10.125779", "epoch": 3, "step": 445, "global_step": 0.0, "train_loss": 0.4152634061872959, "eval_loss": 0.0, "learning_rate": null, "grad_norm": null} +{"timestamp": "2025-06-01T22:03:59.670279", "epoch": 1, "step": 10, "global_step": 10, "train_loss": 0.8434943556785583, "train_perplexity": 2.3244753453913645, "eval_loss": null, "eval_perplexity": null, "learning_rate": 3.0303030303030305e-06, "grad_norm": 102.3875732421875} +{"timestamp": "2025-06-01T22:04:03.549851", "epoch": 1, "step": 20, "global_step": 20, "train_loss": 0.22898440062999725, "train_perplexity": 1.2573224254191329, "eval_loss": null, "eval_perplexity": null, "learning_rate": 6.060606060606061e-06, "grad_norm": 1.7053817510604858} +{"timestamp": "2025-06-01T22:04:07.432481", "epoch": 1, "step": 30, "global_step": 30, "train_loss": 0.2637656033039093, "train_perplexity": 1.3018230175216408, "eval_loss": null, "eval_perplexity": null, "learning_rate": 9.090909090909091e-06, "grad_norm": 1.5768458843231201} +{"timestamp": "2025-06-01T22:04:11.307540", "epoch": 1, "step": 40, "global_step": 40, "train_loss": 0.1735742725431919, "train_perplexity": 1.1895490343206996, "eval_loss": null, "eval_perplexity": null, "learning_rate": 1.2121212121212122e-05, "grad_norm": 1.1902400255203247} +{"timestamp": "2025-06-01T22:05:02.055315", "epoch": 1, "step": 10, "global_step": 10, "train_loss": 0.8137438297271729, "train_perplexity": 2.2563395447315457, "eval_loss": null, "eval_perplexity": null, "learning_rate": 3.0303030303030305e-06, "grad_norm": 72.95942687988281} +{"timestamp": "2025-06-01T22:05:05.958744", "epoch": 1, "step": 20, "global_step": 20, "train_loss": 0.29271482676267624, "train_perplexity": 1.3400605865214401, "eval_loss": null, "eval_perplexity": null, "learning_rate": 6.060606060606061e-06, "grad_norm": 1.6651322841644287} +{"timestamp": "2025-06-01T22:05:09.857689", "epoch": 1, "step": 30, "global_step": 30, "train_loss": 0.3038218915462494, "train_perplexity": 1.3550276927087428, "eval_loss": null, "eval_perplexity": null, "learning_rate": 9.090909090909091e-06, "grad_norm": 2.0858819484710693} +{"timestamp": "2025-06-01T22:05:13.753503", 
"epoch": 1, "step": 40, "global_step": 40, "train_loss": 0.1266566701233387, "train_perplexity": 1.1350272621080246, "eval_loss": null, "eval_perplexity": null, "learning_rate": 1.2121212121212122e-05, "grad_norm": 0.9732949137687683} +{"timestamp": "2025-06-01T22:05:20.575502", "epoch": 1, "step": 50, "global_step": 50, "train_loss": 0.1560215763747692, "train_perplexity": 1.1688514223941366, "eval_loss": null, "eval_perplexity": null, "learning_rate": 1.5151515151515153e-05, "grad_norm": 1.2803590297698975} +{"timestamp": "2025-06-01T22:05:24.473369", "epoch": 1, "step": 60, "global_step": 60, "train_loss": 0.2502725012600422, "train_perplexity": 1.2843753629099428, "eval_loss": null, "eval_perplexity": null, "learning_rate": 1.8181818181818182e-05, "grad_norm": 1.3564119338989258} +{"timestamp": "2025-06-01T22:05:28.369659", "epoch": 1, "step": 70, "global_step": 70, "train_loss": 0.12209589406847954, "train_perplexity": 1.12986244369211, "eval_loss": null, "eval_perplexity": null, "learning_rate": 1.9997806834748455e-05, "grad_norm": 0.901718258857727} +{"timestamp": "2025-06-01T22:05:32.252739", "epoch": 1, "step": 80, "global_step": 80, "train_loss": 0.23546278104186058, "train_perplexity": 1.2654942800020523, "eval_loss": null, "eval_perplexity": null, "learning_rate": 1.997314477224458e-05, "grad_norm": 1.2605928182601929} +{"timestamp": "2025-06-01T22:05:36.153750", "epoch": 1, "step": 90, "global_step": 90, "train_loss": 0.10111308470368385, "train_perplexity": 1.1064017518035323, "eval_loss": null, "eval_perplexity": null, "learning_rate": 1.9921147013144782e-05, "grad_norm": 0.7848919630050659} +{"timestamp": "2025-06-01T22:05:42.950848", "epoch": 1, "step": 100, "global_step": 100, "train_loss": 0.20313000679016113, "train_perplexity": 1.2252317463402034, "eval_loss": null, "eval_perplexity": null, "learning_rate": 1.984195607969242e-05, "grad_norm": 1.1468751430511475} +{"timestamp": "2025-06-01T22:05:46.861676", "epoch": 1, "step": 110, "global_step": 110, "train_loss": 0.18049801141023636, "train_perplexity": 1.197813739517977, "eval_loss": null, "eval_perplexity": null, "learning_rate": 1.9735789028731603e-05, "grad_norm": 1.0690646171569824} +{"timestamp": "2025-06-01T22:05:50.767304", "epoch": 1, "step": 120, "global_step": 120, "train_loss": 0.27773335576057434, "train_perplexity": 1.32013414410992, "eval_loss": null, "eval_perplexity": null, "learning_rate": 1.9602936856769432e-05, "grad_norm": 1.3712029457092285} +{"timestamp": "2025-06-01T22:05:54.676969", "epoch": 1, "step": 130, "global_step": 130, "train_loss": 0.1380462571978569, "train_perplexity": 1.1480286536345394, "eval_loss": null, "eval_perplexity": null, "learning_rate": 1.944376370237481e-05, "grad_norm": 0.8374546766281128} +{"timestamp": "2025-06-01T22:05:58.581729", "epoch": 1, "step": 140, "global_step": 140, "train_loss": 0.16407223790884018, "train_perplexity": 1.178299429905231, "eval_loss": null, "eval_perplexity": null, "learning_rate": 1.925870584809995e-05, "grad_norm": 0.966931164264679} +{"timestamp": "2025-06-01T22:06:05.395296", "epoch": 1, "step": 150, "global_step": 150, "train_loss": 0.2105036936700344, "train_perplexity": 1.2342996123092556, "eval_loss": null, "eval_perplexity": null, "learning_rate": 1.9048270524660197e-05, "grad_norm": 1.0160281658172607} +{"timestamp": "2025-06-01T22:06:09.295716", "epoch": 1, "step": 160, "global_step": 160, "train_loss": 0.10738411918282509, "train_perplexity": 1.1133618359915674, "eval_loss": null, "eval_perplexity": null, "learning_rate": 
1.8813034520649923e-05, "grad_norm": 0.7891165018081665} +{"timestamp": "2025-06-01T22:06:13.194579", "epoch": 1, "step": 170, "global_step": 170, "train_loss": 0.14196841046214104, "train_perplexity": 1.1525402397484021, "eval_loss": null, "eval_perplexity": null, "learning_rate": 1.855364260160507e-05, "grad_norm": 0.7959848046302795} +{"timestamp": "2025-06-01T22:06:17.093075", "epoch": 1, "step": 180, "global_step": 180, "train_loss": 0.1399242952466011, "train_perplexity": 1.1501867209590906, "eval_loss": null, "eval_perplexity": null, "learning_rate": 1.827080574274562e-05, "grad_norm": 0.8153330683708191} +{"timestamp": "2025-06-01T22:06:20.991307", "epoch": 1, "step": 190, "global_step": 190, "train_loss": 0.12908079847693443, "train_perplexity": 1.1377820515087322, "eval_loss": null, "eval_perplexity": null, "learning_rate": 1.7965299180241963e-05, "grad_norm": 0.7708629369735718} +{"timestamp": "2025-06-01T22:06:27.790970", "epoch": 1, "step": 200, "global_step": 200, "train_loss": 0.12317676842212677, "train_perplexity": 1.1310843432716442, "eval_loss": null, "eval_perplexity": null, "learning_rate": 1.7637960286346423e-05, "grad_norm": 0.7775571346282959} +{"timestamp": "2025-06-01T22:06:31.693835", "epoch": 1, "step": 210, "global_step": 210, "train_loss": 0.14282390475273132, "train_perplexity": 1.1535266532185677, "eval_loss": null, "eval_perplexity": null, "learning_rate": 1.7289686274214116e-05, "grad_norm": 0.8746703863143921} +{"timestamp": "2025-06-01T22:06:35.594329", "epoch": 1, "step": 220, "global_step": 220, "train_loss": 0.17150412499904633, "train_perplexity": 1.187089039463344, "eval_loss": null, "eval_perplexity": null, "learning_rate": 1.692143173870407e-05, "grad_norm": 0.9210086464881897} +{"timestamp": "2025-06-01T22:06:39.466313", "epoch": 1, "step": 445, "global_step": 222, "train_loss": 0.38245899953485074, "train_perplexity": 1.4658847711679721, "eval_loss": 0.25567520707845687, "eval_perplexity": 1.2913332439236922, "learning_rate": 1.684547105928689e-05, "grad_norm": null} +{"timestamp": "2025-06-01T22:06:53.844308", "epoch": 2, "step": 10, "global_step": 232, "train_loss": 0.09346754848957062, "train_perplexity": 1.0979749718169052, "eval_loss": null, "eval_perplexity": null, "learning_rate": 1.645457687723951e-05, "grad_norm": 0.7057094573974609} +{"timestamp": "2025-06-01T22:06:57.746504", "epoch": 2, "step": 20, "global_step": 242, "train_loss": 0.1060151606798172, "train_perplexity": 1.111838732609789, "eval_loss": null, "eval_perplexity": null, "learning_rate": 1.6045991148623752e-05, "grad_norm": 1.1032780408859253} +{"timestamp": "2025-06-01T22:07:04.528826", "epoch": 2, "step": 30, "global_step": 252, "train_loss": 0.0718829445540905, "train_perplexity": 1.0745295571654796, "eval_loss": null, "eval_perplexity": null, "learning_rate": 1.5620833778521306e-05, "grad_norm": 0.626437783241272} +{"timestamp": "2025-06-01T22:07:08.423936", "epoch": 2, "step": 40, "global_step": 262, "train_loss": 0.06429124996066093, "train_perplexity": 1.06640294335156, "eval_loss": null, "eval_perplexity": null, "learning_rate": 1.5180270093731305e-05, "grad_norm": 0.6892837285995483} +{"timestamp": "2025-06-01T22:07:12.325149", "epoch": 2, "step": 50, "global_step": 272, "train_loss": 0.1755613572895527, "train_perplexity": 1.1919151190887807, "eval_loss": null, "eval_perplexity": null, "learning_rate": 1.4725507648690542e-05, "grad_norm": 1.383681058883667} +{"timestamp": "2025-06-01T22:07:16.222272", "epoch": 2, "step": 60, "global_step": 282, "train_loss": 
0.07900071516633034, "train_perplexity": 1.0822050960266851, "eval_loss": null, "eval_perplexity": null, "learning_rate": 1.4257792915650728e-05, "grad_norm": 0.7304014563560486} +{"timestamp": "2025-06-01T22:07:20.120050", "epoch": 2, "step": 70, "global_step": 292, "train_loss": 0.046099383383989334, "train_perplexity": 1.0471784779263649, "eval_loss": null, "eval_perplexity": null, "learning_rate": 1.3778407868184674e-05, "grad_norm": 0.5057010650634766} +{"timestamp": "2025-06-01T22:07:26.935749", "epoch": 2, "step": 80, "global_step": 302, "train_loss": 0.10349602624773979, "train_perplexity": 1.1090413863005961, "eval_loss": null, "eval_perplexity": null, "learning_rate": 1.3288666467385834e-05, "grad_norm": 0.836716890335083} +{"timestamp": "2025-06-01T22:07:30.836151", "epoch": 2, "step": 90, "global_step": 312, "train_loss": 0.1171758659183979, "train_perplexity": 1.1243171413712918, "eval_loss": null, "eval_perplexity": null, "learning_rate": 1.2789911060392295e-05, "grad_norm": 0.9622392654418945} +{"timestamp": "2025-06-01T22:07:34.718198", "epoch": 2, "step": 100, "global_step": 322, "train_loss": 0.09719482064247131, "train_perplexity": 1.102075059678872, "eval_loss": null, "eval_perplexity": null, "learning_rate": 1.2283508701106559e-05, "grad_norm": 0.8060721158981323} +{"timestamp": "2025-06-01T22:07:38.622386", "epoch": 2, "step": 110, "global_step": 332, "train_loss": 0.08367760479450226, "train_perplexity": 1.087278303985377, "eval_loss": null, "eval_perplexity": null, "learning_rate": 1.1770847403195836e-05, "grad_norm": 0.7443540692329407} +{"timestamp": "2025-06-01T22:07:42.524450", "epoch": 2, "step": 120, "global_step": 342, "train_loss": 0.11173233762383461, "train_perplexity": 1.1182135168983098, "eval_loss": null, "eval_perplexity": null, "learning_rate": 1.1253332335643043e-05, "grad_norm": 0.9510127902030945} +{"timestamp": "2025-06-01T22:07:49.353989", "epoch": 2, "step": 130, "global_step": 352, "train_loss": 0.1161351129412651, "train_perplexity": 1.1231476136594583, "eval_loss": null, "eval_perplexity": null, "learning_rate": 1.0732381971276318e-05, "grad_norm": 0.8503321409225464} +{"timestamp": "2025-06-01T22:07:53.257479", "epoch": 2, "step": 140, "global_step": 362, "train_loss": 0.057295866310596466, "train_perplexity": 1.0589690773245934, "eval_loss": null, "eval_perplexity": null, "learning_rate": 1.0209424198833571e-05, "grad_norm": 0.6340060234069824} +{"timestamp": "2025-06-01T22:07:57.159212", "epoch": 2, "step": 150, "global_step": 372, "train_loss": 0.06733607687056065, "train_perplexity": 1.069654904046588, "eval_loss": null, "eval_perplexity": null, "learning_rate": 9.685892409218718e-06, "grad_norm": 0.6270581483840942} +{"timestamp": "2025-06-01T22:08:01.065336", "epoch": 2, "step": 160, "global_step": 382, "train_loss": 0.05863553658127785, "train_perplexity": 1.060388697414153, "eval_loss": null, "eval_perplexity": null, "learning_rate": 9.163221566676847e-06, "grad_norm": 0.5811100006103516} +{"timestamp": "2025-06-01T22:08:04.973440", "epoch": 2, "step": 170, "global_step": 392, "train_loss": 0.11198949068784714, "train_perplexity": 1.1185011059060128, "eval_loss": null, "eval_perplexity": null, "learning_rate": 8.642844275656957e-06, "grad_norm": 0.8527700304985046} +{"timestamp": "2025-06-01T22:08:11.797351", "epoch": 2, "step": 180, "global_step": 402, "train_loss": 0.06049897149205208, "train_perplexity": 1.0623665049173692, "eval_loss": null, "eval_perplexity": null, "learning_rate": 8.126186854142752e-06, "grad_norm": 
0.5777755379676819} +{"timestamp": "2025-06-01T22:08:15.700367", "epoch": 2, "step": 190, "global_step": 412, "train_loss": 0.13823366910219193, "train_perplexity": 1.1482438280332403, "eval_loss": null, "eval_perplexity": null, "learning_rate": 7.6146654242141935e-06, "grad_norm": 1.0172908306121826} +{"timestamp": "2025-06-01T22:08:19.585851", "epoch": 2, "step": 200, "global_step": 422, "train_loss": 0.06519987806677818, "train_perplexity": 1.0673723473855157, "eval_loss": null, "eval_perplexity": null, "learning_rate": 7.109682030555283e-06, "grad_norm": 0.6860023140907288} +{"timestamp": "2025-06-01T22:08:23.473603", "epoch": 2, "step": 210, "global_step": 432, "train_loss": 0.06504671461880207, "train_perplexity": 1.067208877475643, "eval_loss": null, "eval_perplexity": null, "learning_rate": 6.612620797547087e-06, "grad_norm": 0.6190823912620544} +{"timestamp": "2025-06-01T22:08:27.375470", "epoch": 2, "step": 220, "global_step": 442, "train_loss": 0.0975542925298214, "train_perplexity": 1.1024712958941993, "eval_loss": null, "eval_perplexity": null, "learning_rate": 6.124844135478971e-06, "grad_norm": 0.7935145497322083} +{"timestamp": "2025-06-01T22:08:31.256631", "epoch": 2, "step": 445, "global_step": 444, "train_loss": 0.09071190734939263, "train_perplexity": 1.0949535117554743, "eval_loss": 0.2383137834072113, "eval_perplexity": 1.2691073551832994, "learning_rate": 6.028521093652195e-06, "grad_norm": null} +{"timestamp": "2025-06-01T22:08:48.876018", "epoch": 3, "step": 10, "global_step": 454, "train_loss": 0.031211395747959614, "train_perplexity": 1.0317035785853463, "eval_loss": null, "eval_perplexity": null, "learning_rate": 5.553648208150728e-06, "grad_norm": 0.48356005549430847} +{"timestamp": "2025-06-01T22:08:52.776927", "epoch": 3, "step": 20, "global_step": 464, "train_loss": 0.031000780873000622, "train_perplexity": 1.0314863093460178, "eval_loss": null, "eval_perplexity": null, "learning_rate": 5.090962463848592e-06, "grad_norm": 0.5302640199661255} +{"timestamp": "2025-06-01T22:08:56.677996", "epoch": 3, "step": 30, "global_step": 474, "train_loss": 0.04129863902926445, "train_perplexity": 1.0421632897104725, "eval_loss": null, "eval_perplexity": null, "learning_rate": 4.641732050210032e-06, "grad_norm": 0.7962318062782288} +{"timestamp": "2025-06-01T22:09:00.576642", "epoch": 3, "step": 40, "global_step": 484, "train_loss": 0.04581455513834953, "train_perplexity": 1.0468802543908846, "eval_loss": null, "eval_perplexity": null, "learning_rate": 4.207188276573214e-06, "grad_norm": 0.7152215838432312} +{"timestamp": "2025-06-01T22:09:04.474363", "epoch": 3, "step": 50, "global_step": 494, "train_loss": 0.03719506040215492, "train_perplexity": 1.0378954533987617, "eval_loss": null, "eval_perplexity": null, "learning_rate": 3.7885221972168974e-06, "grad_norm": 0.5870726704597473} +{"timestamp": "2025-06-01T22:09:11.283191", "epoch": 3, "step": 60, "global_step": 504, "train_loss": 0.02477585431188345, "train_perplexity": 1.0250853263158552, "eval_loss": null, "eval_perplexity": null, "learning_rate": 3.3868813467634833e-06, "grad_norm": 0.47100794315338135} +{"timestamp": "2025-06-01T22:09:15.188720", "epoch": 3, "step": 70, "global_step": 514, "train_loss": 0.04708791710436344, "train_perplexity": 1.0482141609825184, "eval_loss": null, "eval_perplexity": null, "learning_rate": 3.003366594866345e-06, "grad_norm": 0.9475640654563904} +{"timestamp": "2025-06-01T22:09:19.090076", "epoch": 3, "step": 80, "global_step": 524, "train_loss": 0.04347562417387962, 
"train_perplexity": 1.0444345350471118, "eval_loss": null, "eval_perplexity": null, "learning_rate": 2.639029128802657e-06, "grad_norm": 0.6836437582969666} +{"timestamp": "2025-06-01T22:09:22.994807", "epoch": 3, "step": 90, "global_step": 534, "train_loss": 0.045809678733348846, "train_perplexity": 1.046875149391224, "eval_loss": null, "eval_perplexity": null, "learning_rate": 2.2948675722421086e-06, "grad_norm": 0.7145618796348572} +{"timestamp": "2025-06-01T22:09:26.903036", "epoch": 3, "step": 100, "global_step": 544, "train_loss": 0.08468991331756115, "train_perplexity": 1.0883795223717765, "eval_loss": null, "eval_perplexity": null, "learning_rate": 1.9718252480888567e-06, "grad_norm": 1.2155704498291016} +{"timestamp": "2025-06-01T22:09:33.732771", "epoch": 3, "step": 110, "global_step": 554, "train_loss": 0.04231490008533001, "train_perplexity": 1.0432229380243245, "eval_loss": null, "eval_perplexity": null, "learning_rate": 1.6707875928990059e-06, "grad_norm": 0.8035908341407776} +{"timestamp": "2025-06-01T22:09:37.632505", "epoch": 3, "step": 120, "global_step": 564, "train_loss": 0.024803001433610916, "train_perplexity": 1.0251131548097199, "eval_loss": null, "eval_perplexity": null, "learning_rate": 1.3925797299605649e-06, "grad_norm": 0.4342428743839264} +{"timestamp": "2025-06-01T22:09:41.516456", "epoch": 3, "step": 130, "global_step": 574, "train_loss": 0.047217048704624176, "train_perplexity": 1.0483495272943881, "eval_loss": null, "eval_perplexity": null, "learning_rate": 1.1379642076878528e-06, "grad_norm": 0.9755510687828064} +{"timestamp": "2025-06-01T22:09:45.415379", "epoch": 3, "step": 140, "global_step": 584, "train_loss": 0.030878172256052494, "train_perplexity": 1.0313598479890123, "eval_loss": null, "eval_perplexity": null, "learning_rate": 9.076389095293148e-07, "grad_norm": 0.5726606845855713} +{"timestamp": "2025-06-01T22:09:49.319436", "epoch": 3, "step": 150, "global_step": 594, "train_loss": 0.037491971626877785, "train_perplexity": 1.0382036619619912, "eval_loss": null, "eval_perplexity": null, "learning_rate": 7.022351411174866e-07, "grad_norm": 0.700962483882904} +{"timestamp": "2025-06-01T22:09:56.133785", "epoch": 3, "step": 160, "global_step": 604, "train_loss": 0.04206804744899273, "train_perplexity": 1.0429654474742038, "eval_loss": null, "eval_perplexity": null, "learning_rate": 5.223158999041444e-07, "grad_norm": 0.6717422008514404} +{"timestamp": "2025-06-01T22:10:00.035383", "epoch": 3, "step": 170, "global_step": 614, "train_loss": 0.03362067602574825, "train_perplexity": 1.0341922384054545, "eval_loss": null, "eval_perplexity": null, "learning_rate": 3.68374332023419e-07, "grad_norm": 0.5888093113899231} +{"timestamp": "2025-06-01T22:10:03.922594", "epoch": 3, "step": 180, "global_step": 624, "train_loss": 0.053529851138591766, "train_perplexity": 1.0549884839049966, "eval_loss": null, "eval_perplexity": null, "learning_rate": 2.4083238061252565e-07, "grad_norm": 0.9545252323150635} +{"timestamp": "2025-06-01T22:10:07.809788", "epoch": 3, "step": 190, "global_step": 634, "train_loss": 0.04251237213611603, "train_perplexity": 1.0434289657390112, "eval_loss": null, "eval_perplexity": null, "learning_rate": 1.400396292949513e-07, "grad_norm": 0.7707187533378601} +{"timestamp": "2025-06-01T22:10:11.713551", "epoch": 3, "step": 200, "global_step": 644, "train_loss": 0.029588287696242332, "train_perplexity": 1.0300303704660803, "eval_loss": null, "eval_perplexity": null, "learning_rate": 6.627234399603554e-08, "grad_norm": 0.5699613094329834} 
+{"timestamp": "2025-06-01T22:10:18.546978", "epoch": 3, "step": 210, "global_step": 654, "train_loss": 0.03146916162222624, "train_perplexity": 1.0319695508380768, "eval_loss": null, "eval_perplexity": null, "learning_rate": 1.973271571728441e-08, "grad_norm": 0.5466866493225098} +{"timestamp": "2025-06-01T22:10:22.450171", "epoch": 3, "step": 220, "global_step": 664, "train_loss": 0.03437684569507837, "train_perplexity": 1.0349745589546517, "eval_loss": null, "eval_perplexity": null, "learning_rate": 5.483063448785686e-10, "grad_norm": 0.7315030097961426} +{"timestamp": "2025-06-01T22:10:26.334495", "epoch": 3, "step": 445, "global_step": 666, "train_loss": 0.039174853622711996, "train_perplexity": 1.0399523071818013, "eval_loss": 0.2910202094912529, "eval_perplexity": 1.337791619764882, "learning_rate": 0.0, "grad_norm": null} +{"timestamp": "2025-06-04T15:48:13.078266", "epoch": 1, "step": 10, "global_step": 10, "train_loss": 4.671072721481323, "train_perplexity": 106.81226080494424, "eval_loss": null, "eval_perplexity": null, "learning_rate": 3.7313432835820895e-07, "grad_norm": 128.40884399414062} +{"timestamp": "2025-06-04T15:48:16.975402", "epoch": 1, "step": 20, "global_step": 20, "train_loss": 1.949156641960144, "train_perplexity": 7.022762379292836, "eval_loss": null, "eval_perplexity": null, "learning_rate": 7.462686567164179e-07, "grad_norm": 165.83786010742188} +{"timestamp": "2025-06-04T15:48:20.859988", "epoch": 1, "step": 30, "global_step": 30, "train_loss": 0.3046633452177048, "train_perplexity": 1.3561683655800802, "eval_loss": null, "eval_perplexity": null, "learning_rate": 1.119402985074627e-06, "grad_norm": 2.2541606426239014} +{"timestamp": "2025-06-04T15:48:24.742224", "epoch": 1, "step": 40, "global_step": 40, "train_loss": 0.3191399648785591, "train_perplexity": 1.3759438952493854, "eval_loss": null, "eval_perplexity": null, "learning_rate": 1.4925373134328358e-06, "grad_norm": 1.6307425498962402} +{"timestamp": "2025-06-04T15:48:45.625340", "epoch": 1, "step": 50, "global_step": 50, "train_loss": 0.2649899423122406, "train_perplexity": 1.303417866342524, "eval_loss": null, "eval_perplexity": null, "learning_rate": 1.865671641791045e-06, "grad_norm": 1.4340673685073853} +{"timestamp": "2025-06-04T15:48:49.505420", "epoch": 1, "step": 60, "global_step": 60, "train_loss": 0.3226696774363518, "train_perplexity": 1.380809163142544, "eval_loss": null, "eval_perplexity": null, "learning_rate": 2.238805970149254e-06, "grad_norm": 1.825088620185852} +{"timestamp": "2025-06-04T15:48:53.380713", "epoch": 1, "step": 70, "global_step": 70, "train_loss": 0.16062812507152557, "train_perplexity": 1.1742482141392039, "eval_loss": null, "eval_perplexity": null, "learning_rate": 2.6119402985074627e-06, "grad_norm": 1.1731597185134888} +{"timestamp": "2025-06-04T15:48:57.241477", "epoch": 1, "step": 80, "global_step": 80, "train_loss": 0.1917380765080452, "train_perplexity": 1.2113531935510409, "eval_loss": null, "eval_perplexity": null, "learning_rate": 2.9850746268656716e-06, "grad_norm": 1.4646199941635132} +{"timestamp": "2025-06-04T15:49:01.098674", "epoch": 1, "step": 90, "global_step": 90, "train_loss": 0.19584764540195465, "train_perplexity": 1.2163415759848903, "eval_loss": null, "eval_perplexity": null, "learning_rate": 3.3582089552238813e-06, "grad_norm": 1.5290420055389404} +{"timestamp": "2025-06-04T15:49:21.995753", "epoch": 1, "step": 100, "global_step": 100, "train_loss": 0.2458140328526497, "train_perplexity": 1.2786617623495191, "eval_loss": null, 
"eval_perplexity": null, "learning_rate": 3.73134328358209e-06, "grad_norm": 1.8633289337158203} +{"timestamp": "2025-06-04T15:49:25.878003", "epoch": 1, "step": 110, "global_step": 110, "train_loss": 0.1814446747303009, "train_perplexity": 1.1989482027420864, "eval_loss": null, "eval_perplexity": null, "learning_rate": 4.104477611940299e-06, "grad_norm": 1.3574203252792358} +{"timestamp": "2025-06-04T15:49:29.741404", "epoch": 1, "step": 120, "global_step": 120, "train_loss": 0.21781329810619354, "train_perplexity": 1.2433549092313039, "eval_loss": null, "eval_perplexity": null, "learning_rate": 4.477611940298508e-06, "grad_norm": 1.5201458930969238} +{"timestamp": "2025-06-04T15:49:33.621621", "epoch": 1, "step": 130, "global_step": 130, "train_loss": 0.16430367529392242, "train_perplexity": 1.1785721640033482, "eval_loss": null, "eval_perplexity": null, "learning_rate": 4.850746268656717e-06, "grad_norm": 1.3497518301010132} +{"timestamp": "2025-06-04T15:49:37.502736", "epoch": 1, "step": 140, "global_step": 140, "train_loss": 0.17524391412734985, "train_perplexity": 1.191536813832685, "eval_loss": null, "eval_perplexity": null, "learning_rate": 5.2238805970149255e-06, "grad_norm": 1.2528648376464844} +{"timestamp": "2025-06-04T15:49:58.449942", "epoch": 1, "step": 150, "global_step": 150, "train_loss": 0.20567508041858673, "train_perplexity": 1.2283540228730032, "eval_loss": null, "eval_perplexity": null, "learning_rate": 5.597014925373134e-06, "grad_norm": 1.3659957647323608} +{"timestamp": "2025-06-04T15:50:02.329556", "epoch": 1, "step": 160, "global_step": 160, "train_loss": 0.15652210637927055, "train_perplexity": 1.169436614042621, "eval_loss": null, "eval_perplexity": null, "learning_rate": 5.970149253731343e-06, "grad_norm": 1.2447582483291626} +{"timestamp": "2025-06-04T15:50:06.215832", "epoch": 1, "step": 170, "global_step": 170, "train_loss": 0.23489902913570404, "train_perplexity": 1.2647810562489856, "eval_loss": null, "eval_perplexity": null, "learning_rate": 6.343283582089553e-06, "grad_norm": 1.4126760959625244} +{"timestamp": "2025-06-04T15:50:10.079518", "epoch": 1, "step": 180, "global_step": 180, "train_loss": 0.19649717211723328, "train_perplexity": 1.217131878967062, "eval_loss": null, "eval_perplexity": null, "learning_rate": 6.7164179104477625e-06, "grad_norm": 1.2898818254470825} +{"timestamp": "2025-06-04T15:50:13.968221", "epoch": 1, "step": 190, "global_step": 190, "train_loss": 0.1695742830634117, "train_perplexity": 1.184800354364207, "eval_loss": null, "eval_perplexity": null, "learning_rate": 7.089552238805971e-06, "grad_norm": 1.2228553295135498} +{"timestamp": "2025-06-04T15:50:34.972471", "epoch": 1, "step": 200, "global_step": 200, "train_loss": 0.09251426719129086, "train_perplexity": 1.0969287915415644, "eval_loss": null, "eval_perplexity": null, "learning_rate": 7.46268656716418e-06, "grad_norm": 0.8676640391349792} +{"timestamp": "2025-06-04T15:50:38.854436", "epoch": 1, "step": 210, "global_step": 210, "train_loss": 0.19059253484010696, "train_perplexity": 1.2099663324984606, "eval_loss": null, "eval_perplexity": null, "learning_rate": 7.835820895522389e-06, "grad_norm": 1.3530491590499878} +{"timestamp": "2025-06-04T15:50:42.740946", "epoch": 1, "step": 220, "global_step": 220, "train_loss": 0.1853446662425995, "train_perplexity": 1.2036332203827504, "eval_loss": null, "eval_perplexity": null, "learning_rate": 8.208955223880599e-06, "grad_norm": 1.1952266693115234} +{"timestamp": "2025-06-04T15:50:46.624457", "epoch": 1, "step": 230, 
"global_step": 230, "train_loss": 0.17687048763036728, "train_perplexity": 1.1934765131460265, "eval_loss": null, "eval_perplexity": null, "learning_rate": 8.582089552238807e-06, "grad_norm": 1.252812385559082} +{"timestamp": "2025-06-04T15:50:50.515276", "epoch": 1, "step": 240, "global_step": 240, "train_loss": 0.18703439086675644, "train_perplexity": 1.2056687483302178, "eval_loss": null, "eval_perplexity": null, "learning_rate": 8.955223880597016e-06, "grad_norm": 1.1214816570281982} +{"timestamp": "2025-06-04T15:51:11.600965", "epoch": 1, "step": 250, "global_step": 250, "train_loss": 0.14947571232914925, "train_perplexity": 1.1612252670124277, "eval_loss": null, "eval_perplexity": null, "learning_rate": 9.328358208955226e-06, "grad_norm": 1.126630425453186} +{"timestamp": "2025-06-04T15:51:15.475835", "epoch": 1, "step": 260, "global_step": 260, "train_loss": 0.1320449560880661, "train_perplexity": 1.1411596201864647, "eval_loss": null, "eval_perplexity": null, "learning_rate": 9.701492537313434e-06, "grad_norm": 1.0430641174316406} +{"timestamp": "2025-06-04T15:51:19.374491", "epoch": 1, "step": 270, "global_step": 270, "train_loss": 0.17876822501420975, "train_perplexity": 1.1957435685993218, "eval_loss": null, "eval_perplexity": null, "learning_rate": 9.999983035350438e-06, "grad_norm": 1.14077627658844} +{"timestamp": "2025-06-04T15:51:23.241027", "epoch": 1, "step": 280, "global_step": 280, "train_loss": 0.1122770607471466, "train_perplexity": 1.1188227995878834, "eval_loss": null, "eval_perplexity": null, "learning_rate": 9.999389284703265e-06, "grad_norm": 0.9659624099731445} +{"timestamp": "2025-06-04T15:51:27.128086", "epoch": 1, "step": 290, "global_step": 290, "train_loss": 0.07831090688705444, "train_perplexity": 1.0814588394081597, "eval_loss": null, "eval_perplexity": null, "learning_rate": 9.997947416694233e-06, "grad_norm": 0.7408416271209717} +{"timestamp": "2025-06-04T15:51:48.202371", "epoch": 1, "step": 300, "global_step": 300, "train_loss": 0.21869849413633347, "train_perplexity": 1.244456009333831, "eval_loss": null, "eval_perplexity": null, "learning_rate": 9.995657675927874e-06, "grad_norm": 1.3368358612060547} +{"timestamp": "2025-06-04T15:51:52.092307", "epoch": 1, "step": 310, "global_step": 310, "train_loss": 0.196963831782341, "train_perplexity": 1.2176999978708714, "eval_loss": null, "eval_perplexity": null, "learning_rate": 9.992520450845415e-06, "grad_norm": 1.2888882160186768} +{"timestamp": "2025-06-04T15:51:55.962190", "epoch": 1, "step": 320, "global_step": 320, "train_loss": 0.12523503229022026, "train_perplexity": 1.133414810843023, "eval_loss": null, "eval_perplexity": null, "learning_rate": 9.988536273658876e-06, "grad_norm": 0.9675649404525757} +{"timestamp": "2025-06-04T15:51:59.850182", "epoch": 1, "step": 330, "global_step": 330, "train_loss": 0.10732617415487766, "train_perplexity": 1.1132973240779556, "eval_loss": null, "eval_perplexity": null, "learning_rate": 9.983705820260776e-06, "grad_norm": 0.8802552819252014} +{"timestamp": "2025-06-04T15:52:03.739457", "epoch": 1, "step": 340, "global_step": 340, "train_loss": 0.11835997551679611, "train_perplexity": 1.1256492446125033, "eval_loss": null, "eval_perplexity": null, "learning_rate": 9.978029910109491e-06, "grad_norm": 1.1237599849700928} +{"timestamp": "2025-06-04T15:52:24.820733", "epoch": 1, "step": 350, "global_step": 350, "train_loss": 0.12786667421460152, "train_perplexity": 1.1364014809764293, "eval_loss": null, "eval_perplexity": null, "learning_rate": 9.971509506090216e-06, 
"grad_norm": 0.9043460488319397} +{"timestamp": "2025-06-04T15:52:28.712642", "epoch": 1, "step": 360, "global_step": 360, "train_loss": 0.1164625994861126, "train_perplexity": 1.123515489624736, "eval_loss": null, "eval_perplexity": null, "learning_rate": 9.964145714351633e-06, "grad_norm": 1.0198386907577515} +{"timestamp": "2025-06-04T15:52:32.587060", "epoch": 1, "step": 370, "global_step": 370, "train_loss": 0.2362208068370819, "train_perplexity": 1.2664539209803558, "eval_loss": null, "eval_perplexity": null, "learning_rate": 9.955939784118246e-06, "grad_norm": 1.3103463649749756} +{"timestamp": "2025-06-04T15:52:36.459376", "epoch": 1, "step": 380, "global_step": 380, "train_loss": 0.1618729755282402, "train_perplexity": 1.1757108877809606, "eval_loss": null, "eval_perplexity": null, "learning_rate": 9.946893107478473e-06, "grad_norm": 1.0293731689453125} +{"timestamp": "2025-06-04T15:52:40.338426", "epoch": 1, "step": 390, "global_step": 390, "train_loss": 0.0980859249830246, "train_perplexity": 1.1030575612388553, "eval_loss": null, "eval_perplexity": null, "learning_rate": 9.937007219148473e-06, "grad_norm": 0.7921751737594604} +{"timestamp": "2025-06-04T15:53:01.448046", "epoch": 1, "step": 400, "global_step": 400, "train_loss": 0.20105715841054916, "train_perplexity": 1.2226946571092294, "eval_loss": null, "eval_perplexity": null, "learning_rate": 9.926283796211796e-06, "grad_norm": 1.0909723043441772} +{"timestamp": "2025-06-04T15:53:05.335319", "epoch": 1, "step": 410, "global_step": 410, "train_loss": 0.22509319335222244, "train_perplexity": 1.2524394297822314, "eval_loss": null, "eval_perplexity": null, "learning_rate": 9.914724657834875e-06, "grad_norm": 1.2618776559829712} +{"timestamp": "2025-06-04T15:53:09.208929", "epoch": 1, "step": 420, "global_step": 420, "train_loss": 0.28387679159641266, "train_perplexity": 1.328269266734722, "eval_loss": null, "eval_perplexity": null, "learning_rate": 9.902331764958412e-06, "grad_norm": 1.3388011455535889} +{"timestamp": "2025-06-04T15:53:13.287854", "epoch": 1, "step": 430, "global_step": 430, "train_loss": 0.16169708222150803, "train_perplexity": 1.1755041062914209, "eval_loss": null, "eval_perplexity": null, "learning_rate": 9.889107219964726e-06, "grad_norm": 1.1124091148376465} +{"timestamp": "2025-06-04T15:53:17.184992", "epoch": 1, "step": 440, "global_step": 440, "train_loss": 0.11832941696047783, "train_perplexity": 1.1256148469222418, "eval_loss": null, "eval_perplexity": null, "learning_rate": 9.87505326632108e-06, "grad_norm": 0.9476931691169739} +{"timestamp": "2025-06-04T15:53:38.261983", "epoch": 1, "step": 450, "global_step": 450, "train_loss": 0.1749972589313984, "train_perplexity": 1.1912429513292322, "eval_loss": null, "eval_perplexity": null, "learning_rate": 9.8601722881991e-06, "grad_norm": 1.1345462799072266} +{"timestamp": "2025-06-04T15:53:42.134995", "epoch": 1, "step": 460, "global_step": 460, "train_loss": 0.08550283685326576, "train_perplexity": 1.0892646514236664, "eval_loss": null, "eval_perplexity": null, "learning_rate": 9.844466810070319e-06, "grad_norm": 0.6836742758750916} +{"timestamp": "2025-06-04T15:53:46.024181", "epoch": 1, "step": 470, "global_step": 470, "train_loss": 0.12165988981723785, "train_perplexity": 1.1293699262409955, "eval_loss": null, "eval_perplexity": null, "learning_rate": 9.827939496277901e-06, "grad_norm": 0.9041603803634644} +{"timestamp": "2025-06-04T15:53:49.913415", "epoch": 1, "step": 480, "global_step": 480, "train_loss": 0.14826585724949837, "train_perplexity": 
1.159821202253221, "eval_loss": null, "eval_perplexity": null, "learning_rate": 9.810593150584658e-06, "grad_norm": 1.0981465578079224} +{"timestamp": "2025-06-04T15:53:53.803995", "epoch": 1, "step": 490, "global_step": 490, "train_loss": 0.17808087170124054, "train_perplexity": 1.194921952698537, "eval_loss": null, "eval_perplexity": null, "learning_rate": 9.792430715697412e-06, "grad_norm": 1.0859241485595703} +{"timestamp": "2025-06-04T15:54:14.951227", "epoch": 1, "step": 500, "global_step": 500, "train_loss": 0.1023472435772419, "train_perplexity": 1.1077680702968618, "eval_loss": null, "eval_perplexity": null, "learning_rate": 9.77345527276778e-06, "grad_norm": 0.8103573322296143} +{"timestamp": "2025-06-04T15:54:18.850223", "epoch": 1, "step": 510, "global_step": 510, "train_loss": 0.16731278225779533, "train_perplexity": 1.1821239548887807, "eval_loss": null, "eval_perplexity": null, "learning_rate": 9.753670040869466e-06, "grad_norm": 1.0387943983078003} +{"timestamp": "2025-06-04T15:54:22.742237", "epoch": 1, "step": 520, "global_step": 520, "train_loss": 0.12773668766021729, "train_perplexity": 1.1362537736637104, "eval_loss": null, "eval_perplexity": null, "learning_rate": 9.733078376452172e-06, "grad_norm": 0.8454576134681702} +{"timestamp": "2025-06-04T15:54:26.637383", "epoch": 1, "step": 530, "global_step": 530, "train_loss": 0.18833771720528603, "train_perplexity": 1.2072411526208404, "eval_loss": null, "eval_perplexity": null, "learning_rate": 9.711683772772197e-06, "grad_norm": 1.190935730934143} +{"timestamp": "2025-06-04T15:54:30.534207", "epoch": 1, "step": 540, "global_step": 540, "train_loss": 0.19391634687781334, "train_perplexity": 1.2139947242594389, "eval_loss": null, "eval_perplexity": null, "learning_rate": 9.689489859299823e-06, "grad_norm": 1.0974748134613037} +{"timestamp": "2025-06-04T15:54:51.675683", "epoch": 1, "step": 550, "global_step": 550, "train_loss": 0.14488542079925537, "train_perplexity": 1.1559071197662123, "eval_loss": null, "eval_perplexity": null, "learning_rate": 9.666500401103595e-06, "grad_norm": 0.9678713083267212} +{"timestamp": "2025-06-04T15:54:55.569836", "epoch": 1, "step": 560, "global_step": 560, "train_loss": 0.16025637090206146, "train_perplexity": 1.173811763600792, "eval_loss": null, "eval_perplexity": null, "learning_rate": 9.642719298211602e-06, "grad_norm": 1.06119966506958} +{"timestamp": "2025-06-04T15:54:59.460516", "epoch": 1, "step": 570, "global_step": 570, "train_loss": 0.1826080083847046, "train_perplexity": 1.2003437911463095, "eval_loss": null, "eval_perplexity": null, "learning_rate": 9.618150584949858e-06, "grad_norm": 1.219964623451233} +{"timestamp": "2025-06-04T15:55:03.348911", "epoch": 1, "step": 580, "global_step": 580, "train_loss": 0.14983021840453148, "train_perplexity": 1.1616370014013302, "eval_loss": null, "eval_perplexity": null, "learning_rate": 9.592798429257899e-06, "grad_norm": 0.9647617340087891} +{"timestamp": "2025-06-04T15:55:07.234371", "epoch": 1, "step": 590, "global_step": 590, "train_loss": 0.06683030910789967, "train_perplexity": 1.0691140438653002, "eval_loss": null, "eval_perplexity": null, "learning_rate": 9.56666713198173e-06, "grad_norm": 0.6835302114486694} +{"timestamp": "2025-06-04T15:55:28.328386", "epoch": 1, "step": 600, "global_step": 600, "train_loss": 0.14879680424928665, "train_perplexity": 1.1604371693493187, "eval_loss": null, "eval_perplexity": null, "learning_rate": 9.539761126144193e-06, "grad_norm": 0.9380856156349182} +{"timestamp": "2025-06-04T15:55:32.201388", 
"epoch": 1, "step": 610, "global_step": 610, "train_loss": 0.20094577968120575, "train_perplexity": 1.222558482515563, "eval_loss": null, "eval_perplexity": null, "learning_rate": 9.512084976192944e-06, "grad_norm": 1.6561542749404907} +{"timestamp": "2025-06-04T15:55:36.097604", "epoch": 1, "step": 620, "global_step": 620, "train_loss": 0.12645913287997246, "train_perplexity": 1.1348030740949924, "eval_loss": null, "eval_perplexity": null, "learning_rate": 9.483643377226107e-06, "grad_norm": 0.8381012082099915} +{"timestamp": "2025-06-04T15:55:39.971556", "epoch": 1, "step": 630, "global_step": 630, "train_loss": 0.15692076086997986, "train_perplexity": 1.169902908139351, "eval_loss": null, "eval_perplexity": null, "learning_rate": 9.4544411541958e-06, "grad_norm": 1.0088982582092285} +{"timestamp": "2025-06-04T15:55:43.864524", "epoch": 1, "step": 640, "global_step": 640, "train_loss": 0.152855534106493, "train_perplexity": 1.1651566413882393, "eval_loss": null, "eval_perplexity": null, "learning_rate": 9.424483261089584e-06, "grad_norm": 0.9556991457939148} +{"timestamp": "2025-06-04T15:56:04.918510", "epoch": 1, "step": 650, "global_step": 650, "train_loss": 0.10202617943286896, "train_perplexity": 1.1074124627786746, "eval_loss": null, "eval_perplexity": null, "learning_rate": 9.39377478009007e-06, "grad_norm": 0.7985841631889343} +{"timestamp": "2025-06-04T15:56:08.805577", "epoch": 1, "step": 660, "global_step": 660, "train_loss": 0.10977356135845184, "train_perplexity": 1.1160253305851233, "eval_loss": null, "eval_perplexity": null, "learning_rate": 9.36232092071274e-06, "grad_norm": 0.7995733022689819} +{"timestamp": "2025-06-04T15:56:12.690636", "epoch": 1, "step": 670, "global_step": 670, "train_loss": 0.08872723206877708, "train_perplexity": 1.092782539637229, "eval_loss": null, "eval_perplexity": null, "learning_rate": 9.330127018922195e-06, "grad_norm": 0.7211840748786926} +{"timestamp": "2025-06-04T15:56:16.581263", "epoch": 1, "step": 680, "global_step": 680, "train_loss": 0.1401810497045517, "train_perplexity": 1.1504820744422115, "eval_loss": null, "eval_perplexity": null, "learning_rate": 9.297198536226927e-06, "grad_norm": 0.8916677236557007} +{"timestamp": "2025-06-04T15:56:20.469276", "epoch": 1, "step": 690, "global_step": 690, "train_loss": 0.1811549961566925, "train_perplexity": 1.1986009434361042, "eval_loss": null, "eval_perplexity": null, "learning_rate": 9.26354105875282e-06, "grad_norm": 1.0837889909744263} +{"timestamp": "2025-06-04T15:56:41.507497", "epoch": 1, "step": 700, "global_step": 700, "train_loss": 0.17966965585947037, "train_perplexity": 1.1968219346982898, "eval_loss": null, "eval_perplexity": null, "learning_rate": 9.229160296295488e-06, "grad_norm": 1.1415470838546753} +{"timestamp": "2025-06-04T15:56:45.399329", "epoch": 1, "step": 710, "global_step": 710, "train_loss": 0.08297113329172134, "train_perplexity": 1.0865104441155142, "eval_loss": null, "eval_perplexity": null, "learning_rate": 9.194062081351638e-06, "grad_norm": 0.6988579034805298} +{"timestamp": "2025-06-04T15:56:49.283621", "epoch": 1, "step": 720, "global_step": 720, "train_loss": 0.21730352938175201, "train_perplexity": 1.242721247309434, "eval_loss": null, "eval_perplexity": null, "learning_rate": 9.158252368129628e-06, "grad_norm": 1.173891305923462} +{"timestamp": "2025-06-04T15:56:53.169881", "epoch": 1, "step": 730, "global_step": 730, "train_loss": 0.11370521038770676, "train_perplexity": 1.1204217874921896, "eval_loss": null, "eval_perplexity": null, "learning_rate": 
9.121737231539369e-06, "grad_norm": 0.9612295627593994} +{"timestamp": "2025-06-04T15:56:57.061155", "epoch": 1, "step": 740, "global_step": 740, "train_loss": 0.1307085007429123, "train_perplexity": 1.139635529978495, "eval_loss": null, "eval_perplexity": null, "learning_rate": 9.084522866161747e-06, "grad_norm": 0.8802459836006165} +{"timestamp": "2025-06-04T15:57:18.033686", "epoch": 1, "step": 750, "global_step": 750, "train_loss": 0.21306980401277542, "train_perplexity": 1.2374710286723147, "eval_loss": null, "eval_perplexity": null, "learning_rate": 9.046615585197753e-06, "grad_norm": 1.119827389717102} +{"timestamp": "2025-06-04T15:57:21.917915", "epoch": 1, "step": 760, "global_step": 760, "train_loss": 0.10635868832468987, "train_perplexity": 1.1122207455633288, "eval_loss": null, "eval_perplexity": null, "learning_rate": 9.008021819397488e-06, "grad_norm": 0.7633558511734009} +{"timestamp": "2025-06-04T15:57:25.803713", "epoch": 1, "step": 770, "global_step": 770, "train_loss": 0.09661010652780533, "train_perplexity": 1.1014308491938922, "eval_loss": null, "eval_perplexity": null, "learning_rate": 8.968748115969211e-06, "grad_norm": 0.8787773251533508} +{"timestamp": "2025-06-04T15:57:29.683554", "epoch": 1, "step": 780, "global_step": 780, "train_loss": 0.14135663956403732, "train_perplexity": 1.1518353648038036, "eval_loss": null, "eval_perplexity": null, "learning_rate": 8.928801137468654e-06, "grad_norm": 0.9087620377540588} +{"timestamp": "2025-06-04T15:57:33.561708", "epoch": 1, "step": 790, "global_step": 790, "train_loss": 0.09599808976054192, "train_perplexity": 1.1007569612825894, "eval_loss": null, "eval_perplexity": null, "learning_rate": 8.888187660668762e-06, "grad_norm": 0.8097870945930481} +{"timestamp": "2025-06-04T15:57:54.498816", "epoch": 1, "step": 800, "global_step": 800, "train_loss": 0.13304192572832108, "train_perplexity": 1.1422978889979176, "eval_loss": null, "eval_perplexity": null, "learning_rate": 8.846914575410035e-06, "grad_norm": 0.8932214975357056} +{"timestamp": "2025-06-04T15:57:58.352666", "epoch": 1, "step": 810, "global_step": 810, "train_loss": 0.17763880267739296, "train_perplexity": 1.1943938314589546, "eval_loss": null, "eval_perplexity": null, "learning_rate": 8.804988883431728e-06, "grad_norm": 1.072876214981079} +{"timestamp": "2025-06-04T15:58:02.235000", "epoch": 1, "step": 820, "global_step": 820, "train_loss": 0.09748945012688637, "train_perplexity": 1.1023998113238476, "eval_loss": null, "eval_perplexity": null, "learning_rate": 8.762417697184034e-06, "grad_norm": 0.8411881923675537} +{"timestamp": "2025-06-04T15:58:06.111317", "epoch": 1, "step": 830, "global_step": 830, "train_loss": 0.13653943687677383, "train_perplexity": 1.1463000833795116, "eval_loss": null, "eval_perplexity": null, "learning_rate": 8.719208238621495e-06, "grad_norm": 0.8364983797073364} +{"timestamp": "2025-06-04T15:58:09.988318", "epoch": 1, "step": 840, "global_step": 840, "train_loss": 0.11472301930189133, "train_perplexity": 1.1215627433141526, "eval_loss": null, "eval_perplexity": null, "learning_rate": 8.675367837977848e-06, "grad_norm": 0.8123106360435486} +{"timestamp": "2025-06-04T15:58:30.937925", "epoch": 1, "step": 850, "global_step": 850, "train_loss": 0.0726587325334549, "train_perplexity": 1.075363487714223, "eval_loss": null, "eval_perplexity": null, "learning_rate": 8.630903932522496e-06, "grad_norm": 0.6372828483581543} +{"timestamp": "2025-06-04T15:58:34.799348", "epoch": 1, "step": 860, "global_step": 860, "train_loss": 
0.19452973455190659, "train_perplexity": 1.214739602085798, "eval_loss": null, "eval_perplexity": null, "learning_rate": 8.585824065298806e-06, "grad_norm": 1.091491937637329} +{"timestamp": "2025-06-04T15:58:38.677190", "epoch": 1, "step": 870, "global_step": 870, "train_loss": 0.14252465218305588, "train_perplexity": 1.1531815090486186, "eval_loss": null, "eval_perplexity": null, "learning_rate": 8.540135883844483e-06, "grad_norm": 0.9200571179389954} +{"timestamp": "2025-06-04T15:58:42.542710", "epoch": 1, "step": 880, "global_step": 880, "train_loss": 0.12328824773430824, "train_perplexity": 1.1312104428048655, "eval_loss": null, "eval_perplexity": null, "learning_rate": 8.49384713889421e-06, "grad_norm": 0.8436995148658752} +{"timestamp": "2025-06-04T15:58:46.420788", "epoch": 1, "step": 890, "global_step": 890, "train_loss": 0.055943405255675316, "train_perplexity": 1.057537830959794, "eval_loss": null, "eval_perplexity": null, "learning_rate": 8.44696568306478e-06, "grad_norm": 0.5423739552497864} +{"timestamp": "2025-06-04T15:59:07.372567", "epoch": 1, "step": 900, "global_step": 900, "train_loss": 0.10158522799611092, "train_perplexity": 1.106924255307932, "eval_loss": null, "eval_perplexity": null, "learning_rate": 8.399499469522947e-06, "grad_norm": 0.7847106456756592} +{"timestamp": "2025-06-04T15:59:11.254678", "epoch": 1, "step": 910, "global_step": 910, "train_loss": 0.17052586376667023, "train_perplexity": 1.1859283241106406, "eval_loss": null, "eval_perplexity": null, "learning_rate": 8.35145655063621e-06, "grad_norm": 0.9354358911514282} +{"timestamp": "2025-06-04T15:59:15.130113", "epoch": 1, "step": 920, "global_step": 920, "train_loss": 0.10208459198474884, "train_perplexity": 1.107477151455906, "eval_loss": null, "eval_perplexity": null, "learning_rate": 8.302845076606786e-06, "grad_norm": 0.7604900598526001} +{"timestamp": "2025-06-04T15:59:19.010625", "epoch": 1, "step": 930, "global_step": 930, "train_loss": 0.11179571971297264, "train_perplexity": 1.1182843938532543, "eval_loss": null, "eval_perplexity": null, "learning_rate": 8.253673294088968e-06, "grad_norm": 0.7490450143814087} +{"timestamp": "2025-06-04T15:59:22.889396", "epoch": 1, "step": 940, "global_step": 940, "train_loss": 0.12244100123643875, "train_perplexity": 1.1302524346106964, "eval_loss": null, "eval_perplexity": null, "learning_rate": 8.203949544790131e-06, "grad_norm": 0.8755216002464294} +{"timestamp": "2025-06-04T15:59:43.808026", "epoch": 1, "step": 950, "global_step": 950, "train_loss": 0.12281830981373787, "train_perplexity": 1.130678969011272, "eval_loss": null, "eval_perplexity": null, "learning_rate": 8.15368226405561e-06, "grad_norm": 0.8605537414550781} +{"timestamp": "2025-06-04T15:59:47.687514", "epoch": 1, "step": 960, "global_step": 960, "train_loss": 0.11029093712568283, "train_perplexity": 1.1166028844400853, "eval_loss": null, "eval_perplexity": null, "learning_rate": 8.10287997943769e-06, "grad_norm": 0.8044679760932922} +{"timestamp": "2025-06-04T15:59:51.545996", "epoch": 1, "step": 970, "global_step": 970, "train_loss": 0.13130095973610878, "train_perplexity": 1.1403109173471018, "eval_loss": null, "eval_perplexity": null, "learning_rate": 8.051551309248961e-06, "grad_norm": 0.9222280383110046} +{"timestamp": "2025-06-04T15:59:55.422269", "epoch": 1, "step": 980, "global_step": 980, "train_loss": 0.06533852592110634, "train_perplexity": 1.067520346530895, "eval_loss": null, "eval_perplexity": null, "learning_rate": 7.999704961100267e-06, "grad_norm": 0.6003274321556091} 
+{"timestamp": "2025-06-04T15:59:59.299502", "epoch": 1, "step": 990, "global_step": 990, "train_loss": 0.1481747031211853, "train_perplexity": 1.1597154845809048, "eval_loss": null, "eval_perplexity": null, "learning_rate": 7.947349730423509e-06, "grad_norm": 1.0037659406661987} +{"timestamp": "2025-06-04T16:00:20.173458", "epoch": 1, "step": 1000, "global_step": 1000, "train_loss": 0.09115409478545189, "train_perplexity": 1.095437793505203, "eval_loss": null, "eval_perplexity": null, "learning_rate": 7.894494498979558e-06, "grad_norm": 0.6946194767951965} +{"timestamp": "2025-06-04T16:00:24.034851", "epoch": 1, "step": 1010, "global_step": 1010, "train_loss": 0.06985310651361942, "train_perplexity": 1.0723506483588616, "eval_loss": null, "eval_perplexity": null, "learning_rate": 7.841148233351512e-06, "grad_norm": 0.6094436645507812} +{"timestamp": "2025-06-04T16:00:27.914051", "epoch": 1, "step": 1020, "global_step": 1020, "train_loss": 0.21721255779266357, "train_perplexity": 1.2426082001248913, "eval_loss": null, "eval_perplexity": null, "learning_rate": 7.787319983423564e-06, "grad_norm": 1.39258873462677} +{"timestamp": "2025-06-04T16:00:31.789968", "epoch": 1, "step": 1030, "global_step": 1030, "train_loss": 0.08021197468042374, "train_perplexity": 1.0835167214444683, "eval_loss": null, "eval_perplexity": null, "learning_rate": 7.733018880845747e-06, "grad_norm": 0.6262266635894775} +{"timestamp": "2025-06-04T16:00:35.650779", "epoch": 1, "step": 1040, "global_step": 1040, "train_loss": 0.10484625026583672, "train_perplexity": 1.1105398520219913, "eval_loss": null, "eval_perplexity": null, "learning_rate": 7.678254137484797e-06, "grad_norm": 0.8431848287582397} +{"timestamp": "2025-06-04T16:00:56.555193", "epoch": 1, "step": 1050, "global_step": 1050, "train_loss": 0.16010309010744095, "train_perplexity": 1.1736318545895827, "eval_loss": null, "eval_perplexity": null, "learning_rate": 7.623035043861422e-06, "grad_norm": 0.9451720118522644} +{"timestamp": "2025-06-04T16:01:00.436999", "epoch": 1, "step": 1060, "global_step": 1060, "train_loss": 0.1360699161887169, "train_perplexity": 1.1457619981066123, "eval_loss": null, "eval_perplexity": null, "learning_rate": 7.56737096757421e-06, "grad_norm": 0.8864161968231201} +{"timestamp": "2025-06-04T16:01:04.301305", "epoch": 1, "step": 1070, "global_step": 1070, "train_loss": 0.08098086714744568, "train_perplexity": 1.0843501496567565, "eval_loss": null, "eval_perplexity": null, "learning_rate": 7.511271351710476e-06, "grad_norm": 0.653930127620697} +{"timestamp": "2025-06-04T16:01:08.178934", "epoch": 1, "step": 1080, "global_step": 1080, "train_loss": 0.10546499118208885, "train_perplexity": 1.1112272010911335, "eval_loss": null, "eval_perplexity": null, "learning_rate": 7.4547457132442895e-06, "grad_norm": 0.7293416857719421} +{"timestamp": "2025-06-04T16:01:12.060376", "epoch": 1, "step": 1090, "global_step": 1090, "train_loss": 0.23945699632167816, "train_perplexity": 1.2705590447386081, "eval_loss": null, "eval_perplexity": null, "learning_rate": 7.39780364142199e-06, "grad_norm": 1.1615322828292847} +{"timestamp": "2025-06-04T16:01:33.008327", "epoch": 1, "step": 1100, "global_step": 1100, "train_loss": 0.21317905187606812, "train_perplexity": 1.2376062271230202, "eval_loss": null, "eval_perplexity": null, "learning_rate": 7.34045479613541e-06, "grad_norm": 1.1265214681625366} +{"timestamp": "2025-06-04T16:01:36.893071", "epoch": 1, "step": 1110, "global_step": 1110, "train_loss": 0.22858554869890213, "train_perplexity": 
1.2568210399376758, "eval_loss": null, "eval_perplexity": null, "learning_rate": 7.2827089062831424e-06, "grad_norm": 1.2193849086761475} +{"timestamp": "2025-06-04T16:01:40.775666", "epoch": 1, "step": 1120, "global_step": 1120, "train_loss": 0.17008887976408005, "train_perplexity": 1.185410205617777, "eval_loss": null, "eval_perplexity": null, "learning_rate": 7.2245757681200835e-06, "grad_norm": 0.9430868029594421} +{"timestamp": "2025-06-04T16:01:44.639926", "epoch": 1, "step": 1130, "global_step": 1130, "train_loss": 0.21848519518971443, "train_perplexity": 1.2441905964850732, "eval_loss": null, "eval_perplexity": null, "learning_rate": 7.166065243595556e-06, "grad_norm": 1.2422202825546265} +{"timestamp": "2025-06-04T16:01:48.524440", "epoch": 1, "step": 1140, "global_step": 1140, "train_loss": 0.12940853089094162, "train_perplexity": 1.1381550006775147, "eval_loss": null, "eval_perplexity": null, "learning_rate": 7.107187258680288e-06, "grad_norm": 0.843524694442749} +{"timestamp": "2025-06-04T16:02:09.509814", "epoch": 1, "step": 1150, "global_step": 1150, "train_loss": 0.12559930607676506, "train_perplexity": 1.1338277593564878, "eval_loss": null, "eval_perplexity": null, "learning_rate": 7.047951801682533e-06, "grad_norm": 0.9139230251312256} +{"timestamp": "2025-06-04T16:02:13.394615", "epoch": 1, "step": 1160, "global_step": 1160, "train_loss": 0.1010470874607563, "train_perplexity": 1.1063287347478317, "eval_loss": null, "eval_perplexity": null, "learning_rate": 6.988368921553601e-06, "grad_norm": 0.7504018545150757} +{"timestamp": "2025-06-04T16:02:17.263516", "epoch": 1, "step": 1170, "global_step": 1170, "train_loss": 0.08690540492534637, "train_perplexity": 1.0907934911460626, "eval_loss": null, "eval_perplexity": null, "learning_rate": 6.928448726183121e-06, "grad_norm": 0.9263979196548462} +{"timestamp": "2025-06-04T16:02:21.133390", "epoch": 1, "step": 1180, "global_step": 1180, "train_loss": 0.11731548048555851, "train_perplexity": 1.1244741233805684, "eval_loss": null, "eval_perplexity": null, "learning_rate": 6.8682013806842985e-06, "grad_norm": 0.8856023550033569} +{"timestamp": "2025-06-04T16:02:25.015896", "epoch": 1, "step": 1190, "global_step": 1190, "train_loss": 0.1063197422772646, "train_perplexity": 1.1121774298049192, "eval_loss": null, "eval_perplexity": null, "learning_rate": 6.807637105669454e-06, "grad_norm": 0.807375431060791} +{"timestamp": "2025-06-04T16:02:46.062152", "epoch": 1, "step": 1200, "global_step": 1200, "train_loss": 0.17924988269805908, "train_perplexity": 1.196319646402064, "eval_loss": null, "eval_perplexity": null, "learning_rate": 6.746766175516159e-06, "grad_norm": 1.0243499279022217} +{"timestamp": "2025-06-04T16:02:49.946916", "epoch": 1, "step": 1210, "global_step": 1210, "train_loss": 0.09139541536569595, "train_perplexity": 1.0957021770884698, "eval_loss": null, "eval_perplexity": null, "learning_rate": 6.685598916624254e-06, "grad_norm": 0.7833459973335266} +{"timestamp": "2025-06-04T16:02:53.829310", "epoch": 1, "step": 1220, "global_step": 1220, "train_loss": 0.1082618460059166, "train_perplexity": 1.114339492533743, "eval_loss": null, "eval_perplexity": null, "learning_rate": 6.624145705664024e-06, "grad_norm": 0.7323276996612549} +{"timestamp": "2025-06-04T16:02:57.716753", "epoch": 1, "step": 1230, "global_step": 1230, "train_loss": 0.05634104833006859, "train_perplexity": 1.0579584371742161, "eval_loss": null, "eval_perplexity": null, "learning_rate": 6.562416967815863e-06, "grad_norm": 0.5661768317222595} +{"timestamp": 
"2025-06-04T16:03:01.603285", "epoch": 1, "step": 1240, "global_step": 1240, "train_loss": 0.1578119546175003, "train_perplexity": 1.1709459830181594, "eval_loss": null, "eval_perplexity": null, "learning_rate": 6.500423175001705e-06, "grad_norm": 1.0235848426818848} +{"timestamp": "2025-06-04T16:03:22.665669", "epoch": 1, "step": 1250, "global_step": 1250, "train_loss": 0.11713567562401295, "train_perplexity": 1.124271955642418, "eval_loss": null, "eval_perplexity": null, "learning_rate": 6.438174844108515e-06, "grad_norm": 0.9373230338096619} +{"timestamp": "2025-06-04T16:03:26.553201", "epoch": 1, "step": 1260, "global_step": 1260, "train_loss": 0.09851735457777977, "train_perplexity": 1.1035335555871335, "eval_loss": null, "eval_perplexity": null, "learning_rate": 6.375682535204168e-06, "grad_norm": 0.7698451280593872} +{"timestamp": "2025-06-04T16:03:30.438345", "epoch": 1, "step": 1270, "global_step": 1270, "train_loss": 0.11661957576870918, "train_perplexity": 1.1236918687530446, "eval_loss": null, "eval_perplexity": null, "learning_rate": 6.312956849745993e-06, "grad_norm": 0.9150201678276062} +{"timestamp": "2025-06-04T16:03:34.327490", "epoch": 1, "step": 1280, "global_step": 1280, "train_loss": 0.12427449226379395, "train_perplexity": 1.1323266432485943, "eval_loss": null, "eval_perplexity": null, "learning_rate": 6.2500084287822925e-06, "grad_norm": 0.8793236017227173} +{"timestamp": "2025-06-04T16:03:38.211254", "epoch": 1, "step": 1290, "global_step": 1290, "train_loss": 0.17418143153190613, "train_perplexity": 1.1902714990126755, "eval_loss": null, "eval_perplexity": null, "learning_rate": 6.1868479511471565e-06, "grad_norm": 1.0391594171524048} +{"timestamp": "2025-06-04T16:03:59.309946", "epoch": 1, "step": 1300, "global_step": 1300, "train_loss": 0.11626759171485901, "train_perplexity": 1.1232964167342876, "eval_loss": null, "eval_perplexity": null, "learning_rate": 6.123486131648859e-06, "grad_norm": 0.8350743055343628} +{"timestamp": "2025-06-04T16:04:03.199855", "epoch": 1, "step": 1310, "global_step": 1310, "train_loss": 0.07747054845094681, "train_perplexity": 1.080550408106575, "eval_loss": null, "eval_perplexity": null, "learning_rate": 6.059933719252151e-06, "grad_norm": 0.6586312651634216} +{"timestamp": "2025-06-04T16:04:07.092124", "epoch": 1, "step": 1320, "global_step": 1320, "train_loss": 0.07231690175831318, "train_perplexity": 1.0749959581996869, "eval_loss": null, "eval_perplexity": null, "learning_rate": 5.996201495254757e-06, "grad_norm": 0.6390174627304077} +{"timestamp": "2025-06-04T16:04:10.986353", "epoch": 1, "step": 1330, "global_step": 1330, "train_loss": 0.08034726977348328, "train_perplexity": 1.083663325857333, "eval_loss": null, "eval_perplexity": null, "learning_rate": 5.932300271458406e-06, "grad_norm": 0.6472488641738892} +{"timestamp": "2025-06-04T16:04:14.873661", "epoch": 1, "step": 1340, "global_step": 1340, "train_loss": 0.09573419578373432, "train_perplexity": 1.100466516475581, "eval_loss": null, "eval_perplexity": null, "learning_rate": 5.8682408883346535e-06, "grad_norm": 0.8322047591209412} +{"timestamp": "2025-06-04T16:04:32.200653", "epoch": 1, "step": 2681, "global_step": 1340, "train_loss": 0.20949130744826217, "train_perplexity": 1.23305065670761, "eval_loss": 0.2135066479294492, "eval_perplexity": 1.2380117284553571, "learning_rate": 5.8682408883346535e-06, "grad_norm": null} +{"timestamp": "2025-06-04T16:04:57.217038", "epoch": 2, "step": 10, "global_step": 1350, "train_loss": 0.09219155833125114, "train_perplexity": 
1.0965748600131928, "eval_loss": null, "eval_perplexity": null, "learning_rate": 5.8040342131858654e-06, "grad_norm": 0.7483306527137756} +{"timestamp": "2025-06-04T16:05:01.111916", "epoch": 2, "step": 20, "global_step": 1360, "train_loss": 0.05083961598575115, "train_perplexity": 1.0521541310470375, "eval_loss": null, "eval_perplexity": null, "learning_rate": 5.73969113830165e-06, "grad_norm": 0.6238669753074646} +{"timestamp": "2025-06-04T16:05:04.997146", "epoch": 2, "step": 30, "global_step": 1370, "train_loss": 0.07681288383901119, "train_perplexity": 1.079840001971844, "eval_loss": null, "eval_perplexity": null, "learning_rate": 5.675222579111037e-06, "grad_norm": 0.704855740070343} +{"timestamp": "2025-06-04T16:05:08.893108", "epoch": 2, "step": 40, "global_step": 1380, "train_loss": 0.07093845680356026, "train_perplexity": 1.0735151562811673, "eval_loss": null, "eval_perplexity": null, "learning_rate": 5.610639472330737e-06, "grad_norm": 0.7362458109855652} +{"timestamp": "2025-06-04T16:05:12.780723", "epoch": 2, "step": 50, "global_step": 1390, "train_loss": 0.07507956400513649, "train_perplexity": 1.0779699150765751, "eval_loss": null, "eval_perplexity": null, "learning_rate": 5.545952774109798e-06, "grad_norm": 0.7241469025611877} +{"timestamp": "2025-06-04T16:05:33.899301", "epoch": 2, "step": 60, "global_step": 1400, "train_loss": 0.10019706189632416, "train_perplexity": 1.1053887266127027, "eval_loss": null, "eval_perplexity": null, "learning_rate": 5.481173458170952e-06, "grad_norm": 0.9513847827911377} +{"timestamp": "2025-06-04T16:05:37.785001", "epoch": 2, "step": 70, "global_step": 1410, "train_loss": 0.04052358120679855, "train_perplexity": 1.0413558658410078, "eval_loss": null, "eval_perplexity": null, "learning_rate": 5.4163125139489896e-06, "grad_norm": 0.5646671056747437} +{"timestamp": "2025-06-04T16:05:41.673972", "epoch": 2, "step": 80, "global_step": 1420, "train_loss": 0.13121213763952255, "train_perplexity": 1.140209637038694, "eval_loss": null, "eval_perplexity": null, "learning_rate": 5.351380944726465e-06, "grad_norm": 0.9856654405593872} +{"timestamp": "2025-06-04T16:05:45.562491", "epoch": 2, "step": 90, "global_step": 1430, "train_loss": 0.11307748407125473, "train_perplexity": 1.119718689950157, "eval_loss": null, "eval_perplexity": null, "learning_rate": 5.286389765767056e-06, "grad_norm": 0.9123920202255249} +{"timestamp": "2025-06-04T16:05:49.453509", "epoch": 2, "step": 100, "global_step": 1440, "train_loss": 0.0894496887922287, "train_perplexity": 1.0935723129844412, "eval_loss": null, "eval_perplexity": null, "learning_rate": 5.221350002446882e-06, "grad_norm": 0.822655975818634} +{"timestamp": "2025-06-04T16:06:10.573961", "epoch": 2, "step": 110, "global_step": 1450, "train_loss": 0.04346796963363886, "train_perplexity": 1.044426540411532, "eval_loss": null, "eval_perplexity": null, "learning_rate": 5.156272688384123e-06, "grad_norm": 0.5520569682121277} +{"timestamp": "2025-06-04T16:06:14.466564", "epoch": 2, "step": 120, "global_step": 1460, "train_loss": 0.13778063654899597, "train_perplexity": 1.1477237540142602, "eval_loss": null, "eval_perplexity": null, "learning_rate": 5.0911688635672155e-06, "grad_norm": 1.0230581760406494} +{"timestamp": "2025-06-04T16:06:18.326786", "epoch": 2, "step": 130, "global_step": 1470, "train_loss": 0.07549739256501198, "train_perplexity": 1.0784204158032615, "eval_loss": null, "eval_perplexity": null, "learning_rate": 5.026049572482002e-06, "grad_norm": 0.7364359498023987} +{"timestamp": 
"2025-06-04T16:06:22.218793", "epoch": 2, "step": 140, "global_step": 1480, "train_loss": 0.13180837780237198, "train_perplexity": 1.1408896785322435, "eval_loss": null, "eval_perplexity": null, "learning_rate": 4.96092586223808e-06, "grad_norm": 1.1415563821792603} +{"timestamp": "2025-06-04T16:06:26.103407", "epoch": 2, "step": 150, "global_step": 1490, "train_loss": 0.05570453219115734, "train_perplexity": 1.0572852438266014, "eval_loss": null, "eval_perplexity": null, "learning_rate": 4.895808780694738e-06, "grad_norm": 0.7476668953895569} +{"timestamp": "2025-06-04T16:06:47.262701", "epoch": 2, "step": 160, "global_step": 1500, "train_loss": 0.08492013439536095, "train_perplexity": 1.0886301191236925, "eval_loss": null, "eval_perplexity": null, "learning_rate": 4.8307093745867335e-06, "grad_norm": 0.8331665992736816} +{"timestamp": "2025-06-04T16:06:51.160457", "epoch": 2, "step": 170, "global_step": 1510, "train_loss": 0.09430919960141182, "train_perplexity": 1.0988994726717203, "eval_loss": null, "eval_perplexity": null, "learning_rate": 4.765638687650299e-06, "grad_norm": 0.8835126161575317} +{"timestamp": "2025-06-04T16:06:55.051079", "epoch": 2, "step": 180, "global_step": 1520, "train_loss": 0.039034622721374035, "train_perplexity": 1.0398064839571168, "eval_loss": null, "eval_perplexity": null, "learning_rate": 4.700607758749626e-06, "grad_norm": 0.5312080383300781} +{"timestamp": "2025-06-04T16:06:58.947587", "epoch": 2, "step": 190, "global_step": 1530, "train_loss": 0.09536056965589523, "train_perplexity": 1.1000554302332763, "eval_loss": null, "eval_perplexity": null, "learning_rate": 4.635627620004178e-06, "grad_norm": 0.8708493709564209} +{"timestamp": "2025-06-04T16:07:02.832561", "epoch": 2, "step": 200, "global_step": 1540, "train_loss": 0.058465560898184776, "train_perplexity": 1.0602084724383314, "eval_loss": null, "eval_perplexity": null, "learning_rate": 4.57070929491717e-06, "grad_norm": 0.6560453176498413} +{"timestamp": "2025-06-04T16:07:23.955276", "epoch": 2, "step": 210, "global_step": 1550, "train_loss": 0.07735063508152962, "train_perplexity": 1.0804208434347373, "eval_loss": null, "eval_perplexity": null, "learning_rate": 4.5058637965054905e-06, "grad_norm": 0.8640308976173401} +{"timestamp": "2025-06-04T16:07:27.848027", "epoch": 2, "step": 220, "global_step": 1560, "train_loss": 0.07625348679721355, "train_perplexity": 1.0792361115921199, "eval_loss": null, "eval_perplexity": null, "learning_rate": 4.441102125431398e-06, "grad_norm": 0.829659640789032} +{"timestamp": "2025-06-04T16:07:31.737333", "epoch": 2, "step": 230, "global_step": 1570, "train_loss": 0.10900850594043732, "train_perplexity": 1.1151718358863194, "eval_loss": null, "eval_perplexity": null, "learning_rate": 4.3764352681363365e-06, "grad_norm": 0.9264845252037048} +{"timestamp": "2025-06-04T16:07:35.631800", "epoch": 2, "step": 240, "global_step": 1580, "train_loss": 0.05006047524511814, "train_perplexity": 1.051334674175685, "eval_loss": null, "eval_perplexity": null, "learning_rate": 4.311874194977142e-06, "grad_norm": 0.6202943921089172} +{"timestamp": "2025-06-04T16:07:39.521053", "epoch": 2, "step": 250, "global_step": 1590, "train_loss": 0.08443677611649036, "train_perplexity": 1.0881040478936708, "eval_loss": null, "eval_perplexity": null, "learning_rate": 4.247429858364981e-06, "grad_norm": 0.865699291229248} +{"timestamp": "2025-06-04T16:08:00.602207", "epoch": 2, "step": 260, "global_step": 1600, "train_loss": 0.08922336995601654, "train_perplexity": 1.093324844975645, 
"eval_loss": null, "eval_perplexity": null, "learning_rate": 4.183113190907349e-06, "grad_norm": 0.9664984941482544} +{"timestamp": "2025-06-04T16:08:04.490320", "epoch": 2, "step": 270, "global_step": 1610, "train_loss": 0.10598694533109665, "train_perplexity": 1.111807362134794, "eval_loss": null, "eval_perplexity": null, "learning_rate": 4.11893510355341e-06, "grad_norm": 1.0453600883483887} +{"timestamp": "2025-06-04T16:08:08.355443", "epoch": 2, "step": 280, "global_step": 1620, "train_loss": 0.08416618593037128, "train_perplexity": 1.0878096574482132, "eval_loss": null, "eval_perplexity": null, "learning_rate": 4.054906483743012e-06, "grad_norm": 0.7608398199081421} +{"timestamp": "2025-06-04T16:08:12.233929", "epoch": 2, "step": 290, "global_step": 1630, "train_loss": 0.08983586728572845, "train_perplexity": 1.0939947086476136, "eval_loss": null, "eval_perplexity": null, "learning_rate": 3.99103819355971e-06, "grad_norm": 0.8169810175895691} +{"timestamp": "2025-06-04T16:08:16.116204", "epoch": 2, "step": 300, "global_step": 1640, "train_loss": 0.10179191827774048, "train_perplexity": 1.1071530694400182, "eval_loss": null, "eval_perplexity": null, "learning_rate": 3.927341067888065e-06, "grad_norm": 0.9132513999938965} +{"timestamp": "2025-06-04T16:08:37.120057", "epoch": 2, "step": 310, "global_step": 1650, "train_loss": 0.08725733309984207, "train_perplexity": 1.0911774396653415, "eval_loss": null, "eval_perplexity": null, "learning_rate": 3.863825912575573e-06, "grad_norm": 0.8127678632736206} +{"timestamp": "2025-06-04T16:08:41.005250", "epoch": 2, "step": 320, "global_step": 1660, "train_loss": 0.08614224568009377, "train_perplexity": 1.0899613595734716, "eval_loss": null, "eval_perplexity": null, "learning_rate": 3.800503502599511e-06, "grad_norm": 0.9069166779518127} +{"timestamp": "2025-06-04T16:08:44.869929", "epoch": 2, "step": 330, "global_step": 1670, "train_loss": 0.09273252263665199, "train_perplexity": 1.097168228351735, "eval_loss": null, "eval_perplexity": null, "learning_rate": 3.7373845802390394e-06, "grad_norm": 1.0052939653396606} +{"timestamp": "2025-06-04T16:08:48.751768", "epoch": 2, "step": 340, "global_step": 1680, "train_loss": 0.08590411208570004, "train_perplexity": 1.0897018340592644, "eval_loss": null, "eval_perplexity": null, "learning_rate": 3.6744798532528137e-06, "grad_norm": 0.8956423997879028} +{"timestamp": "2025-06-04T16:08:52.632466", "epoch": 2, "step": 350, "global_step": 1690, "train_loss": 0.04824352078139782, "train_perplexity": 1.0494261812939538, "eval_loss": null, "eval_perplexity": null, "learning_rate": 3.611799993062497e-06, "grad_norm": 0.5963554382324219} +{"timestamp": "2025-06-04T16:09:13.541304", "epoch": 2, "step": 360, "global_step": 1700, "train_loss": 0.053508270531892776, "train_perplexity": 1.0549657168591178, "eval_loss": null, "eval_perplexity": null, "learning_rate": 3.549355632942405e-06, "grad_norm": 0.6234306693077087} +{"timestamp": "2025-06-04T16:09:17.416341", "epoch": 2, "step": 370, "global_step": 1710, "train_loss": 0.04932752437889576, "train_perplexity": 1.0505643798439919, "eval_loss": null, "eval_perplexity": null, "learning_rate": 3.4871573662156287e-06, "grad_norm": 0.5964241623878479} +{"timestamp": "2025-06-04T16:09:21.296846", "epoch": 2, "step": 380, "global_step": 1720, "train_loss": 0.07632846012711525, "train_perplexity": 1.079317028550425, "eval_loss": null, "eval_perplexity": null, "learning_rate": 3.4252157444569478e-06, "grad_norm": 0.8359777331352234} +{"timestamp": 
"2025-06-04T16:09:25.173247", "epoch": 2, "step": 390, "global_step": 1730, "train_loss": 0.04537820816040039, "train_perplexity": 1.0464235510034243, "eval_loss": null, "eval_perplexity": null, "learning_rate": 3.363541275702818e-06, "grad_norm": 0.6145345568656921} +{"timestamp": "2025-06-04T16:09:29.055844", "epoch": 2, "step": 400, "global_step": 1740, "train_loss": 0.09098780155181885, "train_perplexity": 1.0952556447577493, "eval_loss": null, "eval_perplexity": null, "learning_rate": 3.3021444226687267e-06, "grad_norm": 0.8584290742874146} +{"timestamp": "2025-06-04T16:09:49.952086", "epoch": 2, "step": 410, "global_step": 1750, "train_loss": 0.049650244414806366, "train_perplexity": 1.050903472731473, "eval_loss": null, "eval_perplexity": null, "learning_rate": 3.2410356009742784e-06, "grad_norm": 0.6375706791877747} +{"timestamp": "2025-06-04T16:09:53.834130", "epoch": 2, "step": 420, "global_step": 1760, "train_loss": 0.08727036416530609, "train_perplexity": 1.0911916589626367, "eval_loss": null, "eval_perplexity": null, "learning_rate": 3.1802251773762294e-06, "grad_norm": 0.8463383316993713} +{"timestamp": "2025-06-04T16:09:57.714735", "epoch": 2, "step": 430, "global_step": 1770, "train_loss": 0.07331629283726215, "train_perplexity": 1.076070836592692, "eval_loss": null, "eval_perplexity": null, "learning_rate": 3.119723468009829e-06, "grad_norm": 0.7825964093208313} +{"timestamp": "2025-06-04T16:10:01.573670", "epoch": 2, "step": 440, "global_step": 1780, "train_loss": 0.08756432309746742, "train_perplexity": 1.0915124716480558, "eval_loss": null, "eval_perplexity": null, "learning_rate": 3.059540736638751e-06, "grad_norm": 0.851995587348938} +{"timestamp": "2025-06-04T16:10:05.448649", "epoch": 2, "step": 450, "global_step": 1790, "train_loss": 0.039396269246935844, "train_perplexity": 1.0401825943647085, "eval_loss": null, "eval_perplexity": null, "learning_rate": 2.9996871929139104e-06, "grad_norm": 0.5352329015731812} +{"timestamp": "2025-06-04T16:10:26.369149", "epoch": 2, "step": 460, "global_step": 1800, "train_loss": 0.04152260348200798, "train_perplexity": 1.0423967233807683, "eval_loss": null, "eval_perplexity": null, "learning_rate": 2.9401729906414385e-06, "grad_norm": 0.5729257464408875} +{"timestamp": "2025-06-04T16:10:30.245596", "epoch": 2, "step": 470, "global_step": 1810, "train_loss": 0.07019095867872238, "train_perplexity": 1.0727130055552943, "eval_loss": null, "eval_perplexity": null, "learning_rate": 2.881008226060168e-06, "grad_norm": 0.7474201917648315} +{"timestamp": "2025-06-04T16:10:34.126508", "epoch": 2, "step": 480, "global_step": 1820, "train_loss": 0.04946332424879074, "train_perplexity": 1.050707056037576, "eval_loss": null, "eval_perplexity": null, "learning_rate": 2.822202936128858e-06, "grad_norm": 0.6267549991607666} +{"timestamp": "2025-06-04T16:10:38.001835", "epoch": 2, "step": 490, "global_step": 1830, "train_loss": 0.11550524644553661, "train_perplexity": 1.122440403355276, "eval_loss": null, "eval_perplexity": null, "learning_rate": 2.76376709682347e-06, "grad_norm": 0.9410679340362549} +{"timestamp": "2025-06-04T16:10:41.879393", "epoch": 2, "step": 500, "global_step": 1840, "train_loss": 0.06606614962220192, "train_perplexity": 1.06829738229697, "eval_loss": null, "eval_perplexity": null, "learning_rate": 2.7057106214448216e-06, "grad_norm": 0.7640741467475891} +{"timestamp": "2025-06-04T16:11:02.820833", "epoch": 2, "step": 510, "global_step": 1850, "train_loss": 0.06857362389564514, "train_perplexity": 1.0709794717293049, 
"eval_loss": null, "eval_perplexity": null, "learning_rate": 2.6480433589368424e-06, "grad_norm": 0.7982816696166992} +{"timestamp": "2025-06-04T16:11:06.705971", "epoch": 2, "step": 520, "global_step": 1860, "train_loss": 0.11946064792573452, "train_perplexity": 1.126888897779058, "eval_loss": null, "eval_perplexity": null, "learning_rate": 2.5907750922157555e-06, "grad_norm": 1.1855436563491821} +{"timestamp": "2025-06-04T16:11:10.581562", "epoch": 2, "step": 530, "global_step": 1870, "train_loss": 0.05941667780280113, "train_perplexity": 1.0612173343356341, "eval_loss": null, "eval_perplexity": null, "learning_rate": 2.533915536510464e-06, "grad_norm": 0.7668613195419312} +{"timestamp": "2025-06-04T16:11:14.457155", "epoch": 2, "step": 540, "global_step": 1880, "train_loss": 0.07246024534106255, "train_perplexity": 1.0751500630164808, "eval_loss": null, "eval_perplexity": null, "learning_rate": 2.4774743377144265e-06, "grad_norm": 0.7726814150810242} +{"timestamp": "2025-06-04T16:11:18.318211", "epoch": 2, "step": 550, "global_step": 1890, "train_loss": 0.07262087985873222, "train_perplexity": 1.075322783100308, "eval_loss": null, "eval_perplexity": null, "learning_rate": 2.4214610707492752e-06, "grad_norm": 0.8308311700820923} +{"timestamp": "2025-06-04T16:11:39.197748", "epoch": 2, "step": 560, "global_step": 1900, "train_loss": 0.08977160044014454, "train_perplexity": 1.0939244033177784, "eval_loss": null, "eval_perplexity": null, "learning_rate": 2.3658852379404973e-06, "grad_norm": 0.8458873629570007} +{"timestamp": "2025-06-04T16:11:43.081320", "epoch": 2, "step": 570, "global_step": 1910, "train_loss": 0.04722259379923344, "train_perplexity": 1.0483553405078179, "eval_loss": null, "eval_perplexity": null, "learning_rate": 2.3107562674054122e-06, "grad_norm": 0.6285303831100464} +{"timestamp": "2025-06-04T16:11:46.958811", "epoch": 2, "step": 580, "global_step": 1920, "train_loss": 0.05732823722064495, "train_perplexity": 1.0590033576721796, "eval_loss": null, "eval_perplexity": null, "learning_rate": 2.256083511453747e-06, "grad_norm": 0.7101243734359741} +{"timestamp": "2025-06-04T16:11:50.838620", "epoch": 2, "step": 590, "global_step": 1930, "train_loss": 0.056910332292318344, "train_perplexity": 1.0585608874115848, "eval_loss": null, "eval_perplexity": null, "learning_rate": 2.20187624500108e-06, "grad_norm": 0.6693544983863831} +{"timestamp": "2025-06-04T16:11:54.715259", "epoch": 2, "step": 600, "global_step": 1940, "train_loss": 0.075874924659729, "train_perplexity": 1.078827630985389, "eval_loss": null, "eval_perplexity": null, "learning_rate": 2.1481436639953983e-06, "grad_norm": 0.7663268446922302} +{"timestamp": "2025-06-04T16:12:15.657974", "epoch": 2, "step": 610, "global_step": 1950, "train_loss": 0.08494022488594055, "train_perplexity": 1.0886519904565477, "eval_loss": null, "eval_perplexity": null, "learning_rate": 2.094894883857051e-06, "grad_norm": 0.8211141228675842} +{"timestamp": "2025-06-04T16:12:19.543490", "epoch": 2, "step": 620, "global_step": 1960, "train_loss": 0.069455586373806, "train_perplexity": 1.0719244520956086, "eval_loss": null, "eval_perplexity": null, "learning_rate": 2.042138937932388e-06, "grad_norm": 0.7399444580078125} +{"timestamp": "2025-06-04T16:12:23.418781", "epoch": 2, "step": 630, "global_step": 1970, "train_loss": 0.043883929029107094, "train_perplexity": 1.0448610698109204, "eval_loss": null, "eval_perplexity": null, "learning_rate": 1.989884775961296e-06, "grad_norm": 0.5605442523956299} +{"timestamp": 
"2025-06-04T16:12:27.278023", "epoch": 2, "step": 640, "global_step": 1980, "train_loss": 0.05252310447394848, "train_perplexity": 1.0539269122241246, "eval_loss": null, "eval_perplexity": null, "learning_rate": 1.9381412625589237e-06, "grad_norm": 0.641305148601532} +{"timestamp": "2025-06-04T16:12:31.134662", "epoch": 2, "step": 650, "global_step": 1990, "train_loss": 0.06824682280421257, "train_perplexity": 1.0706295316525558, "eval_loss": null, "eval_perplexity": null, "learning_rate": 1.8869171757118554e-06, "grad_norm": 0.8991125226020813} +{"timestamp": "2025-06-04T16:12:52.061672", "epoch": 2, "step": 660, "global_step": 2000, "train_loss": 0.056670188903808594, "train_perplexity": 1.0583067115336877, "eval_loss": null, "eval_perplexity": null, "learning_rate": 1.8362212052889827e-06, "grad_norm": 0.6741258502006531} +{"timestamp": "2025-06-04T16:12:55.944385", "epoch": 2, "step": 670, "global_step": 2010, "train_loss": 0.05249423906207085, "train_perplexity": 1.0538964906287824, "eval_loss": null, "eval_perplexity": null, "learning_rate": 1.7860619515673034e-06, "grad_norm": 0.6633082032203674} +{"timestamp": "2025-06-04T16:12:59.824036", "epoch": 2, "step": 680, "global_step": 2020, "train_loss": 0.07438912987709045, "train_perplexity": 1.0772259047328216, "eval_loss": null, "eval_perplexity": null, "learning_rate": 1.7364479237729526e-06, "grad_norm": 0.8070685863494873} +{"timestamp": "2025-06-04T16:13:03.700676", "epoch": 2, "step": 690, "global_step": 2030, "train_loss": 0.05695551820099354, "train_perplexity": 1.0586087205278538, "eval_loss": null, "eval_perplexity": null, "learning_rate": 1.6873875386376464e-06, "grad_norm": 0.6835163235664368} +{"timestamp": "2025-06-04T16:13:07.561493", "epoch": 2, "step": 700, "global_step": 2040, "train_loss": 0.08398167788982391, "train_perplexity": 1.0876089663349604, "eval_loss": null, "eval_perplexity": null, "learning_rate": 1.63888911897084e-06, "grad_norm": 0.8436160683631897} +{"timestamp": "2025-06-04T16:13:28.476986", "epoch": 2, "step": 710, "global_step": 2050, "train_loss": 0.03553978633135557, "train_perplexity": 1.0361788730639523, "eval_loss": null, "eval_perplexity": null, "learning_rate": 1.5909608922478108e-06, "grad_norm": 0.5797974467277527} +{"timestamp": "2025-06-04T16:13:32.359636", "epoch": 2, "step": 720, "global_step": 2060, "train_loss": 0.07211183197796345, "train_perplexity": 1.07477553161685, "eval_loss": null, "eval_perplexity": null, "learning_rate": 1.5436109892139178e-06, "grad_norm": 0.7558038830757141} +{"timestamp": "2025-06-04T16:13:36.239867", "epoch": 2, "step": 730, "global_step": 2070, "train_loss": 0.0650089755654335, "train_perplexity": 1.0671686027828295, "eval_loss": null, "eval_perplexity": null, "learning_rate": 1.4968474425052576e-06, "grad_norm": 0.7757009863853455} +{"timestamp": "2025-06-04T16:13:40.124392", "epoch": 2, "step": 740, "global_step": 2080, "train_loss": 0.09436911717057228, "train_perplexity": 1.0989653180295018, "eval_loss": null, "eval_perplexity": null, "learning_rate": 1.4506781852859836e-06, "grad_norm": 0.884422779083252} +{"timestamp": "2025-06-04T16:13:44.008383", "epoch": 2, "step": 750, "global_step": 2090, "train_loss": 0.0381308626383543, "train_perplexity": 1.038867172882713, "eval_loss": null, "eval_perplexity": null, "learning_rate": 1.4051110499024923e-06, "grad_norm": 0.536321222782135} +{"timestamp": "2025-06-04T16:14:05.013950", "epoch": 2, "step": 760, "global_step": 2100, "train_loss": 0.045937731862068176, "train_perplexity": 1.0470092136129705, 
"eval_loss": null, "eval_perplexity": null, "learning_rate": 1.360153766554701e-06, "grad_norm": 0.6449976563453674} +{"timestamp": "2025-06-04T16:14:08.897170", "epoch": 2, "step": 770, "global_step": 2110, "train_loss": 0.08238577656447887, "train_perplexity": 1.0858746340238439, "eval_loss": null, "eval_perplexity": null, "learning_rate": 1.3158139619846734e-06, "grad_norm": 1.050763726234436} +{"timestamp": "2025-06-04T16:14:12.773031", "epoch": 2, "step": 780, "global_step": 2120, "train_loss": 0.06662464700639248, "train_perplexity": 1.068894190232843, "eval_loss": null, "eval_perplexity": null, "learning_rate": 1.2720991581827852e-06, "grad_norm": 0.7440298199653625} +{"timestamp": "2025-06-04T16:14:16.639486", "epoch": 2, "step": 790, "global_step": 2130, "train_loss": 0.0670732781291008, "train_perplexity": 1.0693738370176564, "eval_loss": null, "eval_perplexity": null, "learning_rate": 1.229016771111658e-06, "grad_norm": 0.7524603009223938} +{"timestamp": "2025-06-04T16:14:20.526197", "epoch": 2, "step": 800, "global_step": 2140, "train_loss": 0.17072025686502457, "train_perplexity": 1.1861588828007739, "eval_loss": null, "eval_perplexity": null, "learning_rate": 1.186574109448091e-06, "grad_norm": 1.283272385597229} +{"timestamp": "2025-06-04T16:14:41.599092", "epoch": 2, "step": 810, "global_step": 2150, "train_loss": 0.07225818186998367, "train_perplexity": 1.074932836410337, "eval_loss": null, "eval_perplexity": null, "learning_rate": 1.1447783733431799e-06, "grad_norm": 0.8511999845504761} +{"timestamp": "2025-06-04T16:14:45.469048", "epoch": 2, "step": 820, "global_step": 2160, "train_loss": 0.05403805337846279, "train_perplexity": 1.0555247676743196, "eval_loss": null, "eval_perplexity": null, "learning_rate": 1.1036366532008552e-06, "grad_norm": 0.6990443468093872} +{"timestamp": "2025-06-04T16:14:49.353694", "epoch": 2, "step": 830, "global_step": 2170, "train_loss": 0.07556630671024323, "train_perplexity": 1.0784947367852706, "eval_loss": null, "eval_perplexity": null, "learning_rate": 1.0631559284750398e-06, "grad_norm": 0.7718824744224548} +{"timestamp": "2025-06-04T16:14:53.235127", "epoch": 2, "step": 840, "global_step": 2180, "train_loss": 0.07645644620060921, "train_perplexity": 1.0794551749391827, "eval_loss": null, "eval_perplexity": null, "learning_rate": 1.0233430664856236e-06, "grad_norm": 0.8092624545097351} +{"timestamp": "2025-06-04T16:14:57.121511", "epoch": 2, "step": 850, "global_step": 2190, "train_loss": 0.0467569287866354, "train_perplexity": 1.0478672717520643, "eval_loss": null, "eval_perplexity": null, "learning_rate": 9.842048212534567e-07, "grad_norm": 0.6216375827789307} +{"timestamp": "2025-06-04T16:15:18.200082", "epoch": 2, "step": 860, "global_step": 2200, "train_loss": 0.08881667256355286, "train_perplexity": 1.0928802830192994, "eval_loss": null, "eval_perplexity": null, "learning_rate": 9.457478323545749e-07, "grad_norm": 0.910168468952179} +{"timestamp": "2025-06-04T16:15:22.090252", "epoch": 2, "step": 870, "global_step": 2210, "train_loss": 0.09652426838874817, "train_perplexity": 1.1013363084771548, "eval_loss": null, "eval_perplexity": null, "learning_rate": 9.07978623793836e-07, "grad_norm": 0.9564467072486877} +{"timestamp": "2025-06-04T16:15:25.974800", "epoch": 2, "step": 880, "global_step": 2220, "train_loss": 0.03395761735737324, "train_perplexity": 1.0345407592276568, "eval_loss": null, "eval_perplexity": null, "learning_rate": 8.709036028981571e-07, "grad_norm": 0.5178916454315186} +{"timestamp": 
"2025-06-04T16:15:29.860619", "epoch": 2, "step": 890, "global_step": 2230, "train_loss": 0.053766580298542976, "train_perplexity": 1.0552382600060264, "eval_loss": null, "eval_perplexity": null, "learning_rate": 8.345290592295429e-07, "grad_norm": 0.6506232619285583} +{"timestamp": "2025-06-04T16:15:33.750074", "epoch": 2, "step": 900, "global_step": 2240, "train_loss": 0.06971279717981815, "train_perplexity": 1.0722001981088232, "eval_loss": null, "eval_perplexity": null, "learning_rate": 7.988611635181099e-07, "grad_norm": 0.7732512950897217} +{"timestamp": "2025-06-04T16:15:54.818147", "epoch": 2, "step": 910, "global_step": 2250, "train_loss": 0.10275266878306866, "train_perplexity": 1.1082172784487707, "eval_loss": null, "eval_perplexity": null, "learning_rate": 7.639059666152526e-07, "grad_norm": 0.9805467128753662} +{"timestamp": "2025-06-04T16:15:58.707863", "epoch": 2, "step": 920, "global_step": 2260, "train_loss": 0.09875445626676083, "train_perplexity": 1.1037952362782515, "eval_loss": null, "eval_perplexity": null, "learning_rate": 7.296693984671465e-07, "grad_norm": 1.038161277770996} +{"timestamp": "2025-06-04T16:16:02.576727", "epoch": 2, "step": 930, "global_step": 2270, "train_loss": 0.0979168489575386, "train_perplexity": 1.1028710764160194, "eval_loss": null, "eval_perplexity": null, "learning_rate": 6.961572671087741e-07, "grad_norm": 0.9988915920257568} +{"timestamp": "2025-06-04T16:16:06.465591", "epoch": 2, "step": 940, "global_step": 2280, "train_loss": 0.09837886318564415, "train_perplexity": 1.1033807362710761, "eval_loss": null, "eval_perplexity": null, "learning_rate": 6.633752576786251e-07, "grad_norm": 0.9663264155387878} +{"timestamp": "2025-06-04T16:16:10.358347", "epoch": 2, "step": 950, "global_step": 2290, "train_loss": 0.03254261892288923, "train_perplexity": 1.033077920874007, "eval_loss": null, "eval_perplexity": null, "learning_rate": 6.313289314542392e-07, "grad_norm": 0.5405398607254028} +{"timestamp": "2025-06-04T16:16:31.505418", "epoch": 2, "step": 960, "global_step": 2300, "train_loss": 0.03980386257171631, "train_perplexity": 1.0406066522624378, "eval_loss": null, "eval_perplexity": null, "learning_rate": 6.000237249087776e-07, "grad_norm": 0.5731000304222107} +{"timestamp": "2025-06-04T16:16:35.400262", "epoch": 2, "step": 970, "global_step": 2310, "train_loss": 0.08755393698811531, "train_perplexity": 1.0915011351390373, "eval_loss": null, "eval_perplexity": null, "learning_rate": 5.694649487887466e-07, "grad_norm": 0.9045767188072205} +{"timestamp": "2025-06-04T16:16:39.293181", "epoch": 2, "step": 980, "global_step": 2320, "train_loss": 0.0784428808838129, "train_perplexity": 1.0816015732718982, "eval_loss": null, "eval_perplexity": null, "learning_rate": 5.396577872130676e-07, "grad_norm": 0.832079291343689} +{"timestamp": "2025-06-04T16:16:43.181234", "epoch": 2, "step": 990, "global_step": 2330, "train_loss": 0.06308055575937033, "train_perplexity": 1.0651126367327846, "eval_loss": null, "eval_perplexity": null, "learning_rate": 5.106072967936188e-07, "grad_norm": 0.8090586066246033} +{"timestamp": "2025-06-04T16:16:47.070248", "epoch": 2, "step": 1000, "global_step": 2340, "train_loss": 0.08401019498705864, "train_perplexity": 1.0876399822278462, "eval_loss": null, "eval_perplexity": null, "learning_rate": 4.823184057774116e-07, "grad_norm": 0.8339213132858276} +{"timestamp": "2025-06-04T16:17:08.123786", "epoch": 2, "step": 1010, "global_step": 2350, "train_loss": 0.10523340478539467, "train_perplexity": 1.1109698857842367, 
"eval_loss": null, "eval_perplexity": null, "learning_rate": 4.5479591321053895e-07, "grad_norm": 0.9590398669242859} +{"timestamp": "2025-06-04T16:17:11.996628", "epoch": 2, "step": 1020, "global_step": 2360, "train_loss": 0.15093089640140533, "train_perplexity": 1.1629162936043624, "eval_loss": null, "eval_perplexity": null, "learning_rate": 4.2804448812404754e-07, "grad_norm": 1.2930185794830322} +{"timestamp": "2025-06-04T16:17:15.862961", "epoch": 2, "step": 1030, "global_step": 2370, "train_loss": 0.044746036641299725, "train_perplexity": 1.0457622408902971, "eval_loss": null, "eval_perplexity": null, "learning_rate": 4.020686687418651e-07, "grad_norm": 0.7421437501907349} +{"timestamp": "2025-06-04T16:17:19.746935", "epoch": 2, "step": 1040, "global_step": 2380, "train_loss": 0.07852279394865036, "train_perplexity": 1.0816880108222502, "eval_loss": null, "eval_perplexity": null, "learning_rate": 3.7687286171091355e-07, "grad_norm": 0.8390912413597107} +{"timestamp": "2025-06-04T16:17:23.600032", "epoch": 2, "step": 1050, "global_step": 2390, "train_loss": 0.08741313591599464, "train_perplexity": 1.0913474614279544, "eval_loss": null, "eval_perplexity": null, "learning_rate": 3.5246134135354934e-07, "grad_norm": 0.8625169396400452} +{"timestamp": "2025-06-04T16:17:44.673962", "epoch": 2, "step": 1060, "global_step": 2400, "train_loss": 0.09790205955505371, "train_perplexity": 1.1028547657323942, "eval_loss": null, "eval_perplexity": null, "learning_rate": 3.288382489424502e-07, "grad_norm": 0.9403291344642639} +{"timestamp": "2025-06-04T16:17:48.563786", "epoch": 2, "step": 1070, "global_step": 2410, "train_loss": 0.08274862542748451, "train_perplexity": 1.0862687138915583, "eval_loss": null, "eval_perplexity": null, "learning_rate": 3.0600759199806815e-07, "grad_norm": 0.8528719544410706} +{"timestamp": "2025-06-04T16:17:52.433486", "epoch": 2, "step": 1080, "global_step": 2420, "train_loss": 0.12123417109251022, "train_perplexity": 1.1288892346432176, "eval_loss": null, "eval_perplexity": null, "learning_rate": 2.839732436087833e-07, "grad_norm": 1.396231770515442} +{"timestamp": "2025-06-04T16:17:56.301147", "epoch": 2, "step": 1090, "global_step": 2430, "train_loss": 0.05448045115917921, "train_perplexity": 1.0559918327956987, "eval_loss": null, "eval_perplexity": null, "learning_rate": 2.6273894177385074e-07, "grad_norm": 0.6799153089523315} +{"timestamp": "2025-06-04T16:18:00.185705", "epoch": 2, "step": 1100, "global_step": 2440, "train_loss": 0.041266513988375664, "train_perplexity": 1.0421298107099377, "eval_loss": null, "eval_perplexity": null, "learning_rate": 2.4230828876927293e-07, "grad_norm": 0.5852925181388855} +{"timestamp": "2025-06-04T16:18:21.293365", "epoch": 2, "step": 1110, "global_step": 2450, "train_loss": 0.08677143976092339, "train_perplexity": 1.0906473726042856, "eval_loss": null, "eval_perplexity": null, "learning_rate": 2.2268475053669803e-07, "grad_norm": 0.9448606371879578} +{"timestamp": "2025-06-04T16:18:25.186404", "epoch": 2, "step": 1120, "global_step": 2460, "train_loss": 0.04151636362075806, "train_perplexity": 1.0423902189901402, "eval_loss": null, "eval_perplexity": null, "learning_rate": 2.0387165609543736e-07, "grad_norm": 0.6255165934562683} +{"timestamp": "2025-06-04T16:18:29.077248", "epoch": 2, "step": 1130, "global_step": 2470, "train_loss": 0.07429993152618408, "train_perplexity": 1.0771298222438292, "eval_loss": null, "eval_perplexity": null, "learning_rate": 1.8587219697771942e-07, "grad_norm": 0.9192423820495605} +{"timestamp": 
"2025-06-04T16:18:32.966138", "epoch": 2, "step": 1140, "global_step": 2480, "train_loss": 0.07785939425230026, "train_perplexity": 1.0809706572966895, "eval_loss": null, "eval_perplexity": null, "learning_rate": 1.6868942668726408e-07, "grad_norm": 0.8874773979187012} +{"timestamp": "2025-06-04T16:18:36.856620", "epoch": 2, "step": 1150, "global_step": 2490, "train_loss": 0.05496346019208431, "train_perplexity": 1.0565020095896944, "eval_loss": null, "eval_perplexity": null, "learning_rate": 1.5232626018127582e-07, "grad_norm": 0.6460995078086853} +{"timestamp": "2025-06-04T16:18:57.884077", "epoch": 2, "step": 1160, "global_step": 2500, "train_loss": 0.09730996191501617, "train_perplexity": 1.1022019613093508, "eval_loss": null, "eval_perplexity": null, "learning_rate": 1.3678547337593494e-07, "grad_norm": 0.8977996706962585} +{"timestamp": "2025-06-04T16:19:01.766719", "epoch": 2, "step": 1170, "global_step": 2510, "train_loss": 0.050714531913399696, "train_perplexity": 1.0520225315542564, "eval_loss": null, "eval_perplexity": null, "learning_rate": 1.2206970267548078e-07, "grad_norm": 0.6345444321632385} +{"timestamp": "2025-06-04T16:19:05.649466", "epoch": 2, "step": 1180, "global_step": 2520, "train_loss": 0.11084700748324394, "train_perplexity": 1.1172239668720507, "eval_loss": null, "eval_perplexity": null, "learning_rate": 1.0818144452496293e-07, "grad_norm": 0.9890771508216858} +{"timestamp": "2025-06-04T16:19:09.538711", "epoch": 2, "step": 1190, "global_step": 2530, "train_loss": 0.06192183867096901, "train_perplexity": 1.063879187267022, "eval_loss": null, "eval_perplexity": null, "learning_rate": 9.512305498672936e-08, "grad_norm": 0.6979386806488037} +{"timestamp": "2025-06-04T16:19:13.424704", "epoch": 2, "step": 1200, "global_step": 2540, "train_loss": 0.11293324083089828, "train_perplexity": 1.1195571897459673, "eval_loss": null, "eval_perplexity": null, "learning_rate": 8.289674934073844e-08, "grad_norm": 1.0071110725402832} +{"timestamp": "2025-06-04T16:19:34.488446", "epoch": 2, "step": 1210, "global_step": 2550, "train_loss": 0.07455204799771309, "train_perplexity": 1.077401418649517, "eval_loss": null, "eval_perplexity": null, "learning_rate": 7.150460170874895e-08, "grad_norm": 0.790630578994751} +{"timestamp": "2025-06-04T16:19:38.381800", "epoch": 2, "step": 1220, "global_step": 2560, "train_loss": 0.03821308817714453, "train_perplexity": 1.0389525978077419, "eval_loss": null, "eval_perplexity": null, "learning_rate": 6.094854470245326e-08, "grad_norm": 0.6562174558639526} +{"timestamp": "2025-06-04T16:19:42.266736", "epoch": 2, "step": 1230, "global_step": 2570, "train_loss": 0.08794312551617622, "train_perplexity": 1.0919260175335126, "eval_loss": null, "eval_perplexity": null, "learning_rate": 5.123036909562673e-08, "grad_norm": 0.8555896878242493} +{"timestamp": "2025-06-04T16:19:46.155691", "epoch": 2, "step": 1240, "global_step": 2580, "train_loss": 0.05262951739132404, "train_perplexity": 1.0540390696289477, "eval_loss": null, "eval_perplexity": null, "learning_rate": 4.235172352033024e-08, "grad_norm": 0.691398561000824} +{"timestamp": "2025-06-04T16:19:50.041245", "epoch": 2, "step": 1250, "global_step": 2590, "train_loss": 0.08566490188241005, "train_perplexity": 1.089441197436726, "eval_loss": null, "eval_perplexity": null, "learning_rate": 3.431411418722941e-08, "grad_norm": 0.8565791845321655} +{"timestamp": "2025-06-04T16:20:11.046606", "epoch": 2, "step": 1260, "global_step": 2600, "train_loss": 0.05852423422038555, "train_perplexity": 
1.0602706802165853, "eval_loss": null, "eval_perplexity": null, "learning_rate": 2.711890463007405e-08, "grad_norm": 0.6980656981468201} +{"timestamp": "2025-06-04T16:20:14.937058", "epoch": 2, "step": 1270, "global_step": 2610, "train_loss": 0.052355626598000526, "train_perplexity": 1.053750417563349, "eval_loss": null, "eval_perplexity": null, "learning_rate": 2.076731547438593e-08, "grad_norm": 0.6781346201896667} +{"timestamp": "2025-06-04T16:20:18.817416", "epoch": 2, "step": 1280, "global_step": 2620, "train_loss": 0.0786192286759615, "train_perplexity": 1.0817923281404347, "eval_loss": null, "eval_perplexity": null, "learning_rate": 1.5260424230382763e-08, "grad_norm": 0.8754461407661438} +{"timestamp": "2025-06-04T16:20:22.700789", "epoch": 2, "step": 1290, "global_step": 2630, "train_loss": 0.09569213911890984, "train_perplexity": 1.1004202354973656, "eval_loss": null, "eval_perplexity": null, "learning_rate": 1.0599165110186105e-08, "grad_norm": 0.8618500828742981} +{"timestamp": "2025-06-04T16:20:26.583343", "epoch": 2, "step": 1300, "global_step": 2640, "train_loss": 0.07639635913074017, "train_perplexity": 1.0793903155892892, "eval_loss": null, "eval_perplexity": null, "learning_rate": 6.784328869339218e-09, "grad_norm": 0.8118147253990173} +{"timestamp": "2025-06-04T16:20:47.500884", "epoch": 2, "step": 1310, "global_step": 2650, "train_loss": 0.05080252140760422, "train_perplexity": 1.052115102557278, "eval_loss": null, "eval_perplexity": null, "learning_rate": 3.816562672658841e-09, "grad_norm": 0.6867138743400574} +{"timestamp": "2025-06-04T16:20:51.383803", "epoch": 2, "step": 1320, "global_step": 2660, "train_loss": 0.06145966425538063, "train_perplexity": 1.0633876031329454, "eval_loss": null, "eval_perplexity": null, "learning_rate": 1.6963699844474434e-09, "grad_norm": 0.7489965558052063} +{"timestamp": "2025-06-04T16:20:55.262606", "epoch": 2, "step": 1330, "global_step": 2670, "train_loss": 0.051951976493000984, "train_perplexity": 1.0533251569306799, "eval_loss": null, "eval_perplexity": null, "learning_rate": 4.2411048308210744e-10, "grad_norm": 0.6713370680809021} +{"timestamp": "2025-06-04T16:20:59.142862", "epoch": 2, "step": 1340, "global_step": 2680, "train_loss": 0.08062903303653002, "train_perplexity": 1.0839687053924174, "eval_loss": null, "eval_perplexity": null, "learning_rate": 0.0, "grad_norm": 0.81634920835495}