{
  "best_global_step": null,
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 2.97737556561086,
  "eval_steps": 500,
  "global_step": 330,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.09049773755656108,
      "grad_norm": 5.675890171531588,
      "learning_rate": 5.4545454545454545e-06,
      "loss": 1.6551,
      "step": 10
    },
    {
      "epoch": 0.18099547511312217,
      "grad_norm": 3.1493873949654208,
      "learning_rate": 1.1515151515151517e-05,
      "loss": 0.9996,
      "step": 20
    },
    {
      "epoch": 0.27149321266968324,
      "grad_norm": 2.317346702211777,
      "learning_rate": 1.7575757575757576e-05,
      "loss": 0.8852,
      "step": 30
    },
    {
      "epoch": 0.36199095022624433,
      "grad_norm": 2.1665431468038485,
      "learning_rate": 1.9979866764718846e-05,
      "loss": 0.9125,
      "step": 40
    },
    {
      "epoch": 0.45248868778280543,
      "grad_norm": 1.9872442646240362,
      "learning_rate": 1.9857123789054707e-05,
      "loss": 0.8808,
      "step": 50
    },
    {
      "epoch": 0.5429864253393665,
      "grad_norm": 1.8230261485664525,
      "learning_rate": 1.9624193376650708e-05,
      "loss": 0.8431,
      "step": 60
    },
    {
      "epoch": 0.6334841628959276,
      "grad_norm": 1.974350474015766,
      "learning_rate": 1.9283679330160726e-05,
      "loss": 0.8398,
      "step": 70
    },
    {
      "epoch": 0.7239819004524887,
      "grad_norm": 1.9362550230328093,
      "learning_rate": 1.8839388071291506e-05,
      "loss": 0.8562,
      "step": 80
    },
    {
      "epoch": 0.8144796380090498,
      "grad_norm": 1.610981133740552,
      "learning_rate": 1.8296286090880362e-05,
      "loss": 0.8251,
      "step": 90
    },
    {
      "epoch": 0.9049773755656109,
      "grad_norm": 1.8899605055386794,
      "learning_rate": 1.766044443118978e-05,
      "loss": 0.8464,
      "step": 100
    },
    {
      "epoch": 0.995475113122172,
      "grad_norm": 1.7571115754322442,
      "learning_rate": 1.693897082102109e-05,
      "loss": 0.8404,
      "step": 110
    },
    {
      "epoch": 1.081447963800905,
      "grad_norm": 1.65645207980794,
      "learning_rate": 1.613993022227202e-05,
      "loss": 0.7015,
      "step": 120
    },
    {
      "epoch": 1.1719457013574661,
      "grad_norm": 1.6906605717376717,
      "learning_rate": 1.5272254676105026e-05,
      "loss": 0.634,
      "step": 130
    },
    {
      "epoch": 1.262443438914027,
      "grad_norm": 1.6559934264823226,
      "learning_rate": 1.4345643456507126e-05,
      "loss": 0.657,
      "step": 140
    },
    {
      "epoch": 1.3529411764705883,
      "grad_norm": 1.5859757830352863,
      "learning_rate": 1.3370454647370418e-05,
      "loss": 0.6293,
      "step": 150
    },
    {
      "epoch": 1.4434389140271493,
      "grad_norm": 1.5392446269424735,
      "learning_rate": 1.2357589355094275e-05,
      "loss": 0.6383,
      "step": 160
    },
    {
      "epoch": 1.5339366515837103,
      "grad_norm": 1.75731044939785,
      "learning_rate": 1.1318369851033604e-05,
      "loss": 0.6,
      "step": 170
    },
    {
      "epoch": 1.6244343891402715,
      "grad_norm": 1.4325139156397309,
      "learning_rate": 1.0264413005972736e-05,
      "loss": 0.617,
      "step": 180
    },
    {
      "epoch": 1.7149321266968327,
      "grad_norm": 1.6359135154692293,
      "learning_rate": 9.207500431432115e-06,
      "loss": 0.6267,
      "step": 190
    },
    {
      "epoch": 1.8054298642533937,
      "grad_norm": 1.5756156420029719,
      "learning_rate": 8.159446779427798e-06,
      "loss": 0.6466,
      "step": 200
    },
    {
      "epoch": 1.8959276018099547,
      "grad_norm": 1.576884871710188,
      "learning_rate": 7.131967672889101e-06,
      "loss": 0.637,
      "step": 210
    },
    {
      "epoch": 1.9864253393665159,
      "grad_norm": 1.450435273138839,
      "learning_rate": 6.136548743068713e-06,
      "loss": 0.6059,
      "step": 220
    },
    {
      "epoch": 2.072398190045249,
      "grad_norm": 1.7672371670783633,
      "learning_rate": 5.184317237904939e-06,
      "loss": 0.487,
      "step": 230
    },
    {
      "epoch": 2.16289592760181,
      "grad_norm": 1.6979920248439577,
      "learning_rate": 4.2859176365564294e-06,
      "loss": 0.4366,
      "step": 240
    },
    {
      "epoch": 2.253393665158371,
      "grad_norm": 1.8644743282510046,
      "learning_rate": 3.4513926605471504e-06,
      "loss": 0.4173,
      "step": 250
    },
    {
      "epoch": 2.3438914027149322,
      "grad_norm": 1.5579346123392488,
      "learning_rate": 2.690071011633284e-06,
      "loss": 0.4134,
      "step": 260
    },
    {
      "epoch": 2.4343891402714934,
      "grad_norm": 1.8589064799645634,
      "learning_rate": 2.010463091309587e-06,
      "loss": 0.4075,
      "step": 270
    },
    {
      "epoch": 2.524886877828054,
      "grad_norm": 1.7947832163484936,
      "learning_rate": 1.4201658676502294e-06,
      "loss": 0.3921,
      "step": 280
    },
    {
      "epoch": 2.6153846153846154,
      "grad_norm": 1.975474474170027,
      "learning_rate": 9.257779529260558e-07,
      "loss": 0.3693,
      "step": 290
    },
    {
      "epoch": 2.7058823529411766,
      "grad_norm": 1.85340769413525,
      "learning_rate": 5.328258412994958e-07,
      "loss": 0.4024,
      "step": 300
    },
    {
      "epoch": 2.7963800904977374,
      "grad_norm": 1.9481497542668331,
      "learning_rate": 2.4570213114592957e-07,
      "loss": 0.3926,
      "step": 310
    },
    {
      "epoch": 2.8868778280542986,
      "grad_norm": 1.458202310148974,
      "learning_rate": 6.761642258056977e-08,
      "loss": 0.3807,
      "step": 320
    },
    {
      "epoch": 2.97737556561086,
      "grad_norm": 1.7723492664729235,
      "learning_rate": 5.594390808494332e-10,
      "loss": 0.3905,
      "step": 330
    }
  ],
  "logging_steps": 10,
  "max_steps": 330,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 3,
  "save_steps": 100,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 27141445976064.0,
  "train_batch_size": 4,
  "trial_name": null,
  "trial_params": null
}