{
  "best_global_step": null,
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 1.0533333333333332,
  "eval_steps": 500,
  "global_step": 60,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.017777777777777778,
      "grad_norm": 9.902738571166992,
      "learning_rate": 0.0,
      "loss": 7.5683,
      "step": 1
    },
    {
      "epoch": 0.035555555555555556,
      "grad_norm": 8.882129669189453,
      "learning_rate": 4e-05,
      "loss": 7.3788,
      "step": 2
    },
    {
      "epoch": 0.05333333333333334,
      "grad_norm": 6.9227614402771,
      "learning_rate": 8e-05,
      "loss": 7.2323,
      "step": 3
    },
    {
      "epoch": 0.07111111111111111,
      "grad_norm": 3.930971384048462,
      "learning_rate": 0.00012,
      "loss": 7.0819,
      "step": 4
    },
    {
      "epoch": 0.08888888888888889,
      "grad_norm": 3.7796881198883057,
      "learning_rate": 0.00016,
      "loss": 6.7808,
      "step": 5
    },
    {
      "epoch": 0.10666666666666667,
      "grad_norm": 3.719074249267578,
      "learning_rate": 0.0002,
      "loss": 6.704,
      "step": 6
    },
    {
      "epoch": 0.12444444444444444,
      "grad_norm": 5.659056186676025,
      "learning_rate": 0.00019636363636363636,
      "loss": 6.5588,
      "step": 7
    },
    {
      "epoch": 0.14222222222222222,
      "grad_norm": 11.144302368164062,
      "learning_rate": 0.00019272727272727274,
      "loss": 6.4086,
      "step": 8
    },
    {
      "epoch": 0.16,
      "grad_norm": 4.9178338050842285,
      "learning_rate": 0.0001890909090909091,
      "loss": 6.2833,
      "step": 9
    },
    {
      "epoch": 0.17777777777777778,
      "grad_norm": 2.524080991744995,
      "learning_rate": 0.00018545454545454545,
      "loss": 6.2426,
      "step": 10
    },
    {
      "epoch": 0.19555555555555557,
      "grad_norm": 3.267975330352783,
      "learning_rate": 0.00018181818181818183,
      "loss": 6.2025,
      "step": 11
    },
    {
      "epoch": 0.21333333333333335,
      "grad_norm": 3.699554681777954,
      "learning_rate": 0.0001781818181818182,
      "loss": 6.1311,
      "step": 12
    },
    {
      "epoch": 0.2311111111111111,
      "grad_norm": 5.842496395111084,
      "learning_rate": 0.00017454545454545454,
      "loss": 6.106,
      "step": 13
    },
    {
      "epoch": 0.24888888888888888,
      "grad_norm": 4.9988532066345215,
      "learning_rate": 0.0001709090909090909,
      "loss": 6.1794,
      "step": 14
    },
    {
      "epoch": 0.26666666666666666,
      "grad_norm": 4.165736198425293,
      "learning_rate": 0.00016727272727272728,
      "loss": 6.1222,
      "step": 15
    },
    {
      "epoch": 0.28444444444444444,
      "grad_norm": 4.161974906921387,
      "learning_rate": 0.00016363636363636366,
      "loss": 6.0748,
      "step": 16
    },
    {
      "epoch": 0.3022222222222222,
      "grad_norm": 3.4932847023010254,
      "learning_rate": 0.00016,
      "loss": 5.9904,
      "step": 17
    },
    {
      "epoch": 0.32,
      "grad_norm": 4.147347927093506,
      "learning_rate": 0.00015636363636363637,
      "loss": 6.041,
      "step": 18
    },
    {
      "epoch": 0.3377777777777778,
      "grad_norm": 3.2812204360961914,
      "learning_rate": 0.00015272727272727275,
      "loss": 5.9663,
      "step": 19
    },
    {
      "epoch": 0.35555555555555557,
      "grad_norm": 3.3930680751800537,
      "learning_rate": 0.0001490909090909091,
      "loss": 5.9091,
      "step": 20
    },
    {
      "epoch": 0.37333333333333335,
      "grad_norm": 4.013797283172607,
      "learning_rate": 0.00014545454545454546,
      "loss": 5.931,
      "step": 21
    },
    {
      "epoch": 0.39111111111111113,
      "grad_norm": 2.85188364982605,
      "learning_rate": 0.00014181818181818184,
      "loss": 5.8464,
      "step": 22
    },
    {
      "epoch": 0.4088888888888889,
      "grad_norm": 2.7872464656829834,
      "learning_rate": 0.0001381818181818182,
      "loss": 5.943,
      "step": 23
    },
    {
      "epoch": 0.4266666666666667,
      "grad_norm": 3.2645275592803955,
      "learning_rate": 0.00013454545454545455,
      "loss": 5.8657,
      "step": 24
    },
    {
      "epoch": 0.4444444444444444,
      "grad_norm": 2.8722269535064697,
      "learning_rate": 0.00013090909090909093,
      "loss": 5.7727,
      "step": 25
    },
    {
      "epoch": 0.4622222222222222,
      "grad_norm": 2.1603174209594727,
      "learning_rate": 0.00012727272727272728,
      "loss": 5.8712,
      "step": 26
    },
    {
      "epoch": 0.48,
      "grad_norm": 2.783595561981201,
      "learning_rate": 0.00012363636363636364,
      "loss": 5.9149,
      "step": 27
    },
    {
      "epoch": 0.49777777777777776,
      "grad_norm": 2.2030014991760254,
      "learning_rate": 0.00012,
      "loss": 5.8502,
      "step": 28
    },
    {
      "epoch": 0.5155555555555555,
      "grad_norm": 2.821263074874878,
      "learning_rate": 0.00011636363636363636,
      "loss": 5.8471,
      "step": 29
    },
    {
      "epoch": 0.5333333333333333,
      "grad_norm": 4.963853359222412,
      "learning_rate": 0.00011272727272727272,
      "loss": 5.9404,
      "step": 30
    },
    {
      "epoch": 0.5511111111111111,
      "grad_norm": 2.6349852085113525,
      "learning_rate": 0.00010909090909090909,
      "loss": 5.8419,
      "step": 31
    },
    {
      "epoch": 0.5688888888888889,
      "grad_norm": 2.6861917972564697,
      "learning_rate": 0.00010545454545454545,
      "loss": 5.7972,
      "step": 32
    },
    {
      "epoch": 0.5866666666666667,
      "grad_norm": 2.8622891902923584,
      "learning_rate": 0.00010181818181818181,
      "loss": 5.7338,
      "step": 33
    },
    {
      "epoch": 0.6044444444444445,
      "grad_norm": 2.2102324962615967,
      "learning_rate": 9.818181818181818e-05,
      "loss": 5.7475,
      "step": 34
    },
    {
      "epoch": 0.6222222222222222,
      "grad_norm": 2.9417433738708496,
      "learning_rate": 9.454545454545455e-05,
      "loss": 5.7562,
      "step": 35
    },
    {
      "epoch": 0.64,
      "grad_norm": 2.533151388168335,
      "learning_rate": 9.090909090909092e-05,
      "loss": 5.7702,
      "step": 36
    },
    {
      "epoch": 0.6577777777777778,
      "grad_norm": 2.4525845050811768,
      "learning_rate": 8.727272727272727e-05,
      "loss": 5.7764,
      "step": 37
    },
    {
      "epoch": 0.6755555555555556,
      "grad_norm": 3.402923107147217,
      "learning_rate": 8.363636363636364e-05,
      "loss": 5.9145,
      "step": 38
    },
    {
      "epoch": 0.6933333333333334,
      "grad_norm": 3.261005163192749,
      "learning_rate": 8e-05,
      "loss": 5.7848,
      "step": 39
    },
    {
      "epoch": 0.7111111111111111,
      "grad_norm": 2.244154930114746,
      "learning_rate": 7.636363636363637e-05,
      "loss": 5.7873,
      "step": 40
    },
    {
      "epoch": 0.7288888888888889,
      "grad_norm": 3.457752227783203,
      "learning_rate": 7.272727272727273e-05,
      "loss": 5.8139,
      "step": 41
    },
    {
      "epoch": 0.7466666666666667,
      "grad_norm": 2.3376035690307617,
      "learning_rate": 6.90909090909091e-05,
      "loss": 5.7564,
      "step": 42
    },
    {
      "epoch": 0.7644444444444445,
      "grad_norm": 3.8495278358459473,
      "learning_rate": 6.545454545454546e-05,
      "loss": 5.6566,
      "step": 43
    },
    {
      "epoch": 0.7822222222222223,
      "grad_norm": 2.349485158920288,
      "learning_rate": 6.181818181818182e-05,
      "loss": 5.7529,
      "step": 44
    },
    {
      "epoch": 0.8,
      "grad_norm": 2.8913824558258057,
      "learning_rate": 5.818181818181818e-05,
      "loss": 5.706,
      "step": 45
    },
    {
      "epoch": 0.8177777777777778,
      "grad_norm": 3.932647705078125,
      "learning_rate": 5.4545454545454546e-05,
      "loss": 5.8259,
      "step": 46
    },
    {
      "epoch": 0.8355555555555556,
      "grad_norm": 2.2334136962890625,
      "learning_rate": 5.090909090909091e-05,
      "loss": 5.7211,
      "step": 47
    },
    {
      "epoch": 0.8533333333333334,
      "grad_norm": 1.9286762475967407,
      "learning_rate": 4.7272727272727275e-05,
      "loss": 5.6765,
      "step": 48
    },
    {
      "epoch": 0.8711111111111111,
      "grad_norm": 2.598808526992798,
      "learning_rate": 4.3636363636363636e-05,
      "loss": 5.7905,
      "step": 49
    },
    {
      "epoch": 0.8888888888888888,
      "grad_norm": 2.2895076274871826,
      "learning_rate": 4e-05,
      "loss": 5.6579,
      "step": 50
    },
    {
      "epoch": 0.9066666666666666,
      "grad_norm": 3.7514963150024414,
      "learning_rate": 3.6363636363636364e-05,
      "loss": 5.734,
      "step": 51
    },
    {
      "epoch": 0.9244444444444444,
      "grad_norm": 2.71718430519104,
      "learning_rate": 3.272727272727273e-05,
      "loss": 5.6998,
      "step": 52
    },
    {
      "epoch": 0.9422222222222222,
      "grad_norm": 1.9475185871124268,
      "learning_rate": 2.909090909090909e-05,
      "loss": 5.6739,
      "step": 53
    },
    {
      "epoch": 0.96,
      "grad_norm": 2.6652722358703613,
      "learning_rate": 2.5454545454545454e-05,
      "loss": 5.8915,
      "step": 54
    },
    {
      "epoch": 0.9777777777777777,
      "grad_norm": 2.292740821838379,
      "learning_rate": 2.1818181818181818e-05,
      "loss": 5.7107,
      "step": 55
    },
    {
      "epoch": 0.9955555555555555,
      "grad_norm": 2.497856855392456,
      "learning_rate": 1.8181818181818182e-05,
      "loss": 5.6296,
      "step": 56
    },
    {
      "epoch": 1.0,
      "grad_norm": 4.55602502822876,
      "learning_rate": 1.4545454545454545e-05,
      "loss": 5.5288,
      "step": 57
    },
    {
      "epoch": 1.0177777777777777,
      "grad_norm": 2.530329704284668,
      "learning_rate": 1.0909090909090909e-05,
      "loss": 5.3935,
      "step": 58
    },
    {
      "epoch": 1.0355555555555556,
      "grad_norm": 2.5937700271606445,
      "learning_rate": 7.272727272727272e-06,
      "loss": 5.3327,
      "step": 59
    },
    {
      "epoch": 1.0533333333333332,
      "grad_norm": 2.1082897186279297,
      "learning_rate": 3.636363636363636e-06,
      "loss": 5.298,
      "step": 60
    }
  ],
  "logging_steps": 1,
  "max_steps": 60,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 2,
  "save_steps": 500,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 401136498706944.0,
  "train_batch_size": 2,
  "trial_name": null,
  "trial_params": null
}