{
  "best_global_step": null,
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 10.0,
  "eval_steps": 500,
  "global_step": 40,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.32,
      "grad_norm": 0.03473077821126121,
      "learning_rate": 0.0,
      "loss": 1.855,
      "step": 1
    },
    {
      "epoch": 0.64,
      "grad_norm": 0.035850848919996056,
      "learning_rate": 5e-05,
      "loss": 1.9926,
      "step": 2
    },
    {
      "epoch": 0.96,
      "grad_norm": 0.07242531500164132,
      "learning_rate": 4.9918932703355256e-05,
      "loss": 1.9838,
      "step": 3
    },
    {
      "epoch": 1.0,
      "grad_norm": 0.07242531500164132,
      "learning_rate": 4.967625656594782e-05,
      "loss": 1.9305,
      "step": 4
    },
    {
      "epoch": 1.32,
      "grad_norm": 0.536791987008578,
      "learning_rate": 4.92735454356513e-05,
      "loss": 1.8536,
      "step": 5
    },
    {
      "epoch": 1.6400000000000001,
      "grad_norm": 0.04135597442300236,
      "learning_rate": 4.8713411048678635e-05,
      "loss": 1.9887,
      "step": 6
    },
    {
      "epoch": 1.96,
      "grad_norm": 0.18990782320783178,
      "learning_rate": 4.799948609147061e-05,
      "loss": 1.9696,
      "step": 7
    },
    {
      "epoch": 2.0,
      "grad_norm": 0.18990782320783178,
      "learning_rate": 4.713640064133025e-05,
      "loss": 1.9114,
      "step": 8
    },
    {
      "epoch": 2.32,
      "grad_norm": 0.11234308928288564,
      "learning_rate": 4.6129752138594874e-05,
      "loss": 1.85,
      "step": 9
    },
    {
      "epoch": 2.64,
      "grad_norm": 0.058076252155169324,
      "learning_rate": 4.498606908508754e-05,
      "loss": 1.9732,
      "step": 10
    },
    {
      "epoch": 2.96,
      "grad_norm": 0.13987666899200468,
      "learning_rate": 4.371276870427753e-05,
      "loss": 1.9427,
      "step": 11
    },
    {
      "epoch": 3.0,
      "grad_norm": 0.13987666899200468,
      "learning_rate": 4.231810883773999e-05,
      "loss": 1.8893,
      "step": 12
    },
    {
      "epoch": 3.32,
      "grad_norm": 0.12718968249921672,
      "learning_rate": 4.0811134389884433e-05,
      "loss": 1.8437,
      "step": 13
    },
    {
      "epoch": 3.64,
      "grad_norm": 0.04520417007189144,
      "learning_rate": 3.920161866827889e-05,
      "loss": 1.9575,
      "step": 14
    },
    {
      "epoch": 3.96,
      "grad_norm": 0.04844047771097423,
      "learning_rate": 3.7500000000000003e-05,
      "loss": 1.9313,
      "step": 15
    },
    {
      "epoch": 4.0,
      "grad_norm": 0.04844047771097423,
      "learning_rate": 3.5717314035076355e-05,
      "loss": 1.8717,
      "step": 16
    },
    {
      "epoch": 4.32,
      "grad_norm": 0.14689321776031963,
      "learning_rate": 3.386512217606339e-05,
      "loss": 1.8364,
      "step": 17
    },
    {
      "epoch": 4.64,
      "grad_norm": 0.048287298678026926,
      "learning_rate": 3.195543659791132e-05,
      "loss": 1.9484,
      "step": 18
    },
    {
      "epoch": 4.96,
      "grad_norm": 0.0517689914373381,
      "learning_rate": 3.0000642344401113e-05,
      "loss": 1.92,
      "step": 19
    },
    {
      "epoch": 5.0,
      "grad_norm": 0.0517689914373381,
      "learning_rate": 2.8013417006383076e-05,
      "loss": 1.8518,
      "step": 20
    },
    {
      "epoch": 5.32,
      "grad_norm": 0.15044395273143124,
      "learning_rate": 2.600664850273538e-05,
      "loss": 1.8293,
      "step": 21
    },
    {
      "epoch": 5.64,
      "grad_norm": 0.05273098521583528,
      "learning_rate": 2.399335149726463e-05,
      "loss": 1.9383,
      "step": 22
    },
    {
      "epoch": 5.96,
      "grad_norm": 0.06228002271268191,
      "learning_rate": 2.1986582993616926e-05,
      "loss": 1.908,
      "step": 23
    },
    {
      "epoch": 6.0,
      "grad_norm": 0.06228002271268191,
      "learning_rate": 1.9999357655598893e-05,
      "loss": 1.8322,
      "step": 24
    },
    {
      "epoch": 6.32,
      "grad_norm": 0.14463143685943824,
      "learning_rate": 1.8044563402088684e-05,
      "loss": 1.8224,
      "step": 25
    },
    {
      "epoch": 6.64,
      "grad_norm": 0.05890018895954,
      "learning_rate": 1.613487782393661e-05,
      "loss": 1.9292,
      "step": 26
    },
    {
      "epoch": 6.96,
      "grad_norm": 0.05527128944462077,
      "learning_rate": 1.4282685964923642e-05,
      "loss": 1.8986,
      "step": 27
    },
    {
      "epoch": 7.0,
      "grad_norm": 0.05527128944462077,
      "learning_rate": 1.2500000000000006e-05,
      "loss": 1.8183,
      "step": 28
    },
    {
      "epoch": 7.32,
      "grad_norm": 0.2047695182816609,
      "learning_rate": 1.0798381331721109e-05,
      "loss": 1.8177,
      "step": 29
    },
    {
      "epoch": 7.64,
      "grad_norm": 0.06001287449156272,
      "learning_rate": 9.18886561011557e-06,
      "loss": 1.9229,
      "step": 30
    },
    {
      "epoch": 7.96,
      "grad_norm": 0.06720093718398726,
      "learning_rate": 7.681891162260015e-06,
      "loss": 1.8924,
      "step": 31
    },
    {
      "epoch": 8.0,
      "grad_norm": 0.1853351157471328,
      "learning_rate": 6.28723129572247e-06,
      "loss": 1.8081,
      "step": 32
    },
    {
      "epoch": 8.32,
      "grad_norm": 0.09202030256616207,
      "learning_rate": 5.013930914912476e-06,
      "loss": 1.8142,
      "step": 33
    },
    {
      "epoch": 8.64,
      "grad_norm": 0.10091895409355263,
      "learning_rate": 3.8702478614051355e-06,
      "loss": 1.9154,
      "step": 34
    },
    {
      "epoch": 8.96,
      "grad_norm": 0.0924690353833725,
      "learning_rate": 2.8635993586697553e-06,
      "loss": 1.8841,
      "step": 35
    },
    {
      "epoch": 9.0,
      "grad_norm": 0.0924690353833725,
      "learning_rate": 2.0005139085293945e-06,
      "loss": 1.7995,
      "step": 36
    },
    {
      "epoch": 9.32,
      "grad_norm": 0.20221156410773924,
      "learning_rate": 1.286588951321363e-06,
      "loss": 1.8119,
      "step": 37
    },
    {
      "epoch": 9.64,
      "grad_norm": 0.07419919499134094,
      "learning_rate": 7.264545643486997e-07,
      "loss": 1.9125,
      "step": 38
    },
    {
      "epoch": 9.96,
      "grad_norm": 0.0716386556789569,
      "learning_rate": 3.237434340521789e-07,
      "loss": 1.8828,
      "step": 39
    },
    {
      "epoch": 10.0,
      "grad_norm": 0.0716386556789569,
      "learning_rate": 8.106729664475176e-08,
      "loss": 1.7975,
      "step": 40
    },
    {
      "epoch": 10.0,
      "step": 40,
      "total_flos": 48225369194496.0,
      "train_loss": 1.8884073972702027,
      "train_runtime": 1617.1809,
      "train_samples_per_second": 0.612,
      "train_steps_per_second": 0.025
    }
  ],
  "logging_steps": 1,
  "max_steps": 40,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 10,
  "save_steps": 100,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 48225369194496.0,
  "train_batch_size": 1,
  "trial_name": null,
  "trial_params": null
}