{
  "best_global_step": 282,
  "best_metric": 9.5367431640625e-07,
  "best_model_checkpoint": "./new_zealand_guard_finetuned/checkpoint-282",
  "epoch": 2.0,
  "eval_steps": 500,
  "global_step": 282,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.0071111111111111115,
      "grad_norm": 849.5966796875,
      "learning_rate": 0.0,
      "loss": 26.5098,
      "step": 1
    },
    {
      "epoch": 0.07111111111111111,
      "grad_norm": 559.8890380859375,
      "learning_rate": 4.186046511627907e-05,
      "loss": 19.3119,
      "step": 10
    },
    {
      "epoch": 0.14222222222222222,
      "grad_norm": 895.1287841796875,
      "learning_rate": 8.837209302325582e-05,
      "loss": 10.5964,
      "step": 20
    },
    {
      "epoch": 0.21333333333333335,
      "grad_norm": 75.24260711669922,
      "learning_rate": 0.00013488372093023256,
      "loss": 3.0907,
      "step": 30
    },
    {
      "epoch": 0.28444444444444444,
      "grad_norm": 67.9478759765625,
      "learning_rate": 0.0001813953488372093,
      "loss": 2.2113,
      "step": 40
    },
    {
      "epoch": 0.35555555555555557,
      "grad_norm": 59.889305114746094,
      "learning_rate": 0.00019987699691483048,
      "loss": 1.3483,
      "step": 50
    },
    {
      "epoch": 0.4266666666666667,
      "grad_norm": 103.38257598876953,
      "learning_rate": 0.00019912640693269752,
      "loss": 0.222,
      "step": 60
    },
    {
      "epoch": 0.49777777777777776,
      "grad_norm": 2.15047025680542,
      "learning_rate": 0.00019769868307835994,
      "loss": 2.9384,
      "step": 70
    },
    {
      "epoch": 0.5688888888888889,
      "grad_norm": 0.0028045785147696733,
      "learning_rate": 0.00019560357815343577,
      "loss": 1.7078,
      "step": 80
    },
    {
      "epoch": 0.64,
      "grad_norm": 0.0005238493904471397,
      "learning_rate": 0.00019285540384897073,
      "loss": 0.4505,
      "step": 90
    },
    {
      "epoch": 0.7111111111111111,
      "grad_norm": 24.068443298339844,
      "learning_rate": 0.00018947293298207635,
      "loss": 0.1432,
      "step": 100
    },
    {
      "epoch": 0.7822222222222223,
      "grad_norm": 104.02115631103516,
      "learning_rate": 0.0001854792712585539,
      "loss": 0.237,
      "step": 110
    },
    {
      "epoch": 0.8533333333333334,
      "grad_norm": 0.0,
      "learning_rate": 0.00018090169943749476,
      "loss": 0.1881,
      "step": 120
    },
    {
      "epoch": 0.9244444444444444,
      "grad_norm": 1.329183578491211e-05,
      "learning_rate": 0.0001757714869760335,
      "loss": 3.2485,
      "step": 130
    },
    {
      "epoch": 0.9955555555555555,
      "grad_norm": 0.06525997817516327,
      "learning_rate": 0.00017012367842724887,
      "loss": 1.1556,
      "step": 140
    },
    {
      "epoch": 1.0,
      "eval_loss": 0.0180816650390625,
      "eval_runtime": 24.8295,
      "eval_samples_per_second": 20.137,
      "eval_steps_per_second": 5.034,
      "step": 141
    },
    {
      "epoch": 1.064,
      "grad_norm": 0.2111472636461258,
      "learning_rate": 0.00016399685405033167,
      "loss": 1.4252,
      "step": 150
    },
    {
      "epoch": 1.1351111111111112,
      "grad_norm": 0.037655677646398544,
      "learning_rate": 0.00015743286626829437,
      "loss": 0.0121,
      "step": 160
    },
    {
      "epoch": 1.2062222222222223,
      "grad_norm": 1.9490718841552734e-05,
      "learning_rate": 0.0001504765537734844,
      "loss": 0.0305,
      "step": 170
    },
    {
      "epoch": 1.2773333333333334,
      "grad_norm": 0.0021932744421064854,
      "learning_rate": 0.00014317543523384928,
      "loss": 0.0002,
      "step": 180
    },
    {
      "epoch": 1.3484444444444446,
      "grad_norm": 0.000517010863404721,
      "learning_rate": 0.00013557938469225167,
      "loss": 0.2581,
      "step": 190
    },
    {
      "epoch": 1.4195555555555557,
      "grad_norm": 97.7376937866211,
      "learning_rate": 0.00012774029087618446,
      "loss": 0.5677,
      "step": 200
    },
    {
      "epoch": 1.4906666666666666,
      "grad_norm": 0.000191587139852345,
      "learning_rate": 0.00011971170274514802,
      "loss": 0.0868,
      "step": 210
    },
    {
      "epoch": 1.561777777777778,
      "grad_norm": 0.0018747359281405807,
      "learning_rate": 0.00011154846369695863,
      "loss": 0.0019,
      "step": 220
    },
    {
      "epoch": 1.6328888888888888,
      "grad_norm": 17.872161865234375,
      "learning_rate": 0.00010330633693173082,
      "loss": 0.3749,
      "step": 230
    },
    {
      "epoch": 1.704,
      "grad_norm": 0.00018133236153516918,
      "learning_rate": 9.504162453267777e-05,
      "loss": 0.1126,
      "step": 240
    },
    {
      "epoch": 1.775111111111111,
      "grad_norm": 0.0003161525819450617,
      "learning_rate": 8.681078286579311e-05,
      "loss": 1.0695,
      "step": 250
    },
    {
      "epoch": 1.8462222222222222,
      "grad_norm": 1.1622905731201172e-05,
      "learning_rate": 7.867003692562534e-05,
      "loss": 0.055,
      "step": 260
    },
    {
      "epoch": 1.9173333333333333,
      "grad_norm": 4.508599281311035,
      "learning_rate": 7.067499626155354e-05,
      "loss": 0.0015,
      "step": 270
    },
    {
      "epoch": 1.9884444444444445,
      "grad_norm": 2.9325485229492188e-05,
      "learning_rate": 6.28802751081779e-05,
      "loss": 0.0005,
      "step": 280
    },
    {
      "epoch": 2.0,
      "eval_loss": 9.5367431640625e-07,
      "eval_runtime": 24.8356,
      "eval_samples_per_second": 20.132,
      "eval_steps_per_second": 5.033,
      "step": 282
    }
  ],
  "logging_steps": 10,
  "max_steps": 423,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 3,
  "save_steps": 500,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": false
      },
      "attributes": {}
    }
  },
  "total_flos": 1.0137283854336e+17,
  "train_batch_size": 2,
  "trial_name": null,
  "trial_params": null
}