{ "best_metric": 0.339957594871521, "best_model_checkpoint": "bert-multilingual-base-cased-finetuned-toxicity-classification/checkpoint-4015", "epoch": 2.0, "eval_steps": 500, "global_step": 8030, "is_hyper_param_search": false, "is_local_process_zero": true, "is_world_process_zero": true, "log_history": [ { "epoch": 0.12453300124533001, "grad_norm": 7.769826412200928, "learning_rate": 1.950186799501868e-05, "loss": 0.5596, "step": 500 }, { "epoch": 0.24906600249066002, "grad_norm": 13.72755241394043, "learning_rate": 1.9003735990037362e-05, "loss": 0.4566, "step": 1000 }, { "epoch": 0.37359900373599003, "grad_norm": 6.368185043334961, "learning_rate": 1.850560398505604e-05, "loss": 0.4185, "step": 1500 }, { "epoch": 0.49813200498132004, "grad_norm": 5.925895690917969, "learning_rate": 1.8007471980074722e-05, "loss": 0.3982, "step": 2000 }, { "epoch": 0.6226650062266501, "grad_norm": 2.7038302421569824, "learning_rate": 1.75093399750934e-05, "loss": 0.3906, "step": 2500 }, { "epoch": 0.7471980074719801, "grad_norm": 3.163766622543335, "learning_rate": 1.701120797011208e-05, "loss": 0.3678, "step": 3000 }, { "epoch": 0.8717310087173101, "grad_norm": 3.3420841693878174, "learning_rate": 1.651307596513076e-05, "loss": 0.3629, "step": 3500 }, { "epoch": 0.9962640099626401, "grad_norm": 6.345468997955322, "learning_rate": 1.601494396014944e-05, "loss": 0.3694, "step": 4000 }, { "epoch": 1.0, "eval_loss": 0.339957594871521, "eval_runtime": 174.5055, "eval_samples_per_second": 40.91, "eval_steps_per_second": 5.117, "step": 4015 }, { "epoch": 1.1207970112079702, "grad_norm": 6.267732620239258, "learning_rate": 1.551681195516812e-05, "loss": 0.2756, "step": 4500 }, { "epoch": 1.2453300124533002, "grad_norm": 34.6484375, "learning_rate": 1.5018679950186801e-05, "loss": 0.2826, "step": 5000 }, { "epoch": 1.36986301369863, "grad_norm": 12.416611671447754, "learning_rate": 1.4520547945205482e-05, "loss": 0.2705, "step": 5500 }, { "epoch": 1.4943960149439601, "grad_norm": 4.286174297332764, "learning_rate": 1.4022415940224162e-05, "loss": 0.2735, "step": 6000 }, { "epoch": 1.6189290161892902, "grad_norm": 3.970327377319336, "learning_rate": 1.3524283935242842e-05, "loss": 0.273, "step": 6500 }, { "epoch": 1.74346201743462, "grad_norm": 16.260072708129883, "learning_rate": 1.3026151930261522e-05, "loss": 0.2733, "step": 7000 }, { "epoch": 1.86799501867995, "grad_norm": 0.7613949775695801, "learning_rate": 1.2528019925280199e-05, "loss": 0.2659, "step": 7500 }, { "epoch": 1.9925280199252802, "grad_norm": 12.411490440368652, "learning_rate": 1.2029887920298879e-05, "loss": 0.2632, "step": 8000 }, { "epoch": 2.0, "eval_loss": 0.3527267277240753, "eval_runtime": 174.8977, "eval_samples_per_second": 40.818, "eval_steps_per_second": 5.106, "step": 8030 } ], "logging_steps": 500, "max_steps": 20075, "num_input_tokens_seen": 0, "num_train_epochs": 5, "save_steps": 500, "stateful_callbacks": { "TrainerControl": { "args": { "should_epoch_stop": false, "should_evaluate": false, "should_log": false, "should_save": true, "should_training_stop": false }, "attributes": {} } }, "total_flos": 3.38018772820992e+16, "train_batch_size": 8, "trial_name": null, "trial_params": null }