{
  "best_metric": 0.8715846994535519,
  "best_model_checkpoint": "./neikexue_V2_seq_128__1e-5_model_results/checkpoint-2000",
  "epoch": 17.391304347826086,
  "eval_steps": 500,
  "global_step": 2000,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.8695652173913043,
      "grad_norm": 8.864755630493164,
      "learning_rate": 9.710144927536233e-06,
      "loss": 5.6733,
      "step": 100
    },
    {
      "epoch": 1.7391304347826086,
      "grad_norm": 9.545233726501465,
      "learning_rate": 9.420289855072464e-06,
      "loss": 5.0217,
      "step": 200
    },
    {
      "epoch": 2.608695652173913,
      "grad_norm": 10.265801429748535,
      "learning_rate": 9.130434782608697e-06,
      "loss": 4.3971,
      "step": 300
    },
    {
      "epoch": 3.4782608695652173,
      "grad_norm": 10.584877014160156,
      "learning_rate": 8.840579710144929e-06,
      "loss": 3.8406,
      "step": 400
    },
    {
      "epoch": 4.3478260869565215,
      "grad_norm": 11.208259582519531,
      "learning_rate": 8.55072463768116e-06,
      "loss": 3.3202,
      "step": 500
    },
    {
      "epoch": 4.3478260869565215,
      "eval_accuracy": 0.592896174863388,
      "eval_f1": 0.5088359121146007,
      "eval_loss": 3.198462724685669,
      "eval_precision": 0.48192588377014606,
      "eval_recall": 0.592896174863388,
      "eval_runtime": 0.893,
      "eval_samples_per_second": 409.855,
      "eval_steps_per_second": 25.756,
      "step": 500
    },
    {
      "epoch": 5.217391304347826,
      "grad_norm": 10.305059432983398,
      "learning_rate": 8.260869565217392e-06,
      "loss": 2.9294,
      "step": 600
    },
    {
      "epoch": 6.086956521739131,
      "grad_norm": 9.287023544311523,
      "learning_rate": 7.971014492753623e-06,
      "loss": 2.4987,
      "step": 700
    },
    {
      "epoch": 6.956521739130435,
      "grad_norm": 8.611043930053711,
      "learning_rate": 7.681159420289856e-06,
      "loss": 2.1443,
      "step": 800
    },
    {
      "epoch": 7.826086956521739,
      "grad_norm": 9.929250717163086,
      "learning_rate": 7.391304347826087e-06,
      "loss": 1.8154,
      "step": 900
    },
    {
      "epoch": 8.695652173913043,
      "grad_norm": 8.795051574707031,
      "learning_rate": 7.10144927536232e-06,
      "loss": 1.537,
      "step": 1000
    },
    {
      "epoch": 8.695652173913043,
      "eval_accuracy": 0.825136612021858,
      "eval_f1": 0.7840855865446029,
      "eval_loss": 1.8385679721832275,
      "eval_precision": 0.7657364038511579,
      "eval_recall": 0.825136612021858,
      "eval_runtime": 0.8634,
      "eval_samples_per_second": 423.891,
      "eval_steps_per_second": 26.638,
      "step": 1000
    },
    {
      "epoch": 9.565217391304348,
      "grad_norm": 6.896790981292725,
      "learning_rate": 6.811594202898551e-06,
      "loss": 1.3088,
      "step": 1100
    },
    {
      "epoch": 10.434782608695652,
      "grad_norm": 7.9147725105285645,
      "learning_rate": 6.521739130434783e-06,
      "loss": 1.0947,
      "step": 1200
    },
    {
      "epoch": 11.304347826086957,
      "grad_norm": 6.530330181121826,
      "learning_rate": 6.2318840579710145e-06,
      "loss": 0.9349,
      "step": 1300
    },
    {
      "epoch": 12.173913043478262,
      "grad_norm": 6.054310321807861,
      "learning_rate": 5.942028985507247e-06,
      "loss": 0.773,
      "step": 1400
    },
    {
      "epoch": 13.043478260869565,
      "grad_norm": 5.687331199645996,
      "learning_rate": 5.652173913043479e-06,
      "loss": 0.6669,
      "step": 1500
    },
    {
      "epoch": 13.043478260869565,
      "eval_accuracy": 0.8497267759562842,
      "eval_f1": 0.8189744833187457,
      "eval_loss": 1.1578365564346313,
      "eval_precision": 0.8049635701275045,
      "eval_recall": 0.8497267759562842,
      "eval_runtime": 0.9265,
      "eval_samples_per_second": 395.024,
      "eval_steps_per_second": 24.824,
      "step": 1500
    },
    {
      "epoch": 13.91304347826087,
      "grad_norm": 4.295091152191162,
      "learning_rate": 5.362318840579711e-06,
      "loss": 0.5477,
      "step": 1600
    },
    {
      "epoch": 14.782608695652174,
      "grad_norm": 3.5699689388275146,
      "learning_rate": 5.072463768115943e-06,
      "loss": 0.4643,
      "step": 1700
    },
    {
      "epoch": 15.652173913043478,
      "grad_norm": 3.011284828186035,
      "learning_rate": 4.782608695652174e-06,
      "loss": 0.3989,
      "step": 1800
    },
    {
      "epoch": 16.52173913043478,
      "grad_norm": 2.8458871841430664,
      "learning_rate": 4.492753623188406e-06,
      "loss": 0.3407,
      "step": 1900
    },
    {
      "epoch": 17.391304347826086,
      "grad_norm": 2.9610633850097656,
      "learning_rate": 4.202898550724638e-06,
      "loss": 0.2911,
      "step": 2000
    },
    {
      "epoch": 17.391304347826086,
      "eval_accuracy": 0.8715846994535519,
      "eval_f1": 0.8439289459781263,
      "eval_loss": 0.836195170879364,
      "eval_precision": 0.8367486338797814,
      "eval_recall": 0.8715846994535519,
      "eval_runtime": 1.0977,
      "eval_samples_per_second": 333.42,
      "eval_steps_per_second": 20.953,
      "step": 2000
    }
  ],
  "logging_steps": 100,
  "max_steps": 3450,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 30,
  "save_steps": 500,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": false
      },
      "attributes": {}
    }
  },
  "total_flos": 7423711588546560.0,
  "train_batch_size": 16,
  "trial_name": null,
  "trial_params": null
}