{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 14.0,
  "global_step": 14294,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.49,
      "learning_rate": 1.9347045380346067e-05,
      "loss": 0.1497,
      "step": 500
    },
    {
      "epoch": 0.98,
      "learning_rate": 1.8694090760692135e-05,
      "loss": 0.1296,
      "step": 1000
    },
    {
      "epoch": 1.0,
      "eval_accuracy": 0.963784396648407,
      "eval_loss": 0.11704692244529724,
      "eval_runtime": 3.6236,
      "eval_samples_per_second": 2575.648,
      "eval_steps_per_second": 80.584,
      "step": 1021
    },
    {
      "epoch": 1.47,
      "learning_rate": 1.80411361410382e-05,
      "loss": 0.0939,
      "step": 1500
    },
    {
      "epoch": 1.96,
      "learning_rate": 1.7388181521384265e-05,
      "loss": 0.1,
      "step": 2000
    },
    {
      "epoch": 2.0,
      "eval_accuracy": 0.9682846069335938,
      "eval_loss": 0.11075109988451004,
      "eval_runtime": 3.7908,
      "eval_samples_per_second": 2462.002,
      "eval_steps_per_second": 77.028,
      "step": 2042
    },
    {
      "epoch": 2.45,
      "learning_rate": 1.673522690173033e-05,
      "loss": 0.0682,
      "step": 2500
    },
    {
      "epoch": 2.94,
      "learning_rate": 1.6082272282076395e-05,
      "loss": 0.0661,
      "step": 3000
    },
    {
      "epoch": 3.0,
      "eval_accuracy": 0.9661416411399841,
      "eval_loss": 0.12265008687973022,
      "eval_runtime": 3.7694,
      "eval_samples_per_second": 2475.974,
      "eval_steps_per_second": 77.465,
      "step": 3063
    },
    {
      "epoch": 3.43,
      "learning_rate": 1.5429317662422463e-05,
      "loss": 0.0519,
      "step": 3500
    },
    {
      "epoch": 3.92,
      "learning_rate": 1.4776363042768528e-05,
      "loss": 0.0424,
      "step": 4000
    },
    {
      "epoch": 4.0,
      "eval_accuracy": 0.9709632396697998,
      "eval_loss": 0.14902332425117493,
      "eval_runtime": 3.751,
      "eval_samples_per_second": 2488.126,
      "eval_steps_per_second": 77.846,
      "step": 4084
    },
    {
      "epoch": 4.41,
      "learning_rate": 1.4123408423114595e-05,
      "loss": 0.0261,
      "step": 4500
    },
    {
      "epoch": 4.9,
      "learning_rate": 1.3470453803460662e-05,
      "loss": 0.0279,
      "step": 5000
    },
    {
      "epoch": 5.0,
      "eval_accuracy": 0.9638915657997131,
      "eval_loss": 0.18987597525119781,
      "eval_runtime": 3.5852,
      "eval_samples_per_second": 2603.235,
      "eval_steps_per_second": 81.447,
      "step": 5105
    },
    {
      "epoch": 5.39,
      "learning_rate": 1.2817499183806728e-05,
      "loss": 0.018,
      "step": 5500
    },
    {
      "epoch": 5.88,
      "learning_rate": 1.2164544564152792e-05,
      "loss": 0.0138,
      "step": 6000
    },
    {
      "epoch": 6.0,
      "eval_accuracy": 0.9680702686309814,
      "eval_loss": 0.21568563580513,
      "eval_runtime": 3.5824,
      "eval_samples_per_second": 2605.209,
      "eval_steps_per_second": 81.509,
      "step": 6126
    },
    {
      "epoch": 6.37,
      "learning_rate": 1.1511589944498858e-05,
      "loss": 0.0085,
      "step": 6500
    },
    {
      "epoch": 6.86,
      "learning_rate": 1.0858635324844923e-05,
      "loss": 0.0101,
      "step": 7000
    },
    {
      "epoch": 7.0,
      "eval_accuracy": 0.9694631695747375,
      "eval_loss": 0.2687966227531433,
      "eval_runtime": 3.7582,
      "eval_samples_per_second": 2483.357,
      "eval_steps_per_second": 77.696,
      "step": 7147
    },
    {
      "epoch": 7.35,
      "learning_rate": 1.020568070519099e-05,
      "loss": 0.0061,
      "step": 7500
    },
    {
      "epoch": 7.84,
      "learning_rate": 9.552726085537057e-06,
      "loss": 0.0064,
      "step": 8000
    },
    {
      "epoch": 8.0,
      "eval_accuracy": 0.9679631590843201,
      "eval_loss": 0.2600478231906891,
      "eval_runtime": 3.593,
      "eval_samples_per_second": 2597.552,
      "eval_steps_per_second": 81.269,
      "step": 8168
    },
    {
      "epoch": 8.33,
      "learning_rate": 8.899771465883122e-06,
      "loss": 0.0043,
      "step": 8500
    },
    {
      "epoch": 8.81,
      "learning_rate": 8.246816846229188e-06,
      "loss": 0.0043,
      "step": 9000
    },
    {
      "epoch": 9.0,
      "eval_accuracy": 0.9706417918205261,
      "eval_loss": 0.26722854375839233,
      "eval_runtime": 3.5876,
      "eval_samples_per_second": 2601.484,
      "eval_steps_per_second": 81.392,
      "step": 9189
    },
    {
      "epoch": 9.3,
      "learning_rate": 7.593862226575253e-06,
      "loss": 0.0037,
      "step": 9500
    },
    {
      "epoch": 9.79,
      "learning_rate": 6.94090760692132e-06,
      "loss": 0.0047,
      "step": 10000
    },
    {
      "epoch": 10.0,
      "eval_accuracy": 0.9704275131225586,
      "eval_loss": 0.2815804183483124,
      "eval_runtime": 3.5611,
      "eval_samples_per_second": 2620.796,
      "eval_steps_per_second": 81.996,
      "step": 10210
    },
    {
      "epoch": 10.28,
      "learning_rate": 6.287952987267385e-06,
      "loss": 0.0019,
      "step": 10500
    },
    {
      "epoch": 10.77,
      "learning_rate": 5.634998367613451e-06,
      "loss": 0.0035,
      "step": 11000
    },
    {
      "epoch": 11.0,
      "eval_accuracy": 0.9695703387260437,
      "eval_loss": 0.2854035198688507,
      "eval_runtime": 3.5955,
      "eval_samples_per_second": 2595.76,
      "eval_steps_per_second": 81.213,
      "step": 11231
    },
    {
      "epoch": 11.26,
      "learning_rate": 4.9820437479595175e-06,
      "loss": 0.0027,
      "step": 11500
    },
    {
      "epoch": 11.75,
      "learning_rate": 4.329089128305583e-06,
      "loss": 0.0016,
      "step": 12000
    },
    {
      "epoch": 12.0,
      "eval_accuracy": 0.9701060652732849,
      "eval_loss": 0.297050416469574,
      "eval_runtime": 3.6052,
      "eval_samples_per_second": 2588.747,
      "eval_steps_per_second": 80.994,
      "step": 12252
    },
    {
      "epoch": 12.24,
      "learning_rate": 3.676134508651649e-06,
      "loss": 0.0022,
      "step": 12500
    },
    {
      "epoch": 12.73,
      "learning_rate": 3.023179888997715e-06,
      "loss": 0.0014,
      "step": 13000
    },
    {
      "epoch": 13.0,
      "eval_accuracy": 0.9695703387260437,
      "eval_loss": 0.2982116639614105,
      "eval_runtime": 3.5886,
      "eval_samples_per_second": 2600.765,
      "eval_steps_per_second": 81.37,
      "step": 13273
    },
    {
      "epoch": 13.22,
      "learning_rate": 2.370225269343781e-06,
      "loss": 0.0013,
      "step": 13500
    },
    {
      "epoch": 13.71,
      "learning_rate": 1.7172706496898467e-06,
      "loss": 0.0026,
      "step": 14000
    },
    {
      "epoch": 14.0,
      "eval_accuracy": 0.9711775183677673,
      "eval_loss": 0.2929518520832062,
      "eval_runtime": 3.6072,
      "eval_samples_per_second": 2587.331,
      "eval_steps_per_second": 80.949,
      "step": 14294
    }
  ],
  "max_steps": 15315,
  "num_train_epochs": 15,
  "total_flos": 3.0079908340301824e+16,
  "trial_name": null,
  "trial_params": null
}