{
  "best_metric": 0.9627493391011775,
  "best_model_checkpoint": "vit-base-patch16-224-finetuned-covid_ct_set_full/checkpoint-117",
  "epoch": 4.957264957264957,
  "global_step": 145,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.34,
      "learning_rate": 3.3333333333333335e-05,
      "loss": 0.6458,
      "step": 10
    },
    {
      "epoch": 0.68,
      "learning_rate": 4.8076923076923084e-05,
      "loss": 0.4343,
      "step": 20
    },
    {
      "epoch": 0.99,
      "eval_accuracy": 0.9298245614035088,
      "eval_loss": 0.19454431533813477,
      "eval_runtime": 190.3429,
      "eval_samples_per_second": 43.721,
      "eval_steps_per_second": 1.371,
      "step": 29
    },
    {
      "epoch": 1.03,
      "learning_rate": 4.423076923076923e-05,
      "loss": 0.3003,
      "step": 30
    },
    {
      "epoch": 1.37,
      "learning_rate": 4.038461538461539e-05,
      "loss": 0.2501,
      "step": 40
    },
    {
      "epoch": 1.71,
      "learning_rate": 3.653846153846154e-05,
      "loss": 0.2353,
      "step": 50
    },
    {
      "epoch": 1.98,
      "eval_accuracy": 0.9289834174477289,
      "eval_loss": 0.20524762570858002,
      "eval_runtime": 190.8339,
      "eval_samples_per_second": 43.609,
      "eval_steps_per_second": 1.368,
      "step": 58
    },
    {
      "epoch": 2.05,
      "learning_rate": 3.269230769230769e-05,
      "loss": 0.1774,
      "step": 60
    },
    {
      "epoch": 2.39,
      "learning_rate": 2.8846153846153845e-05,
      "loss": 0.1671,
      "step": 70
    },
    {
      "epoch": 2.74,
      "learning_rate": 2.5e-05,
      "loss": 0.1395,
      "step": 80
    },
    {
      "epoch": 2.97,
      "eval_accuracy": 0.9074741648642153,
      "eval_loss": 0.2567497193813324,
      "eval_runtime": 192.117,
      "eval_samples_per_second": 43.317,
      "eval_steps_per_second": 1.359,
      "step": 87
    },
    {
      "epoch": 3.08,
      "learning_rate": 2.1153846153846154e-05,
      "loss": 0.1685,
      "step": 90
    },
    {
      "epoch": 3.42,
      "learning_rate": 1.730769230769231e-05,
      "loss": 0.1367,
      "step": 100
    },
    {
      "epoch": 3.76,
      "learning_rate": 1.3461538461538462e-05,
      "loss": 0.1399,
      "step": 110
    },
    {
      "epoch": 4.0,
      "eval_accuracy": 0.9627493391011775,
      "eval_loss": 0.12245626002550125,
      "eval_runtime": 192.0268,
      "eval_samples_per_second": 43.338,
      "eval_steps_per_second": 1.359,
      "step": 117
    },
    {
      "epoch": 4.1,
      "learning_rate": 9.615384615384616e-06,
      "loss": 0.1177,
      "step": 120
    },
    {
      "epoch": 4.44,
      "learning_rate": 5.76923076923077e-06,
      "loss": 0.1103,
      "step": 130
    },
    {
      "epoch": 4.79,
      "learning_rate": 1.9230769230769234e-06,
      "loss": 0.1186,
      "step": 140
    },
    {
      "epoch": 4.96,
      "eval_accuracy": 0.952054794520548,
      "eval_loss": 0.15305645763874054,
      "eval_runtime": 197.6583,
      "eval_samples_per_second": 42.103,
      "eval_steps_per_second": 1.32,
      "step": 145
    },
    {
      "epoch": 4.96,
      "step": 145,
      "total_flos": 1.4357715835791606e+18,
      "train_loss": 0.2204632989291487,
      "train_runtime": 1986.2502,
      "train_samples_per_second": 9.405,
      "train_steps_per_second": 0.073
    }
  ],
  "max_steps": 145,
  "num_train_epochs": 5,
  "total_flos": 1.4357715835791606e+18,
  "trial_name": null,
  "trial_params": null
}