{
  "best_metric": 0.7536231884057971,
  "best_model_checkpoint": "dinov2-base-finetuned-eurosat/checkpoint-189",
  "epoch": 2.9881422924901186,
  "eval_steps": 500,
  "global_step": 189,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.15810276679841898,
      "grad_norm": 17.875991821289062,
      "learning_rate": 2.6315789473684212e-05,
      "loss": 0.5618,
      "step": 10
    },
    {
      "epoch": 0.31620553359683795,
      "grad_norm": 8.501226425170898,
      "learning_rate": 4.970588235294118e-05,
      "loss": 0.5333,
      "step": 20
    },
    {
      "epoch": 0.4743083003952569,
      "grad_norm": 22.501171112060547,
      "learning_rate": 4.6764705882352944e-05,
      "loss": 0.6441,
      "step": 30
    },
    {
      "epoch": 0.6324110671936759,
      "grad_norm": 11.3024320602417,
      "learning_rate": 4.382352941176471e-05,
      "loss": 0.5519,
      "step": 40
    },
    {
      "epoch": 0.7905138339920948,
      "grad_norm": 7.681583404541016,
      "learning_rate": 4.0882352941176474e-05,
      "loss": 0.5391,
      "step": 50
    },
    {
      "epoch": 0.9486166007905138,
      "grad_norm": 6.365460395812988,
      "learning_rate": 3.794117647058824e-05,
      "loss": 0.5271,
      "step": 60
    },
    {
      "epoch": 0.9960474308300395,
      "eval_accuracy": 0.7134894091415831,
      "eval_loss": 0.5548548698425293,
      "eval_runtime": 21.298,
      "eval_samples_per_second": 42.117,
      "eval_steps_per_second": 1.362,
      "step": 63
    },
    {
      "epoch": 1.1067193675889329,
      "grad_norm": 9.361493110656738,
      "learning_rate": 3.5e-05,
      "loss": 0.5572,
      "step": 70
    },
    {
      "epoch": 1.2648221343873518,
      "grad_norm": 7.021822452545166,
      "learning_rate": 3.205882352941177e-05,
      "loss": 0.5003,
      "step": 80
    },
    {
      "epoch": 1.4229249011857708,
      "grad_norm": 8.887531280517578,
      "learning_rate": 2.9117647058823534e-05,
      "loss": 0.4903,
      "step": 90
    },
    {
      "epoch": 1.5810276679841897,
      "grad_norm": 7.857639789581299,
      "learning_rate": 2.6176470588235295e-05,
      "loss": 0.5096,
      "step": 100
    },
    {
      "epoch": 1.7391304347826086,
      "grad_norm": 6.486521244049072,
      "learning_rate": 2.323529411764706e-05,
      "loss": 0.4911,
      "step": 110
    },
    {
      "epoch": 1.8972332015810278,
      "grad_norm": 12.094149589538574,
      "learning_rate": 2.0294117647058825e-05,
      "loss": 0.4804,
      "step": 120
    },
    {
      "epoch": 1.992094861660079,
      "eval_accuracy": 0.738015607580825,
      "eval_loss": 0.5335468053817749,
      "eval_runtime": 21.4566,
      "eval_samples_per_second": 41.805,
      "eval_steps_per_second": 1.352,
      "step": 126
    },
    {
      "epoch": 2.0553359683794468,
      "grad_norm": 10.58962345123291,
      "learning_rate": 1.735294117647059e-05,
      "loss": 0.4347,
      "step": 130
    },
    {
      "epoch": 2.2134387351778657,
      "grad_norm": 12.747217178344727,
      "learning_rate": 1.4411764705882352e-05,
      "loss": 0.3991,
      "step": 140
    },
    {
      "epoch": 2.3715415019762847,
      "grad_norm": 9.04605770111084,
      "learning_rate": 1.1470588235294118e-05,
      "loss": 0.3978,
      "step": 150
    },
    {
      "epoch": 2.5296442687747036,
      "grad_norm": 11.612887382507324,
      "learning_rate": 8.529411764705883e-06,
      "loss": 0.3989,
      "step": 160
    },
    {
      "epoch": 2.6877470355731226,
      "grad_norm": 11.906518936157227,
      "learning_rate": 5.588235294117647e-06,
      "loss": 0.3797,
      "step": 170
    },
    {
      "epoch": 2.8458498023715415,
      "grad_norm": 11.579667091369629,
      "learning_rate": 2.647058823529412e-06,
      "loss": 0.3901,
      "step": 180
    },
    {
      "epoch": 2.9881422924901186,
      "eval_accuracy": 0.7536231884057971,
      "eval_loss": 0.5222525000572205,
      "eval_runtime": 21.3284,
      "eval_samples_per_second": 42.057,
      "eval_steps_per_second": 1.36,
      "step": 189
    },
    {
      "epoch": 2.9881422924901186,
      "step": 189,
      "total_flos": 1.8869538215666074e+18,
      "train_loss": 0.48228894844257014,
      "train_runtime": 1217.9669,
      "train_samples_per_second": 19.865,
      "train_steps_per_second": 0.155
    }
  ],
  "logging_steps": 10,
  "max_steps": 189,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 3,
  "save_steps": 500,
  "total_flos": 1.8869538215666074e+18,
  "train_batch_size": 32,
  "trial_name": null,
  "trial_params": null
}