{
  "best_metric": 3.047952175140381,
  "best_model_checkpoint": "output/xxxtentacion/checkpoint-114",
  "epoch": 1.0,
  "global_step": 114,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.04,
      "learning_rate": 0.00013654981500613273,
      "loss": 3.7924,
      "step": 5
    },
    {
      "epoch": 0.09,
      "learning_rate": 0.00013461158482121457,
      "loss": 3.5933,
      "step": 10
    },
    {
      "epoch": 0.13,
      "learning_rate": 0.00013142205020853694,
      "loss": 3.4387,
      "step": 15
    },
    {
      "epoch": 0.18,
      "learning_rate": 0.00012704167144630924,
      "loss": 3.3335,
      "step": 20
    },
    {
      "epoch": 0.22,
      "learning_rate": 0.00012155348225285961,
      "loss": 3.3589,
      "step": 25
    },
    {
      "epoch": 0.26,
      "learning_rate": 0.00011506151581352585,
      "loss": 3.3227,
      "step": 30
    },
    {
      "epoch": 0.31,
      "learning_rate": 0.00010768883274520426,
      "loss": 3.6526,
      "step": 35
    },
    {
      "epoch": 0.35,
      "learning_rate": 9.957518838012702e-05,
      "loss": 3.2754,
      "step": 40
    },
    {
      "epoch": 0.39,
      "learning_rate": 9.08743835874413e-05,
      "loss": 3.2572,
      "step": 45
    },
    {
      "epoch": 0.44,
      "learning_rate": 8.175134934996847e-05,
      "loss": 3.1639,
      "step": 50
    },
    {
      "epoch": 0.48,
      "learning_rate": 7.237902036041238e-05,
      "loss": 3.343,
      "step": 55
    },
    {
      "epoch": 0.53,
      "learning_rate": 6.293505690059801e-05,
      "loss": 3.4637,
      "step": 60
    },
    {
      "epoch": 0.57,
      "learning_rate": 5.359847714324262e-05,
      "loss": 3.1047,
      "step": 65
    },
    {
      "epoch": 0.61,
      "learning_rate": 4.4546263713777056e-05,
      "loss": 3.0869,
      "step": 70
    },
    {
      "epoch": 0.66,
      "learning_rate": 3.5950008837656755e-05,
      "loss": 3.3149,
      "step": 75
    },
    {
      "epoch": 0.7,
      "learning_rate": 2.7972661667182378e-05,
      "loss": 3.107,
      "step": 80
    },
    {
      "epoch": 0.75,
      "learning_rate": 2.0765439444951916e-05,
      "loss": 2.9627,
      "step": 85
    },
    {
      "epoch": 0.79,
      "learning_rate": 1.4464961055407408e-05,
      "loss": 3.0099,
      "step": 90
    },
    {
      "epoch": 0.83,
      "learning_rate": 9.190657300387505e-06,
      "loss": 3.5708,
      "step": 95
    },
    {
      "epoch": 0.88,
      "learning_rate": 5.042506989064599e-06,
      "loss": 3.0849,
      "step": 100
    },
    {
      "epoch": 0.92,
      "learning_rate": 2.0991417565619363e-06,
      "loss": 3.1334,
      "step": 105
    },
    {
      "epoch": 0.96,
      "learning_rate": 4.1635553598866533e-07,
      "loss": 3.2816,
      "step": 110
    },
    {
      "epoch": 1.0,
      "eval_loss": 3.047952175140381,
      "eval_runtime": 6.9391,
      "eval_samples_per_second": 21.04,
      "eval_steps_per_second": 2.738,
      "step": 114
    }
  ],
  "max_steps": 114,
  "num_train_epochs": 1,
  "total_flos": 118887874560000.0,
  "trial_name": null,
  "trial_params": null
}