{
"best_global_step": 304,
"best_metric": 0.3814464807510376,
"best_model_checkpoint": "./solacies/checkpoint-304",
"epoch": 2.0,
"eval_steps": 500,
"global_step": 304,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.06578947368421052,
"grad_norm": 2.7918763160705566,
"learning_rate": 1.9407894736842107e-05,
"loss": 1.3351,
"step": 10
},
{
"epoch": 0.13157894736842105,
"grad_norm": 2.623225212097168,
"learning_rate": 1.8750000000000002e-05,
"loss": 1.1694,
"step": 20
},
{
"epoch": 0.19736842105263158,
"grad_norm": 2.48968505859375,
"learning_rate": 1.8092105263157896e-05,
"loss": 1.0772,
"step": 30
},
{
"epoch": 0.2631578947368421,
"grad_norm": 2.2622175216674805,
"learning_rate": 1.743421052631579e-05,
"loss": 1.0927,
"step": 40
},
{
"epoch": 0.32894736842105265,
"grad_norm": 2.3349521160125732,
"learning_rate": 1.6776315789473686e-05,
"loss": 0.9766,
"step": 50
},
{
"epoch": 0.39473684210526316,
"grad_norm": 2.7016446590423584,
"learning_rate": 1.611842105263158e-05,
"loss": 0.8842,
"step": 60
},
{
"epoch": 0.4605263157894737,
"grad_norm": 1.8381617069244385,
"learning_rate": 1.5460526315789475e-05,
"loss": 0.7284,
"step": 70
},
{
"epoch": 0.5263157894736842,
"grad_norm": 2.1242270469665527,
"learning_rate": 1.4802631578947371e-05,
"loss": 0.6287,
"step": 80
},
{
"epoch": 0.5921052631578947,
"grad_norm": 1.3842352628707886,
"learning_rate": 1.4144736842105264e-05,
"loss": 0.6329,
"step": 90
},
{
"epoch": 0.6578947368421053,
"grad_norm": 2.2132720947265625,
"learning_rate": 1.3486842105263159e-05,
"loss": 0.607,
"step": 100
},
{
"epoch": 0.7236842105263158,
"grad_norm": 2.2834842205047607,
"learning_rate": 1.2828947368421055e-05,
"loss": 0.5891,
"step": 110
},
{
"epoch": 0.7894736842105263,
"grad_norm": 2.5198376178741455,
"learning_rate": 1.2171052631578948e-05,
"loss": 0.5515,
"step": 120
},
{
"epoch": 0.8552631578947368,
"grad_norm": 1.5494874715805054,
"learning_rate": 1.1513157894736844e-05,
"loss": 0.4724,
"step": 130
},
{
"epoch": 0.9210526315789473,
"grad_norm": 2.719534158706665,
"learning_rate": 1.0855263157894737e-05,
"loss": 0.4908,
"step": 140
},
{
"epoch": 0.9868421052631579,
"grad_norm": 1.478468418121338,
"learning_rate": 1.0197368421052632e-05,
"loss": 0.4536,
"step": 150
},
{
"epoch": 1.0,
"eval_accuracy": 0.9088235294117647,
"eval_loss": 0.5104668140411377,
"eval_runtime": 5.2696,
"eval_samples_per_second": 64.521,
"eval_steps_per_second": 8.16,
"step": 152
},
{
"epoch": 1.0526315789473684,
"grad_norm": 1.4376304149627686,
"learning_rate": 9.539473684210528e-06,
"loss": 0.378,
"step": 160
},
{
"epoch": 1.118421052631579,
"grad_norm": 1.3609135150909424,
"learning_rate": 8.881578947368423e-06,
"loss": 0.3742,
"step": 170
},
{
"epoch": 1.1842105263157894,
"grad_norm": 2.889965057373047,
"learning_rate": 8.223684210526316e-06,
"loss": 0.3694,
"step": 180
},
{
"epoch": 1.25,
"grad_norm": 5.698398113250732,
"learning_rate": 7.565789473684211e-06,
"loss": 0.4044,
"step": 190
},
{
"epoch": 1.3157894736842106,
"grad_norm": 1.3650037050247192,
"learning_rate": 6.907894736842106e-06,
"loss": 0.3697,
"step": 200
},
{
"epoch": 1.381578947368421,
"grad_norm": 2.522857904434204,
"learning_rate": 6.25e-06,
"loss": 0.4656,
"step": 210
},
{
"epoch": 1.4473684210526316,
"grad_norm": 1.6762239933013916,
"learning_rate": 5.592105263157896e-06,
"loss": 0.3532,
"step": 220
},
{
"epoch": 1.513157894736842,
"grad_norm": 1.3175244331359863,
"learning_rate": 4.9342105263157895e-06,
"loss": 0.3821,
"step": 230
},
{
"epoch": 1.5789473684210527,
"grad_norm": 1.7241592407226562,
"learning_rate": 4.276315789473684e-06,
"loss": 0.3258,
"step": 240
},
{
"epoch": 1.6447368421052633,
"grad_norm": 1.2837048768997192,
"learning_rate": 3.618421052631579e-06,
"loss": 0.3147,
"step": 250
},
{
"epoch": 1.7105263157894737,
"grad_norm": 2.3983030319213867,
"learning_rate": 2.960526315789474e-06,
"loss": 0.3278,
"step": 260
},
{
"epoch": 1.776315789473684,
"grad_norm": 1.1498711109161377,
"learning_rate": 2.3026315789473684e-06,
"loss": 0.3126,
"step": 270
},
{
"epoch": 1.8421052631578947,
"grad_norm": 2.200284004211426,
"learning_rate": 1.6447368421052635e-06,
"loss": 0.2814,
"step": 280
},
{
"epoch": 1.9078947368421053,
"grad_norm": 1.2347966432571411,
"learning_rate": 9.86842105263158e-07,
"loss": 0.2528,
"step": 290
},
{
"epoch": 1.973684210526316,
"grad_norm": 1.8223544359207153,
"learning_rate": 3.2894736842105264e-07,
"loss": 0.3086,
"step": 300
},
{
"epoch": 2.0,
"eval_accuracy": 0.9205882352941176,
"eval_loss": 0.3814464807510376,
"eval_runtime": 5.5076,
"eval_samples_per_second": 61.732,
"eval_steps_per_second": 7.807,
"step": 304
},
{
"epoch": 2.0,
"step": 304,
"total_flos": 1.8830891020935168e+17,
"train_loss": 0.5599808394908905,
"train_runtime": 139.819,
"train_samples_per_second": 17.38,
"train_steps_per_second": 2.174
}
],
"logging_steps": 10,
"max_steps": 304,
"num_input_tokens_seen": 0,
"num_train_epochs": 2,
"save_steps": 500,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 1.8830891020935168e+17,
"train_batch_size": 8,
"trial_name": null,
"trial_params": null
}
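
A minimal sketch of how this Trainer state can be inspected programmatically. It assumes the JSON above is saved locally as trainer_state.json (a hypothetical path) and uses only keys that actually appear in the file (log_history, loss, eval_loss, eval_accuracy, best_metric, best_model_checkpoint):

```python
import json

# Load the Hugging Face Trainer state (assumed local path; adjust as needed).
with open("trainer_state.json") as f:
    state = json.load(f)

# Training entries carry a "loss" key; per-epoch evaluation entries carry "eval_loss".
train_log = [e for e in state["log_history"] if "loss" in e]
eval_log = [e for e in state["log_history"] if "eval_loss" in e]

print("best checkpoint:", state["best_model_checkpoint"])
print("best eval loss:", state["best_metric"])

for entry in eval_log:
    print(f"step {entry['step']}: eval_loss={entry['eval_loss']:.4f}, "
          f"eval_accuracy={entry['eval_accuracy']:.4f}")
```

Run against this file, the loop should report the two evaluation points recorded in log_history: eval_loss 0.5105 / accuracy 0.9088 at step 152 (epoch 1) and eval_loss 0.3814 / accuracy 0.9206 at step 304 (epoch 2), the latter matching best_metric and checkpoint-304.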