{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 2.0,
"eval_steps": 30,
"global_step": 450,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.044444444444444446,
"grad_norm": 0.36675021052360535,
"learning_rate": 9.987820251299122e-05,
"loss": 0.3008,
"step": 10
},
{
"epoch": 0.08888888888888889,
"grad_norm": 0.045268554240465164,
"learning_rate": 9.951340343707852e-05,
"loss": 0.0061,
"step": 20
},
{
"epoch": 0.13333333333333333,
"grad_norm": 0.006114976014941931,
"learning_rate": 9.890738003669029e-05,
"loss": 0.0042,
"step": 30
},
{
"epoch": 0.13333333333333333,
"eval_loss": 0.004080221988260746,
"eval_runtime": 96.8324,
"eval_samples_per_second": 10.327,
"eval_steps_per_second": 0.258,
"step": 30
},
{
"epoch": 0.17777777777777778,
"grad_norm": 0.0014940646942704916,
"learning_rate": 9.806308479691595e-05,
"loss": 0.0039,
"step": 40
},
{
"epoch": 0.2222222222222222,
"grad_norm": 0.0011621147859841585,
"learning_rate": 9.698463103929542e-05,
"loss": 0.0039,
"step": 50
},
{
"epoch": 0.26666666666666666,
"grad_norm": 0.005421994719654322,
"learning_rate": 9.567727288213005e-05,
"loss": 0.0039,
"step": 60
},
{
"epoch": 0.26666666666666666,
"eval_loss": 0.003909846767783165,
"eval_runtime": 96.8186,
"eval_samples_per_second": 10.329,
"eval_steps_per_second": 0.258,
"step": 60
},
{
"epoch": 0.3111111111111111,
"grad_norm": 0.002968616783618927,
"learning_rate": 9.414737964294636e-05,
"loss": 0.0039,
"step": 70
},
{
"epoch": 0.35555555555555557,
"grad_norm": 0.016864212229847908,
"learning_rate": 9.24024048078213e-05,
"loss": 0.0039,
"step": 80
},
{
"epoch": 0.4,
"grad_norm": 0.004826202057301998,
"learning_rate": 9.045084971874738e-05,
"loss": 0.0039,
"step": 90
},
{
"epoch": 0.4,
"eval_loss": 0.0038871022406965494,
"eval_runtime": 96.843,
"eval_samples_per_second": 10.326,
"eval_steps_per_second": 0.258,
"step": 90
},
{
"epoch": 0.4444444444444444,
"grad_norm": 0.003356265602633357,
"learning_rate": 8.83022221559489e-05,
"loss": 0.0039,
"step": 100
},
{
"epoch": 0.4888888888888889,
"grad_norm": 0.0016542698722332716,
"learning_rate": 8.596699001693255e-05,
"loss": 0.0039,
"step": 110
},
{
"epoch": 0.5333333333333333,
"grad_norm": 0.007389770820736885,
"learning_rate": 8.345653031794292e-05,
"loss": 0.0039,
"step": 120
},
{
"epoch": 0.5333333333333333,
"eval_loss": 0.0038501631934195757,
"eval_runtime": 96.845,
"eval_samples_per_second": 10.326,
"eval_steps_per_second": 0.258,
"step": 120
},
{
"epoch": 0.5777777777777777,
"grad_norm": 0.009115256369113922,
"learning_rate": 8.07830737662829e-05,
"loss": 0.0038,
"step": 130
},
{
"epoch": 0.6222222222222222,
"grad_norm": 0.006779392249882221,
"learning_rate": 7.795964517353735e-05,
"loss": 0.0037,
"step": 140
},
{
"epoch": 0.6666666666666666,
"grad_norm": 0.022685615345835686,
"learning_rate": 7.500000000000001e-05,
"loss": 0.0033,
"step": 150
},
{
"epoch": 0.6666666666666666,
"eval_loss": 0.0030691924039274454,
"eval_runtime": 96.8893,
"eval_samples_per_second": 10.321,
"eval_steps_per_second": 0.258,
"step": 150
},
{
"epoch": 0.7111111111111111,
"grad_norm": 0.04725850373506546,
"learning_rate": 7.191855733945387e-05,
"loss": 0.0031,
"step": 160
},
{
"epoch": 0.7555555555555555,
"grad_norm": 0.02934485487639904,
"learning_rate": 6.873032967079561e-05,
"loss": 0.0031,
"step": 170
},
{
"epoch": 0.8,
"grad_norm": 0.020546529442071915,
"learning_rate": 6.545084971874738e-05,
"loss": 0.0029,
"step": 180
},
{
"epoch": 0.8,
"eval_loss": 0.0029199642594903708,
"eval_runtime": 96.8327,
"eval_samples_per_second": 10.327,
"eval_steps_per_second": 0.258,
"step": 180
},
{
"epoch": 0.8444444444444444,
"grad_norm": 0.01967918500304222,
"learning_rate": 6.209609477998338e-05,
"loss": 0.0029,
"step": 190
},
{
"epoch": 0.8888888888888888,
"grad_norm": 0.013359949924051762,
"learning_rate": 5.868240888334653e-05,
"loss": 0.0028,
"step": 200
},
{
"epoch": 0.9333333333333333,
"grad_norm": 0.03619702160358429,
"learning_rate": 5.522642316338268e-05,
"loss": 0.0028,
"step": 210
},
{
"epoch": 0.9333333333333333,
"eval_loss": 0.002507995581254363,
"eval_runtime": 96.8365,
"eval_samples_per_second": 10.327,
"eval_steps_per_second": 0.258,
"step": 210
},
{
"epoch": 0.9777777777777777,
"grad_norm": 0.015583088621497154,
"learning_rate": 5.174497483512506e-05,
"loss": 0.0026,
"step": 220
},
{
"epoch": 1.0222222222222221,
"grad_norm": 0.02111578918993473,
"learning_rate": 4.825502516487497e-05,
"loss": 0.0028,
"step": 230
},
{
"epoch": 1.0666666666666667,
"grad_norm": 0.014296974055469036,
"learning_rate": 4.477357683661734e-05,
"loss": 0.0025,
"step": 240
},
{
"epoch": 1.0666666666666667,
"eval_loss": 0.0024458724074065685,
"eval_runtime": 96.9524,
"eval_samples_per_second": 10.314,
"eval_steps_per_second": 0.258,
"step": 240
},
{
"epoch": 1.1111111111111112,
"grad_norm": 0.036929819732904434,
"learning_rate": 4.131759111665349e-05,
"loss": 0.0025,
"step": 250
},
{
"epoch": 1.1555555555555554,
"grad_norm": 0.01881023496389389,
"learning_rate": 3.790390522001662e-05,
"loss": 0.0026,
"step": 260
},
{
"epoch": 1.2,
"grad_norm": 0.02219155989587307,
"learning_rate": 3.4549150281252636e-05,
"loss": 0.0024,
"step": 270
},
{
"epoch": 1.2,
"eval_loss": 0.002419873373582959,
"eval_runtime": 96.9311,
"eval_samples_per_second": 10.317,
"eval_steps_per_second": 0.258,
"step": 270
},
{
"epoch": 1.2444444444444445,
"grad_norm": 0.015554447658360004,
"learning_rate": 3.12696703292044e-05,
"loss": 0.0026,
"step": 280
},
{
"epoch": 1.2888888888888888,
"grad_norm": 0.014902675524353981,
"learning_rate": 2.8081442660546125e-05,
"loss": 0.0026,
"step": 290
},
{
"epoch": 1.3333333333333333,
"grad_norm": 0.023787055164575577,
"learning_rate": 2.500000000000001e-05,
"loss": 0.0027,
"step": 300
},
{
"epoch": 1.3333333333333333,
"eval_loss": 0.0023624880705028772,
"eval_runtime": 96.9344,
"eval_samples_per_second": 10.316,
"eval_steps_per_second": 0.258,
"step": 300
},
{
"epoch": 1.3777777777777778,
"grad_norm": 0.024232730269432068,
"learning_rate": 2.2040354826462668e-05,
"loss": 0.0024,
"step": 310
},
{
"epoch": 1.4222222222222223,
"grad_norm": 0.030378742143511772,
"learning_rate": 1.9216926233717085e-05,
"loss": 0.0025,
"step": 320
},
{
"epoch": 1.4666666666666668,
"grad_norm": 0.022359298542141914,
"learning_rate": 1.6543469682057106e-05,
"loss": 0.0025,
"step": 330
},
{
"epoch": 1.4666666666666668,
"eval_loss": 0.0022804364562034607,
"eval_runtime": 96.9112,
"eval_samples_per_second": 10.319,
"eval_steps_per_second": 0.258,
"step": 330
},
{
"epoch": 1.511111111111111,
"grad_norm": 0.01789114437997341,
"learning_rate": 1.4033009983067452e-05,
"loss": 0.0024,
"step": 340
},
{
"epoch": 1.5555555555555556,
"grad_norm": 0.019261475652456284,
"learning_rate": 1.1697777844051105e-05,
"loss": 0.0027,
"step": 350
},
{
"epoch": 1.6,
"grad_norm": 0.021873965859413147,
"learning_rate": 9.549150281252633e-06,
"loss": 0.0023,
"step": 360
},
{
"epoch": 1.6,
"eval_loss": 0.002201332012191415,
"eval_runtime": 96.9199,
"eval_samples_per_second": 10.318,
"eval_steps_per_second": 0.258,
"step": 360
},
{
"epoch": 1.6444444444444444,
"grad_norm": 0.0286718662828207,
"learning_rate": 7.597595192178702e-06,
"loss": 0.0025,
"step": 370
},
{
"epoch": 1.6888888888888889,
"grad_norm": 0.019971389323472977,
"learning_rate": 5.852620357053651e-06,
"loss": 0.0022,
"step": 380
},
{
"epoch": 1.7333333333333334,
"grad_norm": 0.02623485028743744,
"learning_rate": 4.322727117869951e-06,
"loss": 0.0023,
"step": 390
},
{
"epoch": 1.7333333333333334,
"eval_loss": 0.002075706608593464,
"eval_runtime": 96.9432,
"eval_samples_per_second": 10.315,
"eval_steps_per_second": 0.258,
"step": 390
},
{
"epoch": 1.7777777777777777,
"grad_norm": 0.021184084936976433,
"learning_rate": 3.0153689607045845e-06,
"loss": 0.002,
"step": 400
},
{
"epoch": 1.8222222222222222,
"grad_norm": 0.02332981303334236,
"learning_rate": 1.9369152030840556e-06,
"loss": 0.002,
"step": 410
},
{
"epoch": 1.8666666666666667,
"grad_norm": 0.023150313645601273,
"learning_rate": 1.0926199633097157e-06,
"loss": 0.002,
"step": 420
},
{
"epoch": 1.8666666666666667,
"eval_loss": 0.0020291407126933336,
"eval_runtime": 96.9072,
"eval_samples_per_second": 10.319,
"eval_steps_per_second": 0.258,
"step": 420
},
{
"epoch": 1.911111111111111,
"grad_norm": 0.03483868017792702,
"learning_rate": 4.865965629214819e-07,
"loss": 0.002,
"step": 430
},
{
"epoch": 1.9555555555555557,
"grad_norm": 0.03138676658272743,
"learning_rate": 1.2179748700879012e-07,
"loss": 0.0025,
"step": 440
},
{
"epoch": 2.0,
"grad_norm": 0.028902092948555946,
"learning_rate": 0.0,
"loss": 0.0022,
"step": 450
},
{
"epoch": 2.0,
"eval_loss": 0.0020088243763893843,
"eval_runtime": 96.9299,
"eval_samples_per_second": 10.317,
"eval_steps_per_second": 0.258,
"step": 450
},
{
"epoch": 2.0,
"step": 450,
"total_flos": 1.4721820638419354e+18,
"train_loss": 0.009612742554810313,
"train_runtime": 7000.1011,
"train_samples_per_second": 2.571,
"train_steps_per_second": 0.064
}
],
"logging_steps": 10,
"max_steps": 450,
"num_input_tokens_seen": 0,
"num_train_epochs": 2,
"save_steps": 500,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 1.4721820638419354e+18,
"train_batch_size": 10,
"trial_name": null,
"trial_params": null
}