{
"best_metric": 0.8179661130234268,
"best_model_checkpoint": "outputs/t5-mini/kw/qqp_21/checkpoint-45484",
"epoch": 5.0,
"eval_steps": 500,
"global_step": 56855,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.04,
"grad_norm": 78.46215057373047,
"learning_rate": 4.956028493536189e-05,
"loss": 20.096,
"step": 500
},
{
"epoch": 0.09,
"grad_norm": 69.02784729003906,
"learning_rate": 4.9120569870723775e-05,
"loss": 17.1696,
"step": 1000
},
{
"epoch": 0.13,
"grad_norm": 73.71961212158203,
"learning_rate": 4.868085480608566e-05,
"loss": 15.9207,
"step": 1500
},
{
"epoch": 0.18,
"grad_norm": 56.62882995605469,
"learning_rate": 4.824113974144755e-05,
"loss": 15.5129,
"step": 2000
},
{
"epoch": 0.22,
"grad_norm": 109.83588409423828,
"learning_rate": 4.780142467680943e-05,
"loss": 14.5543,
"step": 2500
},
{
"epoch": 0.26,
"grad_norm": 63.06591796875,
"learning_rate": 4.736170961217132e-05,
"loss": 14.1194,
"step": 3000
},
{
"epoch": 0.31,
"grad_norm": 97.26592254638672,
"learning_rate": 4.6921994547533196e-05,
"loss": 13.8172,
"step": 3500
},
{
"epoch": 0.35,
"grad_norm": 59.0395393371582,
"learning_rate": 4.6482279482895086e-05,
"loss": 13.9323,
"step": 4000
},
{
"epoch": 0.4,
"grad_norm": 50.412662506103516,
"learning_rate": 4.6042564418256975e-05,
"loss": 13.5055,
"step": 4500
},
{
"epoch": 0.44,
"grad_norm": 61.14406967163086,
"learning_rate": 4.560284935361886e-05,
"loss": 13.5669,
"step": 5000
},
{
"epoch": 0.48,
"grad_norm": 87.61188507080078,
"learning_rate": 4.516313428898074e-05,
"loss": 13.0135,
"step": 5500
},
{
"epoch": 0.53,
"grad_norm": 47.9595832824707,
"learning_rate": 4.472341922434263e-05,
"loss": 13.009,
"step": 6000
},
{
"epoch": 0.57,
"grad_norm": 132.860107421875,
"learning_rate": 4.428370415970451e-05,
"loss": 12.725,
"step": 6500
},
{
"epoch": 0.62,
"grad_norm": 107.28248596191406,
"learning_rate": 4.38439890950664e-05,
"loss": 12.5346,
"step": 7000
},
{
"epoch": 0.66,
"grad_norm": 124.45763397216797,
"learning_rate": 4.3404274030428286e-05,
"loss": 12.581,
"step": 7500
},
{
"epoch": 0.7,
"grad_norm": 70.87162780761719,
"learning_rate": 4.296455896579017e-05,
"loss": 12.4756,
"step": 8000
},
{
"epoch": 0.75,
"grad_norm": 79.66390228271484,
"learning_rate": 4.252484390115206e-05,
"loss": 12.2702,
"step": 8500
},
{
"epoch": 0.79,
"grad_norm": 64.87586212158203,
"learning_rate": 4.208512883651394e-05,
"loss": 11.9856,
"step": 9000
},
{
"epoch": 0.84,
"grad_norm": 90.74152374267578,
"learning_rate": 4.1645413771875824e-05,
"loss": 12.1635,
"step": 9500
},
{
"epoch": 0.88,
"grad_norm": 84.467041015625,
"learning_rate": 4.1205698707237714e-05,
"loss": 12.0625,
"step": 10000
},
{
"epoch": 0.92,
"grad_norm": 76.96673583984375,
"learning_rate": 4.0765983642599596e-05,
"loss": 11.9467,
"step": 10500
},
{
"epoch": 0.97,
"grad_norm": 82.92093658447266,
"learning_rate": 4.0326268577961486e-05,
"loss": 11.7273,
"step": 11000
},
{
"epoch": 1.0,
"eval_combined_score": 0.7798604912559093,
"eval_f1": 0.7798604912559093,
"eval_loss": 0.3752712905406952,
"eval_runtime": 64.3854,
"eval_samples_per_second": 627.937,
"eval_steps_per_second": 2.454,
"step": 11371
},
{
"epoch": 1.01,
"grad_norm": 92.10366821289062,
"learning_rate": 3.988655351332337e-05,
"loss": 11.3212,
"step": 11500
},
{
"epoch": 1.06,
"grad_norm": 47.02320861816406,
"learning_rate": 3.944683844868525e-05,
"loss": 11.4345,
"step": 12000
},
{
"epoch": 1.1,
"grad_norm": 42.389854431152344,
"learning_rate": 3.900712338404714e-05,
"loss": 11.4321,
"step": 12500
},
{
"epoch": 1.14,
"grad_norm": 70.61559295654297,
"learning_rate": 3.8567408319409024e-05,
"loss": 11.2646,
"step": 13000
},
{
"epoch": 1.19,
"grad_norm": 102.52373504638672,
"learning_rate": 3.812769325477091e-05,
"loss": 11.3316,
"step": 13500
},
{
"epoch": 1.23,
"grad_norm": 53.84244918823242,
"learning_rate": 3.76879781901328e-05,
"loss": 11.3191,
"step": 14000
},
{
"epoch": 1.28,
"grad_norm": 62.559635162353516,
"learning_rate": 3.724826312549468e-05,
"loss": 11.1774,
"step": 14500
},
{
"epoch": 1.32,
"grad_norm": 77.8012924194336,
"learning_rate": 3.680854806085657e-05,
"loss": 11.3886,
"step": 15000
},
{
"epoch": 1.36,
"grad_norm": 59.538143157958984,
"learning_rate": 3.636883299621845e-05,
"loss": 10.97,
"step": 15500
},
{
"epoch": 1.41,
"grad_norm": 72.5059814453125,
"learning_rate": 3.5929117931580335e-05,
"loss": 10.9806,
"step": 16000
},
{
"epoch": 1.45,
"grad_norm": 50.77811050415039,
"learning_rate": 3.5489402866942224e-05,
"loss": 11.145,
"step": 16500
},
{
"epoch": 1.5,
"grad_norm": 73.23597717285156,
"learning_rate": 3.504968780230411e-05,
"loss": 10.9792,
"step": 17000
},
{
"epoch": 1.54,
"grad_norm": 60.301700592041016,
"learning_rate": 3.4609972737666e-05,
"loss": 10.7031,
"step": 17500
},
{
"epoch": 1.58,
"grad_norm": 63.75108337402344,
"learning_rate": 3.417025767302788e-05,
"loss": 11.1893,
"step": 18000
},
{
"epoch": 1.63,
"grad_norm": 67.40409088134766,
"learning_rate": 3.373054260838976e-05,
"loss": 10.6892,
"step": 18500
},
{
"epoch": 1.67,
"grad_norm": 42.0967903137207,
"learning_rate": 3.329082754375165e-05,
"loss": 10.926,
"step": 19000
},
{
"epoch": 1.71,
"grad_norm": 97.28190612792969,
"learning_rate": 3.2851112479113535e-05,
"loss": 10.6576,
"step": 19500
},
{
"epoch": 1.76,
"grad_norm": 71.09336853027344,
"learning_rate": 3.241139741447542e-05,
"loss": 10.5294,
"step": 20000
},
{
"epoch": 1.8,
"grad_norm": 52.61311721801758,
"learning_rate": 3.197168234983731e-05,
"loss": 10.7066,
"step": 20500
},
{
"epoch": 1.85,
"grad_norm": 70.7719497680664,
"learning_rate": 3.153196728519919e-05,
"loss": 10.6082,
"step": 21000
},
{
"epoch": 1.89,
"grad_norm": 57.35883331298828,
"learning_rate": 3.109225222056108e-05,
"loss": 10.8771,
"step": 21500
},
{
"epoch": 1.93,
"grad_norm": 53.73151397705078,
"learning_rate": 3.065253715592296e-05,
"loss": 10.7454,
"step": 22000
},
{
"epoch": 1.98,
"grad_norm": 75.67459106445312,
"learning_rate": 3.021282209128485e-05,
"loss": 10.8851,
"step": 22500
},
{
"epoch": 2.0,
"eval_combined_score": 0.8087286404461593,
"eval_f1": 0.8087286404461593,
"eval_loss": 0.3425763249397278,
"eval_runtime": 33.7764,
"eval_samples_per_second": 1196.991,
"eval_steps_per_second": 4.678,
"step": 22742
},
{
"epoch": 2.02,
"grad_norm": 68.96992492675781,
"learning_rate": 2.9773107026646735e-05,
"loss": 10.4706,
"step": 23000
},
{
"epoch": 2.07,
"grad_norm": 80.8647232055664,
"learning_rate": 2.933339196200862e-05,
"loss": 10.2234,
"step": 23500
},
{
"epoch": 2.11,
"grad_norm": 96.2059326171875,
"learning_rate": 2.8893676897370504e-05,
"loss": 10.3815,
"step": 24000
},
{
"epoch": 2.15,
"grad_norm": 86.25205993652344,
"learning_rate": 2.845396183273239e-05,
"loss": 10.1995,
"step": 24500
},
{
"epoch": 2.2,
"grad_norm": 64.76643371582031,
"learning_rate": 2.8014246768094277e-05,
"loss": 10.4213,
"step": 25000
},
{
"epoch": 2.24,
"grad_norm": 59.44820022583008,
"learning_rate": 2.7574531703456163e-05,
"loss": 9.9771,
"step": 25500
},
{
"epoch": 2.29,
"grad_norm": 64.23847198486328,
"learning_rate": 2.7134816638818046e-05,
"loss": 10.1808,
"step": 26000
},
{
"epoch": 2.33,
"grad_norm": 74.40206146240234,
"learning_rate": 2.6695101574179932e-05,
"loss": 10.091,
"step": 26500
},
{
"epoch": 2.37,
"grad_norm": 47.61061096191406,
"learning_rate": 2.625538650954182e-05,
"loss": 10.142,
"step": 27000
},
{
"epoch": 2.42,
"grad_norm": 74.26252746582031,
"learning_rate": 2.5815671444903705e-05,
"loss": 10.104,
"step": 27500
},
{
"epoch": 2.46,
"grad_norm": 41.37887191772461,
"learning_rate": 2.5375956380265588e-05,
"loss": 10.4493,
"step": 28000
},
{
"epoch": 2.51,
"grad_norm": 84.93189239501953,
"learning_rate": 2.4936241315627474e-05,
"loss": 10.0927,
"step": 28500
},
{
"epoch": 2.55,
"grad_norm": 50.18044662475586,
"learning_rate": 2.449652625098936e-05,
"loss": 10.1145,
"step": 29000
},
{
"epoch": 2.59,
"grad_norm": 62.862892150878906,
"learning_rate": 2.4056811186351246e-05,
"loss": 10.0547,
"step": 29500
},
{
"epoch": 2.64,
"grad_norm": 68.27816772460938,
"learning_rate": 2.3617096121713133e-05,
"loss": 10.0757,
"step": 30000
},
{
"epoch": 2.68,
"grad_norm": 62.146175384521484,
"learning_rate": 2.317738105707502e-05,
"loss": 10.0992,
"step": 30500
},
{
"epoch": 2.73,
"grad_norm": 85.38493347167969,
"learning_rate": 2.27376659924369e-05,
"loss": 9.7253,
"step": 31000
},
{
"epoch": 2.77,
"grad_norm": 70.77899932861328,
"learning_rate": 2.2297950927798788e-05,
"loss": 10.0789,
"step": 31500
},
{
"epoch": 2.81,
"grad_norm": 76.08915710449219,
"learning_rate": 2.1858235863160674e-05,
"loss": 9.9853,
"step": 32000
},
{
"epoch": 2.86,
"grad_norm": 80.961669921875,
"learning_rate": 2.141852079852256e-05,
"loss": 9.8662,
"step": 32500
},
{
"epoch": 2.9,
"grad_norm": 56.63176727294922,
"learning_rate": 2.0978805733884443e-05,
"loss": 9.9995,
"step": 33000
},
{
"epoch": 2.95,
"grad_norm": 78.24808502197266,
"learning_rate": 2.053909066924633e-05,
"loss": 9.9561,
"step": 33500
},
{
"epoch": 2.99,
"grad_norm": 82.20337677001953,
"learning_rate": 2.0099375604608216e-05,
"loss": 9.8649,
"step": 34000
},
{
"epoch": 3.0,
"eval_combined_score": 0.8170599269424424,
"eval_f1": 0.8170599269424424,
"eval_loss": 0.32534557580947876,
"eval_runtime": 33.5119,
"eval_samples_per_second": 1206.439,
"eval_steps_per_second": 4.715,
"step": 34113
},
{
"epoch": 3.03,
"grad_norm": 98.00669860839844,
"learning_rate": 1.9659660539970102e-05,
"loss": 9.7264,
"step": 34500
},
{
"epoch": 3.08,
"grad_norm": 80.73302459716797,
"learning_rate": 1.9219945475331985e-05,
"loss": 9.6493,
"step": 35000
},
{
"epoch": 3.12,
"grad_norm": 61.9378776550293,
"learning_rate": 1.878023041069387e-05,
"loss": 9.6228,
"step": 35500
},
{
"epoch": 3.17,
"grad_norm": 45.61129379272461,
"learning_rate": 1.8340515346055757e-05,
"loss": 9.5834,
"step": 36000
},
{
"epoch": 3.21,
"grad_norm": 68.07831573486328,
"learning_rate": 1.7900800281417643e-05,
"loss": 9.7149,
"step": 36500
},
{
"epoch": 3.25,
"grad_norm": 107.45940399169922,
"learning_rate": 1.7461085216779526e-05,
"loss": 9.7818,
"step": 37000
},
{
"epoch": 3.3,
"grad_norm": 57.60527801513672,
"learning_rate": 1.7021370152141413e-05,
"loss": 9.476,
"step": 37500
},
{
"epoch": 3.34,
"grad_norm": 93.6783447265625,
"learning_rate": 1.65816550875033e-05,
"loss": 9.7492,
"step": 38000
},
{
"epoch": 3.39,
"grad_norm": 65.9582748413086,
"learning_rate": 1.6141940022865185e-05,
"loss": 9.7112,
"step": 38500
},
{
"epoch": 3.43,
"grad_norm": 76.75605010986328,
"learning_rate": 1.5702224958227068e-05,
"loss": 9.6361,
"step": 39000
},
{
"epoch": 3.47,
"grad_norm": 112.33428955078125,
"learning_rate": 1.5262509893588954e-05,
"loss": 9.6922,
"step": 39500
},
{
"epoch": 3.52,
"grad_norm": 48.53580093383789,
"learning_rate": 1.482279482895084e-05,
"loss": 9.4253,
"step": 40000
},
{
"epoch": 3.56,
"grad_norm": 75.4821548461914,
"learning_rate": 1.4383079764312727e-05,
"loss": 9.6344,
"step": 40500
},
{
"epoch": 3.61,
"grad_norm": 50.351139068603516,
"learning_rate": 1.3943364699674611e-05,
"loss": 9.6358,
"step": 41000
},
{
"epoch": 3.65,
"grad_norm": 161.5642547607422,
"learning_rate": 1.3503649635036497e-05,
"loss": 9.6302,
"step": 41500
},
{
"epoch": 3.69,
"grad_norm": 42.763099670410156,
"learning_rate": 1.3063934570398382e-05,
"loss": 9.2827,
"step": 42000
},
{
"epoch": 3.74,
"grad_norm": 84.87345123291016,
"learning_rate": 1.2624219505760268e-05,
"loss": 9.6216,
"step": 42500
},
{
"epoch": 3.78,
"grad_norm": 88.34630584716797,
"learning_rate": 1.2184504441122153e-05,
"loss": 9.4236,
"step": 43000
},
{
"epoch": 3.83,
"grad_norm": 51.73760223388672,
"learning_rate": 1.1744789376484039e-05,
"loss": 9.5094,
"step": 43500
},
{
"epoch": 3.87,
"grad_norm": 107.87085723876953,
"learning_rate": 1.1305074311845925e-05,
"loss": 9.5279,
"step": 44000
},
{
"epoch": 3.91,
"grad_norm": 121.04542541503906,
"learning_rate": 1.086535924720781e-05,
"loss": 9.5586,
"step": 44500
},
{
"epoch": 3.96,
"grad_norm": 59.98193359375,
"learning_rate": 1.0425644182569696e-05,
"loss": 9.4168,
"step": 45000
},
{
"epoch": 4.0,
"eval_combined_score": 0.8179661130234268,
"eval_f1": 0.8179661130234268,
"eval_loss": 0.3207918703556061,
"eval_runtime": 33.3558,
"eval_samples_per_second": 1212.083,
"eval_steps_per_second": 4.737,
"step": 45484
},
{
"epoch": 4.0,
"grad_norm": 66.1643295288086,
"learning_rate": 9.98592911793158e-06,
"loss": 9.5956,
"step": 45500
},
{
"epoch": 4.05,
"grad_norm": 89.93461608886719,
"learning_rate": 9.546214053293467e-06,
"loss": 9.5001,
"step": 46000
},
{
"epoch": 4.09,
"grad_norm": 50.218685150146484,
"learning_rate": 9.106498988655351e-06,
"loss": 9.3642,
"step": 46500
},
{
"epoch": 4.13,
"grad_norm": 91.35980224609375,
"learning_rate": 8.666783924017237e-06,
"loss": 9.1908,
"step": 47000
},
{
"epoch": 4.18,
"grad_norm": 64.65238952636719,
"learning_rate": 8.227068859379122e-06,
"loss": 9.5462,
"step": 47500
},
{
"epoch": 4.22,
"grad_norm": 102.76400756835938,
"learning_rate": 7.787353794741008e-06,
"loss": 9.134,
"step": 48000
},
{
"epoch": 4.27,
"grad_norm": 72.09874725341797,
"learning_rate": 7.347638730102893e-06,
"loss": 9.1759,
"step": 48500
},
{
"epoch": 4.31,
"grad_norm": 77.65538024902344,
"learning_rate": 6.907923665464779e-06,
"loss": 9.3328,
"step": 49000
},
{
"epoch": 4.35,
"grad_norm": 62.97948455810547,
"learning_rate": 6.468208600826664e-06,
"loss": 9.207,
"step": 49500
},
{
"epoch": 4.4,
"grad_norm": 91.46334838867188,
"learning_rate": 6.02849353618855e-06,
"loss": 9.0635,
"step": 50000
},
{
"epoch": 4.44,
"grad_norm": 70.92437744140625,
"learning_rate": 5.588778471550435e-06,
"loss": 9.3683,
"step": 50500
},
{
"epoch": 4.49,
"grad_norm": 73.28337097167969,
"learning_rate": 5.149063406912321e-06,
"loss": 9.4556,
"step": 51000
},
{
"epoch": 4.53,
"grad_norm": 55.88742446899414,
"learning_rate": 4.709348342274207e-06,
"loss": 9.1352,
"step": 51500
},
{
"epoch": 4.57,
"grad_norm": 66.97167205810547,
"learning_rate": 4.269633277636092e-06,
"loss": 8.9607,
"step": 52000
},
{
"epoch": 4.62,
"grad_norm": 62.52705001831055,
"learning_rate": 3.8299182129979776e-06,
"loss": 9.1861,
"step": 52500
},
{
"epoch": 4.66,
"grad_norm": 47.30684280395508,
"learning_rate": 3.3902031483598625e-06,
"loss": 9.1937,
"step": 53000
},
{
"epoch": 4.7,
"grad_norm": 62.73200988769531,
"learning_rate": 2.9504880837217483e-06,
"loss": 9.1071,
"step": 53500
},
{
"epoch": 4.75,
"grad_norm": 124.52106475830078,
"learning_rate": 2.5107730190836337e-06,
"loss": 9.174,
"step": 54000
},
{
"epoch": 4.79,
"grad_norm": 93.40811157226562,
"learning_rate": 2.0710579544455195e-06,
"loss": 9.4766,
"step": 54500
},
{
"epoch": 4.84,
"grad_norm": 59.61379623413086,
"learning_rate": 1.6313428898074047e-06,
"loss": 9.3928,
"step": 55000
},
{
"epoch": 4.88,
"grad_norm": 104.62787628173828,
"learning_rate": 1.1916278251692903e-06,
"loss": 9.2219,
"step": 55500
},
{
"epoch": 4.92,
"grad_norm": 79.24739074707031,
"learning_rate": 7.519127605311758e-07,
"loss": 9.2934,
"step": 56000
},
{
"epoch": 4.97,
"grad_norm": 51.52265548706055,
"learning_rate": 3.121976958930613e-07,
"loss": 9.2123,
"step": 56500
},
{
"epoch": 5.0,
"eval_combined_score": 0.8173628720138796,
"eval_f1": 0.8173628720138796,
"eval_loss": 0.32361531257629395,
"eval_runtime": 70.6423,
"eval_samples_per_second": 572.32,
"eval_steps_per_second": 2.237,
"step": 56855
},
{
"epoch": 5.0,
"step": 56855,
"total_flos": 0.0,
"train_loss": 7.986851407397381,
"train_runtime": 51406.9762,
"train_samples_per_second": 35.389,
"train_steps_per_second": 1.106
}
],
"logging_steps": 500,
"max_steps": 56855,
"num_input_tokens_seen": 0,
"num_train_epochs": 5,
"save_steps": 500,
"total_flos": 0.0,
"train_batch_size": 32,
"trial_name": null,
"trial_params": null
}