Qwen2.5-1.5B-Open-R1-Distill / trainer_state.json
{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 0.9994447529150472,
"eval_steps": 100,
"global_step": 1350,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.0037016472330186935,
"grad_norm": 2.625,
"learning_rate": 7.407407407407407e-07,
"loss": 1.1051,
"step": 5
},
{
"epoch": 0.007403294466037387,
"grad_norm": 2.859375,
"learning_rate": 1.4814814814814815e-06,
"loss": 1.0518,
"step": 10
},
{
"epoch": 0.01110494169905608,
"grad_norm": 2.59375,
"learning_rate": 2.222222222222222e-06,
"loss": 1.0903,
"step": 15
},
{
"epoch": 0.014806588932074774,
"grad_norm": 2.6875,
"learning_rate": 2.962962962962963e-06,
"loss": 1.1043,
"step": 20
},
{
"epoch": 0.018508236165093468,
"grad_norm": 2.3125,
"learning_rate": 3.7037037037037037e-06,
"loss": 1.0905,
"step": 25
},
{
"epoch": 0.02220988339811216,
"grad_norm": 2.078125,
"learning_rate": 4.444444444444444e-06,
"loss": 1.093,
"step": 30
},
{
"epoch": 0.025911530631130855,
"grad_norm": 2.0,
"learning_rate": 5.185185185185185e-06,
"loss": 1.0631,
"step": 35
},
{
"epoch": 0.029613177864149548,
"grad_norm": 1.75,
"learning_rate": 5.925925925925926e-06,
"loss": 1.0777,
"step": 40
},
{
"epoch": 0.03331482509716824,
"grad_norm": 1.5703125,
"learning_rate": 6.666666666666667e-06,
"loss": 1.0584,
"step": 45
},
{
"epoch": 0.037016472330186935,
"grad_norm": 1.359375,
"learning_rate": 7.4074074074074075e-06,
"loss": 1.0341,
"step": 50
},
{
"epoch": 0.040718119563205625,
"grad_norm": 1.21875,
"learning_rate": 8.148148148148148e-06,
"loss": 1.0072,
"step": 55
},
{
"epoch": 0.04441976679622432,
"grad_norm": 1.1640625,
"learning_rate": 8.888888888888888e-06,
"loss": 1.0038,
"step": 60
},
{
"epoch": 0.04812141402924301,
"grad_norm": 1.0703125,
"learning_rate": 9.62962962962963e-06,
"loss": 0.9487,
"step": 65
},
{
"epoch": 0.05182306126226171,
"grad_norm": 1.0703125,
"learning_rate": 1.037037037037037e-05,
"loss": 0.9725,
"step": 70
},
{
"epoch": 0.0555247084952804,
"grad_norm": 0.96484375,
"learning_rate": 1.1111111111111113e-05,
"loss": 0.9525,
"step": 75
},
{
"epoch": 0.059226355728299096,
"grad_norm": 1.0234375,
"learning_rate": 1.1851851851851852e-05,
"loss": 0.9118,
"step": 80
},
{
"epoch": 0.06292800296131779,
"grad_norm": 0.98828125,
"learning_rate": 1.2592592592592593e-05,
"loss": 0.9172,
"step": 85
},
{
"epoch": 0.06662965019433648,
"grad_norm": 0.94140625,
"learning_rate": 1.3333333333333333e-05,
"loss": 0.9042,
"step": 90
},
{
"epoch": 0.07033129742735518,
"grad_norm": 0.953125,
"learning_rate": 1.4074074074074075e-05,
"loss": 0.9244,
"step": 95
},
{
"epoch": 0.07403294466037387,
"grad_norm": 0.9765625,
"learning_rate": 1.4814814814814815e-05,
"loss": 0.9183,
"step": 100
},
{
"epoch": 0.07403294466037387,
"eval_loss": 0.9207181930541992,
"eval_runtime": 14.1528,
"eval_samples_per_second": 9.044,
"eval_steps_per_second": 2.261,
"step": 100
},
{
"epoch": 0.07773459189339256,
"grad_norm": 0.97265625,
"learning_rate": 1.555555555555556e-05,
"loss": 0.9126,
"step": 105
},
{
"epoch": 0.08143623912641125,
"grad_norm": 0.94921875,
"learning_rate": 1.6296296296296297e-05,
"loss": 0.9135,
"step": 110
},
{
"epoch": 0.08513788635942994,
"grad_norm": 1.0234375,
"learning_rate": 1.7037037037037038e-05,
"loss": 0.8756,
"step": 115
},
{
"epoch": 0.08883953359244864,
"grad_norm": 1.03125,
"learning_rate": 1.7777777777777777e-05,
"loss": 0.8954,
"step": 120
},
{
"epoch": 0.09254118082546733,
"grad_norm": 0.91015625,
"learning_rate": 1.851851851851852e-05,
"loss": 0.8501,
"step": 125
},
{
"epoch": 0.09624282805848602,
"grad_norm": 0.93359375,
"learning_rate": 1.925925925925926e-05,
"loss": 0.9075,
"step": 130
},
{
"epoch": 0.09994447529150471,
"grad_norm": 0.9375,
"learning_rate": 2e-05,
"loss": 0.8572,
"step": 135
},
{
"epoch": 0.10364612252452342,
"grad_norm": 1.015625,
"learning_rate": 1.9999164298554375e-05,
"loss": 0.8856,
"step": 140
},
{
"epoch": 0.10734776975754211,
"grad_norm": 1.0390625,
"learning_rate": 1.9996657333896875e-05,
"loss": 0.8878,
"step": 145
},
{
"epoch": 0.1110494169905608,
"grad_norm": 1.0703125,
"learning_rate": 1.9992479525042305e-05,
"loss": 0.8419,
"step": 150
},
{
"epoch": 0.11475106422357949,
"grad_norm": 0.98046875,
"learning_rate": 1.9986631570270835e-05,
"loss": 0.8759,
"step": 155
},
{
"epoch": 0.11845271145659819,
"grad_norm": 0.91015625,
"learning_rate": 1.9979114447011323e-05,
"loss": 0.8299,
"step": 160
},
{
"epoch": 0.12215435868961688,
"grad_norm": 0.859375,
"learning_rate": 1.996992941167792e-05,
"loss": 0.8408,
"step": 165
},
{
"epoch": 0.12585600592263557,
"grad_norm": 0.87109375,
"learning_rate": 1.9959077999460094e-05,
"loss": 0.8377,
"step": 170
},
{
"epoch": 0.12955765315565426,
"grad_norm": 0.9453125,
"learning_rate": 1.9946562024066018e-05,
"loss": 0.8274,
"step": 175
},
{
"epoch": 0.13325930038867295,
"grad_norm": 0.94140625,
"learning_rate": 1.9932383577419432e-05,
"loss": 0.8508,
"step": 180
},
{
"epoch": 0.13696094762169164,
"grad_norm": 0.99609375,
"learning_rate": 1.991654502931001e-05,
"loss": 0.8198,
"step": 185
},
{
"epoch": 0.14066259485471036,
"grad_norm": 0.88671875,
"learning_rate": 1.9899049026997272e-05,
"loss": 0.8456,
"step": 190
},
{
"epoch": 0.14436424208772905,
"grad_norm": 0.875,
"learning_rate": 1.9879898494768093e-05,
"loss": 0.863,
"step": 195
},
{
"epoch": 0.14806588932074774,
"grad_norm": 1.0390625,
"learning_rate": 1.9859096633447965e-05,
"loss": 0.8583,
"step": 200
},
{
"epoch": 0.14806588932074774,
"eval_loss": 0.8622252941131592,
"eval_runtime": 14.1352,
"eval_samples_per_second": 9.055,
"eval_steps_per_second": 2.264,
"step": 200
},
{
"epoch": 0.15176753655376643,
"grad_norm": 0.8828125,
"learning_rate": 1.9836646919866012e-05,
"loss": 0.827,
"step": 205
},
{
"epoch": 0.15546918378678512,
"grad_norm": 0.9453125,
"learning_rate": 1.9812553106273848e-05,
"loss": 0.8431,
"step": 210
},
{
"epoch": 0.1591708310198038,
"grad_norm": 0.93359375,
"learning_rate": 1.9786819219718443e-05,
"loss": 0.818,
"step": 215
},
{
"epoch": 0.1628724782528225,
"grad_norm": 0.94140625,
"learning_rate": 1.9759449561369036e-05,
"loss": 0.8385,
"step": 220
},
{
"epoch": 0.1665741254858412,
"grad_norm": 0.890625,
"learning_rate": 1.973044870579824e-05,
"loss": 0.8431,
"step": 225
},
{
"epoch": 0.17027577271885988,
"grad_norm": 0.8671875,
"learning_rate": 1.9699821500217436e-05,
"loss": 0.761,
"step": 230
},
{
"epoch": 0.1739774199518786,
"grad_norm": 0.95703125,
"learning_rate": 1.9667573063666622e-05,
"loss": 0.8442,
"step": 235
},
{
"epoch": 0.1776790671848973,
"grad_norm": 0.87109375,
"learning_rate": 1.9633708786158803e-05,
"loss": 0.8249,
"step": 240
},
{
"epoch": 0.18138071441791598,
"grad_norm": 0.86328125,
"learning_rate": 1.959823432777912e-05,
"loss": 0.8621,
"step": 245
},
{
"epoch": 0.18508236165093467,
"grad_norm": 0.8515625,
"learning_rate": 1.95611556177388e-05,
"loss": 0.8083,
"step": 250
},
{
"epoch": 0.18878400888395336,
"grad_norm": 0.83984375,
"learning_rate": 1.9522478853384154e-05,
"loss": 0.8019,
"step": 255
},
{
"epoch": 0.19248565611697205,
"grad_norm": 0.92578125,
"learning_rate": 1.9482210499160767e-05,
"loss": 0.8493,
"step": 260
},
{
"epoch": 0.19618730334999074,
"grad_norm": 0.828125,
"learning_rate": 1.9440357285533e-05,
"loss": 0.7828,
"step": 265
},
{
"epoch": 0.19988895058300943,
"grad_norm": 0.8671875,
"learning_rate": 1.9396926207859085e-05,
"loss": 0.836,
"step": 270
},
{
"epoch": 0.20359059781602815,
"grad_norm": 0.99609375,
"learning_rate": 1.93519245252219e-05,
"loss": 0.836,
"step": 275
},
{
"epoch": 0.20729224504904684,
"grad_norm": 0.89453125,
"learning_rate": 1.9305359759215686e-05,
"loss": 0.8376,
"step": 280
},
{
"epoch": 0.21099389228206553,
"grad_norm": 0.92578125,
"learning_rate": 1.9257239692688907e-05,
"loss": 0.8038,
"step": 285
},
{
"epoch": 0.21469553951508422,
"grad_norm": 0.8671875,
"learning_rate": 1.9207572368443386e-05,
"loss": 0.832,
"step": 290
},
{
"epoch": 0.2183971867481029,
"grad_norm": 0.9453125,
"learning_rate": 1.9156366087890062e-05,
"loss": 0.8348,
"step": 295
},
{
"epoch": 0.2220988339811216,
"grad_norm": 0.91015625,
"learning_rate": 1.9103629409661468e-05,
"loss": 0.7944,
"step": 300
},
{
"epoch": 0.2220988339811216,
"eval_loss": 0.8444012403488159,
"eval_runtime": 14.1453,
"eval_samples_per_second": 9.049,
"eval_steps_per_second": 2.262,
"step": 300
},
{
"epoch": 0.2258004812141403,
"grad_norm": 0.8671875,
"learning_rate": 1.9049371148181253e-05,
"loss": 0.8323,
"step": 305
},
{
"epoch": 0.22950212844715898,
"grad_norm": 0.8828125,
"learning_rate": 1.8993600372190933e-05,
"loss": 0.8538,
"step": 310
},
{
"epoch": 0.23320377568017767,
"grad_norm": 0.96875,
"learning_rate": 1.8936326403234125e-05,
"loss": 0.8482,
"step": 315
},
{
"epoch": 0.23690542291319638,
"grad_norm": 0.99609375,
"learning_rate": 1.8877558814098564e-05,
"loss": 0.8461,
"step": 320
},
{
"epoch": 0.24060707014621507,
"grad_norm": 0.8671875,
"learning_rate": 1.881730742721608e-05,
"loss": 0.8163,
"step": 325
},
{
"epoch": 0.24430871737923376,
"grad_norm": 0.85546875,
"learning_rate": 1.8755582313020912e-05,
"loss": 0.8235,
"step": 330
},
{
"epoch": 0.24801036461225245,
"grad_norm": 0.890625,
"learning_rate": 1.8692393788266477e-05,
"loss": 0.8356,
"step": 335
},
{
"epoch": 0.25171201184527114,
"grad_norm": 0.86328125,
"learning_rate": 1.8627752414301087e-05,
"loss": 0.8406,
"step": 340
},
{
"epoch": 0.25541365907828983,
"grad_norm": 0.890625,
"learning_rate": 1.8561668995302668e-05,
"loss": 0.8382,
"step": 345
},
{
"epoch": 0.2591153063113085,
"grad_norm": 0.87109375,
"learning_rate": 1.8494154576472976e-05,
"loss": 0.8137,
"step": 350
},
{
"epoch": 0.2628169535443272,
"grad_norm": 0.8671875,
"learning_rate": 1.8425220442191496e-05,
"loss": 0.8116,
"step": 355
},
{
"epoch": 0.2665186007773459,
"grad_norm": 0.86328125,
"learning_rate": 1.8354878114129368e-05,
"loss": 0.7837,
"step": 360
},
{
"epoch": 0.2702202480103646,
"grad_norm": 1.140625,
"learning_rate": 1.8283139349323632e-05,
"loss": 0.8512,
"step": 365
},
{
"epoch": 0.2739218952433833,
"grad_norm": 0.92578125,
"learning_rate": 1.8210016138212186e-05,
"loss": 0.7893,
"step": 370
},
{
"epoch": 0.277623542476402,
"grad_norm": 0.80859375,
"learning_rate": 1.8135520702629677e-05,
"loss": 0.7611,
"step": 375
},
{
"epoch": 0.2813251897094207,
"grad_norm": 0.89453125,
"learning_rate": 1.8059665493764745e-05,
"loss": 0.8333,
"step": 380
},
{
"epoch": 0.2850268369424394,
"grad_norm": 0.953125,
"learning_rate": 1.7982463190078928e-05,
"loss": 0.7852,
"step": 385
},
{
"epoch": 0.2887284841754581,
"grad_norm": 0.85546875,
"learning_rate": 1.7903926695187595e-05,
"loss": 0.8475,
"step": 390
},
{
"epoch": 0.2924301314084768,
"grad_norm": 0.86328125,
"learning_rate": 1.78240691357032e-05,
"loss": 0.813,
"step": 395
},
{
"epoch": 0.2961317786414955,
"grad_norm": 0.90625,
"learning_rate": 1.7742903859041324e-05,
"loss": 0.8253,
"step": 400
},
{
"epoch": 0.2961317786414955,
"eval_loss": 0.8342146873474121,
"eval_runtime": 14.1609,
"eval_samples_per_second": 9.039,
"eval_steps_per_second": 2.26,
"step": 400
},
{
"epoch": 0.29983342587451417,
"grad_norm": 0.78515625,
"learning_rate": 1.766044443118978e-05,
"loss": 0.7812,
"step": 405
},
{
"epoch": 0.30353507310753286,
"grad_norm": 0.890625,
"learning_rate": 1.757670463444118e-05,
"loss": 0.8548,
"step": 410
},
{
"epoch": 0.30723672034055155,
"grad_norm": 0.88671875,
"learning_rate": 1.749169846508936e-05,
"loss": 0.7832,
"step": 415
},
{
"epoch": 0.31093836757357024,
"grad_norm": 0.92578125,
"learning_rate": 1.740544013109005e-05,
"loss": 0.8149,
"step": 420
},
{
"epoch": 0.31464001480658893,
"grad_norm": 0.8984375,
"learning_rate": 1.7317944049686125e-05,
"loss": 0.8036,
"step": 425
},
{
"epoch": 0.3183416620396076,
"grad_norm": 0.87890625,
"learning_rate": 1.722922484499793e-05,
"loss": 0.8005,
"step": 430
},
{
"epoch": 0.3220433092726263,
"grad_norm": 0.8046875,
"learning_rate": 1.7139297345578992e-05,
"loss": 0.7736,
"step": 435
},
{
"epoch": 0.325744956505645,
"grad_norm": 0.88671875,
"learning_rate": 1.7048176581937562e-05,
"loss": 0.8337,
"step": 440
},
{
"epoch": 0.3294466037386637,
"grad_norm": 0.890625,
"learning_rate": 1.6955877784024418e-05,
"loss": 0.7989,
"step": 445
},
{
"epoch": 0.3331482509716824,
"grad_norm": 0.90234375,
"learning_rate": 1.686241637868734e-05,
"loss": 0.8384,
"step": 450
},
{
"epoch": 0.33684989820470107,
"grad_norm": 0.890625,
"learning_rate": 1.676780798709262e-05,
"loss": 0.8195,
"step": 455
},
{
"epoch": 0.34055154543771976,
"grad_norm": 0.8046875,
"learning_rate": 1.6672068422114195e-05,
"loss": 0.8656,
"step": 460
},
{
"epoch": 0.3442531926707385,
"grad_norm": 0.8359375,
"learning_rate": 1.657521368569064e-05,
"loss": 0.8226,
"step": 465
},
{
"epoch": 0.3479548399037572,
"grad_norm": 0.890625,
"learning_rate": 1.647725996615059e-05,
"loss": 0.7866,
"step": 470
},
{
"epoch": 0.3516564871367759,
"grad_norm": 0.8515625,
"learning_rate": 1.637822363550706e-05,
"loss": 0.8036,
"step": 475
},
{
"epoch": 0.3553581343697946,
"grad_norm": 0.9609375,
"learning_rate": 1.627812124672099e-05,
"loss": 0.8095,
"step": 480
},
{
"epoch": 0.35905978160281327,
"grad_norm": 0.84765625,
"learning_rate": 1.6176969530934573e-05,
"loss": 0.7834,
"step": 485
},
{
"epoch": 0.36276142883583196,
"grad_norm": 0.9453125,
"learning_rate": 1.6074785394674835e-05,
"loss": 0.8101,
"step": 490
},
{
"epoch": 0.36646307606885065,
"grad_norm": 0.82421875,
"learning_rate": 1.5971585917027864e-05,
"loss": 0.7909,
"step": 495
},
{
"epoch": 0.37016472330186934,
"grad_norm": 0.91015625,
"learning_rate": 1.586738834678418e-05,
"loss": 0.837,
"step": 500
},
{
"epoch": 0.37016472330186934,
"eval_loss": 0.8278591632843018,
"eval_runtime": 14.1558,
"eval_samples_per_second": 9.042,
"eval_steps_per_second": 2.261,
"step": 500
},
{
"epoch": 0.373866370534888,
"grad_norm": 0.8671875,
"learning_rate": 1.5762210099555804e-05,
"loss": 0.7731,
"step": 505
},
{
"epoch": 0.3775680177679067,
"grad_norm": 0.8671875,
"learning_rate": 1.5656068754865388e-05,
"loss": 0.7969,
"step": 510
},
{
"epoch": 0.3812696650009254,
"grad_norm": 0.91015625,
"learning_rate": 1.554898205320797e-05,
"loss": 0.7641,
"step": 515
},
{
"epoch": 0.3849713122339441,
"grad_norm": 0.91796875,
"learning_rate": 1.5440967893085827e-05,
"loss": 0.792,
"step": 520
},
{
"epoch": 0.3886729594669628,
"grad_norm": 0.86328125,
"learning_rate": 1.5332044328016916e-05,
"loss": 0.7894,
"step": 525
},
{
"epoch": 0.3923746066999815,
"grad_norm": 0.890625,
"learning_rate": 1.5222229563517385e-05,
"loss": 0.8165,
"step": 530
},
{
"epoch": 0.39607625393300017,
"grad_norm": 0.765625,
"learning_rate": 1.5111541954058733e-05,
"loss": 0.8038,
"step": 535
},
{
"epoch": 0.39977790116601886,
"grad_norm": 0.91015625,
"learning_rate": 1.5000000000000002e-05,
"loss": 0.7635,
"step": 540
},
{
"epoch": 0.40347954839903755,
"grad_norm": 0.96484375,
"learning_rate": 1.4887622344495643e-05,
"loss": 0.8192,
"step": 545
},
{
"epoch": 0.4071811956320563,
"grad_norm": 0.87109375,
"learning_rate": 1.4774427770379492e-05,
"loss": 0.8043,
"step": 550
},
{
"epoch": 0.410882842865075,
"grad_norm": 0.8671875,
"learning_rate": 1.4660435197025391e-05,
"loss": 0.7787,
"step": 555
},
{
"epoch": 0.41458449009809367,
"grad_norm": 0.91015625,
"learning_rate": 1.4545663677185007e-05,
"loss": 0.7997,
"step": 560
},
{
"epoch": 0.41828613733111236,
"grad_norm": 0.9140625,
"learning_rate": 1.4430132393803353e-05,
"loss": 0.8105,
"step": 565
},
{
"epoch": 0.42198778456413105,
"grad_norm": 0.95703125,
"learning_rate": 1.4313860656812537e-05,
"loss": 0.7971,
"step": 570
},
{
"epoch": 0.42568943179714974,
"grad_norm": 0.984375,
"learning_rate": 1.4196867899904292e-05,
"loss": 0.8128,
"step": 575
},
{
"epoch": 0.42939107903016843,
"grad_norm": 0.984375,
"learning_rate": 1.4079173677281836e-05,
"loss": 0.8489,
"step": 580
},
{
"epoch": 0.4330927262631871,
"grad_norm": 0.8515625,
"learning_rate": 1.396079766039157e-05,
"loss": 0.791,
"step": 585
},
{
"epoch": 0.4367943734962058,
"grad_norm": 0.83203125,
"learning_rate": 1.3841759634635177e-05,
"loss": 0.7924,
"step": 590
},
{
"epoch": 0.4404960207292245,
"grad_norm": 0.84375,
"learning_rate": 1.3722079496062702e-05,
"loss": 0.812,
"step": 595
},
{
"epoch": 0.4441976679622432,
"grad_norm": 0.94140625,
"learning_rate": 1.3601777248047105e-05,
"loss": 0.8149,
"step": 600
},
{
"epoch": 0.4441976679622432,
"eval_loss": 0.8238165378570557,
"eval_runtime": 14.1476,
"eval_samples_per_second": 9.047,
"eval_steps_per_second": 2.262,
"step": 600
},
{
"epoch": 0.4478993151952619,
"grad_norm": 1.0625,
"learning_rate": 1.3480872997940906e-05,
"loss": 0.7896,
"step": 605
},
{
"epoch": 0.4516009624282806,
"grad_norm": 0.93359375,
"learning_rate": 1.3359386953715423e-05,
"loss": 0.8172,
"step": 610
},
{
"epoch": 0.45530260966129926,
"grad_norm": 0.8359375,
"learning_rate": 1.3237339420583213e-05,
"loss": 0.7612,
"step": 615
},
{
"epoch": 0.45900425689431795,
"grad_norm": 0.88671875,
"learning_rate": 1.3114750797604248e-05,
"loss": 0.7997,
"step": 620
},
{
"epoch": 0.46270590412733664,
"grad_norm": 0.83984375,
"learning_rate": 1.2991641574276419e-05,
"loss": 0.7969,
"step": 625
},
{
"epoch": 0.46640755136035533,
"grad_norm": 0.8671875,
"learning_rate": 1.2868032327110904e-05,
"loss": 0.8062,
"step": 630
},
{
"epoch": 0.4701091985933741,
"grad_norm": 0.90625,
"learning_rate": 1.2743943716193017e-05,
"loss": 0.7925,
"step": 635
},
{
"epoch": 0.47381084582639277,
"grad_norm": 0.83984375,
"learning_rate": 1.261939648172906e-05,
"loss": 0.7757,
"step": 640
},
{
"epoch": 0.47751249305941146,
"grad_norm": 0.88671875,
"learning_rate": 1.2494411440579814e-05,
"loss": 0.7899,
"step": 645
},
{
"epoch": 0.48121414029243015,
"grad_norm": 0.8984375,
"learning_rate": 1.2369009482781191e-05,
"loss": 0.791,
"step": 650
},
{
"epoch": 0.48491578752544884,
"grad_norm": 0.83984375,
"learning_rate": 1.2243211568052678e-05,
"loss": 0.8101,
"step": 655
},
{
"epoch": 0.48861743475846753,
"grad_norm": 0.81640625,
"learning_rate": 1.211703872229411e-05,
"loss": 0.7881,
"step": 660
},
{
"epoch": 0.4923190819914862,
"grad_norm": 0.9609375,
"learning_rate": 1.1990512034071407e-05,
"loss": 0.7906,
"step": 665
},
{
"epoch": 0.4960207292245049,
"grad_norm": 1.359375,
"learning_rate": 1.1863652651091824e-05,
"loss": 0.8038,
"step": 670
},
{
"epoch": 0.4997223764575236,
"grad_norm": 0.80859375,
"learning_rate": 1.1736481776669307e-05,
"loss": 0.8216,
"step": 675
},
{
"epoch": 0.5034240236905423,
"grad_norm": 0.921875,
"learning_rate": 1.1609020666180574e-05,
"loss": 0.7844,
"step": 680
},
{
"epoch": 0.507125670923561,
"grad_norm": 0.8984375,
"learning_rate": 1.1481290623512491e-05,
"loss": 0.8239,
"step": 685
},
{
"epoch": 0.5108273181565797,
"grad_norm": 0.8359375,
"learning_rate": 1.1353312997501313e-05,
"loss": 0.7616,
"step": 690
},
{
"epoch": 0.5145289653895984,
"grad_norm": 0.85546875,
"learning_rate": 1.1225109178364456e-05,
"loss": 0.805,
"step": 695
},
{
"epoch": 0.518230612622617,
"grad_norm": 0.9140625,
"learning_rate": 1.1096700594125318e-05,
"loss": 0.7574,
"step": 700
},
{
"epoch": 0.518230612622617,
"eval_loss": 0.8209631443023682,
"eval_runtime": 14.1407,
"eval_samples_per_second": 9.052,
"eval_steps_per_second": 2.263,
"step": 700
},
{
"epoch": 0.5219322598556357,
"grad_norm": 0.91015625,
"learning_rate": 1.0968108707031792e-05,
"loss": 0.788,
"step": 705
},
{
"epoch": 0.5256339070886544,
"grad_norm": 0.890625,
"learning_rate": 1.0839355009969068e-05,
"loss": 0.8086,
"step": 710
},
{
"epoch": 0.5293355543216731,
"grad_norm": 0.91796875,
"learning_rate": 1.0710461022867303e-05,
"loss": 0.84,
"step": 715
},
{
"epoch": 0.5330372015546918,
"grad_norm": 0.8359375,
"learning_rate": 1.0581448289104759e-05,
"loss": 0.7999,
"step": 720
},
{
"epoch": 0.5367388487877105,
"grad_norm": 0.96875,
"learning_rate": 1.0452338371907065e-05,
"loss": 0.8109,
"step": 725
},
{
"epoch": 0.5404404960207292,
"grad_norm": 0.8984375,
"learning_rate": 1.0323152850743107e-05,
"loss": 0.807,
"step": 730
},
{
"epoch": 0.5441421432537479,
"grad_norm": 0.91796875,
"learning_rate": 1.0193913317718245e-05,
"loss": 0.7987,
"step": 735
},
{
"epoch": 0.5478437904867666,
"grad_norm": 0.828125,
"learning_rate": 1.0064641373965394e-05,
"loss": 0.7947,
"step": 740
},
{
"epoch": 0.5515454377197853,
"grad_norm": 1.0,
"learning_rate": 9.935358626034607e-06,
"loss": 0.8161,
"step": 745
},
{
"epoch": 0.555247084952804,
"grad_norm": 0.828125,
"learning_rate": 9.806086682281759e-06,
"loss": 0.8154,
"step": 750
},
{
"epoch": 0.5589487321858226,
"grad_norm": 0.90625,
"learning_rate": 9.676847149256894e-06,
"loss": 0.8126,
"step": 755
},
{
"epoch": 0.5626503794188414,
"grad_norm": 0.87890625,
"learning_rate": 9.547661628092938e-06,
"loss": 0.7987,
"step": 760
},
{
"epoch": 0.5663520266518601,
"grad_norm": 0.94921875,
"learning_rate": 9.418551710895243e-06,
"loss": 0.7893,
"step": 765
},
{
"epoch": 0.5700536738848788,
"grad_norm": 0.92578125,
"learning_rate": 9.289538977132702e-06,
"loss": 0.7406,
"step": 770
},
{
"epoch": 0.5737553211178975,
"grad_norm": 0.9375,
"learning_rate": 9.160644990030932e-06,
"loss": 0.817,
"step": 775
},
{
"epoch": 0.5774569683509162,
"grad_norm": 0.7890625,
"learning_rate": 9.03189129296821e-06,
"loss": 0.7853,
"step": 780
},
{
"epoch": 0.5811586155839349,
"grad_norm": 0.8359375,
"learning_rate": 8.903299405874685e-06,
"loss": 0.7853,
"step": 785
},
{
"epoch": 0.5848602628169536,
"grad_norm": 0.90234375,
"learning_rate": 8.774890821635548e-06,
"loss": 0.7722,
"step": 790
},
{
"epoch": 0.5885619100499723,
"grad_norm": 0.8515625,
"learning_rate": 8.646687002498692e-06,
"loss": 0.7754,
"step": 795
},
{
"epoch": 0.592263557282991,
"grad_norm": 0.9453125,
"learning_rate": 8.518709376487515e-06,
"loss": 0.797,
"step": 800
},
{
"epoch": 0.592263557282991,
"eval_loss": 0.8192309141159058,
"eval_runtime": 14.1305,
"eval_samples_per_second": 9.058,
"eval_steps_per_second": 2.265,
"step": 800
},
{
"epoch": 0.5959652045160097,
"grad_norm": 0.84765625,
"learning_rate": 8.390979333819427e-06,
"loss": 0.7655,
"step": 805
},
{
"epoch": 0.5996668517490283,
"grad_norm": 0.85546875,
"learning_rate": 8.263518223330698e-06,
"loss": 0.7754,
"step": 810
},
{
"epoch": 0.603368498982047,
"grad_norm": 0.921875,
"learning_rate": 8.13634734890818e-06,
"loss": 0.7866,
"step": 815
},
{
"epoch": 0.6070701462150657,
"grad_norm": 0.84765625,
"learning_rate": 8.009487965928597e-06,
"loss": 0.8163,
"step": 820
},
{
"epoch": 0.6107717934480844,
"grad_norm": 0.89453125,
"learning_rate": 7.882961277705897e-06,
"loss": 0.808,
"step": 825
},
{
"epoch": 0.6144734406811031,
"grad_norm": 0.8125,
"learning_rate": 7.756788431947327e-06,
"loss": 0.7537,
"step": 830
},
{
"epoch": 0.6181750879141218,
"grad_norm": 0.87890625,
"learning_rate": 7.630990517218809e-06,
"loss": 0.7992,
"step": 835
},
{
"epoch": 0.6218767351471405,
"grad_norm": 0.80859375,
"learning_rate": 7.505588559420188e-06,
"loss": 0.7968,
"step": 840
},
{
"epoch": 0.6255783823801592,
"grad_norm": 0.9453125,
"learning_rate": 7.380603518270942e-06,
"loss": 0.7735,
"step": 845
},
{
"epoch": 0.6292800296131779,
"grad_norm": 0.90625,
"learning_rate": 7.256056283806987e-06,
"loss": 0.8063,
"step": 850
},
{
"epoch": 0.6329816768461966,
"grad_norm": 0.953125,
"learning_rate": 7.131967672889101e-06,
"loss": 0.8159,
"step": 855
},
{
"epoch": 0.6366833240792152,
"grad_norm": 0.91015625,
"learning_rate": 7.008358425723586e-06,
"loss": 0.7991,
"step": 860
},
{
"epoch": 0.6403849713122339,
"grad_norm": 0.8984375,
"learning_rate": 6.885249202395754e-06,
"loss": 0.8088,
"step": 865
},
{
"epoch": 0.6440866185452526,
"grad_norm": 0.86328125,
"learning_rate": 6.762660579416791e-06,
"loss": 0.8,
"step": 870
},
{
"epoch": 0.6477882657782713,
"grad_norm": 0.84375,
"learning_rate": 6.640613046284581e-06,
"loss": 0.79,
"step": 875
},
{
"epoch": 0.65148991301129,
"grad_norm": 0.84375,
"learning_rate": 6.519127002059096e-06,
"loss": 0.8089,
"step": 880
},
{
"epoch": 0.6551915602443087,
"grad_norm": 0.90234375,
"learning_rate": 6.3982227519528986e-06,
"loss": 0.8017,
"step": 885
},
{
"epoch": 0.6588932074773274,
"grad_norm": 0.9375,
"learning_rate": 6.277920503937303e-06,
"loss": 0.8179,
"step": 890
},
{
"epoch": 0.6625948547103461,
"grad_norm": 0.8828125,
"learning_rate": 6.158240365364823e-06,
"loss": 0.7799,
"step": 895
},
{
"epoch": 0.6662965019433648,
"grad_norm": 0.8359375,
"learning_rate": 6.039202339608432e-06,
"loss": 0.7924,
"step": 900
},
{
"epoch": 0.6662965019433648,
"eval_loss": 0.8184313178062439,
"eval_runtime": 14.1349,
"eval_samples_per_second": 9.056,
"eval_steps_per_second": 2.264,
"step": 900
},
{
"epoch": 0.6699981491763835,
"grad_norm": 0.94921875,
"learning_rate": 5.920826322718165e-06,
"loss": 0.8342,
"step": 905
},
{
"epoch": 0.6736997964094021,
"grad_norm": 0.9453125,
"learning_rate": 5.80313210009571e-06,
"loss": 0.783,
"step": 910
},
{
"epoch": 0.6774014436424208,
"grad_norm": 0.82421875,
"learning_rate": 5.686139343187468e-06,
"loss": 0.7624,
"step": 915
},
{
"epoch": 0.6811030908754395,
"grad_norm": 0.8515625,
"learning_rate": 5.569867606196652e-06,
"loss": 0.7978,
"step": 920
},
{
"epoch": 0.6848047381084582,
"grad_norm": 0.8203125,
"learning_rate": 5.454336322814995e-06,
"loss": 0.7723,
"step": 925
},
{
"epoch": 0.688506385341477,
"grad_norm": 0.85546875,
"learning_rate": 5.339564802974615e-06,
"loss": 0.7817,
"step": 930
},
{
"epoch": 0.6922080325744957,
"grad_norm": 0.8828125,
"learning_rate": 5.2255722296205104e-06,
"loss": 0.7841,
"step": 935
},
{
"epoch": 0.6959096798075144,
"grad_norm": 0.94921875,
"learning_rate": 5.112377655504359e-06,
"loss": 0.7929,
"step": 940
},
{
"epoch": 0.6996113270405331,
"grad_norm": 0.90234375,
"learning_rate": 5.000000000000003e-06,
"loss": 0.8059,
"step": 945
},
{
"epoch": 0.7033129742735518,
"grad_norm": 0.875,
"learning_rate": 4.888458045941269e-06,
"loss": 0.7832,
"step": 950
},
{
"epoch": 0.7070146215065705,
"grad_norm": 0.80078125,
"learning_rate": 4.7777704364826175e-06,
"loss": 0.7965,
"step": 955
},
{
"epoch": 0.7107162687395892,
"grad_norm": 0.8046875,
"learning_rate": 4.66795567198309e-06,
"loss": 0.7663,
"step": 960
},
{
"epoch": 0.7144179159726078,
"grad_norm": 0.84375,
"learning_rate": 4.559032106914173e-06,
"loss": 0.7625,
"step": 965
},
{
"epoch": 0.7181195632056265,
"grad_norm": 0.84765625,
"learning_rate": 4.4510179467920325e-06,
"loss": 0.7899,
"step": 970
},
{
"epoch": 0.7218212104386452,
"grad_norm": 0.8125,
"learning_rate": 4.343931245134616e-06,
"loss": 0.7672,
"step": 975
},
{
"epoch": 0.7255228576716639,
"grad_norm": 0.80078125,
"learning_rate": 4.237789900444197e-06,
"loss": 0.82,
"step": 980
},
{
"epoch": 0.7292245049046826,
"grad_norm": 0.87109375,
"learning_rate": 4.132611653215822e-06,
"loss": 0.7918,
"step": 985
},
{
"epoch": 0.7329261521377013,
"grad_norm": 0.9609375,
"learning_rate": 4.028414082972141e-06,
"loss": 0.7849,
"step": 990
},
{
"epoch": 0.73662779937072,
"grad_norm": 0.87890625,
"learning_rate": 3.925214605325164e-06,
"loss": 0.8002,
"step": 995
},
{
"epoch": 0.7403294466037387,
"grad_norm": 0.890625,
"learning_rate": 3.823030469065431e-06,
"loss": 0.7934,
"step": 1000
},
{
"epoch": 0.7403294466037387,
"eval_loss": 0.8180199861526489,
"eval_runtime": 14.1486,
"eval_samples_per_second": 9.047,
"eval_steps_per_second": 2.262,
"step": 1000
},
{
"epoch": 0.7440310938367574,
"grad_norm": 0.8046875,
"learning_rate": 3.7218787532790167e-06,
"loss": 0.8235,
"step": 1005
},
{
"epoch": 0.747732741069776,
"grad_norm": 0.88671875,
"learning_rate": 3.6217763644929393e-06,
"loss": 0.7373,
"step": 1010
},
{
"epoch": 0.7514343883027947,
"grad_norm": 1.1328125,
"learning_rate": 3.522740033849411e-06,
"loss": 0.7963,
"step": 1015
},
{
"epoch": 0.7551360355358134,
"grad_norm": 0.8671875,
"learning_rate": 3.424786314309365e-06,
"loss": 0.7699,
"step": 1020
},
{
"epoch": 0.7588376827688321,
"grad_norm": 0.84765625,
"learning_rate": 3.3279315778858034e-06,
"loss": 0.8272,
"step": 1025
},
{
"epoch": 0.7625393300018508,
"grad_norm": 0.90234375,
"learning_rate": 3.2321920129073815e-06,
"loss": 0.8152,
"step": 1030
},
{
"epoch": 0.7662409772348695,
"grad_norm": 0.8203125,
"learning_rate": 3.1375836213126653e-06,
"loss": 0.7759,
"step": 1035
},
{
"epoch": 0.7699426244678882,
"grad_norm": 0.83984375,
"learning_rate": 3.04412221597558e-06,
"loss": 0.7774,
"step": 1040
},
{
"epoch": 0.7736442717009069,
"grad_norm": 0.8359375,
"learning_rate": 2.9518234180624393e-06,
"loss": 0.7724,
"step": 1045
},
{
"epoch": 0.7773459189339256,
"grad_norm": 0.87890625,
"learning_rate": 2.8607026544210115e-06,
"loss": 0.7814,
"step": 1050
},
{
"epoch": 0.7810475661669443,
"grad_norm": 0.94921875,
"learning_rate": 2.770775155002071e-06,
"loss": 0.7894,
"step": 1055
},
{
"epoch": 0.784749213399963,
"grad_norm": 0.80078125,
"learning_rate": 2.6820559503138797e-06,
"loss": 0.8102,
"step": 1060
},
{
"epoch": 0.7884508606329816,
"grad_norm": 0.84765625,
"learning_rate": 2.594559868909956e-06,
"loss": 0.8363,
"step": 1065
},
{
"epoch": 0.7921525078660003,
"grad_norm": 0.8984375,
"learning_rate": 2.50830153491064e-06,
"loss": 0.7778,
"step": 1070
},
{
"epoch": 0.795854155099019,
"grad_norm": 0.85546875,
"learning_rate": 2.423295365558821e-06,
"loss": 0.7898,
"step": 1075
},
{
"epoch": 0.7995558023320377,
"grad_norm": 0.95703125,
"learning_rate": 2.339555568810221e-06,
"loss": 0.7999,
"step": 1080
},
{
"epoch": 0.8032574495650564,
"grad_norm": 0.859375,
"learning_rate": 2.2570961409586756e-06,
"loss": 0.7868,
"step": 1085
},
{
"epoch": 0.8069590967980751,
"grad_norm": 0.91015625,
"learning_rate": 2.1759308642968024e-06,
"loss": 0.8082,
"step": 1090
},
{
"epoch": 0.8106607440310938,
"grad_norm": 0.796875,
"learning_rate": 2.0960733048124082e-06,
"loss": 0.7827,
"step": 1095
},
{
"epoch": 0.8143623912641126,
"grad_norm": 0.828125,
"learning_rate": 2.01753680992107e-06,
"loss": 0.7951,
"step": 1100
},
{
"epoch": 0.8143623912641126,
"eval_loss": 0.8177920579910278,
"eval_runtime": 14.142,
"eval_samples_per_second": 9.051,
"eval_steps_per_second": 2.263,
"step": 1100
},
{
"epoch": 0.8180640384971313,
"grad_norm": 0.90234375,
"learning_rate": 1.9403345062352574e-06,
"loss": 0.7828,
"step": 1105
},
{
"epoch": 0.82176568573015,
"grad_norm": 0.8359375,
"learning_rate": 1.8644792973703252e-06,
"loss": 0.807,
"step": 1110
},
{
"epoch": 0.8254673329631687,
"grad_norm": 0.90234375,
"learning_rate": 1.7899838617878163e-06,
"loss": 0.7401,
"step": 1115
},
{
"epoch": 0.8291689801961873,
"grad_norm": 0.9609375,
"learning_rate": 1.7168606506763696e-06,
"loss": 0.7825,
"step": 1120
},
{
"epoch": 0.832870627429206,
"grad_norm": 0.8203125,
"learning_rate": 1.6451218858706374e-06,
"loss": 0.7811,
"step": 1125
},
{
"epoch": 0.8365722746622247,
"grad_norm": 0.859375,
"learning_rate": 1.5747795578085046e-06,
"loss": 0.8101,
"step": 1130
},
{
"epoch": 0.8402739218952434,
"grad_norm": 0.9609375,
"learning_rate": 1.505845423527027e-06,
"loss": 0.824,
"step": 1135
},
{
"epoch": 0.8439755691282621,
"grad_norm": 0.83203125,
"learning_rate": 1.4383310046973365e-06,
"loss": 0.8023,
"step": 1140
},
{
"epoch": 0.8476772163612808,
"grad_norm": 0.83984375,
"learning_rate": 1.372247585698916e-06,
"loss": 0.7874,
"step": 1145
},
{
"epoch": 0.8513788635942995,
"grad_norm": 1.0078125,
"learning_rate": 1.307606211733522e-06,
"loss": 0.7812,
"step": 1150
},
{
"epoch": 0.8550805108273182,
"grad_norm": 0.80078125,
"learning_rate": 1.2444176869790925e-06,
"loss": 0.7601,
"step": 1155
},
{
"epoch": 0.8587821580603369,
"grad_norm": 0.859375,
"learning_rate": 1.18269257278392e-06,
"loss": 0.8021,
"step": 1160
},
{
"epoch": 0.8624838052933556,
"grad_norm": 0.8984375,
"learning_rate": 1.1224411859014417e-06,
"loss": 0.8272,
"step": 1165
},
{
"epoch": 0.8661854525263742,
"grad_norm": 0.88671875,
"learning_rate": 1.0636735967658785e-06,
"loss": 0.7712,
"step": 1170
},
{
"epoch": 0.8698870997593929,
"grad_norm": 0.859375,
"learning_rate": 1.0063996278090704e-06,
"loss": 0.819,
"step": 1175
},
{
"epoch": 0.8735887469924116,
"grad_norm": 0.81640625,
"learning_rate": 9.506288518187468e-07,
"loss": 0.815,
"step": 1180
},
{
"epoch": 0.8772903942254303,
"grad_norm": 0.9140625,
"learning_rate": 8.963705903385344e-07,
"loss": 0.808,
"step": 1185
},
{
"epoch": 0.880992041458449,
"grad_norm": 0.80078125,
"learning_rate": 8.436339121099413e-07,
"loss": 0.7677,
"step": 1190
},
{
"epoch": 0.8846936886914677,
"grad_norm": 0.85546875,
"learning_rate": 7.924276315566171e-07,
"loss": 0.7949,
"step": 1195
},
{
"epoch": 0.8883953359244864,
"grad_norm": 0.8515625,
"learning_rate": 7.427603073110967e-07,
"loss": 0.8257,
"step": 1200
},
{
"epoch": 0.8883953359244864,
"eval_loss": 0.8177732229232788,
"eval_runtime": 14.1555,
"eval_samples_per_second": 9.042,
"eval_steps_per_second": 2.261,
"step": 1200
},
{
"epoch": 0.8920969831575051,
"grad_norm": 0.875,
"learning_rate": 6.946402407843156e-07,
"loss": 0.8019,
"step": 1205
},
{
"epoch": 0.8957986303905238,
"grad_norm": 0.921875,
"learning_rate": 6.480754747781037e-07,
"loss": 0.788,
"step": 1210
},
{
"epoch": 0.8995002776235425,
"grad_norm": 0.84375,
"learning_rate": 6.030737921409169e-07,
"loss": 0.7913,
"step": 1215
},
{
"epoch": 0.9032019248565611,
"grad_norm": 0.859375,
"learning_rate": 5.596427144670002e-07,
"loss": 0.7449,
"step": 1220
},
{
"epoch": 0.9069035720895798,
"grad_norm": 0.91796875,
"learning_rate": 5.177895008392353e-07,
"loss": 0.8081,
"step": 1225
},
{
"epoch": 0.9106052193225985,
"grad_norm": 0.88671875,
"learning_rate": 4.775211466158469e-07,
"loss": 0.839,
"step": 1230
},
{
"epoch": 0.9143068665556172,
"grad_norm": 0.8828125,
"learning_rate": 4.388443822612043e-07,
"loss": 0.8061,
"step": 1235
},
{
"epoch": 0.9180085137886359,
"grad_norm": 0.8984375,
"learning_rate": 4.017656722208807e-07,
"loss": 0.8096,
"step": 1240
},
{
"epoch": 0.9217101610216546,
"grad_norm": 0.91796875,
"learning_rate": 3.662912138411967e-07,
"loss": 0.8135,
"step": 1245
},
{
"epoch": 0.9254118082546733,
"grad_norm": 0.80859375,
"learning_rate": 3.3242693633337986e-07,
"loss": 0.7955,
"step": 1250
},
{
"epoch": 0.929113455487692,
"grad_norm": 0.8671875,
"learning_rate": 3.001784997825652e-07,
"loss": 0.7772,
"step": 1255
},
{
"epoch": 0.9328151027207107,
"grad_norm": 0.81640625,
"learning_rate": 2.6955129420176193e-07,
"loss": 0.8006,
"step": 1260
},
{
"epoch": 0.9365167499537294,
"grad_norm": 0.85546875,
"learning_rate": 2.405504386309643e-07,
"loss": 0.8089,
"step": 1265
},
{
"epoch": 0.9402183971867482,
"grad_norm": 0.98046875,
"learning_rate": 2.1318078028155886e-07,
"loss": 0.7974,
"step": 1270
},
{
"epoch": 0.9439200444197668,
"grad_norm": 0.921875,
"learning_rate": 1.874468937261531e-07,
"loss": 0.8074,
"step": 1275
},
{
"epoch": 0.9476216916527855,
"grad_norm": 0.8671875,
"learning_rate": 1.6335308013398888e-07,
"loss": 0.7804,
"step": 1280
},
{
"epoch": 0.9513233388858042,
"grad_norm": 0.91015625,
"learning_rate": 1.409033665520354e-07,
"loss": 0.776,
"step": 1285
},
{
"epoch": 0.9550249861188229,
"grad_norm": 0.796875,
"learning_rate": 1.201015052319099e-07,
"loss": 0.7811,
"step": 1290
},
{
"epoch": 0.9587266333518416,
"grad_norm": 0.83203125,
"learning_rate": 1.0095097300273026e-07,
"loss": 0.7498,
"step": 1295
},
{
"epoch": 0.9624282805848603,
"grad_norm": 0.86328125,
"learning_rate": 8.345497068998897e-08,
"loss": 0.8092,
"step": 1300
},
{
"epoch": 0.9624282805848603,
"eval_loss": 0.8178356885910034,
"eval_runtime": 30.2792,
"eval_samples_per_second": 4.227,
"eval_steps_per_second": 1.057,
"step": 1300
},
{
"epoch": 0.966129927817879,
"grad_norm": 0.8125,
"learning_rate": 6.761642258056977e-08,
"loss": 0.8115,
"step": 1305
},
{
"epoch": 0.9698315750508977,
"grad_norm": 0.9140625,
"learning_rate": 5.3437975933985366e-08,
"loss": 0.7812,
"step": 1310
},
{
"epoch": 0.9735332222839164,
"grad_norm": 0.87890625,
"learning_rate": 4.0922000539906914e-08,
"loss": 0.7491,
"step": 1315
},
{
"epoch": 0.9772348695169351,
"grad_norm": 0.80078125,
"learning_rate": 3.0070588322079765e-08,
"loss": 0.7926,
"step": 1320
},
{
"epoch": 0.9809365167499537,
"grad_norm": 0.828125,
"learning_rate": 2.088555298867978e-08,
"loss": 0.798,
"step": 1325
},
{
"epoch": 0.9846381639829724,
"grad_norm": 0.890625,
"learning_rate": 1.3368429729168075e-08,
"loss": 0.7996,
"step": 1330
},
{
"epoch": 0.9883398112159911,
"grad_norm": 0.8671875,
"learning_rate": 7.520474957699586e-09,
"loss": 0.823,
"step": 1335
},
{
"epoch": 0.9920414584490098,
"grad_norm": 1.203125,
"learning_rate": 3.3426661031255024e-09,
"loss": 0.8011,
"step": 1340
},
{
"epoch": 0.9957431056820285,
"grad_norm": 0.93359375,
"learning_rate": 8.357014456272794e-10,
"loss": 0.7617,
"step": 1345
},
{
"epoch": 0.9994447529150472,
"grad_norm": 0.859375,
"learning_rate": 0.0,
"loss": 0.7972,
"step": 1350
},
{
"epoch": 0.9994447529150472,
"step": 1350,
"total_flos": 6.955833048956928e+17,
"train_loss": 0.8209438484686392,
"train_runtime": 9454.2368,
"train_samples_per_second": 2.286,
"train_steps_per_second": 0.143
}
],
"logging_steps": 5,
"max_steps": 1350,
"num_input_tokens_seen": 0,
"num_train_epochs": 1,
"save_steps": 500,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": false,
"should_training_stop": false
},
"attributes": {}
}
},
"total_flos": 6.955833048956928e+17,
"train_batch_size": 4,
"trial_name": null,
"trial_params": null
}
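
Note: the block below is not part of the original trainer_state.json; it is a minimal, standard-library-only sketch of how a file with this structure can be inspected. The local path "trainer_state.json" is an assumption; point it at your own checkout. It separates the training log points (entries carrying "loss") from the evaluation points (entries carrying "eval_loss"), matching the layout of "log_history" above.

# Sketch only: assumes trainer_state.json (the file shown above) is in the
# current directory. Uses only the standard library.
import json

with open("trainer_state.json") as f:
    state = json.load(f)

# Entries logged every `logging_steps` carry "loss"; eval entries carry "eval_loss".
train_logs = [e for e in state["log_history"] if "loss" in e]
eval_logs = [e for e in state["log_history"] if "eval_loss" in e]

print(f"global_step={state['global_step']}, final epoch={state['epoch']:.4f}")
print(f"train log points: {len(train_logs)}, eval points: {len(eval_logs)}")

# Print the evaluation-loss curve recorded every `eval_steps` (100) steps.
for e in eval_logs:
    print(f"step {e['step']:>5}: eval_loss={e['eval_loss']:.4f}")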