{
  "best_metric": 0.46923279762268066,
  "best_model_checkpoint": "miner_id_24/checkpoint-200",
  "epoch": 0.02185911798458932,
  "eval_steps": 50,
  "global_step": 200,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.00010929558992294661,
      "grad_norm": 0.08892586827278137,
      "learning_rate": 5e-06,
      "loss": 0.407,
      "step": 1
    },
    {
      "epoch": 0.00010929558992294661,
      "eval_loss": 0.9313396215438843,
      "eval_runtime": 1188.3423,
      "eval_samples_per_second": 12.968,
      "eval_steps_per_second": 6.484,
      "step": 1
    },
    {
      "epoch": 0.00021859117984589322,
      "grad_norm": 0.10184711962938309,
      "learning_rate": 1e-05,
      "loss": 0.3912,
      "step": 2
    },
    {
      "epoch": 0.0003278867697688398,
      "grad_norm": 0.1291767805814743,
      "learning_rate": 1.5e-05,
      "loss": 0.5147,
      "step": 3
    },
    {
      "epoch": 0.00043718235969178644,
      "grad_norm": 0.13007928431034088,
      "learning_rate": 2e-05,
      "loss": 0.4359,
      "step": 4
    },
    {
      "epoch": 0.0005464779496147331,
      "grad_norm": 0.14250938594341278,
      "learning_rate": 2.5e-05,
      "loss": 0.4897,
      "step": 5
    },
    {
      "epoch": 0.0006557735395376796,
      "grad_norm": 0.15091027319431305,
      "learning_rate": 3e-05,
      "loss": 0.5612,
      "step": 6
    },
    {
      "epoch": 0.0007650691294606263,
      "grad_norm": 0.14838960766792297,
      "learning_rate": 3.5e-05,
      "loss": 0.5994,
      "step": 7
    },
    {
      "epoch": 0.0008743647193835729,
      "grad_norm": 0.1545994132757187,
      "learning_rate": 4e-05,
      "loss": 0.6009,
      "step": 8
    },
    {
      "epoch": 0.0009836603093065195,
      "grad_norm": 0.15585535764694214,
      "learning_rate": 4.5e-05,
      "loss": 0.5009,
      "step": 9
    },
    {
      "epoch": 0.0010929558992294661,
      "grad_norm": 0.18895578384399414,
      "learning_rate": 5e-05,
      "loss": 0.6854,
      "step": 10
    },
    {
      "epoch": 0.0012022514891524127,
      "grad_norm": 0.20997057855129242,
      "learning_rate": 5.500000000000001e-05,
      "loss": 0.6106,
      "step": 11
    },
    {
      "epoch": 0.0013115470790753593,
      "grad_norm": 0.20784635841846466,
      "learning_rate": 6e-05,
      "loss": 0.7,
      "step": 12
    },
    {
      "epoch": 0.0014208426689983058,
      "grad_norm": 0.20637793838977814,
      "learning_rate": 6.500000000000001e-05,
      "loss": 0.4913,
      "step": 13
    },
    {
      "epoch": 0.0015301382589212526,
      "grad_norm": 0.18955039978027344,
      "learning_rate": 7e-05,
      "loss": 0.5854,
      "step": 14
    },
    {
      "epoch": 0.0016394338488441992,
      "grad_norm": 0.19074691832065582,
      "learning_rate": 7.500000000000001e-05,
      "loss": 0.5278,
      "step": 15
    },
    {
      "epoch": 0.0017487294387671457,
      "grad_norm": 0.1751364767551422,
      "learning_rate": 8e-05,
      "loss": 0.4145,
      "step": 16
    },
    {
      "epoch": 0.0018580250286900923,
      "grad_norm": 0.17601102590560913,
      "learning_rate": 8.5e-05,
      "loss": 0.5324,
      "step": 17
    },
    {
      "epoch": 0.001967320618613039,
      "grad_norm": 0.18196499347686768,
      "learning_rate": 9e-05,
      "loss": 0.5041,
      "step": 18
    },
    {
      "epoch": 0.0020766162085359857,
      "grad_norm": 0.18326377868652344,
      "learning_rate": 9.5e-05,
      "loss": 0.4329,
      "step": 19
    },
    {
      "epoch": 0.0021859117984589322,
      "grad_norm": 0.23563292622566223,
      "learning_rate": 0.0001,
      "loss": 0.484,
      "step": 20
    },
    {
      "epoch": 0.002295207388381879,
      "grad_norm": 0.21941961348056793,
      "learning_rate": 9.999238475781957e-05,
      "loss": 0.6038,
      "step": 21
    },
    {
      "epoch": 0.0024045029783048254,
      "grad_norm": 0.18175636231899261,
      "learning_rate": 9.99695413509548e-05,
      "loss": 0.5373,
      "step": 22
    },
    {
      "epoch": 0.002513798568227772,
      "grad_norm": 0.1632622629404068,
      "learning_rate": 9.99314767377287e-05,
      "loss": 0.4324,
      "step": 23
    },
    {
      "epoch": 0.0026230941581507185,
      "grad_norm": 0.18101082742214203,
      "learning_rate": 9.987820251299122e-05,
      "loss": 0.5009,
      "step": 24
    },
    {
      "epoch": 0.002732389748073665,
      "grad_norm": 0.36991167068481445,
      "learning_rate": 9.980973490458728e-05,
      "loss": 0.6138,
      "step": 25
    },
    {
      "epoch": 0.0028416853379966116,
      "grad_norm": 0.15769365429878235,
      "learning_rate": 9.972609476841367e-05,
      "loss": 0.4178,
      "step": 26
    },
    {
      "epoch": 0.0029509809279195586,
      "grad_norm": 0.1861845850944519,
      "learning_rate": 9.962730758206611e-05,
      "loss": 0.5209,
      "step": 27
    },
    {
      "epoch": 0.003060276517842505,
      "grad_norm": 0.17047907412052155,
      "learning_rate": 9.951340343707852e-05,
      "loss": 0.575,
      "step": 28
    },
    {
      "epoch": 0.003169572107765452,
      "grad_norm": 0.15702685713768005,
      "learning_rate": 9.938441702975689e-05,
      "loss": 0.4291,
      "step": 29
    },
    {
      "epoch": 0.0032788676976883984,
      "grad_norm": 0.2200898826122284,
      "learning_rate": 9.924038765061042e-05,
      "loss": 0.6081,
      "step": 30
    },
    {
      "epoch": 0.003388163287611345,
      "grad_norm": 0.225937157869339,
      "learning_rate": 9.908135917238321e-05,
      "loss": 0.5231,
      "step": 31
    },
    {
      "epoch": 0.0034974588775342915,
      "grad_norm": 0.2006777971982956,
      "learning_rate": 9.890738003669029e-05,
      "loss": 0.4612,
      "step": 32
    },
    {
      "epoch": 0.003606754467457238,
      "grad_norm": 0.22215096652507782,
      "learning_rate": 9.871850323926177e-05,
      "loss": 0.6789,
      "step": 33
    },
    {
      "epoch": 0.0037160500573801846,
      "grad_norm": 0.2149656116962433,
      "learning_rate": 9.851478631379982e-05,
      "loss": 0.4645,
      "step": 34
    },
    {
      "epoch": 0.003825345647303131,
      "grad_norm": 0.22449809312820435,
      "learning_rate": 9.829629131445342e-05,
      "loss": 0.5203,
      "step": 35
    },
    {
      "epoch": 0.003934641237226078,
      "grad_norm": 0.21824942529201508,
      "learning_rate": 9.806308479691595e-05,
      "loss": 0.3769,
      "step": 36
    },
    {
      "epoch": 0.004043936827149024,
      "grad_norm": 0.37824153900146484,
      "learning_rate": 9.781523779815179e-05,
      "loss": 0.433,
      "step": 37
    },
    {
      "epoch": 0.004153232417071971,
      "grad_norm": 1.0172040462493896,
      "learning_rate": 9.755282581475769e-05,
      "loss": 0.6214,
      "step": 38
    },
    {
      "epoch": 0.0042625280069949175,
      "grad_norm": 0.28730088472366333,
      "learning_rate": 9.727592877996585e-05,
      "loss": 0.6083,
      "step": 39
    },
    {
      "epoch": 0.0043718235969178645,
      "grad_norm": 0.2713054418563843,
      "learning_rate": 9.698463103929542e-05,
      "loss": 0.5272,
      "step": 40
    },
    {
      "epoch": 0.004481119186840811,
      "grad_norm": 0.40768107771873474,
      "learning_rate": 9.667902132486009e-05,
      "loss": 0.6015,
      "step": 41
    },
    {
      "epoch": 0.004590414776763758,
      "grad_norm": 0.3207384943962097,
      "learning_rate": 9.635919272833938e-05,
      "loss": 0.6781,
      "step": 42
    },
    {
      "epoch": 0.004699710366686705,
      "grad_norm": 0.3168956935405731,
      "learning_rate": 9.602524267262203e-05,
      "loss": 0.6227,
      "step": 43
    },
    {
      "epoch": 0.004809005956609651,
      "grad_norm": 0.3231648802757263,
      "learning_rate": 9.567727288213005e-05,
      "loss": 0.7363,
      "step": 44
    },
    {
      "epoch": 0.004918301546532598,
      "grad_norm": 0.32550543546676636,
      "learning_rate": 9.53153893518325e-05,
      "loss": 0.6612,
      "step": 45
    },
    {
      "epoch": 0.005027597136455544,
      "grad_norm": 0.4946689009666443,
      "learning_rate": 9.493970231495835e-05,
      "loss": 0.7281,
      "step": 46
    },
    {
      "epoch": 0.005136892726378491,
      "grad_norm": 0.41630205512046814,
      "learning_rate": 9.45503262094184e-05,
      "loss": 0.5782,
      "step": 47
    },
    {
      "epoch": 0.005246188316301437,
      "grad_norm": 0.4432646334171295,
      "learning_rate": 9.414737964294636e-05,
      "loss": 0.6469,
      "step": 48
    },
    {
      "epoch": 0.005355483906224384,
      "grad_norm": 0.7148913145065308,
      "learning_rate": 9.373098535696979e-05,
      "loss": 0.9674,
      "step": 49
    },
    {
      "epoch": 0.00546477949614733,
      "grad_norm": 0.6914791464805603,
      "learning_rate": 9.330127018922194e-05,
      "loss": 0.5803,
      "step": 50
    },
    {
      "epoch": 0.00546477949614733,
      "eval_loss": 0.5413779616355896,
      "eval_runtime": 1194.3838,
      "eval_samples_per_second": 12.902,
      "eval_steps_per_second": 6.451,
      "step": 50
    },
    {
      "epoch": 0.005574075086070277,
      "grad_norm": 0.25189682841300964,
      "learning_rate": 9.285836503510562e-05,
      "loss": 0.8563,
      "step": 51
    },
    {
      "epoch": 0.005683370675993223,
      "grad_norm": 0.21652118861675262,
      "learning_rate": 9.24024048078213e-05,
      "loss": 0.3482,
      "step": 52
    },
    {
      "epoch": 0.00579266626591617,
      "grad_norm": 0.2325475662946701,
      "learning_rate": 9.193352839727121e-05,
      "loss": 0.3885,
      "step": 53
    },
    {
      "epoch": 0.005901961855839117,
      "grad_norm": 0.20061779022216797,
      "learning_rate": 9.145187862775209e-05,
      "loss": 0.3972,
      "step": 54
    },
    {
      "epoch": 0.006011257445762063,
      "grad_norm": 0.14447438716888428,
      "learning_rate": 9.09576022144496e-05,
      "loss": 0.4096,
      "step": 55
    },
    {
      "epoch": 0.00612055303568501,
      "grad_norm": 0.12227091938257217,
      "learning_rate": 9.045084971874738e-05,
      "loss": 0.3501,
      "step": 56
    },
    {
      "epoch": 0.006229848625607957,
      "grad_norm": 0.12859652936458588,
      "learning_rate": 8.993177550236464e-05,
      "loss": 0.5619,
      "step": 57
    },
    {
      "epoch": 0.006339144215530904,
      "grad_norm": 0.12088293582201004,
      "learning_rate": 8.940053768033609e-05,
      "loss": 0.3909,
      "step": 58
    },
    {
      "epoch": 0.00644843980545385,
      "grad_norm": 0.11673817783594131,
      "learning_rate": 8.885729807284856e-05,
      "loss": 0.3682,
      "step": 59
    },
    {
      "epoch": 0.006557735395376797,
      "grad_norm": 0.11229289323091507,
      "learning_rate": 8.83022221559489e-05,
      "loss": 0.3972,
      "step": 60
    },
    {
      "epoch": 0.006667030985299743,
      "grad_norm": 0.10862474143505096,
      "learning_rate": 8.773547901113862e-05,
      "loss": 0.403,
      "step": 61
    },
    {
      "epoch": 0.00677632657522269,
      "grad_norm": 0.117657370865345,
      "learning_rate": 8.715724127386972e-05,
      "loss": 0.511,
      "step": 62
    },
    {
      "epoch": 0.006885622165145636,
      "grad_norm": 0.12006518989801407,
      "learning_rate": 8.656768508095853e-05,
      "loss": 0.3802,
      "step": 63
    },
    {
      "epoch": 0.006994917755068583,
      "grad_norm": 0.10380808264017105,
      "learning_rate": 8.596699001693255e-05,
      "loss": 0.3455,
      "step": 64
    },
    {
      "epoch": 0.00710421334499153,
      "grad_norm": 0.1075916737318039,
      "learning_rate": 8.535533905932738e-05,
      "loss": 0.4197,
      "step": 65
    },
    {
      "epoch": 0.007213508934914476,
      "grad_norm": 0.12685179710388184,
      "learning_rate": 8.473291852294987e-05,
      "loss": 0.368,
      "step": 66
    },
    {
      "epoch": 0.007322804524837423,
      "grad_norm": 0.12550005316734314,
      "learning_rate": 8.409991800312493e-05,
      "loss": 0.5413,
      "step": 67
    },
    {
      "epoch": 0.007432100114760369,
      "grad_norm": 0.12594176828861237,
      "learning_rate": 8.345653031794292e-05,
      "loss": 0.4589,
      "step": 68
    },
    {
      "epoch": 0.007541395704683316,
      "grad_norm": 0.12614470720291138,
      "learning_rate": 8.280295144952536e-05,
      "loss": 0.4523,
      "step": 69
    },
    {
      "epoch": 0.007650691294606262,
      "grad_norm": 0.11181683838367462,
      "learning_rate": 8.213938048432697e-05,
      "loss": 0.3537,
      "step": 70
    },
    {
      "epoch": 0.007759986884529209,
      "grad_norm": 0.13527949154376984,
      "learning_rate": 8.146601955249188e-05,
      "loss": 0.4204,
      "step": 71
    },
    {
      "epoch": 0.007869282474452156,
      "grad_norm": 0.13055859506130219,
      "learning_rate": 8.07830737662829e-05,
      "loss": 0.3639,
      "step": 72
    },
    {
      "epoch": 0.007978578064375103,
      "grad_norm": 0.13407902419567108,
      "learning_rate": 8.009075115760243e-05,
      "loss": 0.3647,
      "step": 73
    },
    {
      "epoch": 0.008087873654298049,
      "grad_norm": 0.13400597870349884,
      "learning_rate": 7.938926261462366e-05,
      "loss": 0.4119,
      "step": 74
    },
    {
      "epoch": 0.008197169244220997,
      "grad_norm": 0.16241265833377838,
      "learning_rate": 7.86788218175523e-05,
      "loss": 0.5364,
      "step": 75
    },
    {
      "epoch": 0.008306464834143943,
      "grad_norm": 0.13452617824077606,
      "learning_rate": 7.795964517353735e-05,
      "loss": 0.3586,
      "step": 76
    },
    {
      "epoch": 0.008415760424066889,
      "grad_norm": 0.17259980738162994,
      "learning_rate": 7.723195175075136e-05,
      "loss": 0.4718,
      "step": 77
    },
    {
      "epoch": 0.008525056013989835,
      "grad_norm": 0.1500622034072876,
      "learning_rate": 7.649596321166024e-05,
      "loss": 0.4104,
      "step": 78
    },
    {
      "epoch": 0.008634351603912783,
      "grad_norm": 0.17057955265045166,
      "learning_rate": 7.575190374550272e-05,
      "loss": 0.5204,
      "step": 79
    },
    {
      "epoch": 0.008743647193835729,
      "grad_norm": 0.15768682956695557,
      "learning_rate": 7.500000000000001e-05,
      "loss": 0.4258,
      "step": 80
    },
    {
      "epoch": 0.008852942783758675,
      "grad_norm": 0.1671830117702484,
      "learning_rate": 7.424048101231686e-05,
      "loss": 0.3407,
      "step": 81
    },
    {
      "epoch": 0.008962238373681621,
      "grad_norm": 0.17148494720458984,
      "learning_rate": 7.347357813929454e-05,
      "loss": 0.4753,
      "step": 82
    },
    {
      "epoch": 0.009071533963604569,
      "grad_norm": 0.14277490973472595,
      "learning_rate": 7.269952498697734e-05,
      "loss": 0.3654,
      "step": 83
    },
    {
      "epoch": 0.009180829553527515,
      "grad_norm": 0.17424607276916504,
      "learning_rate": 7.191855733945387e-05,
      "loss": 0.5099,
      "step": 84
    },
    {
      "epoch": 0.009290125143450461,
      "grad_norm": 0.191071555018425,
      "learning_rate": 7.113091308703498e-05,
      "loss": 0.5481,
      "step": 85
    },
    {
      "epoch": 0.00939942073337341,
      "grad_norm": 0.22640280425548553,
      "learning_rate": 7.033683215379002e-05,
      "loss": 0.5513,
      "step": 86
    },
    {
      "epoch": 0.009508716323296355,
      "grad_norm": 0.16032098233699799,
      "learning_rate": 6.953655642446368e-05,
      "loss": 0.438,
      "step": 87
    },
    {
      "epoch": 0.009618011913219301,
      "grad_norm": 0.20057062804698944,
      "learning_rate": 6.873032967079561e-05,
      "loss": 0.4187,
      "step": 88
    },
    {
      "epoch": 0.009727307503142248,
      "grad_norm": 0.25190460681915283,
      "learning_rate": 6.7918397477265e-05,
      "loss": 0.686,
      "step": 89
    },
    {
      "epoch": 0.009836603093065195,
      "grad_norm": 0.2412964105606079,
      "learning_rate": 6.710100716628344e-05,
      "loss": 0.6421,
      "step": 90
    },
    {
      "epoch": 0.009945898682988142,
      "grad_norm": 0.2152530401945114,
      "learning_rate": 6.627840772285784e-05,
      "loss": 0.3452,
      "step": 91
    },
    {
      "epoch": 0.010055194272911088,
      "grad_norm": 0.2889910936355591,
      "learning_rate": 6.545084971874738e-05,
      "loss": 0.6647,
      "step": 92
    },
    {
      "epoch": 0.010164489862834034,
      "grad_norm": 0.25227007269859314,
      "learning_rate": 6.461858523613684e-05,
      "loss": 0.5819,
      "step": 93
    },
    {
      "epoch": 0.010273785452756982,
      "grad_norm": 0.29229801893234253,
      "learning_rate": 6.378186779084995e-05,
      "loss": 0.6188,
      "step": 94
    },
    {
      "epoch": 0.010383081042679928,
      "grad_norm": 0.3063337504863739,
      "learning_rate": 6.294095225512603e-05,
      "loss": 0.6259,
      "step": 95
    },
    {
      "epoch": 0.010492376632602874,
      "grad_norm": 0.30890288949012756,
      "learning_rate": 6.209609477998338e-05,
      "loss": 0.6699,
      "step": 96
    },
    {
      "epoch": 0.010601672222525822,
      "grad_norm": 0.3483825922012329,
      "learning_rate": 6.124755271719325e-05,
      "loss": 0.6592,
      "step": 97
    },
    {
      "epoch": 0.010710967812448768,
      "grad_norm": 0.344511479139328,
      "learning_rate": 6.0395584540887963e-05,
      "loss": 0.5184,
      "step": 98
    },
    {
      "epoch": 0.010820263402371714,
      "grad_norm": 0.5086371898651123,
      "learning_rate": 5.9540449768827246e-05,
      "loss": 0.7996,
      "step": 99
    },
    {
      "epoch": 0.01092955899229466,
      "grad_norm": 0.6137983798980713,
      "learning_rate": 5.868240888334653e-05,
      "loss": 0.9048,
      "step": 100
    },
    {
      "epoch": 0.01092955899229466,
      "eval_loss": 0.487389475107193,
      "eval_runtime": 1194.2587,
      "eval_samples_per_second": 12.903,
      "eval_steps_per_second": 6.452,
      "step": 100
    },
    {
      "epoch": 0.011038854582217608,
      "grad_norm": 0.08348676562309265,
      "learning_rate": 5.782172325201155e-05,
      "loss": 1.0152,
      "step": 101
    },
    {
      "epoch": 0.011148150172140554,
      "grad_norm": 0.09717176854610443,
      "learning_rate": 5.695865504800327e-05,
      "loss": 0.4026,
      "step": 102
    },
    {
      "epoch": 0.0112574457620635,
      "grad_norm": 0.10635527223348618,
      "learning_rate": 5.6093467170257374e-05,
      "loss": 0.3539,
      "step": 103
    },
    {
      "epoch": 0.011366741351986447,
      "grad_norm": 0.11456354707479477,
      "learning_rate": 5.522642316338268e-05,
      "loss": 0.4686,
      "step": 104
    },
    {
      "epoch": 0.011476036941909394,
      "grad_norm": 0.1162208616733551,
      "learning_rate": 5.435778713738292e-05,
      "loss": 0.292,
      "step": 105
    },
    {
      "epoch": 0.01158533253183234,
      "grad_norm": 0.126183420419693,
      "learning_rate": 5.348782368720626e-05,
      "loss": 0.56,
      "step": 106
    },
    {
      "epoch": 0.011694628121755287,
      "grad_norm": 0.11952321231365204,
      "learning_rate": 5.26167978121472e-05,
      "loss": 0.3656,
      "step": 107
    },
    {
      "epoch": 0.011803923711678235,
      "grad_norm": 0.12617449462413788,
      "learning_rate": 5.174497483512506e-05,
      "loss": 0.43,
      "step": 108
    },
    {
      "epoch": 0.01191321930160118,
      "grad_norm": 0.12929527461528778,
      "learning_rate": 5.0872620321864185e-05,
      "loss": 0.4259,
      "step": 109
    },
    {
      "epoch": 0.012022514891524127,
      "grad_norm": 0.11484142392873764,
      "learning_rate": 5e-05,
      "loss": 0.3938,
      "step": 110
    },
    {
      "epoch": 0.012131810481447073,
      "grad_norm": 0.09470567852258682,
      "learning_rate": 4.912737967813583e-05,
      "loss": 0.2844,
      "step": 111
    },
    {
      "epoch": 0.01224110607137002,
      "grad_norm": 0.10879397392272949,
      "learning_rate": 4.825502516487497e-05,
      "loss": 0.3645,
      "step": 112
    },
    {
      "epoch": 0.012350401661292967,
      "grad_norm": 0.10353327542543411,
      "learning_rate": 4.738320218785281e-05,
      "loss": 0.4109,
      "step": 113
    },
    {
      "epoch": 0.012459697251215913,
      "grad_norm": 0.10917459428310394,
      "learning_rate": 4.6512176312793736e-05,
      "loss": 0.3751,
      "step": 114
    },
    {
      "epoch": 0.01256899284113886,
      "grad_norm": 0.11443409323692322,
      "learning_rate": 4.564221286261709e-05,
      "loss": 0.4385,
      "step": 115
    },
    {
      "epoch": 0.012678288431061807,
      "grad_norm": 0.12315183877944946,
      "learning_rate": 4.477357683661734e-05,
      "loss": 0.4995,
      "step": 116
    },
    {
      "epoch": 0.012787584020984753,
      "grad_norm": 0.1159660667181015,
      "learning_rate": 4.390653282974264e-05,
      "loss": 0.478,
      "step": 117
    },
    {
      "epoch": 0.0128968796109077,
      "grad_norm": 0.11784326285123825,
      "learning_rate": 4.3041344951996746e-05,
      "loss": 0.421,
      "step": 118
    },
    {
      "epoch": 0.013006175200830647,
      "grad_norm": 0.12483936548233032,
      "learning_rate": 4.2178276747988446e-05,
      "loss": 0.434,
      "step": 119
    },
    {
      "epoch": 0.013115470790753593,
      "grad_norm": 0.13112543523311615,
      "learning_rate": 4.131759111665349e-05,
      "loss": 0.4981,
      "step": 120
    },
    {
      "epoch": 0.01322476638067654,
      "grad_norm": 0.12915010750293732,
      "learning_rate": 4.045955023117276e-05,
      "loss": 0.5198,
      "step": 121
    },
    {
      "epoch": 0.013334061970599486,
      "grad_norm": 0.12868228554725647,
      "learning_rate": 3.960441545911204e-05,
      "loss": 0.4058,
      "step": 122
    },
    {
      "epoch": 0.013443357560522434,
      "grad_norm": 0.1531195491552353,
      "learning_rate": 3.875244728280676e-05,
      "loss": 0.4973,
      "step": 123
    },
    {
      "epoch": 0.01355265315044538,
      "grad_norm": 0.1404736340045929,
      "learning_rate": 3.790390522001662e-05,
      "loss": 0.4539,
      "step": 124
    },
    {
      "epoch": 0.013661948740368326,
      "grad_norm": 0.1469733566045761,
      "learning_rate": 3.705904774487396e-05,
      "loss": 0.4203,
      "step": 125
    },
    {
      "epoch": 0.013771244330291272,
      "grad_norm": 0.14171703159809113,
      "learning_rate": 3.6218132209150045e-05,
      "loss": 0.4354,
      "step": 126
    },
    {
      "epoch": 0.01388053992021422,
      "grad_norm": 0.15275560319423676,
      "learning_rate": 3.5381414763863166e-05,
      "loss": 0.5334,
      "step": 127
    },
    {
      "epoch": 0.013989835510137166,
      "grad_norm": 0.16623587906360626,
      "learning_rate": 3.4549150281252636e-05,
      "loss": 0.748,
      "step": 128
    },
    {
      "epoch": 0.014099131100060112,
      "grad_norm": 0.15390785038471222,
      "learning_rate": 3.372159227714218e-05,
      "loss": 0.4446,
      "step": 129
    },
    {
      "epoch": 0.01420842668998306,
      "grad_norm": 0.16154845058918,
      "learning_rate": 3.289899283371657e-05,
      "loss": 0.5223,
      "step": 130
    },
    {
      "epoch": 0.014317722279906006,
      "grad_norm": 0.17848701775074005,
      "learning_rate": 3.2081602522734986e-05,
      "loss": 0.4334,
      "step": 131
    },
    {
      "epoch": 0.014427017869828952,
      "grad_norm": 0.18458040058612823,
      "learning_rate": 3.12696703292044e-05,
      "loss": 0.5952,
      "step": 132
    },
    {
      "epoch": 0.014536313459751898,
      "grad_norm": 0.16637226939201355,
      "learning_rate": 3.046344357553632e-05,
      "loss": 0.4745,
      "step": 133
    },
    {
      "epoch": 0.014645609049674846,
      "grad_norm": 0.17600120604038239,
      "learning_rate": 2.9663167846209998e-05,
      "loss": 0.4086,
      "step": 134
    },
    {
      "epoch": 0.014754904639597792,
      "grad_norm": 0.20289093255996704,
      "learning_rate": 2.886908691296504e-05,
      "loss": 0.4969,
      "step": 135
    },
    {
      "epoch": 0.014864200229520739,
      "grad_norm": 0.1863214075565338,
      "learning_rate": 2.8081442660546125e-05,
      "loss": 0.4525,
      "step": 136
    },
    {
      "epoch": 0.014973495819443685,
      "grad_norm": 0.19627094268798828,
      "learning_rate": 2.7300475013022663e-05,
      "loss": 0.6143,
      "step": 137
    },
    {
      "epoch": 0.015082791409366633,
      "grad_norm": 0.1941378265619278,
      "learning_rate": 2.6526421860705473e-05,
      "loss": 0.3371,
      "step": 138
    },
    {
      "epoch": 0.015192086999289579,
      "grad_norm": 0.2424362599849701,
      "learning_rate": 2.575951898768315e-05,
      "loss": 0.6709,
      "step": 139
    },
    {
      "epoch": 0.015301382589212525,
      "grad_norm": 0.23324838280677795,
      "learning_rate": 2.500000000000001e-05,
      "loss": 0.6355,
      "step": 140
    },
    {
      "epoch": 0.015410678179135473,
      "grad_norm": 0.23842982947826385,
      "learning_rate": 2.4248096254497288e-05,
      "loss": 0.5407,
      "step": 141
    },
    {
      "epoch": 0.015519973769058419,
      "grad_norm": 0.2603646516799927,
      "learning_rate": 2.350403678833976e-05,
      "loss": 0.5855,
      "step": 142
    },
    {
      "epoch": 0.015629269358981367,
      "grad_norm": 0.329253226518631,
      "learning_rate": 2.2768048249248648e-05,
      "loss": 0.6169,
      "step": 143
    },
    {
      "epoch": 0.015738564948904313,
      "grad_norm": 0.2668045461177826,
      "learning_rate": 2.2040354826462668e-05,
      "loss": 0.52,
      "step": 144
    },
    {
      "epoch": 0.01584786053882726,
      "grad_norm": 0.28101295232772827,
      "learning_rate": 2.132117818244771e-05,
      "loss": 0.5727,
      "step": 145
    },
    {
      "epoch": 0.015957156128750205,
      "grad_norm": 0.28658443689346313,
      "learning_rate": 2.061073738537635e-05,
      "loss": 0.5835,
      "step": 146
    },
    {
      "epoch": 0.01606645171867315,
      "grad_norm": 0.3398421108722687,
      "learning_rate": 1.9909248842397584e-05,
      "loss": 0.6378,
      "step": 147
    },
    {
      "epoch": 0.016175747308596097,
      "grad_norm": 0.3835708200931549,
      "learning_rate": 1.9216926233717085e-05,
      "loss": 0.598,
      "step": 148
    },
    {
      "epoch": 0.016285042898519043,
      "grad_norm": 0.7383447885513306,
      "learning_rate": 1.8533980447508137e-05,
      "loss": 0.8202,
      "step": 149
    },
    {
      "epoch": 0.016394338488441993,
      "grad_norm": 0.6827495098114014,
      "learning_rate": 1.7860619515673033e-05,
      "loss": 0.8189,
      "step": 150
    },
    {
      "epoch": 0.016394338488441993,
      "eval_loss": 0.46980416774749756,
      "eval_runtime": 1194.1023,
      "eval_samples_per_second": 12.905,
      "eval_steps_per_second": 6.453,
      "step": 150
    },
    {
      "epoch": 0.01650363407836494,
      "grad_norm": 0.05671292915940285,
      "learning_rate": 1.7197048550474643e-05,
      "loss": 0.3498,
      "step": 151
    },
    {
      "epoch": 0.016612929668287885,
      "grad_norm": 0.07922675460577011,
      "learning_rate": 1.6543469682057106e-05,
      "loss": 0.5638,
      "step": 152
    },
    {
      "epoch": 0.01672222525821083,
      "grad_norm": 0.06751131266355515,
      "learning_rate": 1.5900081996875083e-05,
      "loss": 0.2989,
      "step": 153
    },
    {
      "epoch": 0.016831520848133778,
      "grad_norm": 0.08673208206892014,
      "learning_rate": 1.526708147705013e-05,
      "loss": 0.3434,
      "step": 154
    },
    {
      "epoch": 0.016940816438056724,
      "grad_norm": 0.10014075040817261,
      "learning_rate": 1.4644660940672627e-05,
      "loss": 0.3313,
      "step": 155
    },
    {
      "epoch": 0.01705011202797967,
      "grad_norm": 0.0925699844956398,
      "learning_rate": 1.4033009983067452e-05,
      "loss": 0.3351,
      "step": 156
    },
    {
      "epoch": 0.017159407617902616,
      "grad_norm": 0.10073317587375641,
      "learning_rate": 1.3432314919041478e-05,
      "loss": 0.4239,
      "step": 157
    },
    {
      "epoch": 0.017268703207825566,
      "grad_norm": 0.09681018441915512,
      "learning_rate": 1.2842758726130283e-05,
      "loss": 0.3196,
      "step": 158
    },
    {
      "epoch": 0.017377998797748512,
      "grad_norm": 0.10117655992507935,
      "learning_rate": 1.22645209888614e-05,
      "loss": 0.3483,
      "step": 159
    },
    {
      "epoch": 0.017487294387671458,
      "grad_norm": 0.10316099971532822,
      "learning_rate": 1.1697777844051105e-05,
      "loss": 0.3765,
      "step": 160
    },
    {
      "epoch": 0.017596589977594404,
      "grad_norm": 0.10998133569955826,
      "learning_rate": 1.1142701927151456e-05,
      "loss": 0.3845,
      "step": 161
    },
    {
      "epoch": 0.01770588556751735,
      "grad_norm": 0.10539877414703369,
      "learning_rate": 1.0599462319663905e-05,
      "loss": 0.4918,
      "step": 162
    },
    {
      "epoch": 0.017815181157440296,
      "grad_norm": 0.1355060189962387,
      "learning_rate": 1.006822449763537e-05,
      "loss": 0.472,
      "step": 163
    },
    {
      "epoch": 0.017924476747363242,
      "grad_norm": 0.12122693657875061,
      "learning_rate": 9.549150281252633e-06,
      "loss": 0.3635,
      "step": 164
    },
    {
      "epoch": 0.018033772337286192,
      "grad_norm": 0.11582615971565247,
      "learning_rate": 9.042397785550405e-06,
      "loss": 0.5667,
      "step": 165
    },
    {
      "epoch": 0.018143067927209138,
      "grad_norm": 0.12401402741670609,
      "learning_rate": 8.548121372247918e-06,
      "loss": 0.3543,
      "step": 166
    },
    {
      "epoch": 0.018252363517132084,
      "grad_norm": 0.12098807841539383,
      "learning_rate": 8.066471602728803e-06,
      "loss": 0.4069,
      "step": 167
    },
    {
      "epoch": 0.01836165910705503,
      "grad_norm": 0.12497370690107346,
      "learning_rate": 7.597595192178702e-06,
      "loss": 0.4353,
      "step": 168
    },
    {
      "epoch": 0.018470954696977977,
      "grad_norm": 0.11947774142026901,
      "learning_rate": 7.1416349648943894e-06,
      "loss": 0.4151,
      "step": 169
    },
    {
      "epoch": 0.018580250286900923,
      "grad_norm": 0.12466960400342941,
      "learning_rate": 6.698729810778065e-06,
      "loss": 0.3535,
      "step": 170
    },
    {
      "epoch": 0.01868954587682387,
      "grad_norm": 0.14808349311351776,
      "learning_rate": 6.269014643030213e-06,
      "loss": 0.531,
      "step": 171
    },
    {
      "epoch": 0.01879884146674682,
      "grad_norm": 0.13079893589019775,
      "learning_rate": 5.852620357053651e-06,
      "loss": 0.3183,
      "step": 172
    },
    {
      "epoch": 0.018908137056669765,
      "grad_norm": 0.153338223695755,
      "learning_rate": 5.449673790581611e-06,
      "loss": 0.4308,
      "step": 173
    },
    {
      "epoch": 0.01901743264659271,
      "grad_norm": 0.1578577756881714,
      "learning_rate": 5.060297685041659e-06,
      "loss": 0.5877,
      "step": 174
    },
    {
      "epoch": 0.019126728236515657,
      "grad_norm": 0.1331440508365631,
      "learning_rate": 4.684610648167503e-06,
      "loss": 0.3637,
      "step": 175
    },
    {
      "epoch": 0.019236023826438603,
      "grad_norm": 0.1452208012342453,
      "learning_rate": 4.322727117869951e-06,
      "loss": 0.4592,
      "step": 176
    },
    {
      "epoch": 0.01934531941636155,
      "grad_norm": 0.1602085381746292,
      "learning_rate": 3.974757327377981e-06,
      "loss": 0.5317,
      "step": 177
    },
    {
      "epoch": 0.019454615006284495,
      "grad_norm": 0.15811043977737427,
      "learning_rate": 3.6408072716606346e-06,
      "loss": 0.5555,
      "step": 178
    },
    {
      "epoch": 0.01956391059620744,
      "grad_norm": 0.1520129144191742,
      "learning_rate": 3.3209786751399187e-06,
      "loss": 0.4347,
      "step": 179
    },
    {
      "epoch": 0.01967320618613039,
      "grad_norm": 0.17405475676059723,
      "learning_rate": 3.0153689607045845e-06,
      "loss": 0.4352,
      "step": 180
    },
    {
      "epoch": 0.019782501776053337,
      "grad_norm": 0.20877398550510406,
      "learning_rate": 2.724071220034158e-06,
      "loss": 0.5259,
      "step": 181
    },
    {
      "epoch": 0.019891797365976283,
      "grad_norm": 0.1546728014945984,
      "learning_rate": 2.4471741852423237e-06,
      "loss": 0.4277,
      "step": 182
    },
    {
      "epoch": 0.02000109295589923,
      "grad_norm": 0.207755446434021,
      "learning_rate": 2.1847622018482283e-06,
      "loss": 0.5322,
      "step": 183
    },
    {
      "epoch": 0.020110388545822176,
      "grad_norm": 0.17791689932346344,
      "learning_rate": 1.9369152030840556e-06,
      "loss": 0.3993,
      "step": 184
    },
    {
      "epoch": 0.02021968413574512,
      "grad_norm": 0.1971198171377182,
      "learning_rate": 1.70370868554659e-06,
      "loss": 0.4922,
      "step": 185
    },
    {
      "epoch": 0.020328979725668068,
      "grad_norm": 0.18837548792362213,
      "learning_rate": 1.4852136862001764e-06,
      "loss": 0.669,
      "step": 186
    },
    {
      "epoch": 0.020438275315591017,
      "grad_norm": 0.21775005757808685,
      "learning_rate": 1.2814967607382432e-06,
      "loss": 0.5298,
      "step": 187
    },
    {
      "epoch": 0.020547570905513964,
      "grad_norm": 0.23758465051651,
      "learning_rate": 1.0926199633097157e-06,
      "loss": 0.574,
      "step": 188
    },
    {
      "epoch": 0.02065686649543691,
      "grad_norm": 0.23550760746002197,
      "learning_rate": 9.186408276168013e-07,
      "loss": 0.6356,
      "step": 189
    },
    {
      "epoch": 0.020766162085359856,
      "grad_norm": 0.2456381767988205,
      "learning_rate": 7.596123493895991e-07,
      "loss": 0.5453,
      "step": 190
    },
    {
      "epoch": 0.020875457675282802,
      "grad_norm": 0.2014353722333908,
      "learning_rate": 6.15582970243117e-07,
      "loss": 0.4603,
      "step": 191
    },
    {
      "epoch": 0.020984753265205748,
      "grad_norm": 0.2850938141345978,
      "learning_rate": 4.865965629214819e-07,
      "loss": 0.6694,
      "step": 192
    },
    {
      "epoch": 0.021094048855128694,
      "grad_norm": 0.2577400803565979,
      "learning_rate": 3.7269241793390085e-07,
      "loss": 0.6761,
      "step": 193
    },
    {
      "epoch": 0.021203344445051644,
      "grad_norm": 0.2811027467250824,
      "learning_rate": 2.7390523158633554e-07,
      "loss": 0.621,
      "step": 194
    },
    {
      "epoch": 0.02131264003497459,
      "grad_norm": 0.2799067199230194,
      "learning_rate": 1.9026509541272275e-07,
      "loss": 0.6045,
      "step": 195
    },
    {
      "epoch": 0.021421935624897536,
      "grad_norm": 0.40083521604537964,
      "learning_rate": 1.2179748700879012e-07,
      "loss": 0.8314,
      "step": 196
    },
    {
      "epoch": 0.021531231214820482,
      "grad_norm": 0.48128101229667664,
      "learning_rate": 6.852326227130834e-08,
      "loss": 0.5519,
      "step": 197
    },
    {
      "epoch": 0.02164052680474343,
      "grad_norm": 0.3779641389846802,
      "learning_rate": 3.04586490452119e-08,
      "loss": 0.6474,
      "step": 198
    },
    {
      "epoch": 0.021749822394666374,
      "grad_norm": 0.38349881768226624,
      "learning_rate": 7.615242180436522e-09,
      "loss": 0.6104,
      "step": 199
    },
    {
      "epoch": 0.02185911798458932,
      "grad_norm": 0.6852728724479675,
      "learning_rate": 0.0,
      "loss": 0.8279,
      "step": 200
    },
    {
      "epoch": 0.02185911798458932,
      "eval_loss": 0.46923279762268066,
      "eval_runtime": 1195.4856,
      "eval_samples_per_second": 12.89,
      "eval_steps_per_second": 6.445,
      "step": 200
    }
  ],
  "logging_steps": 1,
  "max_steps": 200,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 50,
  "stateful_callbacks": {
    "EarlyStoppingCallback": {
      "args": {
        "early_stopping_patience": 5,
        "early_stopping_threshold": 0.0
      },
      "attributes": {
        "early_stopping_patience_counter": 0
      }
    },
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 2.6780376395415552e+17,
  "train_batch_size": 8,
  "trial_name": null,
  "trial_params": null
}