{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 0.09771827820393805,
  "eval_steps": 500,
  "global_step": 10000,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.0009771827820393806,
      "grad_norm": 0.5417118072509766,
      "learning_rate": 4.995602247740044e-05,
      "loss": 1.378,
      "step": 100
    },
    {
      "epoch": 0.001954365564078761,
      "grad_norm": 0.6493918895721436,
      "learning_rate": 4.990715856340093e-05,
      "loss": 1.3304,
      "step": 200
    },
    {
      "epoch": 0.0029315483461181415,
      "grad_norm": 0.9062462449073792,
      "learning_rate": 4.9858294649401425e-05,
      "loss": 1.3284,
      "step": 300
    },
    {
      "epoch": 0.003908731128157522,
      "grad_norm": 0.750052273273468,
      "learning_rate": 4.9809430735401906e-05,
      "loss": 1.3166,
      "step": 400
    },
    {
      "epoch": 0.004885913910196903,
      "grad_norm": 0.6602022051811218,
      "learning_rate": 4.97605668214024e-05,
      "loss": 1.3166,
      "step": 500
    },
    {
      "epoch": 0.005863096692236283,
      "grad_norm": 0.4193927049636841,
      "learning_rate": 4.971170290740288e-05,
      "loss": 1.3098,
      "step": 600
    },
    {
      "epoch": 0.006840279474275663,
      "grad_norm": 0.6095415949821472,
      "learning_rate": 4.966283899340338e-05,
      "loss": 1.3103,
      "step": 700
    },
    {
      "epoch": 0.007817462256315045,
      "grad_norm": 0.9943467378616333,
      "learning_rate": 4.9613975079403865e-05,
      "loss": 1.3096,
      "step": 800
    },
    {
      "epoch": 0.008794645038354424,
      "grad_norm": 1.2263585329055786,
      "learning_rate": 4.9565111165404346e-05,
      "loss": 1.3067,
      "step": 900
    },
    {
      "epoch": 0.009771827820393805,
      "grad_norm": 0.7198677659034729,
      "learning_rate": 4.951624725140484e-05,
      "loss": 1.3041,
      "step": 1000
    },
    {
      "epoch": 0.010749010602433185,
      "grad_norm": 0.7370775938034058,
      "learning_rate": 4.946738333740533e-05,
      "loss": 1.302,
      "step": 1100
    },
    {
      "epoch": 0.011726193384472566,
      "grad_norm": 0.5109437704086304,
      "learning_rate": 4.941851942340582e-05,
      "loss": 1.3089,
      "step": 1200
    },
    {
      "epoch": 0.012703376166511945,
      "grad_norm": 0.1879555583000183,
      "learning_rate": 4.9369655509406305e-05,
      "loss": 1.3043,
      "step": 1300
    },
    {
      "epoch": 0.013680558948551327,
      "grad_norm": 0.951046884059906,
      "learning_rate": 4.932079159540679e-05,
      "loss": 1.3098,
      "step": 1400
    },
    {
      "epoch": 0.014657741730590706,
      "grad_norm": 0.2478829026222229,
      "learning_rate": 4.927192768140728e-05,
      "loss": 1.3026,
      "step": 1500
    },
    {
      "epoch": 0.01563492451263009,
      "grad_norm": 0.5585843324661255,
      "learning_rate": 4.9223063767407776e-05,
      "loss": 1.3014,
      "step": 1600
    },
    {
      "epoch": 0.016612107294669467,
      "grad_norm": 0.48532453179359436,
      "learning_rate": 4.917419985340826e-05,
      "loss": 1.2981,
      "step": 1700
    },
    {
      "epoch": 0.017589290076708848,
      "grad_norm": 0.4233573079109192,
      "learning_rate": 4.912533593940875e-05,
      "loss": 1.2992,
      "step": 1800
    },
    {
      "epoch": 0.01856647285874823,
      "grad_norm": 0.3272475600242615,
      "learning_rate": 4.9076472025409234e-05,
      "loss": 1.292,
      "step": 1900
    },
    {
      "epoch": 0.01954365564078761,
      "grad_norm": 0.5299385786056519,
      "learning_rate": 4.902760811140973e-05,
      "loss": 1.2963,
      "step": 2000
    },
    {
      "epoch": 0.02052083842282699,
      "grad_norm": 0.1614024043083191,
      "learning_rate": 4.8978744197410216e-05,
      "loss": 1.2945,
      "step": 2100
    },
    {
      "epoch": 0.02149802120486637,
      "grad_norm": 0.6039963960647583,
      "learning_rate": 4.8929880283410705e-05,
      "loss": 1.2913,
      "step": 2200
    },
    {
      "epoch": 0.02247520398690575,
      "grad_norm": 0.5772804021835327,
      "learning_rate": 4.888101636941119e-05,
      "loss": 1.2895,
      "step": 2300
    },
    {
      "epoch": 0.023452386768945132,
      "grad_norm": 0.7489622235298157,
      "learning_rate": 4.883215245541168e-05,
      "loss": 1.2847,
      "step": 2400
    },
    {
      "epoch": 0.024429569550984513,
      "grad_norm": 0.30208253860473633,
      "learning_rate": 4.878328854141217e-05,
      "loss": 1.2924,
      "step": 2500
    },
    {
      "epoch": 0.02540675233302389,
      "grad_norm": 0.36944472789764404,
      "learning_rate": 4.873442462741266e-05,
      "loss": 1.2916,
      "step": 2600
    },
    {
      "epoch": 0.026383935115063272,
      "grad_norm": 0.3268676698207855,
      "learning_rate": 4.8685560713413145e-05,
      "loss": 1.2893,
      "step": 2700
    },
    {
      "epoch": 0.027361117897102653,
      "grad_norm": 0.2795974910259247,
      "learning_rate": 4.863669679941363e-05,
      "loss": 1.282,
      "step": 2800
    },
    {
      "epoch": 0.028338300679142035,
      "grad_norm": 0.36298853158950806,
      "learning_rate": 4.858783288541413e-05,
      "loss": 1.2832,
      "step": 2900
    },
    {
      "epoch": 0.029315483461181412,
      "grad_norm": 0.5242423415184021,
      "learning_rate": 4.853896897141461e-05,
      "loss": 1.2819,
      "step": 3000
    },
    {
      "epoch": 0.030292666243220794,
      "grad_norm": 0.25340864062309265,
      "learning_rate": 4.8490105057415104e-05,
      "loss": 1.2809,
      "step": 3100
    },
    {
      "epoch": 0.03126984902526018,
      "grad_norm": 0.7241976261138916,
      "learning_rate": 4.844124114341559e-05,
      "loss": 1.2802,
      "step": 3200
    },
    {
      "epoch": 0.032247031807299556,
      "grad_norm": 0.5154001712799072,
      "learning_rate": 4.839237722941608e-05,
      "loss": 1.2748,
      "step": 3300
    },
    {
      "epoch": 0.033224214589338934,
      "grad_norm": 0.5323473811149597,
      "learning_rate": 4.834351331541657e-05,
      "loss": 1.284,
      "step": 3400
    },
    {
      "epoch": 0.03420139737137832,
      "grad_norm": 0.3947168290615082,
      "learning_rate": 4.8294649401417056e-05,
      "loss": 1.276,
      "step": 3500
    },
    {
      "epoch": 0.035178580153417696,
      "grad_norm": 0.4776057302951813,
      "learning_rate": 4.8245785487417544e-05,
      "loss": 1.2783,
      "step": 3600
    },
    {
      "epoch": 0.036155762935457074,
      "grad_norm": 0.4884164035320282,
      "learning_rate": 4.819692157341804e-05,
      "loss": 1.2745,
      "step": 3700
    },
    {
      "epoch": 0.03713294571749646,
      "grad_norm": 0.5210428833961487,
      "learning_rate": 4.814805765941852e-05,
      "loss": 1.2707,
      "step": 3800
    },
    {
      "epoch": 0.038110128499535836,
      "grad_norm": 0.46214359998703003,
      "learning_rate": 4.809919374541901e-05,
      "loss": 1.2727,
      "step": 3900
    },
    {
      "epoch": 0.03908731128157522,
      "grad_norm": 0.2656782865524292,
      "learning_rate": 4.8050329831419496e-05,
      "loss": 1.2694,
      "step": 4000
    },
    {
      "epoch": 0.0400644940636146,
      "grad_norm": 0.4923059940338135,
      "learning_rate": 4.8001465917419985e-05,
      "loss": 1.2665,
      "step": 4100
    },
    {
      "epoch": 0.04104167684565398,
      "grad_norm": 0.92928147315979,
      "learning_rate": 4.795260200342048e-05,
      "loss": 1.2627,
      "step": 4200
    },
    {
      "epoch": 0.04201885962769336,
      "grad_norm": 1.0651229619979858,
      "learning_rate": 4.790373808942096e-05,
      "loss": 1.2623,
      "step": 4300
    },
    {
      "epoch": 0.04299604240973274,
      "grad_norm": 0.9612557888031006,
      "learning_rate": 4.7854874175421456e-05,
      "loss": 1.2482,
      "step": 4400
    },
    {
      "epoch": 0.043973225191772124,
      "grad_norm": 1.0120874643325806,
      "learning_rate": 4.7806010261421944e-05,
      "loss": 1.2589,
      "step": 4500
    },
    {
      "epoch": 0.0449504079738115,
      "grad_norm": 0.6250020861625671,
      "learning_rate": 4.775714634742243e-05,
      "loss": 1.2499,
      "step": 4600
    },
    {
      "epoch": 0.04592759075585088,
      "grad_norm": 0.2850038707256317,
      "learning_rate": 4.770828243342292e-05,
      "loss": 1.2446,
      "step": 4700
    },
    {
      "epoch": 0.046904773537890264,
      "grad_norm": 1.2032625675201416,
      "learning_rate": 4.765941851942341e-05,
      "loss": 1.2238,
      "step": 4800
    },
    {
      "epoch": 0.04788195631992964,
      "grad_norm": 0.42024949193000793,
      "learning_rate": 4.7610554605423896e-05,
      "loss": 1.2255,
      "step": 4900
    },
    {
      "epoch": 0.048859139101969026,
      "grad_norm": 0.7451406121253967,
      "learning_rate": 4.756169069142439e-05,
      "loss": 1.2071,
      "step": 5000
    },
    {
      "epoch": 0.049836321884008404,
      "grad_norm": 0.8735096454620361,
      "learning_rate": 4.751282677742487e-05,
      "loss": 1.2126,
      "step": 5100
    },
    {
      "epoch": 0.05081350466604778,
      "grad_norm": 0.73675137758255,
      "learning_rate": 4.746396286342537e-05,
      "loss": 1.2036,
      "step": 5200
    },
    {
      "epoch": 0.051790687448087167,
      "grad_norm": 0.6540606617927551,
      "learning_rate": 4.741509894942585e-05,
      "loss": 1.1825,
      "step": 5300
    },
    {
      "epoch": 0.052767870230126544,
      "grad_norm": 0.825066864490509,
      "learning_rate": 4.7366235035426336e-05,
      "loss": 1.1655,
      "step": 5400
    },
    {
      "epoch": 0.05374505301216593,
      "grad_norm": 1.6421219110488892,
      "learning_rate": 4.731737112142683e-05,
      "loss": 1.1716,
      "step": 5500
    },
    {
      "epoch": 0.05472223579420531,
      "grad_norm": 1.0644057989120483,
      "learning_rate": 4.726850720742731e-05,
      "loss": 1.1384,
      "step": 5600
    },
    {
      "epoch": 0.055699418576244684,
      "grad_norm": 1.1611616611480713,
      "learning_rate": 4.721964329342781e-05,
      "loss": 1.1499,
      "step": 5700
    },
    {
      "epoch": 0.05667660135828407,
      "grad_norm": 2.0900723934173584,
      "learning_rate": 4.7170779379428295e-05,
      "loss": 1.1323,
      "step": 5800
    },
    {
      "epoch": 0.05765378414032345,
      "grad_norm": 1.0580404996871948,
      "learning_rate": 4.712191546542878e-05,
      "loss": 1.112,
      "step": 5900
    },
    {
      "epoch": 0.058630966922362825,
      "grad_norm": 0.6299407482147217,
      "learning_rate": 4.707305155142927e-05,
      "loss": 1.104,
      "step": 6000
    },
    {
      "epoch": 0.05960814970440221,
      "grad_norm": 0.6816271543502808,
      "learning_rate": 4.702418763742976e-05,
      "loss": 1.1128,
      "step": 6100
    },
    {
      "epoch": 0.06058533248644159,
      "grad_norm": 0.654796302318573,
      "learning_rate": 4.697532372343025e-05,
      "loss": 1.0942,
      "step": 6200
    },
    {
      "epoch": 0.06156251526848097,
      "grad_norm": 1.0433884859085083,
      "learning_rate": 4.692645980943074e-05,
      "loss": 1.0862,
      "step": 6300
    },
    {
      "epoch": 0.06253969805052036,
      "grad_norm": 0.6256537437438965,
      "learning_rate": 4.6877595895431224e-05,
      "loss": 1.081,
      "step": 6400
    },
    {
      "epoch": 0.06351688083255973,
      "grad_norm": 0.8173975348472595,
      "learning_rate": 4.682873198143172e-05,
      "loss": 1.0767,
      "step": 6500
    },
    {
      "epoch": 0.06449406361459911,
      "grad_norm": 0.7856473922729492,
      "learning_rate": 4.6779868067432206e-05,
      "loss": 1.0767,
      "step": 6600
    },
    {
      "epoch": 0.0654712463966385,
      "grad_norm": 0.6337741017341614,
      "learning_rate": 4.6731004153432695e-05,
      "loss": 1.0829,
      "step": 6700
    },
    {
      "epoch": 0.06644842917867787,
      "grad_norm": 0.5813809037208557,
      "learning_rate": 4.668214023943318e-05,
      "loss": 1.0571,
      "step": 6800
    },
    {
      "epoch": 0.06742561196071725,
      "grad_norm": 0.4155445992946625,
      "learning_rate": 4.6633276325433664e-05,
      "loss": 1.0707,
      "step": 6900
    },
    {
      "epoch": 0.06840279474275664,
      "grad_norm": 0.6730567812919617,
      "learning_rate": 4.658441241143416e-05,
      "loss": 1.0477,
      "step": 7000
    },
    {
      "epoch": 0.06937997752479601,
      "grad_norm": 0.8348300457000732,
      "learning_rate": 4.653554849743465e-05,
      "loss": 1.0644,
      "step": 7100
    },
    {
      "epoch": 0.07035716030683539,
      "grad_norm": 2.2414326667785645,
      "learning_rate": 4.6486684583435135e-05,
      "loss": 1.0577,
      "step": 7200
    },
    {
      "epoch": 0.07133434308887478,
      "grad_norm": 1.6573911905288696,
      "learning_rate": 4.643782066943562e-05,
      "loss": 1.0836,
      "step": 7300
    },
    {
      "epoch": 0.07231152587091415,
      "grad_norm": 0.5690039396286011,
      "learning_rate": 4.638895675543611e-05,
      "loss": 1.0541,
      "step": 7400
    },
    {
      "epoch": 0.07328870865295353,
      "grad_norm": 0.527215301990509,
      "learning_rate": 4.63400928414366e-05,
      "loss": 1.0164,
      "step": 7500
    },
    {
      "epoch": 0.07426589143499292,
      "grad_norm": 0.7997362613677979,
      "learning_rate": 4.6291228927437094e-05,
      "loss": 1.0447,
      "step": 7600
    },
    {
      "epoch": 0.0752430742170323,
      "grad_norm": 2.257143259048462,
      "learning_rate": 4.6242365013437575e-05,
      "loss": 1.0365,
      "step": 7700
    },
    {
      "epoch": 0.07622025699907167,
      "grad_norm": 0.9132490158081055,
      "learning_rate": 4.619350109943807e-05,
      "loss": 1.0498,
      "step": 7800
    },
    {
      "epoch": 0.07719743978111106,
      "grad_norm": 0.5229859948158264,
      "learning_rate": 4.614463718543856e-05,
      "loss": 1.0342,
      "step": 7900
    },
    {
      "epoch": 0.07817462256315044,
      "grad_norm": 0.6948792338371277,
      "learning_rate": 4.6095773271439046e-05,
      "loss": 1.0325,
      "step": 8000
    },
    {
      "epoch": 0.07915180534518981,
      "grad_norm": 0.8526360988616943,
      "learning_rate": 4.6046909357439534e-05,
      "loss": 1.0183,
      "step": 8100
    },
    {
      "epoch": 0.0801289881272292,
      "grad_norm": 1.1457374095916748,
      "learning_rate": 4.599804544344002e-05,
      "loss": 1.0243,
      "step": 8200
    },
    {
      "epoch": 0.08110617090926858,
      "grad_norm": 0.9335997700691223,
      "learning_rate": 4.594918152944051e-05,
      "loss": 1.046,
      "step": 8300
    },
    {
      "epoch": 0.08208335369130795,
      "grad_norm": 0.8367229700088501,
      "learning_rate": 4.5900317615441e-05,
      "loss": 1.0176,
      "step": 8400
    },
    {
      "epoch": 0.08306053647334734,
      "grad_norm": 3.7648801803588867,
      "learning_rate": 4.5851453701441486e-05,
      "loss": 1.0047,
      "step": 8500
    },
    {
      "epoch": 0.08403771925538672,
      "grad_norm": 0.5877612829208374,
      "learning_rate": 4.5802589787441975e-05,
      "loss": 1.0346,
      "step": 8600
    },
    {
      "epoch": 0.08501490203742611,
      "grad_norm": 0.5145990252494812,
      "learning_rate": 4.575372587344246e-05,
      "loss": 1.0268,
      "step": 8700
    },
    {
      "epoch": 0.08599208481946548,
      "grad_norm": 0.9310688376426697,
      "learning_rate": 4.570486195944295e-05,
      "loss": 1.0109,
      "step": 8800
    },
    {
      "epoch": 0.08696926760150486,
      "grad_norm": 0.5182886719703674,
      "learning_rate": 4.5655998045443445e-05,
      "loss": 1.0117,
      "step": 8900
    },
    {
      "epoch": 0.08794645038354425,
      "grad_norm": 0.4319695234298706,
      "learning_rate": 4.560713413144393e-05,
      "loss": 1.0053,
      "step": 9000
    },
    {
      "epoch": 0.08892363316558362,
      "grad_norm": 4.307732582092285,
      "learning_rate": 4.555827021744442e-05,
      "loss": 1.0151,
      "step": 9100
    },
    {
      "epoch": 0.089900815947623,
      "grad_norm": 0.46516236662864685,
      "learning_rate": 4.550940630344491e-05,
      "loss": 0.9945,
      "step": 9200
    },
    {
      "epoch": 0.09087799872966239,
      "grad_norm": 1.2372952699661255,
      "learning_rate": 4.54605423894454e-05,
      "loss": 0.9865,
      "step": 9300
    },
    {
      "epoch": 0.09185518151170176,
      "grad_norm": 0.7494595646858215,
      "learning_rate": 4.5411678475445886e-05,
      "loss": 0.9824,
      "step": 9400
    },
    {
      "epoch": 0.09283236429374114,
      "grad_norm": 0.5540333390235901,
      "learning_rate": 4.5362814561446374e-05,
      "loss": 1.0132,
      "step": 9500
    },
    {
      "epoch": 0.09380954707578053,
      "grad_norm": 0.48533427715301514,
      "learning_rate": 4.531395064744686e-05,
      "loss": 1.0173,
      "step": 9600
    },
    {
      "epoch": 0.0947867298578199,
      "grad_norm": 0.4972572922706604,
      "learning_rate": 4.526508673344736e-05,
      "loss": 1.0078,
      "step": 9700
    },
    {
      "epoch": 0.09576391263985928,
      "grad_norm": 0.6748878955841064,
      "learning_rate": 4.521622281944784e-05,
      "loss": 1.0172,
      "step": 9800
    },
    {
      "epoch": 0.09674109542189867,
      "grad_norm": 0.5261876583099365,
      "learning_rate": 4.5167358905448326e-05,
      "loss": 1.0189,
      "step": 9900
    },
    {
      "epoch": 0.09771827820393805,
      "grad_norm": 0.4164600670337677,
      "learning_rate": 4.5118494991448814e-05,
      "loss": 0.9978,
      "step": 10000
    }
  ],
  "logging_steps": 100,
  "max_steps": 102335,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 500,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": false
      },
      "attributes": {}
    }
  },
  "total_flos": 1.254292317011968e+17,
  "train_batch_size": 12,
  "trial_name": null,
  "trial_params": null
}