{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 0.9986928104575163,
  "eval_steps": 500,
  "global_step": 191,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.01,
      "grad_norm": 7.632462021909978,
      "learning_rate": 2.5e-08,
      "logits/chosen": -2.7965219020843506,
      "logits/rejected": -2.8134026527404785,
      "logps/chosen": -108.48452758789062,
      "logps/rejected": -117.30828094482422,
      "loss": 0.6931,
      "rewards/accuracies": 0.0,
      "rewards/chosen": 0.0,
      "rewards/margins": 0.0,
      "rewards/rejected": 0.0,
      "step": 1
    },
    {
      "epoch": 0.03,
      "grad_norm": 8.098615692629847,
      "learning_rate": 1.25e-07,
      "logits/chosen": -2.8768346309661865,
      "logits/rejected": -2.8589253425598145,
      "logps/chosen": -214.42208862304688,
      "logps/rejected": -232.5906982421875,
      "loss": 0.6932,
      "rewards/accuracies": 0.3203125,
      "rewards/chosen": -0.00048091242206282914,
      "rewards/margins": -0.0004506283439695835,
      "rewards/rejected": -3.0284067179309204e-05,
      "step": 5
    },
    {
      "epoch": 0.05,
      "grad_norm": 7.725942549672666,
      "learning_rate": 2.5e-07,
      "logits/chosen": -2.829338550567627,
      "logits/rejected": -2.8378331661224365,
      "logps/chosen": -167.3665313720703,
      "logps/rejected": -171.85272216796875,
      "loss": 0.6929,
      "rewards/accuracies": 0.53125,
      "rewards/chosen": 0.0015454485546797514,
      "rewards/margins": 0.0004239835252519697,
      "rewards/rejected": 0.0011214648839086294,
      "step": 10
    },
    {
      "epoch": 0.08,
      "grad_norm": 6.9736886381721375,
      "learning_rate": 3.75e-07,
      "logits/chosen": -2.8761913776397705,
      "logits/rejected": -2.8846983909606934,
      "logps/chosen": -218.0037841796875,
      "logps/rejected": -215.8222198486328,
      "loss": 0.6917,
      "rewards/accuracies": 0.6000000238418579,
      "rewards/chosen": 0.008896902203559875,
      "rewards/margins": 0.002796543762087822,
      "rewards/rejected": 0.006100359372794628,
      "step": 15
    },
    {
      "epoch": 0.1,
      "grad_norm": 7.63640300699799,
      "learning_rate": 5e-07,
      "logits/chosen": -2.80039644241333,
      "logits/rejected": -2.7947278022766113,
      "logps/chosen": -214.61083984375,
      "logps/rejected": -232.8736114501953,
      "loss": 0.6887,
      "rewards/accuracies": 0.6812499761581421,
      "rewards/chosen": 0.032507818192243576,
      "rewards/margins": 0.011343271471560001,
      "rewards/rejected": 0.0211645495146513,
      "step": 20
    },
    {
      "epoch": 0.13,
      "grad_norm": 7.959169453434996,
      "learning_rate": 4.989459720247269e-07,
      "logits/chosen": -2.8623499870300293,
      "logits/rejected": -2.8744444847106934,
      "logps/chosen": -213.915283203125,
      "logps/rejected": -214.1200408935547,
      "loss": 0.6815,
      "rewards/accuracies": 0.637499988079071,
      "rewards/chosen": 0.0523659810423851,
      "rewards/margins": 0.024099161848425865,
      "rewards/rejected": 0.028266817331314087,
      "step": 25
    },
    {
      "epoch": 0.16,
      "grad_norm": 8.857615760241181,
      "learning_rate": 4.957927758986888e-07,
      "logits/chosen": -2.748969554901123,
      "logits/rejected": -2.755592107772827,
      "logps/chosen": -184.64889526367188,
      "logps/rejected": -195.65170288085938,
      "loss": 0.6713,
      "rewards/accuracies": 0.706250011920929,
      "rewards/chosen": 0.05236022546887398,
      "rewards/margins": 0.05103861540555954,
      "rewards/rejected": 0.0013216044753789902,
      "step": 30
    },
    {
      "epoch": 0.18,
      "grad_norm": 9.787510748887732,
      "learning_rate": 4.905670000773126e-07,
      "logits/chosen": -2.75258207321167,
      "logits/rejected": -2.7473061084747314,
      "logps/chosen": -210.5746612548828,
      "logps/rejected": -210.62503051757812,
      "loss": 0.6628,
      "rewards/accuracies": 0.625,
      "rewards/chosen": -0.05218905210494995,
      "rewards/margins": 0.06384799629449844,
      "rewards/rejected": -0.1160370483994484,
      "step": 35
    },
    {
      "epoch": 0.21,
      "grad_norm": 8.777368831216146,
      "learning_rate": 4.833127094718643e-07,
      "logits/chosen": -2.775714159011841,
      "logits/rejected": -2.7923226356506348,
      "logps/chosen": -239.29794311523438,
      "logps/rejected": -238.61569213867188,
      "loss": 0.6543,
      "rewards/accuracies": 0.706250011920929,
      "rewards/chosen": -0.011378347873687744,
      "rewards/margins": 0.13017494976520538,
      "rewards/rejected": -0.14155328273773193,
      "step": 40
    },
    {
      "epoch": 0.24,
      "grad_norm": 11.157513002596966,
      "learning_rate": 4.74091073884255e-07,
      "logits/chosen": -2.7218103408813477,
      "logits/rejected": -2.734255075454712,
      "logps/chosen": -207.44918823242188,
      "logps/rejected": -214.82040405273438,
      "loss": 0.6408,
      "rewards/accuracies": 0.581250011920929,
      "rewards/chosen": -0.008733808994293213,
      "rewards/margins": 0.12230522930622101,
      "rewards/rejected": -0.13103903830051422,
      "step": 45
    },
    {
      "epoch": 0.26,
      "grad_norm": 19.987767668495486,
      "learning_rate": 4.6297985220958176e-07,
      "logits/chosen": -2.6240291595458984,
      "logits/rejected": -2.6364402770996094,
      "logps/chosen": -230.0597686767578,
      "logps/rejected": -247.9846649169922,
      "loss": 0.6254,
      "rewards/accuracies": 0.6812499761581421,
      "rewards/chosen": -0.15959548950195312,
      "rewards/margins": 0.19043126702308655,
      "rewards/rejected": -0.3500267565250397,
      "step": 50
    },
    {
      "epoch": 0.29,
      "grad_norm": 12.234358766227293,
      "learning_rate": 4.50072736755721e-07,
      "logits/chosen": -2.6890273094177246,
      "logits/rejected": -2.699357509613037,
      "logps/chosen": -203.8941192626953,
      "logps/rejected": -245.869873046875,
      "loss": 0.6224,
      "rewards/accuracies": 0.6812499761581421,
      "rewards/chosen": -0.14397892355918884,
      "rewards/margins": 0.1972903460264206,
      "rewards/rejected": -0.34126925468444824,
      "step": 55
    },
    {
      "epoch": 0.31,
      "grad_norm": 9.53983669539952,
      "learning_rate": 4.3547856320882036e-07,
      "logits/chosen": -2.7587661743164062,
      "logits/rejected": -2.7468841075897217,
      "logps/chosen": -222.244873046875,
      "logps/rejected": -258.94256591796875,
      "loss": 0.6219,
      "rewards/accuracies": 0.6937500238418579,
      "rewards/chosen": -0.08269988000392914,
      "rewards/margins": 0.23697832226753235,
      "rewards/rejected": -0.3196782171726227,
      "step": 60
    },
    {
      "epoch": 0.34,
      "grad_norm": 12.877575784727814,
      "learning_rate": 4.193203929064353e-07,
      "logits/chosen": -2.702998399734497,
      "logits/rejected": -2.725602149963379,
      "logps/chosen": -195.07113647460938,
      "logps/rejected": -241.1154022216797,
      "loss": 0.6141,
      "rewards/accuracies": 0.737500011920929,
      "rewards/chosen": -0.13330955803394318,
      "rewards/margins": 0.3229420483112335,
      "rewards/rejected": -0.4562516212463379,
      "step": 65
    },
    {
      "epoch": 0.37,
      "grad_norm": 18.557640610201624,
      "learning_rate": 4.0173447515678915e-07,
      "logits/chosen": -2.698917865753174,
      "logits/rejected": -2.696554660797119,
      "logps/chosen": -211.00540161132812,
      "logps/rejected": -240.03512573242188,
      "loss": 0.603,
      "rewards/accuracies": 0.731249988079071,
      "rewards/chosen": -0.25489380955696106,
      "rewards/margins": 0.2834622263908386,
      "rewards/rejected": -0.5383560061454773,
      "step": 70
    },
    {
      "epoch": 0.39,
      "grad_norm": 12.118908690233818,
      "learning_rate": 3.82869098354114e-07,
      "logits/chosen": -2.664738416671753,
      "logits/rejected": -2.6815037727355957,
      "logps/chosen": -210.94235229492188,
      "logps/rejected": -244.6395263671875,
      "loss": 0.5971,
      "rewards/accuracies": 0.6937500238418579,
      "rewards/chosen": -0.19208934903144836,
      "rewards/margins": 0.3050723075866699,
      "rewards/rejected": -0.4971615672111511,
      "step": 75
    },
    {
      "epoch": 0.42,
      "grad_norm": 15.597624887690841,
      "learning_rate": 3.6288333957772234e-07,
      "logits/chosen": -2.6601226329803467,
      "logits/rejected": -2.6666078567504883,
      "logps/chosen": -220.9784698486328,
      "logps/rejected": -261.5680847167969,
      "loss": 0.6004,
      "rewards/accuracies": 0.6625000238418579,
      "rewards/chosen": -0.2644396424293518,
      "rewards/margins": 0.3234052360057831,
      "rewards/rejected": -0.587844967842102,
      "step": 80
    },
    {
      "epoch": 0.44,
      "grad_norm": 19.2075260268852,
      "learning_rate": 3.419457232184733e-07,
      "logits/chosen": -2.60345721244812,
      "logits/rejected": -2.5938727855682373,
      "logps/chosen": -225.29891967773438,
      "logps/rejected": -283.8168029785156,
      "loss": 0.5789,
      "rewards/accuracies": 0.6937500238418579,
      "rewards/chosen": -0.4199010729789734,
      "rewards/margins": 0.4167444705963135,
      "rewards/rejected": -0.8366454839706421,
      "step": 85
    },
    {
      "epoch": 0.47,
      "grad_norm": 22.569025736787044,
      "learning_rate": 3.2023279994339236e-07,
      "logits/chosen": -2.443169355392456,
      "logits/rejected": -2.4476258754730225,
      "logps/chosen": -240.2940673828125,
      "logps/rejected": -293.4192810058594,
      "loss": 0.5571,
      "rewards/accuracies": 0.7124999761581421,
      "rewards/chosen": -0.4993715286254883,
      "rewards/margins": 0.5386183857917786,
      "rewards/rejected": -1.037989854812622,
      "step": 90
    },
    {
      "epoch": 0.5,
      "grad_norm": 24.018902464460997,
      "learning_rate": 2.979276579809346e-07,
      "logits/chosen": -2.3657853603363037,
      "logits/rejected": -2.3687679767608643,
      "logps/chosen": -289.3427734375,
      "logps/rejected": -334.8695068359375,
      "loss": 0.5686,
      "rewards/accuracies": 0.7250000238418579,
      "rewards/chosen": -0.6699068546295166,
      "rewards/margins": 0.483262836933136,
      "rewards/rejected": -1.1531696319580078,
      "step": 95
    },
    {
      "epoch": 0.52,
      "grad_norm": 26.501758420825617,
      "learning_rate": 2.752183792800671e-07,
      "logits/chosen": -2.306899070739746,
      "logits/rejected": -2.302125930786133,
      "logps/chosen": -247.7756805419922,
      "logps/rejected": -336.0014343261719,
      "loss": 0.5798,
      "rewards/accuracies": 0.7562500238418579,
      "rewards/chosen": -0.5823773145675659,
      "rewards/margins": 0.7221489548683167,
      "rewards/rejected": -1.3045262098312378,
      "step": 100
    },
    {
      "epoch": 0.55,
      "grad_norm": 28.132693943971592,
      "learning_rate": 2.522964535611816e-07,
      "logits/chosen": -2.304361343383789,
      "logits/rejected": -2.304105043411255,
      "logps/chosen": -274.34808349609375,
      "logps/rejected": -327.9083557128906,
      "loss": 0.5455,
      "rewards/accuracies": 0.668749988079071,
      "rewards/chosen": -0.698980450630188,
      "rewards/margins": 0.6040951013565063,
      "rewards/rejected": -1.3030755519866943,
      "step": 105
    },
    {
      "epoch": 0.58,
      "grad_norm": 24.175970284687118,
      "learning_rate": 2.2935516363191693e-07,
      "logits/chosen": -2.308105230331421,
      "logits/rejected": -2.314586639404297,
      "logps/chosen": -286.6155090332031,
      "logps/rejected": -360.39141845703125,
      "loss": 0.5655,
      "rewards/accuracies": 0.699999988079071,
      "rewards/chosen": -0.6548763513565063,
      "rewards/margins": 0.5790941119194031,
      "rewards/rejected": -1.2339705228805542,
      "step": 110
    },
    {
      "epoch": 0.6,
      "grad_norm": 21.26580024010301,
      "learning_rate": 2.065879555832674e-07,
      "logits/chosen": -2.2688331604003906,
      "logits/rejected": -2.2680087089538574,
      "logps/chosen": -276.52972412109375,
      "logps/rejected": -326.67901611328125,
      "loss": 0.5511,
      "rewards/accuracies": 0.65625,
      "rewards/chosen": -0.7125495076179504,
      "rewards/margins": 0.5782502293586731,
      "rewards/rejected": -1.290799617767334,
      "step": 115
    },
    {
      "epoch": 0.63,
      "grad_norm": 23.746243959703744,
      "learning_rate": 1.8418680760885024e-07,
      "logits/chosen": -2.218527317047119,
      "logits/rejected": -2.221980333328247,
      "logps/chosen": -261.77569580078125,
      "logps/rejected": -335.24017333984375,
      "loss": 0.5532,
      "rewards/accuracies": 0.65625,
      "rewards/chosen": -0.7097143530845642,
      "rewards/margins": 0.6005780100822449,
      "rewards/rejected": -1.310292363166809,
      "step": 120
    },
    {
      "epoch": 0.65,
      "grad_norm": 29.782548003529655,
      "learning_rate": 1.6234061120181143e-07,
      "logits/chosen": -2.195814609527588,
      "logits/rejected": -2.187227964401245,
      "logps/chosen": -272.974853515625,
      "logps/rejected": -351.69970703125,
      "loss": 0.5631,
      "rewards/accuracies": 0.6937500238418579,
      "rewards/chosen": -0.7646051645278931,
      "rewards/margins": 0.6365951895713806,
      "rewards/rejected": -1.4012004137039185,
      "step": 125
    },
    {
      "epoch": 0.68,
      "grad_norm": 26.29706381032987,
      "learning_rate": 1.4123357837948176e-07,
      "logits/chosen": -2.1339738368988037,
      "logits/rejected": -2.1486494541168213,
      "logps/chosen": -270.99737548828125,
      "logps/rejected": -353.390380859375,
      "loss": 0.5582,
      "rewards/accuracies": 0.6812499761581421,
      "rewards/chosen": -0.852789580821991,
      "rewards/margins": 0.6374956369400024,
      "rewards/rejected": -1.4902852773666382,
      "step": 130
    },
    {
      "epoch": 0.71,
      "grad_norm": 24.19850768514201,
      "learning_rate": 1.2104368836641906e-07,
      "logits/chosen": -2.2046804428100586,
      "logits/rejected": -2.2131218910217285,
      "logps/chosen": -283.3597106933594,
      "logps/rejected": -341.37310791015625,
      "loss": 0.5644,
      "rewards/accuracies": 0.6625000238418579,
      "rewards/chosen": -0.8999420404434204,
      "rewards/margins": 0.5441323518753052,
      "rewards/rejected": -1.4440743923187256,
      "step": 135
    },
    {
      "epoch": 0.73,
      "grad_norm": 24.56811685462707,
      "learning_rate": 1.0194118683375502e-07,
      "logits/chosen": -2.1356701850891113,
      "logits/rejected": -2.1463379859924316,
      "logps/chosen": -286.03912353515625,
      "logps/rejected": -366.0921936035156,
      "loss": 0.5346,
      "rewards/accuracies": 0.706250011920929,
      "rewards/chosen": -0.8500736355781555,
      "rewards/margins": 0.7081271409988403,
      "rewards/rejected": -1.5582005977630615,
      "step": 140
    },
    {
      "epoch": 0.76,
      "grad_norm": 22.093032695536095,
      "learning_rate": 8.408715034959468e-08,
      "logits/chosen": -2.2413618564605713,
      "logits/rejected": -2.238342761993408,
      "logps/chosen": -269.3088073730469,
      "logps/rejected": -338.8702087402344,
      "loss": 0.5355,
      "rewards/accuracies": 0.699999988079071,
      "rewards/chosen": -0.7305900454521179,
      "rewards/margins": 0.5677007436752319,
      "rewards/rejected": -1.2982908487319946,
      "step": 145
    },
    {
      "epoch": 0.78,
      "grad_norm": 26.974043137085268,
      "learning_rate": 6.763212814534483e-08,
      "logits/chosen": -2.120349884033203,
      "logits/rejected": -2.1195971965789795,
      "logps/chosen": -241.08816528320312,
      "logps/rejected": -324.95501708984375,
      "loss": 0.5188,
      "rewards/accuracies": 0.768750011920929,
      "rewards/chosen": -0.6961054801940918,
      "rewards/margins": 0.7996633648872375,
      "rewards/rejected": -1.4957687854766846,
      "step": 150
    },
    {
      "epoch": 0.81,
      "grad_norm": 23.827648297755715,
      "learning_rate": 5.271487265090163e-08,
      "logits/chosen": -2.125883102416992,
      "logits/rejected": -2.1427865028381348,
      "logps/chosen": -284.70928955078125,
      "logps/rejected": -355.811279296875,
      "loss": 0.5376,
      "rewards/accuracies": 0.6812499761581421,
      "rewards/chosen": -0.8156154751777649,
      "rewards/margins": 0.6232105493545532,
      "rewards/rejected": -1.438826084136963,
      "step": 155
    },
    {
      "epoch": 0.84,
      "grad_norm": 24.279380848256523,
      "learning_rate": 3.94611695031086e-08,
      "logits/chosen": -2.1862270832061768,
      "logits/rejected": -2.1674563884735107,
      "logps/chosen": -310.10052490234375,
      "logps/rejected": -389.71978759765625,
      "loss": 0.5317,
      "rewards/accuracies": 0.706250011920929,
      "rewards/chosen": -0.9326030611991882,
      "rewards/margins": 0.6839269995689392,
      "rewards/rejected": -1.616530179977417,
      "step": 160
    },
    {
      "epoch": 0.86,
      "grad_norm": 25.827053325333576,
      "learning_rate": 2.7982776893115624e-08,
      "logits/chosen": -2.201906681060791,
      "logits/rejected": -2.209010601043701,
      "logps/chosen": -283.83642578125,
      "logps/rejected": -354.58819580078125,
      "loss": 0.5563,
      "rewards/accuracies": 0.699999988079071,
      "rewards/chosen": -0.8339606523513794,
      "rewards/margins": 0.6891152262687683,
      "rewards/rejected": -1.523075819015503,
      "step": 165
    },
    {
      "epoch": 0.89,
      "grad_norm": 38.39676777599807,
      "learning_rate": 1.8376483196299558e-08,
      "logits/chosen": -2.1251769065856934,
      "logits/rejected": -2.129974842071533,
      "logps/chosen": -263.50958251953125,
      "logps/rejected": -376.3040466308594,
      "loss": 0.5264,
      "rewards/accuracies": 0.7124999761581421,
      "rewards/chosen": -0.9548496007919312,
      "rewards/margins": 0.8713960647583008,
      "rewards/rejected": -1.826245665550232,
      "step": 170
    },
    {
      "epoch": 0.92,
      "grad_norm": 29.646751845133494,
      "learning_rate": 1.072329083102147e-08,
      "logits/chosen": -2.1439247131347656,
      "logits/rejected": -2.1595120429992676,
      "logps/chosen": -285.36895751953125,
      "logps/rejected": -353.172119140625,
      "loss": 0.5656,
      "rewards/accuracies": 0.7437499761581421,
      "rewards/chosen": -0.9187411069869995,
      "rewards/margins": 0.6375783681869507,
      "rewards/rejected": -1.5563193559646606,
      "step": 175
    },
    {
      "epoch": 0.94,
      "grad_norm": 25.10520312818217,
      "learning_rate": 5.087733228106517e-09,
      "logits/chosen": -2.1849255561828613,
      "logits/rejected": -2.1818103790283203,
      "logps/chosen": -334.8546142578125,
      "logps/rejected": -423.3223571777344,
      "loss": 0.5344,
      "rewards/accuracies": 0.7124999761581421,
      "rewards/chosen": -0.9850701093673706,
      "rewards/margins": 0.7517432570457458,
      "rewards/rejected": -1.7368133068084717,
      "step": 180
    },
    {
      "epoch": 0.97,
      "grad_norm": 30.431746994283188,
      "learning_rate": 1.5173306705126287e-09,
      "logits/chosen": -2.051201343536377,
      "logits/rejected": -2.058706045150757,
      "logps/chosen": -267.4794616699219,
      "logps/rejected": -324.93109130859375,
      "loss": 0.5379,
      "rewards/accuracies": 0.699999988079071,
      "rewards/chosen": -0.8862212896347046,
      "rewards/margins": 0.6717046499252319,
      "rewards/rejected": -1.5579259395599365,
      "step": 185
    },
    {
      "epoch": 0.99,
      "grad_norm": 30.54437903718355,
      "learning_rate": 4.2189591669322674e-11,
      "logits/chosen": -2.094390869140625,
      "logits/rejected": -2.1068897247314453,
      "logps/chosen": -294.10125732421875,
      "logps/rejected": -360.81256103515625,
      "loss": 0.5301,
      "rewards/accuracies": 0.6875,
      "rewards/chosen": -1.0059906244277954,
      "rewards/margins": 0.6718493700027466,
      "rewards/rejected": -1.677839994430542,
      "step": 190
    },
    {
      "epoch": 1.0,
      "step": 191,
      "total_flos": 0.0,
      "train_loss": 0.590523566563092,
      "train_runtime": 5198.2178,
      "train_samples_per_second": 4.704,
      "train_steps_per_second": 0.037
    }
  ],
  "logging_steps": 5,
  "max_steps": 191,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 500,
  "total_flos": 0.0,
  "train_batch_size": 8,
  "trial_name": null,
  "trial_params": null
}