{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 0.522384161312229,
  "eval_steps": 500,
  "global_step": 30000,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.00870640268853715,
      "grad_norm": 1.7332943677902222,
      "learning_rate": 2.902083696093795e-06,
      "loss": 8.5163,
      "step": 500
    },
    {
      "epoch": 0.0174128053770743,
      "grad_norm": 1.3953511714935303,
      "learning_rate": 5.80416739218759e-06,
      "loss": 7.5133,
      "step": 1000
    },
    {
      "epoch": 0.02611920806561145,
      "grad_norm": 1.0311133861541748,
      "learning_rate": 8.706251088281386e-06,
      "loss": 6.6845,
      "step": 1500
    },
    {
      "epoch": 0.0348256107541486,
      "grad_norm": 0.9061997532844543,
      "learning_rate": 1.160833478437518e-05,
      "loss": 6.0887,
      "step": 2000
    },
    {
      "epoch": 0.04353201344268575,
      "grad_norm": 1.1524709463119507,
      "learning_rate": 1.4510418480468977e-05,
      "loss": 5.6897,
      "step": 2500
    },
    {
      "epoch": 0.0522384161312229,
      "grad_norm": 1.559319019317627,
      "learning_rate": 1.7412502176562773e-05,
      "loss": 5.401,
      "step": 3000
    },
    {
      "epoch": 0.06094481881976005,
      "grad_norm": 1.388635516166687,
      "learning_rate": 2.031458587265657e-05,
      "loss": 5.1785,
      "step": 3500
    },
    {
      "epoch": 0.0696512215082972,
      "grad_norm": 1.545809030532837,
      "learning_rate": 2.321666956875036e-05,
      "loss": 4.9864,
      "step": 4000
    },
    {
      "epoch": 0.07835762419683436,
      "grad_norm": 1.8368648290634155,
      "learning_rate": 2.611875326484416e-05,
      "loss": 4.8379,
      "step": 4500
    },
    {
      "epoch": 0.0870640268853715,
      "grad_norm": 1.6053112745285034,
      "learning_rate": 2.9020836960937954e-05,
      "loss": 4.6993,
      "step": 5000
    },
    {
      "epoch": 0.09577042957390865,
      "grad_norm": 1.5248411893844604,
      "learning_rate": 3.1922920657031755e-05,
      "loss": 4.5834,
      "step": 5500
    },
    {
      "epoch": 0.1044768322624458,
      "grad_norm": 1.6166003942489624,
      "learning_rate": 3.4825004353125546e-05,
      "loss": 4.4827,
      "step": 6000
    },
    {
      "epoch": 0.11318323495098295,
      "grad_norm": 1.7350354194641113,
      "learning_rate": 3.772128388182715e-05,
      "loss": 4.3958,
      "step": 6500
    },
    {
      "epoch": 0.1218896376395201,
      "grad_norm": 1.6347730159759521,
      "learning_rate": 4.062336757792095e-05,
      "loss": 4.3159,
      "step": 7000
    },
    {
      "epoch": 0.13059604032805724,
      "grad_norm": 1.7262680530548096,
      "learning_rate": 4.3525451274014746e-05,
      "loss": 4.2414,
      "step": 7500
    },
    {
      "epoch": 0.1393024430165944,
      "grad_norm": 1.6050870418548584,
      "learning_rate": 4.642753497010854e-05,
      "loss": 4.1787,
      "step": 8000
    },
    {
      "epoch": 0.14800884570513156,
      "grad_norm": 1.6479125022888184,
      "learning_rate": 4.9329618666202335e-05,
      "loss": 4.1177,
      "step": 8500
    },
    {
      "epoch": 0.1567152483936687,
      "grad_norm": 1.5750055313110352,
      "learning_rate": 5.222589819490395e-05,
      "loss": 4.0634,
      "step": 9000
    },
    {
      "epoch": 0.16542165108220586,
      "grad_norm": 1.5032442808151245,
      "learning_rate": 5.512798189099774e-05,
      "loss": 4.007,
      "step": 9500
    },
    {
      "epoch": 0.174128053770743,
      "grad_norm": 1.47872793674469,
      "learning_rate": 5.803006558709153e-05,
      "loss": 3.9545,
      "step": 10000
    },
    {
      "epoch": 0.18283445645928015,
      "grad_norm": 1.5958372354507446,
      "learning_rate": 6.0932149283185326e-05,
      "loss": 3.9063,
      "step": 10500
    },
    {
      "epoch": 0.1915408591478173,
      "grad_norm": 1.4950892925262451,
      "learning_rate": 6.383423297927912e-05,
      "loss": 3.8569,
      "step": 11000
    },
    {
      "epoch": 0.20024726183635447,
      "grad_norm": 1.4393689632415771,
      "learning_rate": 6.673631667537293e-05,
      "loss": 3.8082,
      "step": 11500
    },
    {
      "epoch": 0.2089536645248916,
      "grad_norm": 1.5717238187789917,
      "learning_rate": 6.963840037146672e-05,
      "loss": 3.767,
      "step": 12000
    },
    {
      "epoch": 0.21766006721342876,
      "grad_norm": 1.4899908304214478,
      "learning_rate": 7.254048406756051e-05,
      "loss": 3.7333,
      "step": 12500
    },
    {
      "epoch": 0.2263664699019659,
      "grad_norm": 1.3972300291061401,
      "learning_rate": 7.543676359626212e-05,
      "loss": 3.6949,
      "step": 13000
    },
    {
      "epoch": 0.23507287259050305,
      "grad_norm": 1.4396140575408936,
      "learning_rate": 7.833884729235591e-05,
      "loss": 3.6678,
      "step": 13500
    },
    {
      "epoch": 0.2437792752790402,
      "grad_norm": 1.2989853620529175,
      "learning_rate": 8.124093098844972e-05,
      "loss": 3.6383,
      "step": 14000
    },
    {
      "epoch": 0.25248567796757737,
      "grad_norm": 1.2884502410888672,
      "learning_rate": 8.414301468454351e-05,
      "loss": 3.6071,
      "step": 14500
    },
    {
      "epoch": 0.2611920806561145,
      "grad_norm": 1.2769827842712402,
      "learning_rate": 8.703929421324512e-05,
      "loss": 3.5782,
      "step": 15000
    },
    {
      "epoch": 0.26989848334465166,
      "grad_norm": 1.2089248895645142,
      "learning_rate": 8.994137790933891e-05,
      "loss": 3.5591,
      "step": 15500
    },
    {
      "epoch": 0.2786048860331888,
      "grad_norm": 1.2605466842651367,
      "learning_rate": 9.28434616054327e-05,
      "loss": 3.5359,
      "step": 16000
    },
    {
      "epoch": 0.28731128872172595,
      "grad_norm": 1.1653631925582886,
      "learning_rate": 9.57455453015265e-05,
      "loss": 3.5103,
      "step": 16500
    },
    {
      "epoch": 0.29601769141026313,
      "grad_norm": 1.1932523250579834,
      "learning_rate": 9.864182483022812e-05,
      "loss": 3.491,
      "step": 17000
    },
    {
      "epoch": 0.30472409409880025,
      "grad_norm": 1.115700125694275,
      "learning_rate": 9.998919722621363e-05,
      "loss": 3.4716,
      "step": 17500
    },
    {
      "epoch": 0.3134304967873374,
      "grad_norm": 1.0842598676681519,
      "learning_rate": 9.99104397096728e-05,
      "loss": 3.4554,
      "step": 18000
    },
    {
      "epoch": 0.32213689947587454,
      "grad_norm": 1.114572525024414,
      "learning_rate": 9.975548766466318e-05,
      "loss": 3.4367,
      "step": 18500
    },
    {
      "epoch": 0.3308433021644117,
      "grad_norm": 1.0411341190338135,
      "learning_rate": 9.952511505591483e-05,
      "loss": 3.418,
      "step": 19000
    },
    {
      "epoch": 0.3395497048529489,
      "grad_norm": 1.0207189321517944,
      "learning_rate": 9.921875035663696e-05,
      "loss": 3.4066,
      "step": 19500
    },
    {
      "epoch": 0.348256107541486,
      "grad_norm": 1.0282100439071655,
      "learning_rate": 9.883724707919666e-05,
      "loss": 3.3898,
      "step": 20000
    },
    {
      "epoch": 0.3569625102300232,
      "grad_norm": 1.0247931480407715,
      "learning_rate": 9.838118763606121e-05,
      "loss": 3.3797,
      "step": 20500
    },
    {
      "epoch": 0.3656689129185603,
      "grad_norm": 0.9947717785835266,
      "learning_rate": 9.785240129092532e-05,
      "loss": 3.3638,
      "step": 21000
    },
    {
      "epoch": 0.37437531560709747,
      "grad_norm": 0.9564889073371887,
      "learning_rate": 9.724957618833668e-05,
      "loss": 3.3489,
      "step": 21500
    },
    {
      "epoch": 0.3830817182956346,
      "grad_norm": 0.9665967226028442,
      "learning_rate": 9.65746186982363e-05,
      "loss": 3.3382,
      "step": 22000
    },
    {
      "epoch": 0.39178812098417176,
      "grad_norm": 0.9586713314056396,
      "learning_rate": 9.582855922764911e-05,
      "loss": 3.3239,
      "step": 22500
    },
    {
      "epoch": 0.40049452367270894,
      "grad_norm": 0.9278811812400818,
      "learning_rate": 9.501423778734501e-05,
      "loss": 3.315,
      "step": 23000
    },
    {
      "epoch": 0.40920092636124605,
      "grad_norm": 0.8986345529556274,
      "learning_rate": 9.41296341435732e-05,
      "loss": 3.3005,
      "step": 23500
    },
    {
      "epoch": 0.4179073290497832,
      "grad_norm": 0.8818135261535645,
      "learning_rate": 9.317766109402922e-05,
      "loss": 3.2957,
      "step": 24000
    },
    {
      "epoch": 0.42661373173832035,
      "grad_norm": 0.9282686710357666,
      "learning_rate": 9.215977194463557e-05,
      "loss": 3.2868,
      "step": 24500
    },
    {
      "epoch": 0.4353201344268575,
      "grad_norm": 0.88565993309021,
      "learning_rate": 9.107974828450761e-05,
      "loss": 3.2755,
      "step": 25000
    },
    {
      "epoch": 0.44402653711539464,
      "grad_norm": 0.9184048771858215,
      "learning_rate": 8.993491070457932e-05,
      "loss": 3.269,
      "step": 25500
    },
    {
      "epoch": 0.4527329398039318,
      "grad_norm": 0.9061756134033203,
      "learning_rate": 8.872910748883857e-05,
      "loss": 3.2582,
      "step": 26000
    },
    {
      "epoch": 0.461439342492469,
      "grad_norm": 0.9069876670837402,
      "learning_rate": 8.746417944670291e-05,
      "loss": 3.2544,
      "step": 26500
    },
    {
      "epoch": 0.4701457451810061,
      "grad_norm": 0.900641143321991,
      "learning_rate": 8.614475764467431e-05,
      "loss": 3.2427,
      "step": 27000
    },
    {
      "epoch": 0.4788521478695433,
      "grad_norm": 0.8898523449897766,
      "learning_rate": 8.476756875563143e-05,
      "loss": 3.235,
      "step": 27500
    },
    {
      "epoch": 0.4875585505580804,
      "grad_norm": 0.9023728370666504,
      "learning_rate": 8.333730282453582e-05,
      "loss": 3.2254,
      "step": 28000
    },
    {
      "epoch": 0.49626495324661757,
      "grad_norm": 0.8760348558425903,
      "learning_rate": 8.185614333121332e-05,
      "loss": 3.2175,
      "step": 28500
    },
    {
      "epoch": 0.5049713559351547,
      "grad_norm": 0.8970732092857361,
      "learning_rate": 8.032945802949179e-05,
      "loss": 3.2159,
      "step": 29000
    },
    {
      "epoch": 0.5136777586236919,
      "grad_norm": 0.8672531843185425,
      "learning_rate": 7.875345939638492e-05,
      "loss": 3.2051,
      "step": 29500
    },
    {
      "epoch": 0.522384161312229,
      "grad_norm": 0.896369457244873,
      "learning_rate": 7.713356501134123e-05,
      "loss": 3.2038,
      "step": 30000
    }
  ],
  "logging_steps": 500,
  "max_steps": 57429,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 1000,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": false
      },
      "attributes": {}
    }
  },
  "total_flos": 1.2559910839517184e+17,
  "train_batch_size": 16,
  "trial_name": null,
  "trial_params": null
}