{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 5.0,
  "eval_steps": 500,
  "global_step": 700,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.07142857142857142,
      "grad_norm": 0.20011168718338013,
      "learning_rate": 9.928057553956835e-05,
      "loss": 1.5743,
      "step": 10
    },
    {
      "epoch": 0.14285714285714285,
      "grad_norm": 0.410460501909256,
      "learning_rate": 9.784172661870504e-05,
      "loss": 1.1382,
      "step": 20
    },
    {
      "epoch": 0.21428571428571427,
      "grad_norm": 0.24360999464988708,
      "learning_rate": 9.640287769784174e-05,
      "loss": 0.587,
      "step": 30
    },
    {
      "epoch": 0.2857142857142857,
      "grad_norm": 0.07866887748241425,
      "learning_rate": 9.496402877697842e-05,
      "loss": 0.4484,
      "step": 40
    },
    {
      "epoch": 0.35714285714285715,
      "grad_norm": 0.1024688109755516,
      "learning_rate": 9.35251798561151e-05,
      "loss": 0.3484,
      "step": 50
    },
    {
      "epoch": 0.42857142857142855,
      "grad_norm": 0.07728195190429688,
      "learning_rate": 9.20863309352518e-05,
      "loss": 0.3599,
      "step": 60
    },
    {
      "epoch": 0.5,
      "grad_norm": 0.07939107716083527,
      "learning_rate": 9.06474820143885e-05,
      "loss": 0.353,
      "step": 70
    },
    {
      "epoch": 0.5714285714285714,
      "grad_norm": 0.07889492064714432,
      "learning_rate": 8.920863309352519e-05,
      "loss": 0.3237,
      "step": 80
    },
    {
      "epoch": 0.6428571428571429,
      "grad_norm": 0.08044611662626266,
      "learning_rate": 8.776978417266187e-05,
      "loss": 0.3519,
      "step": 90
    },
    {
      "epoch": 0.7142857142857143,
      "grad_norm": 0.08084924519062042,
      "learning_rate": 8.633093525179857e-05,
      "loss": 0.354,
      "step": 100
    },
    {
      "epoch": 0.7857142857142857,
      "grad_norm": 0.08062956482172012,
      "learning_rate": 8.489208633093527e-05,
      "loss": 0.3886,
      "step": 110
    },
    {
      "epoch": 0.8571428571428571,
      "grad_norm": 0.07981599122285843,
      "learning_rate": 8.345323741007195e-05,
      "loss": 0.3534,
      "step": 120
    },
    {
      "epoch": 0.9285714285714286,
      "grad_norm": 0.09865035861730576,
      "learning_rate": 8.201438848920863e-05,
      "loss": 0.3536,
      "step": 130
    },
    {
      "epoch": 1.0,
      "grad_norm": 0.08893056213855743,
      "learning_rate": 8.057553956834533e-05,
      "loss": 0.3326,
      "step": 140
    },
    {
      "epoch": 1.0714285714285714,
      "grad_norm": 0.07679039984941483,
      "learning_rate": 7.913669064748202e-05,
      "loss": 0.3531,
      "step": 150
    },
    {
      "epoch": 1.1428571428571428,
      "grad_norm": 0.07035951316356659,
      "learning_rate": 7.769784172661872e-05,
      "loss": 0.3239,
      "step": 160
    },
    {
      "epoch": 1.2142857142857142,
      "grad_norm": 0.09360262006521225,
      "learning_rate": 7.62589928057554e-05,
      "loss": 0.3305,
      "step": 170
    },
    {
      "epoch": 1.2857142857142856,
      "grad_norm": 0.10175589472055435,
      "learning_rate": 7.48201438848921e-05,
      "loss": 0.3384,
      "step": 180
    },
    {
      "epoch": 1.3571428571428572,
      "grad_norm": 0.15438881516456604,
      "learning_rate": 7.338129496402878e-05,
      "loss": 0.3191,
      "step": 190
    },
    {
      "epoch": 1.4285714285714286,
      "grad_norm": 0.09196078777313232,
      "learning_rate": 7.194244604316547e-05,
      "loss": 0.2944,
      "step": 200
    },
    {
      "epoch": 1.5,
      "grad_norm": 0.0951504111289978,
      "learning_rate": 7.050359712230215e-05,
      "loss": 0.3055,
      "step": 210
    },
    {
      "epoch": 1.5714285714285714,
      "grad_norm": 0.09340916574001312,
      "learning_rate": 6.906474820143886e-05,
      "loss": 0.3222,
      "step": 220
    },
    {
      "epoch": 1.6428571428571428,
      "grad_norm": 0.08794793486595154,
      "learning_rate": 6.762589928057555e-05,
      "loss": 0.3395,
      "step": 230
    },
    {
      "epoch": 1.7142857142857144,
      "grad_norm": 0.09506211429834366,
      "learning_rate": 6.618705035971223e-05,
      "loss": 0.3386,
      "step": 240
    },
    {
      "epoch": 1.7857142857142856,
      "grad_norm": 0.09224151074886322,
      "learning_rate": 6.474820143884892e-05,
      "loss": 0.3389,
      "step": 250
    },
    {
      "epoch": 1.8571428571428572,
      "grad_norm": 0.10331781953573227,
      "learning_rate": 6.330935251798561e-05,
      "loss": 0.2951,
      "step": 260
    },
    {
      "epoch": 1.9285714285714286,
      "grad_norm": 0.10871120542287827,
      "learning_rate": 6.187050359712231e-05,
      "loss": 0.3382,
      "step": 270
    },
    {
      "epoch": 2.0,
      "grad_norm": 0.08793390542268753,
      "learning_rate": 6.0431654676258996e-05,
      "loss": 0.3221,
      "step": 280
    },
    {
      "epoch": 2.0714285714285716,
      "grad_norm": 0.10244260728359222,
      "learning_rate": 5.899280575539569e-05,
      "loss": 0.3054,
      "step": 290
    },
    {
      "epoch": 2.142857142857143,
      "grad_norm": 0.08769655227661133,
      "learning_rate": 5.755395683453237e-05,
      "loss": 0.28,
      "step": 300
    },
    {
      "epoch": 2.2142857142857144,
      "grad_norm": 0.10015639662742615,
      "learning_rate": 5.611510791366906e-05,
      "loss": 0.324,
      "step": 310
    },
    {
      "epoch": 2.2857142857142856,
      "grad_norm": 0.11067729443311691,
      "learning_rate": 5.467625899280576e-05,
      "loss": 0.329,
      "step": 320
    },
    {
      "epoch": 2.357142857142857,
      "grad_norm": 0.10012146830558777,
      "learning_rate": 5.323741007194245e-05,
      "loss": 0.2767,
      "step": 330
    },
    {
      "epoch": 2.4285714285714284,
      "grad_norm": 0.11153287440538406,
      "learning_rate": 5.179856115107914e-05,
      "loss": 0.3377,
      "step": 340
    },
    {
      "epoch": 2.5,
      "grad_norm": 0.11222545802593231,
      "learning_rate": 5.035971223021583e-05,
      "loss": 0.3123,
      "step": 350
    },
    {
      "epoch": 2.571428571428571,
      "grad_norm": 0.11043631285429001,
      "learning_rate": 4.892086330935252e-05,
      "loss": 0.3232,
      "step": 360
    },
    {
      "epoch": 2.642857142857143,
      "grad_norm": 0.11605663597583771,
      "learning_rate": 4.748201438848921e-05,
      "loss": 0.299,
      "step": 370
    },
    {
      "epoch": 2.7142857142857144,
      "grad_norm": 0.1168583557009697,
      "learning_rate": 4.60431654676259e-05,
      "loss": 0.3057,
      "step": 380
    },
    {
      "epoch": 2.7857142857142856,
      "grad_norm": 0.1063845306634903,
      "learning_rate": 4.460431654676259e-05,
      "loss": 0.2892,
      "step": 390
    },
    {
      "epoch": 2.857142857142857,
      "grad_norm": 0.1153399869799614,
      "learning_rate": 4.3165467625899284e-05,
      "loss": 0.2884,
      "step": 400
    },
    {
      "epoch": 2.928571428571429,
      "grad_norm": 0.11235486716032028,
      "learning_rate": 4.1726618705035975e-05,
      "loss": 0.2879,
      "step": 410
    },
    {
      "epoch": 3.0,
      "grad_norm": 0.14575359225273132,
      "learning_rate": 4.0287769784172666e-05,
      "loss": 0.3107,
      "step": 420
    },
    {
      "epoch": 3.0714285714285716,
      "grad_norm": 0.11519063264131546,
      "learning_rate": 3.884892086330936e-05,
      "loss": 0.2878,
      "step": 430
    },
    {
      "epoch": 3.142857142857143,
      "grad_norm": 0.13909830152988434,
      "learning_rate": 3.741007194244605e-05,
      "loss": 0.2743,
      "step": 440
    },
    {
      "epoch": 3.2142857142857144,
      "grad_norm": 0.1268688589334488,
      "learning_rate": 3.597122302158273e-05,
      "loss": 0.2827,
      "step": 450
    },
    {
      "epoch": 3.2857142857142856,
      "grad_norm": 0.13879084587097168,
      "learning_rate": 3.453237410071943e-05,
      "loss": 0.3196,
      "step": 460
    },
    {
      "epoch": 3.357142857142857,
      "grad_norm": 0.13855186104774475,
      "learning_rate": 3.3093525179856116e-05,
      "loss": 0.2846,
      "step": 470
    },
    {
      "epoch": 3.4285714285714284,
      "grad_norm": 0.13305865228176117,
      "learning_rate": 3.165467625899281e-05,
      "loss": 0.2998,
      "step": 480
    },
    {
      "epoch": 3.5,
      "grad_norm": 0.13443267345428467,
      "learning_rate": 3.0215827338129498e-05,
      "loss": 0.2742,
      "step": 490
    },
    {
      "epoch": 3.571428571428571,
      "grad_norm": 0.1269126534461975,
      "learning_rate": 2.8776978417266186e-05,
      "loss": 0.2608,
      "step": 500
    },
    {
      "epoch": 3.642857142857143,
      "grad_norm": 0.1363735795021057,
      "learning_rate": 2.733812949640288e-05,
      "loss": 0.2744,
      "step": 510
    },
    {
      "epoch": 3.7142857142857144,
      "grad_norm": 0.1578550785779953,
      "learning_rate": 2.589928057553957e-05,
      "loss": 0.2922,
      "step": 520
    },
    {
      "epoch": 3.7857142857142856,
      "grad_norm": 0.1330137997865677,
      "learning_rate": 2.446043165467626e-05,
      "loss": 0.2932,
      "step": 530
    },
    {
      "epoch": 3.857142857142857,
      "grad_norm": 0.14818449318408966,
      "learning_rate": 2.302158273381295e-05,
      "loss": 0.2587,
      "step": 540
    },
    {
      "epoch": 3.928571428571429,
      "grad_norm": 0.14131690561771393,
      "learning_rate": 2.1582733812949642e-05,
      "loss": 0.2999,
      "step": 550
    },
    {
      "epoch": 4.0,
      "grad_norm": 0.14016275107860565,
      "learning_rate": 2.0143884892086333e-05,
      "loss": 0.2941,
      "step": 560
    },
    {
      "epoch": 4.071428571428571,
      "grad_norm": 0.13414789736270905,
      "learning_rate": 1.8705035971223024e-05,
      "loss": 0.2685,
      "step": 570
    },
    {
      "epoch": 4.142857142857143,
      "grad_norm": 0.1517314612865448,
      "learning_rate": 1.7266187050359716e-05,
      "loss": 0.2555,
      "step": 580
    },
    {
      "epoch": 4.214285714285714,
      "grad_norm": 0.16656886041164398,
      "learning_rate": 1.5827338129496403e-05,
      "loss": 0.2492,
      "step": 590
    },
    {
      "epoch": 4.285714285714286,
      "grad_norm": 0.14459213614463806,
      "learning_rate": 1.4388489208633093e-05,
      "loss": 0.2538,
      "step": 600
    },
    {
      "epoch": 4.357142857142857,
      "grad_norm": 0.17722781002521515,
      "learning_rate": 1.2949640287769784e-05,
      "loss": 0.2882,
      "step": 610
    },
    {
      "epoch": 4.428571428571429,
      "grad_norm": 0.17217843234539032,
      "learning_rate": 1.1510791366906475e-05,
      "loss": 0.2996,
      "step": 620
    },
    {
      "epoch": 4.5,
      "grad_norm": 0.14222589135169983,
      "learning_rate": 1.0071942446043167e-05,
      "loss": 0.262,
      "step": 630
    },
    {
      "epoch": 4.571428571428571,
      "grad_norm": 0.17444641888141632,
      "learning_rate": 8.633093525179858e-06,
      "loss": 0.2721,
      "step": 640
    },
    {
      "epoch": 4.642857142857143,
      "grad_norm": 0.18322263658046722,
      "learning_rate": 7.1942446043165465e-06,
      "loss": 0.2818,
      "step": 650
    },
    {
      "epoch": 4.714285714285714,
      "grad_norm": 0.1895478367805481,
      "learning_rate": 5.755395683453238e-06,
      "loss": 0.262,
      "step": 660
    },
    {
      "epoch": 4.785714285714286,
      "grad_norm": 0.1996978223323822,
      "learning_rate": 4.316546762589929e-06,
      "loss": 0.2836,
      "step": 670
    },
    {
      "epoch": 4.857142857142857,
      "grad_norm": 0.2018844485282898,
      "learning_rate": 2.877697841726619e-06,
      "loss": 0.2655,
      "step": 680
    },
    {
      "epoch": 4.928571428571429,
      "grad_norm": 0.18131566047668457,
      "learning_rate": 1.4388489208633094e-06,
      "loss": 0.2839,
      "step": 690
    },
    {
      "epoch": 5.0,
      "grad_norm": 0.1565731167793274,
      "learning_rate": 0.0,
      "loss": 0.2389,
      "step": 700
    }
  ],
  "logging_steps": 10,
  "max_steps": 700,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 5,
  "save_steps": 500,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 3.5798886346653696e+17,
  "train_batch_size": 10,
  "trial_name": null,
  "trial_params": null
}