{
  "best_global_step": null,
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 1.0,
  "eval_steps": 500,
  "global_step": 742,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.013490725126475547,
      "grad_norm": 65.45332336425781,
      "learning_rate": 0.00019964061096136568,
      "loss": 22.1843,
      "step": 10
    },
    {
      "epoch": 0.026981450252951095,
      "grad_norm": 6.102770805358887,
      "learning_rate": 0.00019874213836477988,
      "loss": 7.6527,
      "step": 20
    },
    {
      "epoch": 0.04047217537942664,
      "grad_norm": 2.982379913330078,
      "learning_rate": 0.00019784366576819408,
      "loss": 3.4532,
      "step": 30
    },
    {
      "epoch": 0.05396290050590219,
      "grad_norm": 3.4628562927246094,
      "learning_rate": 0.00019694519317160826,
      "loss": 2.4467,
      "step": 40
    },
    {
      "epoch": 0.06745362563237774,
      "grad_norm": 3.0008349418640137,
      "learning_rate": 0.00019604672057502246,
      "loss": 1.6481,
      "step": 50
    },
    {
      "epoch": 0.08094435075885328,
      "grad_norm": 1.767865777015686,
      "learning_rate": 0.00019514824797843666,
      "loss": 1.2119,
      "step": 60
    },
    {
      "epoch": 0.09443507588532883,
      "grad_norm": 3.416968584060669,
      "learning_rate": 0.00019424977538185087,
      "loss": 0.967,
      "step": 70
    },
    {
      "epoch": 0.10792580101180438,
      "grad_norm": 3.718262195587158,
      "learning_rate": 0.00019335130278526507,
      "loss": 0.8783,
      "step": 80
    },
    {
      "epoch": 0.12141652613827993,
      "grad_norm": 3.689655065536499,
      "learning_rate": 0.00019245283018867927,
      "loss": 0.7619,
      "step": 90
    },
    {
      "epoch": 0.13490725126475547,
      "grad_norm": 6.728384017944336,
      "learning_rate": 0.00019155435759209347,
      "loss": 0.7244,
      "step": 100
    },
    {
      "epoch": 0.14839797639123103,
      "grad_norm": 4.880130290985107,
      "learning_rate": 0.00019065588499550765,
      "loss": 0.6931,
      "step": 110
    },
    {
      "epoch": 0.16188870151770657,
      "grad_norm": 10.14767074584961,
      "learning_rate": 0.00018975741239892185,
      "loss": 0.6995,
      "step": 120
    },
    {
      "epoch": 0.17537942664418213,
      "grad_norm": 10.897403717041016,
      "learning_rate": 0.00018885893980233605,
      "loss": 0.69,
      "step": 130
    },
    {
      "epoch": 0.18887015177065766,
      "grad_norm": 7.759915351867676,
      "learning_rate": 0.00018796046720575023,
      "loss": 0.6899,
      "step": 140
    },
    {
      "epoch": 0.20236087689713322,
      "grad_norm": 7.006256580352783,
      "learning_rate": 0.00018706199460916443,
      "loss": 0.6739,
      "step": 150
    },
    {
      "epoch": 0.21585160202360876,
      "grad_norm": 5.777213096618652,
      "learning_rate": 0.00018616352201257863,
      "loss": 0.6731,
      "step": 160
    },
    {
      "epoch": 0.22934232715008432,
      "grad_norm": 10.224111557006836,
      "learning_rate": 0.00018526504941599283,
      "loss": 0.6417,
      "step": 170
    },
    {
      "epoch": 0.24283305227655985,
      "grad_norm": 6.332294464111328,
      "learning_rate": 0.000184366576819407,
      "loss": 0.6452,
      "step": 180
    },
    {
      "epoch": 0.2563237774030354,
      "grad_norm": 3.9472005367279053,
      "learning_rate": 0.0001834681042228212,
      "loss": 0.6271,
      "step": 190
    },
    {
      "epoch": 0.26981450252951095,
      "grad_norm": 6.640835762023926,
      "learning_rate": 0.0001825696316262354,
      "loss": 0.6161,
      "step": 200
    },
    {
      "epoch": 0.28330522765598654,
      "grad_norm": 5.6504316329956055,
      "learning_rate": 0.00018167115902964959,
      "loss": 0.6452,
      "step": 210
    },
    {
      "epoch": 0.29679595278246207,
      "grad_norm": 3.4219472408294678,
      "learning_rate": 0.0001807726864330638,
      "loss": 0.6228,
      "step": 220
    },
    {
      "epoch": 0.3102866779089376,
      "grad_norm": 7.479050636291504,
      "learning_rate": 0.000179874213836478,
      "loss": 0.6035,
      "step": 230
    },
    {
      "epoch": 0.32377740303541314,
      "grad_norm": 6.84128475189209,
      "learning_rate": 0.0001789757412398922,
      "loss": 0.5763,
      "step": 240
    },
    {
      "epoch": 0.3372681281618887,
      "grad_norm": 3.407183885574341,
      "learning_rate": 0.0001780772686433064,
      "loss": 0.5981,
      "step": 250
    },
    {
      "epoch": 0.35075885328836426,
      "grad_norm": 6.634527683258057,
      "learning_rate": 0.0001771787960467206,
      "loss": 0.6313,
      "step": 260
    },
    {
      "epoch": 0.3642495784148398,
      "grad_norm": 6.274175643920898,
      "learning_rate": 0.0001762803234501348,
      "loss": 0.5971,
      "step": 270
    },
    {
      "epoch": 0.3777403035413153,
      "grad_norm": 5.464880466461182,
      "learning_rate": 0.00017538185085354897,
      "loss": 0.629,
      "step": 280
    },
    {
      "epoch": 0.3912310286677909,
      "grad_norm": 7.9421234130859375,
      "learning_rate": 0.00017448337825696318,
      "loss": 0.5604,
      "step": 290
    },
    {
      "epoch": 0.40472175379426645,
      "grad_norm": 6.513714790344238,
      "learning_rate": 0.00017358490566037738,
      "loss": 0.5442,
      "step": 300
    },
    {
      "epoch": 0.418212478920742,
      "grad_norm": 4.856393337249756,
      "learning_rate": 0.00017268643306379155,
      "loss": 0.5444,
      "step": 310
    },
    {
      "epoch": 0.4317032040472175,
      "grad_norm": 7.252712249755859,
      "learning_rate": 0.00017178796046720576,
      "loss": 0.6071,
      "step": 320
    },
    {
      "epoch": 0.4451939291736931,
      "grad_norm": 6.8953328132629395,
      "learning_rate": 0.00017088948787061996,
      "loss": 0.5622,
      "step": 330
    },
    {
      "epoch": 0.45868465430016864,
      "grad_norm": 6.220505237579346,
      "learning_rate": 0.00016999101527403416,
      "loss": 0.6213,
      "step": 340
    },
    {
      "epoch": 0.47217537942664417,
      "grad_norm": 1.9960452318191528,
      "learning_rate": 0.00016909254267744833,
      "loss": 0.5752,
      "step": 350
    },
    {
      "epoch": 0.4856661045531197,
      "grad_norm": 4.8189778327941895,
      "learning_rate": 0.00016819407008086254,
      "loss": 0.5695,
      "step": 360
    },
    {
      "epoch": 0.4991568296795953,
      "grad_norm": 2.157482624053955,
      "learning_rate": 0.00016729559748427674,
      "loss": 0.5818,
      "step": 370
    },
    {
      "epoch": 0.5126475548060708,
      "grad_norm": 6.034278392791748,
      "learning_rate": 0.00016639712488769091,
      "loss": 0.5632,
      "step": 380
    },
    {
      "epoch": 0.5261382799325464,
      "grad_norm": 3.6643030643463135,
      "learning_rate": 0.00016549865229110512,
      "loss": 0.5149,
      "step": 390
    },
    {
      "epoch": 0.5396290050590219,
      "grad_norm": 3.4874682426452637,
      "learning_rate": 0.00016460017969451932,
      "loss": 0.5712,
      "step": 400
    },
    {
      "epoch": 0.5531197301854974,
      "grad_norm": 5.94505500793457,
      "learning_rate": 0.00016370170709793352,
      "loss": 0.5579,
      "step": 410
    },
    {
      "epoch": 0.5666104553119731,
      "grad_norm": 2.6858978271484375,
      "learning_rate": 0.00016280323450134772,
      "loss": 0.5601,
      "step": 420
    },
    {
      "epoch": 0.5801011804384486,
      "grad_norm": 0.5919632315635681,
      "learning_rate": 0.00016190476190476192,
      "loss": 0.5122,
      "step": 430
    },
    {
      "epoch": 0.5935919055649241,
      "grad_norm": 9.723355293273926,
      "learning_rate": 0.00016100628930817613,
      "loss": 0.5408,
      "step": 440
    },
    {
      "epoch": 0.6070826306913997,
      "grad_norm": 3.421534776687622,
      "learning_rate": 0.0001601078167115903,
      "loss": 0.5125,
      "step": 450
    },
    {
      "epoch": 0.6205733558178752,
      "grad_norm": 1.350819706916809,
      "learning_rate": 0.0001592093441150045,
      "loss": 0.5443,
      "step": 460
    },
    {
      "epoch": 0.6340640809443507,
      "grad_norm": 2.9618473052978516,
      "learning_rate": 0.0001583108715184187,
      "loss": 0.5792,
      "step": 470
    },
    {
      "epoch": 0.6475548060708263,
      "grad_norm": 11.16501522064209,
      "learning_rate": 0.00015741239892183288,
      "loss": 0.5499,
      "step": 480
    },
    {
      "epoch": 0.6610455311973018,
      "grad_norm": 2.7122786045074463,
      "learning_rate": 0.00015651392632524708,
      "loss": 0.589,
      "step": 490
    },
    {
      "epoch": 0.6745362563237775,
      "grad_norm": 6.2460408210754395,
      "learning_rate": 0.00015561545372866128,
      "loss": 0.5422,
      "step": 500
    },
    {
      "epoch": 0.688026981450253,
      "grad_norm": 12.943184852600098,
      "learning_rate": 0.0001547169811320755,
      "loss": 0.5431,
      "step": 510
    },
    {
      "epoch": 0.7015177065767285,
      "grad_norm": 2.4169883728027344,
      "learning_rate": 0.00015381850853548966,
      "loss": 0.4984,
      "step": 520
    },
    {
      "epoch": 0.715008431703204,
      "grad_norm": 5.108154773712158,
      "learning_rate": 0.00015292003593890386,
      "loss": 0.4795,
      "step": 530
    },
    {
      "epoch": 0.7284991568296796,
      "grad_norm": 3.268669605255127,
      "learning_rate": 0.00015202156334231807,
      "loss": 0.4961,
      "step": 540
    },
    {
      "epoch": 0.7419898819561551,
      "grad_norm": 1.7926982641220093,
      "learning_rate": 0.00015112309074573224,
      "loss": 0.5339,
      "step": 550
    },
    {
      "epoch": 0.7554806070826307,
      "grad_norm": 3.6382663249969482,
      "learning_rate": 0.00015022461814914644,
      "loss": 0.4855,
      "step": 560
    },
    {
      "epoch": 0.7689713322091062,
      "grad_norm": 8.025957107543945,
      "learning_rate": 0.00014932614555256067,
      "loss": 0.4901,
      "step": 570
    },
    {
      "epoch": 0.7824620573355818,
      "grad_norm": 4.220882892608643,
      "learning_rate": 0.00014842767295597485,
      "loss": 0.5448,
      "step": 580
    },
    {
      "epoch": 0.7959527824620574,
      "grad_norm": 6.42817497253418,
      "learning_rate": 0.00014752920035938905,
      "loss": 0.4969,
      "step": 590
    },
    {
      "epoch": 0.8094435075885329,
      "grad_norm": 4.182610988616943,
      "learning_rate": 0.00014663072776280325,
      "loss": 0.4863,
      "step": 600
    },
    {
      "epoch": 0.8229342327150084,
      "grad_norm": 3.687934637069702,
      "learning_rate": 0.00014573225516621745,
      "loss": 0.5162,
      "step": 610
    },
    {
      "epoch": 0.836424957841484,
      "grad_norm": 3.8240537643432617,
      "learning_rate": 0.00014483378256963163,
      "loss": 0.5089,
      "step": 620
    },
    {
      "epoch": 0.8499156829679595,
      "grad_norm": 3.3823423385620117,
      "learning_rate": 0.00014393530997304583,
      "loss": 0.5447,
      "step": 630
    },
    {
      "epoch": 0.863406408094435,
      "grad_norm": 4.591256618499756,
      "learning_rate": 0.00014303683737646003,
      "loss": 0.5067,
      "step": 640
    },
    {
      "epoch": 0.8768971332209107,
      "grad_norm": 4.414494514465332,
      "learning_rate": 0.0001421383647798742,
      "loss": 0.4858,
      "step": 650
    },
    {
      "epoch": 0.8903878583473862,
      "grad_norm": 3.4649970531463623,
      "learning_rate": 0.0001412398921832884,
      "loss": 0.5243,
      "step": 660
    },
    {
      "epoch": 0.9038785834738617,
      "grad_norm": 4.813696384429932,
      "learning_rate": 0.0001403414195867026,
      "loss": 0.51,
      "step": 670
    },
    {
      "epoch": 0.9173693086003373,
      "grad_norm": 4.597005844116211,
      "learning_rate": 0.00013944294699011681,
      "loss": 0.5079,
      "step": 680
    },
    {
      "epoch": 0.9308600337268128,
      "grad_norm": 4.291003704071045,
      "learning_rate": 0.000138544474393531,
      "loss": 0.4966,
      "step": 690
    },
    {
      "epoch": 0.9443507588532883,
      "grad_norm": 3.8949058055877686,
      "learning_rate": 0.0001376460017969452,
      "loss": 0.5086,
      "step": 700
    },
    {
      "epoch": 0.9578414839797639,
      "grad_norm": 4.685634613037109,
      "learning_rate": 0.0001367475292003594,
      "loss": 0.5005,
      "step": 710
    },
    {
      "epoch": 0.9713322091062394,
      "grad_norm": 3.9277801513671875,
      "learning_rate": 0.0001358490566037736,
      "loss": 0.4759,
      "step": 720
    },
    {
      "epoch": 0.984822934232715,
      "grad_norm": 4.3011345863342285,
      "learning_rate": 0.0001349505840071878,
      "loss": 0.4371,
      "step": 730
    },
    {
      "epoch": 0.9983136593591906,
      "grad_norm": 3.370706796646118,
      "learning_rate": 0.000134052111410602,
      "loss": 0.4811,
      "step": 740
    }
  ],
  "logging_steps": 10,
  "max_steps": 2226,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 3,
  "save_steps": 500,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": false
      },
      "attributes": {}
    }
  },
  "total_flos": 3.690343765927526e+16,
  "train_batch_size": 2,
  "trial_name": null,
  "trial_params": null
}