{
"best_global_step": null,
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 3.0,
"eval_steps": 500,
"global_step": 84,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.03571428571428571,
"grad_norm": 13.10709285736084,
"learning_rate": 0.0,
"loss": 1.4353,
"step": 1
},
{
"epoch": 0.07142857142857142,
"grad_norm": 14.019177436828613,
"learning_rate": 1.111111111111111e-06,
"loss": 1.5287,
"step": 2
},
{
"epoch": 0.10714285714285714,
"grad_norm": 12.805802345275879,
"learning_rate": 2.222222222222222e-06,
"loss": 1.4082,
"step": 3
},
{
"epoch": 0.14285714285714285,
"grad_norm": 11.855799674987793,
"learning_rate": 3.3333333333333333e-06,
"loss": 1.3928,
"step": 4
},
{
"epoch": 0.17857142857142858,
"grad_norm": 9.465292930603027,
"learning_rate": 4.444444444444444e-06,
"loss": 1.3436,
"step": 5
},
{
"epoch": 0.21428571428571427,
"grad_norm": 5.97705602645874,
"learning_rate": 5.555555555555557e-06,
"loss": 1.2292,
"step": 6
},
{
"epoch": 0.25,
"grad_norm": 8.194602966308594,
"learning_rate": 6.666666666666667e-06,
"loss": 1.2037,
"step": 7
},
{
"epoch": 0.2857142857142857,
"grad_norm": 5.747783184051514,
"learning_rate": 7.77777777777778e-06,
"loss": 1.1732,
"step": 8
},
{
"epoch": 0.32142857142857145,
"grad_norm": 3.9221842288970947,
"learning_rate": 8.888888888888888e-06,
"loss": 1.1235,
"step": 9
},
{
"epoch": 0.35714285714285715,
"grad_norm": 3.8269572257995605,
"learning_rate": 1e-05,
"loss": 1.1129,
"step": 10
},
{
"epoch": 0.39285714285714285,
"grad_norm": 2.809392213821411,
"learning_rate": 9.995614150494293e-06,
"loss": 1.0005,
"step": 11
},
{
"epoch": 0.42857142857142855,
"grad_norm": 2.250021457672119,
"learning_rate": 9.982464296247523e-06,
"loss": 0.994,
"step": 12
},
{
"epoch": 0.4642857142857143,
"grad_norm": 2.1464364528656006,
"learning_rate": 9.960573506572391e-06,
"loss": 0.9466,
"step": 13
},
{
"epoch": 0.5,
"grad_norm": 2.0018599033355713,
"learning_rate": 9.929980185352525e-06,
"loss": 0.912,
"step": 14
},
{
"epoch": 0.5357142857142857,
"grad_norm": 2.113518238067627,
"learning_rate": 9.890738003669029e-06,
"loss": 0.9869,
"step": 15
},
{
"epoch": 0.5714285714285714,
"grad_norm": 1.891847848892212,
"learning_rate": 9.842915805643156e-06,
"loss": 0.9347,
"step": 16
},
{
"epoch": 0.6071428571428571,
"grad_norm": 1.9779962301254272,
"learning_rate": 9.786597487660336e-06,
"loss": 0.8248,
"step": 17
},
{
"epoch": 0.6428571428571429,
"grad_norm": 1.9319896697998047,
"learning_rate": 9.721881851187406e-06,
"loss": 0.8767,
"step": 18
},
{
"epoch": 0.6785714285714286,
"grad_norm": 1.9389967918395996,
"learning_rate": 9.648882429441258e-06,
"loss": 0.83,
"step": 19
},
{
"epoch": 0.7142857142857143,
"grad_norm": 1.9008071422576904,
"learning_rate": 9.567727288213005e-06,
"loss": 0.8586,
"step": 20
},
{
"epoch": 0.75,
"grad_norm": 1.8941298723220825,
"learning_rate": 9.478558801197065e-06,
"loss": 0.8273,
"step": 21
},
{
"epoch": 0.7857142857142857,
"grad_norm": 1.9752389192581177,
"learning_rate": 9.381533400219319e-06,
"loss": 0.8654,
"step": 22
},
{
"epoch": 0.8214285714285714,
"grad_norm": 1.9293617010116577,
"learning_rate": 9.276821300802535e-06,
"loss": 0.8637,
"step": 23
},
{
"epoch": 0.8571428571428571,
"grad_norm": 1.7911441326141357,
"learning_rate": 9.164606203550498e-06,
"loss": 0.7845,
"step": 24
},
{
"epoch": 0.8928571428571429,
"grad_norm": 1.8495110273361206,
"learning_rate": 9.045084971874738e-06,
"loss": 0.8131,
"step": 25
},
{
"epoch": 0.9285714285714286,
"grad_norm": 1.720464825630188,
"learning_rate": 8.9184672866292e-06,
"loss": 0.8122,
"step": 26
},
{
"epoch": 0.9642857142857143,
"grad_norm": 1.7889598608016968,
"learning_rate": 8.784975278258783e-06,
"loss": 0.8133,
"step": 27
},
{
"epoch": 1.0,
"grad_norm": 1.7827069759368896,
"learning_rate": 8.644843137107058e-06,
"loss": 0.7941,
"step": 28
},
{
"epoch": 1.0357142857142858,
"grad_norm": 1.6922932863235474,
"learning_rate": 8.498316702566828e-06,
"loss": 0.7506,
"step": 29
},
{
"epoch": 1.0714285714285714,
"grad_norm": 1.6512705087661743,
"learning_rate": 8.345653031794292e-06,
"loss": 0.7839,
"step": 30
},
{
"epoch": 1.1071428571428572,
"grad_norm": 1.6801631450653076,
"learning_rate": 8.18711994874345e-06,
"loss": 0.7373,
"step": 31
},
{
"epoch": 1.1428571428571428,
"grad_norm": 1.7569701671600342,
"learning_rate": 8.022995574311876e-06,
"loss": 0.786,
"step": 32
},
{
"epoch": 1.1785714285714286,
"grad_norm": 1.6871869564056396,
"learning_rate": 7.85356783842216e-06,
"loss": 0.7729,
"step": 33
},
{
"epoch": 1.2142857142857142,
"grad_norm": 1.584979772567749,
"learning_rate": 7.679133974894984e-06,
"loss": 0.7446,
"step": 34
},
{
"epoch": 1.25,
"grad_norm": 1.6062724590301514,
"learning_rate": 7.500000000000001e-06,
"loss": 0.7558,
"step": 35
},
{
"epoch": 1.2857142857142856,
"grad_norm": 1.5438024997711182,
"learning_rate": 7.31648017559931e-06,
"loss": 0.7704,
"step": 36
},
{
"epoch": 1.3214285714285714,
"grad_norm": 1.5629308223724365,
"learning_rate": 7.128896457825364e-06,
"loss": 0.7851,
"step": 37
},
{
"epoch": 1.3571428571428572,
"grad_norm": 1.4814199209213257,
"learning_rate": 6.9375779322605154e-06,
"loss": 0.737,
"step": 38
},
{
"epoch": 1.3928571428571428,
"grad_norm": 1.4256786108016968,
"learning_rate": 6.7428602366090764e-06,
"loss": 0.7183,
"step": 39
},
{
"epoch": 1.4285714285714286,
"grad_norm": 1.3430627584457397,
"learning_rate": 6.545084971874738e-06,
"loss": 0.7083,
"step": 40
},
{
"epoch": 1.4642857142857144,
"grad_norm": 1.3032950162887573,
"learning_rate": 6.344599103076329e-06,
"loss": 0.728,
"step": 41
},
{
"epoch": 1.5,
"grad_norm": 1.2913284301757812,
"learning_rate": 6.141754350553279e-06,
"loss": 0.7554,
"step": 42
},
{
"epoch": 1.5357142857142856,
"grad_norm": 1.3425993919372559,
"learning_rate": 5.936906572928625e-06,
"loss": 0.7853,
"step": 43
},
{
"epoch": 1.5714285714285714,
"grad_norm": 1.1718932390213013,
"learning_rate": 5.730415142812059e-06,
"loss": 0.7505,
"step": 44
},
{
"epoch": 1.6071428571428572,
"grad_norm": 1.1872806549072266,
"learning_rate": 5.522642316338268e-06,
"loss": 0.7819,
"step": 45
},
{
"epoch": 1.6428571428571428,
"grad_norm": 1.1359567642211914,
"learning_rate": 5.3139525976465675e-06,
"loss": 0.7203,
"step": 46
},
{
"epoch": 1.6785714285714286,
"grad_norm": 1.0805718898773193,
"learning_rate": 5.1047120994167855e-06,
"loss": 0.684,
"step": 47
},
{
"epoch": 1.7142857142857144,
"grad_norm": 1.060920238494873,
"learning_rate": 4.895287900583216e-06,
"loss": 0.6947,
"step": 48
},
{
"epoch": 1.75,
"grad_norm": 1.0862159729003906,
"learning_rate": 4.686047402353433e-06,
"loss": 0.7443,
"step": 49
},
{
"epoch": 1.7857142857142856,
"grad_norm": 1.0008074045181274,
"learning_rate": 4.477357683661734e-06,
"loss": 0.6785,
"step": 50
},
{
"epoch": 1.8214285714285714,
"grad_norm": 0.945501446723938,
"learning_rate": 4.269584857187942e-06,
"loss": 0.6388,
"step": 51
},
{
"epoch": 1.8571428571428572,
"grad_norm": 1.052234411239624,
"learning_rate": 4.063093427071376e-06,
"loss": 0.7386,
"step": 52
},
{
"epoch": 1.8928571428571428,
"grad_norm": 0.9897404909133911,
"learning_rate": 3.8582456494467214e-06,
"loss": 0.683,
"step": 53
},
{
"epoch": 1.9285714285714286,
"grad_norm": 1.00469970703125,
"learning_rate": 3.655400896923672e-06,
"loss": 0.7255,
"step": 54
},
{
"epoch": 1.9642857142857144,
"grad_norm": 1.0964341163635254,
"learning_rate": 3.4549150281252635e-06,
"loss": 0.6645,
"step": 55
},
{
"epoch": 2.0,
"grad_norm": 1.000864863395691,
"learning_rate": 3.2571397633909252e-06,
"loss": 0.6886,
"step": 56
},
{
"epoch": 2.0357142857142856,
"grad_norm": 0.9869125485420227,
"learning_rate": 3.0624220677394854e-06,
"loss": 0.6731,
"step": 57
},
{
"epoch": 2.0714285714285716,
"grad_norm": 1.014853835105896,
"learning_rate": 2.871103542174637e-06,
"loss": 0.6943,
"step": 58
},
{
"epoch": 2.107142857142857,
"grad_norm": 0.8944199085235596,
"learning_rate": 2.683519824400693e-06,
"loss": 0.6455,
"step": 59
},
{
"epoch": 2.142857142857143,
"grad_norm": 0.935615599155426,
"learning_rate": 2.5000000000000015e-06,
"loss": 0.704,
"step": 60
},
{
"epoch": 2.1785714285714284,
"grad_norm": 0.9329429268836975,
"learning_rate": 2.320866025105016e-06,
"loss": 0.6564,
"step": 61
},
{
"epoch": 2.2142857142857144,
"grad_norm": 0.958682656288147,
"learning_rate": 2.146432161577842e-06,
"loss": 0.7007,
"step": 62
},
{
"epoch": 2.25,
"grad_norm": 0.9150378704071045,
"learning_rate": 1.977004425688126e-06,
"loss": 0.6407,
"step": 63
},
{
"epoch": 2.2857142857142856,
"grad_norm": 0.9446979761123657,
"learning_rate": 1.8128800512565514e-06,
"loss": 0.6801,
"step": 64
},
{
"epoch": 2.3214285714285716,
"grad_norm": 0.882807731628418,
"learning_rate": 1.6543469682057105e-06,
"loss": 0.6565,
"step": 65
},
{
"epoch": 2.357142857142857,
"grad_norm": 0.8852338790893555,
"learning_rate": 1.5016832974331725e-06,
"loss": 0.6419,
"step": 66
},
{
"epoch": 2.392857142857143,
"grad_norm": 0.8824121952056885,
"learning_rate": 1.3551568628929434e-06,
"loss": 0.6389,
"step": 67
},
{
"epoch": 2.4285714285714284,
"grad_norm": 0.9070232510566711,
"learning_rate": 1.2150247217412186e-06,
"loss": 0.6533,
"step": 68
},
{
"epoch": 2.4642857142857144,
"grad_norm": 0.9323554635047913,
"learning_rate": 1.0815327133708015e-06,
"loss": 0.6615,
"step": 69
},
{
"epoch": 2.5,
"grad_norm": 0.8787508606910706,
"learning_rate": 9.549150281252633e-07,
"loss": 0.6436,
"step": 70
},
{
"epoch": 2.5357142857142856,
"grad_norm": 0.91995769739151,
"learning_rate": 8.353937964495029e-07,
"loss": 0.6884,
"step": 71
},
{
"epoch": 2.571428571428571,
"grad_norm": 0.8974673748016357,
"learning_rate": 7.23178699197467e-07,
"loss": 0.6379,
"step": 72
},
{
"epoch": 2.607142857142857,
"grad_norm": 0.9197289347648621,
"learning_rate": 6.184665997806832e-07,
"loss": 0.6856,
"step": 73
},
{
"epoch": 2.642857142857143,
"grad_norm": 0.8877494931221008,
"learning_rate": 5.214411988029355e-07,
"loss": 0.6893,
"step": 74
},
{
"epoch": 2.678571428571429,
"grad_norm": 0.8887228965759277,
"learning_rate": 4.322727117869951e-07,
"loss": 0.6411,
"step": 75
},
{
"epoch": 2.7142857142857144,
"grad_norm": 0.8678321242332458,
"learning_rate": 3.511175705587433e-07,
"loss": 0.6297,
"step": 76
},
{
"epoch": 2.75,
"grad_norm": 0.842978835105896,
"learning_rate": 2.7811814881259503e-07,
"loss": 0.6186,
"step": 77
},
{
"epoch": 2.7857142857142856,
"grad_norm": 0.8772309422492981,
"learning_rate": 2.134025123396638e-07,
"loss": 0.6486,
"step": 78
},
{
"epoch": 2.821428571428571,
"grad_norm": 0.881015419960022,
"learning_rate": 1.5708419435684463e-07,
"loss": 0.6621,
"step": 79
},
{
"epoch": 2.857142857142857,
"grad_norm": 0.918168306350708,
"learning_rate": 1.0926199633097156e-07,
"loss": 0.6808,
"step": 80
},
{
"epoch": 2.892857142857143,
"grad_norm": 0.8930094838142395,
"learning_rate": 7.001981464747565e-08,
"loss": 0.6678,
"step": 81
},
{
"epoch": 2.928571428571429,
"grad_norm": 0.8652575612068176,
"learning_rate": 3.9426493427611177e-08,
"loss": 0.641,
"step": 82
},
{
"epoch": 2.9642857142857144,
"grad_norm": 0.887458086013794,
"learning_rate": 1.753570375247815e-08,
"loss": 0.6706,
"step": 83
},
{
"epoch": 3.0,
"grad_norm": 0.876089870929718,
"learning_rate": 4.385849505708084e-09,
"loss": 0.6692,
"step": 84
}
],
"logging_steps": 1,
"max_steps": 84,
"num_input_tokens_seen": 0,
"num_train_epochs": 3,
"save_steps": 10,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 9.626059675082424e+17,
"train_batch_size": 1,
"trial_name": null,
"trial_params": null
}