{ "best_global_step": null, "best_metric": null, "best_model_checkpoint": null, "epoch": 5.0, "eval_steps": 500, "global_step": 18715, "is_hyper_param_search": false, "is_local_process_zero": true, "is_world_process_zero": true, "log_history": [ { "epoch": 0, "step": 0, "train/loss_ctc": 23.579652786254883, "train/loss_error": 0.690536379814148, "train/loss_total": 5.268359661102295 }, { "epoch": 0.0002671653753673524, "step": 1, "train/loss_ctc": 19.763473510742188, "train/loss_error": 0.5651726126670837, "train/loss_total": 4.40483283996582 }, { "epoch": 0.0005343307507347048, "step": 2, "train/loss_ctc": 26.64632225036621, "train/loss_error": 0.6836357712745667, "train/loss_total": 5.87617301940918 }, { "epoch": 0.0008014961261020571, "step": 3, "train/loss_ctc": 22.030452728271484, "train/loss_error": 0.8402075171470642, "train/loss_total": 5.078256607055664 }, { "epoch": 0.0010686615014694097, "step": 4, "train/loss_ctc": 22.274478912353516, "train/loss_error": 0.5607916116714478, "train/loss_total": 4.903529167175293 }, { "epoch": 0.0013358268768367619, "step": 5, "train/loss_ctc": 21.60124969482422, "train/loss_error": 0.5817999243736267, "train/loss_total": 4.785689830780029 }, { "epoch": 0.0016029922522041143, "step": 6, "train/loss_ctc": 14.257883071899414, "train/loss_error": 0.5730302929878235, "train/loss_total": 3.3100008964538574 }, { "epoch": 0.0018701576275714667, "step": 7, "train/loss_ctc": 9.943649291992188, "train/loss_error": 0.5274721384048462, "train/loss_total": 2.410707473754883 }, { "epoch": 0.0021373230029388193, "step": 8, "train/loss_ctc": 11.120607376098633, "train/loss_error": 0.6149134039878845, "train/loss_total": 2.716052293777466 }, { "epoch": 0.0024044883783061713, "step": 9, "train/loss_ctc": 9.498275756835938, "train/loss_error": 0.5401654839515686, "train/loss_total": 2.331787586212158 }, { "epoch": 0.0026716537536735237, "grad_norm": 5.861338138580322, "learning_rate": 2.998717606198237e-05, "loss": 4.1085, "step": 10 }, { 
"epoch": 0.0026716537536735237, "step": 10, "train/loss_ctc": 6.130046367645264, "train/loss_error": 0.5693219900131226, "train/loss_total": 1.681466817855835 }, { "epoch": 0.002938819129040876, "step": 11, "train/loss_ctc": 9.318807601928711, "train/loss_error": 0.5835579633712769, "train/loss_total": 2.3306078910827637 }, { "epoch": 0.0032059845044082286, "step": 12, "train/loss_ctc": 5.8735175132751465, "train/loss_error": 0.5275614261627197, "train/loss_total": 1.596752643585205 }, { "epoch": 0.003473149879775581, "step": 13, "train/loss_ctc": 4.825148105621338, "train/loss_error": 0.5179761648178101, "train/loss_total": 1.3794106245040894 }, { "epoch": 0.0037403152551429334, "step": 14, "train/loss_ctc": 4.410122871398926, "train/loss_error": 0.5924773812294006, "train/loss_total": 1.3560065031051636 }, { "epoch": 0.004007480630510286, "step": 15, "train/loss_ctc": 3.8095102310180664, "train/loss_error": 0.5730850100517273, "train/loss_total": 1.2203700542449951 }, { "epoch": 0.004274646005877639, "step": 16, "train/loss_ctc": 4.746579170227051, "train/loss_error": 0.5394807457923889, "train/loss_total": 1.3809003829956055 }, { "epoch": 0.004541811381244991, "step": 17, "train/loss_ctc": 3.979372501373291, "train/loss_error": 0.5693400502204895, "train/loss_total": 1.2513465881347656 }, { "epoch": 0.004808976756612343, "step": 18, "train/loss_ctc": 3.670931816101074, "train/loss_error": 0.5183305144309998, "train/loss_total": 1.1488507986068726 }, { "epoch": 0.005076142131979695, "step": 19, "train/loss_ctc": 3.9605071544647217, "train/loss_error": 0.5386409759521484, "train/loss_total": 1.223014235496521 }, { "epoch": 0.0053433075073470475, "grad_norm": 0.974557101726532, "learning_rate": 2.997274913171253e-05, "loss": 1.4569, "step": 20 }, { "epoch": 0.0053433075073470475, "step": 20, "train/loss_ctc": 4.087126731872559, "train/loss_error": 0.6009541153907776, "train/loss_total": 1.2981886863708496 }, { "epoch": 0.0056104728827144, "step": 21, 
"train/loss_ctc": 3.8882856369018555, "train/loss_error": 0.505187451839447, "train/loss_total": 1.1818071603775024 }, { "epoch": 0.005877638258081752, "step": 22, "train/loss_ctc": 3.789463520050049, "train/loss_error": 0.5258811712265015, "train/loss_total": 1.1785976886749268 }, { "epoch": 0.006144803633449105, "step": 23, "train/loss_ctc": 3.707143783569336, "train/loss_error": 0.542178750038147, "train/loss_total": 1.1751718521118164 }, { "epoch": 0.006411969008816457, "step": 24, "train/loss_ctc": 3.5837812423706055, "train/loss_error": 0.5165364742279053, "train/loss_total": 1.1299854516983032 }, { "epoch": 0.0066791343841838095, "step": 25, "train/loss_ctc": 3.8327572345733643, "train/loss_error": 0.5242528915405273, "train/loss_total": 1.1859537363052368 }, { "epoch": 0.006946299759551162, "step": 26, "train/loss_ctc": 3.4775218963623047, "train/loss_error": 0.46574071049690247, "train/loss_total": 1.0680968761444092 }, { "epoch": 0.007213465134918514, "step": 27, "train/loss_ctc": 3.5252251625061035, "train/loss_error": 0.5198798775672913, "train/loss_total": 1.1209489107131958 }, { "epoch": 0.007480630510285867, "step": 28, "train/loss_ctc": 3.6225483417510986, "train/loss_error": 0.5479358434677124, "train/loss_total": 1.1628583669662476 }, { "epoch": 0.007747795885653219, "step": 29, "train/loss_ctc": 3.6283209323883057, "train/loss_error": 0.535555899143219, "train/loss_total": 1.1541088819503784 }, { "epoch": 0.008014961261020572, "grad_norm": 0.7943634986877441, "learning_rate": 2.995671920919049e-05, "loss": 1.1656, "step": 30 }, { "epoch": 0.008014961261020572, "step": 30, "train/loss_ctc": 3.4719185829162598, "train/loss_error": 0.5028358697891235, "train/loss_total": 1.0966525077819824 }, { "epoch": 0.008282126636387924, "step": 31, "train/loss_ctc": 3.575826644897461, "train/loss_error": 0.5197010636329651, "train/loss_total": 1.1309261322021484 }, { "epoch": 0.008549292011755277, "step": 32, "train/loss_ctc": 3.3957481384277344, 
"train/loss_error": 0.478402704000473, "train/loss_total": 1.0618717670440674 }, { "epoch": 0.008816457387122629, "step": 33, "train/loss_ctc": 3.642833709716797, "train/loss_error": 0.5670740008354187, "train/loss_total": 1.1822259426116943 }, { "epoch": 0.009083622762489982, "step": 34, "train/loss_ctc": 3.608487129211426, "train/loss_error": 0.4984882175922394, "train/loss_total": 1.1204880475997925 }, { "epoch": 0.009350788137857334, "step": 35, "train/loss_ctc": 3.4960732460021973, "train/loss_error": 0.4874913990497589, "train/loss_total": 1.0892077684402466 }, { "epoch": 0.009617953513224685, "step": 36, "train/loss_ctc": 3.4486289024353027, "train/loss_error": 0.49661582708358765, "train/loss_total": 1.0870184898376465 }, { "epoch": 0.009885118888592039, "step": 37, "train/loss_ctc": 3.543088436126709, "train/loss_error": 0.49451127648353577, "train/loss_total": 1.1042267084121704 }, { "epoch": 0.01015228426395939, "step": 38, "train/loss_ctc": 3.5428950786590576, "train/loss_error": 0.5639446973800659, "train/loss_total": 1.1597347259521484 }, { "epoch": 0.010419449639326743, "step": 39, "train/loss_ctc": 3.4609382152557373, "train/loss_error": 0.47191017866134644, "train/loss_total": 1.0697158575057983 }, { "epoch": 0.010686615014694095, "grad_norm": 0.7906984090805054, "learning_rate": 2.994068928666845e-05, "loss": 1.1102, "step": 40 }, { "epoch": 0.010686615014694095, "step": 40, "train/loss_ctc": 3.8613908290863037, "train/loss_error": 0.5604652166366577, "train/loss_total": 1.2206504344940186 }, { "epoch": 0.010953780390061448, "step": 41, "train/loss_ctc": 3.539379596710205, "train/loss_error": 0.5422731637954712, "train/loss_total": 1.14169442653656 }, { "epoch": 0.0112209457654288, "step": 42, "train/loss_ctc": 3.505049467086792, "train/loss_error": 0.4906432330608368, "train/loss_total": 1.09352445602417 }, { "epoch": 0.011488111140796153, "step": 43, "train/loss_ctc": 3.6074419021606445, "train/loss_error": 0.5363548398017883, 
"train/loss_total": 1.1505722999572754 }, { "epoch": 0.011755276516163505, "step": 44, "train/loss_ctc": 3.5508663654327393, "train/loss_error": 0.48606687784194946, "train/loss_total": 1.0990267992019653 }, { "epoch": 0.012022441891530858, "step": 45, "train/loss_ctc": 3.538586378097534, "train/loss_error": 0.5394644737243652, "train/loss_total": 1.1392889022827148 }, { "epoch": 0.01228960726689821, "step": 46, "train/loss_ctc": 3.6124095916748047, "train/loss_error": 0.5771356225013733, "train/loss_total": 1.1841903924942017 }, { "epoch": 0.012556772642265563, "step": 47, "train/loss_ctc": 3.5389695167541504, "train/loss_error": 0.5210414528846741, "train/loss_total": 1.1246271133422852 }, { "epoch": 0.012823938017632914, "step": 48, "train/loss_ctc": 3.56662654876709, "train/loss_error": 0.5357038378715515, "train/loss_total": 1.1418883800506592 }, { "epoch": 0.013091103393000268, "step": 49, "train/loss_ctc": 3.3780035972595215, "train/loss_error": 0.5149374008178711, "train/loss_total": 1.0875506401062012 }, { "epoch": 0.013358268768367619, "grad_norm": 0.24946783483028412, "learning_rate": 2.992465936414641e-05, "loss": 1.1383, "step": 50 }, { "epoch": 0.013358268768367619, "step": 50, "train/loss_ctc": 3.5557427406311035, "train/loss_error": 0.5196346640586853, "train/loss_total": 1.1268563270568848 }, { "epoch": 0.013625434143734972, "step": 51, "train/loss_ctc": 3.4858362674713135, "train/loss_error": 0.4634363055229187, "train/loss_total": 1.0679163932800293 }, { "epoch": 0.013892599519102324, "step": 52, "train/loss_ctc": 3.5549068450927734, "train/loss_error": 0.49053531885147095, "train/loss_total": 1.1034096479415894 }, { "epoch": 0.014159764894469677, "step": 53, "train/loss_ctc": 3.5628182888031006, "train/loss_error": 0.5629624128341675, "train/loss_total": 1.162933588027954 }, { "epoch": 0.014426930269837029, "step": 54, "train/loss_ctc": 3.578752040863037, "train/loss_error": 0.4947643280029297, "train/loss_total": 1.111561894416809 }, { "epoch": 
0.014694095645204382, "step": 55, "train/loss_ctc": 3.505037307739258, "train/loss_error": 0.543353259563446, "train/loss_total": 1.1356900930404663 }, { "epoch": 0.014961261020571734, "step": 56, "train/loss_ctc": 3.366260051727295, "train/loss_error": 0.5055004358291626, "train/loss_total": 1.0776524543762207 }, { "epoch": 0.015228426395939087, "step": 57, "train/loss_ctc": 3.62150239944458, "train/loss_error": 0.4961734712123871, "train/loss_total": 1.1212393045425415 }, { "epoch": 0.015495591771306438, "step": 58, "train/loss_ctc": 3.764528751373291, "train/loss_error": 0.562924861907959, "train/loss_total": 1.2032456398010254 }, { "epoch": 0.01576275714667379, "step": 59, "train/loss_ctc": 3.4985270500183105, "train/loss_error": 0.5397648215293884, "train/loss_total": 1.1315172910690308 }, { "epoch": 0.016029922522041145, "grad_norm": 0.6255154013633728, "learning_rate": 2.9908629441624367e-05, "loss": 1.1242, "step": 60 }, { "epoch": 0.016029922522041145, "step": 60, "train/loss_ctc": 3.6566848754882812, "train/loss_error": 0.5417408347129822, "train/loss_total": 1.1647297143936157 }, { "epoch": 0.016297087897408497, "step": 61, "train/loss_ctc": 3.6376171112060547, "train/loss_error": 0.5975580215454102, "train/loss_total": 1.205569863319397 }, { "epoch": 0.016564253272775848, "step": 62, "train/loss_ctc": 3.392533302307129, "train/loss_error": 0.5219561457633972, "train/loss_total": 1.0960716009140015 }, { "epoch": 0.0168314186481432, "step": 63, "train/loss_ctc": 3.538421630859375, "train/loss_error": 0.5938800573348999, "train/loss_total": 1.182788372039795 }, { "epoch": 0.017098584023510555, "step": 64, "train/loss_ctc": 3.629631519317627, "train/loss_error": 0.5637999773025513, "train/loss_total": 1.1769663095474243 }, { "epoch": 0.017365749398877906, "step": 65, "train/loss_ctc": 3.4141178131103516, "train/loss_error": 0.4998956322669983, "train/loss_total": 1.082740068435669 }, { "epoch": 0.017632914774245258, "step": 66, "train/loss_ctc": 
4.188981533050537, "train/loss_error": 0.5102899074554443, "train/loss_total": 1.2460283041000366 }, { "epoch": 0.01790008014961261, "step": 67, "train/loss_ctc": 3.6562957763671875, "train/loss_error": 0.577817440032959, "train/loss_total": 1.1935131549835205 }, { "epoch": 0.018167245524979964, "step": 68, "train/loss_ctc": 3.5138540267944336, "train/loss_error": 0.5811274647712708, "train/loss_total": 1.167672872543335 }, { "epoch": 0.018434410900347316, "step": 69, "train/loss_ctc": 3.785607099533081, "train/loss_error": 0.4839093089103699, "train/loss_total": 1.1442489624023438 }, { "epoch": 0.018701576275714667, "grad_norm": 1.3638681173324585, "learning_rate": 2.9892599519102325e-05, "loss": 1.166, "step": 70 }, { "epoch": 0.018701576275714667, "step": 70, "train/loss_ctc": 3.4296350479125977, "train/loss_error": 0.46665868163108826, "train/loss_total": 1.0592539310455322 }, { "epoch": 0.01896874165108202, "step": 71, "train/loss_ctc": 3.5612378120422363, "train/loss_error": 0.5867034196853638, "train/loss_total": 1.181610345840454 }, { "epoch": 0.01923590702644937, "step": 72, "train/loss_ctc": 3.7426657676696777, "train/loss_error": 0.5349392890930176, "train/loss_total": 1.1764845848083496 }, { "epoch": 0.019503072401816725, "step": 73, "train/loss_ctc": 3.6271653175354004, "train/loss_error": 0.644265353679657, "train/loss_total": 1.2408453226089478 }, { "epoch": 0.019770237777184077, "step": 74, "train/loss_ctc": 3.7316951751708984, "train/loss_error": 0.5419251322746277, "train/loss_total": 1.1798791885375977 }, { "epoch": 0.02003740315255143, "step": 75, "train/loss_ctc": 3.657512664794922, "train/loss_error": 0.5600537061691284, "train/loss_total": 1.179545521736145 }, { "epoch": 0.02030456852791878, "step": 76, "train/loss_ctc": 3.4750213623046875, "train/loss_error": 0.4831877648830414, "train/loss_total": 1.0815545320510864 }, { "epoch": 0.020571733903286135, "step": 77, "train/loss_ctc": 3.4520182609558105, "train/loss_error": 0.6341646909713745, 
"train/loss_total": 1.1977354288101196 }, { "epoch": 0.020838899278653487, "step": 78, "train/loss_ctc": 3.5809507369995117, "train/loss_error": 0.4845244288444519, "train/loss_total": 1.1038097143173218 }, { "epoch": 0.021106064654020838, "step": 79, "train/loss_ctc": 3.7638721466064453, "train/loss_error": 0.510709822177887, "train/loss_total": 1.1613422632217407 }, { "epoch": 0.02137323002938819, "grad_norm": 1.5960553884506226, "learning_rate": 2.9876569596580283e-05, "loss": 1.1562, "step": 80 }, { "epoch": 0.02137323002938819, "step": 80, "train/loss_ctc": 3.636615514755249, "train/loss_error": 0.5526120662689209, "train/loss_total": 1.1694127321243286 }, { "epoch": 0.021640395404755545, "step": 81, "train/loss_ctc": 3.5734193325042725, "train/loss_error": 0.5038661360740662, "train/loss_total": 1.117776870727539 }, { "epoch": 0.021907560780122896, "step": 82, "train/loss_ctc": 3.8450546264648438, "train/loss_error": 0.5295800566673279, "train/loss_total": 1.192674994468689 }, { "epoch": 0.022174726155490248, "step": 83, "train/loss_ctc": 3.4117636680603027, "train/loss_error": 0.4837034344673157, "train/loss_total": 1.0693154335021973 }, { "epoch": 0.0224418915308576, "step": 84, "train/loss_ctc": 3.5501654148101807, "train/loss_error": 0.5267595648765564, "train/loss_total": 1.1314407587051392 }, { "epoch": 0.022709056906224954, "step": 85, "train/loss_ctc": 3.517728567123413, "train/loss_error": 0.5320234894752502, "train/loss_total": 1.1291645765304565 }, { "epoch": 0.022976222281592306, "step": 86, "train/loss_ctc": 3.4555487632751465, "train/loss_error": 0.4860055446624756, "train/loss_total": 1.0799142122268677 }, { "epoch": 0.023243387656959658, "step": 87, "train/loss_ctc": 3.796821117401123, "train/loss_error": 0.5387651324272156, "train/loss_total": 1.1903764009475708 }, { "epoch": 0.02351055303232701, "step": 88, "train/loss_ctc": 3.6435470581054688, "train/loss_error": 0.5348666310310364, "train/loss_total": 1.1566027402877808 }, { "epoch": 
0.023777718407694364, "step": 89, "train/loss_ctc": 3.7075023651123047, "train/loss_error": 0.5242215991020203, "train/loss_total": 1.1608778238296509 }, { "epoch": 0.024044883783061716, "grad_norm": 0.9390844702720642, "learning_rate": 2.986053967405824e-05, "loss": 1.1398, "step": 90 }, { "epoch": 0.024044883783061716, "step": 90, "train/loss_ctc": 3.598820209503174, "train/loss_error": 0.537837564945221, "train/loss_total": 1.1500340700149536 }, { "epoch": 0.024312049158429067, "step": 91, "train/loss_ctc": 3.457665205001831, "train/loss_error": 0.5591683983802795, "train/loss_total": 1.138867735862732 }, { "epoch": 0.02457921453379642, "step": 92, "train/loss_ctc": 3.4713616371154785, "train/loss_error": 0.5129568576812744, "train/loss_total": 1.104637861251831 }, { "epoch": 0.024846379909163774, "step": 93, "train/loss_ctc": 3.530562162399292, "train/loss_error": 0.46588319540023804, "train/loss_total": 1.0788190364837646 }, { "epoch": 0.025113545284531125, "step": 94, "train/loss_ctc": 3.612527847290039, "train/loss_error": 0.580775260925293, "train/loss_total": 1.1871258020401 }, { "epoch": 0.025380710659898477, "step": 95, "train/loss_ctc": 3.5255508422851562, "train/loss_error": 0.5039080381393433, "train/loss_total": 1.1082366704940796 }, { "epoch": 0.02564787603526583, "step": 96, "train/loss_ctc": 3.5156188011169434, "train/loss_error": 0.5475828647613525, "train/loss_total": 1.1411900520324707 }, { "epoch": 0.025915041410633183, "step": 97, "train/loss_ctc": 3.696624279022217, "train/loss_error": 0.49038463830947876, "train/loss_total": 1.1316325664520264 }, { "epoch": 0.026182206786000535, "step": 98, "train/loss_ctc": 3.5399744510650635, "train/loss_error": 0.5328061580657959, "train/loss_total": 1.1342397928237915 }, { "epoch": 0.026449372161367887, "step": 99, "train/loss_ctc": 3.4388699531555176, "train/loss_error": 0.514761745929718, "train/loss_total": 1.099583387374878 }, { "epoch": 0.026716537536735238, "grad_norm": 0.36399292945861816, 
"learning_rate": 2.9844509751536203e-05, "loss": 1.1274, "step": 100 }, { "epoch": 0.026716537536735238, "step": 100, "train/loss_ctc": 3.549891471862793, "train/loss_error": 0.449166864156723, "train/loss_total": 1.0693117380142212 }, { "epoch": 0.026983702912102593, "step": 101, "train/loss_ctc": 3.6043097972869873, "train/loss_error": 0.5303941369056702, "train/loss_total": 1.1451772451400757 }, { "epoch": 0.027250868287469945, "step": 102, "train/loss_ctc": 3.4384522438049316, "train/loss_error": 0.5596320033073425, "train/loss_total": 1.1353960037231445 }, { "epoch": 0.027518033662837296, "step": 103, "train/loss_ctc": 3.371306896209717, "train/loss_error": 0.4518708884716034, "train/loss_total": 1.035758137702942 }, { "epoch": 0.027785199038204648, "step": 104, "train/loss_ctc": 3.420140266418457, "train/loss_error": 0.5736973285675049, "train/loss_total": 1.1429859399795532 }, { "epoch": 0.028052364413572, "step": 105, "train/loss_ctc": 3.526637077331543, "train/loss_error": 0.528805136680603, "train/loss_total": 1.1283715963363647 }, { "epoch": 0.028319529788939354, "step": 106, "train/loss_ctc": 3.4872636795043945, "train/loss_error": 0.5596810579299927, "train/loss_total": 1.1451976299285889 }, { "epoch": 0.028586695164306706, "step": 107, "train/loss_ctc": 3.536341667175293, "train/loss_error": 0.5000301003456116, "train/loss_total": 1.1072924137115479 }, { "epoch": 0.028853860539674057, "step": 108, "train/loss_ctc": 3.791851043701172, "train/loss_error": 0.6075926423072815, "train/loss_total": 1.2444443702697754 }, { "epoch": 0.02912102591504141, "step": 109, "train/loss_ctc": 3.3924102783203125, "train/loss_error": 0.49386975169181824, "train/loss_total": 1.073577880859375 }, { "epoch": 0.029388191290408764, "grad_norm": 0.3837027847766876, "learning_rate": 2.982847982901416e-05, "loss": 1.1228, "step": 110 }, { "epoch": 0.029388191290408764, "step": 110, "train/loss_ctc": 3.5467939376831055, "train/loss_error": 0.5725910663604736, "train/loss_total": 
1.1674317121505737 }, { "epoch": 0.029655356665776116, "step": 111, "train/loss_ctc": 3.4484405517578125, "train/loss_error": 0.5285395979881287, "train/loss_total": 1.1125198602676392 }, { "epoch": 0.029922522041143467, "step": 112, "train/loss_ctc": 3.4930827617645264, "train/loss_error": 0.47161272168159485, "train/loss_total": 1.075906753540039 }, { "epoch": 0.03018968741651082, "step": 113, "train/loss_ctc": 3.5420689582824707, "train/loss_error": 0.5281211733818054, "train/loss_total": 1.1309107542037964 }, { "epoch": 0.030456852791878174, "step": 114, "train/loss_ctc": 3.572190999984741, "train/loss_error": 0.5372090935707092, "train/loss_total": 1.1442054510116577 }, { "epoch": 0.030724018167245525, "step": 115, "train/loss_ctc": 3.4538471698760986, "train/loss_error": 0.6055251359939575, "train/loss_total": 1.17518949508667 }, { "epoch": 0.030991183542612877, "step": 116, "train/loss_ctc": 3.4971468448638916, "train/loss_error": 0.534327507019043, "train/loss_total": 1.1268913745880127 }, { "epoch": 0.03125834891798023, "step": 117, "train/loss_ctc": 3.4732861518859863, "train/loss_error": 0.5535038113594055, "train/loss_total": 1.1374603509902954 }, { "epoch": 0.03152551429334758, "step": 118, "train/loss_ctc": 3.6616721153259277, "train/loss_error": 0.5199458003044128, "train/loss_total": 1.1482911109924316 }, { "epoch": 0.031792679668714935, "step": 119, "train/loss_ctc": 3.4494056701660156, "train/loss_error": 0.5015482902526855, "train/loss_total": 1.0911197662353516 }, { "epoch": 0.03205984504408229, "grad_norm": 0.3249819576740265, "learning_rate": 2.981244990649212e-05, "loss": 1.131, "step": 120 }, { "epoch": 0.03205984504408229, "step": 120, "train/loss_ctc": 3.433885097503662, "train/loss_error": 0.5112344622612, "train/loss_total": 1.0957646369934082 }, { "epoch": 0.03232701041944964, "step": 121, "train/loss_ctc": 3.4629406929016113, "train/loss_error": 0.5738487839698792, "train/loss_total": 1.1516671180725098 }, { "epoch": 
0.03259417579481699, "step": 122, "train/loss_ctc": 3.4896035194396973, "train/loss_error": 0.5231990218162537, "train/loss_total": 1.1164799928665161 }, { "epoch": 0.03286134117018434, "step": 123, "train/loss_ctc": 3.5244884490966797, "train/loss_error": 0.5691642761230469, "train/loss_total": 1.1602290868759155 }, { "epoch": 0.033128506545551696, "step": 124, "train/loss_ctc": 3.525134563446045, "train/loss_error": 0.480530321598053, "train/loss_total": 1.0894511938095093 }, { "epoch": 0.03339567192091905, "step": 125, "train/loss_ctc": 3.6376147270202637, "train/loss_error": 0.5403177738189697, "train/loss_total": 1.1597771644592285 }, { "epoch": 0.0336628372962864, "step": 126, "train/loss_ctc": 3.407804489135742, "train/loss_error": 0.5868121385574341, "train/loss_total": 1.1510106325149536 }, { "epoch": 0.033930002671653754, "step": 127, "train/loss_ctc": 3.6018922328948975, "train/loss_error": 0.4745105803012848, "train/loss_total": 1.0999869108200073 }, { "epoch": 0.03419716804702111, "step": 128, "train/loss_ctc": 3.4653122425079346, "train/loss_error": 0.5199177265167236, "train/loss_total": 1.1089966297149658 }, { "epoch": 0.03446433342238846, "step": 129, "train/loss_ctc": 3.511064052581787, "train/loss_error": 0.5503098368644714, "train/loss_total": 1.1424607038497925 }, { "epoch": 0.03473149879775581, "grad_norm": 0.386899471282959, "learning_rate": 2.9796419983970077e-05, "loss": 1.1276, "step": 130 }, { "epoch": 0.03473149879775581, "step": 130, "train/loss_ctc": 3.6272878646850586, "train/loss_error": 0.4423353374004364, "train/loss_total": 1.0793259143829346 }, { "epoch": 0.03499866417312316, "step": 131, "train/loss_ctc": 3.5426740646362305, "train/loss_error": 0.5340761542320251, "train/loss_total": 1.1357958316802979 }, { "epoch": 0.035265829548490515, "step": 132, "train/loss_ctc": 3.5667781829833984, "train/loss_error": 0.5220909118652344, "train/loss_total": 1.131028413772583 }, { "epoch": 0.03553299492385787, "step": 133, "train/loss_ctc": 
3.460174083709717, "train/loss_error": 0.5810573101043701, "train/loss_total": 1.1568807363510132 }, { "epoch": 0.03580016029922522, "step": 134, "train/loss_ctc": 3.503749132156372, "train/loss_error": 0.48043933510780334, "train/loss_total": 1.0851012468338013 }, { "epoch": 0.036067325674592574, "step": 135, "train/loss_ctc": 3.549304246902466, "train/loss_error": 0.47393110394477844, "train/loss_total": 1.089005708694458 }, { "epoch": 0.03633449104995993, "step": 136, "train/loss_ctc": 3.40805721282959, "train/loss_error": 0.5151451230049133, "train/loss_total": 1.0937275886535645 }, { "epoch": 0.03660165642532728, "step": 137, "train/loss_ctc": 3.3921446800231934, "train/loss_error": 0.4968811571598053, "train/loss_total": 1.0759339332580566 }, { "epoch": 0.03686882180069463, "step": 138, "train/loss_ctc": 3.43095064163208, "train/loss_error": 0.5061841607093811, "train/loss_total": 1.091137409210205 }, { "epoch": 0.03713598717606198, "step": 139, "train/loss_ctc": 3.4148285388946533, "train/loss_error": 0.5597527623176575, "train/loss_total": 1.1307679414749146 }, { "epoch": 0.037403152551429335, "grad_norm": 0.3679502308368683, "learning_rate": 2.9780390061448035e-05, "loss": 1.1069, "step": 140 }, { "epoch": 0.037403152551429335, "step": 140, "train/loss_ctc": 3.383633852005005, "train/loss_error": 0.4982079565525055, "train/loss_total": 1.0752930641174316 }, { "epoch": 0.03767031792679669, "step": 141, "train/loss_ctc": 3.2839651107788086, "train/loss_error": 0.4843314588069916, "train/loss_total": 1.0442582368850708 }, { "epoch": 0.03793748330216404, "step": 142, "train/loss_ctc": 3.603933811187744, "train/loss_error": 0.5693323612213135, "train/loss_total": 1.1762526035308838 }, { "epoch": 0.03820464867753139, "step": 143, "train/loss_ctc": 3.342388868331909, "train/loss_error": 0.4280373156070709, "train/loss_total": 1.0109076499938965 }, { "epoch": 0.03847181405289874, "step": 144, "train/loss_ctc": 3.48234486579895, "train/loss_error": 
0.4716533422470093, "train/loss_total": 1.073791742324829 }, { "epoch": 0.038738979428266096, "step": 145, "train/loss_ctc": 3.3900322914123535, "train/loss_error": 0.5443731546401978, "train/loss_total": 1.1135050058364868 }, { "epoch": 0.03900614480363345, "step": 146, "train/loss_ctc": 3.275237560272217, "train/loss_error": 0.5574447512626648, "train/loss_total": 1.1010034084320068 }, { "epoch": 0.0392733101790008, "step": 147, "train/loss_ctc": 3.4144585132598877, "train/loss_error": 0.4611259400844574, "train/loss_total": 1.0517925024032593 }, { "epoch": 0.039540475554368154, "step": 148, "train/loss_ctc": 3.2550647258758545, "train/loss_error": 0.45114412903785706, "train/loss_total": 1.0119283199310303 }, { "epoch": 0.03980764092973551, "step": 149, "train/loss_ctc": 3.19511079788208, "train/loss_error": 0.4932681918144226, "train/loss_total": 1.0336366891860962 }, { "epoch": 0.04007480630510286, "grad_norm": 0.3963142931461334, "learning_rate": 2.9764360138925993e-05, "loss": 1.0692, "step": 150 }, { "epoch": 0.04007480630510286, "step": 150, "train/loss_ctc": 3.102329730987549, "train/loss_error": 0.5609411001205444, "train/loss_total": 1.0692188739776611 }, { "epoch": 0.04034197168047021, "step": 151, "train/loss_ctc": 3.0372047424316406, "train/loss_error": 0.5585846304893494, "train/loss_total": 1.0543086528778076 }, { "epoch": 0.04060913705583756, "step": 152, "train/loss_ctc": 3.1109886169433594, "train/loss_error": 0.6072953939437866, "train/loss_total": 1.1080341339111328 }, { "epoch": 0.040876302431204915, "step": 153, "train/loss_ctc": 2.9020121097564697, "train/loss_error": 0.5117300152778625, "train/loss_total": 0.9897864460945129 }, { "epoch": 0.04114346780657227, "step": 154, "train/loss_ctc": 2.9552464485168457, "train/loss_error": 0.48642706871032715, "train/loss_total": 0.9801909923553467 }, { "epoch": 0.04141063318193962, "step": 155, "train/loss_ctc": 2.8676857948303223, "train/loss_error": 0.5532346367835999, "train/loss_total": 
1.0161248445510864 }, { "epoch": 0.04167779855730697, "step": 156, "train/loss_ctc": 2.5248403549194336, "train/loss_error": 0.4642040729522705, "train/loss_total": 0.8763313293457031 }, { "epoch": 0.04194496393267433, "step": 157, "train/loss_ctc": 2.8133134841918945, "train/loss_error": 0.4634579122066498, "train/loss_total": 0.9334290623664856 }, { "epoch": 0.042212129308041677, "step": 158, "train/loss_ctc": 2.370234966278076, "train/loss_error": 0.5235790610313416, "train/loss_total": 0.8929102420806885 }, { "epoch": 0.04247929468340903, "step": 159, "train/loss_ctc": 2.505162239074707, "train/loss_error": 0.5224296450614929, "train/loss_total": 0.9189761877059937 }, { "epoch": 0.04274646005877638, "grad_norm": 0.8853405117988586, "learning_rate": 2.9748330216403955e-05, "loss": 0.9839, "step": 160 }, { "epoch": 0.04274646005877638, "step": 160, "train/loss_ctc": 2.441182851791382, "train/loss_error": 0.5193716287612915, "train/loss_total": 0.9037338495254517 }, { "epoch": 0.043013625434143735, "step": 161, "train/loss_ctc": 2.0080647468566895, "train/loss_error": 0.5285681486129761, "train/loss_total": 0.8244674801826477 }, { "epoch": 0.04328079080951109, "step": 162, "train/loss_ctc": 1.9686481952667236, "train/loss_error": 0.4837800860404968, "train/loss_total": 0.7807537317276001 }, { "epoch": 0.04354795618487844, "step": 163, "train/loss_ctc": 1.8096009492874146, "train/loss_error": 0.5275772213935852, "train/loss_total": 0.78398197889328 }, { "epoch": 0.04381512156024579, "step": 164, "train/loss_ctc": 1.504805564880371, "train/loss_error": 0.6130049228668213, "train/loss_total": 0.7913650274276733 }, { "epoch": 0.04408228693561315, "step": 165, "train/loss_ctc": 2.4139246940612793, "train/loss_error": 0.4913724958896637, "train/loss_total": 0.8758829832077026 }, { "epoch": 0.044349452310980496, "step": 166, "train/loss_ctc": 1.995551347732544, "train/loss_error": 0.49180760979652405, "train/loss_total": 0.7925564050674438 }, { "epoch": 
0.04461661768634785, "step": 167, "train/loss_ctc": 1.672912359237671, "train/loss_error": 0.5316190719604492, "train/loss_total": 0.7598777413368225 }, { "epoch": 0.0448837830617152, "step": 168, "train/loss_ctc": 1.8145326375961304, "train/loss_error": 0.5468171238899231, "train/loss_total": 0.8003602623939514 }, { "epoch": 0.045150948437082554, "step": 169, "train/loss_ctc": 1.4627577066421509, "train/loss_error": 0.5680135488510132, "train/loss_total": 0.7469624280929565 }, { "epoch": 0.04541811381244991, "grad_norm": 0.7266998887062073, "learning_rate": 2.9732300293881913e-05, "loss": 0.806, "step": 170 }, { "epoch": 0.04541811381244991, "step": 170, "train/loss_ctc": 1.6491644382476807, "train/loss_error": 0.5379371643066406, "train/loss_total": 0.7601826190948486 }, { "epoch": 0.04568527918781726, "step": 171, "train/loss_ctc": 2.311734199523926, "train/loss_error": 0.5878498554229736, "train/loss_total": 0.9326267242431641 }, { "epoch": 0.04595244456318461, "step": 172, "train/loss_ctc": 1.0288364887237549, "train/loss_error": 0.5607715249061584, "train/loss_total": 0.6543844938278198 }, { "epoch": 0.04621960993855197, "step": 173, "train/loss_ctc": 1.7972004413604736, "train/loss_error": 0.47600823640823364, "train/loss_total": 0.7402466535568237 }, { "epoch": 0.046486775313919315, "step": 174, "train/loss_ctc": 1.9274792671203613, "train/loss_error": 0.5621200203895569, "train/loss_total": 0.8351919054985046 }, { "epoch": 0.04675394068928667, "step": 175, "train/loss_ctc": 1.4972615242004395, "train/loss_error": 0.543163537979126, "train/loss_total": 0.7339831590652466 }, { "epoch": 0.04702110606465402, "step": 176, "train/loss_ctc": 1.499396800994873, "train/loss_error": 0.5115652084350586, "train/loss_total": 0.7091315388679504 }, { "epoch": 0.04728827144002137, "step": 177, "train/loss_ctc": 1.0959560871124268, "train/loss_error": 0.5161018967628479, "train/loss_total": 0.6320727467536926 }, { "epoch": 0.04755543681538873, "step": 178, 
"train/loss_ctc": 1.0420796871185303, "train/loss_error": 0.4366385042667389, "train/loss_total": 0.5577267408370972 }, { "epoch": 0.047822602190756076, "step": 179, "train/loss_ctc": 1.3144903182983398, "train/loss_error": 0.6022613644599915, "train/loss_total": 0.7447071671485901 }, { "epoch": 0.04808976756612343, "grad_norm": 1.3759374618530273, "learning_rate": 2.971627037135987e-05, "loss": 0.73, "step": 180 }, { "epoch": 0.04808976756612343, "step": 180, "train/loss_ctc": 1.1131750345230103, "train/loss_error": 0.5225542187690735, "train/loss_total": 0.6406784057617188 }, { "epoch": 0.04835693294149078, "step": 181, "train/loss_ctc": 1.3325707912445068, "train/loss_error": 0.5694946646690369, "train/loss_total": 0.7221099138259888 }, { "epoch": 0.048624098316858134, "step": 182, "train/loss_ctc": 1.5583739280700684, "train/loss_error": 0.4761163890361786, "train/loss_total": 0.6925679445266724 }, { "epoch": 0.04889126369222549, "step": 183, "train/loss_ctc": 1.6030176877975464, "train/loss_error": 0.5584916472434998, "train/loss_total": 0.767396867275238 }, { "epoch": 0.04915842906759284, "step": 184, "train/loss_ctc": 2.016284465789795, "train/loss_error": 0.5474700331687927, "train/loss_total": 0.8412328958511353 }, { "epoch": 0.04942559444296019, "step": 185, "train/loss_ctc": 1.8454320430755615, "train/loss_error": 0.5356673002243042, "train/loss_total": 0.7976202964782715 }, { "epoch": 0.04969275981832755, "step": 186, "train/loss_ctc": 1.588592529296875, "train/loss_error": 0.511967658996582, "train/loss_total": 0.7272926568984985 }, { "epoch": 0.049959925193694896, "step": 187, "train/loss_ctc": 0.9205796718597412, "train/loss_error": 0.5188751816749573, "train/loss_total": 0.599216103553772 }, { "epoch": 0.05022709056906225, "step": 188, "train/loss_ctc": 1.4379374980926514, "train/loss_error": 0.4928228259086609, "train/loss_total": 0.6818457841873169 }, { "epoch": 0.0504942559444296, "step": 189, "train/loss_ctc": 1.300125002861023, 
"train/loss_error": 0.4936739504337311, "train/loss_total": 0.6549641489982605 }, { "epoch": 0.050761421319796954, "grad_norm": 1.5486042499542236, "learning_rate": 2.9700240448837832e-05, "loss": 0.7125, "step": 190 }, { "epoch": 0.050761421319796954, "step": 190, "train/loss_ctc": 1.2063939571380615, "train/loss_error": 0.601223349571228, "train/loss_total": 0.7222574949264526 }, { "epoch": 0.05102858669516431, "step": 191, "train/loss_ctc": 0.9409698247909546, "train/loss_error": 0.5268810391426086, "train/loss_total": 0.6096987724304199 }, { "epoch": 0.05129575207053166, "step": 192, "train/loss_ctc": 1.5801024436950684, "train/loss_error": 0.48847246170043945, "train/loss_total": 0.7067984342575073 }, { "epoch": 0.05156291744589901, "step": 193, "train/loss_ctc": 1.8505010604858398, "train/loss_error": 0.538374125957489, "train/loss_total": 0.800799548625946 }, { "epoch": 0.05183008282126637, "step": 194, "train/loss_ctc": 1.2405331134796143, "train/loss_error": 0.5262033343315125, "train/loss_total": 0.6690692901611328 }, { "epoch": 0.052097248196633715, "step": 195, "train/loss_ctc": 0.9005036354064941, "train/loss_error": 0.5078504085540771, "train/loss_total": 0.5863810777664185 }, { "epoch": 0.05236441357200107, "step": 196, "train/loss_ctc": 1.3369706869125366, "train/loss_error": 0.5172492265701294, "train/loss_total": 0.6811935305595398 }, { "epoch": 0.05263157894736842, "step": 197, "train/loss_ctc": 1.4663338661193848, "train/loss_error": 0.4967285990715027, "train/loss_total": 0.6906496286392212 }, { "epoch": 0.05289874432273577, "step": 198, "train/loss_ctc": 1.2832765579223633, "train/loss_error": 0.5079455375671387, "train/loss_total": 0.6630117297172546 }, { "epoch": 0.05316590969810313, "step": 199, "train/loss_ctc": 1.2444641590118408, "train/loss_error": 0.5613990426063538, "train/loss_total": 0.6980120539665222 }, { "epoch": 0.053433075073470476, "grad_norm": 0.7208200693130493, "learning_rate": 2.968421052631579e-05, "loss": 0.6828, "step": 
200 }, { "epoch": 0.053433075073470476, "step": 200, "train/loss_ctc": 1.2637255191802979, "train/loss_error": 0.5940301418304443, "train/loss_total": 0.727969229221344 }, { "epoch": 0.05370024044883783, "step": 201, "train/loss_ctc": 1.4073940515518188, "train/loss_error": 0.5958065390586853, "train/loss_total": 0.758124053478241 }, { "epoch": 0.053967405824205186, "step": 202, "train/loss_ctc": 1.7612035274505615, "train/loss_error": 0.46412014961242676, "train/loss_total": 0.7235368490219116 }, { "epoch": 0.054234571199572534, "step": 203, "train/loss_ctc": 1.3158611059188843, "train/loss_error": 0.6092207431793213, "train/loss_total": 0.7505488395690918 }, { "epoch": 0.05450173657493989, "step": 204, "train/loss_ctc": 1.3591840267181396, "train/loss_error": 0.525901198387146, "train/loss_total": 0.6925578117370605 }, { "epoch": 0.05476890195030724, "step": 205, "train/loss_ctc": 1.0237051248550415, "train/loss_error": 0.5166935920715332, "train/loss_total": 0.618095874786377 }, { "epoch": 0.05503606732567459, "step": 206, "train/loss_ctc": 0.6247824430465698, "train/loss_error": 0.508830189704895, "train/loss_total": 0.5320206880569458 }, { "epoch": 0.05530323270104195, "step": 207, "train/loss_ctc": 1.679478645324707, "train/loss_error": 0.5372774004936218, "train/loss_total": 0.7657176852226257 }, { "epoch": 0.055570398076409296, "step": 208, "train/loss_ctc": 1.6070936918258667, "train/loss_error": 0.623242199420929, "train/loss_total": 0.8200125098228455 }, { "epoch": 0.05583756345177665, "step": 209, "train/loss_ctc": 1.0132362842559814, "train/loss_error": 0.5252915620803833, "train/loss_total": 0.6228805184364319 }, { "epoch": 0.056104728827144, "grad_norm": 0.9313914179801941, "learning_rate": 2.966818060379375e-05, "loss": 0.7011, "step": 210 }, { "epoch": 0.056104728827144, "step": 210, "train/loss_ctc": 1.0409634113311768, "train/loss_error": 0.5405890345573425, "train/loss_total": 0.6406639218330383 }, { "epoch": 0.056371894202511354, "step": 211, 
"train/loss_ctc": 0.8939475417137146, "train/loss_error": 0.538445770740509, "train/loss_total": 0.6095461249351501 }, { "epoch": 0.05663905957787871, "step": 212, "train/loss_ctc": 0.6657798290252686, "train/loss_error": 0.5032487511634827, "train/loss_total": 0.5357549786567688 }, { "epoch": 0.05690622495324606, "step": 213, "train/loss_ctc": 1.2634211778640747, "train/loss_error": 0.54642254114151, "train/loss_total": 0.6898223161697388 }, { "epoch": 0.05717339032861341, "step": 214, "train/loss_ctc": 0.9984685778617859, "train/loss_error": 0.5237658619880676, "train/loss_total": 0.6187064051628113 }, { "epoch": 0.05744055570398077, "step": 215, "train/loss_ctc": 1.5639883279800415, "train/loss_error": 0.513327956199646, "train/loss_total": 0.7234600782394409 }, { "epoch": 0.057707721079348115, "step": 216, "train/loss_ctc": 0.7803702354431152, "train/loss_error": 0.4514906108379364, "train/loss_total": 0.5172665119171143 }, { "epoch": 0.05797488645471547, "step": 217, "train/loss_ctc": 1.1551295518875122, "train/loss_error": 0.570198118686676, "train/loss_total": 0.6871843934059143 }, { "epoch": 0.05824205183008282, "step": 218, "train/loss_ctc": 0.8259176015853882, "train/loss_error": 0.5633736848831177, "train/loss_total": 0.6158824563026428 }, { "epoch": 0.05850921720545017, "step": 219, "train/loss_ctc": 1.1756219863891602, "train/loss_error": 0.4974210262298584, "train/loss_total": 0.6330612301826477 }, { "epoch": 0.05877638258081753, "grad_norm": 1.8644930124282837, "learning_rate": 2.965215068127171e-05, "loss": 0.6271, "step": 220 }, { "epoch": 0.05877638258081753, "step": 220, "train/loss_ctc": 1.2438788414001465, "train/loss_error": 0.5522658824920654, "train/loss_total": 0.6905884742736816 }, { "epoch": 0.059043547956184876, "step": 221, "train/loss_ctc": 0.8723535537719727, "train/loss_error": 0.5058046579360962, "train/loss_total": 0.5791144371032715 }, { "epoch": 0.05931071333155223, "step": 222, "train/loss_ctc": 0.9243442416191101, 
"train/loss_error": 0.5128381848335266, "train/loss_total": 0.5951393842697144 }, { "epoch": 0.059577878706919586, "step": 223, "train/loss_ctc": 0.965293824672699, "train/loss_error": 0.498264878988266, "train/loss_total": 0.5916706919670105 }, { "epoch": 0.059845044082286934, "step": 224, "train/loss_ctc": 1.1176954507827759, "train/loss_error": 0.5017316341400146, "train/loss_total": 0.6249244213104248 }, { "epoch": 0.06011220945765429, "step": 225, "train/loss_ctc": 1.081470251083374, "train/loss_error": 0.48100724816322327, "train/loss_total": 0.6010998487472534 }, { "epoch": 0.06037937483302164, "step": 226, "train/loss_ctc": 1.404480218887329, "train/loss_error": 0.5529156923294067, "train/loss_total": 0.7232285737991333 }, { "epoch": 0.06064654020838899, "step": 227, "train/loss_ctc": 0.871796727180481, "train/loss_error": 0.5099719762802124, "train/loss_total": 0.5823369026184082 }, { "epoch": 0.06091370558375635, "step": 228, "train/loss_ctc": 0.9449965357780457, "train/loss_error": 0.5797944664955139, "train/loss_total": 0.6528348922729492 }, { "epoch": 0.061180870959123695, "step": 229, "train/loss_ctc": 1.1067895889282227, "train/loss_error": 0.4735521376132965, "train/loss_total": 0.6001996397972107 }, { "epoch": 0.06144803633449105, "grad_norm": 1.1288504600524902, "learning_rate": 2.9636120758749668e-05, "loss": 0.6241, "step": 230 }, { "epoch": 0.06144803633449105, "step": 230, "train/loss_ctc": 1.3852659463882446, "train/loss_error": 0.5167505145072937, "train/loss_total": 0.6904536485671997 }, { "epoch": 0.061715201709858405, "step": 231, "train/loss_ctc": 1.084315299987793, "train/loss_error": 0.5055360198020935, "train/loss_total": 0.6212918758392334 }, { "epoch": 0.061982367085225754, "step": 232, "train/loss_ctc": 1.124265432357788, "train/loss_error": 0.520159125328064, "train/loss_total": 0.6409803628921509 }, { "epoch": 0.06224953246059311, "step": 233, "train/loss_ctc": 1.1845686435699463, "train/loss_error": 0.48756566643714905, 
"train/loss_total": 0.6269662380218506 }, { "epoch": 0.06251669783596046, "step": 234, "train/loss_ctc": 0.6681088209152222, "train/loss_error": 0.5108621716499329, "train/loss_total": 0.5423114895820618 }, { "epoch": 0.06278386321132781, "step": 235, "train/loss_ctc": 1.6279823780059814, "train/loss_error": 0.48939988017082214, "train/loss_total": 0.7171163558959961 }, { "epoch": 0.06305102858669516, "step": 236, "train/loss_ctc": 0.5447856187820435, "train/loss_error": 0.5205681324005127, "train/loss_total": 0.5254116654396057 }, { "epoch": 0.06331819396206252, "step": 237, "train/loss_ctc": 1.280189037322998, "train/loss_error": 0.5969749093055725, "train/loss_total": 0.7336177229881287 }, { "epoch": 0.06358535933742987, "step": 238, "train/loss_ctc": 0.7768566608428955, "train/loss_error": 0.4843127727508545, "train/loss_total": 0.5428215265274048 }, { "epoch": 0.06385252471279722, "step": 239, "train/loss_ctc": 1.4510948657989502, "train/loss_error": 0.47986164689064026, "train/loss_total": 0.6741082668304443 }, { "epoch": 0.06411969008816458, "grad_norm": 0.6821390986442566, "learning_rate": 2.9620090836227626e-05, "loss": 0.6315, "step": 240 }, { "epoch": 0.06411969008816458, "step": 240, "train/loss_ctc": 0.7389264106750488, "train/loss_error": 0.5455064177513123, "train/loss_total": 0.5841904282569885 }, { "epoch": 0.06438685546353193, "step": 241, "train/loss_ctc": 1.180286169052124, "train/loss_error": 0.48962846398353577, "train/loss_total": 0.6277599930763245 }, { "epoch": 0.06465402083889928, "step": 242, "train/loss_ctc": 1.4999322891235352, "train/loss_error": 0.49225467443466187, "train/loss_total": 0.6937901973724365 }, { "epoch": 0.06492118621426664, "step": 243, "train/loss_ctc": 1.2387964725494385, "train/loss_error": 0.4981522560119629, "train/loss_total": 0.6462811231613159 }, { "epoch": 0.06518835158963399, "step": 244, "train/loss_ctc": 1.0642378330230713, "train/loss_error": 0.4870758652687073, "train/loss_total": 0.6025082468986511 }, { 
"epoch": 0.06545551696500133, "step": 245, "train/loss_ctc": 0.9189203381538391, "train/loss_error": 0.4582747519016266, "train/loss_total": 0.5504038333892822 }, { "epoch": 0.06572268234036868, "step": 246, "train/loss_ctc": 1.0356879234313965, "train/loss_error": 0.453571617603302, "train/loss_total": 0.5699949264526367 }, { "epoch": 0.06598984771573604, "step": 247, "train/loss_ctc": 1.001309871673584, "train/loss_error": 0.47458159923553467, "train/loss_total": 0.5799272656440735 }, { "epoch": 0.06625701309110339, "step": 248, "train/loss_ctc": 1.2837883234024048, "train/loss_error": 0.5356354117393494, "train/loss_total": 0.6852660179138184 }, { "epoch": 0.06652417846647074, "step": 249, "train/loss_ctc": 1.0914448499679565, "train/loss_error": 0.5291556119918823, "train/loss_total": 0.6416134834289551 }, { "epoch": 0.0667913438418381, "grad_norm": 0.5800209641456604, "learning_rate": 2.9604060913705584e-05, "loss": 0.6182, "step": 250 }, { "epoch": 0.0667913438418381, "step": 250, "train/loss_ctc": 1.217538595199585, "train/loss_error": 0.5095291137695312, "train/loss_total": 0.6511310338973999 }, { "epoch": 0.06705850921720545, "step": 251, "train/loss_ctc": 0.7485295534133911, "train/loss_error": 0.449716717004776, "train/loss_total": 0.509479284286499 }, { "epoch": 0.0673256745925728, "step": 252, "train/loss_ctc": 1.025521159172058, "train/loss_error": 0.5353227853775024, "train/loss_total": 0.6333624720573425 }, { "epoch": 0.06759283996794016, "step": 253, "train/loss_ctc": 1.2648189067840576, "train/loss_error": 0.4651138186454773, "train/loss_total": 0.6250548362731934 }, { "epoch": 0.06786000534330751, "step": 254, "train/loss_ctc": 1.237206220626831, "train/loss_error": 0.5858537554740906, "train/loss_total": 0.7161242365837097 }, { "epoch": 0.06812717071867486, "step": 255, "train/loss_ctc": 0.8984494209289551, "train/loss_error": 0.4978563189506531, "train/loss_total": 0.5779749155044556 }, { "epoch": 0.06839433609404222, "step": 256, 
"train/loss_ctc": 1.7390074729919434, "train/loss_error": 0.5847029685974121, "train/loss_total": 0.8155639171600342 }, { "epoch": 0.06866150146940957, "step": 257, "train/loss_ctc": 0.9249576330184937, "train/loss_error": 0.5247632265090942, "train/loss_total": 0.604802131652832 }, { "epoch": 0.06892866684477691, "step": 258, "train/loss_ctc": 1.106674075126648, "train/loss_error": 0.5201979875564575, "train/loss_total": 0.6374932527542114 }, { "epoch": 0.06919583222014426, "step": 259, "train/loss_ctc": 1.8926465511322021, "train/loss_error": 0.4886649549007416, "train/loss_total": 0.7694612741470337 }, { "epoch": 0.06946299759551162, "grad_norm": 1.5464314222335815, "learning_rate": 2.9588030991183542e-05, "loss": 0.654, "step": 260 }, { "epoch": 0.06946299759551162, "step": 260, "train/loss_ctc": 1.180429220199585, "train/loss_error": 0.5106751918792725, "train/loss_total": 0.6446260213851929 }, { "epoch": 0.06973016297087897, "step": 261, "train/loss_ctc": 1.1789233684539795, "train/loss_error": 0.544258177280426, "train/loss_total": 0.6711912155151367 }, { "epoch": 0.06999732834624632, "step": 262, "train/loss_ctc": 0.905434250831604, "train/loss_error": 0.5484541058540344, "train/loss_total": 0.6198501586914062 }, { "epoch": 0.07026449372161368, "step": 263, "train/loss_ctc": 1.2707773447036743, "train/loss_error": 0.4940308928489685, "train/loss_total": 0.6493802070617676 }, { "epoch": 0.07053165909698103, "step": 264, "train/loss_ctc": 1.4089877605438232, "train/loss_error": 0.49677711725234985, "train/loss_total": 0.6792192459106445 }, { "epoch": 0.07079882447234838, "step": 265, "train/loss_ctc": 1.8002402782440186, "train/loss_error": 0.5058983564376831, "train/loss_total": 0.7647667527198792 }, { "epoch": 0.07106598984771574, "step": 266, "train/loss_ctc": 1.578891634941101, "train/loss_error": 0.5636160373687744, "train/loss_total": 0.7666711807250977 }, { "epoch": 0.07133315522308309, "step": 267, "train/loss_ctc": 1.5376883745193481, 
"train/loss_error": 0.5196276903152466, "train/loss_total": 0.7232398390769958 }, { "epoch": 0.07160032059845044, "step": 268, "train/loss_ctc": 1.028719186782837, "train/loss_error": 0.4897541105747223, "train/loss_total": 0.5975471138954163 }, { "epoch": 0.0718674859738178, "step": 269, "train/loss_ctc": 1.2183849811553955, "train/loss_error": 0.4992862939834595, "train/loss_total": 0.6431060433387756 }, { "epoch": 0.07213465134918515, "grad_norm": 0.828788697719574, "learning_rate": 2.9572001068661504e-05, "loss": 0.676, "step": 270 }, { "epoch": 0.07213465134918515, "step": 270, "train/loss_ctc": 0.8417997360229492, "train/loss_error": 0.6472741961479187, "train/loss_total": 0.6861792802810669 }, { "epoch": 0.0724018167245525, "step": 271, "train/loss_ctc": 1.1437098979949951, "train/loss_error": 0.5193154215812683, "train/loss_total": 0.6441943049430847 }, { "epoch": 0.07266898209991986, "step": 272, "train/loss_ctc": 0.7664840221405029, "train/loss_error": 0.5040237903594971, "train/loss_total": 0.5565158724784851 }, { "epoch": 0.0729361474752872, "step": 273, "train/loss_ctc": 1.0148215293884277, "train/loss_error": 0.5016084313392639, "train/loss_total": 0.6042510271072388 }, { "epoch": 0.07320331285065455, "step": 274, "train/loss_ctc": 1.4157741069793701, "train/loss_error": 0.5894337296485901, "train/loss_total": 0.7547017931938171 }, { "epoch": 0.0734704782260219, "step": 275, "train/loss_ctc": 1.3972294330596924, "train/loss_error": 0.5512430667877197, "train/loss_total": 0.7204403877258301 }, { "epoch": 0.07373764360138926, "step": 276, "train/loss_ctc": 0.9251450300216675, "train/loss_error": 0.46339258551597595, "train/loss_total": 0.5557430982589722 }, { "epoch": 0.07400480897675661, "step": 277, "train/loss_ctc": 1.4884405136108398, "train/loss_error": 0.5423993468284607, "train/loss_total": 0.7316075563430786 }, { "epoch": 0.07427197435212396, "step": 278, "train/loss_ctc": 0.9924870729446411, "train/loss_error": 0.48430296778678894, 
"train/loss_total": 0.5859397649765015 }, { "epoch": 0.07453913972749132, "step": 279, "train/loss_ctc": 0.8882652521133423, "train/loss_error": 0.4959971010684967, "train/loss_total": 0.5744507312774658 }, { "epoch": 0.07480630510285867, "grad_norm": 0.7870831489562988, "learning_rate": 2.9555971146139462e-05, "loss": 0.6414, "step": 280 }, { "epoch": 0.07480630510285867, "step": 280, "train/loss_ctc": 0.7455161809921265, "train/loss_error": 0.5068856477737427, "train/loss_total": 0.5546117424964905 }, { "epoch": 0.07507347047822602, "step": 281, "train/loss_ctc": 1.1215484142303467, "train/loss_error": 0.5318644642829895, "train/loss_total": 0.6498012542724609 }, { "epoch": 0.07534063585359338, "step": 282, "train/loss_ctc": 1.12015962600708, "train/loss_error": 0.5354052186012268, "train/loss_total": 0.6523561477661133 }, { "epoch": 0.07560780122896073, "step": 283, "train/loss_ctc": 1.3948479890823364, "train/loss_error": 0.4901813566684723, "train/loss_total": 0.6711146831512451 }, { "epoch": 0.07587496660432808, "step": 284, "train/loss_ctc": 1.0597258806228638, "train/loss_error": 0.48588278889656067, "train/loss_total": 0.6006513833999634 }, { "epoch": 0.07614213197969544, "step": 285, "train/loss_ctc": 1.025026798248291, "train/loss_error": 0.4933178424835205, "train/loss_total": 0.5996596217155457 }, { "epoch": 0.07640929735506279, "step": 286, "train/loss_ctc": 1.132333517074585, "train/loss_error": 0.47458723187446594, "train/loss_total": 0.6061365008354187 }, { "epoch": 0.07667646273043013, "step": 287, "train/loss_ctc": 1.4846572875976562, "train/loss_error": 0.5224222540855408, "train/loss_total": 0.7148692607879639 }, { "epoch": 0.07694362810579748, "step": 288, "train/loss_ctc": 1.6242499351501465, "train/loss_error": 0.45972171425819397, "train/loss_total": 0.6926273703575134 }, { "epoch": 0.07721079348116484, "step": 289, "train/loss_ctc": 1.1251893043518066, "train/loss_error": 0.45305320620536804, "train/loss_total": 0.5874804258346558 }, { 
"epoch": 0.07747795885653219, "grad_norm": 1.155676245689392, "learning_rate": 2.953994122361742e-05, "loss": 0.6329, "step": 290 }, { "epoch": 0.07747795885653219, "step": 290, "train/loss_ctc": 1.3961783647537231, "train/loss_error": 0.5013437271118164, "train/loss_total": 0.6803106665611267 }, { "epoch": 0.07774512423189954, "step": 291, "train/loss_ctc": 1.132622480392456, "train/loss_error": 0.5661219954490662, "train/loss_total": 0.67942214012146 }, { "epoch": 0.0780122896072669, "step": 292, "train/loss_ctc": 0.7788457870483398, "train/loss_error": 0.5148813724517822, "train/loss_total": 0.5676742792129517 }, { "epoch": 0.07827945498263425, "step": 293, "train/loss_ctc": 1.0488063097000122, "train/loss_error": 0.5024619698524475, "train/loss_total": 0.6117308139801025 }, { "epoch": 0.0785466203580016, "step": 294, "train/loss_ctc": 0.9715032577514648, "train/loss_error": 0.5335076451301575, "train/loss_total": 0.621106743812561 }, { "epoch": 0.07881378573336896, "step": 295, "train/loss_ctc": 1.8417545557022095, "train/loss_error": 0.48538556694984436, "train/loss_total": 0.7566593885421753 }, { "epoch": 0.07908095110873631, "step": 296, "train/loss_ctc": 0.9411225318908691, "train/loss_error": 0.5498611927032471, "train/loss_total": 0.6281134486198425 }, { "epoch": 0.07934811648410366, "step": 297, "train/loss_ctc": 1.322098970413208, "train/loss_error": 0.568112850189209, "train/loss_total": 0.7189100980758667 }, { "epoch": 0.07961528185947102, "step": 298, "train/loss_ctc": 0.9254623651504517, "train/loss_error": 0.49469247460365295, "train/loss_total": 0.5808464288711548 }, { "epoch": 0.07988244723483837, "step": 299, "train/loss_ctc": 1.394883632659912, "train/loss_error": 0.48303255438804626, "train/loss_total": 0.6654027700424194 }, { "epoch": 0.08014961261020571, "grad_norm": 1.2181612253189087, "learning_rate": 2.9523911301095378e-05, "loss": 0.651, "step": 300 }, { "epoch": 0.08014961261020571, "step": 300, "train/loss_ctc": 1.0188195705413818, 
"train/loss_error": 0.5188629627227783, "train/loss_total": 0.618854284286499 }, { "epoch": 0.08041677798557308, "step": 301, "train/loss_ctc": 1.1595346927642822, "train/loss_error": 0.5883118510246277, "train/loss_total": 0.7025564312934875 }, { "epoch": 0.08068394336094042, "step": 302, "train/loss_ctc": 1.6000392436981201, "train/loss_error": 0.544117271900177, "train/loss_total": 0.7553017139434814 }, { "epoch": 0.08095110873630777, "step": 303, "train/loss_ctc": 1.03915536403656, "train/loss_error": 0.5302283763885498, "train/loss_total": 0.6320137977600098 }, { "epoch": 0.08121827411167512, "step": 304, "train/loss_ctc": 1.1067874431610107, "train/loss_error": 0.46726298332214355, "train/loss_total": 0.595167875289917 }, { "epoch": 0.08148543948704248, "step": 305, "train/loss_ctc": 1.2287817001342773, "train/loss_error": 0.47887319326400757, "train/loss_total": 0.6288549304008484 }, { "epoch": 0.08175260486240983, "step": 306, "train/loss_ctc": 0.6804414391517639, "train/loss_error": 0.5044513940811157, "train/loss_total": 0.5396494269371033 }, { "epoch": 0.08201977023777718, "step": 307, "train/loss_ctc": 0.7759988307952881, "train/loss_error": 0.5105875134468079, "train/loss_total": 0.5636698007583618 }, { "epoch": 0.08228693561314454, "step": 308, "train/loss_ctc": 0.9400147795677185, "train/loss_error": 0.5471035242080688, "train/loss_total": 0.6256858110427856 }, { "epoch": 0.08255410098851189, "step": 309, "train/loss_ctc": 0.9136689901351929, "train/loss_error": 0.43655648827552795, "train/loss_total": 0.531978964805603 }, { "epoch": 0.08282126636387924, "grad_norm": 0.9278818964958191, "learning_rate": 2.9507881378573336e-05, "loss": 0.6194, "step": 310 }, { "epoch": 0.08282126636387924, "step": 310, "train/loss_ctc": 0.7097032070159912, "train/loss_error": 0.4693470299243927, "train/loss_total": 0.5174182653427124 }, { "epoch": 0.0830884317392466, "step": 311, "train/loss_ctc": 0.8186275959014893, "train/loss_error": 0.5047013759613037, 
"train/loss_total": 0.5674866437911987 }, { "epoch": 0.08335559711461395, "step": 312, "train/loss_ctc": 1.1722955703735352, "train/loss_error": 0.45834675431251526, "train/loss_total": 0.6011365056037903 }, { "epoch": 0.0836227624899813, "step": 313, "train/loss_ctc": 1.377397060394287, "train/loss_error": 0.46584978699684143, "train/loss_total": 0.6481592655181885 }, { "epoch": 0.08388992786534866, "step": 314, "train/loss_ctc": 1.0202006101608276, "train/loss_error": 0.5274606347084045, "train/loss_total": 0.6260086297988892 }, { "epoch": 0.084157093240716, "step": 315, "train/loss_ctc": 0.7892500162124634, "train/loss_error": 0.46336629986763, "train/loss_total": 0.5285430550575256 }, { "epoch": 0.08442425861608335, "step": 316, "train/loss_ctc": 1.0498615503311157, "train/loss_error": 0.5377658605575562, "train/loss_total": 0.6401849985122681 }, { "epoch": 0.0846914239914507, "step": 317, "train/loss_ctc": 1.1521425247192383, "train/loss_error": 0.5760981440544128, "train/loss_total": 0.691307008266449 }, { "epoch": 0.08495858936681806, "step": 318, "train/loss_ctc": 1.0798616409301758, "train/loss_error": 0.4674625098705292, "train/loss_total": 0.5899423360824585 }, { "epoch": 0.08522575474218541, "step": 319, "train/loss_ctc": 1.1504194736480713, "train/loss_error": 0.5749791860580444, "train/loss_total": 0.6900672316551208 }, { "epoch": 0.08549292011755276, "grad_norm": 0.9811462163925171, "learning_rate": 2.9491851456051294e-05, "loss": 0.61, "step": 320 }, { "epoch": 0.08549292011755276, "step": 320, "train/loss_ctc": 1.2350975275039673, "train/loss_error": 0.4877358376979828, "train/loss_total": 0.6372081637382507 }, { "epoch": 0.08576008549292012, "step": 321, "train/loss_ctc": 0.47893470525741577, "train/loss_error": 0.4886374771595001, "train/loss_total": 0.4866969585418701 }, { "epoch": 0.08602725086828747, "step": 322, "train/loss_ctc": 1.9980438947677612, "train/loss_error": 0.48359552025794983, "train/loss_total": 0.7864851951599121 }, { "epoch": 
0.08629441624365482, "step": 323, "train/loss_ctc": 0.39798158407211304, "train/loss_error": 0.5260363221168518, "train/loss_total": 0.500425398349762 }, { "epoch": 0.08656158161902218, "step": 324, "train/loss_ctc": 1.0275671482086182, "train/loss_error": 0.5506578683853149, "train/loss_total": 0.6460397243499756 }, { "epoch": 0.08682874699438953, "step": 325, "train/loss_ctc": 1.0796208381652832, "train/loss_error": 0.5322070121765137, "train/loss_total": 0.6416897773742676 }, { "epoch": 0.08709591236975688, "step": 326, "train/loss_ctc": 0.8241540789604187, "train/loss_error": 0.4851624071598053, "train/loss_total": 0.5529607534408569 }, { "epoch": 0.08736307774512424, "step": 327, "train/loss_ctc": 0.7619774341583252, "train/loss_error": 0.5341138243675232, "train/loss_total": 0.5796865224838257 }, { "epoch": 0.08763024312049159, "step": 328, "train/loss_ctc": 1.471107006072998, "train/loss_error": 0.5749509334564209, "train/loss_total": 0.7541821599006653 }, { "epoch": 0.08789740849585893, "step": 329, "train/loss_ctc": 1.7899171113967896, "train/loss_error": 0.4658471941947937, "train/loss_total": 0.7306612133979797 }, { "epoch": 0.0881645738712263, "grad_norm": 0.9435173273086548, "learning_rate": 2.9475821533529256e-05, "loss": 0.6316, "step": 330 }, { "epoch": 0.0881645738712263, "step": 330, "train/loss_ctc": 1.2330445051193237, "train/loss_error": 0.48231691122055054, "train/loss_total": 0.6324624419212341 }, { "epoch": 0.08843173924659364, "step": 331, "train/loss_ctc": 1.606583595275879, "train/loss_error": 0.528327226638794, "train/loss_total": 0.7439785003662109 }, { "epoch": 0.08869890462196099, "step": 332, "train/loss_ctc": 1.1430011987686157, "train/loss_error": 0.4865521490573883, "train/loss_total": 0.6178419589996338 }, { "epoch": 0.08896606999732834, "step": 333, "train/loss_ctc": 1.1656067371368408, "train/loss_error": 0.5008054375648499, "train/loss_total": 0.633765697479248 }, { "epoch": 0.0892332353726957, "step": 334, "train/loss_ctc": 
0.5827573537826538, "train/loss_error": 0.4829612672328949, "train/loss_total": 0.5029205083847046 }, { "epoch": 0.08950040074806305, "step": 335, "train/loss_ctc": 1.4479451179504395, "train/loss_error": 0.5138744115829468, "train/loss_total": 0.7006885409355164 }, { "epoch": 0.0897675661234304, "step": 336, "train/loss_ctc": 1.2038371562957764, "train/loss_error": 0.4622931480407715, "train/loss_total": 0.6106019616127014 }, { "epoch": 0.09003473149879776, "step": 337, "train/loss_ctc": 0.8589602708816528, "train/loss_error": 0.48587313294410706, "train/loss_total": 0.560490608215332 }, { "epoch": 0.09030189687416511, "step": 338, "train/loss_ctc": 0.9597412347793579, "train/loss_error": 0.4728728234767914, "train/loss_total": 0.5702465176582336 }, { "epoch": 0.09056906224953246, "step": 339, "train/loss_ctc": 1.0351557731628418, "train/loss_error": 0.49845364689826965, "train/loss_total": 0.6057940721511841 }, { "epoch": 0.09083622762489982, "grad_norm": 1.210143804550171, "learning_rate": 2.9459791611007214e-05, "loss": 0.6179, "step": 340 }, { "epoch": 0.09083622762489982, "step": 340, "train/loss_ctc": 0.8650791645050049, "train/loss_error": 0.564564049243927, "train/loss_total": 0.6246670484542847 }, { "epoch": 0.09110339300026717, "step": 341, "train/loss_ctc": 0.467223197221756, "train/loss_error": 0.5275230407714844, "train/loss_total": 0.5154630541801453 }, { "epoch": 0.09137055837563451, "step": 342, "train/loss_ctc": 1.037686824798584, "train/loss_error": 0.5659140944480896, "train/loss_total": 0.6602686643600464 }, { "epoch": 0.09163772375100188, "step": 343, "train/loss_ctc": 1.7845076322555542, "train/loss_error": 0.5444279313087463, "train/loss_total": 0.7924438714981079 }, { "epoch": 0.09190488912636922, "step": 344, "train/loss_ctc": 0.7323603630065918, "train/loss_error": 0.5128992795944214, "train/loss_total": 0.5567914843559265 }, { "epoch": 0.09217205450173657, "step": 345, "train/loss_ctc": 0.6775307655334473, "train/loss_error": 
0.5482009649276733, "train/loss_total": 0.5740669369697571 }, { "epoch": 0.09243921987710393, "step": 346, "train/loss_ctc": 0.39169055223464966, "train/loss_error": 0.5484491586685181, "train/loss_total": 0.5170974135398865 }, { "epoch": 0.09270638525247128, "step": 347, "train/loss_ctc": 1.0047096014022827, "train/loss_error": 0.49180057644844055, "train/loss_total": 0.5943824052810669 }, { "epoch": 0.09297355062783863, "step": 348, "train/loss_ctc": 1.0347213745117188, "train/loss_error": 0.501956582069397, "train/loss_total": 0.6085095405578613 }, { "epoch": 0.09324071600320598, "step": 349, "train/loss_ctc": 1.7961127758026123, "train/loss_error": 0.49510443210601807, "train/loss_total": 0.7553061246871948 }, { "epoch": 0.09350788137857334, "grad_norm": 1.7035695314407349, "learning_rate": 2.9443761688485172e-05, "loss": 0.6199, "step": 350 }, { "epoch": 0.09350788137857334, "step": 350, "train/loss_ctc": 1.1329854726791382, "train/loss_error": 0.49125826358795166, "train/loss_total": 0.6196037530899048 }, { "epoch": 0.09377504675394069, "step": 351, "train/loss_ctc": 1.1850130558013916, "train/loss_error": 0.5536041855812073, "train/loss_total": 0.679885983467102 }, { "epoch": 0.09404221212930804, "step": 352, "train/loss_ctc": 0.8212720155715942, "train/loss_error": 0.5659387111663818, "train/loss_total": 0.6170054078102112 }, { "epoch": 0.0943093775046754, "step": 353, "train/loss_ctc": 1.5504393577575684, "train/loss_error": 0.5268296599388123, "train/loss_total": 0.7315516471862793 }, { "epoch": 0.09457654288004275, "step": 354, "train/loss_ctc": 0.8013204336166382, "train/loss_error": 0.48274606466293335, "train/loss_total": 0.5464609265327454 }, { "epoch": 0.0948437082554101, "step": 355, "train/loss_ctc": 0.9561695456504822, "train/loss_error": 0.5645056962966919, "train/loss_total": 0.6428384780883789 }, { "epoch": 0.09511087363077746, "step": 356, "train/loss_ctc": 0.5706906318664551, "train/loss_error": 0.48107531666755676, "train/loss_total": 
0.49899837374687195 }, { "epoch": 0.0953780390061448, "step": 357, "train/loss_ctc": 1.8447984457015991, "train/loss_error": 0.46897128224372864, "train/loss_total": 0.7441366910934448 }, { "epoch": 0.09564520438151215, "step": 358, "train/loss_ctc": 0.7296761870384216, "train/loss_error": 0.55698162317276, "train/loss_total": 0.5915205478668213 }, { "epoch": 0.09591236975687951, "step": 359, "train/loss_ctc": 1.4774165153503418, "train/loss_error": 0.5223844647407532, "train/loss_total": 0.7133908867835999 }, { "epoch": 0.09617953513224686, "grad_norm": 1.2162333726882935, "learning_rate": 2.942773176596313e-05, "loss": 0.6385, "step": 360 }, { "epoch": 0.09617953513224686, "step": 360, "train/loss_ctc": 0.9145421385765076, "train/loss_error": 0.5122177600860596, "train/loss_total": 0.5926826596260071 }, { "epoch": 0.09644670050761421, "step": 361, "train/loss_ctc": 0.6231439709663391, "train/loss_error": 0.5151112079620361, "train/loss_total": 0.5367177724838257 }, { "epoch": 0.09671386588298156, "step": 362, "train/loss_ctc": 0.9073154330253601, "train/loss_error": 0.5093768835067749, "train/loss_total": 0.588964581489563 }, { "epoch": 0.09698103125834892, "step": 363, "train/loss_ctc": 0.8869292736053467, "train/loss_error": 0.5451204776763916, "train/loss_total": 0.6134822368621826 }, { "epoch": 0.09724819663371627, "step": 364, "train/loss_ctc": 1.174008846282959, "train/loss_error": 0.5294346213340759, "train/loss_total": 0.6583495140075684 }, { "epoch": 0.09751536200908362, "step": 365, "train/loss_ctc": 1.2325873374938965, "train/loss_error": 0.5268105864524841, "train/loss_total": 0.6679659485816956 }, { "epoch": 0.09778252738445098, "step": 366, "train/loss_ctc": 0.816051721572876, "train/loss_error": 0.4730047881603241, "train/loss_total": 0.5416141748428345 }, { "epoch": 0.09804969275981833, "step": 367, "train/loss_ctc": 1.6942702531814575, "train/loss_error": 0.5199087262153625, "train/loss_total": 0.7547810077667236 }, { "epoch": 
0.09831685813518568, "step": 368, "train/loss_ctc": 0.7438981533050537, "train/loss_error": 0.48989933729171753, "train/loss_total": 0.5406991243362427 }, { "epoch": 0.09858402351055304, "step": 369, "train/loss_ctc": 0.9289795756340027, "train/loss_error": 0.47977694869041443, "train/loss_total": 0.569617509841919 }, { "epoch": 0.09885118888592039, "grad_norm": 0.6736205220222473, "learning_rate": 2.941170184344109e-05, "loss": 0.6065, "step": 370 }, { "epoch": 0.09885118888592039, "step": 370, "train/loss_ctc": 0.9458625912666321, "train/loss_error": 0.5368648767471313, "train/loss_total": 0.6186644434928894 }, { "epoch": 0.09911835426128773, "step": 371, "train/loss_ctc": 0.7746199369430542, "train/loss_error": 0.5139884948730469, "train/loss_total": 0.5661147832870483 }, { "epoch": 0.0993855196366551, "step": 372, "train/loss_ctc": 1.2120139598846436, "train/loss_error": 0.521782636642456, "train/loss_total": 0.6598289012908936 }, { "epoch": 0.09965268501202244, "step": 373, "train/loss_ctc": 0.8975635170936584, "train/loss_error": 0.6855337023735046, "train/loss_total": 0.7279397249221802 }, { "epoch": 0.09991985038738979, "step": 374, "train/loss_ctc": 1.3261921405792236, "train/loss_error": 0.5495189428329468, "train/loss_total": 0.7048535943031311 }, { "epoch": 0.10018701576275715, "step": 375, "train/loss_ctc": 1.3224211931228638, "train/loss_error": 0.5665552616119385, "train/loss_total": 0.7177284955978394 }, { "epoch": 0.1004541811381245, "step": 376, "train/loss_ctc": 0.6431321501731873, "train/loss_error": 0.5022549033164978, "train/loss_total": 0.5304303765296936 }, { "epoch": 0.10072134651349185, "step": 377, "train/loss_ctc": 0.969677209854126, "train/loss_error": 0.5443421602249146, "train/loss_total": 0.6294091939926147 }, { "epoch": 0.1009885118888592, "step": 378, "train/loss_ctc": 0.8033376336097717, "train/loss_error": 0.5789002776145935, "train/loss_total": 0.6237877607345581 }, { "epoch": 0.10125567726422656, "step": 379, "train/loss_ctc": 
0.9158570766448975, "train/loss_error": 0.4517013728618622, "train/loss_total": 0.5445325374603271 }, { "epoch": 0.10152284263959391, "grad_norm": 0.6842573881149292, "learning_rate": 2.939567192091905e-05, "loss": 0.6323, "step": 380 }, { "epoch": 0.10152284263959391, "step": 380, "train/loss_ctc": 1.4219635725021362, "train/loss_error": 0.5144355893135071, "train/loss_total": 0.6959412097930908 }, { "epoch": 0.10179000801496126, "step": 381, "train/loss_ctc": 1.095362663269043, "train/loss_error": 0.4624341130256653, "train/loss_total": 0.5890198349952698 }, { "epoch": 0.10205717339032862, "step": 382, "train/loss_ctc": 0.8185564279556274, "train/loss_error": 0.5258069634437561, "train/loss_total": 0.5843568444252014 }, { "epoch": 0.10232433876569597, "step": 383, "train/loss_ctc": 0.9575836658477783, "train/loss_error": 0.49536243081092834, "train/loss_total": 0.5878067016601562 }, { "epoch": 0.10259150414106331, "step": 384, "train/loss_ctc": 0.6856978535652161, "train/loss_error": 0.534049391746521, "train/loss_total": 0.564379096031189 }, { "epoch": 0.10285866951643068, "step": 385, "train/loss_ctc": 0.7810041904449463, "train/loss_error": 0.5300113558769226, "train/loss_total": 0.5802099108695984 }, { "epoch": 0.10312583489179802, "step": 386, "train/loss_ctc": 0.822925329208374, "train/loss_error": 0.4796920716762543, "train/loss_total": 0.5483387112617493 }, { "epoch": 0.10339300026716537, "step": 387, "train/loss_ctc": 0.9319342374801636, "train/loss_error": 0.516029417514801, "train/loss_total": 0.5992103815078735 }, { "epoch": 0.10366016564253273, "step": 388, "train/loss_ctc": 0.6657482385635376, "train/loss_error": 0.5669726133346558, "train/loss_total": 0.5867277383804321 }, { "epoch": 0.10392733101790008, "step": 389, "train/loss_ctc": 1.7487726211547852, "train/loss_error": 0.4981202483177185, "train/loss_total": 0.7482507228851318 }, { "epoch": 0.10419449639326743, "grad_norm": 1.1783329248428345, "learning_rate": 2.937964199839701e-05, "loss": 
0.6084, "step": 390 }, { "epoch": 0.10419449639326743, "step": 390, "train/loss_ctc": 0.8603993058204651, "train/loss_error": 0.526723325252533, "train/loss_total": 0.5934585332870483 }, { "epoch": 0.10446166176863478, "step": 391, "train/loss_ctc": 0.7642228603363037, "train/loss_error": 0.5010571479797363, "train/loss_total": 0.5536903142929077 }, { "epoch": 0.10472882714400214, "step": 392, "train/loss_ctc": 1.6226046085357666, "train/loss_error": 0.525084376335144, "train/loss_total": 0.7445884346961975 }, { "epoch": 0.10499599251936949, "step": 393, "train/loss_ctc": 1.3419389724731445, "train/loss_error": 0.49685725569725037, "train/loss_total": 0.665873646736145 }, { "epoch": 0.10526315789473684, "step": 394, "train/loss_ctc": 0.9603001475334167, "train/loss_error": 0.5157772898674011, "train/loss_total": 0.6046818494796753 }, { "epoch": 0.1055303232701042, "step": 395, "train/loss_ctc": 0.8333861231803894, "train/loss_error": 0.5586366653442383, "train/loss_total": 0.6135865449905396 }, { "epoch": 0.10579748864547155, "step": 396, "train/loss_ctc": 1.0328744649887085, "train/loss_error": 0.5090466737747192, "train/loss_total": 0.613812267780304 }, { "epoch": 0.1060646540208389, "step": 397, "train/loss_ctc": 0.7232624292373657, "train/loss_error": 0.4926546812057495, "train/loss_total": 0.5387762784957886 }, { "epoch": 0.10633181939620626, "step": 398, "train/loss_ctc": 1.0362013578414917, "train/loss_error": 0.5766544938087463, "train/loss_total": 0.6685638427734375 }, { "epoch": 0.1065989847715736, "step": 399, "train/loss_ctc": 1.0616555213928223, "train/loss_error": 0.4933282732963562, "train/loss_total": 0.6069937348365784 }, { "epoch": 0.10686615014694095, "grad_norm": 1.25261652469635, "learning_rate": 2.936361207587497e-05, "loss": 0.6204, "step": 400 }, { "epoch": 0.10686615014694095, "step": 400, "train/loss_ctc": 1.1500279903411865, "train/loss_error": 0.4933301508426666, "train/loss_total": 0.6246697306632996 }, { "epoch": 0.10713331552230831, 
"step": 401, "train/loss_ctc": 1.0791680812835693, "train/loss_error": 0.6310151815414429, "train/loss_total": 0.7206457853317261 }, { "epoch": 0.10740048089767566, "step": 402, "train/loss_ctc": 0.8128418326377869, "train/loss_error": 0.5390952825546265, "train/loss_total": 0.5938445925712585 }, { "epoch": 0.10766764627304301, "step": 403, "train/loss_ctc": 0.915104866027832, "train/loss_error": 0.5164240598678589, "train/loss_total": 0.5961602330207825 }, { "epoch": 0.10793481164841037, "step": 404, "train/loss_ctc": 0.7278926372528076, "train/loss_error": 0.4976687431335449, "train/loss_total": 0.5437135696411133 }, { "epoch": 0.10820197702377772, "step": 405, "train/loss_ctc": 1.0386693477630615, "train/loss_error": 0.5621932744979858, "train/loss_total": 0.6574884653091431 }, { "epoch": 0.10846914239914507, "step": 406, "train/loss_ctc": 0.4632861316204071, "train/loss_error": 0.5216964483261108, "train/loss_total": 0.5100144147872925 }, { "epoch": 0.10873630777451242, "step": 407, "train/loss_ctc": 1.1343393325805664, "train/loss_error": 0.5757835507392883, "train/loss_total": 0.687494695186615 }, { "epoch": 0.10900347314987978, "step": 408, "train/loss_ctc": 0.6099203824996948, "train/loss_error": 0.5613512992858887, "train/loss_total": 0.5710651278495789 }, { "epoch": 0.10927063852524713, "step": 409, "train/loss_ctc": 0.8061103224754333, "train/loss_error": 0.5377724766731262, "train/loss_total": 0.5914400815963745 }, { "epoch": 0.10953780390061447, "grad_norm": 1.0928010940551758, "learning_rate": 2.9347582153352927e-05, "loss": 0.6097, "step": 410 }, { "epoch": 0.10953780390061447, "step": 410, "train/loss_ctc": 1.2463873624801636, "train/loss_error": 0.5367619395256042, "train/loss_total": 0.6786870360374451 }, { "epoch": 0.10980496927598184, "step": 411, "train/loss_ctc": 1.105947732925415, "train/loss_error": 0.516577422618866, "train/loss_total": 0.6344515085220337 }, { "epoch": 0.11007213465134918, "step": 412, "train/loss_ctc": 0.7450916767120361, 
"train/loss_error": 0.48435941338539124, "train/loss_total": 0.5365058779716492 }, { "epoch": 0.11033930002671653, "step": 413, "train/loss_ctc": 1.3760957717895508, "train/loss_error": 0.5008748173713684, "train/loss_total": 0.6759190559387207 }, { "epoch": 0.1106064654020839, "step": 414, "train/loss_ctc": 0.5062846541404724, "train/loss_error": 0.5022806525230408, "train/loss_total": 0.5030814409255981 }, { "epoch": 0.11087363077745124, "step": 415, "train/loss_ctc": 1.128713846206665, "train/loss_error": 0.5179042220115662, "train/loss_total": 0.6400661468505859 }, { "epoch": 0.11114079615281859, "step": 416, "train/loss_ctc": 1.8051786422729492, "train/loss_error": 0.5288592576980591, "train/loss_total": 0.7841231822967529 }, { "epoch": 0.11140796152818595, "step": 417, "train/loss_ctc": 1.0691267251968384, "train/loss_error": 0.5182470083236694, "train/loss_total": 0.6284229755401611 }, { "epoch": 0.1116751269035533, "step": 418, "train/loss_ctc": 0.6771992444992065, "train/loss_error": 0.5059982538223267, "train/loss_total": 0.5402384400367737 }, { "epoch": 0.11194229227892065, "step": 419, "train/loss_ctc": 1.3000580072402954, "train/loss_error": 0.5340642929077148, "train/loss_total": 0.687263011932373 }, { "epoch": 0.112209457654288, "grad_norm": 1.7139432430267334, "learning_rate": 2.9331552230830885e-05, "loss": 0.6309, "step": 420 }, { "epoch": 0.112209457654288, "step": 420, "train/loss_ctc": 0.6219081282615662, "train/loss_error": 0.5583223700523376, "train/loss_total": 0.5710394978523254 }, { "epoch": 0.11247662302965536, "step": 421, "train/loss_ctc": 0.5464673042297363, "train/loss_error": 0.531772792339325, "train/loss_total": 0.5347117185592651 }, { "epoch": 0.11274378840502271, "step": 422, "train/loss_ctc": 0.9007793068885803, "train/loss_error": 0.49884042143821716, "train/loss_total": 0.5792282223701477 }, { "epoch": 0.11301095378039006, "step": 423, "train/loss_ctc": 1.2061951160430908, "train/loss_error": 0.5301451086997986, 
"train/loss_total": 0.6653551459312439 }, { "epoch": 0.11327811915575742, "step": 424, "train/loss_ctc": 0.9647861123085022, "train/loss_error": 0.5252009034156799, "train/loss_total": 0.6131179332733154 }, { "epoch": 0.11354528453112477, "step": 425, "train/loss_ctc": 1.3394789695739746, "train/loss_error": 0.5178500413894653, "train/loss_total": 0.6821758151054382 }, { "epoch": 0.11381244990649211, "step": 426, "train/loss_ctc": 0.8617841005325317, "train/loss_error": 0.5467119216918945, "train/loss_total": 0.6097263693809509 }, { "epoch": 0.11407961528185948, "step": 427, "train/loss_ctc": 0.9654196500778198, "train/loss_error": 0.4849943220615387, "train/loss_total": 0.581079363822937 }, { "epoch": 0.11434678065722682, "step": 428, "train/loss_ctc": 1.1181118488311768, "train/loss_error": 0.4730112850666046, "train/loss_total": 0.602031409740448 }, { "epoch": 0.11461394603259417, "step": 429, "train/loss_ctc": 0.8235106468200684, "train/loss_error": 0.48366960883140564, "train/loss_total": 0.5516378283500671 }, { "epoch": 0.11488111140796153, "grad_norm": 0.6383955478668213, "learning_rate": 2.9315522308308843e-05, "loss": 0.599, "step": 430 }, { "epoch": 0.11488111140796153, "step": 430, "train/loss_ctc": 1.5845550298690796, "train/loss_error": 0.47124558687210083, "train/loss_total": 0.6939074993133545 }, { "epoch": 0.11514827678332888, "step": 431, "train/loss_ctc": 1.0640352964401245, "train/loss_error": 0.5235174894332886, "train/loss_total": 0.6316210627555847 }, { "epoch": 0.11541544215869623, "step": 432, "train/loss_ctc": 0.9373897314071655, "train/loss_error": 0.5204625725746155, "train/loss_total": 0.6038479804992676 }, { "epoch": 0.11568260753406359, "step": 433, "train/loss_ctc": 0.8528728485107422, "train/loss_error": 0.4999355673789978, "train/loss_total": 0.5705230236053467 }, { "epoch": 0.11594977290943094, "step": 434, "train/loss_ctc": 0.645373523235321, "train/loss_error": 0.5026700496673584, "train/loss_total": 0.5312107801437378 }, { 
"epoch": 0.11621693828479829, "step": 435, "train/loss_ctc": 0.9609214067459106, "train/loss_error": 0.551378071308136, "train/loss_total": 0.6332867741584778 }, { "epoch": 0.11648410366016564, "step": 436, "train/loss_ctc": 0.9764705896377563, "train/loss_error": 0.481644868850708, "train/loss_total": 0.5806100368499756 }, { "epoch": 0.116751269035533, "step": 437, "train/loss_ctc": 1.1883480548858643, "train/loss_error": 0.456667423248291, "train/loss_total": 0.6030035614967346 }, { "epoch": 0.11701843441090035, "step": 438, "train/loss_ctc": 1.2340166568756104, "train/loss_error": 0.5019242167472839, "train/loss_total": 0.6483427286148071 }, { "epoch": 0.1172855997862677, "step": 439, "train/loss_ctc": 1.5821982622146606, "train/loss_error": 0.5723169445991516, "train/loss_total": 0.7742931842803955 }, { "epoch": 0.11755276516163506, "grad_norm": 1.3969004154205322, "learning_rate": 2.92994923857868e-05, "loss": 0.6271, "step": 440 }, { "epoch": 0.11755276516163506, "step": 440, "train/loss_ctc": 1.0040072202682495, "train/loss_error": 0.5632551312446594, "train/loss_total": 0.6514055728912354 }, { "epoch": 0.1178199305370024, "step": 441, "train/loss_ctc": 1.5995615720748901, "train/loss_error": 0.49048393964767456, "train/loss_total": 0.7122994661331177 }, { "epoch": 0.11808709591236975, "step": 442, "train/loss_ctc": 1.4849867820739746, "train/loss_error": 0.5552721619606018, "train/loss_total": 0.7412151098251343 }, { "epoch": 0.11835426128773711, "step": 443, "train/loss_ctc": 1.1364120244979858, "train/loss_error": 0.540030837059021, "train/loss_total": 0.6593071222305298 }, { "epoch": 0.11862142666310446, "step": 444, "train/loss_ctc": 1.15382719039917, "train/loss_error": 0.5365561842918396, "train/loss_total": 0.6600103974342346 }, { "epoch": 0.11888859203847181, "step": 445, "train/loss_ctc": 0.706750750541687, "train/loss_error": 0.5570515394210815, "train/loss_total": 0.5869914293289185 }, { "epoch": 0.11915575741383917, "step": 446, 
"train/loss_ctc": 0.822587251663208, "train/loss_error": 0.4619826376438141, "train/loss_total": 0.5341035723686218 }, { "epoch": 0.11942292278920652, "step": 447, "train/loss_ctc": 0.7967995405197144, "train/loss_error": 0.590046226978302, "train/loss_total": 0.6313968896865845 }, { "epoch": 0.11969008816457387, "step": 448, "train/loss_ctc": 0.806807279586792, "train/loss_error": 0.5064407587051392, "train/loss_total": 0.5665140748023987 }, { "epoch": 0.11995725353994123, "step": 449, "train/loss_ctc": 1.0420811176300049, "train/loss_error": 0.5491943359375, "train/loss_total": 0.6477717161178589 }, { "epoch": 0.12022441891530858, "grad_norm": 2.377002000808716, "learning_rate": 2.9283462463264763e-05, "loss": 0.6391, "step": 450 }, { "epoch": 0.12022441891530858, "step": 450, "train/loss_ctc": 0.7545739412307739, "train/loss_error": 0.5516980290412903, "train/loss_total": 0.5922732353210449 }, { "epoch": 0.12049158429067593, "step": 451, "train/loss_ctc": 1.308978796005249, "train/loss_error": 0.5421597361564636, "train/loss_total": 0.6955235600471497 }, { "epoch": 0.12075874966604327, "step": 452, "train/loss_ctc": 0.5447800755500793, "train/loss_error": 0.5065094232559204, "train/loss_total": 0.5141635537147522 }, { "epoch": 0.12102591504141064, "step": 453, "train/loss_ctc": 1.8747034072875977, "train/loss_error": 0.46357491612434387, "train/loss_total": 0.7458006143569946 }, { "epoch": 0.12129308041677798, "step": 454, "train/loss_ctc": 1.2576875686645508, "train/loss_error": 0.5089709162712097, "train/loss_total": 0.6587142944335938 }, { "epoch": 0.12156024579214533, "step": 455, "train/loss_ctc": 0.5304111242294312, "train/loss_error": 0.5291153788566589, "train/loss_total": 0.5293745398521423 }, { "epoch": 0.1218274111675127, "step": 456, "train/loss_ctc": 1.1391620635986328, "train/loss_error": 0.5145904421806335, "train/loss_total": 0.6395047903060913 }, { "epoch": 0.12209457654288004, "step": 457, "train/loss_ctc": 0.7216442823410034, 
"train/loss_error": 0.4467158317565918, "train/loss_total": 0.5017015337944031 }, { "epoch": 0.12236174191824739, "step": 458, "train/loss_ctc": 0.7997753620147705, "train/loss_error": 0.4757738709449768, "train/loss_total": 0.5405741930007935 }, { "epoch": 0.12262890729361475, "step": 459, "train/loss_ctc": 1.0578020811080933, "train/loss_error": 0.48392975330352783, "train/loss_total": 0.5987042188644409 }, { "epoch": 0.1228960726689821, "grad_norm": 0.6444090008735657, "learning_rate": 2.926743254074272e-05, "loss": 0.6016, "step": 460 }, { "epoch": 0.1228960726689821, "step": 460, "train/loss_ctc": 1.2051951885223389, "train/loss_error": 0.5667160153388977, "train/loss_total": 0.6944118738174438 }, { "epoch": 0.12316323804434945, "step": 461, "train/loss_ctc": 0.865144670009613, "train/loss_error": 0.5209210515022278, "train/loss_total": 0.5897657871246338 }, { "epoch": 0.12343040341971681, "step": 462, "train/loss_ctc": 0.3820625841617584, "train/loss_error": 0.5246064066886902, "train/loss_total": 0.4960976541042328 }, { "epoch": 0.12369756879508416, "step": 463, "train/loss_ctc": 0.6696563363075256, "train/loss_error": 0.47537651658058167, "train/loss_total": 0.5142324566841125 }, { "epoch": 0.12396473417045151, "step": 464, "train/loss_ctc": 1.196969747543335, "train/loss_error": 0.5710841417312622, "train/loss_total": 0.6962612867355347 }, { "epoch": 0.12423189954581886, "step": 465, "train/loss_ctc": 0.6950411796569824, "train/loss_error": 0.5326715707778931, "train/loss_total": 0.5651454925537109 }, { "epoch": 0.12449906492118622, "step": 466, "train/loss_ctc": 1.318609595298767, "train/loss_error": 0.493681401014328, "train/loss_total": 0.6586670279502869 }, { "epoch": 0.12476623029655357, "step": 467, "train/loss_ctc": 1.664915680885315, "train/loss_error": 0.4975878894329071, "train/loss_total": 0.7310534715652466 }, { "epoch": 0.12503339567192093, "step": 468, "train/loss_ctc": 1.064513087272644, "train/loss_error": 0.5035513639450073, 
"train/loss_total": 0.6157436966896057 }, { "epoch": 0.12530056104728826, "step": 469, "train/loss_ctc": 0.6592669486999512, "train/loss_error": 0.5394511818885803, "train/loss_total": 0.5634143352508545 }, { "epoch": 0.12556772642265562, "grad_norm": 1.6732606887817383, "learning_rate": 2.925140261822068e-05, "loss": 0.6125, "step": 470 }, { "epoch": 0.12556772642265562, "step": 470, "train/loss_ctc": 0.530584454536438, "train/loss_error": 0.48935186862945557, "train/loss_total": 0.4975983798503876 }, { "epoch": 0.12583489179802299, "step": 471, "train/loss_ctc": 0.8924154043197632, "train/loss_error": 0.47278839349746704, "train/loss_total": 0.5567138195037842 }, { "epoch": 0.12610205717339032, "step": 472, "train/loss_ctc": 1.3657383918762207, "train/loss_error": 0.5651764273643494, "train/loss_total": 0.7252888083457947 }, { "epoch": 0.12636922254875768, "step": 473, "train/loss_ctc": 1.2409747838974, "train/loss_error": 0.4820762574672699, "train/loss_total": 0.633855938911438 }, { "epoch": 0.12663638792412504, "step": 474, "train/loss_ctc": 0.8711045980453491, "train/loss_error": 0.4837658405303955, "train/loss_total": 0.561233639717102 }, { "epoch": 0.12690355329949238, "step": 475, "train/loss_ctc": 0.8695440292358398, "train/loss_error": 0.5547165274620056, "train/loss_total": 0.6176820397377014 }, { "epoch": 0.12717071867485974, "step": 476, "train/loss_ctc": 0.7391487956047058, "train/loss_error": 0.5933876037597656, "train/loss_total": 0.6225398182868958 }, { "epoch": 0.1274378840502271, "step": 477, "train/loss_ctc": 0.5453768968582153, "train/loss_error": 0.46927177906036377, "train/loss_total": 0.48449283838272095 }, { "epoch": 0.12770504942559444, "step": 478, "train/loss_ctc": 1.407310962677002, "train/loss_error": 0.49932777881622314, "train/loss_total": 0.6809244155883789 }, { "epoch": 0.1279722148009618, "step": 479, "train/loss_ctc": 0.9088143706321716, "train/loss_error": 0.47384437918663025, "train/loss_total": 0.5608383417129517 }, { 
"epoch": 0.12823938017632916, "grad_norm": 0.773769199848175, "learning_rate": 2.9235372695698637e-05, "loss": 0.5941, "step": 480 }, { "epoch": 0.12823938017632916, "step": 480, "train/loss_ctc": 1.2851486206054688, "train/loss_error": 0.5285723805427551, "train/loss_total": 0.6798876523971558 }, { "epoch": 0.1285065455516965, "step": 481, "train/loss_ctc": 0.5794373750686646, "train/loss_error": 0.4471128284931183, "train/loss_total": 0.47357773780822754 }, { "epoch": 0.12877371092706386, "step": 482, "train/loss_ctc": 1.1422432661056519, "train/loss_error": 0.5174018144607544, "train/loss_total": 0.6423701047897339 }, { "epoch": 0.12904087630243122, "step": 483, "train/loss_ctc": 1.3028106689453125, "train/loss_error": 0.479317843914032, "train/loss_total": 0.644016444683075 }, { "epoch": 0.12930804167779855, "step": 484, "train/loss_ctc": 1.0798161029815674, "train/loss_error": 0.5172553658485413, "train/loss_total": 0.6297675371170044 }, { "epoch": 0.12957520705316591, "step": 485, "train/loss_ctc": 1.613553762435913, "train/loss_error": 0.5185889005661011, "train/loss_total": 0.7375818490982056 }, { "epoch": 0.12984237242853328, "step": 486, "train/loss_ctc": 1.3178884983062744, "train/loss_error": 0.5823683142662048, "train/loss_total": 0.7294723987579346 }, { "epoch": 0.1301095378039006, "step": 487, "train/loss_ctc": 0.5547323822975159, "train/loss_error": 0.5354116559028625, "train/loss_total": 0.5392757654190063 }, { "epoch": 0.13037670317926797, "step": 488, "train/loss_ctc": 1.555856466293335, "train/loss_error": 0.47190001606941223, "train/loss_total": 0.6886913180351257 }, { "epoch": 0.1306438685546353, "step": 489, "train/loss_ctc": 0.9200010299682617, "train/loss_error": 0.4729265868663788, "train/loss_total": 0.5623415112495422 }, { "epoch": 0.13091103393000267, "grad_norm": 1.7320362329483032, "learning_rate": 2.9219342773176595e-05, "loss": 0.6327, "step": 490 }, { "epoch": 0.13091103393000267, "step": 490, "train/loss_ctc": 1.8479267358779907, 
"train/loss_error": 0.5058315396308899, "train/loss_total": 0.7742506265640259 }, { "epoch": 0.13117819930537003, "step": 491, "train/loss_ctc": 0.8162738084793091, "train/loss_error": 0.4982246458530426, "train/loss_total": 0.561834454536438 }, { "epoch": 0.13144536468073736, "step": 492, "train/loss_ctc": 1.7692028284072876, "train/loss_error": 0.5402845144271851, "train/loss_total": 0.7860682010650635 }, { "epoch": 0.13171253005610473, "step": 493, "train/loss_ctc": 1.6017277240753174, "train/loss_error": 0.49526482820510864, "train/loss_total": 0.7165573835372925 }, { "epoch": 0.1319796954314721, "step": 494, "train/loss_ctc": 0.7587260007858276, "train/loss_error": 0.5639987587928772, "train/loss_total": 0.6029442548751831 }, { "epoch": 0.13224686080683942, "step": 495, "train/loss_ctc": 1.16646146774292, "train/loss_error": 0.4751119911670685, "train/loss_total": 0.6133819222450256 }, { "epoch": 0.13251402618220678, "step": 496, "train/loss_ctc": 1.646693229675293, "train/loss_error": 0.46647509932518005, "train/loss_total": 0.7025187015533447 }, { "epoch": 0.13278119155757415, "step": 497, "train/loss_ctc": 1.1908984184265137, "train/loss_error": 0.4887852966785431, "train/loss_total": 0.629207968711853 }, { "epoch": 0.13304835693294148, "step": 498, "train/loss_ctc": 0.8932982683181763, "train/loss_error": 0.5767104625701904, "train/loss_total": 0.6400280594825745 }, { "epoch": 0.13331552230830884, "step": 499, "train/loss_ctc": 1.2727429866790771, "train/loss_error": 0.5188981294631958, "train/loss_total": 0.66966712474823 }, { "epoch": 0.1335826876836762, "grad_norm": 2.3900516033172607, "learning_rate": 2.9203312850654553e-05, "loss": 0.6696, "step": 500 }, { "epoch": 0.1335826876836762, "step": 500, "train/loss_ctc": 0.40107953548431396, "train/loss_error": 0.5157337784767151, "train/loss_total": 0.4928029179573059 }, { "epoch": 0.13384985305904354, "step": 501, "train/loss_ctc": 0.7166010141372681, "train/loss_error": 0.5288755893707275, 
"train/loss_total": 0.5664206743240356 }, { "epoch": 0.1341170184344109, "step": 502, "train/loss_ctc": 0.868682861328125, "train/loss_error": 0.47257277369499207, "train/loss_total": 0.5517947673797607 }, { "epoch": 0.13438418380977826, "step": 503, "train/loss_ctc": 1.3301730155944824, "train/loss_error": 0.5516530275344849, "train/loss_total": 0.7073570489883423 }, { "epoch": 0.1346513491851456, "step": 504, "train/loss_ctc": 1.1712450981140137, "train/loss_error": 0.5720334053039551, "train/loss_total": 0.6918757557868958 }, { "epoch": 0.13491851456051296, "step": 505, "train/loss_ctc": 0.504365086555481, "train/loss_error": 0.5001846551895142, "train/loss_total": 0.5010207295417786 }, { "epoch": 0.13518567993588032, "step": 506, "train/loss_ctc": 0.6952396631240845, "train/loss_error": 0.48317092657089233, "train/loss_total": 0.5255846977233887 }, { "epoch": 0.13545284531124765, "step": 507, "train/loss_ctc": 0.7115944027900696, "train/loss_error": 0.5832915306091309, "train/loss_total": 0.6089521050453186 }, { "epoch": 0.13572001068661502, "step": 508, "train/loss_ctc": 0.7496830821037292, "train/loss_error": 0.5561269521713257, "train/loss_total": 0.5948381423950195 }, { "epoch": 0.13598717606198238, "step": 509, "train/loss_ctc": 0.7688351273536682, "train/loss_error": 0.5153519511222839, "train/loss_total": 0.5660486221313477 }, { "epoch": 0.1362543414373497, "grad_norm": 1.9366389513015747, "learning_rate": 2.9187282928132515e-05, "loss": 0.5807, "step": 510 }, { "epoch": 0.1362543414373497, "step": 510, "train/loss_ctc": 1.6880443096160889, "train/loss_error": 0.5031471252441406, "train/loss_total": 0.7401266098022461 }, { "epoch": 0.13652150681271707, "step": 511, "train/loss_ctc": 0.898500382900238, "train/loss_error": 0.595676839351654, "train/loss_total": 0.6562415361404419 }, { "epoch": 0.13678867218808444, "step": 512, "train/loss_ctc": 0.9092766046524048, "train/loss_error": 0.5424544215202332, "train/loss_total": 0.6158188581466675 }, { "epoch": 
0.13705583756345177, "step": 513, "train/loss_ctc": 1.3709317445755005, "train/loss_error": 0.48803192377090454, "train/loss_total": 0.6646118760108948 }, { "epoch": 0.13732300293881913, "step": 514, "train/loss_ctc": 1.2794430255889893, "train/loss_error": 0.5212221741676331, "train/loss_total": 0.6728663444519043 }, { "epoch": 0.1375901683141865, "step": 515, "train/loss_ctc": 1.0445194244384766, "train/loss_error": 0.4717756509780884, "train/loss_total": 0.5863243937492371 }, { "epoch": 0.13785733368955383, "step": 516, "train/loss_ctc": 0.854037880897522, "train/loss_error": 0.5168581604957581, "train/loss_total": 0.5842941403388977 }, { "epoch": 0.1381244990649212, "step": 517, "train/loss_ctc": 0.8484293818473816, "train/loss_error": 0.5615571141242981, "train/loss_total": 0.6189315915107727 }, { "epoch": 0.13839166444028853, "step": 518, "train/loss_ctc": 1.5839791297912598, "train/loss_error": 0.5152347683906555, "train/loss_total": 0.7289836406707764 }, { "epoch": 0.1386588298156559, "step": 519, "train/loss_ctc": 0.5869594812393188, "train/loss_error": 0.47480469942092896, "train/loss_total": 0.49723565578460693 }, { "epoch": 0.13892599519102325, "grad_norm": 1.1741842031478882, "learning_rate": 2.9171253005610473e-05, "loss": 0.6365, "step": 520 }, { "epoch": 0.13892599519102325, "step": 520, "train/loss_ctc": 1.223109483718872, "train/loss_error": 0.5877907276153564, "train/loss_total": 0.7148544788360596 }, { "epoch": 0.13919316056639058, "step": 521, "train/loss_ctc": 0.8713310360908508, "train/loss_error": 0.5120916962623596, "train/loss_total": 0.5839395523071289 }, { "epoch": 0.13946032594175795, "step": 522, "train/loss_ctc": 0.9514029026031494, "train/loss_error": 0.4914538562297821, "train/loss_total": 0.5834436416625977 }, { "epoch": 0.1397274913171253, "step": 523, "train/loss_ctc": 1.0525708198547363, "train/loss_error": 0.55308598279953, "train/loss_total": 0.6529829502105713 }, { "epoch": 0.13999465669249264, "step": 524, "train/loss_ctc": 
0.451083242893219, "train/loss_error": 0.5005499124526978, "train/loss_total": 0.49065661430358887 }, { "epoch": 0.14026182206786, "step": 525, "train/loss_ctc": 1.1819567680358887, "train/loss_error": 0.5377739667892456, "train/loss_total": 0.6666105389595032 }, { "epoch": 0.14052898744322737, "step": 526, "train/loss_ctc": 1.2064006328582764, "train/loss_error": 0.5581830143928528, "train/loss_total": 0.6878265142440796 }, { "epoch": 0.1407961528185947, "step": 527, "train/loss_ctc": 0.803486168384552, "train/loss_error": 0.506977915763855, "train/loss_total": 0.5662795901298523 }, { "epoch": 0.14106331819396206, "step": 528, "train/loss_ctc": 1.4547780752182007, "train/loss_error": 0.47017163038253784, "train/loss_total": 0.6670929193496704 }, { "epoch": 0.14133048356932942, "step": 529, "train/loss_ctc": 1.3029611110687256, "train/loss_error": 0.4622096121311188, "train/loss_total": 0.6303598880767822 }, { "epoch": 0.14159764894469676, "grad_norm": 1.870969533920288, "learning_rate": 2.915522308308843e-05, "loss": 0.6244, "step": 530 }, { "epoch": 0.14159764894469676, "step": 530, "train/loss_ctc": 0.8013580441474915, "train/loss_error": 0.6078348159790039, "train/loss_total": 0.6465394496917725 }, { "epoch": 0.14186481432006412, "step": 531, "train/loss_ctc": 1.265190601348877, "train/loss_error": 0.5803623199462891, "train/loss_total": 0.7173280119895935 }, { "epoch": 0.14213197969543148, "step": 532, "train/loss_ctc": 1.044028639793396, "train/loss_error": 0.5045298933982849, "train/loss_total": 0.6124296188354492 }, { "epoch": 0.14239914507079882, "step": 533, "train/loss_ctc": 0.5888437628746033, "train/loss_error": 0.4516107141971588, "train/loss_total": 0.47905734181404114 }, { "epoch": 0.14266631044616618, "step": 534, "train/loss_ctc": 1.0598745346069336, "train/loss_error": 0.4376225471496582, "train/loss_total": 0.5620729327201843 }, { "epoch": 0.14293347582153354, "step": 535, "train/loss_ctc": 0.9275081157684326, "train/loss_error": 
0.4731937348842621, "train/loss_total": 0.5640566349029541 }, { "epoch": 0.14320064119690087, "step": 536, "train/loss_ctc": 0.7904582023620605, "train/loss_error": 0.46661579608917236, "train/loss_total": 0.531384289264679 }, { "epoch": 0.14346780657226824, "step": 537, "train/loss_ctc": 1.309332013130188, "train/loss_error": 0.5859853029251099, "train/loss_total": 0.7306546568870544 }, { "epoch": 0.1437349719476356, "step": 538, "train/loss_ctc": 0.7723438739776611, "train/loss_error": 0.5291153788566589, "train/loss_total": 0.5777610540390015 }, { "epoch": 0.14400213732300293, "step": 539, "train/loss_ctc": 0.9170811772346497, "train/loss_error": 0.547149121761322, "train/loss_total": 0.6211355328559875 }, { "epoch": 0.1442693026983703, "grad_norm": 1.056070327758789, "learning_rate": 2.9139193160566393e-05, "loss": 0.6042, "step": 540 }, { "epoch": 0.1442693026983703, "step": 540, "train/loss_ctc": 0.9497941732406616, "train/loss_error": 0.48296448588371277, "train/loss_total": 0.5763304233551025 }, { "epoch": 0.14453646807373766, "step": 541, "train/loss_ctc": 0.3401973843574524, "train/loss_error": 0.5593288540840149, "train/loss_total": 0.5155025720596313 }, { "epoch": 0.144803633449105, "step": 542, "train/loss_ctc": 0.4255560040473938, "train/loss_error": 0.503069281578064, "train/loss_total": 0.48756662011146545 }, { "epoch": 0.14507079882447235, "step": 543, "train/loss_ctc": 0.8485113382339478, "train/loss_error": 0.5462634563446045, "train/loss_total": 0.606713056564331 }, { "epoch": 0.14533796419983971, "step": 544, "train/loss_ctc": 1.0052019357681274, "train/loss_error": 0.5103279948234558, "train/loss_total": 0.6093027591705322 }, { "epoch": 0.14560512957520705, "step": 545, "train/loss_ctc": 1.9523415565490723, "train/loss_error": 0.547913134098053, "train/loss_total": 0.8287988305091858 }, { "epoch": 0.1458722949505744, "step": 546, "train/loss_ctc": 0.8697795867919922, "train/loss_error": 0.5507000684738159, "train/loss_total": 0.614516019821167 
}, { "epoch": 0.14613946032594174, "step": 547, "train/loss_ctc": 0.8446522951126099, "train/loss_error": 0.4797784388065338, "train/loss_total": 0.552753210067749 }, { "epoch": 0.1464066257013091, "step": 548, "train/loss_ctc": 0.6615675687789917, "train/loss_error": 0.5024255514144897, "train/loss_total": 0.5342539548873901 }, { "epoch": 0.14667379107667647, "step": 549, "train/loss_ctc": 0.5663042664527893, "train/loss_error": 0.4836422801017761, "train/loss_total": 0.5001747012138367 }, { "epoch": 0.1469409564520438, "grad_norm": 1.9814825057983398, "learning_rate": 2.912316323804435e-05, "loss": 0.5826, "step": 550 }, { "epoch": 0.1469409564520438, "step": 550, "train/loss_ctc": 1.0623879432678223, "train/loss_error": 0.5189192891120911, "train/loss_total": 0.6276130676269531 }, { "epoch": 0.14720812182741116, "step": 551, "train/loss_ctc": 1.4918023347854614, "train/loss_error": 0.568118691444397, "train/loss_total": 0.7528554201126099 }, { "epoch": 0.14747528720277853, "step": 552, "train/loss_ctc": 0.9080778360366821, "train/loss_error": 0.4581068754196167, "train/loss_total": 0.5481010675430298 }, { "epoch": 0.14774245257814586, "step": 553, "train/loss_ctc": 1.0611644983291626, "train/loss_error": 0.543520450592041, "train/loss_total": 0.6470492482185364 }, { "epoch": 0.14800961795351322, "step": 554, "train/loss_ctc": 0.7898975610733032, "train/loss_error": 0.46570825576782227, "train/loss_total": 0.5305461287498474 }, { "epoch": 0.14827678332888058, "step": 555, "train/loss_ctc": 0.8616970181465149, "train/loss_error": 0.521521806716919, "train/loss_total": 0.589556872844696 }, { "epoch": 0.14854394870424792, "step": 556, "train/loss_ctc": 1.2956838607788086, "train/loss_error": 0.4800620377063751, "train/loss_total": 0.6431863903999329 }, { "epoch": 0.14881111407961528, "step": 557, "train/loss_ctc": 0.635080099105835, "train/loss_error": 0.5708222985267639, "train/loss_total": 0.583673894405365 }, { "epoch": 0.14907827945498264, "step": 558, 
"train/loss_ctc": 1.349147915840149, "train/loss_error": 0.5744075775146484, "train/loss_total": 0.7293556928634644 }, { "epoch": 0.14934544483034998, "step": 559, "train/loss_ctc": 0.8329817056655884, "train/loss_error": 0.5563330054283142, "train/loss_total": 0.611662745475769 }, { "epoch": 0.14961261020571734, "grad_norm": 1.3351603746414185, "learning_rate": 2.910713331552231e-05, "loss": 0.6264, "step": 560 }, { "epoch": 0.14961261020571734, "step": 560, "train/loss_ctc": 1.0153864622116089, "train/loss_error": 0.5021676421165466, "train/loss_total": 0.604811429977417 }, { "epoch": 0.1498797755810847, "step": 561, "train/loss_ctc": 0.3233950138092041, "train/loss_error": 0.5068711042404175, "train/loss_total": 0.4701758921146393 }, { "epoch": 0.15014694095645204, "step": 562, "train/loss_ctc": 0.7119733095169067, "train/loss_error": 0.5462981462478638, "train/loss_total": 0.5794332027435303 }, { "epoch": 0.1504141063318194, "step": 563, "train/loss_ctc": 1.1581733226776123, "train/loss_error": 0.45216333866119385, "train/loss_total": 0.5933653116226196 }, { "epoch": 0.15068127170718676, "step": 564, "train/loss_ctc": 1.2037092447280884, "train/loss_error": 0.5211354494094849, "train/loss_total": 0.6576502323150635 }, { "epoch": 0.1509484370825541, "step": 565, "train/loss_ctc": 1.444615125656128, "train/loss_error": 0.49339935183525085, "train/loss_total": 0.6836425065994263 }, { "epoch": 0.15121560245792146, "step": 566, "train/loss_ctc": 1.1004537343978882, "train/loss_error": 0.5016429424285889, "train/loss_total": 0.6214051246643066 }, { "epoch": 0.15148276783328882, "step": 567, "train/loss_ctc": 0.934130072593689, "train/loss_error": 0.48285266757011414, "train/loss_total": 0.5731081962585449 }, { "epoch": 0.15174993320865615, "step": 568, "train/loss_ctc": 0.8031569719314575, "train/loss_error": 0.5155910849571228, "train/loss_total": 0.5731042623519897 }, { "epoch": 0.1520170985840235, "step": 569, "train/loss_ctc": 0.699751615524292, 
"train/loss_error": 0.4866501986980438, "train/loss_total": 0.5292704701423645 }, { "epoch": 0.15228426395939088, "grad_norm": 2.5623207092285156, "learning_rate": 2.909110339300027e-05, "loss": 0.5886, "step": 570 }, { "epoch": 0.15228426395939088, "step": 570, "train/loss_ctc": 0.755042314529419, "train/loss_error": 0.4657212793827057, "train/loss_total": 0.5235854983329773 }, { "epoch": 0.1525514293347582, "step": 571, "train/loss_ctc": 1.2140294313430786, "train/loss_error": 0.48520976305007935, "train/loss_total": 0.6309736967086792 }, { "epoch": 0.15281859471012557, "step": 572, "train/loss_ctc": 1.3767049312591553, "train/loss_error": 0.5249968767166138, "train/loss_total": 0.6953384876251221 }, { "epoch": 0.15308576008549293, "step": 573, "train/loss_ctc": 1.2682220935821533, "train/loss_error": 0.4617469310760498, "train/loss_total": 0.6230419874191284 }, { "epoch": 0.15335292546086027, "step": 574, "train/loss_ctc": 0.47063004970550537, "train/loss_error": 0.5422770380973816, "train/loss_total": 0.5279476642608643 }, { "epoch": 0.15362009083622763, "step": 575, "train/loss_ctc": 0.8568827509880066, "train/loss_error": 0.49337154626846313, "train/loss_total": 0.5660737752914429 }, { "epoch": 0.15388725621159496, "step": 576, "train/loss_ctc": 1.3730292320251465, "train/loss_error": 0.5225735306739807, "train/loss_total": 0.6926646828651428 }, { "epoch": 0.15415442158696233, "step": 577, "train/loss_ctc": 0.8511825799942017, "train/loss_error": 0.4987700581550598, "train/loss_total": 0.5692525506019592 }, { "epoch": 0.1544215869623297, "step": 578, "train/loss_ctc": 0.8015167713165283, "train/loss_error": 0.5393330454826355, "train/loss_total": 0.591769814491272 }, { "epoch": 0.15468875233769702, "step": 579, "train/loss_ctc": 0.6132767200469971, "train/loss_error": 0.518268883228302, "train/loss_total": 0.5372704863548279 }, { "epoch": 0.15495591771306438, "grad_norm": 0.9084275364875793, "learning_rate": 2.9075073470478228e-05, "loss": 0.5958, "step": 580 
}, { "epoch": 0.15495591771306438, "step": 580, "train/loss_ctc": 1.8508152961730957, "train/loss_error": 0.6067638397216797, "train/loss_total": 0.8555741310119629 }, { "epoch": 0.15522308308843175, "step": 581, "train/loss_ctc": 0.9583947658538818, "train/loss_error": 0.5879144668579102, "train/loss_total": 0.6620105504989624 }, { "epoch": 0.15549024846379908, "step": 582, "train/loss_ctc": 0.6632147431373596, "train/loss_error": 0.5474986433982849, "train/loss_total": 0.5706418752670288 }, { "epoch": 0.15575741383916644, "step": 583, "train/loss_ctc": 1.1217647790908813, "train/loss_error": 0.5046138763427734, "train/loss_total": 0.628044068813324 }, { "epoch": 0.1560245792145338, "step": 584, "train/loss_ctc": 0.7030678987503052, "train/loss_error": 0.5647741556167603, "train/loss_total": 0.5924329161643982 }, { "epoch": 0.15629174458990114, "step": 585, "train/loss_ctc": 0.6647886037826538, "train/loss_error": 0.4606087803840637, "train/loss_total": 0.5014447569847107 }, { "epoch": 0.1565589099652685, "step": 586, "train/loss_ctc": 0.8704140186309814, "train/loss_error": 0.5438212156295776, "train/loss_total": 0.6091398000717163 }, { "epoch": 0.15682607534063586, "step": 587, "train/loss_ctc": 0.7203927040100098, "train/loss_error": 0.5144816040992737, "train/loss_total": 0.5556638240814209 }, { "epoch": 0.1570932407160032, "step": 588, "train/loss_ctc": 1.073740005493164, "train/loss_error": 0.5449745655059814, "train/loss_total": 0.6507276892662048 }, { "epoch": 0.15736040609137056, "step": 589, "train/loss_ctc": 1.1620416641235352, "train/loss_error": 0.4550287425518036, "train/loss_total": 0.596431314945221 }, { "epoch": 0.15762757146673792, "grad_norm": 1.2897586822509766, "learning_rate": 2.9059043547956186e-05, "loss": 0.6222, "step": 590 }, { "epoch": 0.15762757146673792, "step": 590, "train/loss_ctc": 0.8347750902175903, "train/loss_error": 0.45791396498680115, "train/loss_total": 0.5332862138748169 }, { "epoch": 0.15789473684210525, "step": 591, 
"train/loss_ctc": 0.7255815267562866, "train/loss_error": 0.5675109028816223, "train/loss_total": 0.5991250276565552 }, { "epoch": 0.15816190221747262, "step": 592, "train/loss_ctc": 1.546546459197998, "train/loss_error": 0.4972879886627197, "train/loss_total": 0.7071397304534912 }, { "epoch": 0.15842906759283998, "step": 593, "train/loss_ctc": 0.6875385046005249, "train/loss_error": 0.5422487258911133, "train/loss_total": 0.5713067054748535 }, { "epoch": 0.1586962329682073, "step": 594, "train/loss_ctc": 0.7718544006347656, "train/loss_error": 0.5670009851455688, "train/loss_total": 0.6079716682434082 }, { "epoch": 0.15896339834357467, "step": 595, "train/loss_ctc": 1.2661796808242798, "train/loss_error": 0.5096887350082397, "train/loss_total": 0.6609869003295898 }, { "epoch": 0.15923056371894204, "step": 596, "train/loss_ctc": 0.7508093118667603, "train/loss_error": 0.5040799379348755, "train/loss_total": 0.5534257888793945 }, { "epoch": 0.15949772909430937, "step": 597, "train/loss_ctc": 0.9876939058303833, "train/loss_error": 0.5106357336044312, "train/loss_total": 0.6060473918914795 }, { "epoch": 0.15976489446967673, "step": 598, "train/loss_ctc": 0.8044476509094238, "train/loss_error": 0.5391970872879028, "train/loss_total": 0.5922472476959229 }, { "epoch": 0.1600320598450441, "step": 599, "train/loss_ctc": 0.9144682884216309, "train/loss_error": 0.5843557715415955, "train/loss_total": 0.6503782868385315 }, { "epoch": 0.16029922522041143, "grad_norm": 1.1121350526809692, "learning_rate": 2.9043013625434144e-05, "loss": 0.6082, "step": 600 }, { "epoch": 0.16029922522041143, "step": 600, "train/loss_ctc": 1.2830016613006592, "train/loss_error": 0.4772006869316101, "train/loss_total": 0.6383609175682068 }, { "epoch": 0.1605663905957788, "step": 601, "train/loss_ctc": 1.0503169298171997, "train/loss_error": 0.4403976500034332, "train/loss_total": 0.5623815059661865 }, { "epoch": 0.16083355597114615, "step": 602, "train/loss_ctc": 0.8915137052536011, 
"train/loss_error": 0.48567020893096924, "train/loss_total": 0.5668389201164246 }, { "epoch": 0.1611007213465135, "step": 603, "train/loss_ctc": 1.4858094453811646, "train/loss_error": 0.4911912977695465, "train/loss_total": 0.6901149749755859 }, { "epoch": 0.16136788672188085, "step": 604, "train/loss_ctc": 1.3219106197357178, "train/loss_error": 0.460945725440979, "train/loss_total": 0.6331387162208557 }, { "epoch": 0.16163505209724818, "step": 605, "train/loss_ctc": 1.2132049798965454, "train/loss_error": 0.4610176682472229, "train/loss_total": 0.6114551424980164 }, { "epoch": 0.16190221747261554, "step": 606, "train/loss_ctc": 0.8226356506347656, "train/loss_error": 0.509989857673645, "train/loss_total": 0.5725190043449402 }, { "epoch": 0.1621693828479829, "step": 607, "train/loss_ctc": 1.0928561687469482, "train/loss_error": 0.45307111740112305, "train/loss_total": 0.5810281038284302 }, { "epoch": 0.16243654822335024, "step": 608, "train/loss_ctc": 0.5364423394203186, "train/loss_error": 0.4809090793132782, "train/loss_total": 0.4920157492160797 }, { "epoch": 0.1627037135987176, "step": 609, "train/loss_ctc": 0.9556733965873718, "train/loss_error": 0.46421992778778076, "train/loss_total": 0.56251060962677 }, { "epoch": 0.16297087897408497, "grad_norm": 0.8041022419929504, "learning_rate": 2.9026983702912102e-05, "loss": 0.591, "step": 610 }, { "epoch": 0.16297087897408497, "step": 610, "train/loss_ctc": 1.1439447402954102, "train/loss_error": 0.5312644839286804, "train/loss_total": 0.6538005471229553 }, { "epoch": 0.1632380443494523, "step": 611, "train/loss_ctc": 0.7259677648544312, "train/loss_error": 0.48708412051200867, "train/loss_total": 0.5348608493804932 }, { "epoch": 0.16350520972481966, "step": 612, "train/loss_ctc": 0.8178519010543823, "train/loss_error": 0.5026345252990723, "train/loss_total": 0.5656780004501343 }, { "epoch": 0.16377237510018702, "step": 613, "train/loss_ctc": 1.00822114944458, "train/loss_error": 0.5895878672599792, 
"train/loss_total": 0.6733145117759705 }, { "epoch": 0.16403954047555436, "step": 614, "train/loss_ctc": 0.6351960897445679, "train/loss_error": 0.46133914589881897, "train/loss_total": 0.49611055850982666 }, { "epoch": 0.16430670585092172, "step": 615, "train/loss_ctc": 1.6261650323867798, "train/loss_error": 0.4976438581943512, "train/loss_total": 0.7233481407165527 }, { "epoch": 0.16457387122628908, "step": 616, "train/loss_ctc": 0.8748307228088379, "train/loss_error": 0.4664894938468933, "train/loss_total": 0.5481577515602112 }, { "epoch": 0.16484103660165642, "step": 617, "train/loss_ctc": 0.9514020681381226, "train/loss_error": 0.48833975195884705, "train/loss_total": 0.5809522271156311 }, { "epoch": 0.16510820197702378, "step": 618, "train/loss_ctc": 0.8953353762626648, "train/loss_error": 0.5264469981193542, "train/loss_total": 0.6002246737480164 }, { "epoch": 0.16537536735239114, "step": 619, "train/loss_ctc": 1.2165584564208984, "train/loss_error": 0.5089241862297058, "train/loss_total": 0.6504510641098022 }, { "epoch": 0.16564253272775847, "grad_norm": 0.9557764530181885, "learning_rate": 2.9010953780390064e-05, "loss": 0.6027, "step": 620 }, { "epoch": 0.16564253272775847, "step": 620, "train/loss_ctc": 0.5918700098991394, "train/loss_error": 0.5087808966636658, "train/loss_total": 0.5253987312316895 }, { "epoch": 0.16590969810312584, "step": 621, "train/loss_ctc": 1.1070590019226074, "train/loss_error": 0.4982123076915741, "train/loss_total": 0.6199816465377808 }, { "epoch": 0.1661768634784932, "step": 622, "train/loss_ctc": 1.5898219347000122, "train/loss_error": 0.4894649386405945, "train/loss_total": 0.7095363736152649 }, { "epoch": 0.16644402885386053, "step": 623, "train/loss_ctc": 0.9549114108085632, "train/loss_error": 0.47506973147392273, "train/loss_total": 0.5710380673408508 }, { "epoch": 0.1667111942292279, "step": 624, "train/loss_ctc": 0.6662319898605347, "train/loss_error": 0.5152580738067627, "train/loss_total": 0.545452892780304 }, { 
"epoch": 0.16697835960459526, "step": 625, "train/loss_ctc": 0.4526366591453552, "train/loss_error": 0.5139539241790771, "train/loss_total": 0.5016905069351196 }, { "epoch": 0.1672455249799626, "step": 626, "train/loss_ctc": 2.5351874828338623, "train/loss_error": 0.5222504734992981, "train/loss_total": 0.9248379468917847 }, { "epoch": 0.16751269035532995, "step": 627, "train/loss_ctc": 0.7638932466506958, "train/loss_error": 0.48463720083236694, "train/loss_total": 0.5404884219169617 }, { "epoch": 0.1677798557306973, "step": 628, "train/loss_ctc": 1.267541766166687, "train/loss_error": 0.5215247869491577, "train/loss_total": 0.6707282066345215 }, { "epoch": 0.16804702110606465, "step": 629, "train/loss_ctc": 1.56278657913208, "train/loss_error": 0.5482267141342163, "train/loss_total": 0.7511386871337891 }, { "epoch": 0.168314186481432, "grad_norm": 1.2251821756362915, "learning_rate": 2.8994923857868022e-05, "loss": 0.636, "step": 630 }, { "epoch": 0.168314186481432, "step": 630, "train/loss_ctc": 1.1645774841308594, "train/loss_error": 0.5281982421875, "train/loss_total": 0.6554741263389587 }, { "epoch": 0.16858135185679937, "step": 631, "train/loss_ctc": 1.3456486463546753, "train/loss_error": 0.5101549029350281, "train/loss_total": 0.6772536635398865 }, { "epoch": 0.1688485172321667, "step": 632, "train/loss_ctc": 1.4009835720062256, "train/loss_error": 0.5898531675338745, "train/loss_total": 0.7520792484283447 }, { "epoch": 0.16911568260753407, "step": 633, "train/loss_ctc": 2.1414272785186768, "train/loss_error": 0.45913827419281006, "train/loss_total": 0.7955960631370544 }, { "epoch": 0.1693828479829014, "step": 634, "train/loss_ctc": 1.4670941829681396, "train/loss_error": 0.5332866907119751, "train/loss_total": 0.720048189163208 }, { "epoch": 0.16965001335826876, "step": 635, "train/loss_ctc": 0.5595743060112, "train/loss_error": 0.4625140130519867, "train/loss_total": 0.4819260835647583 }, { "epoch": 0.16991717873363613, "step": 636, "train/loss_ctc": 
0.8355798721313477, "train/loss_error": 0.4922889173030853, "train/loss_total": 0.5609471201896667 }, { "epoch": 0.17018434410900346, "step": 637, "train/loss_ctc": 0.8924448490142822, "train/loss_error": 0.5626077651977539, "train/loss_total": 0.6285752058029175 }, { "epoch": 0.17045150948437082, "step": 638, "train/loss_ctc": 1.5128264427185059, "train/loss_error": 0.5383086800575256, "train/loss_total": 0.7332122325897217 }, { "epoch": 0.17071867485973818, "step": 639, "train/loss_ctc": 1.040295124053955, "train/loss_error": 0.5567482709884644, "train/loss_total": 0.6534576416015625 }, { "epoch": 0.17098584023510552, "grad_norm": 0.954082190990448, "learning_rate": 2.897889393534598e-05, "loss": 0.6659, "step": 640 }, { "epoch": 0.17098584023510552, "step": 640, "train/loss_ctc": 0.7548115253448486, "train/loss_error": 0.48697590827941895, "train/loss_total": 0.5405430197715759 }, { "epoch": 0.17125300561047288, "step": 641, "train/loss_ctc": 1.7039775848388672, "train/loss_error": 0.43705832958221436, "train/loss_total": 0.6904422044754028 }, { "epoch": 0.17152017098584024, "step": 642, "train/loss_ctc": 0.7473865151405334, "train/loss_error": 0.5438893437385559, "train/loss_total": 0.5845887660980225 }, { "epoch": 0.17178733636120758, "step": 643, "train/loss_ctc": 0.5779708623886108, "train/loss_error": 0.518446683883667, "train/loss_total": 0.5303515195846558 }, { "epoch": 0.17205450173657494, "step": 644, "train/loss_ctc": 0.5217878818511963, "train/loss_error": 0.5086885690689087, "train/loss_total": 0.5113084316253662 }, { "epoch": 0.1723216671119423, "step": 645, "train/loss_ctc": 1.296729326248169, "train/loss_error": 0.5269397497177124, "train/loss_total": 0.6808976531028748 }, { "epoch": 0.17258883248730963, "step": 646, "train/loss_ctc": 1.155969500541687, "train/loss_error": 0.516257643699646, "train/loss_total": 0.6442000269889832 }, { "epoch": 0.172855997862677, "step": 647, "train/loss_ctc": 0.5881505608558655, "train/loss_error": 
0.5475212931632996, "train/loss_total": 0.5556471347808838 }, { "epoch": 0.17312316323804436, "step": 648, "train/loss_ctc": 1.3925361633300781, "train/loss_error": 0.5320842266082764, "train/loss_total": 0.7041746377944946 }, { "epoch": 0.1733903286134117, "step": 649, "train/loss_ctc": 0.7996642589569092, "train/loss_error": 0.554225742816925, "train/loss_total": 0.6033134460449219 }, { "epoch": 0.17365749398877905, "grad_norm": 0.8094308376312256, "learning_rate": 2.8962864012823938e-05, "loss": 0.6045, "step": 650 }, { "epoch": 0.17365749398877905, "step": 650, "train/loss_ctc": 0.6608397960662842, "train/loss_error": 0.467709481716156, "train/loss_total": 0.5063355565071106 }, { "epoch": 0.17392465936414642, "step": 651, "train/loss_ctc": 0.9415767788887024, "train/loss_error": 0.4886608421802521, "train/loss_total": 0.5792440176010132 }, { "epoch": 0.17419182473951375, "step": 652, "train/loss_ctc": 1.1027183532714844, "train/loss_error": 0.5361341834068298, "train/loss_total": 0.6494510173797607 }, { "epoch": 0.1744589901148811, "step": 653, "train/loss_ctc": 1.251807689666748, "train/loss_error": 0.5809524059295654, "train/loss_total": 0.7151234745979309 }, { "epoch": 0.17472615549024847, "step": 654, "train/loss_ctc": 0.6133713722229004, "train/loss_error": 0.5042531490325928, "train/loss_total": 0.5260767936706543 }, { "epoch": 0.1749933208656158, "step": 655, "train/loss_ctc": 1.0216035842895508, "train/loss_error": 0.49234339594841003, "train/loss_total": 0.5981954336166382 }, { "epoch": 0.17526048624098317, "step": 656, "train/loss_ctc": 0.9521618485450745, "train/loss_error": 0.49375924468040466, "train/loss_total": 0.5854398012161255 }, { "epoch": 0.17552765161635053, "step": 657, "train/loss_ctc": 0.861116886138916, "train/loss_error": 0.5025959610939026, "train/loss_total": 0.5743001699447632 }, { "epoch": 0.17579481699171787, "step": 658, "train/loss_ctc": 0.622373640537262, "train/loss_error": 0.4969383180141449, "train/loss_total": 
0.5220254063606262 }, { "epoch": 0.17606198236708523, "step": 659, "train/loss_ctc": 0.7691766619682312, "train/loss_error": 0.5003224611282349, "train/loss_total": 0.5540933012962341 }, { "epoch": 0.1763291477424526, "grad_norm": 1.3702197074890137, "learning_rate": 2.8946834090301896e-05, "loss": 0.581, "step": 660 }, { "epoch": 0.1763291477424526, "step": 660, "train/loss_ctc": 1.1410284042358398, "train/loss_error": 0.4869234263896942, "train/loss_total": 0.6177444458007812 }, { "epoch": 0.17659631311781993, "step": 661, "train/loss_ctc": 0.7714488506317139, "train/loss_error": 0.501750111579895, "train/loss_total": 0.5556898713111877 }, { "epoch": 0.1768634784931873, "step": 662, "train/loss_ctc": 1.7846763134002686, "train/loss_error": 0.5044043660163879, "train/loss_total": 0.760458767414093 }, { "epoch": 0.17713064386855465, "step": 663, "train/loss_ctc": 1.1607376337051392, "train/loss_error": 0.5042917728424072, "train/loss_total": 0.6355809569358826 }, { "epoch": 0.17739780924392198, "step": 664, "train/loss_ctc": 1.6991981267929077, "train/loss_error": 0.5250749588012695, "train/loss_total": 0.7598996162414551 }, { "epoch": 0.17766497461928935, "step": 665, "train/loss_ctc": 1.0583025217056274, "train/loss_error": 0.4583629369735718, "train/loss_total": 0.5783509016036987 }, { "epoch": 0.17793213999465668, "step": 666, "train/loss_ctc": 0.8259671330451965, "train/loss_error": 0.46002861857414246, "train/loss_total": 0.5332162976264954 }, { "epoch": 0.17819930537002404, "step": 667, "train/loss_ctc": 1.3434827327728271, "train/loss_error": 0.5170500874519348, "train/loss_total": 0.6823366284370422 }, { "epoch": 0.1784664707453914, "step": 668, "train/loss_ctc": 0.8318564891815186, "train/loss_error": 0.5737220644950867, "train/loss_total": 0.6253489851951599 }, { "epoch": 0.17873363612075874, "step": 669, "train/loss_ctc": 0.9634722471237183, "train/loss_error": 0.5117483735084534, "train/loss_total": 0.6020931601524353 }, { "epoch": 0.1790008014961261, 
"grad_norm": 0.8110823035240173, "learning_rate": 2.8930804167779854e-05, "loss": 0.6351, "step": 670 }, { "epoch": 0.1790008014961261, "step": 670, "train/loss_ctc": 0.8936408758163452, "train/loss_error": 0.5393355488777161, "train/loss_total": 0.6101966500282288 }, { "epoch": 0.17926796687149346, "step": 671, "train/loss_ctc": 1.3114309310913086, "train/loss_error": 0.5148850083351135, "train/loss_total": 0.6741942167282104 }, { "epoch": 0.1795351322468608, "step": 672, "train/loss_ctc": 0.9220919609069824, "train/loss_error": 0.4709603488445282, "train/loss_total": 0.561186671257019 }, { "epoch": 0.17980229762222816, "step": 673, "train/loss_ctc": 1.2423632144927979, "train/loss_error": 0.4759604334831238, "train/loss_total": 0.6292409896850586 }, { "epoch": 0.18006946299759552, "step": 674, "train/loss_ctc": 1.232731819152832, "train/loss_error": 0.44671133160591125, "train/loss_total": 0.6039154529571533 }, { "epoch": 0.18033662837296285, "step": 675, "train/loss_ctc": 0.8424593210220337, "train/loss_error": 0.49048465490341187, "train/loss_total": 0.5608795881271362 }, { "epoch": 0.18060379374833022, "step": 676, "train/loss_ctc": 0.8347871899604797, "train/loss_error": 0.5233260989189148, "train/loss_total": 0.5856183171272278 }, { "epoch": 0.18087095912369758, "step": 677, "train/loss_ctc": 1.2048895359039307, "train/loss_error": 0.4580497145652771, "train/loss_total": 0.6074177026748657 }, { "epoch": 0.1811381244990649, "step": 678, "train/loss_ctc": 1.0270984172821045, "train/loss_error": 0.5336092710494995, "train/loss_total": 0.6323071122169495 }, { "epoch": 0.18140528987443227, "step": 679, "train/loss_ctc": 0.9026484489440918, "train/loss_error": 0.4366547763347626, "train/loss_total": 0.5298535227775574 }, { "epoch": 0.18167245524979964, "grad_norm": 0.9446099996566772, "learning_rate": 2.8914774245257816e-05, "loss": 0.5995, "step": 680 }, { "epoch": 0.18167245524979964, "step": 680, "train/loss_ctc": 0.9971451759338379, "train/loss_error": 
0.5077513456344604, "train/loss_total": 0.6056301593780518 }, { "epoch": 0.18193962062516697, "step": 681, "train/loss_ctc": 0.5877916812896729, "train/loss_error": 0.49889910221099854, "train/loss_total": 0.5166776180267334 }, { "epoch": 0.18220678600053433, "step": 682, "train/loss_ctc": 0.9820219278335571, "train/loss_error": 0.5288887619972229, "train/loss_total": 0.6195154190063477 }, { "epoch": 0.1824739513759017, "step": 683, "train/loss_ctc": 0.896126925945282, "train/loss_error": 0.5707458853721619, "train/loss_total": 0.635822057723999 }, { "epoch": 0.18274111675126903, "step": 684, "train/loss_ctc": 0.6820318102836609, "train/loss_error": 0.5198422074317932, "train/loss_total": 0.5522801280021667 }, { "epoch": 0.1830082821266364, "step": 685, "train/loss_ctc": 1.9915838241577148, "train/loss_error": 0.544931173324585, "train/loss_total": 0.8342617154121399 }, { "epoch": 0.18327544750200375, "step": 686, "train/loss_ctc": 1.1779320240020752, "train/loss_error": 0.5245357155799866, "train/loss_total": 0.6552150249481201 }, { "epoch": 0.1835426128773711, "step": 687, "train/loss_ctc": 0.6614593863487244, "train/loss_error": 0.4778781533241272, "train/loss_total": 0.5145944356918335 }, { "epoch": 0.18380977825273845, "step": 688, "train/loss_ctc": 0.9899758100509644, "train/loss_error": 0.43649059534072876, "train/loss_total": 0.5471876263618469 }, { "epoch": 0.1840769436281058, "step": 689, "train/loss_ctc": 0.839042067527771, "train/loss_error": 0.46553871035575867, "train/loss_total": 0.5402393937110901 }, { "epoch": 0.18434410900347314, "grad_norm": 1.1395869255065918, "learning_rate": 2.8898744322735774e-05, "loss": 0.6021, "step": 690 }, { "epoch": 0.18434410900347314, "step": 690, "train/loss_ctc": 1.566044569015503, "train/loss_error": 0.47246965765953064, "train/loss_total": 0.6911846399307251 }, { "epoch": 0.1846112743788405, "step": 691, "train/loss_ctc": 1.2297539710998535, "train/loss_error": 0.5575843453407288, "train/loss_total": 
0.6920182704925537 }, { "epoch": 0.18487843975420787, "step": 692, "train/loss_ctc": 1.1548712253570557, "train/loss_error": 0.43544331192970276, "train/loss_total": 0.5793288946151733 }, { "epoch": 0.1851456051295752, "step": 693, "train/loss_ctc": 0.6447799205780029, "train/loss_error": 0.5208213925361633, "train/loss_total": 0.5456131100654602 }, { "epoch": 0.18541277050494256, "step": 694, "train/loss_ctc": 0.5623853206634521, "train/loss_error": 0.46130526065826416, "train/loss_total": 0.48152127861976624 }, { "epoch": 0.1856799358803099, "step": 695, "train/loss_ctc": 0.5444759726524353, "train/loss_error": 0.5045074224472046, "train/loss_total": 0.5125011205673218 }, { "epoch": 0.18594710125567726, "step": 696, "train/loss_ctc": 0.5684395432472229, "train/loss_error": 0.49663737416267395, "train/loss_total": 0.5109978318214417 }, { "epoch": 0.18621426663104462, "step": 697, "train/loss_ctc": 0.5593403577804565, "train/loss_error": 0.50288987159729, "train/loss_total": 0.5141799449920654 }, { "epoch": 0.18648143200641196, "step": 698, "train/loss_ctc": 1.7338228225708008, "train/loss_error": 0.5149855017662048, "train/loss_total": 0.7587529420852661 }, { "epoch": 0.18674859738177932, "step": 699, "train/loss_ctc": 1.2497202157974243, "train/loss_error": 0.4461860656738281, "train/loss_total": 0.6068928837776184 }, { "epoch": 0.18701576275714668, "grad_norm": 1.5327507257461548, "learning_rate": 2.8882714400213732e-05, "loss": 0.5893, "step": 700 }, { "epoch": 0.18701576275714668, "step": 700, "train/loss_ctc": 1.4549214839935303, "train/loss_error": 0.5301598310470581, "train/loss_total": 0.7151122093200684 }, { "epoch": 0.18728292813251401, "step": 701, "train/loss_ctc": 0.8611916303634644, "train/loss_error": 0.5579241514205933, "train/loss_total": 0.6185776591300964 }, { "epoch": 0.18755009350788138, "step": 702, "train/loss_ctc": 0.4807477593421936, "train/loss_error": 0.554939329624176, "train/loss_total": 0.5401009917259216 }, { "epoch": 
0.18781725888324874, "step": 703, "train/loss_ctc": 0.7421650886535645, "train/loss_error": 0.5309853553771973, "train/loss_total": 0.5732213258743286 }, { "epoch": 0.18808442425861607, "step": 704, "train/loss_ctc": 0.6840224266052246, "train/loss_error": 0.5474201440811157, "train/loss_total": 0.5747406482696533 }, { "epoch": 0.18835158963398344, "step": 705, "train/loss_ctc": 0.9471530914306641, "train/loss_error": 0.5287308692932129, "train/loss_total": 0.6124153137207031 }, { "epoch": 0.1886187550093508, "step": 706, "train/loss_ctc": 0.7183119654655457, "train/loss_error": 0.5064575672149658, "train/loss_total": 0.5488284826278687 }, { "epoch": 0.18888592038471813, "step": 707, "train/loss_ctc": 1.3846430778503418, "train/loss_error": 0.47895359992980957, "train/loss_total": 0.6600915193557739 }, { "epoch": 0.1891530857600855, "step": 708, "train/loss_ctc": 0.9775691032409668, "train/loss_error": 0.49907273054122925, "train/loss_total": 0.5947720408439636 }, { "epoch": 0.18942025113545286, "step": 709, "train/loss_ctc": 0.7295268774032593, "train/loss_error": 0.5529086589813232, "train/loss_total": 0.5882322788238525 }, { "epoch": 0.1896874165108202, "grad_norm": 1.0470540523529053, "learning_rate": 2.886668447769169e-05, "loss": 0.6026, "step": 710 }, { "epoch": 0.1896874165108202, "step": 710, "train/loss_ctc": 0.9840153455734253, "train/loss_error": 0.5443880558013916, "train/loss_total": 0.6323135495185852 }, { "epoch": 0.18995458188618755, "step": 711, "train/loss_ctc": 0.5428494215011597, "train/loss_error": 0.5653842091560364, "train/loss_total": 0.56087726354599 }, { "epoch": 0.1902217472615549, "step": 712, "train/loss_ctc": 0.804416298866272, "train/loss_error": 0.5333225727081299, "train/loss_total": 0.5875413417816162 }, { "epoch": 0.19048891263692225, "step": 713, "train/loss_ctc": 0.7540510892868042, "train/loss_error": 0.5810688138008118, "train/loss_total": 0.6156652569770813 }, { "epoch": 0.1907560780122896, "step": 714, "train/loss_ctc": 
1.0281586647033691, "train/loss_error": 0.5042260885238647, "train/loss_total": 0.6090126037597656 }, { "epoch": 0.19102324338765697, "step": 715, "train/loss_ctc": 1.0244417190551758, "train/loss_error": 0.4931744933128357, "train/loss_total": 0.5994279384613037 }, { "epoch": 0.1912904087630243, "step": 716, "train/loss_ctc": 0.8666685819625854, "train/loss_error": 0.5354358553886414, "train/loss_total": 0.6016824245452881 }, { "epoch": 0.19155757413839167, "step": 717, "train/loss_ctc": 0.8818103671073914, "train/loss_error": 0.5064108371734619, "train/loss_total": 0.5814907550811768 }, { "epoch": 0.19182473951375903, "step": 718, "train/loss_ctc": 0.9686412811279297, "train/loss_error": 0.5145829319953918, "train/loss_total": 0.6053946018218994 }, { "epoch": 0.19209190488912636, "step": 719, "train/loss_ctc": 1.0134938955307007, "train/loss_error": 0.5137895345687866, "train/loss_total": 0.6137304306030273 }, { "epoch": 0.19235907026449373, "grad_norm": 1.4822005033493042, "learning_rate": 2.885065455516965e-05, "loss": 0.6007, "step": 720 }, { "epoch": 0.19235907026449373, "step": 720, "train/loss_ctc": 1.1997551918029785, "train/loss_error": 0.5026782155036926, "train/loss_total": 0.6420936584472656 }, { "epoch": 0.1926262356398611, "step": 721, "train/loss_ctc": 1.5335774421691895, "train/loss_error": 0.49352729320526123, "train/loss_total": 0.7015373706817627 }, { "epoch": 0.19289340101522842, "step": 722, "train/loss_ctc": 1.0371744632720947, "train/loss_error": 0.5626811981201172, "train/loss_total": 0.6575798988342285 }, { "epoch": 0.19316056639059578, "step": 723, "train/loss_ctc": 0.6718000173568726, "train/loss_error": 0.501460611820221, "train/loss_total": 0.5355284810066223 }, { "epoch": 0.19342773176596312, "step": 724, "train/loss_ctc": 1.541724443435669, "train/loss_error": 0.5642756819725037, "train/loss_total": 0.7597654461860657 }, { "epoch": 0.19369489714133048, "step": 725, "train/loss_ctc": 1.20637845993042, "train/loss_error": 
0.4841330051422119, "train/loss_total": 0.6285821199417114 }, { "epoch": 0.19396206251669784, "step": 726, "train/loss_ctc": 0.6147509217262268, "train/loss_error": 0.5514440536499023, "train/loss_total": 0.5641054511070251 }, { "epoch": 0.19422922789206518, "step": 727, "train/loss_ctc": 1.0455971956253052, "train/loss_error": 0.49402448534965515, "train/loss_total": 0.6043390035629272 }, { "epoch": 0.19449639326743254, "step": 728, "train/loss_ctc": 1.117901086807251, "train/loss_error": 0.44712862372398376, "train/loss_total": 0.5812831521034241 }, { "epoch": 0.1947635586427999, "step": 729, "train/loss_ctc": 1.3041410446166992, "train/loss_error": 0.5368537306785583, "train/loss_total": 0.6903111934661865 }, { "epoch": 0.19503072401816723, "grad_norm": 1.2132238149642944, "learning_rate": 2.883462463264761e-05, "loss": 0.6365, "step": 730 }, { "epoch": 0.19503072401816723, "step": 730, "train/loss_ctc": 1.6666725873947144, "train/loss_error": 0.5403363704681396, "train/loss_total": 0.7656036615371704 }, { "epoch": 0.1952978893935346, "step": 731, "train/loss_ctc": 1.052260398864746, "train/loss_error": 0.5165148377418518, "train/loss_total": 0.6236639618873596 }, { "epoch": 0.19556505476890196, "step": 732, "train/loss_ctc": 1.2649731636047363, "train/loss_error": 0.48729920387268066, "train/loss_total": 0.6428340077400208 }, { "epoch": 0.1958322201442693, "step": 733, "train/loss_ctc": 0.7831673622131348, "train/loss_error": 0.5762136578559875, "train/loss_total": 0.6176044344902039 }, { "epoch": 0.19609938551963665, "step": 734, "train/loss_ctc": 0.8755239248275757, "train/loss_error": 0.5101384520530701, "train/loss_total": 0.5832155346870422 }, { "epoch": 0.19636655089500402, "step": 735, "train/loss_ctc": 1.0263192653656006, "train/loss_error": 0.4796803593635559, "train/loss_total": 0.5890081524848938 }, { "epoch": 0.19663371627037135, "step": 736, "train/loss_ctc": 1.0286668539047241, "train/loss_error": 0.49321189522743225, "train/loss_total": 
0.6003028750419617 }, { "epoch": 0.1969008816457387, "step": 737, "train/loss_ctc": 0.6168441772460938, "train/loss_error": 0.5463690757751465, "train/loss_total": 0.560464084148407 }, { "epoch": 0.19716804702110607, "step": 738, "train/loss_ctc": 0.7417007684707642, "train/loss_error": 0.49693548679351807, "train/loss_total": 0.5458885431289673 }, { "epoch": 0.1974352123964734, "step": 739, "train/loss_ctc": 0.9922118186950684, "train/loss_error": 0.49023938179016113, "train/loss_total": 0.5906338691711426 }, { "epoch": 0.19770237777184077, "grad_norm": 0.99483722448349, "learning_rate": 2.881859471012557e-05, "loss": 0.6119, "step": 740 }, { "epoch": 0.19770237777184077, "step": 740, "train/loss_ctc": 0.8984609842300415, "train/loss_error": 0.552261471748352, "train/loss_total": 0.6215013861656189 }, { "epoch": 0.19796954314720813, "step": 741, "train/loss_ctc": 1.129744291305542, "train/loss_error": 0.4710294008255005, "train/loss_total": 0.6027723550796509 }, { "epoch": 0.19823670852257547, "step": 742, "train/loss_ctc": 0.5747281312942505, "train/loss_error": 0.4762219786643982, "train/loss_total": 0.4959232211112976 }, { "epoch": 0.19850387389794283, "step": 743, "train/loss_ctc": 0.7621989846229553, "train/loss_error": 0.5183914303779602, "train/loss_total": 0.5671529769897461 }, { "epoch": 0.1987710392733102, "step": 744, "train/loss_ctc": 0.8556767702102661, "train/loss_error": 0.43592920899391174, "train/loss_total": 0.5198787450790405 }, { "epoch": 0.19903820464867752, "step": 745, "train/loss_ctc": 1.6091885566711426, "train/loss_error": 0.4990321099758148, "train/loss_total": 0.7210633754730225 }, { "epoch": 0.1993053700240449, "step": 746, "train/loss_ctc": 1.3328220844268799, "train/loss_error": 0.5210263729095459, "train/loss_total": 0.6833854913711548 }, { "epoch": 0.19957253539941225, "step": 747, "train/loss_ctc": 0.6916912794113159, "train/loss_error": 0.4253390431404114, "train/loss_total": 0.47860950231552124 }, { "epoch": 0.19983970077477958, 
"step": 748, "train/loss_ctc": 0.6336890459060669, "train/loss_error": 0.543586790561676, "train/loss_total": 0.5616072416305542 }, { "epoch": 0.20010686615014694, "step": 749, "train/loss_ctc": 1.0505919456481934, "train/loss_error": 0.5381819009780884, "train/loss_total": 0.6406639218330383 }, { "epoch": 0.2003740315255143, "grad_norm": 1.098408818244934, "learning_rate": 2.880256478760353e-05, "loss": 0.5893, "step": 750 }, { "epoch": 0.2003740315255143, "step": 750, "train/loss_ctc": 1.3192501068115234, "train/loss_error": 0.49409565329551697, "train/loss_total": 0.6591265201568604 }, { "epoch": 0.20064119690088164, "step": 751, "train/loss_ctc": 0.9091768860816956, "train/loss_error": 0.5228160619735718, "train/loss_total": 0.6000882387161255 }, { "epoch": 0.200908362276249, "step": 752, "train/loss_ctc": 0.9026716947555542, "train/loss_error": 0.519254744052887, "train/loss_total": 0.5959381461143494 }, { "epoch": 0.20117552765161634, "step": 753, "train/loss_ctc": 0.3982361853122711, "train/loss_error": 0.493685245513916, "train/loss_total": 0.47459545731544495 }, { "epoch": 0.2014426930269837, "step": 754, "train/loss_ctc": 0.5711262226104736, "train/loss_error": 0.5014025568962097, "train/loss_total": 0.5153473019599915 }, { "epoch": 0.20170985840235106, "step": 755, "train/loss_ctc": 0.5249500274658203, "train/loss_error": 0.5614349842071533, "train/loss_total": 0.5541380047798157 }, { "epoch": 0.2019770237777184, "step": 756, "train/loss_ctc": 0.5882748365402222, "train/loss_error": 0.5573374629020691, "train/loss_total": 0.5635249614715576 }, { "epoch": 0.20224418915308576, "step": 757, "train/loss_ctc": 0.9571948647499084, "train/loss_error": 0.4796699285507202, "train/loss_total": 0.5751749277114868 }, { "epoch": 0.20251135452845312, "step": 758, "train/loss_ctc": 1.0214788913726807, "train/loss_error": 0.5141052603721619, "train/loss_total": 0.6155799627304077 }, { "epoch": 0.20277851990382045, "step": 759, "train/loss_ctc": 0.6119142770767212, 
"train/loss_error": 0.4911472201347351, "train/loss_total": 0.5153006315231323 }, { "epoch": 0.20304568527918782, "grad_norm": 1.0734703540802002, "learning_rate": 2.8786534865081487e-05, "loss": 0.5669, "step": 760 }, { "epoch": 0.20304568527918782, "step": 760, "train/loss_ctc": 0.9934805631637573, "train/loss_error": 0.5084050297737122, "train/loss_total": 0.6054201722145081 }, { "epoch": 0.20331285065455518, "step": 761, "train/loss_ctc": 1.0431870222091675, "train/loss_error": 0.5314395427703857, "train/loss_total": 0.6337890625 }, { "epoch": 0.2035800160299225, "step": 762, "train/loss_ctc": 0.887242317199707, "train/loss_error": 0.5012030601501465, "train/loss_total": 0.5784109234809875 }, { "epoch": 0.20384718140528987, "step": 763, "train/loss_ctc": 1.1875331401824951, "train/loss_error": 0.49113985896110535, "train/loss_total": 0.6304185390472412 }, { "epoch": 0.20411434678065724, "step": 764, "train/loss_ctc": 1.551167607307434, "train/loss_error": 0.5079914927482605, "train/loss_total": 0.716626763343811 }, { "epoch": 0.20438151215602457, "step": 765, "train/loss_ctc": 1.3525553941726685, "train/loss_error": 0.5091663002967834, "train/loss_total": 0.6778441667556763 }, { "epoch": 0.20464867753139193, "step": 766, "train/loss_ctc": 1.1623122692108154, "train/loss_error": 0.5101528763771057, "train/loss_total": 0.6405847668647766 }, { "epoch": 0.2049158429067593, "step": 767, "train/loss_ctc": 1.0972580909729004, "train/loss_error": 0.5362864136695862, "train/loss_total": 0.6484807729721069 }, { "epoch": 0.20518300828212663, "step": 768, "train/loss_ctc": 0.9810602068901062, "train/loss_error": 0.5310601592063904, "train/loss_total": 0.6210601925849915 }, { "epoch": 0.205450173657494, "step": 769, "train/loss_ctc": 1.6583929061889648, "train/loss_error": 0.5235243439674377, "train/loss_total": 0.7504980564117432 }, { "epoch": 0.20571733903286135, "grad_norm": 1.9383503198623657, "learning_rate": 2.8770504942559445e-05, "loss": 0.6503, "step": 770 }, { 
"epoch": 0.20571733903286135, "step": 770, "train/loss_ctc": 1.6286797523498535, "train/loss_error": 0.5207306742668152, "train/loss_total": 0.7423205375671387 }, { "epoch": 0.20598450440822869, "step": 771, "train/loss_ctc": 0.9343175888061523, "train/loss_error": 0.4824539124965668, "train/loss_total": 0.5728266835212708 }, { "epoch": 0.20625166978359605, "step": 772, "train/loss_ctc": 1.3893500566482544, "train/loss_error": 0.46142512559890747, "train/loss_total": 0.6470101475715637 }, { "epoch": 0.2065188351589634, "step": 773, "train/loss_ctc": 0.6419695615768433, "train/loss_error": 0.4474307596683502, "train/loss_total": 0.4863385260105133 }, { "epoch": 0.20678600053433074, "step": 774, "train/loss_ctc": 1.0270355939865112, "train/loss_error": 0.5291070938110352, "train/loss_total": 0.6286928057670593 }, { "epoch": 0.2070531659096981, "step": 775, "train/loss_ctc": 0.8126094341278076, "train/loss_error": 0.504257321357727, "train/loss_total": 0.5659277439117432 }, { "epoch": 0.20732033128506547, "step": 776, "train/loss_ctc": 0.8325379490852356, "train/loss_error": 0.45219776034355164, "train/loss_total": 0.5282657742500305 }, { "epoch": 0.2075874966604328, "step": 777, "train/loss_ctc": 0.8296412825584412, "train/loss_error": 0.509802520275116, "train/loss_total": 0.57377028465271 }, { "epoch": 0.20785466203580016, "step": 778, "train/loss_ctc": 1.1943719387054443, "train/loss_error": 0.5442773103713989, "train/loss_total": 0.6742962598800659 }, { "epoch": 0.20812182741116753, "step": 779, "train/loss_ctc": 1.2427964210510254, "train/loss_error": 0.49029678106307983, "train/loss_total": 0.6407967209815979 }, { "epoch": 0.20838899278653486, "grad_norm": 1.0861490964889526, "learning_rate": 2.8754475020037403e-05, "loss": 0.606, "step": 780 }, { "epoch": 0.20838899278653486, "step": 780, "train/loss_ctc": 0.725690484046936, "train/loss_error": 0.5286672115325928, "train/loss_total": 0.5680719017982483 }, { "epoch": 0.20865615816190222, "step": 781, 
"train/loss_ctc": 0.7474728226661682, "train/loss_error": 0.4744706153869629, "train/loss_total": 0.5290710926055908 }, { "epoch": 0.20892332353726956, "step": 782, "train/loss_ctc": 1.4380440711975098, "train/loss_error": 0.5571268796920776, "train/loss_total": 0.733310341835022 }, { "epoch": 0.20919048891263692, "step": 783, "train/loss_ctc": 0.8933600187301636, "train/loss_error": 0.4883660078048706, "train/loss_total": 0.5693647861480713 }, { "epoch": 0.20945765428800428, "step": 784, "train/loss_ctc": 0.9218055605888367, "train/loss_error": 0.5720210671424866, "train/loss_total": 0.6419779658317566 }, { "epoch": 0.20972481966337161, "step": 785, "train/loss_ctc": 0.9164857864379883, "train/loss_error": 0.553690493106842, "train/loss_total": 0.6262495517730713 }, { "epoch": 0.20999198503873898, "step": 786, "train/loss_ctc": 0.6814529895782471, "train/loss_error": 0.45924049615859985, "train/loss_total": 0.5036829710006714 }, { "epoch": 0.21025915041410634, "step": 787, "train/loss_ctc": 0.9138897657394409, "train/loss_error": 0.5156939625740051, "train/loss_total": 0.5953331589698792 }, { "epoch": 0.21052631578947367, "step": 788, "train/loss_ctc": 1.9491829872131348, "train/loss_error": 0.5010164380073547, "train/loss_total": 0.7906497716903687 }, { "epoch": 0.21079348116484103, "step": 789, "train/loss_ctc": 0.8553736209869385, "train/loss_error": 0.47508031129837036, "train/loss_total": 0.5511389970779419 }, { "epoch": 0.2110606465402084, "grad_norm": 1.7846601009368896, "learning_rate": 2.873844509751536e-05, "loss": 0.6109, "step": 790 }, { "epoch": 0.2110606465402084, "step": 790, "train/loss_ctc": 0.7993165254592896, "train/loss_error": 0.5049628615379333, "train/loss_total": 0.5638335943222046 }, { "epoch": 0.21132781191557573, "step": 791, "train/loss_ctc": 0.837144672870636, "train/loss_error": 0.4640269875526428, "train/loss_total": 0.5386505126953125 }, { "epoch": 0.2115949772909431, "step": 792, "train/loss_ctc": 0.53013676404953, 
"train/loss_error": 0.496179461479187, "train/loss_total": 0.5029709339141846 }, { "epoch": 0.21186214266631045, "step": 793, "train/loss_ctc": 1.1781013011932373, "train/loss_error": 0.49255722761154175, "train/loss_total": 0.6296660900115967 }, { "epoch": 0.2121293080416778, "step": 794, "train/loss_ctc": 0.9919089674949646, "train/loss_error": 0.49469923973083496, "train/loss_total": 0.5941411852836609 }, { "epoch": 0.21239647341704515, "step": 795, "train/loss_ctc": 0.9972256422042847, "train/loss_error": 0.5693990588188171, "train/loss_total": 0.6549643874168396 }, { "epoch": 0.2126636387924125, "step": 796, "train/loss_ctc": 0.5692790746688843, "train/loss_error": 0.5023961663246155, "train/loss_total": 0.5157727599143982 }, { "epoch": 0.21293080416777985, "step": 797, "train/loss_ctc": 1.1035749912261963, "train/loss_error": 0.5343785881996155, "train/loss_total": 0.6482178568840027 }, { "epoch": 0.2131979695431472, "step": 798, "train/loss_ctc": 0.8808456063270569, "train/loss_error": 0.4455116391181946, "train/loss_total": 0.5325784683227539 }, { "epoch": 0.21346513491851457, "step": 799, "train/loss_ctc": 0.4039064645767212, "train/loss_error": 0.44991058111190796, "train/loss_total": 0.44070976972579956 }, { "epoch": 0.2137323002938819, "grad_norm": 0.7968260049819946, "learning_rate": 2.8722415174993323e-05, "loss": 0.5622, "step": 800 }, { "epoch": 0.2137323002938819, "step": 800, "train/loss_ctc": 0.896963894367218, "train/loss_error": 0.5516268014907837, "train/loss_total": 0.6206942200660706 }, { "epoch": 0.21399946566924927, "step": 801, "train/loss_ctc": 0.45668816566467285, "train/loss_error": 0.46637606620788574, "train/loss_total": 0.4644384980201721 }, { "epoch": 0.21426663104461663, "step": 802, "train/loss_ctc": 1.4641642570495605, "train/loss_error": 0.4993683099746704, "train/loss_total": 0.6923274993896484 }, { "epoch": 0.21453379641998396, "step": 803, "train/loss_ctc": 0.6409623622894287, "train/loss_error": 0.49864327907562256, 
"train/loss_total": 0.5271071195602417 }, { "epoch": 0.21480096179535133, "step": 804, "train/loss_ctc": 0.9358602166175842, "train/loss_error": 0.48624932765960693, "train/loss_total": 0.5761715173721313 }, { "epoch": 0.2150681271707187, "step": 805, "train/loss_ctc": 0.6621308922767639, "train/loss_error": 0.46345463395118713, "train/loss_total": 0.5031899213790894 }, { "epoch": 0.21533529254608602, "step": 806, "train/loss_ctc": 0.6495643854141235, "train/loss_error": 0.4800650477409363, "train/loss_total": 0.5139648914337158 }, { "epoch": 0.21560245792145338, "step": 807, "train/loss_ctc": 1.2494330406188965, "train/loss_error": 0.5363457202911377, "train/loss_total": 0.6789631843566895 }, { "epoch": 0.21586962329682075, "step": 808, "train/loss_ctc": 0.6996278762817383, "train/loss_error": 0.5161024332046509, "train/loss_total": 0.5528075098991394 }, { "epoch": 0.21613678867218808, "step": 809, "train/loss_ctc": 1.366391658782959, "train/loss_error": 0.4681538939476013, "train/loss_total": 0.6478014588356018 }, { "epoch": 0.21640395404755544, "grad_norm": 3.2056984901428223, "learning_rate": 2.870638525247128e-05, "loss": 0.5777, "step": 810 }, { "epoch": 0.21640395404755544, "step": 810, "train/loss_ctc": 0.7676159143447876, "train/loss_error": 0.5408341288566589, "train/loss_total": 0.5861905217170715 }, { "epoch": 0.21667111942292278, "step": 811, "train/loss_ctc": 0.9872959852218628, "train/loss_error": 0.4994100332260132, "train/loss_total": 0.596987247467041 }, { "epoch": 0.21693828479829014, "step": 812, "train/loss_ctc": 1.1525152921676636, "train/loss_error": 0.5515093803405762, "train/loss_total": 0.6717105507850647 }, { "epoch": 0.2172054501736575, "step": 813, "train/loss_ctc": 0.7802209854125977, "train/loss_error": 0.523420512676239, "train/loss_total": 0.5747806429862976 }, { "epoch": 0.21747261554902483, "step": 814, "train/loss_ctc": 1.0856332778930664, "train/loss_error": 0.4832709729671478, "train/loss_total": 0.6037434339523315 }, { 
"epoch": 0.2177397809243922, "step": 815, "train/loss_ctc": 2.2800114154815674, "train/loss_error": 0.48229333758354187, "train/loss_total": 0.8418369293212891 }, { "epoch": 0.21800694629975956, "step": 816, "train/loss_ctc": 0.9901740550994873, "train/loss_error": 0.6017307043075562, "train/loss_total": 0.6794193983078003 }, { "epoch": 0.2182741116751269, "step": 817, "train/loss_ctc": 1.0567190647125244, "train/loss_error": 0.5608290433883667, "train/loss_total": 0.6600070595741272 }, { "epoch": 0.21854127705049425, "step": 818, "train/loss_ctc": 0.5440152287483215, "train/loss_error": 0.4831398129463196, "train/loss_total": 0.49531489610671997 }, { "epoch": 0.21880844242586162, "step": 819, "train/loss_ctc": 1.212613582611084, "train/loss_error": 0.5151775479316711, "train/loss_total": 0.6546647548675537 }, { "epoch": 0.21907560780122895, "grad_norm": 0.9446890950202942, "learning_rate": 2.869035532994924e-05, "loss": 0.6365, "step": 820 }, { "epoch": 0.21907560780122895, "step": 820, "train/loss_ctc": 0.8074418902397156, "train/loss_error": 0.4422387480735779, "train/loss_total": 0.5152794122695923 }, { "epoch": 0.2193427731765963, "step": 821, "train/loss_ctc": 0.8261446952819824, "train/loss_error": 0.4844505190849304, "train/loss_total": 0.5527893900871277 }, { "epoch": 0.21960993855196367, "step": 822, "train/loss_ctc": 0.5390125513076782, "train/loss_error": 0.5113322734832764, "train/loss_total": 0.5168683528900146 }, { "epoch": 0.219877103927331, "step": 823, "train/loss_ctc": 1.5447616577148438, "train/loss_error": 0.49009233713150024, "train/loss_total": 0.701026201248169 }, { "epoch": 0.22014426930269837, "step": 824, "train/loss_ctc": 0.7890001535415649, "train/loss_error": 0.48858943581581116, "train/loss_total": 0.5486716032028198 }, { "epoch": 0.22041143467806573, "step": 825, "train/loss_ctc": 1.572141408920288, "train/loss_error": 0.454303503036499, "train/loss_total": 0.6778711080551147 }, { "epoch": 0.22067860005343307, "step": 826, 
"train/loss_ctc": 0.6822950839996338, "train/loss_error": 0.5002129077911377, "train/loss_total": 0.536629319190979 }, { "epoch": 0.22094576542880043, "step": 827, "train/loss_ctc": 1.1259560585021973, "train/loss_error": 0.4770210385322571, "train/loss_total": 0.606808066368103 }, { "epoch": 0.2212129308041678, "step": 828, "train/loss_ctc": 0.8853558301925659, "train/loss_error": 0.5731450915336609, "train/loss_total": 0.6355872750282288 }, { "epoch": 0.22148009617953512, "step": 829, "train/loss_ctc": 0.7951763272285461, "train/loss_error": 0.4904487133026123, "train/loss_total": 0.5513942241668701 }, { "epoch": 0.2217472615549025, "grad_norm": 5.622978687286377, "learning_rate": 2.8674325407427197e-05, "loss": 0.5843, "step": 830 }, { "epoch": 0.2217472615549025, "step": 830, "train/loss_ctc": 1.2086070775985718, "train/loss_error": 0.4529837369918823, "train/loss_total": 0.604108452796936 }, { "epoch": 0.22201442693026985, "step": 831, "train/loss_ctc": 0.6794437170028687, "train/loss_error": 0.4872988760471344, "train/loss_total": 0.5257278680801392 }, { "epoch": 0.22228159230563718, "step": 832, "train/loss_ctc": 0.4096311330795288, "train/loss_error": 0.5174990296363831, "train/loss_total": 0.4959254562854767 }, { "epoch": 0.22254875768100454, "step": 833, "train/loss_ctc": 0.8404940962791443, "train/loss_error": 0.5065035820007324, "train/loss_total": 0.5733016729354858 }, { "epoch": 0.2228159230563719, "step": 834, "train/loss_ctc": 1.4203988313674927, "train/loss_error": 0.4781738817691803, "train/loss_total": 0.6666188836097717 }, { "epoch": 0.22308308843173924, "step": 835, "train/loss_ctc": 0.5741147994995117, "train/loss_error": 0.49642330408096313, "train/loss_total": 0.5119616389274597 }, { "epoch": 0.2233502538071066, "step": 836, "train/loss_ctc": 1.0624549388885498, "train/loss_error": 0.4525259733200073, "train/loss_total": 0.5745117664337158 }, { "epoch": 0.22361741918247396, "step": 837, "train/loss_ctc": 0.8801901340484619, 
"train/loss_error": 0.530634343624115, "train/loss_total": 0.6005455255508423 }, { "epoch": 0.2238845845578413, "step": 838, "train/loss_ctc": 1.2433738708496094, "train/loss_error": 0.5899025201797485, "train/loss_total": 0.7205967903137207 }, { "epoch": 0.22415174993320866, "step": 839, "train/loss_ctc": 0.9685338139533997, "train/loss_error": 0.5288878083229065, "train/loss_total": 0.6168169975280762 }, { "epoch": 0.224418915308576, "grad_norm": 0.9279994368553162, "learning_rate": 2.8658295484905155e-05, "loss": 0.589, "step": 840 }, { "epoch": 0.224418915308576, "step": 840, "train/loss_ctc": 0.7266818284988403, "train/loss_error": 0.4673117995262146, "train/loss_total": 0.5191858410835266 }, { "epoch": 0.22468608068394336, "step": 841, "train/loss_ctc": 0.8975730538368225, "train/loss_error": 0.5039427876472473, "train/loss_total": 0.5826688408851624 }, { "epoch": 0.22495324605931072, "step": 842, "train/loss_ctc": 0.8924522995948792, "train/loss_error": 0.5203530192375183, "train/loss_total": 0.5947728753089905 }, { "epoch": 0.22522041143467805, "step": 843, "train/loss_ctc": 0.83527672290802, "train/loss_error": 0.47977447509765625, "train/loss_total": 0.5508749485015869 }, { "epoch": 0.22548757681004541, "step": 844, "train/loss_ctc": 1.0385174751281738, "train/loss_error": 0.5263791084289551, "train/loss_total": 0.6288068294525146 }, { "epoch": 0.22575474218541278, "step": 845, "train/loss_ctc": 0.7914237976074219, "train/loss_error": 0.5378684997558594, "train/loss_total": 0.5885795950889587 }, { "epoch": 0.2260219075607801, "step": 846, "train/loss_ctc": 0.6214856505393982, "train/loss_error": 0.4887780547142029, "train/loss_total": 0.5153195858001709 }, { "epoch": 0.22628907293614747, "step": 847, "train/loss_ctc": 0.8665047883987427, "train/loss_error": 0.5170214176177979, "train/loss_total": 0.5869181156158447 }, { "epoch": 0.22655623831151483, "step": 848, "train/loss_ctc": 0.858995795249939, "train/loss_error": 0.5034934878349304, 
"train/loss_total": 0.5745939612388611 }, { "epoch": 0.22682340368688217, "step": 849, "train/loss_ctc": 0.9724344611167908, "train/loss_error": 0.5207926034927368, "train/loss_total": 0.6111209988594055 }, { "epoch": 0.22709056906224953, "grad_norm": 4.415884971618652, "learning_rate": 2.8642265562383113e-05, "loss": 0.5753, "step": 850 }, { "epoch": 0.22709056906224953, "step": 850, "train/loss_ctc": 1.4366339445114136, "train/loss_error": 0.4757506549358368, "train/loss_total": 0.6679273247718811 }, { "epoch": 0.2273577344376169, "step": 851, "train/loss_ctc": 2.0446484088897705, "train/loss_error": 0.5028891563415527, "train/loss_total": 0.8112410306930542 }, { "epoch": 0.22762489981298423, "step": 852, "train/loss_ctc": 0.9561516642570496, "train/loss_error": 0.5046992301940918, "train/loss_total": 0.5949897170066833 }, { "epoch": 0.2278920651883516, "step": 853, "train/loss_ctc": 0.5086665153503418, "train/loss_error": 0.5510738492012024, "train/loss_total": 0.5425924062728882 }, { "epoch": 0.22815923056371895, "step": 854, "train/loss_ctc": 0.9377186894416809, "train/loss_error": 0.5394992828369141, "train/loss_total": 0.6191431879997253 }, { "epoch": 0.22842639593908629, "step": 855, "train/loss_ctc": 0.43258413672447205, "train/loss_error": 0.5452830791473389, "train/loss_total": 0.522743284702301 }, { "epoch": 0.22869356131445365, "step": 856, "train/loss_ctc": 1.07541823387146, "train/loss_error": 0.5146826505661011, "train/loss_total": 0.6268297433853149 }, { "epoch": 0.228960726689821, "step": 857, "train/loss_ctc": 0.7560127377510071, "train/loss_error": 0.49456915259361267, "train/loss_total": 0.5468578934669495 }, { "epoch": 0.22922789206518834, "step": 858, "train/loss_ctc": 0.8355162143707275, "train/loss_error": 0.5393863320350647, "train/loss_total": 0.5986123085021973 }, { "epoch": 0.2294950574405557, "step": 859, "train/loss_ctc": 0.6168898940086365, "train/loss_error": 0.5156474709510803, "train/loss_total": 0.5358959436416626 }, { "epoch": 
0.22976222281592307, "grad_norm": 1.1471140384674072, "learning_rate": 2.8626235639861075e-05, "loss": 0.6067, "step": 860 }, { "epoch": 0.22976222281592307, "step": 860, "train/loss_ctc": 1.1005123853683472, "train/loss_error": 0.4510921239852905, "train/loss_total": 0.5809761881828308 }, { "epoch": 0.2300293881912904, "step": 861, "train/loss_ctc": 0.555893063545227, "train/loss_error": 0.5199499130249023, "train/loss_total": 0.5271385312080383 }, { "epoch": 0.23029655356665776, "step": 862, "train/loss_ctc": 0.9666756391525269, "train/loss_error": 0.49966734647750854, "train/loss_total": 0.5930690169334412 }, { "epoch": 0.23056371894202513, "step": 863, "train/loss_ctc": 0.7152043581008911, "train/loss_error": 0.5002586841583252, "train/loss_total": 0.5432478189468384 }, { "epoch": 0.23083088431739246, "step": 864, "train/loss_ctc": 1.3791924715042114, "train/loss_error": 0.48755553364753723, "train/loss_total": 0.66588294506073 }, { "epoch": 0.23109804969275982, "step": 865, "train/loss_ctc": 0.7063227891921997, "train/loss_error": 0.5614022612571716, "train/loss_total": 0.5903863906860352 }, { "epoch": 0.23136521506812718, "step": 866, "train/loss_ctc": 0.6959387063980103, "train/loss_error": 0.49957403540611267, "train/loss_total": 0.5388469696044922 }, { "epoch": 0.23163238044349452, "step": 867, "train/loss_ctc": 1.1971044540405273, "train/loss_error": 0.4766925871372223, "train/loss_total": 0.6207749843597412 }, { "epoch": 0.23189954581886188, "step": 868, "train/loss_ctc": 1.1245238780975342, "train/loss_error": 0.438270628452301, "train/loss_total": 0.5755212903022766 }, { "epoch": 0.23216671119422924, "step": 869, "train/loss_ctc": 0.7433238625526428, "train/loss_error": 0.5279237627983093, "train/loss_total": 0.571003794670105 }, { "epoch": 0.23243387656959658, "grad_norm": 0.8672553896903992, "learning_rate": 2.8610205717339033e-05, "loss": 0.5807, "step": 870 }, { "epoch": 0.23243387656959658, "step": 870, "train/loss_ctc": 0.519776463508606, 
"train/loss_error": 0.5356647968292236, "train/loss_total": 0.532487154006958 }, { "epoch": 0.23270104194496394, "step": 871, "train/loss_ctc": 0.7291302680969238, "train/loss_error": 0.4831515848636627, "train/loss_total": 0.5323473215103149 }, { "epoch": 0.23296820732033127, "step": 872, "train/loss_ctc": 0.9863961935043335, "train/loss_error": 0.4955962896347046, "train/loss_total": 0.5937563180923462 }, { "epoch": 0.23323537269569863, "step": 873, "train/loss_ctc": 0.9353562593460083, "train/loss_error": 0.47580069303512573, "train/loss_total": 0.5677118301391602 }, { "epoch": 0.233502538071066, "step": 874, "train/loss_ctc": 0.6244555711746216, "train/loss_error": 0.5247597694396973, "train/loss_total": 0.54469895362854 }, { "epoch": 0.23376970344643333, "step": 875, "train/loss_ctc": 0.48828279972076416, "train/loss_error": 0.472247451543808, "train/loss_total": 0.47545450925827026 }, { "epoch": 0.2340368688218007, "step": 876, "train/loss_ctc": 0.5966725945472717, "train/loss_error": 0.49753451347351074, "train/loss_total": 0.517362117767334 }, { "epoch": 0.23430403419716805, "step": 877, "train/loss_ctc": 1.4423871040344238, "train/loss_error": 0.47374922037124634, "train/loss_total": 0.6674767732620239 }, { "epoch": 0.2345711995725354, "step": 878, "train/loss_ctc": 0.7528814077377319, "train/loss_error": 0.49967676401138306, "train/loss_total": 0.5503177046775818 }, { "epoch": 0.23483836494790275, "step": 879, "train/loss_ctc": 0.7075008153915405, "train/loss_error": 0.47768634557724, "train/loss_total": 0.5236492156982422 }, { "epoch": 0.2351055303232701, "grad_norm": 1.3497567176818848, "learning_rate": 2.859417579481699e-05, "loss": 0.5505, "step": 880 }, { "epoch": 0.2351055303232701, "step": 880, "train/loss_ctc": 0.8231223821640015, "train/loss_error": 0.49967876076698303, "train/loss_total": 0.5643675327301025 }, { "epoch": 0.23537269569863745, "step": 881, "train/loss_ctc": 0.5416457653045654, "train/loss_error": 0.46701785922050476, 
"train/loss_total": 0.48194342851638794 }, { "epoch": 0.2356398610740048, "step": 882, "train/loss_ctc": 0.6069537997245789, "train/loss_error": 0.5125812292098999, "train/loss_total": 0.5314557552337646 }, { "epoch": 0.23590702644937217, "step": 883, "train/loss_ctc": 0.5743815898895264, "train/loss_error": 0.48215916752815247, "train/loss_total": 0.5006036758422852 }, { "epoch": 0.2361741918247395, "step": 884, "train/loss_ctc": 1.0077159404754639, "train/loss_error": 0.5321859121322632, "train/loss_total": 0.6272919178009033 }, { "epoch": 0.23644135720010687, "step": 885, "train/loss_ctc": 0.7117501497268677, "train/loss_error": 0.49019455909729004, "train/loss_total": 0.5345056653022766 }, { "epoch": 0.23670852257547423, "step": 886, "train/loss_ctc": 0.7942472100257874, "train/loss_error": 0.5702508091926575, "train/loss_total": 0.6150500774383545 }, { "epoch": 0.23697568795084156, "step": 887, "train/loss_ctc": 1.1277120113372803, "train/loss_error": 0.4692451059818268, "train/loss_total": 0.6009384989738464 }, { "epoch": 0.23724285332620892, "step": 888, "train/loss_ctc": 1.0162615776062012, "train/loss_error": 0.43601566553115845, "train/loss_total": 0.5520648956298828 }, { "epoch": 0.2375100187015763, "step": 889, "train/loss_ctc": 1.1350589990615845, "train/loss_error": 0.49086669087409973, "train/loss_total": 0.6197052001953125 }, { "epoch": 0.23777718407694362, "grad_norm": 0.9478922486305237, "learning_rate": 2.857814587229495e-05, "loss": 0.5628, "step": 890 }, { "epoch": 0.23777718407694362, "step": 890, "train/loss_ctc": 0.3725704550743103, "train/loss_error": 0.48198872804641724, "train/loss_total": 0.4601050913333893 }, { "epoch": 0.23804434945231098, "step": 891, "train/loss_ctc": 1.7952320575714111, "train/loss_error": 0.5436153411865234, "train/loss_total": 0.7939386963844299 }, { "epoch": 0.23831151482767834, "step": 892, "train/loss_ctc": 1.1153922080993652, "train/loss_error": 0.5033040642738342, "train/loss_total": 0.6257216930389404 }, { 
"epoch": 0.23857868020304568, "step": 893, "train/loss_ctc": 2.1634271144866943, "train/loss_error": 0.4927162528038025, "train/loss_total": 0.826858401298523 }, { "epoch": 0.23884584557841304, "step": 894, "train/loss_ctc": 0.6236917972564697, "train/loss_error": 0.4786430895328522, "train/loss_total": 0.5076528191566467 }, { "epoch": 0.2391130109537804, "step": 895, "train/loss_ctc": 1.513343095779419, "train/loss_error": 0.4514230191707611, "train/loss_total": 0.6638070344924927 }, { "epoch": 0.23938017632914774, "step": 896, "train/loss_ctc": 0.5163319110870361, "train/loss_error": 0.46327921748161316, "train/loss_total": 0.4738897681236267 }, { "epoch": 0.2396473417045151, "step": 897, "train/loss_ctc": 1.5223584175109863, "train/loss_error": 0.4550063908100128, "train/loss_total": 0.6684768199920654 }, { "epoch": 0.23991450707988246, "step": 898, "train/loss_ctc": 1.1970596313476562, "train/loss_error": 0.5378395318984985, "train/loss_total": 0.669683575630188 }, { "epoch": 0.2401816724552498, "step": 899, "train/loss_ctc": 0.842172384262085, "train/loss_error": 0.47850340604782104, "train/loss_total": 0.5512372255325317 }, { "epoch": 0.24044883783061716, "grad_norm": 0.7688894867897034, "learning_rate": 2.856211594977291e-05, "loss": 0.6241, "step": 900 }, { "epoch": 0.24044883783061716, "step": 900, "train/loss_ctc": 0.670525074005127, "train/loss_error": 0.5108198523521423, "train/loss_total": 0.5427609086036682 }, { "epoch": 0.2407160032059845, "step": 901, "train/loss_ctc": 1.0352120399475098, "train/loss_error": 0.5315978527069092, "train/loss_total": 0.6323207020759583 }, { "epoch": 0.24098316858135185, "step": 902, "train/loss_ctc": 1.191725492477417, "train/loss_error": 0.5762168765068054, "train/loss_total": 0.6993185877799988 }, { "epoch": 0.24125033395671922, "step": 903, "train/loss_ctc": 0.7301642894744873, "train/loss_error": 0.5629652738571167, "train/loss_total": 0.5964050889015198 }, { "epoch": 0.24151749933208655, "step": 904, 
"train/loss_ctc": 0.7424222230911255, "train/loss_error": 0.5374295115470886, "train/loss_total": 0.5784280896186829 }, { "epoch": 0.2417846647074539, "step": 905, "train/loss_ctc": 0.5448414087295532, "train/loss_error": 0.5197813510894775, "train/loss_total": 0.5247933864593506 }, { "epoch": 0.24205183008282127, "step": 906, "train/loss_ctc": 1.4685274362564087, "train/loss_error": 0.5528523921966553, "train/loss_total": 0.7359874248504639 }, { "epoch": 0.2423189954581886, "step": 907, "train/loss_ctc": 1.2165387868881226, "train/loss_error": 0.5066663026809692, "train/loss_total": 0.6486408114433289 }, { "epoch": 0.24258616083355597, "step": 908, "train/loss_ctc": 0.4881551265716553, "train/loss_error": 0.5207399725914001, "train/loss_total": 0.5142229795455933 }, { "epoch": 0.24285332620892333, "step": 909, "train/loss_ctc": 0.9678099155426025, "train/loss_error": 0.5061157941818237, "train/loss_total": 0.5984546542167664 }, { "epoch": 0.24312049158429067, "grad_norm": 1.105841875076294, "learning_rate": 2.8546086027250872e-05, "loss": 0.6071, "step": 910 }, { "epoch": 0.24312049158429067, "step": 910, "train/loss_ctc": 0.9027812480926514, "train/loss_error": 0.5347343683242798, "train/loss_total": 0.608343780040741 }, { "epoch": 0.24338765695965803, "step": 911, "train/loss_ctc": 0.6505218744277954, "train/loss_error": 0.47845199704170227, "train/loss_total": 0.5128659605979919 }, { "epoch": 0.2436548223350254, "step": 912, "train/loss_ctc": 0.7285915613174438, "train/loss_error": 0.5148070454597473, "train/loss_total": 0.5575639605522156 }, { "epoch": 0.24392198771039272, "step": 913, "train/loss_ctc": 0.9275586605072021, "train/loss_error": 0.5132931470870972, "train/loss_total": 0.5961462259292603 }, { "epoch": 0.24418915308576009, "step": 914, "train/loss_ctc": 1.3231310844421387, "train/loss_error": 0.4357810616493225, "train/loss_total": 0.6132510900497437 }, { "epoch": 0.24445631846112745, "step": 915, "train/loss_ctc": 1.2303404808044434, 
"train/loss_error": 0.489486962556839, "train/loss_total": 0.6376577019691467 }, { "epoch": 0.24472348383649478, "step": 916, "train/loss_ctc": 0.8741582036018372, "train/loss_error": 0.5049630999565125, "train/loss_total": 0.5788021087646484 }, { "epoch": 0.24499064921186214, "step": 917, "train/loss_ctc": 1.0211951732635498, "train/loss_error": 0.5288274884223938, "train/loss_total": 0.627301037311554 }, { "epoch": 0.2452578145872295, "step": 918, "train/loss_ctc": 0.702278733253479, "train/loss_error": 0.5629345774650574, "train/loss_total": 0.5908033847808838 }, { "epoch": 0.24552497996259684, "step": 919, "train/loss_ctc": 0.3674703538417816, "train/loss_error": 0.4456944167613983, "train/loss_total": 0.4300496280193329 }, { "epoch": 0.2457921453379642, "grad_norm": 0.827580988407135, "learning_rate": 2.853005610472883e-05, "loss": 0.5753, "step": 920 }, { "epoch": 0.2457921453379642, "step": 920, "train/loss_ctc": 0.8728486895561218, "train/loss_error": 0.5239532589912415, "train/loss_total": 0.5937323570251465 }, { "epoch": 0.24605931071333156, "step": 921, "train/loss_ctc": 1.0457385778427124, "train/loss_error": 0.5560068488121033, "train/loss_total": 0.6539531946182251 }, { "epoch": 0.2463264760886989, "step": 922, "train/loss_ctc": 0.7941230535507202, "train/loss_error": 0.437793105840683, "train/loss_total": 0.5090590715408325 }, { "epoch": 0.24659364146406626, "step": 923, "train/loss_ctc": 1.4724327325820923, "train/loss_error": 0.5273020267486572, "train/loss_total": 0.7163281440734863 }, { "epoch": 0.24686080683943362, "step": 924, "train/loss_ctc": 0.8604068160057068, "train/loss_error": 0.4589734971523285, "train/loss_total": 0.5392601490020752 }, { "epoch": 0.24712797221480096, "step": 925, "train/loss_ctc": 1.049853801727295, "train/loss_error": 0.5088979005813599, "train/loss_total": 0.6170890927314758 }, { "epoch": 0.24739513759016832, "step": 926, "train/loss_ctc": 0.7653253078460693, "train/loss_error": 0.6058673858642578, 
"train/loss_total": 0.6377589702606201 }, { "epoch": 0.24766230296553568, "step": 927, "train/loss_ctc": 1.2353122234344482, "train/loss_error": 0.4535292685031891, "train/loss_total": 0.6098858714103699 }, { "epoch": 0.24792946834090301, "step": 928, "train/loss_ctc": 0.4475260078907013, "train/loss_error": 0.5467767119407654, "train/loss_total": 0.526926577091217 }, { "epoch": 0.24819663371627038, "step": 929, "train/loss_ctc": 0.8703930974006653, "train/loss_error": 0.4966164827346802, "train/loss_total": 0.5713717937469482 }, { "epoch": 0.2484637990916377, "grad_norm": 1.8688099384307861, "learning_rate": 2.8514026182206788e-05, "loss": 0.5975, "step": 930 }, { "epoch": 0.2484637990916377, "step": 930, "train/loss_ctc": 1.0242016315460205, "train/loss_error": 0.5338704586029053, "train/loss_total": 0.6319366693496704 }, { "epoch": 0.24873096446700507, "step": 931, "train/loss_ctc": 1.3882863521575928, "train/loss_error": 0.5062381625175476, "train/loss_total": 0.6826478242874146 }, { "epoch": 0.24899812984237243, "step": 932, "train/loss_ctc": 0.8835180401802063, "train/loss_error": 0.5448752045631409, "train/loss_total": 0.6126037836074829 }, { "epoch": 0.24926529521773977, "step": 933, "train/loss_ctc": 0.7993720173835754, "train/loss_error": 0.48950543999671936, "train/loss_total": 0.5514787435531616 }, { "epoch": 0.24953246059310713, "step": 934, "train/loss_ctc": 0.9869623184204102, "train/loss_error": 0.4635314345359802, "train/loss_total": 0.5682176351547241 }, { "epoch": 0.2497996259684745, "step": 935, "train/loss_ctc": 0.8069412708282471, "train/loss_error": 0.5753564238548279, "train/loss_total": 0.6216734051704407 }, { "epoch": 0.25006679134384185, "step": 936, "train/loss_ctc": 0.44903966784477234, "train/loss_error": 0.44321590662002563, "train/loss_total": 0.44438067078590393 }, { "epoch": 0.2503339567192092, "step": 937, "train/loss_ctc": 1.1083507537841797, "train/loss_error": 0.5046332478523254, "train/loss_total": 0.6253767609596252 }, { 
"epoch": 0.2506011220945765, "step": 938, "train/loss_ctc": 0.7559552788734436, "train/loss_error": 0.5833120346069336, "train/loss_total": 0.6178406476974487 }, { "epoch": 0.2508682874699439, "step": 939, "train/loss_ctc": 1.0350466966629028, "train/loss_error": 0.5267512202262878, "train/loss_total": 0.6284103393554688 }, { "epoch": 0.25113545284531125, "grad_norm": 0.7873122692108154, "learning_rate": 2.8497996259684746e-05, "loss": 0.5985, "step": 940 }, { "epoch": 0.25113545284531125, "step": 940, "train/loss_ctc": 0.62861168384552, "train/loss_error": 0.48522722721099854, "train/loss_total": 0.5139040946960449 }, { "epoch": 0.2514026182206786, "step": 941, "train/loss_ctc": 1.8642442226409912, "train/loss_error": 0.5516787171363831, "train/loss_total": 0.8141918182373047 }, { "epoch": 0.25166978359604597, "step": 942, "train/loss_ctc": 1.580970287322998, "train/loss_error": 0.5354058146476746, "train/loss_total": 0.7445187568664551 }, { "epoch": 0.25193694897141333, "step": 943, "train/loss_ctc": 0.626440167427063, "train/loss_error": 0.5308434367179871, "train/loss_total": 0.5499627590179443 }, { "epoch": 0.25220411434678064, "step": 944, "train/loss_ctc": 1.9714417457580566, "train/loss_error": 0.4938480854034424, "train/loss_total": 0.7893668413162231 }, { "epoch": 0.252471279722148, "step": 945, "train/loss_ctc": 1.2464756965637207, "train/loss_error": 0.5254364609718323, "train/loss_total": 0.6696443557739258 }, { "epoch": 0.25273844509751536, "step": 946, "train/loss_ctc": 1.5106370449066162, "train/loss_error": 0.4839428663253784, "train/loss_total": 0.689281702041626 }, { "epoch": 0.2530056104728827, "step": 947, "train/loss_ctc": 0.7843133211135864, "train/loss_error": 0.5177503824234009, "train/loss_total": 0.5710629820823669 }, { "epoch": 0.2532727758482501, "step": 948, "train/loss_ctc": 0.5993779897689819, "train/loss_error": 0.4994216561317444, "train/loss_total": 0.5194129347801208 }, { "epoch": 0.2535399412236174, "step": 949, 
"train/loss_ctc": 0.7289087176322937, "train/loss_error": 0.4693395495414734, "train/loss_total": 0.5212534070014954 }, { "epoch": 0.25380710659898476, "grad_norm": 2.1602656841278076, "learning_rate": 2.8481966337162704e-05, "loss": 0.6383, "step": 950 }, { "epoch": 0.25380710659898476, "step": 950, "train/loss_ctc": 0.8467796444892883, "train/loss_error": 0.4763055145740509, "train/loss_total": 0.5504003763198853 }, { "epoch": 0.2540742719743521, "step": 951, "train/loss_ctc": 0.45374444127082825, "train/loss_error": 0.5907241702079773, "train/loss_total": 0.563328206539154 }, { "epoch": 0.2543414373497195, "step": 952, "train/loss_ctc": 1.0979523658752441, "train/loss_error": 0.4832994043827057, "train/loss_total": 0.6062300205230713 }, { "epoch": 0.25460860272508684, "step": 953, "train/loss_ctc": 0.9949031472206116, "train/loss_error": 0.521869957447052, "train/loss_total": 0.6164765954017639 }, { "epoch": 0.2548757681004542, "step": 954, "train/loss_ctc": 0.6173232793807983, "train/loss_error": 0.5724607706069946, "train/loss_total": 0.5814332962036133 }, { "epoch": 0.2551429334758215, "step": 955, "train/loss_ctc": 0.9772128462791443, "train/loss_error": 0.4926822781562805, "train/loss_total": 0.5895884037017822 }, { "epoch": 0.25541009885118887, "step": 956, "train/loss_ctc": 1.0710327625274658, "train/loss_error": 0.5297697186470032, "train/loss_total": 0.6380223631858826 }, { "epoch": 0.25567726422655623, "step": 957, "train/loss_ctc": 0.9760623574256897, "train/loss_error": 0.47830918431282043, "train/loss_total": 0.5778598189353943 }, { "epoch": 0.2559444296019236, "step": 958, "train/loss_ctc": 0.9940687417984009, "train/loss_error": 0.47018003463745117, "train/loss_total": 0.5749577879905701 }, { "epoch": 0.25621159497729096, "step": 959, "train/loss_ctc": 1.143434762954712, "train/loss_error": 0.46959009766578674, "train/loss_total": 0.6043590307235718 }, { "epoch": 0.2564787603526583, "grad_norm": 1.0798085927963257, "learning_rate": 
2.8465936414640662e-05, "loss": 0.5903, "step": 960 }, { "epoch": 0.2564787603526583, "step": 960, "train/loss_ctc": 0.7649879455566406, "train/loss_error": 0.5450243353843689, "train/loss_total": 0.5890170931816101 }, { "epoch": 0.2567459257280256, "step": 961, "train/loss_ctc": 0.6434125900268555, "train/loss_error": 0.49187490344047546, "train/loss_total": 0.5221824645996094 }, { "epoch": 0.257013091103393, "step": 962, "train/loss_ctc": 0.9713155031204224, "train/loss_error": 0.5307884812355042, "train/loss_total": 0.6188938617706299 }, { "epoch": 0.25728025647876035, "step": 963, "train/loss_ctc": 0.9950878620147705, "train/loss_error": 0.5002003312110901, "train/loss_total": 0.5991778373718262 }, { "epoch": 0.2575474218541277, "step": 964, "train/loss_ctc": 1.0651108026504517, "train/loss_error": 0.4540897309780121, "train/loss_total": 0.5762939453125 }, { "epoch": 0.2578145872294951, "step": 965, "train/loss_ctc": 1.121514081954956, "train/loss_error": 0.5178548693656921, "train/loss_total": 0.638586699962616 }, { "epoch": 0.25808175260486244, "step": 966, "train/loss_ctc": 1.4838988780975342, "train/loss_error": 0.48897188901901245, "train/loss_total": 0.6879572868347168 }, { "epoch": 0.25834891798022974, "step": 967, "train/loss_ctc": 0.5259803533554077, "train/loss_error": 0.5168321132659912, "train/loss_total": 0.5186617970466614 }, { "epoch": 0.2586160833555971, "step": 968, "train/loss_ctc": 1.4694344997406006, "train/loss_error": 0.4810263216495514, "train/loss_total": 0.6787079572677612 }, { "epoch": 0.25888324873096447, "step": 969, "train/loss_ctc": 0.5031110048294067, "train/loss_error": 0.5578323006629944, "train/loss_total": 0.5468880534172058 }, { "epoch": 0.25915041410633183, "grad_norm": 1.7189151048660278, "learning_rate": 2.8449906492118624e-05, "loss": 0.5976, "step": 970 }, { "epoch": 0.25915041410633183, "step": 970, "train/loss_ctc": 0.9857419729232788, "train/loss_error": 0.543423056602478, "train/loss_total": 0.6318868398666382 }, { 
"epoch": 0.2594175794816992, "step": 971, "train/loss_ctc": 0.5092543363571167, "train/loss_error": 0.5596047043800354, "train/loss_total": 0.5495346784591675 }, { "epoch": 0.25968474485706655, "step": 972, "train/loss_ctc": 1.880692720413208, "train/loss_error": 0.5058349967002869, "train/loss_total": 0.7808065414428711 }, { "epoch": 0.25995191023243386, "step": 973, "train/loss_ctc": 0.6249849796295166, "train/loss_error": 0.5222281813621521, "train/loss_total": 0.5427795648574829 }, { "epoch": 0.2602190756078012, "step": 974, "train/loss_ctc": 1.0047547817230225, "train/loss_error": 0.5450382232666016, "train/loss_total": 0.6369815468788147 }, { "epoch": 0.2604862409831686, "step": 975, "train/loss_ctc": 1.0234265327453613, "train/loss_error": 0.5352997779846191, "train/loss_total": 0.6329251527786255 }, { "epoch": 0.26075340635853594, "step": 976, "train/loss_ctc": 0.8856403231620789, "train/loss_error": 0.4925535321235657, "train/loss_total": 0.5711708664894104 }, { "epoch": 0.2610205717339033, "step": 977, "train/loss_ctc": 0.49684959650039673, "train/loss_error": 0.5246436595916748, "train/loss_total": 0.5190848708152771 }, { "epoch": 0.2612877371092706, "step": 978, "train/loss_ctc": 0.4115733504295349, "train/loss_error": 0.5021757483482361, "train/loss_total": 0.4840552806854248 }, { "epoch": 0.261554902484638, "step": 979, "train/loss_ctc": 1.1890475749969482, "train/loss_error": 0.5102719068527222, "train/loss_total": 0.6460270285606384 }, { "epoch": 0.26182206786000534, "grad_norm": 2.3994152545928955, "learning_rate": 2.8433876569596582e-05, "loss": 0.5995, "step": 980 }, { "epoch": 0.26182206786000534, "step": 980, "train/loss_ctc": 1.678800106048584, "train/loss_error": 0.5288023352622986, "train/loss_total": 0.7588019371032715 }, { "epoch": 0.2620892332353727, "step": 981, "train/loss_ctc": 1.118913173675537, "train/loss_error": 0.48206430673599243, "train/loss_total": 0.6094340682029724 }, { "epoch": 0.26235639861074006, "step": 982, 
"train/loss_ctc": 0.913030207157135, "train/loss_error": 0.5189501047134399, "train/loss_total": 0.5977661609649658 }, { "epoch": 0.2626235639861074, "step": 983, "train/loss_ctc": 1.1823103427886963, "train/loss_error": 0.4657309353351593, "train/loss_total": 0.6090468168258667 }, { "epoch": 0.26289072936147473, "step": 984, "train/loss_ctc": 0.9651087522506714, "train/loss_error": 0.4586275815963745, "train/loss_total": 0.5599238276481628 }, { "epoch": 0.2631578947368421, "step": 985, "train/loss_ctc": 0.5907794833183289, "train/loss_error": 0.5564531683921814, "train/loss_total": 0.5633184313774109 }, { "epoch": 0.26342506011220945, "step": 986, "train/loss_ctc": 1.33514404296875, "train/loss_error": 0.500235915184021, "train/loss_total": 0.6672175526618958 }, { "epoch": 0.2636922254875768, "step": 987, "train/loss_ctc": 0.7822925448417664, "train/loss_error": 0.5114529728889465, "train/loss_total": 0.5656208992004395 }, { "epoch": 0.2639593908629442, "step": 988, "train/loss_ctc": 0.8694069981575012, "train/loss_error": 0.5823407769203186, "train/loss_total": 0.6397539973258972 }, { "epoch": 0.26422655623831154, "step": 989, "train/loss_ctc": 0.6398468017578125, "train/loss_error": 0.5658832788467407, "train/loss_total": 0.5806760191917419 }, { "epoch": 0.26449372161367884, "grad_norm": 1.7065773010253906, "learning_rate": 2.841784664707454e-05, "loss": 0.6152, "step": 990 }, { "epoch": 0.26449372161367884, "step": 990, "train/loss_ctc": 1.0312285423278809, "train/loss_error": 0.4640553891658783, "train/loss_total": 0.5774900317192078 }, { "epoch": 0.2647608869890462, "step": 991, "train/loss_ctc": 0.7361096739768982, "train/loss_error": 0.5168078541755676, "train/loss_total": 0.5606682300567627 }, { "epoch": 0.26502805236441357, "step": 992, "train/loss_ctc": 0.8031829595565796, "train/loss_error": 0.5057724714279175, "train/loss_total": 0.5652545690536499 }, { "epoch": 0.26529521773978093, "step": 993, "train/loss_ctc": 0.6796929836273193, "train/loss_error": 
0.42434531450271606, "train/loss_total": 0.47541487216949463 }, { "epoch": 0.2655623831151483, "step": 994, "train/loss_ctc": 0.6227737069129944, "train/loss_error": 0.5553726553916931, "train/loss_total": 0.5688528418540955 }, { "epoch": 0.26582954849051565, "step": 995, "train/loss_ctc": 2.0378289222717285, "train/loss_error": 0.4751475155353546, "train/loss_total": 0.7876838445663452 }, { "epoch": 0.26609671386588296, "step": 996, "train/loss_ctc": 1.6227751970291138, "train/loss_error": 0.4801112115383148, "train/loss_total": 0.7086440324783325 }, { "epoch": 0.2663638792412503, "step": 997, "train/loss_ctc": 0.7936679720878601, "train/loss_error": 0.5551573634147644, "train/loss_total": 0.6028594970703125 }, { "epoch": 0.2666310446166177, "step": 998, "train/loss_ctc": 0.5472421646118164, "train/loss_error": 0.5007869601249695, "train/loss_total": 0.5100780129432678 }, { "epoch": 0.26689820999198505, "step": 999, "train/loss_ctc": 0.49820446968078613, "train/loss_error": 0.5325030088424683, "train/loss_total": 0.5256432890892029 }, { "epoch": 0.2671653753673524, "grad_norm": 6.236136436462402, "learning_rate": 2.8401816724552498e-05, "loss": 0.5883, "step": 1000 }, { "epoch": 0.2671653753673524, "step": 1000, "train/loss_ctc": 0.8652306795120239, "train/loss_error": 0.5212668776512146, "train/loss_total": 0.5900596380233765 }, { "epoch": 0.26743254074271977, "step": 1001, "train/loss_ctc": 0.920620322227478, "train/loss_error": 0.5233315229415894, "train/loss_total": 0.6027892827987671 }, { "epoch": 0.2676997061180871, "step": 1002, "train/loss_ctc": 0.5826253890991211, "train/loss_error": 0.4800344407558441, "train/loss_total": 0.5005526542663574 }, { "epoch": 0.26796687149345444, "step": 1003, "train/loss_ctc": 0.9463958740234375, "train/loss_error": 0.4996936619281769, "train/loss_total": 0.5890341401100159 }, { "epoch": 0.2682340368688218, "step": 1004, "train/loss_ctc": 0.6834570169448853, "train/loss_error": 0.5996129512786865, "train/loss_total": 
0.6163817644119263 }, { "epoch": 0.26850120224418916, "step": 1005, "train/loss_ctc": 0.9030910134315491, "train/loss_error": 0.4873582124710083, "train/loss_total": 0.5705047845840454 }, { "epoch": 0.2687683676195565, "step": 1006, "train/loss_ctc": 0.8246825933456421, "train/loss_error": 0.5702928304672241, "train/loss_total": 0.6211708188056946 }, { "epoch": 0.26903553299492383, "step": 1007, "train/loss_ctc": 1.0130689144134521, "train/loss_error": 0.5051876306533813, "train/loss_total": 0.6067638993263245 }, { "epoch": 0.2693026983702912, "step": 1008, "train/loss_ctc": 0.8055042624473572, "train/loss_error": 0.5410560369491577, "train/loss_total": 0.5939456820487976 }, { "epoch": 0.26956986374565856, "step": 1009, "train/loss_ctc": 1.2208977937698364, "train/loss_error": 0.5047102570533752, "train/loss_total": 0.6479477882385254 }, { "epoch": 0.2698370291210259, "grad_norm": 0.8658374547958374, "learning_rate": 2.8385786802030456e-05, "loss": 0.5939, "step": 1010 }, { "epoch": 0.2698370291210259, "step": 1010, "train/loss_ctc": 0.4624843895435333, "train/loss_error": 0.531988263130188, "train/loss_total": 0.5180875062942505 }, { "epoch": 0.2701041944963933, "step": 1011, "train/loss_ctc": 1.1070339679718018, "train/loss_error": 0.5089247226715088, "train/loss_total": 0.6285465955734253 }, { "epoch": 0.27037135987176064, "step": 1012, "train/loss_ctc": 1.2148325443267822, "train/loss_error": 0.5066384673118591, "train/loss_total": 0.6482772827148438 }, { "epoch": 0.27063852524712795, "step": 1013, "train/loss_ctc": 1.1086187362670898, "train/loss_error": 0.5038139820098877, "train/loss_total": 0.6247749328613281 }, { "epoch": 0.2709056906224953, "step": 1014, "train/loss_ctc": 1.5694916248321533, "train/loss_error": 0.47122350335121155, "train/loss_total": 0.6908771395683289 }, { "epoch": 0.27117285599786267, "step": 1015, "train/loss_ctc": 1.3115952014923096, "train/loss_error": 0.5434596538543701, "train/loss_total": 0.6970868110656738 }, { "epoch": 
0.27144002137323003, "step": 1016, "train/loss_ctc": 0.7847313284873962, "train/loss_error": 0.5301573872566223, "train/loss_total": 0.581072211265564 }, { "epoch": 0.2717071867485974, "step": 1017, "train/loss_ctc": 0.7493692636489868, "train/loss_error": 0.5792976021766663, "train/loss_total": 0.6133119463920593 }, { "epoch": 0.27197435212396476, "step": 1018, "train/loss_ctc": 1.4553115367889404, "train/loss_error": 0.5052549839019775, "train/loss_total": 0.6952663064002991 }, { "epoch": 0.27224151749933206, "step": 1019, "train/loss_ctc": 1.1343282461166382, "train/loss_error": 0.4983988404273987, "train/loss_total": 0.6255847215652466 }, { "epoch": 0.2725086828746994, "grad_norm": 1.1246081590652466, "learning_rate": 2.8369756879508414e-05, "loss": 0.6323, "step": 1020 }, { "epoch": 0.2725086828746994, "step": 1020, "train/loss_ctc": 1.1858748197555542, "train/loss_error": 0.4987359941005707, "train/loss_total": 0.6361637711524963 }, { "epoch": 0.2727758482500668, "step": 1021, "train/loss_ctc": 0.9155614972114563, "train/loss_error": 0.5265827178955078, "train/loss_total": 0.6043784618377686 }, { "epoch": 0.27304301362543415, "step": 1022, "train/loss_ctc": 1.6575840711593628, "train/loss_error": 0.4946179687976837, "train/loss_total": 0.7272112369537354 }, { "epoch": 0.2733101790008015, "step": 1023, "train/loss_ctc": 0.8720295429229736, "train/loss_error": 0.4698387682437897, "train/loss_total": 0.5502769351005554 }, { "epoch": 0.2735773443761689, "step": 1024, "train/loss_ctc": 0.8778699636459351, "train/loss_error": 0.4664560556411743, "train/loss_total": 0.5487388372421265 }, { "epoch": 0.2738445097515362, "step": 1025, "train/loss_ctc": 0.6635571718215942, "train/loss_error": 0.4314153790473938, "train/loss_total": 0.4778437614440918 }, { "epoch": 0.27411167512690354, "step": 1026, "train/loss_ctc": 1.2712299823760986, "train/loss_error": 0.45879194140434265, "train/loss_total": 0.6212795972824097 }, { "epoch": 0.2743788405022709, "step": 1027, 
"train/loss_ctc": 0.7523744702339172, "train/loss_error": 0.48039039969444275, "train/loss_total": 0.5347872376441956 }, { "epoch": 0.27464600587763827, "step": 1028, "train/loss_ctc": 0.9915356636047363, "train/loss_error": 0.44284534454345703, "train/loss_total": 0.5525833964347839 }, { "epoch": 0.27491317125300563, "step": 1029, "train/loss_ctc": 0.9755409359931946, "train/loss_error": 0.5138897895812988, "train/loss_total": 0.606220006942749 }, { "epoch": 0.275180336628373, "grad_norm": 1.183128833770752, "learning_rate": 2.8353726956986376e-05, "loss": 0.5859, "step": 1030 }, { "epoch": 0.275180336628373, "step": 1030, "train/loss_ctc": 0.7965614199638367, "train/loss_error": 0.48804911971092224, "train/loss_total": 0.5497515797615051 }, { "epoch": 0.2754475020037403, "step": 1031, "train/loss_ctc": 0.7045763731002808, "train/loss_error": 0.514849841594696, "train/loss_total": 0.5527951717376709 }, { "epoch": 0.27571466737910766, "step": 1032, "train/loss_ctc": 0.47738003730773926, "train/loss_error": 0.5892190933227539, "train/loss_total": 0.5668513178825378 }, { "epoch": 0.275981832754475, "step": 1033, "train/loss_ctc": 1.3810315132141113, "train/loss_error": 0.47418031096458435, "train/loss_total": 0.6555505990982056 }, { "epoch": 0.2762489981298424, "step": 1034, "train/loss_ctc": 0.6681768894195557, "train/loss_error": 0.470220685005188, "train/loss_total": 0.5098119378089905 }, { "epoch": 0.27651616350520974, "step": 1035, "train/loss_ctc": 1.1921758651733398, "train/loss_error": 0.5451061725616455, "train/loss_total": 0.6745201349258423 }, { "epoch": 0.27678332888057705, "step": 1036, "train/loss_ctc": 0.8367992639541626, "train/loss_error": 0.4902208149433136, "train/loss_total": 0.5595365166664124 }, { "epoch": 0.2770504942559444, "step": 1037, "train/loss_ctc": 0.8126146793365479, "train/loss_error": 0.5004315972328186, "train/loss_total": 0.5628682374954224 }, { "epoch": 0.2773176596313118, "step": 1038, "train/loss_ctc": 0.9409319162368774, 
"train/loss_error": 0.547825813293457, "train/loss_total": 0.6264470219612122 }, { "epoch": 0.27758482500667914, "step": 1039, "train/loss_ctc": 0.7388333082199097, "train/loss_error": 0.5033689141273499, "train/loss_total": 0.5504618287086487 }, { "epoch": 0.2778519903820465, "grad_norm": 1.6962047815322876, "learning_rate": 2.8337697034464334e-05, "loss": 0.5809, "step": 1040 }, { "epoch": 0.2778519903820465, "step": 1040, "train/loss_ctc": 0.9530940055847168, "train/loss_error": 0.517834484577179, "train/loss_total": 0.6048864126205444 }, { "epoch": 0.27811915575741386, "step": 1041, "train/loss_ctc": 1.1167861223220825, "train/loss_error": 0.4348565638065338, "train/loss_total": 0.5712424516677856 }, { "epoch": 0.27838632113278117, "step": 1042, "train/loss_ctc": 0.7843544483184814, "train/loss_error": 0.5549915432929993, "train/loss_total": 0.6008641123771667 }, { "epoch": 0.27865348650814853, "step": 1043, "train/loss_ctc": 1.2080292701721191, "train/loss_error": 0.49667495489120483, "train/loss_total": 0.6389458179473877 }, { "epoch": 0.2789206518835159, "step": 1044, "train/loss_ctc": 0.5120350122451782, "train/loss_error": 0.5221047401428223, "train/loss_total": 0.5200908184051514 }, { "epoch": 0.27918781725888325, "step": 1045, "train/loss_ctc": 1.01726233959198, "train/loss_error": 0.47746536135673523, "train/loss_total": 0.5854247808456421 }, { "epoch": 0.2794549826342506, "step": 1046, "train/loss_ctc": 1.2620108127593994, "train/loss_error": 0.48653286695480347, "train/loss_total": 0.6416284441947937 }, { "epoch": 0.279722148009618, "step": 1047, "train/loss_ctc": 1.0167620182037354, "train/loss_error": 0.5407306551933289, "train/loss_total": 0.6359369158744812 }, { "epoch": 0.2799893133849853, "step": 1048, "train/loss_ctc": 0.6127496361732483, "train/loss_error": 0.5363116264343262, "train/loss_total": 0.5515992641448975 }, { "epoch": 0.28025647876035265, "step": 1049, "train/loss_ctc": 0.7231857180595398, "train/loss_error": 0.5602186322212219, 
"train/loss_total": 0.5928120613098145 }, { "epoch": 0.28052364413572, "grad_norm": 1.6305011510849, "learning_rate": 2.8321667111942292e-05, "loss": 0.5943, "step": 1050 }, { "epoch": 0.28052364413572, "step": 1050, "train/loss_ctc": 0.8705089092254639, "train/loss_error": 0.4973793923854828, "train/loss_total": 0.5720053315162659 }, { "epoch": 0.28079080951108737, "step": 1051, "train/loss_ctc": 0.4104495644569397, "train/loss_error": 0.529050886631012, "train/loss_total": 0.5053306221961975 }, { "epoch": 0.28105797488645473, "step": 1052, "train/loss_ctc": 1.2830355167388916, "train/loss_error": 0.5156480669975281, "train/loss_total": 0.6691255569458008 }, { "epoch": 0.2813251402618221, "step": 1053, "train/loss_ctc": 0.47612956166267395, "train/loss_error": 0.5427262783050537, "train/loss_total": 0.5294069647789001 }, { "epoch": 0.2815923056371894, "step": 1054, "train/loss_ctc": 1.0046370029449463, "train/loss_error": 0.5348653793334961, "train/loss_total": 0.6288197040557861 }, { "epoch": 0.28185947101255676, "step": 1055, "train/loss_ctc": 0.4734630882740021, "train/loss_error": 0.5126544833183289, "train/loss_total": 0.5048161745071411 }, { "epoch": 0.2821266363879241, "step": 1056, "train/loss_ctc": 0.8728653788566589, "train/loss_error": 0.4849846661090851, "train/loss_total": 0.5625607967376709 }, { "epoch": 0.2823938017632915, "step": 1057, "train/loss_ctc": 0.8674935102462769, "train/loss_error": 0.468342125415802, "train/loss_total": 0.5481724143028259 }, { "epoch": 0.28266096713865885, "step": 1058, "train/loss_ctc": 1.1964547634124756, "train/loss_error": 0.5299311280250549, "train/loss_total": 0.6632359027862549 }, { "epoch": 0.2829281325140262, "step": 1059, "train/loss_ctc": 0.9110982418060303, "train/loss_error": 0.47576674818992615, "train/loss_total": 0.5628330707550049 }, { "epoch": 0.2831952978893935, "grad_norm": 1.2698413133621216, "learning_rate": 2.830563718942025e-05, "loss": 0.5746, "step": 1060 }, { "epoch": 0.2831952978893935, 
"step": 1060, "train/loss_ctc": 1.6390743255615234, "train/loss_error": 0.5469023585319519, "train/loss_total": 0.7653367519378662 }, { "epoch": 0.2834624632647609, "step": 1061, "train/loss_ctc": 0.3148919343948364, "train/loss_error": 0.5116953253746033, "train/loss_total": 0.4723346531391144 }, { "epoch": 0.28372962864012824, "step": 1062, "train/loss_ctc": 1.02885103225708, "train/loss_error": 0.48864173889160156, "train/loss_total": 0.5966836214065552 }, { "epoch": 0.2839967940154956, "step": 1063, "train/loss_ctc": 0.6727335453033447, "train/loss_error": 0.5274327397346497, "train/loss_total": 0.5564929246902466 }, { "epoch": 0.28426395939086296, "step": 1064, "train/loss_ctc": 0.45345282554626465, "train/loss_error": 0.5484063625335693, "train/loss_total": 0.5294156670570374 }, { "epoch": 0.28453112476623027, "step": 1065, "train/loss_ctc": 0.8913042545318604, "train/loss_error": 0.46439382433891296, "train/loss_total": 0.5497758984565735 }, { "epoch": 0.28479829014159763, "step": 1066, "train/loss_ctc": 0.41325482726097107, "train/loss_error": 0.5338885188102722, "train/loss_total": 0.5097618103027344 }, { "epoch": 0.285065455516965, "step": 1067, "train/loss_ctc": 0.7795112133026123, "train/loss_error": 0.5274984240531921, "train/loss_total": 0.5779010057449341 }, { "epoch": 0.28533262089233236, "step": 1068, "train/loss_ctc": 0.8800858855247498, "train/loss_error": 0.5172678232192993, "train/loss_total": 0.5898314714431763 }, { "epoch": 0.2855997862676997, "step": 1069, "train/loss_ctc": 0.9803594350814819, "train/loss_error": 0.45024535059928894, "train/loss_total": 0.5562682151794434 }, { "epoch": 0.2858669516430671, "grad_norm": 1.1469037532806396, "learning_rate": 2.8289607266898208e-05, "loss": 0.5704, "step": 1070 }, { "epoch": 0.2858669516430671, "step": 1070, "train/loss_ctc": 0.5527356266975403, "train/loss_error": 0.49123039841651917, "train/loss_total": 0.5035314559936523 }, { "epoch": 0.2861341170184344, "step": 1071, "train/loss_ctc": 
1.0815455913543701, "train/loss_error": 0.5322468280792236, "train/loss_total": 0.6421065926551819 }, { "epoch": 0.28640128239380175, "step": 1072, "train/loss_ctc": 1.0348610877990723, "train/loss_error": 0.5315369963645935, "train/loss_total": 0.6322018504142761 }, { "epoch": 0.2866684477691691, "step": 1073, "train/loss_ctc": 0.7819468975067139, "train/loss_error": 0.5552691221237183, "train/loss_total": 0.6006046533584595 }, { "epoch": 0.28693561314453647, "step": 1074, "train/loss_ctc": 0.7641949653625488, "train/loss_error": 0.564645528793335, "train/loss_total": 0.6045554280281067 }, { "epoch": 0.28720277851990383, "step": 1075, "train/loss_ctc": 0.8900673389434814, "train/loss_error": 0.49184611439704895, "train/loss_total": 0.5714904069900513 }, { "epoch": 0.2874699438952712, "step": 1076, "train/loss_ctc": 1.1804721355438232, "train/loss_error": 0.4732615053653717, "train/loss_total": 0.6147036552429199 }, { "epoch": 0.2877371092706385, "step": 1077, "train/loss_ctc": 0.8808681964874268, "train/loss_error": 0.48415425419807434, "train/loss_total": 0.5634970664978027 }, { "epoch": 0.28800427464600586, "step": 1078, "train/loss_ctc": 0.7101097702980042, "train/loss_error": 0.461374968290329, "train/loss_total": 0.511121928691864 }, { "epoch": 0.2882714400213732, "step": 1079, "train/loss_ctc": 0.9558542966842651, "train/loss_error": 0.48006632924079895, "train/loss_total": 0.5752239227294922 }, { "epoch": 0.2885386053967406, "grad_norm": 1.199493169784546, "learning_rate": 2.827357734437617e-05, "loss": 0.5819, "step": 1080 }, { "epoch": 0.2885386053967406, "step": 1080, "train/loss_ctc": 2.1002182960510254, "train/loss_error": 0.494953989982605, "train/loss_total": 0.8160068988800049 }, { "epoch": 0.28880577077210795, "step": 1081, "train/loss_ctc": 0.6174693703651428, "train/loss_error": 0.458391934633255, "train/loss_total": 0.4902074337005615 }, { "epoch": 0.2890729361474753, "step": 1082, "train/loss_ctc": 1.3565897941589355, "train/loss_error": 
0.524174153804779, "train/loss_total": 0.6906572580337524 }, { "epoch": 0.2893401015228426, "step": 1083, "train/loss_ctc": 0.9019970893859863, "train/loss_error": 0.4551153779029846, "train/loss_total": 0.5444917678833008 }, { "epoch": 0.28960726689821, "step": 1084, "train/loss_ctc": 0.46620333194732666, "train/loss_error": 0.5444275736808777, "train/loss_total": 0.5287827253341675 }, { "epoch": 0.28987443227357734, "step": 1085, "train/loss_ctc": 0.8552090525627136, "train/loss_error": 0.5145465731620789, "train/loss_total": 0.5826790928840637 }, { "epoch": 0.2901415976489447, "step": 1086, "train/loss_ctc": 0.9069240093231201, "train/loss_error": 0.47093117237091064, "train/loss_total": 0.5581297874450684 }, { "epoch": 0.29040876302431207, "step": 1087, "train/loss_ctc": 0.6047811508178711, "train/loss_error": 0.5783179998397827, "train/loss_total": 0.5836106538772583 }, { "epoch": 0.29067592839967943, "step": 1088, "train/loss_ctc": 1.3652901649475098, "train/loss_error": 0.5483459830284119, "train/loss_total": 0.7117348313331604 }, { "epoch": 0.29094309377504673, "step": 1089, "train/loss_ctc": 0.6051489114761353, "train/loss_error": 0.47803816199302673, "train/loss_total": 0.5034602880477905 }, { "epoch": 0.2912102591504141, "grad_norm": 0.9193586111068726, "learning_rate": 2.825754742185413e-05, "loss": 0.601, "step": 1090 }, { "epoch": 0.2912102591504141, "step": 1090, "train/loss_ctc": 0.9827505350112915, "train/loss_error": 0.5072555541992188, "train/loss_total": 0.6023545861244202 }, { "epoch": 0.29147742452578146, "step": 1091, "train/loss_ctc": 0.5390650033950806, "train/loss_error": 0.5038344264030457, "train/loss_total": 0.5108805894851685 }, { "epoch": 0.2917445899011488, "step": 1092, "train/loss_ctc": 1.2283260822296143, "train/loss_error": 0.46367955207824707, "train/loss_total": 0.6166088581085205 }, { "epoch": 0.2920117552765162, "step": 1093, "train/loss_ctc": 0.6322541236877441, "train/loss_error": 0.4525526165962219, "train/loss_total": 
0.4884929060935974 }, { "epoch": 0.2922789206518835, "step": 1094, "train/loss_ctc": 1.9102392196655273, "train/loss_error": 0.5196288228034973, "train/loss_total": 0.7977509498596191 }, { "epoch": 0.29254608602725085, "step": 1095, "train/loss_ctc": 0.856357991695404, "train/loss_error": 0.5455542802810669, "train/loss_total": 0.6077150106430054 }, { "epoch": 0.2928132514026182, "step": 1096, "train/loss_ctc": 1.2691795825958252, "train/loss_error": 0.5494890213012695, "train/loss_total": 0.6934271454811096 }, { "epoch": 0.2930804167779856, "step": 1097, "train/loss_ctc": 0.8212789297103882, "train/loss_error": 0.5017679929733276, "train/loss_total": 0.5656701922416687 }, { "epoch": 0.29334758215335294, "step": 1098, "train/loss_ctc": 0.9655287265777588, "train/loss_error": 0.43700751662254333, "train/loss_total": 0.5427117347717285 }, { "epoch": 0.2936147475287203, "step": 1099, "train/loss_ctc": 0.8220295310020447, "train/loss_error": 0.48215243220329285, "train/loss_total": 0.5501278638839722 }, { "epoch": 0.2938819129040876, "grad_norm": 3.604490041732788, "learning_rate": 2.824151749933209e-05, "loss": 0.5976, "step": 1100 }, { "epoch": 0.2938819129040876, "step": 1100, "train/loss_ctc": 0.7492812275886536, "train/loss_error": 0.4910247325897217, "train/loss_total": 0.5426760315895081 }, { "epoch": 0.29414907827945497, "step": 1101, "train/loss_ctc": 0.5087481737136841, "train/loss_error": 0.606319010257721, "train/loss_total": 0.5868048667907715 }, { "epoch": 0.29441624365482233, "step": 1102, "train/loss_ctc": 1.6133055686950684, "train/loss_error": 0.5297331809997559, "train/loss_total": 0.7464476823806763 }, { "epoch": 0.2946834090301897, "step": 1103, "train/loss_ctc": 0.8077778816223145, "train/loss_error": 0.45904022455215454, "train/loss_total": 0.5287877321243286 }, { "epoch": 0.29495057440555705, "step": 1104, "train/loss_ctc": 1.061334490776062, "train/loss_error": 0.47439855337142944, "train/loss_total": 0.591785728931427 }, { "epoch": 
0.2952177397809244, "step": 1105, "train/loss_ctc": 0.6409186720848083, "train/loss_error": 0.5144848227500916, "train/loss_total": 0.5397716164588928 }, { "epoch": 0.2954849051562917, "step": 1106, "train/loss_ctc": 1.228123426437378, "train/loss_error": 0.5173777937889099, "train/loss_total": 0.6595269441604614 }, { "epoch": 0.2957520705316591, "step": 1107, "train/loss_ctc": 1.1330134868621826, "train/loss_error": 0.5040000081062317, "train/loss_total": 0.6298027038574219 }, { "epoch": 0.29601923590702645, "step": 1108, "train/loss_ctc": 1.4027990102767944, "train/loss_error": 0.4647281765937805, "train/loss_total": 0.6523423194885254 }, { "epoch": 0.2962864012823938, "step": 1109, "train/loss_ctc": 1.0892393589019775, "train/loss_error": 0.4887852370738983, "train/loss_total": 0.6088760495185852 }, { "epoch": 0.29655356665776117, "grad_norm": 1.2278351783752441, "learning_rate": 2.8225487576810047e-05, "loss": 0.6087, "step": 1110 }, { "epoch": 0.29655356665776117, "step": 1110, "train/loss_ctc": 0.5419203042984009, "train/loss_error": 0.4756966531276703, "train/loss_total": 0.48894140124320984 }, { "epoch": 0.29682073203312853, "step": 1111, "train/loss_ctc": 0.5863260626792908, "train/loss_error": 0.5010380148887634, "train/loss_total": 0.5180956125259399 }, { "epoch": 0.29708789740849584, "step": 1112, "train/loss_ctc": 0.8071649074554443, "train/loss_error": 0.5353680849075317, "train/loss_total": 0.5897274613380432 }, { "epoch": 0.2973550627838632, "step": 1113, "train/loss_ctc": 0.5055100917816162, "train/loss_error": 0.519750714302063, "train/loss_total": 0.5169025659561157 }, { "epoch": 0.29762222815923056, "step": 1114, "train/loss_ctc": 0.6947228908538818, "train/loss_error": 0.46081778407096863, "train/loss_total": 0.5075988173484802 }, { "epoch": 0.2978893935345979, "step": 1115, "train/loss_ctc": 0.7195894718170166, "train/loss_error": 0.49194765090942383, "train/loss_total": 0.5374760031700134 }, { "epoch": 0.2981565589099653, "step": 1116, 
"train/loss_ctc": 0.736169695854187, "train/loss_error": 0.42000612616539, "train/loss_total": 0.4832388758659363 }, { "epoch": 0.29842372428533265, "step": 1117, "train/loss_ctc": 0.833864688873291, "train/loss_error": 0.6055030226707458, "train/loss_total": 0.6511753797531128 }, { "epoch": 0.29869088966069995, "step": 1118, "train/loss_ctc": 1.1237794160842896, "train/loss_error": 0.46735408902168274, "train/loss_total": 0.5986391305923462 }, { "epoch": 0.2989580550360673, "step": 1119, "train/loss_ctc": 1.0863983631134033, "train/loss_error": 0.5381761193275452, "train/loss_total": 0.6478205919265747 }, { "epoch": 0.2992252204114347, "grad_norm": 1.0859586000442505, "learning_rate": 2.8209457654288005e-05, "loss": 0.554, "step": 1120 }, { "epoch": 0.2992252204114347, "step": 1120, "train/loss_ctc": 0.9405484795570374, "train/loss_error": 0.4904051423072815, "train/loss_total": 0.5804338455200195 }, { "epoch": 0.29949238578680204, "step": 1121, "train/loss_ctc": 1.4615144729614258, "train/loss_error": 0.48244529962539673, "train/loss_total": 0.6782591342926025 }, { "epoch": 0.2997595511621694, "step": 1122, "train/loss_ctc": 1.1373988389968872, "train/loss_error": 0.5085961818695068, "train/loss_total": 0.6343567371368408 }, { "epoch": 0.3000267165375367, "step": 1123, "train/loss_ctc": 0.7368777394294739, "train/loss_error": 0.5437148213386536, "train/loss_total": 0.5823473930358887 }, { "epoch": 0.30029388191290407, "step": 1124, "train/loss_ctc": 1.270589828491211, "train/loss_error": 0.5103675127029419, "train/loss_total": 0.6624119877815247 }, { "epoch": 0.30056104728827143, "step": 1125, "train/loss_ctc": 1.5786045789718628, "train/loss_error": 0.5414051413536072, "train/loss_total": 0.7488450407981873 }, { "epoch": 0.3008282126636388, "step": 1126, "train/loss_ctc": 1.1236217021942139, "train/loss_error": 0.5220602750778198, "train/loss_total": 0.6423725485801697 }, { "epoch": 0.30109537803900616, "step": 1127, "train/loss_ctc": 0.8021524548530579, 
"train/loss_error": 0.4775997996330261, "train/loss_total": 0.5425103306770325 }, { "epoch": 0.3013625434143735, "step": 1128, "train/loss_ctc": 1.076059103012085, "train/loss_error": 0.5208737850189209, "train/loss_total": 0.6319108605384827 }, { "epoch": 0.3016297087897408, "step": 1129, "train/loss_ctc": 0.4719177186489105, "train/loss_error": 0.5132819414138794, "train/loss_total": 0.505009114742279 }, { "epoch": 0.3018968741651082, "grad_norm": 2.5967857837677, "learning_rate": 2.8193427731765963e-05, "loss": 0.6208, "step": 1130 }, { "epoch": 0.3018968741651082, "step": 1130, "train/loss_ctc": 1.649173617362976, "train/loss_error": 0.492342084646225, "train/loss_total": 0.7237083911895752 }, { "epoch": 0.30216403954047555, "step": 1131, "train/loss_ctc": 0.6727229952812195, "train/loss_error": 0.4773617684841156, "train/loss_total": 0.5164340138435364 }, { "epoch": 0.3024312049158429, "step": 1132, "train/loss_ctc": 0.7918474674224854, "train/loss_error": 0.4588651955127716, "train/loss_total": 0.5254616737365723 }, { "epoch": 0.3026983702912103, "step": 1133, "train/loss_ctc": 0.9156040549278259, "train/loss_error": 0.44133269786834717, "train/loss_total": 0.5361869931221008 }, { "epoch": 0.30296553566657763, "step": 1134, "train/loss_ctc": 1.2943241596221924, "train/loss_error": 0.4988453686237335, "train/loss_total": 0.6579411625862122 }, { "epoch": 0.30323270104194494, "step": 1135, "train/loss_ctc": 0.651709794998169, "train/loss_error": 0.5483972430229187, "train/loss_total": 0.5690597891807556 }, { "epoch": 0.3034998664173123, "step": 1136, "train/loss_ctc": 0.726571798324585, "train/loss_error": 0.4526321291923523, "train/loss_total": 0.5074200630187988 }, { "epoch": 0.30376703179267966, "step": 1137, "train/loss_ctc": 1.0819417238235474, "train/loss_error": 0.5256638526916504, "train/loss_total": 0.6369194388389587 }, { "epoch": 0.304034197168047, "step": 1138, "train/loss_ctc": 0.6470769643783569, "train/loss_error": 0.5370212197303772, 
"train/loss_total": 0.5590323805809021 }, { "epoch": 0.3043013625434144, "step": 1139, "train/loss_ctc": 0.7266280651092529, "train/loss_error": 0.4605952799320221, "train/loss_total": 0.5138018727302551 }, { "epoch": 0.30456852791878175, "grad_norm": 2.1929879188537598, "learning_rate": 2.817739780924392e-05, "loss": 0.5746, "step": 1140 }, { "epoch": 0.30456852791878175, "step": 1140, "train/loss_ctc": 0.5065680742263794, "train/loss_error": 0.3982886075973511, "train/loss_total": 0.41994452476501465 }, { "epoch": 0.30483569329414906, "step": 1141, "train/loss_ctc": 0.592818021774292, "train/loss_error": 0.5609399080276489, "train/loss_total": 0.5673155188560486 }, { "epoch": 0.3051028586695164, "step": 1142, "train/loss_ctc": 0.46688756346702576, "train/loss_error": 0.5140247344970703, "train/loss_total": 0.5045973062515259 }, { "epoch": 0.3053700240448838, "step": 1143, "train/loss_ctc": 0.42681384086608887, "train/loss_error": 0.49047183990478516, "train/loss_total": 0.47774022817611694 }, { "epoch": 0.30563718942025114, "step": 1144, "train/loss_ctc": 1.068434238433838, "train/loss_error": 0.44098883867263794, "train/loss_total": 0.56647789478302 }, { "epoch": 0.3059043547956185, "step": 1145, "train/loss_ctc": 0.5959700345993042, "train/loss_error": 0.48296797275543213, "train/loss_total": 0.5055683851242065 }, { "epoch": 0.30617152017098587, "step": 1146, "train/loss_ctc": 1.7210750579833984, "train/loss_error": 0.519488513469696, "train/loss_total": 0.7598057985305786 }, { "epoch": 0.3064386855463532, "step": 1147, "train/loss_ctc": 0.8698604702949524, "train/loss_error": 0.5130146145820618, "train/loss_total": 0.5843837857246399 }, { "epoch": 0.30670585092172054, "step": 1148, "train/loss_ctc": 0.8069917559623718, "train/loss_error": 0.5485767722129822, "train/loss_total": 0.6002597808837891 }, { "epoch": 0.3069730162970879, "step": 1149, "train/loss_ctc": 0.8798857927322388, "train/loss_error": 0.48692744970321655, "train/loss_total": 0.5655190944671631 
}, { "epoch": 0.30724018167245526, "grad_norm": 1.72476065158844, "learning_rate": 2.8161367886721883e-05, "loss": 0.5552, "step": 1150 }, { "epoch": 0.30724018167245526, "step": 1150, "train/loss_ctc": 1.278379201889038, "train/loss_error": 0.5167299509048462, "train/loss_total": 0.6690598130226135 }, { "epoch": 0.3075073470478226, "step": 1151, "train/loss_ctc": 1.3116025924682617, "train/loss_error": 0.5198026895523071, "train/loss_total": 0.678162693977356 }, { "epoch": 0.3077745124231899, "step": 1152, "train/loss_ctc": 1.0274326801300049, "train/loss_error": 0.4793739318847656, "train/loss_total": 0.5889856815338135 }, { "epoch": 0.3080416777985573, "step": 1153, "train/loss_ctc": 0.3548687994480133, "train/loss_error": 0.4973118007183075, "train/loss_total": 0.4688231945037842 }, { "epoch": 0.30830884317392465, "step": 1154, "train/loss_ctc": 0.6527724862098694, "train/loss_error": 0.49993786215782166, "train/loss_total": 0.5305048227310181 }, { "epoch": 0.308576008549292, "step": 1155, "train/loss_ctc": 0.557962954044342, "train/loss_error": 0.5125445127487183, "train/loss_total": 0.521628201007843 }, { "epoch": 0.3088431739246594, "step": 1156, "train/loss_ctc": 1.0444661378860474, "train/loss_error": 0.4836418628692627, "train/loss_total": 0.5958067178726196 }, { "epoch": 0.30911033930002674, "step": 1157, "train/loss_ctc": 0.7784594297409058, "train/loss_error": 0.5111584663391113, "train/loss_total": 0.564618706703186 }, { "epoch": 0.30937750467539404, "step": 1158, "train/loss_ctc": 1.2338429689407349, "train/loss_error": 0.4806582033634186, "train/loss_total": 0.6312952041625977 }, { "epoch": 0.3096446700507614, "step": 1159, "train/loss_ctc": 1.299668312072754, "train/loss_error": 0.48633021116256714, "train/loss_total": 0.6489978432655334 }, { "epoch": 0.30991183542612877, "grad_norm": 2.574923276901245, "learning_rate": 2.814533796419984e-05, "loss": 0.5898, "step": 1160 }, { "epoch": 0.30991183542612877, "step": 1160, "train/loss_ctc": 
0.9011409282684326, "train/loss_error": 0.44209083914756775, "train/loss_total": 0.5339008569717407 }, { "epoch": 0.31017900080149613, "step": 1161, "train/loss_ctc": 1.3992232084274292, "train/loss_error": 0.48394379019737244, "train/loss_total": 0.6669996976852417 }, { "epoch": 0.3104461661768635, "step": 1162, "train/loss_ctc": 1.0886021852493286, "train/loss_error": 0.44875776767730713, "train/loss_total": 0.5767266750335693 }, { "epoch": 0.31071333155223085, "step": 1163, "train/loss_ctc": 0.43850797414779663, "train/loss_error": 0.48111847043037415, "train/loss_total": 0.4725963771343231 }, { "epoch": 0.31098049692759816, "step": 1164, "train/loss_ctc": 1.4673320055007935, "train/loss_error": 0.5450713634490967, "train/loss_total": 0.7295235395431519 }, { "epoch": 0.3112476623029655, "step": 1165, "train/loss_ctc": 0.8731098771095276, "train/loss_error": 0.5081071853637695, "train/loss_total": 0.5811077356338501 }, { "epoch": 0.3115148276783329, "step": 1166, "train/loss_ctc": 1.010240912437439, "train/loss_error": 0.4767206907272339, "train/loss_total": 0.5834247469902039 }, { "epoch": 0.31178199305370025, "step": 1167, "train/loss_ctc": 1.557000994682312, "train/loss_error": 0.5157648921012878, "train/loss_total": 0.7240121364593506 }, { "epoch": 0.3120491584290676, "step": 1168, "train/loss_ctc": 0.7531414031982422, "train/loss_error": 0.493755966424942, "train/loss_total": 0.54563307762146 }, { "epoch": 0.31231632380443497, "step": 1169, "train/loss_ctc": 1.4729710817337036, "train/loss_error": 0.5812178254127502, "train/loss_total": 0.759568452835083 }, { "epoch": 0.3125834891798023, "grad_norm": 1.3631430864334106, "learning_rate": 2.81293080416778e-05, "loss": 0.6173, "step": 1170 }, { "epoch": 0.3125834891798023, "step": 1170, "train/loss_ctc": 0.7916938066482544, "train/loss_error": 0.49926021695137024, "train/loss_total": 0.557746946811676 }, { "epoch": 0.31285065455516964, "step": 1171, "train/loss_ctc": 0.6950695514678955, "train/loss_error": 
0.4675653278827667, "train/loss_total": 0.5130661725997925 }, { "epoch": 0.313117819930537, "step": 1172, "train/loss_ctc": 0.7038575410842896, "train/loss_error": 0.5617329478263855, "train/loss_total": 0.5901578664779663 }, { "epoch": 0.31338498530590436, "step": 1173, "train/loss_ctc": 0.8210610151290894, "train/loss_error": 0.4684965908527374, "train/loss_total": 0.5390095114707947 }, { "epoch": 0.3136521506812717, "step": 1174, "train/loss_ctc": 1.4081720113754272, "train/loss_error": 0.47734376788139343, "train/loss_total": 0.6635094285011292 }, { "epoch": 0.3139193160566391, "step": 1175, "train/loss_ctc": 1.5739270448684692, "train/loss_error": 0.5130450129508972, "train/loss_total": 0.7252213954925537 }, { "epoch": 0.3141864814320064, "step": 1176, "train/loss_ctc": 1.2494239807128906, "train/loss_error": 0.4979197084903717, "train/loss_total": 0.6482205986976624 }, { "epoch": 0.31445364680737375, "step": 1177, "train/loss_ctc": 0.6863462924957275, "train/loss_error": 0.48729074001312256, "train/loss_total": 0.5271018743515015 }, { "epoch": 0.3147208121827411, "step": 1178, "train/loss_ctc": 0.7757273316383362, "train/loss_error": 0.4414352476596832, "train/loss_total": 0.508293628692627 }, { "epoch": 0.3149879775581085, "step": 1179, "train/loss_ctc": 1.1553306579589844, "train/loss_error": 0.49399614334106445, "train/loss_total": 0.6262630224227905 }, { "epoch": 0.31525514293347584, "grad_norm": 0.9899635314941406, "learning_rate": 2.8113278119155757e-05, "loss": 0.5899, "step": 1180 }, { "epoch": 0.31525514293347584, "step": 1180, "train/loss_ctc": 0.8455262184143066, "train/loss_error": 0.5689294338226318, "train/loss_total": 0.6242488026618958 }, { "epoch": 0.31552230830884315, "step": 1181, "train/loss_ctc": 0.9150599837303162, "train/loss_error": 0.5435004234313965, "train/loss_total": 0.6178123354911804 }, { "epoch": 0.3157894736842105, "step": 1182, "train/loss_ctc": 1.6559146642684937, "train/loss_error": 0.5569655299186707, "train/loss_total": 
0.7767553329467773 }, { "epoch": 0.31605663905957787, "step": 1183, "train/loss_ctc": 1.006522536277771, "train/loss_error": 0.5318096280097961, "train/loss_total": 0.6267521977424622 }, { "epoch": 0.31632380443494523, "step": 1184, "train/loss_ctc": 1.2531695365905762, "train/loss_error": 0.5131821036338806, "train/loss_total": 0.6611796021461487 }, { "epoch": 0.3165909698103126, "step": 1185, "train/loss_ctc": 1.189948558807373, "train/loss_error": 0.4759311079978943, "train/loss_total": 0.61873459815979 }, { "epoch": 0.31685813518567996, "step": 1186, "train/loss_ctc": 1.2285897731781006, "train/loss_error": 0.4285018742084503, "train/loss_total": 0.5885194540023804 }, { "epoch": 0.31712530056104726, "step": 1187, "train/loss_ctc": 0.9088031053543091, "train/loss_error": 0.5039812326431274, "train/loss_total": 0.5849456191062927 }, { "epoch": 0.3173924659364146, "step": 1188, "train/loss_ctc": 1.1218910217285156, "train/loss_error": 0.5218454599380493, "train/loss_total": 0.6418545842170715 }, { "epoch": 0.317659631311782, "step": 1189, "train/loss_ctc": 0.7297950983047485, "train/loss_error": 0.5391895174980164, "train/loss_total": 0.5773106813430786 }, { "epoch": 0.31792679668714935, "grad_norm": 1.620712399482727, "learning_rate": 2.8097248196633715e-05, "loss": 0.6318, "step": 1190 }, { "epoch": 0.31792679668714935, "step": 1190, "train/loss_ctc": 0.9223845601081848, "train/loss_error": 0.4515314996242523, "train/loss_total": 0.5457020998001099 }, { "epoch": 0.3181939620625167, "step": 1191, "train/loss_ctc": 1.162390947341919, "train/loss_error": 0.4794224202632904, "train/loss_total": 0.616016149520874 }, { "epoch": 0.3184611274378841, "step": 1192, "train/loss_ctc": 0.9072859287261963, "train/loss_error": 0.48659366369247437, "train/loss_total": 0.5707321166992188 }, { "epoch": 0.3187282928132514, "step": 1193, "train/loss_ctc": 0.7823413610458374, "train/loss_error": 0.6141000390052795, "train/loss_total": 0.6477483510971069 }, { "epoch": 
0.31899545818861874, "step": 1194, "train/loss_ctc": 1.176065444946289, "train/loss_error": 0.6024671196937561, "train/loss_total": 0.7171868085861206 }, { "epoch": 0.3192626235639861, "step": 1195, "train/loss_ctc": 0.7339866161346436, "train/loss_error": 0.5843140482902527, "train/loss_total": 0.6142485737800598 }, { "epoch": 0.31952978893935347, "step": 1196, "train/loss_ctc": 0.9623058438301086, "train/loss_error": 0.5613006949424744, "train/loss_total": 0.6415017247200012 }, { "epoch": 0.3197969543147208, "step": 1197, "train/loss_ctc": 0.6035205721855164, "train/loss_error": 0.5174853205680847, "train/loss_total": 0.5346924066543579 }, { "epoch": 0.3200641196900882, "step": 1198, "train/loss_ctc": 0.5739284753799438, "train/loss_error": 0.5170830488204956, "train/loss_total": 0.5284521579742432 }, { "epoch": 0.3203312850654555, "step": 1199, "train/loss_ctc": 0.6045169234275818, "train/loss_error": 0.5270988941192627, "train/loss_total": 0.5425825119018555 }, { "epoch": 0.32059845044082286, "grad_norm": 1.17533278465271, "learning_rate": 2.8081218274111677e-05, "loss": 0.5959, "step": 1200 }, { "epoch": 0.32059845044082286, "step": 1200, "train/loss_ctc": 1.987743616104126, "train/loss_error": 0.4453640878200531, "train/loss_total": 0.7538399696350098 }, { "epoch": 0.3208656158161902, "step": 1201, "train/loss_ctc": 0.9310663938522339, "train/loss_error": 0.5231773257255554, "train/loss_total": 0.604755163192749 }, { "epoch": 0.3211327811915576, "step": 1202, "train/loss_ctc": 0.5233606696128845, "train/loss_error": 0.5477831959724426, "train/loss_total": 0.5428986549377441 }, { "epoch": 0.32139994656692494, "step": 1203, "train/loss_ctc": 1.681534767150879, "train/loss_error": 0.4765990376472473, "train/loss_total": 0.7175861597061157 }, { "epoch": 0.3216671119422923, "step": 1204, "train/loss_ctc": 0.6602916121482849, "train/loss_error": 0.4932299554347992, "train/loss_total": 0.5266423225402832 }, { "epoch": 0.3219342773176596, "step": 1205, 
"train/loss_ctc": 0.6032588481903076, "train/loss_error": 0.5687209367752075, "train/loss_total": 0.5756285190582275 }, { "epoch": 0.322201442693027, "step": 1206, "train/loss_ctc": 0.5605090856552124, "train/loss_error": 0.5015490055084229, "train/loss_total": 0.5133410096168518 }, { "epoch": 0.32246860806839434, "step": 1207, "train/loss_ctc": 1.55655038356781, "train/loss_error": 0.5096677541732788, "train/loss_total": 0.7190443277359009 }, { "epoch": 0.3227357734437617, "step": 1208, "train/loss_ctc": 0.9223853349685669, "train/loss_error": 0.5722648501396179, "train/loss_total": 0.6422889828681946 }, { "epoch": 0.32300293881912906, "step": 1209, "train/loss_ctc": 0.6561816930770874, "train/loss_error": 0.4707898199558258, "train/loss_total": 0.5078681707382202 }, { "epoch": 0.32327010419449637, "grad_norm": 0.8864442706108093, "learning_rate": 2.8065188351589635e-05, "loss": 0.6104, "step": 1210 }, { "epoch": 0.32327010419449637, "step": 1210, "train/loss_ctc": 1.0907044410705566, "train/loss_error": 0.47488948702812195, "train/loss_total": 0.5980525016784668 }, { "epoch": 0.32353726956986373, "step": 1211, "train/loss_ctc": 0.8288835287094116, "train/loss_error": 0.5309366583824158, "train/loss_total": 0.5905260443687439 }, { "epoch": 0.3238044349452311, "step": 1212, "train/loss_ctc": 0.5850517749786377, "train/loss_error": 0.5499856472015381, "train/loss_total": 0.5569988489151001 }, { "epoch": 0.32407160032059845, "step": 1213, "train/loss_ctc": 0.7487515211105347, "train/loss_error": 0.5212450623512268, "train/loss_total": 0.5667463541030884 }, { "epoch": 0.3243387656959658, "step": 1214, "train/loss_ctc": 1.500053882598877, "train/loss_error": 0.5490295886993408, "train/loss_total": 0.739234447479248 }, { "epoch": 0.3246059310713332, "step": 1215, "train/loss_ctc": 0.6086791157722473, "train/loss_error": 0.46048128604888916, "train/loss_total": 0.49012088775634766 }, { "epoch": 0.3248730964467005, "step": 1216, "train/loss_ctc": 0.9172284603118896, 
"train/loss_error": 0.5041053295135498, "train/loss_total": 0.5867300033569336 }, { "epoch": 0.32514026182206784, "step": 1217, "train/loss_ctc": 1.5871620178222656, "train/loss_error": 0.575717568397522, "train/loss_total": 0.7780064344406128 }, { "epoch": 0.3254074271974352, "step": 1218, "train/loss_ctc": 0.678202748298645, "train/loss_error": 0.5024054050445557, "train/loss_total": 0.5375648736953735 }, { "epoch": 0.32567459257280257, "step": 1219, "train/loss_ctc": 0.9859607219696045, "train/loss_error": 0.5227534174919128, "train/loss_total": 0.6153948903083801 }, { "epoch": 0.32594175794816993, "grad_norm": 2.6925344467163086, "learning_rate": 2.8049158429067593e-05, "loss": 0.6059, "step": 1220 }, { "epoch": 0.32594175794816993, "step": 1220, "train/loss_ctc": 1.0687199831008911, "train/loss_error": 0.5000377297401428, "train/loss_total": 0.6137741804122925 }, { "epoch": 0.3262089233235373, "step": 1221, "train/loss_ctc": 0.8274298906326294, "train/loss_error": 0.47328081727027893, "train/loss_total": 0.5441106557846069 }, { "epoch": 0.3264760886989046, "step": 1222, "train/loss_ctc": 1.2141339778900146, "train/loss_error": 0.4800844192504883, "train/loss_total": 0.6268943548202515 }, { "epoch": 0.32674325407427196, "step": 1223, "train/loss_ctc": 0.8794888257980347, "train/loss_error": 0.5163245797157288, "train/loss_total": 0.5889574289321899 }, { "epoch": 0.3270104194496393, "step": 1224, "train/loss_ctc": 1.0537922382354736, "train/loss_error": 0.5588509440422058, "train/loss_total": 0.6578391790390015 }, { "epoch": 0.3272775848250067, "step": 1225, "train/loss_ctc": 0.7462788224220276, "train/loss_error": 0.5043544173240662, "train/loss_total": 0.5527393221855164 }, { "epoch": 0.32754475020037405, "step": 1226, "train/loss_ctc": 0.8059045076370239, "train/loss_error": 0.46877822279930115, "train/loss_total": 0.5362035036087036 }, { "epoch": 0.3278119155757414, "step": 1227, "train/loss_ctc": 1.178783893585205, "train/loss_error": 0.51082843542099, 
"train/loss_total": 0.6444195508956909 }, { "epoch": 0.3280790809511087, "step": 1228, "train/loss_ctc": 0.8686504364013672, "train/loss_error": 0.5392065048217773, "train/loss_total": 0.6050953269004822 }, { "epoch": 0.3283462463264761, "step": 1229, "train/loss_ctc": 0.8399051427841187, "train/loss_error": 0.6114503145217896, "train/loss_total": 0.6571413278579712 }, { "epoch": 0.32861341170184344, "grad_norm": 1.3995834589004517, "learning_rate": 2.803312850654555e-05, "loss": 0.6027, "step": 1230 }, { "epoch": 0.32861341170184344, "step": 1230, "train/loss_ctc": 0.6962398290634155, "train/loss_error": 0.4812891185283661, "train/loss_total": 0.5242792963981628 }, { "epoch": 0.3288805770772108, "step": 1231, "train/loss_ctc": 0.7593073844909668, "train/loss_error": 0.49292975664138794, "train/loss_total": 0.5462052822113037 }, { "epoch": 0.32914774245257816, "step": 1232, "train/loss_ctc": 0.9522860050201416, "train/loss_error": 0.4875389635562897, "train/loss_total": 0.580488383769989 }, { "epoch": 0.3294149078279455, "step": 1233, "train/loss_ctc": 0.9924113154411316, "train/loss_error": 0.4979742169380188, "train/loss_total": 0.5968616604804993 }, { "epoch": 0.32968207320331283, "step": 1234, "train/loss_ctc": 1.1486269235610962, "train/loss_error": 0.4670983850955963, "train/loss_total": 0.6034041047096252 }, { "epoch": 0.3299492385786802, "step": 1235, "train/loss_ctc": 0.5199682712554932, "train/loss_error": 0.48233500123023987, "train/loss_total": 0.4898616671562195 }, { "epoch": 0.33021640395404755, "step": 1236, "train/loss_ctc": 0.5342891216278076, "train/loss_error": 0.49245205521583557, "train/loss_total": 0.5008194446563721 }, { "epoch": 0.3304835693294149, "step": 1237, "train/loss_ctc": 0.9325498938560486, "train/loss_error": 0.49704068899154663, "train/loss_total": 0.5841425657272339 }, { "epoch": 0.3307507347047823, "step": 1238, "train/loss_ctc": 1.2508975267410278, "train/loss_error": 0.43074971437454224, "train/loss_total": 0.5947792530059814 
}, { "epoch": 0.3310179000801496, "step": 1239, "train/loss_ctc": 1.1018468141555786, "train/loss_error": 0.4242895245552063, "train/loss_total": 0.5598009824752808 }, { "epoch": 0.33128506545551695, "grad_norm": 1.7967405319213867, "learning_rate": 2.801709858402351e-05, "loss": 0.5581, "step": 1240 }, { "epoch": 0.33128506545551695, "step": 1240, "train/loss_ctc": 1.286418080329895, "train/loss_error": 0.5015324950218201, "train/loss_total": 0.6585096120834351 }, { "epoch": 0.3315522308308843, "step": 1241, "train/loss_ctc": 1.1832585334777832, "train/loss_error": 0.4744769334793091, "train/loss_total": 0.616233229637146 }, { "epoch": 0.33181939620625167, "step": 1242, "train/loss_ctc": 1.0564894676208496, "train/loss_error": 0.5265745520591736, "train/loss_total": 0.6325575113296509 }, { "epoch": 0.33208656158161903, "step": 1243, "train/loss_ctc": 0.8288829326629639, "train/loss_error": 0.4652668833732605, "train/loss_total": 0.5379900932312012 }, { "epoch": 0.3323537269569864, "step": 1244, "train/loss_ctc": 0.6815218925476074, "train/loss_error": 0.507627010345459, "train/loss_total": 0.5424059629440308 }, { "epoch": 0.3326208923323537, "step": 1245, "train/loss_ctc": 0.514948308467865, "train/loss_error": 0.5829646587371826, "train/loss_total": 0.5693613886833191 }, { "epoch": 0.33288805770772106, "step": 1246, "train/loss_ctc": 0.7137680053710938, "train/loss_error": 0.49911564588546753, "train/loss_total": 0.5420461297035217 }, { "epoch": 0.3331552230830884, "step": 1247, "train/loss_ctc": 0.7501192092895508, "train/loss_error": 0.5638498067855835, "train/loss_total": 0.601103663444519 }, { "epoch": 0.3334223884584558, "step": 1248, "train/loss_ctc": 1.064721941947937, "train/loss_error": 0.4570368826389313, "train/loss_total": 0.5785739421844482 }, { "epoch": 0.33368955383382315, "step": 1249, "train/loss_ctc": 0.7108680009841919, "train/loss_error": 0.45567139983177185, "train/loss_total": 0.5067107081413269 }, { "epoch": 0.3339567192091905, "grad_norm": 
1.921706199645996, "learning_rate": 2.8001068661501467e-05, "loss": 0.5785, "step": 1250 }, { "epoch": 0.3339567192091905, "step": 1250, "train/loss_ctc": 1.2367429733276367, "train/loss_error": 0.5384478569030762, "train/loss_total": 0.6781069040298462 }, { "epoch": 0.3342238845845578, "step": 1251, "train/loss_ctc": 0.4833514392375946, "train/loss_error": 0.48778676986694336, "train/loss_total": 0.486899733543396 }, { "epoch": 0.3344910499599252, "step": 1252, "train/loss_ctc": 0.6435609459877014, "train/loss_error": 0.4484773278236389, "train/loss_total": 0.4874940514564514 }, { "epoch": 0.33475821533529254, "step": 1253, "train/loss_ctc": 0.7232162952423096, "train/loss_error": 0.5714797973632812, "train/loss_total": 0.601827085018158 }, { "epoch": 0.3350253807106599, "step": 1254, "train/loss_ctc": 0.7312846183776855, "train/loss_error": 0.491176038980484, "train/loss_total": 0.5391978025436401 }, { "epoch": 0.33529254608602727, "step": 1255, "train/loss_ctc": 0.8256241083145142, "train/loss_error": 0.46788567304611206, "train/loss_total": 0.5394333600997925 }, { "epoch": 0.3355597114613946, "step": 1256, "train/loss_ctc": 1.2614846229553223, "train/loss_error": 0.521275520324707, "train/loss_total": 0.669317364692688 }, { "epoch": 0.33582687683676193, "step": 1257, "train/loss_ctc": 0.8504688739776611, "train/loss_error": 0.4339272379875183, "train/loss_total": 0.5172355771064758 }, { "epoch": 0.3360940422121293, "step": 1258, "train/loss_ctc": 1.0750811100006104, "train/loss_error": 0.5223408937454224, "train/loss_total": 0.6328889727592468 }, { "epoch": 0.33636120758749666, "step": 1259, "train/loss_ctc": 0.7571320533752441, "train/loss_error": 0.4698435366153717, "train/loss_total": 0.5273012518882751 }, { "epoch": 0.336628372962864, "grad_norm": 0.8749204874038696, "learning_rate": 2.7985038738979432e-05, "loss": 0.568, "step": 1260 }, { "epoch": 0.336628372962864, "step": 1260, "train/loss_ctc": 0.5942346453666687, "train/loss_error": 
0.46810826659202576, "train/loss_total": 0.4933335483074188 }, { "epoch": 0.3368955383382314, "step": 1261, "train/loss_ctc": 1.4166526794433594, "train/loss_error": 0.5611174702644348, "train/loss_total": 0.7322245240211487 }, { "epoch": 0.33716270371359874, "step": 1262, "train/loss_ctc": 0.7337155938148499, "train/loss_error": 0.5615863800048828, "train/loss_total": 0.5960122346878052 }, { "epoch": 0.33742986908896605, "step": 1263, "train/loss_ctc": 1.3358657360076904, "train/loss_error": 0.4986436367034912, "train/loss_total": 0.6660880446434021 }, { "epoch": 0.3376970344643334, "step": 1264, "train/loss_ctc": 0.9424523115158081, "train/loss_error": 0.538814127445221, "train/loss_total": 0.6195417642593384 }, { "epoch": 0.3379641998397008, "step": 1265, "train/loss_ctc": 1.5493066310882568, "train/loss_error": 0.4550990164279938, "train/loss_total": 0.6739405393600464 }, { "epoch": 0.33823136521506814, "step": 1266, "train/loss_ctc": 1.0688118934631348, "train/loss_error": 0.5100860595703125, "train/loss_total": 0.6218312382698059 }, { "epoch": 0.3384985305904355, "step": 1267, "train/loss_ctc": 0.7050684094429016, "train/loss_error": 0.46170955896377563, "train/loss_total": 0.5103813409805298 }, { "epoch": 0.3387656959658028, "step": 1268, "train/loss_ctc": 0.8727812767028809, "train/loss_error": 0.4930606186389923, "train/loss_total": 0.5690047740936279 }, { "epoch": 0.33903286134117017, "step": 1269, "train/loss_ctc": 1.172179937362671, "train/loss_error": 0.5172555446624756, "train/loss_total": 0.6482404470443726 }, { "epoch": 0.33930002671653753, "grad_norm": 1.0886591672897339, "learning_rate": 2.796900881645739e-05, "loss": 0.6131, "step": 1270 }, { "epoch": 0.33930002671653753, "step": 1270, "train/loss_ctc": 0.2666587829589844, "train/loss_error": 0.519524872303009, "train/loss_total": 0.46895167231559753 }, { "epoch": 0.3395671920919049, "step": 1271, "train/loss_ctc": 1.0465748310089111, "train/loss_error": 0.5076114535331726, "train/loss_total": 
0.6154041290283203 }, { "epoch": 0.33983435746727225, "step": 1272, "train/loss_ctc": 0.6689584255218506, "train/loss_error": 0.4648745656013489, "train/loss_total": 0.5056913495063782 }, { "epoch": 0.3401015228426396, "step": 1273, "train/loss_ctc": 1.2626296281814575, "train/loss_error": 0.5022873282432556, "train/loss_total": 0.6543557643890381 }, { "epoch": 0.3403686882180069, "step": 1274, "train/loss_ctc": 0.4519091248512268, "train/loss_error": 0.5234079957008362, "train/loss_total": 0.5091082453727722 }, { "epoch": 0.3406358535933743, "step": 1275, "train/loss_ctc": 1.00544273853302, "train/loss_error": 0.5095899105072021, "train/loss_total": 0.6087604761123657 }, { "epoch": 0.34090301896874164, "step": 1276, "train/loss_ctc": 1.0554704666137695, "train/loss_error": 0.500687837600708, "train/loss_total": 0.6116443872451782 }, { "epoch": 0.341170184344109, "step": 1277, "train/loss_ctc": 1.0412545204162598, "train/loss_error": 0.4703788459300995, "train/loss_total": 0.5845539569854736 }, { "epoch": 0.34143734971947637, "step": 1278, "train/loss_ctc": 0.8310658931732178, "train/loss_error": 0.46580827236175537, "train/loss_total": 0.5388598442077637 }, { "epoch": 0.34170451509484373, "step": 1279, "train/loss_ctc": 0.8772612810134888, "train/loss_error": 0.4922937750816345, "train/loss_total": 0.5692873001098633 }, { "epoch": 0.34197168047021104, "grad_norm": 0.8455444574356079, "learning_rate": 2.7952978893935348e-05, "loss": 0.5667, "step": 1280 }, { "epoch": 0.34197168047021104, "step": 1280, "train/loss_ctc": 0.7335855960845947, "train/loss_error": 0.47260162234306335, "train/loss_total": 0.5247983932495117 }, { "epoch": 0.3422388458455784, "step": 1281, "train/loss_ctc": 1.2044939994812012, "train/loss_error": 0.4964819550514221, "train/loss_total": 0.638084352016449 }, { "epoch": 0.34250601122094576, "step": 1282, "train/loss_ctc": 0.6626138687133789, "train/loss_error": 0.5294207334518433, "train/loss_total": 0.5560593605041504 }, { "epoch": 
0.3427731765963131, "step": 1283, "train/loss_ctc": 1.024863362312317, "train/loss_error": 0.5008829236030579, "train/loss_total": 0.6056790351867676 }, { "epoch": 0.3430403419716805, "step": 1284, "train/loss_ctc": 0.9381154775619507, "train/loss_error": 0.5559343099594116, "train/loss_total": 0.6323705315589905 }, { "epoch": 0.34330750734704785, "step": 1285, "train/loss_ctc": 1.1063910722732544, "train/loss_error": 0.5895324945449829, "train/loss_total": 0.6929042339324951 }, { "epoch": 0.34357467272241515, "step": 1286, "train/loss_ctc": 1.6485562324523926, "train/loss_error": 0.5040296912193298, "train/loss_total": 0.7329350113868713 }, { "epoch": 0.3438418380977825, "step": 1287, "train/loss_ctc": 0.9537802934646606, "train/loss_error": 0.45680782198905945, "train/loss_total": 0.5562023520469666 }, { "epoch": 0.3441090034731499, "step": 1288, "train/loss_ctc": 0.6637752056121826, "train/loss_error": 0.5440565347671509, "train/loss_total": 0.568000316619873 }, { "epoch": 0.34437616884851724, "step": 1289, "train/loss_ctc": 0.7251231670379639, "train/loss_error": 0.5064791440963745, "train/loss_total": 0.5502079725265503 }, { "epoch": 0.3446433342238846, "grad_norm": 2.0624754428863525, "learning_rate": 2.7936948971413306e-05, "loss": 0.6057, "step": 1290 }, { "epoch": 0.3446433342238846, "step": 1290, "train/loss_ctc": 0.4138530492782593, "train/loss_error": 0.5121077299118042, "train/loss_total": 0.4924567937850952 }, { "epoch": 0.34491049959925196, "step": 1291, "train/loss_ctc": 0.9353371262550354, "train/loss_error": 0.48637092113494873, "train/loss_total": 0.576164186000824 }, { "epoch": 0.34517766497461927, "step": 1292, "train/loss_ctc": 1.2275171279907227, "train/loss_error": 0.5480614304542542, "train/loss_total": 0.6839525699615479 }, { "epoch": 0.34544483034998663, "step": 1293, "train/loss_ctc": 0.6294554471969604, "train/loss_error": 0.4791750907897949, "train/loss_total": 0.5092312097549438 }, { "epoch": 0.345711995725354, "step": 1294, 
"train/loss_ctc": 0.4832063615322113, "train/loss_error": 0.5438731908798218, "train/loss_total": 0.5317398309707642 }, { "epoch": 0.34597916110072136, "step": 1295, "train/loss_ctc": 1.383489966392517, "train/loss_error": 0.4534991681575775, "train/loss_total": 0.6394973397254944 }, { "epoch": 0.3462463264760887, "step": 1296, "train/loss_ctc": 1.0359470844268799, "train/loss_error": 0.4722942113876343, "train/loss_total": 0.5850247740745544 }, { "epoch": 0.346513491851456, "step": 1297, "train/loss_ctc": 1.1966478824615479, "train/loss_error": 0.5299992561340332, "train/loss_total": 0.663329005241394 }, { "epoch": 0.3467806572268234, "step": 1298, "train/loss_ctc": 1.2298829555511475, "train/loss_error": 0.4851559102535248, "train/loss_total": 0.6341013312339783 }, { "epoch": 0.34704782260219075, "step": 1299, "train/loss_ctc": 0.5438444018363953, "train/loss_error": 0.46817001700401306, "train/loss_total": 0.483304888010025 }, { "epoch": 0.3473149879775581, "grad_norm": 1.1194219589233398, "learning_rate": 2.7920919048891264e-05, "loss": 0.5799, "step": 1300 }, { "epoch": 0.3473149879775581, "step": 1300, "train/loss_ctc": 1.482816457748413, "train/loss_error": 0.43204671144485474, "train/loss_total": 0.6422007083892822 }, { "epoch": 0.34758215335292547, "step": 1301, "train/loss_ctc": 0.8261309862136841, "train/loss_error": 0.4804233908653259, "train/loss_total": 0.5495648980140686 }, { "epoch": 0.34784931872829283, "step": 1302, "train/loss_ctc": 1.8418097496032715, "train/loss_error": 0.5387570858001709, "train/loss_total": 0.7993676662445068 }, { "epoch": 0.34811648410366014, "step": 1303, "train/loss_ctc": 0.8665562868118286, "train/loss_error": 0.5623345971107483, "train/loss_total": 0.6231789588928223 }, { "epoch": 0.3483836494790275, "step": 1304, "train/loss_ctc": 0.7193071842193604, "train/loss_error": 0.4594342112541199, "train/loss_total": 0.511408805847168 }, { "epoch": 0.34865081485439486, "step": 1305, "train/loss_ctc": 0.8027212023735046, 
"train/loss_error": 0.49382245540618896, "train/loss_total": 0.5556021928787231 }, { "epoch": 0.3489179802297622, "step": 1306, "train/loss_ctc": 0.4052063524723053, "train/loss_error": 0.5219091773033142, "train/loss_total": 0.4985686242580414 }, { "epoch": 0.3491851456051296, "step": 1307, "train/loss_ctc": 1.077406644821167, "train/loss_error": 0.49176719784736633, "train/loss_total": 0.6088950634002686 }, { "epoch": 0.34945231098049695, "step": 1308, "train/loss_ctc": 0.7075138092041016, "train/loss_error": 0.5016361474990845, "train/loss_total": 0.5428116917610168 }, { "epoch": 0.34971947635586426, "step": 1309, "train/loss_ctc": 0.892562747001648, "train/loss_error": 0.5173377990722656, "train/loss_total": 0.5923827886581421 }, { "epoch": 0.3499866417312316, "grad_norm": 0.6968734860420227, "learning_rate": 2.7904889126369223e-05, "loss": 0.5924, "step": 1310 }, { "epoch": 0.3499866417312316, "step": 1310, "train/loss_ctc": 1.1308441162109375, "train/loss_error": 0.5408658385276794, "train/loss_total": 0.658861517906189 }, { "epoch": 0.350253807106599, "step": 1311, "train/loss_ctc": 0.43064528703689575, "train/loss_error": 0.5408938527107239, "train/loss_total": 0.5188441276550293 }, { "epoch": 0.35052097248196634, "step": 1312, "train/loss_ctc": 1.1313374042510986, "train/loss_error": 0.49359962344169617, "train/loss_total": 0.6211471557617188 }, { "epoch": 0.3507881378573337, "step": 1313, "train/loss_ctc": 0.44761186838150024, "train/loss_error": 0.5244707465171814, "train/loss_total": 0.509099006652832 }, { "epoch": 0.35105530323270107, "step": 1314, "train/loss_ctc": 1.3666654825210571, "train/loss_error": 0.4817039370536804, "train/loss_total": 0.6586962938308716 }, { "epoch": 0.3513224686080684, "step": 1315, "train/loss_ctc": 1.2818922996520996, "train/loss_error": 0.5559588670730591, "train/loss_total": 0.7011455297470093 }, { "epoch": 0.35158963398343573, "step": 1316, "train/loss_ctc": 0.6996369361877441, "train/loss_error": 0.4785761833190918, 
"train/loss_total": 0.5227883458137512 }, { "epoch": 0.3518567993588031, "step": 1317, "train/loss_ctc": 1.7904115915298462, "train/loss_error": 0.5038840770721436, "train/loss_total": 0.7611895799636841 }, { "epoch": 0.35212396473417046, "step": 1318, "train/loss_ctc": 0.5521658658981323, "train/loss_error": 0.5094040632247925, "train/loss_total": 0.5179564356803894 }, { "epoch": 0.3523911301095378, "step": 1319, "train/loss_ctc": 0.9490521550178528, "train/loss_error": 0.4624759256839752, "train/loss_total": 0.5597912073135376 }, { "epoch": 0.3526582954849052, "grad_norm": 0.9850214719772339, "learning_rate": 2.7888859203847184e-05, "loss": 0.603, "step": 1320 }, { "epoch": 0.3526582954849052, "step": 1320, "train/loss_ctc": 0.6687244772911072, "train/loss_error": 0.5270805358886719, "train/loss_total": 0.55540931224823 }, { "epoch": 0.3529254608602725, "step": 1321, "train/loss_ctc": 0.815021276473999, "train/loss_error": 0.4706469178199768, "train/loss_total": 0.5395218133926392 }, { "epoch": 0.35319262623563985, "step": 1322, "train/loss_ctc": 0.6880671977996826, "train/loss_error": 0.5416601300239563, "train/loss_total": 0.5709415674209595 }, { "epoch": 0.3534597916110072, "step": 1323, "train/loss_ctc": 0.8600598573684692, "train/loss_error": 0.4885933995246887, "train/loss_total": 0.5628867149353027 }, { "epoch": 0.3537269569863746, "step": 1324, "train/loss_ctc": 0.8157669305801392, "train/loss_error": 0.538620114326477, "train/loss_total": 0.5940495133399963 }, { "epoch": 0.35399412236174194, "step": 1325, "train/loss_ctc": 1.298147201538086, "train/loss_error": 0.5466628670692444, "train/loss_total": 0.6969597339630127 }, { "epoch": 0.3542612877371093, "step": 1326, "train/loss_ctc": 0.7314004898071289, "train/loss_error": 0.49190565943717957, "train/loss_total": 0.5398046374320984 }, { "epoch": 0.3545284531124766, "step": 1327, "train/loss_ctc": 0.9448359608650208, "train/loss_error": 0.48946091532707214, "train/loss_total": 0.5805359482765198 }, { 
"epoch": 0.35479561848784397, "step": 1328, "train/loss_ctc": 0.8348202705383301, "train/loss_error": 0.4639679789543152, "train/loss_total": 0.5381384491920471 }, { "epoch": 0.35506278386321133, "step": 1329, "train/loss_ctc": 0.6463344693183899, "train/loss_error": 0.5180667638778687, "train/loss_total": 0.5437203049659729 }, { "epoch": 0.3553299492385787, "grad_norm": 0.966091513633728, "learning_rate": 2.7872829281325142e-05, "loss": 0.5722, "step": 1330 }, { "epoch": 0.3553299492385787, "step": 1330, "train/loss_ctc": 1.3954963684082031, "train/loss_error": 0.48612910509109497, "train/loss_total": 0.6680026054382324 }, { "epoch": 0.35559711461394605, "step": 1331, "train/loss_ctc": 0.7478389143943787, "train/loss_error": 0.5604743957519531, "train/loss_total": 0.5979472994804382 }, { "epoch": 0.35586427998931336, "step": 1332, "train/loss_ctc": 0.7232397794723511, "train/loss_error": 0.4613722860813141, "train/loss_total": 0.5137457847595215 }, { "epoch": 0.3561314453646807, "step": 1333, "train/loss_ctc": 2.016106128692627, "train/loss_error": 0.5101702809333801, "train/loss_total": 0.8113574385643005 }, { "epoch": 0.3563986107400481, "step": 1334, "train/loss_ctc": 0.5079103112220764, "train/loss_error": 0.5118476748466492, "train/loss_total": 0.5110602378845215 }, { "epoch": 0.35666577611541544, "step": 1335, "train/loss_ctc": 1.1658649444580078, "train/loss_error": 0.5172589421272278, "train/loss_total": 0.6469801664352417 }, { "epoch": 0.3569329414907828, "step": 1336, "train/loss_ctc": 1.3818882703781128, "train/loss_error": 0.40322887897491455, "train/loss_total": 0.5989607572555542 }, { "epoch": 0.35720010686615017, "step": 1337, "train/loss_ctc": 1.0993391275405884, "train/loss_error": 0.5444062948226929, "train/loss_total": 0.6553928852081299 }, { "epoch": 0.3574672722415175, "step": 1338, "train/loss_ctc": 0.9489324688911438, "train/loss_error": 0.4802483022212982, "train/loss_total": 0.5739850997924805 }, { "epoch": 0.35773443761688484, "step": 
1339, "train/loss_ctc": 0.6405875086784363, "train/loss_error": 0.4706028401851654, "train/loss_total": 0.5045998096466064 }, { "epoch": 0.3580016029922522, "grad_norm": 1.9874635934829712, "learning_rate": 2.78567993588031e-05, "loss": 0.6082, "step": 1340 }, { "epoch": 0.3580016029922522, "step": 1340, "train/loss_ctc": 0.6303105354309082, "train/loss_error": 0.4866302013397217, "train/loss_total": 0.51536625623703 }, { "epoch": 0.35826876836761956, "step": 1341, "train/loss_ctc": 0.9091854691505432, "train/loss_error": 0.4501281976699829, "train/loss_total": 0.5419396758079529 }, { "epoch": 0.3585359337429869, "step": 1342, "train/loss_ctc": 0.9161957502365112, "train/loss_error": 0.47065210342407227, "train/loss_total": 0.5597608089447021 }, { "epoch": 0.3588030991183543, "step": 1343, "train/loss_ctc": 1.039245843887329, "train/loss_error": 0.46400564908981323, "train/loss_total": 0.5790536999702454 }, { "epoch": 0.3590702644937216, "step": 1344, "train/loss_ctc": 0.7306218147277832, "train/loss_error": 0.5043672919273376, "train/loss_total": 0.5496182441711426 }, { "epoch": 0.35933742986908895, "step": 1345, "train/loss_ctc": 0.7897089719772339, "train/loss_error": 0.6303969025611877, "train/loss_total": 0.6622593402862549 }, { "epoch": 0.3596045952444563, "step": 1346, "train/loss_ctc": 0.45364731550216675, "train/loss_error": 0.4861396849155426, "train/loss_total": 0.47964122891426086 }, { "epoch": 0.3598717606198237, "step": 1347, "train/loss_ctc": 0.6512826681137085, "train/loss_error": 0.5142549872398376, "train/loss_total": 0.5416605472564697 }, { "epoch": 0.36013892599519104, "step": 1348, "train/loss_ctc": 0.6267908215522766, "train/loss_error": 0.525738000869751, "train/loss_total": 0.5459485650062561 }, { "epoch": 0.3604060913705584, "step": 1349, "train/loss_ctc": 0.691318154335022, "train/loss_error": 0.48962536454200745, "train/loss_total": 0.5299639105796814 }, { "epoch": 0.3606732567459257, "grad_norm": 1.8506935834884644, "learning_rate": 
2.7840769436281058e-05, "loss": 0.5505, "step": 1350 }, { "epoch": 0.3606732567459257, "step": 1350, "train/loss_ctc": 0.7966383099555969, "train/loss_error": 0.4745218753814697, "train/loss_total": 0.538945198059082 }, { "epoch": 0.36094042212129307, "step": 1351, "train/loss_ctc": 0.480783075094223, "train/loss_error": 0.45540541410446167, "train/loss_total": 0.4604809582233429 }, { "epoch": 0.36120758749666043, "step": 1352, "train/loss_ctc": 1.1520073413848877, "train/loss_error": 0.4944991171360016, "train/loss_total": 0.6260007619857788 }, { "epoch": 0.3614747528720278, "step": 1353, "train/loss_ctc": 0.7998361587524414, "train/loss_error": 0.5137830972671509, "train/loss_total": 0.5709937214851379 }, { "epoch": 0.36174191824739516, "step": 1354, "train/loss_ctc": 0.8538942337036133, "train/loss_error": 0.5314467549324036, "train/loss_total": 0.5959362387657166 }, { "epoch": 0.3620090836227625, "step": 1355, "train/loss_ctc": 0.8908560276031494, "train/loss_error": 0.49223798513412476, "train/loss_total": 0.5719615817070007 }, { "epoch": 0.3622762489981298, "step": 1356, "train/loss_ctc": 1.310489296913147, "train/loss_error": 0.47445908188819885, "train/loss_total": 0.6416651010513306 }, { "epoch": 0.3625434143734972, "step": 1357, "train/loss_ctc": 0.695813000202179, "train/loss_error": 0.533805787563324, "train/loss_total": 0.566207230091095 }, { "epoch": 0.36281057974886455, "step": 1358, "train/loss_ctc": 0.7508158683776855, "train/loss_error": 0.47990313172340393, "train/loss_total": 0.5340856909751892 }, { "epoch": 0.3630777451242319, "step": 1359, "train/loss_ctc": 0.5795724987983704, "train/loss_error": 0.589486300945282, "train/loss_total": 0.5875035524368286 }, { "epoch": 0.36334491049959927, "grad_norm": 1.6466717720031738, "learning_rate": 2.7824739513759016e-05, "loss": 0.5694, "step": 1360 }, { "epoch": 0.36334491049959927, "step": 1360, "train/loss_ctc": 0.5989464521408081, "train/loss_error": 0.5371760129928589, "train/loss_total": 
0.5495300889015198 }, { "epoch": 0.3636120758749666, "step": 1361, "train/loss_ctc": 0.7951000332832336, "train/loss_error": 0.4964568018913269, "train/loss_total": 0.5561854839324951 }, { "epoch": 0.36387924125033394, "step": 1362, "train/loss_ctc": 0.46946102380752563, "train/loss_error": 0.5076707601547241, "train/loss_total": 0.5000287890434265 }, { "epoch": 0.3641464066257013, "step": 1363, "train/loss_ctc": 1.5899513959884644, "train/loss_error": 0.4328993856906891, "train/loss_total": 0.6643097996711731 }, { "epoch": 0.36441357200106866, "step": 1364, "train/loss_ctc": 0.491120845079422, "train/loss_error": 0.4437088966369629, "train/loss_total": 0.45319128036499023 }, { "epoch": 0.364680737376436, "step": 1365, "train/loss_ctc": 0.6706002950668335, "train/loss_error": 0.5506668090820312, "train/loss_total": 0.5746535062789917 }, { "epoch": 0.3649479027518034, "step": 1366, "train/loss_ctc": 1.468866229057312, "train/loss_error": 0.45657607913017273, "train/loss_total": 0.6590341329574585 }, { "epoch": 0.3652150681271707, "step": 1367, "train/loss_ctc": 1.3581061363220215, "train/loss_error": 0.4841386675834656, "train/loss_total": 0.6589322090148926 }, { "epoch": 0.36548223350253806, "step": 1368, "train/loss_ctc": 0.3363184630870819, "train/loss_error": 0.49015286564826965, "train/loss_total": 0.4593859910964966 }, { "epoch": 0.3657493988779054, "step": 1369, "train/loss_ctc": 0.6967446804046631, "train/loss_error": 0.48673292994499207, "train/loss_total": 0.5287352800369263 }, { "epoch": 0.3660165642532728, "grad_norm": 3.4918363094329834, "learning_rate": 2.7808709591236974e-05, "loss": 0.5604, "step": 1370 }, { "epoch": 0.3660165642532728, "step": 1370, "train/loss_ctc": 0.42983877658843994, "train/loss_error": 0.46941035985946655, "train/loss_total": 0.4614960551261902 }, { "epoch": 0.36628372962864014, "step": 1371, "train/loss_ctc": 1.307578444480896, "train/loss_error": 0.5661677122116089, "train/loss_total": 0.7144498825073242 }, { "epoch": 
0.3665508950040075, "step": 1372, "train/loss_ctc": 0.9804920554161072, "train/loss_error": 0.4337158501148224, "train/loss_total": 0.5430710911750793 }, { "epoch": 0.3668180603793748, "step": 1373, "train/loss_ctc": 1.1736512184143066, "train/loss_error": 0.4696618318557739, "train/loss_total": 0.6104596853256226 }, { "epoch": 0.3670852257547422, "step": 1374, "train/loss_ctc": 0.5188969373703003, "train/loss_error": 0.4738735854625702, "train/loss_total": 0.48287826776504517 }, { "epoch": 0.36735239113010953, "step": 1375, "train/loss_ctc": 0.8583737015724182, "train/loss_error": 0.5151825547218323, "train/loss_total": 0.5838208198547363 }, { "epoch": 0.3676195565054769, "step": 1376, "train/loss_ctc": 1.2627146244049072, "train/loss_error": 0.5301622748374939, "train/loss_total": 0.6766727566719055 }, { "epoch": 0.36788672188084426, "step": 1377, "train/loss_ctc": 0.7598740458488464, "train/loss_error": 0.5004114508628845, "train/loss_total": 0.5523039698600769 }, { "epoch": 0.3681538872562116, "step": 1378, "train/loss_ctc": 0.3907318711280823, "train/loss_error": 0.47353389859199524, "train/loss_total": 0.45697349309921265 }, { "epoch": 0.3684210526315789, "step": 1379, "train/loss_ctc": 1.6409685611724854, "train/loss_error": 0.5365245342254639, "train/loss_total": 0.757413387298584 }, { "epoch": 0.3686882180069463, "grad_norm": 2.0806612968444824, "learning_rate": 2.7792679668714936e-05, "loss": 0.584, "step": 1380 }, { "epoch": 0.3686882180069463, "step": 1380, "train/loss_ctc": 0.5355179309844971, "train/loss_error": 0.4903002679347992, "train/loss_total": 0.4993438124656677 }, { "epoch": 0.36895538338231365, "step": 1381, "train/loss_ctc": 0.8565051555633545, "train/loss_error": 0.5147647857666016, "train/loss_total": 0.5831128358840942 }, { "epoch": 0.369222548757681, "step": 1382, "train/loss_ctc": 0.41517388820648193, "train/loss_error": 0.46340030431747437, "train/loss_total": 0.4537550210952759 }, { "epoch": 0.3694897141330484, "step": 1383, 
"train/loss_ctc": 1.329282522201538, "train/loss_error": 0.4653730094432831, "train/loss_total": 0.638154923915863 }, { "epoch": 0.36975687950841574, "step": 1384, "train/loss_ctc": 0.5735369920730591, "train/loss_error": 0.5413058996200562, "train/loss_total": 0.5477521419525146 }, { "epoch": 0.37002404488378304, "step": 1385, "train/loss_ctc": 0.6456788778305054, "train/loss_error": 0.478588342666626, "train/loss_total": 0.5120064616203308 }, { "epoch": 0.3702912102591504, "step": 1386, "train/loss_ctc": 0.6861743927001953, "train/loss_error": 0.4642569124698639, "train/loss_total": 0.5086404085159302 }, { "epoch": 0.37055837563451777, "step": 1387, "train/loss_ctc": 0.3999076187610626, "train/loss_error": 0.5231343507766724, "train/loss_total": 0.49848902225494385 }, { "epoch": 0.37082554100988513, "step": 1388, "train/loss_ctc": 0.5054029226303101, "train/loss_error": 0.5125442147254944, "train/loss_total": 0.5111159682273865 }, { "epoch": 0.3710927063852525, "step": 1389, "train/loss_ctc": 0.7229143381118774, "train/loss_error": 0.48731592297554016, "train/loss_total": 0.5344356298446655 }, { "epoch": 0.3713598717606198, "grad_norm": 1.0473378896713257, "learning_rate": 2.7776649746192894e-05, "loss": 0.5287, "step": 1390 }, { "epoch": 0.3713598717606198, "step": 1390, "train/loss_ctc": 0.9617455005645752, "train/loss_error": 0.4662611186504364, "train/loss_total": 0.56535804271698 }, { "epoch": 0.37162703713598716, "step": 1391, "train/loss_ctc": 1.0243473052978516, "train/loss_error": 0.5558444857597351, "train/loss_total": 0.6495450735092163 }, { "epoch": 0.3718942025113545, "step": 1392, "train/loss_ctc": 0.9813262224197388, "train/loss_error": 0.5339643359184265, "train/loss_total": 0.623436689376831 }, { "epoch": 0.3721613678867219, "step": 1393, "train/loss_ctc": 1.558561086654663, "train/loss_error": 0.4971986711025238, "train/loss_total": 0.7094711661338806 }, { "epoch": 0.37242853326208925, "step": 1394, "train/loss_ctc": 0.6074989438056946, 
"train/loss_error": 0.4903053641319275, "train/loss_total": 0.5137441158294678 }, { "epoch": 0.3726956986374566, "step": 1395, "train/loss_ctc": 0.7190775275230408, "train/loss_error": 0.4598960280418396, "train/loss_total": 0.5117323398590088 }, { "epoch": 0.3729628640128239, "step": 1396, "train/loss_ctc": 0.9617069959640503, "train/loss_error": 0.471741646528244, "train/loss_total": 0.5697347521781921 }, { "epoch": 0.3732300293881913, "step": 1397, "train/loss_ctc": 1.0615495443344116, "train/loss_error": 0.5378872156143188, "train/loss_total": 0.6426196694374084 }, { "epoch": 0.37349719476355864, "step": 1398, "train/loss_ctc": 0.8628868460655212, "train/loss_error": 0.4780776798725128, "train/loss_total": 0.5550395250320435 }, { "epoch": 0.373764360138926, "step": 1399, "train/loss_ctc": 0.6847663521766663, "train/loss_error": 0.5334926843643188, "train/loss_total": 0.5637474060058594 }, { "epoch": 0.37403152551429336, "grad_norm": 1.2760288715362549, "learning_rate": 2.7760619823670852e-05, "loss": 0.5904, "step": 1400 }, { "epoch": 0.37403152551429336, "step": 1400, "train/loss_ctc": 0.8142889738082886, "train/loss_error": 0.5045417547225952, "train/loss_total": 0.5664912462234497 }, { "epoch": 0.3742986908896607, "step": 1401, "train/loss_ctc": 0.778323769569397, "train/loss_error": 0.47355031967163086, "train/loss_total": 0.5345050096511841 }, { "epoch": 0.37456585626502803, "step": 1402, "train/loss_ctc": 0.8445566892623901, "train/loss_error": 0.48920130729675293, "train/loss_total": 0.5602723956108093 }, { "epoch": 0.3748330216403954, "step": 1403, "train/loss_ctc": 0.7114061117172241, "train/loss_error": 0.5338283777236938, "train/loss_total": 0.5693439245223999 }, { "epoch": 0.37510018701576275, "step": 1404, "train/loss_ctc": 1.6096851825714111, "train/loss_error": 0.4632621705532074, "train/loss_total": 0.6925467848777771 }, { "epoch": 0.3753673523911301, "step": 1405, "train/loss_ctc": 1.1347882747650146, "train/loss_error": 0.5051612257957458, 
"train/loss_total": 0.6310866475105286 }, { "epoch": 0.3756345177664975, "step": 1406, "train/loss_ctc": 1.288898229598999, "train/loss_error": 0.5010299682617188, "train/loss_total": 0.6586036682128906 }, { "epoch": 0.37590168314186484, "step": 1407, "train/loss_ctc": 0.5638785362243652, "train/loss_error": 0.47630244493484497, "train/loss_total": 0.49381768703460693 }, { "epoch": 0.37616884851723215, "step": 1408, "train/loss_ctc": 0.935575008392334, "train/loss_error": 0.5006293058395386, "train/loss_total": 0.5876184701919556 }, { "epoch": 0.3764360138925995, "step": 1409, "train/loss_ctc": 1.1477608680725098, "train/loss_error": 0.46187466382980347, "train/loss_total": 0.5990519523620605 }, { "epoch": 0.37670317926796687, "grad_norm": 0.9033429026603699, "learning_rate": 2.774458990114881e-05, "loss": 0.5893, "step": 1410 }, { "epoch": 0.37670317926796687, "step": 1410, "train/loss_ctc": 0.6979758739471436, "train/loss_error": 0.4637976586818695, "train/loss_total": 0.5106333494186401 }, { "epoch": 0.37697034464333423, "step": 1411, "train/loss_ctc": 0.9575357437133789, "train/loss_error": 0.49012187123298645, "train/loss_total": 0.583604633808136 }, { "epoch": 0.3772375100187016, "step": 1412, "train/loss_ctc": 0.8240147829055786, "train/loss_error": 0.5683130025863647, "train/loss_total": 0.6194533705711365 }, { "epoch": 0.37750467539406896, "step": 1413, "train/loss_ctc": 0.5912078619003296, "train/loss_error": 0.4717762768268585, "train/loss_total": 0.4956625998020172 }, { "epoch": 0.37777184076943626, "step": 1414, "train/loss_ctc": 1.234787940979004, "train/loss_error": 0.49620041251182556, "train/loss_total": 0.6439179182052612 }, { "epoch": 0.3780390061448036, "step": 1415, "train/loss_ctc": 1.082197666168213, "train/loss_error": 0.5796376466751099, "train/loss_total": 0.6801496744155884 }, { "epoch": 0.378306171520171, "step": 1416, "train/loss_ctc": 1.2989144325256348, "train/loss_error": 0.6186085939407349, "train/loss_total": 0.7546697854995728 }, 
{ "epoch": 0.37857333689553835, "step": 1417, "train/loss_ctc": 0.9357178807258606, "train/loss_error": 0.46872857213020325, "train/loss_total": 0.5621264576911926 }, { "epoch": 0.3788405022709057, "step": 1418, "train/loss_ctc": 0.7446494698524475, "train/loss_error": 0.48748093843460083, "train/loss_total": 0.538914680480957 }, { "epoch": 0.379107667646273, "step": 1419, "train/loss_ctc": 0.9342494010925293, "train/loss_error": 0.4806031882762909, "train/loss_total": 0.5713324546813965 }, { "epoch": 0.3793748330216404, "grad_norm": 0.9878385066986084, "learning_rate": 2.7728559978626768e-05, "loss": 0.596, "step": 1420 }, { "epoch": 0.3793748330216404, "step": 1420, "train/loss_ctc": 1.102428674697876, "train/loss_error": 0.5400949716567993, "train/loss_total": 0.6525617241859436 }, { "epoch": 0.37964199839700774, "step": 1421, "train/loss_ctc": 1.126142978668213, "train/loss_error": 0.521668553352356, "train/loss_total": 0.6425634622573853 }, { "epoch": 0.3799091637723751, "step": 1422, "train/loss_ctc": 1.927292823791504, "train/loss_error": 0.5696879029273987, "train/loss_total": 0.8412088751792908 }, { "epoch": 0.38017632914774246, "step": 1423, "train/loss_ctc": 1.0151972770690918, "train/loss_error": 0.46152740716934204, "train/loss_total": 0.572261393070221 }, { "epoch": 0.3804434945231098, "step": 1424, "train/loss_ctc": 0.571252167224884, "train/loss_error": 0.48222434520721436, "train/loss_total": 0.5000299215316772 }, { "epoch": 0.38071065989847713, "step": 1425, "train/loss_ctc": 0.47656673192977905, "train/loss_error": 0.4963422119617462, "train/loss_total": 0.4923871159553528 }, { "epoch": 0.3809778252738445, "step": 1426, "train/loss_ctc": 0.4728066921234131, "train/loss_error": 0.40554001927375793, "train/loss_total": 0.41899335384368896 }, { "epoch": 0.38124499064921186, "step": 1427, "train/loss_ctc": 1.1658436059951782, "train/loss_error": 0.44994550943374634, "train/loss_total": 0.5931251049041748 }, { "epoch": 0.3815121560245792, "step": 
1428, "train/loss_ctc": 1.7315078973770142, "train/loss_error": 0.4817962646484375, "train/loss_total": 0.7317385673522949 }, { "epoch": 0.3817793213999466, "step": 1429, "train/loss_ctc": 0.7169733047485352, "train/loss_error": 0.5264624953269958, "train/loss_total": 0.5645646452903748 }, { "epoch": 0.38204648677531394, "grad_norm": 1.4558122158050537, "learning_rate": 2.7712530056104726e-05, "loss": 0.6009, "step": 1430 }, { "epoch": 0.38204648677531394, "step": 1430, "train/loss_ctc": 0.533694863319397, "train/loss_error": 0.4783079922199249, "train/loss_total": 0.4893853962421417 }, { "epoch": 0.38231365215068125, "step": 1431, "train/loss_ctc": 0.63050776720047, "train/loss_error": 0.5217384696006775, "train/loss_total": 0.543492317199707 }, { "epoch": 0.3825808175260486, "step": 1432, "train/loss_ctc": 1.2105839252471924, "train/loss_error": 0.5221989750862122, "train/loss_total": 0.6598759889602661 }, { "epoch": 0.382847982901416, "step": 1433, "train/loss_ctc": 1.374178409576416, "train/loss_error": 0.5152668356895447, "train/loss_total": 0.687049150466919 }, { "epoch": 0.38311514827678333, "step": 1434, "train/loss_ctc": 0.8382725119590759, "train/loss_error": 0.4597645401954651, "train/loss_total": 0.5354661345481873 }, { "epoch": 0.3833823136521507, "step": 1435, "train/loss_ctc": 0.7786678075790405, "train/loss_error": 0.4937034249305725, "train/loss_total": 0.5506963133811951 }, { "epoch": 0.38364947902751806, "step": 1436, "train/loss_ctc": 1.4712010622024536, "train/loss_error": 0.48783358931541443, "train/loss_total": 0.6845070719718933 }, { "epoch": 0.38391664440288537, "step": 1437, "train/loss_ctc": 0.6127710342407227, "train/loss_error": 0.507743239402771, "train/loss_total": 0.5287488102912903 }, { "epoch": 0.3841838097782527, "step": 1438, "train/loss_ctc": 0.8778713941574097, "train/loss_error": 0.5010105967521667, "train/loss_total": 0.5763827562332153 }, { "epoch": 0.3844509751536201, "step": 1439, "train/loss_ctc": 0.7441178560256958, 
"train/loss_error": 0.5040586590766907, "train/loss_total": 0.5520704984664917 }, { "epoch": 0.38471814052898745, "grad_norm": 1.229493498802185, "learning_rate": 2.769650013358269e-05, "loss": 0.5808, "step": 1440 }, { "epoch": 0.38471814052898745, "step": 1440, "train/loss_ctc": 0.7777466177940369, "train/loss_error": 0.41519325971603394, "train/loss_total": 0.48770391941070557 }, { "epoch": 0.3849853059043548, "step": 1441, "train/loss_ctc": 0.8391835689544678, "train/loss_error": 0.531138002872467, "train/loss_total": 0.5927470922470093 }, { "epoch": 0.3852524712797222, "step": 1442, "train/loss_ctc": 1.0039386749267578, "train/loss_error": 0.47066041827201843, "train/loss_total": 0.5773161053657532 }, { "epoch": 0.3855196366550895, "step": 1443, "train/loss_ctc": 0.7939819097518921, "train/loss_error": 0.4697428345680237, "train/loss_total": 0.5345906615257263 }, { "epoch": 0.38578680203045684, "step": 1444, "train/loss_ctc": 0.7554484605789185, "train/loss_error": 0.457707941532135, "train/loss_total": 0.5172560214996338 }, { "epoch": 0.3860539674058242, "step": 1445, "train/loss_ctc": 0.5572042465209961, "train/loss_error": 0.5000113844871521, "train/loss_total": 0.5114499926567078 }, { "epoch": 0.38632113278119157, "step": 1446, "train/loss_ctc": 0.39129042625427246, "train/loss_error": 0.425150603055954, "train/loss_total": 0.4183785915374756 }, { "epoch": 0.38658829815655893, "step": 1447, "train/loss_ctc": 0.5357398986816406, "train/loss_error": 0.5030990242958069, "train/loss_total": 0.5096272230148315 }, { "epoch": 0.38685546353192624, "step": 1448, "train/loss_ctc": 0.6860736608505249, "train/loss_error": 0.542534589767456, "train/loss_total": 0.5712423920631409 }, { "epoch": 0.3871226289072936, "step": 1449, "train/loss_ctc": 0.7104473114013672, "train/loss_error": 0.49420297145843506, "train/loss_total": 0.5374518632888794 }, { "epoch": 0.38738979428266096, "grad_norm": 1.267602562904358, "learning_rate": 2.768047021106065e-05, "loss": 0.5258, 
"step": 1450 }, { "epoch": 0.38738979428266096, "step": 1450, "train/loss_ctc": 1.177865743637085, "train/loss_error": 0.46761980652809143, "train/loss_total": 0.609669029712677 }, { "epoch": 0.3876569596580283, "step": 1451, "train/loss_ctc": 0.8274719715118408, "train/loss_error": 0.532772421836853, "train/loss_total": 0.5917123556137085 }, { "epoch": 0.3879241250333957, "step": 1452, "train/loss_ctc": 0.6369132399559021, "train/loss_error": 0.5380575656890869, "train/loss_total": 0.5578287243843079 }, { "epoch": 0.38819129040876305, "step": 1453, "train/loss_ctc": 1.0609424114227295, "train/loss_error": 0.5426433682441711, "train/loss_total": 0.6463031768798828 }, { "epoch": 0.38845845578413035, "step": 1454, "train/loss_ctc": 0.7837125062942505, "train/loss_error": 0.44704335927963257, "train/loss_total": 0.5143771767616272 }, { "epoch": 0.3887256211594977, "step": 1455, "train/loss_ctc": 0.9628240466117859, "train/loss_error": 0.46268248558044434, "train/loss_total": 0.5627108216285706 }, { "epoch": 0.3889927865348651, "step": 1456, "train/loss_ctc": 1.8081251382827759, "train/loss_error": 0.5204591155052185, "train/loss_total": 0.7779923677444458 }, { "epoch": 0.38925995191023244, "step": 1457, "train/loss_ctc": 0.7269287109375, "train/loss_error": 0.47105127573013306, "train/loss_total": 0.5222268104553223 }, { "epoch": 0.3895271172855998, "step": 1458, "train/loss_ctc": 0.9688953161239624, "train/loss_error": 0.49908825755119324, "train/loss_total": 0.5930497050285339 }, { "epoch": 0.38979428266096716, "step": 1459, "train/loss_ctc": 0.29307642579078674, "train/loss_error": 0.4609755575656891, "train/loss_total": 0.427395761013031 }, { "epoch": 0.39006144803633447, "grad_norm": 1.8031765222549438, "learning_rate": 2.7664440288538607e-05, "loss": 0.5803, "step": 1460 }, { "epoch": 0.39006144803633447, "step": 1460, "train/loss_ctc": 0.8828452229499817, "train/loss_error": 0.5498174428939819, "train/loss_total": 0.6164230108261108 }, { "epoch": 
0.39032861341170183, "step": 1461, "train/loss_ctc": 0.9904204607009888, "train/loss_error": 0.4601491391658783, "train/loss_total": 0.5662034153938293 }, { "epoch": 0.3905957787870692, "step": 1462, "train/loss_ctc": 0.6514812111854553, "train/loss_error": 0.5896725058555603, "train/loss_total": 0.6020342707633972 }, { "epoch": 0.39086294416243655, "step": 1463, "train/loss_ctc": 0.7474843263626099, "train/loss_error": 0.5275354385375977, "train/loss_total": 0.5715252161026001 }, { "epoch": 0.3911301095378039, "step": 1464, "train/loss_ctc": 0.8353899717330933, "train/loss_error": 0.4685545563697815, "train/loss_total": 0.5419216752052307 }, { "epoch": 0.3913972749131713, "step": 1465, "train/loss_ctc": 0.6951267719268799, "train/loss_error": 0.47852882742881775, "train/loss_total": 0.5218484401702881 }, { "epoch": 0.3916644402885386, "step": 1466, "train/loss_ctc": 1.2106122970581055, "train/loss_error": 0.4618516266345978, "train/loss_total": 0.6116037368774414 }, { "epoch": 0.39193160566390595, "step": 1467, "train/loss_ctc": 0.5727131366729736, "train/loss_error": 0.46740925312042236, "train/loss_total": 0.48847004771232605 }, { "epoch": 0.3921987710392733, "step": 1468, "train/loss_ctc": 0.23203766345977783, "train/loss_error": 0.4978165030479431, "train/loss_total": 0.4446607232093811 }, { "epoch": 0.39246593641464067, "step": 1469, "train/loss_ctc": 1.853965163230896, "train/loss_error": 0.4724360704421997, "train/loss_total": 0.748741865158081 }, { "epoch": 0.39273310179000803, "grad_norm": 1.4929500818252563, "learning_rate": 2.7648410366016565e-05, "loss": 0.5713, "step": 1470 }, { "epoch": 0.39273310179000803, "step": 1470, "train/loss_ctc": 0.8922927379608154, "train/loss_error": 0.4689972400665283, "train/loss_total": 0.5536563396453857 }, { "epoch": 0.3930002671653754, "step": 1471, "train/loss_ctc": 0.8715744614601135, "train/loss_error": 0.470402330160141, "train/loss_total": 0.5506367683410645 }, { "epoch": 0.3932674325407427, "step": 1472, 
"train/loss_ctc": 0.9374152421951294, "train/loss_error": 0.45708608627319336, "train/loss_total": 0.5531519055366516 }, { "epoch": 0.39353459791611006, "step": 1473, "train/loss_ctc": 0.7341422438621521, "train/loss_error": 0.4590086042881012, "train/loss_total": 0.5140353441238403 }, { "epoch": 0.3938017632914774, "step": 1474, "train/loss_ctc": 0.84175705909729, "train/loss_error": 0.5008912682533264, "train/loss_total": 0.5690644383430481 }, { "epoch": 0.3940689286668448, "step": 1475, "train/loss_ctc": 1.4696943759918213, "train/loss_error": 0.46235084533691406, "train/loss_total": 0.6638195514678955 }, { "epoch": 0.39433609404221215, "step": 1476, "train/loss_ctc": 0.8475005626678467, "train/loss_error": 0.4401249885559082, "train/loss_total": 0.5216001272201538 }, { "epoch": 0.39460325941757946, "step": 1477, "train/loss_ctc": 0.9590047597885132, "train/loss_error": 0.5377150177955627, "train/loss_total": 0.6219729781150818 }, { "epoch": 0.3948704247929468, "step": 1478, "train/loss_ctc": 1.0122674703598022, "train/loss_error": 0.5055476427078247, "train/loss_total": 0.6068916320800781 }, { "epoch": 0.3951375901683142, "step": 1479, "train/loss_ctc": 1.0500407218933105, "train/loss_error": 0.5201478004455566, "train/loss_total": 0.6261264085769653 }, { "epoch": 0.39540475554368154, "grad_norm": 1.9426761865615845, "learning_rate": 2.7632380443494523e-05, "loss": 0.5781, "step": 1480 }, { "epoch": 0.39540475554368154, "step": 1480, "train/loss_ctc": 0.8599970936775208, "train/loss_error": 0.5282623767852783, "train/loss_total": 0.5946093201637268 }, { "epoch": 0.3956719209190489, "step": 1481, "train/loss_ctc": 0.8659152984619141, "train/loss_error": 0.5117096900939941, "train/loss_total": 0.5825508236885071 }, { "epoch": 0.39593908629441626, "step": 1482, "train/loss_ctc": 0.8150504231452942, "train/loss_error": 0.4785330295562744, "train/loss_total": 0.5458365082740784 }, { "epoch": 0.39620625166978357, "step": 1483, "train/loss_ctc": 0.584417462348938, 
"train/loss_error": 0.41827118396759033, "train/loss_total": 0.45150044560432434 }, { "epoch": 0.39647341704515093, "step": 1484, "train/loss_ctc": 1.3109149932861328, "train/loss_error": 0.5356020331382751, "train/loss_total": 0.6906646490097046 }, { "epoch": 0.3967405824205183, "step": 1485, "train/loss_ctc": 0.4329470992088318, "train/loss_error": 0.5224671959877014, "train/loss_total": 0.5045631527900696 }, { "epoch": 0.39700774779588566, "step": 1486, "train/loss_ctc": 0.754361629486084, "train/loss_error": 0.49881985783576965, "train/loss_total": 0.5499282479286194 }, { "epoch": 0.397274913171253, "step": 1487, "train/loss_ctc": 0.9377058744430542, "train/loss_error": 0.5121560096740723, "train/loss_total": 0.5972659587860107 }, { "epoch": 0.3975420785466204, "step": 1488, "train/loss_ctc": 0.8295944929122925, "train/loss_error": 0.5264187455177307, "train/loss_total": 0.5870538949966431 }, { "epoch": 0.3978092439219877, "step": 1489, "train/loss_ctc": 1.0905143022537231, "train/loss_error": 0.4932364821434021, "train/loss_total": 0.6126920580863953 }, { "epoch": 0.39807640929735505, "grad_norm": 1.5576804876327515, "learning_rate": 2.7616350520972485e-05, "loss": 0.5717, "step": 1490 }, { "epoch": 0.39807640929735505, "step": 1490, "train/loss_ctc": 0.9712051749229431, "train/loss_error": 0.47329825162887573, "train/loss_total": 0.5728796124458313 }, { "epoch": 0.3983435746727224, "step": 1491, "train/loss_ctc": 0.5206034779548645, "train/loss_error": 0.4630497097969055, "train/loss_total": 0.4745604693889618 }, { "epoch": 0.3986107400480898, "step": 1492, "train/loss_ctc": 0.5386836528778076, "train/loss_error": 0.502880334854126, "train/loss_total": 0.5100409984588623 }, { "epoch": 0.39887790542345714, "step": 1493, "train/loss_ctc": 1.4933120012283325, "train/loss_error": 0.6102201342582703, "train/loss_total": 0.7868385314941406 }, { "epoch": 0.3991450707988245, "step": 1494, "train/loss_ctc": 0.3106761574745178, "train/loss_error": 0.5262290835380554, 
"train/loss_total": 0.4831185042858124 }, { "epoch": 0.3994122361741918, "step": 1495, "train/loss_ctc": 0.4399287700653076, "train/loss_error": 0.5459349155426025, "train/loss_total": 0.5247336626052856 }, { "epoch": 0.39967940154955917, "step": 1496, "train/loss_ctc": 1.0659911632537842, "train/loss_error": 0.47673454880714417, "train/loss_total": 0.5945858955383301 }, { "epoch": 0.3999465669249265, "step": 1497, "train/loss_ctc": 1.2030105590820312, "train/loss_error": 0.4691089987754822, "train/loss_total": 0.615889310836792 }, { "epoch": 0.4002137323002939, "step": 1498, "train/loss_ctc": 1.2787067890167236, "train/loss_error": 0.4972764551639557, "train/loss_total": 0.6535625457763672 }, { "epoch": 0.40048089767566125, "step": 1499, "train/loss_ctc": 0.9820539355278015, "train/loss_error": 0.5121068954467773, "train/loss_total": 0.6060963273048401 }, { "epoch": 0.4007480630510286, "grad_norm": 1.2102112770080566, "learning_rate": 2.7600320598450443e-05, "loss": 0.5822, "step": 1500 }, { "epoch": 0.4007480630510286, "step": 1500, "train/loss_ctc": 0.6670656800270081, "train/loss_error": 0.5386043787002563, "train/loss_total": 0.5642966032028198 }, { "epoch": 0.4010152284263959, "step": 1501, "train/loss_ctc": 0.8868120908737183, "train/loss_error": 0.49187031388282776, "train/loss_total": 0.5708586573600769 }, { "epoch": 0.4012823938017633, "step": 1502, "train/loss_ctc": 0.7588002681732178, "train/loss_error": 0.4534209370613098, "train/loss_total": 0.5144968032836914 }, { "epoch": 0.40154955917713064, "step": 1503, "train/loss_ctc": 0.8787074089050293, "train/loss_error": 0.4586310386657715, "train/loss_total": 0.5426462888717651 }, { "epoch": 0.401816724552498, "step": 1504, "train/loss_ctc": 0.8199428915977478, "train/loss_error": 0.4662010073661804, "train/loss_total": 0.5369493961334229 }, { "epoch": 0.40208388992786537, "step": 1505, "train/loss_ctc": 0.9821164011955261, "train/loss_error": 0.41674575209617615, "train/loss_total": 0.529819905757904 }, { 
"epoch": 0.4023510553032327, "step": 1506, "train/loss_ctc": 0.6307063698768616, "train/loss_error": 0.5069335699081421, "train/loss_total": 0.5316881537437439 }, { "epoch": 0.40261822067860004, "step": 1507, "train/loss_ctc": 0.7531934976577759, "train/loss_error": 0.4862712323665619, "train/loss_total": 0.5396556854248047 }, { "epoch": 0.4028853860539674, "step": 1508, "train/loss_ctc": 0.9041082859039307, "train/loss_error": 0.558976411819458, "train/loss_total": 0.6280027627944946 }, { "epoch": 0.40315255142933476, "step": 1509, "train/loss_ctc": 0.6749637722969055, "train/loss_error": 0.4894547760486603, "train/loss_total": 0.5265566110610962 }, { "epoch": 0.4034197168047021, "grad_norm": 1.9745759963989258, "learning_rate": 2.75842906759284e-05, "loss": 0.5485, "step": 1510 }, { "epoch": 0.4034197168047021, "step": 1510, "train/loss_ctc": 0.7674564719200134, "train/loss_error": 0.463510662317276, "train/loss_total": 0.5242998600006104 }, { "epoch": 0.4036868821800695, "step": 1511, "train/loss_ctc": 1.1609156131744385, "train/loss_error": 0.48504194617271423, "train/loss_total": 0.6202167272567749 }, { "epoch": 0.4039540475554368, "step": 1512, "train/loss_ctc": 0.9729940891265869, "train/loss_error": 0.5281552672386169, "train/loss_total": 0.617123007774353 }, { "epoch": 0.40422121293080415, "step": 1513, "train/loss_ctc": 0.9437740445137024, "train/loss_error": 0.5464101433753967, "train/loss_total": 0.6258829236030579 }, { "epoch": 0.4044883783061715, "step": 1514, "train/loss_ctc": 0.9610235095024109, "train/loss_error": 0.5520428419113159, "train/loss_total": 0.633838951587677 }, { "epoch": 0.4047555436815389, "step": 1515, "train/loss_ctc": 0.5803678035736084, "train/loss_error": 0.4846811294555664, "train/loss_total": 0.5038184523582458 }, { "epoch": 0.40502270905690624, "step": 1516, "train/loss_ctc": 0.49039754271507263, "train/loss_error": 0.503373920917511, "train/loss_total": 0.5007786750793457 }, { "epoch": 0.4052898744322736, "step": 1517, 
"train/loss_ctc": 1.3153432607650757, "train/loss_error": 0.48813608288764954, "train/loss_total": 0.6535775065422058 }, { "epoch": 0.4055570398076409, "step": 1518, "train/loss_ctc": 0.8186814188957214, "train/loss_error": 0.501483142375946, "train/loss_total": 0.5649228096008301 }, { "epoch": 0.40582420518300827, "step": 1519, "train/loss_ctc": 0.7760680913925171, "train/loss_error": 0.5210235118865967, "train/loss_total": 0.5720324516296387 }, { "epoch": 0.40609137055837563, "grad_norm": 1.5286279916763306, "learning_rate": 2.756826075340636e-05, "loss": 0.5816, "step": 1520 }, { "epoch": 0.40609137055837563, "step": 1520, "train/loss_ctc": 0.806343138217926, "train/loss_error": 0.4848450720310211, "train/loss_total": 0.5491446852684021 }, { "epoch": 0.406358535933743, "step": 1521, "train/loss_ctc": 1.283592939376831, "train/loss_error": 0.49410533905029297, "train/loss_total": 0.6520028710365295 }, { "epoch": 0.40662570130911035, "step": 1522, "train/loss_ctc": 1.2859766483306885, "train/loss_error": 0.4800979197025299, "train/loss_total": 0.6412736773490906 }, { "epoch": 0.4068928666844777, "step": 1523, "train/loss_ctc": 0.5007240176200867, "train/loss_error": 0.502164900302887, "train/loss_total": 0.501876711845398 }, { "epoch": 0.407160032059845, "step": 1524, "train/loss_ctc": 0.9340533018112183, "train/loss_error": 0.42806169390678406, "train/loss_total": 0.5292600393295288 }, { "epoch": 0.4074271974352124, "step": 1525, "train/loss_ctc": 0.8486087322235107, "train/loss_error": 0.533653199672699, "train/loss_total": 0.5966442823410034 }, { "epoch": 0.40769436281057975, "step": 1526, "train/loss_ctc": 0.8911928534507751, "train/loss_error": 0.5035310387611389, "train/loss_total": 0.5810633897781372 }, { "epoch": 0.4079615281859471, "step": 1527, "train/loss_ctc": 1.1513440608978271, "train/loss_error": 0.5368415713310242, "train/loss_total": 0.6597420573234558 }, { "epoch": 0.40822869356131447, "step": 1528, "train/loss_ctc": 0.7396979331970215, 
"train/loss_error": 0.4562649130821228, "train/loss_total": 0.5129514932632446 }, { "epoch": 0.40849585893668183, "step": 1529, "train/loss_ctc": 1.2448687553405762, "train/loss_error": 0.46269115805625916, "train/loss_total": 0.6191266775131226 }, { "epoch": 0.40876302431204914, "grad_norm": 0.9548366069793701, "learning_rate": 2.7552230830884317e-05, "loss": 0.5843, "step": 1530 }, { "epoch": 0.40876302431204914, "step": 1530, "train/loss_ctc": 0.7533482313156128, "train/loss_error": 0.5574935078620911, "train/loss_total": 0.5966644883155823 }, { "epoch": 0.4090301896874165, "step": 1531, "train/loss_ctc": 0.3695825934410095, "train/loss_error": 0.5108044147491455, "train/loss_total": 0.48256006836891174 }, { "epoch": 0.40929735506278386, "step": 1532, "train/loss_ctc": 1.1809486150741577, "train/loss_error": 0.45433667302131653, "train/loss_total": 0.5996590852737427 }, { "epoch": 0.4095645204381512, "step": 1533, "train/loss_ctc": 0.938450276851654, "train/loss_error": 0.4863404929637909, "train/loss_total": 0.5767624378204346 }, { "epoch": 0.4098316858135186, "step": 1534, "train/loss_ctc": 0.855927586555481, "train/loss_error": 0.4881598651409149, "train/loss_total": 0.561713457107544 }, { "epoch": 0.4100988511888859, "step": 1535, "train/loss_ctc": 0.7724976539611816, "train/loss_error": 0.42422541975975037, "train/loss_total": 0.49387988448143005 }, { "epoch": 0.41036601656425326, "step": 1536, "train/loss_ctc": 0.9551892280578613, "train/loss_error": 0.4760489761829376, "train/loss_total": 0.5718770623207092 }, { "epoch": 0.4106331819396206, "step": 1537, "train/loss_ctc": 0.8436883687973022, "train/loss_error": 0.4650046229362488, "train/loss_total": 0.5407413840293884 }, { "epoch": 0.410900347314988, "step": 1538, "train/loss_ctc": 1.5577831268310547, "train/loss_error": 0.6583834290504456, "train/loss_total": 0.8382633924484253 }, { "epoch": 0.41116751269035534, "step": 1539, "train/loss_ctc": 0.666729211807251, "train/loss_error": 0.4853041172027588, 
"train/loss_total": 0.5215891599655151 }, { "epoch": 0.4114346780657227, "grad_norm": 1.1015762090682983, "learning_rate": 2.7536200908362275e-05, "loss": 0.5784, "step": 1540 }, { "epoch": 0.4114346780657227, "step": 1540, "train/loss_ctc": 0.5870542526245117, "train/loss_error": 0.4858722388744354, "train/loss_total": 0.5061086416244507 }, { "epoch": 0.41170184344109, "step": 1541, "train/loss_ctc": 0.6215024590492249, "train/loss_error": 0.5172854661941528, "train/loss_total": 0.5381288528442383 }, { "epoch": 0.41196900881645737, "step": 1542, "train/loss_ctc": 0.9077237248420715, "train/loss_error": 0.5404654145240784, "train/loss_total": 0.6139171123504639 }, { "epoch": 0.41223617419182473, "step": 1543, "train/loss_ctc": 0.4790773391723633, "train/loss_error": 0.5613284707069397, "train/loss_total": 0.5448782444000244 }, { "epoch": 0.4125033395671921, "step": 1544, "train/loss_ctc": 1.1067107915878296, "train/loss_error": 0.5247828960418701, "train/loss_total": 0.641168475151062 }, { "epoch": 0.41277050494255946, "step": 1545, "train/loss_ctc": 1.8161213397979736, "train/loss_error": 0.490632563829422, "train/loss_total": 0.7557303309440613 }, { "epoch": 0.4130376703179268, "step": 1546, "train/loss_ctc": 0.7616819143295288, "train/loss_error": 0.4878666400909424, "train/loss_total": 0.5426297187805176 }, { "epoch": 0.4133048356932941, "step": 1547, "train/loss_ctc": 1.2736409902572632, "train/loss_error": 0.47741401195526123, "train/loss_total": 0.6366593837738037 }, { "epoch": 0.4135720010686615, "step": 1548, "train/loss_ctc": 0.8099225759506226, "train/loss_error": 0.48917075991630554, "train/loss_total": 0.553321123123169 }, { "epoch": 0.41383916644402885, "step": 1549, "train/loss_ctc": 0.5735929012298584, "train/loss_error": 0.5150957107543945, "train/loss_total": 0.5267951488494873 }, { "epoch": 0.4141063318193962, "grad_norm": 1.2188644409179688, "learning_rate": 2.7520170985840237e-05, "loss": 0.5859, "step": 1550 }, { "epoch": 0.4141063318193962, 
"step": 1550, "train/loss_ctc": 0.9348063468933105, "train/loss_error": 0.5240499973297119, "train/loss_total": 0.6062012910842896 }, { "epoch": 0.4143734971947636, "step": 1551, "train/loss_ctc": 1.8449329137802124, "train/loss_error": 0.468279093503952, "train/loss_total": 0.7436098456382751 }, { "epoch": 0.41464066257013094, "step": 1552, "train/loss_ctc": 0.538231372833252, "train/loss_error": 0.5263993144035339, "train/loss_total": 0.5287657380104065 }, { "epoch": 0.41490782794549824, "step": 1553, "train/loss_ctc": 0.66927570104599, "train/loss_error": 0.48937365412712097, "train/loss_total": 0.5253540873527527 }, { "epoch": 0.4151749933208656, "step": 1554, "train/loss_ctc": 1.1568589210510254, "train/loss_error": 0.5322397947311401, "train/loss_total": 0.6571636199951172 }, { "epoch": 0.41544215869623297, "step": 1555, "train/loss_ctc": 1.1920708417892456, "train/loss_error": 0.5624156594276428, "train/loss_total": 0.6883467435836792 }, { "epoch": 0.41570932407160033, "step": 1556, "train/loss_ctc": 1.0523812770843506, "train/loss_error": 0.4512774348258972, "train/loss_total": 0.5714982151985168 }, { "epoch": 0.4159764894469677, "step": 1557, "train/loss_ctc": 1.2232013940811157, "train/loss_error": 0.4863028824329376, "train/loss_total": 0.6336826086044312 }, { "epoch": 0.41624365482233505, "step": 1558, "train/loss_ctc": 2.057216167449951, "train/loss_error": 0.4306756854057312, "train/loss_total": 0.755983829498291 }, { "epoch": 0.41651082019770236, "step": 1559, "train/loss_ctc": 1.0411485433578491, "train/loss_error": 0.4974101781845093, "train/loss_total": 0.6061578392982483 }, { "epoch": 0.4167779855730697, "grad_norm": 1.1612149477005005, "learning_rate": 2.7504141063318195e-05, "loss": 0.6317, "step": 1560 }, { "epoch": 0.4167779855730697, "step": 1560, "train/loss_ctc": 0.8867027163505554, "train/loss_error": 0.5496289134025574, "train/loss_total": 0.617043673992157 }, { "epoch": 0.4170451509484371, "step": 1561, "train/loss_ctc": 
1.2903833389282227, "train/loss_error": 0.4991971254348755, "train/loss_total": 0.657434344291687 }, { "epoch": 0.41731231632380444, "step": 1562, "train/loss_ctc": 1.0801658630371094, "train/loss_error": 0.5275373458862305, "train/loss_total": 0.6380630731582642 }, { "epoch": 0.4175794816991718, "step": 1563, "train/loss_ctc": 1.0854837894439697, "train/loss_error": 0.4916391372680664, "train/loss_total": 0.6104080677032471 }, { "epoch": 0.4178466470745391, "step": 1564, "train/loss_ctc": 0.4740612804889679, "train/loss_error": 0.5418465733528137, "train/loss_total": 0.5282894968986511 }, { "epoch": 0.4181138124499065, "step": 1565, "train/loss_ctc": 0.5607962012290955, "train/loss_error": 0.5413545966148376, "train/loss_total": 0.5452429056167603 }, { "epoch": 0.41838097782527384, "step": 1566, "train/loss_ctc": 1.0599961280822754, "train/loss_error": 0.48510703444480896, "train/loss_total": 0.6000848412513733 }, { "epoch": 0.4186481432006412, "step": 1567, "train/loss_ctc": 1.1897958517074585, "train/loss_error": 0.5236999988555908, "train/loss_total": 0.6569191813468933 }, { "epoch": 0.41891530857600856, "step": 1568, "train/loss_ctc": 0.5450310111045837, "train/loss_error": 0.48041436076164246, "train/loss_total": 0.4933376908302307 }, { "epoch": 0.4191824739513759, "step": 1569, "train/loss_ctc": 0.7180869579315186, "train/loss_error": 0.5066389441490173, "train/loss_total": 0.5489285588264465 }, { "epoch": 0.41944963932674323, "grad_norm": 1.3636550903320312, "learning_rate": 2.7488111140796153e-05, "loss": 0.5896, "step": 1570 }, { "epoch": 0.41944963932674323, "step": 1570, "train/loss_ctc": 1.7880821228027344, "train/loss_error": 0.4861830770969391, "train/loss_total": 0.7465628981590271 }, { "epoch": 0.4197168047021106, "step": 1571, "train/loss_ctc": 0.840351939201355, "train/loss_error": 0.4815416932106018, "train/loss_total": 0.5533037781715393 }, { "epoch": 0.41998397007747795, "step": 1572, "train/loss_ctc": 0.7122622132301331, "train/loss_error": 
0.4978089928627014, "train/loss_total": 0.5406996607780457 }, { "epoch": 0.4202511354528453, "step": 1573, "train/loss_ctc": 0.6523661613464355, "train/loss_error": 0.48943305015563965, "train/loss_total": 0.5220196843147278 }, { "epoch": 0.4205183008282127, "step": 1574, "train/loss_ctc": 0.8390273451805115, "train/loss_error": 0.4783320724964142, "train/loss_total": 0.5504711270332336 }, { "epoch": 0.42078546620358004, "step": 1575, "train/loss_ctc": 1.0375313758850098, "train/loss_error": 0.427345335483551, "train/loss_total": 0.5493825674057007 }, { "epoch": 0.42105263157894735, "step": 1576, "train/loss_ctc": 1.2818751335144043, "train/loss_error": 0.5657684803009033, "train/loss_total": 0.7089898586273193 }, { "epoch": 0.4213197969543147, "step": 1577, "train/loss_ctc": 0.7304320335388184, "train/loss_error": 0.4585399925708771, "train/loss_total": 0.5129184126853943 }, { "epoch": 0.42158696232968207, "step": 1578, "train/loss_ctc": 1.1110553741455078, "train/loss_error": 0.5144277215003967, "train/loss_total": 0.63375324010849 }, { "epoch": 0.42185412770504943, "step": 1579, "train/loss_ctc": 1.4998176097869873, "train/loss_error": 0.4954543709754944, "train/loss_total": 0.6963270306587219 }, { "epoch": 0.4221212930804168, "grad_norm": 1.6553120613098145, "learning_rate": 2.747208121827411e-05, "loss": 0.6014, "step": 1580 }, { "epoch": 0.4221212930804168, "step": 1580, "train/loss_ctc": 0.8597254157066345, "train/loss_error": 0.47910362482070923, "train/loss_total": 0.5552279949188232 }, { "epoch": 0.42238845845578415, "step": 1581, "train/loss_ctc": 0.5888466835021973, "train/loss_error": 0.5722366571426392, "train/loss_total": 0.5755586624145508 }, { "epoch": 0.42265562383115146, "step": 1582, "train/loss_ctc": 0.7849969863891602, "train/loss_error": 0.49826502799987793, "train/loss_total": 0.5556114315986633 }, { "epoch": 0.4229227892065188, "step": 1583, "train/loss_ctc": 0.5163779258728027, "train/loss_error": 0.49403440952301025, "train/loss_total": 
0.4985031187534332 }, { "epoch": 0.4231899545818862, "step": 1584, "train/loss_ctc": 1.0818932056427002, "train/loss_error": 0.48938867449760437, "train/loss_total": 0.6078895926475525 }, { "epoch": 0.42345711995725355, "step": 1585, "train/loss_ctc": 0.36716383695602417, "train/loss_error": 0.4969876706600189, "train/loss_total": 0.47102290391921997 }, { "epoch": 0.4237242853326209, "step": 1586, "train/loss_ctc": 1.244714379310608, "train/loss_error": 0.5055722594261169, "train/loss_total": 0.6534006595611572 }, { "epoch": 0.42399145070798827, "step": 1587, "train/loss_ctc": 1.2004653215408325, "train/loss_error": 0.5151118636131287, "train/loss_total": 0.6521825790405273 }, { "epoch": 0.4242586160833556, "step": 1588, "train/loss_ctc": 0.7478582859039307, "train/loss_error": 0.41327357292175293, "train/loss_total": 0.4801905155181885 }, { "epoch": 0.42452578145872294, "step": 1589, "train/loss_ctc": 0.6307867169380188, "train/loss_error": 0.47385504841804504, "train/loss_total": 0.5052413940429688 }, { "epoch": 0.4247929468340903, "grad_norm": 1.040102481842041, "learning_rate": 2.745605129575207e-05, "loss": 0.5555, "step": 1590 }, { "epoch": 0.4247929468340903, "step": 1590, "train/loss_ctc": 0.5122452974319458, "train/loss_error": 0.4968850016593933, "train/loss_total": 0.49995705485343933 }, { "epoch": 0.42506011220945766, "step": 1591, "train/loss_ctc": 0.9583691358566284, "train/loss_error": 0.49169519543647766, "train/loss_total": 0.5850300192832947 }, { "epoch": 0.425327277584825, "step": 1592, "train/loss_ctc": 0.8280011415481567, "train/loss_error": 0.5133963227272034, "train/loss_total": 0.576317310333252 }, { "epoch": 0.42559444296019233, "step": 1593, "train/loss_ctc": 1.4875086545944214, "train/loss_error": 0.4852718710899353, "train/loss_total": 0.6857192516326904 }, { "epoch": 0.4258616083355597, "step": 1594, "train/loss_ctc": 0.5385469198226929, "train/loss_error": 0.5181313157081604, "train/loss_total": 0.5222144722938538 }, { "epoch": 
0.42612877371092706, "step": 1595, "train/loss_ctc": 0.587291955947876, "train/loss_error": 0.4767298996448517, "train/loss_total": 0.49884232878685 }, { "epoch": 0.4263959390862944, "step": 1596, "train/loss_ctc": 0.8165537714958191, "train/loss_error": 0.5278756618499756, "train/loss_total": 0.5856112837791443 }, { "epoch": 0.4266631044616618, "step": 1597, "train/loss_ctc": 0.8026505708694458, "train/loss_error": 0.5269947052001953, "train/loss_total": 0.5821259021759033 }, { "epoch": 0.42693026983702914, "step": 1598, "train/loss_ctc": 0.8804445862770081, "train/loss_error": 0.43503236770629883, "train/loss_total": 0.5241147875785828 }, { "epoch": 0.42719743521239645, "step": 1599, "train/loss_ctc": 0.7260840535163879, "train/loss_error": 0.47336170077323914, "train/loss_total": 0.5239061713218689 }, { "epoch": 0.4274646005877638, "grad_norm": 1.8299522399902344, "learning_rate": 2.7440021373230027e-05, "loss": 0.5584, "step": 1600 }, { "epoch": 0.4274646005877638, "step": 1600, "train/loss_ctc": 1.1007845401763916, "train/loss_error": 0.5353220701217651, "train/loss_total": 0.6484146118164062 }, { "epoch": 0.42773176596313117, "step": 1601, "train/loss_ctc": 0.6771512627601624, "train/loss_error": 0.5191667079925537, "train/loss_total": 0.5507636070251465 }, { "epoch": 0.42799893133849853, "step": 1602, "train/loss_ctc": 0.8426879048347473, "train/loss_error": 0.48388785123825073, "train/loss_total": 0.5556478500366211 }, { "epoch": 0.4282660967138659, "step": 1603, "train/loss_ctc": 2.2550137042999268, "train/loss_error": 0.49098822474479675, "train/loss_total": 0.8437933325767517 }, { "epoch": 0.42853326208923326, "step": 1604, "train/loss_ctc": 0.8082648515701294, "train/loss_error": 0.4545195400714874, "train/loss_total": 0.5252686142921448 }, { "epoch": 0.42880042746460056, "step": 1605, "train/loss_ctc": 0.7402321100234985, "train/loss_error": 0.4237515926361084, "train/loss_total": 0.4870476722717285 }, { "epoch": 0.4290675928399679, "step": 1606, 
"train/loss_ctc": 0.419836163520813, "train/loss_error": 0.4668155312538147, "train/loss_total": 0.45741966366767883 }, { "epoch": 0.4293347582153353, "step": 1607, "train/loss_ctc": 0.5921157598495483, "train/loss_error": 0.47527971863746643, "train/loss_total": 0.49864694476127625 }, { "epoch": 0.42960192359070265, "step": 1608, "train/loss_ctc": 0.6672950983047485, "train/loss_error": 0.47498655319213867, "train/loss_total": 0.5134482383728027 }, { "epoch": 0.42986908896607, "step": 1609, "train/loss_ctc": 0.5189138650894165, "train/loss_error": 0.4954223036766052, "train/loss_total": 0.5001206398010254 }, { "epoch": 0.4301362543414374, "grad_norm": 1.1738477945327759, "learning_rate": 2.742399145070799e-05, "loss": 0.5581, "step": 1610 }, { "epoch": 0.4301362543414374, "step": 1610, "train/loss_ctc": 1.193371295928955, "train/loss_error": 0.4735091030597687, "train/loss_total": 0.617481529712677 }, { "epoch": 0.4304034197168047, "step": 1611, "train/loss_ctc": 0.5334111452102661, "train/loss_error": 0.4245859682559967, "train/loss_total": 0.446351021528244 }, { "epoch": 0.43067058509217204, "step": 1612, "train/loss_ctc": 0.7493411898612976, "train/loss_error": 0.5614068508148193, "train/loss_total": 0.598993718624115 }, { "epoch": 0.4309377504675394, "step": 1613, "train/loss_ctc": 1.01618492603302, "train/loss_error": 0.5141737461090088, "train/loss_total": 0.614575982093811 }, { "epoch": 0.43120491584290677, "step": 1614, "train/loss_ctc": 0.7933374047279358, "train/loss_error": 0.45816755294799805, "train/loss_total": 0.5252015590667725 }, { "epoch": 0.43147208121827413, "step": 1615, "train/loss_ctc": 0.8989961743354797, "train/loss_error": 0.49797025322914124, "train/loss_total": 0.57817542552948 }, { "epoch": 0.4317392465936415, "step": 1616, "train/loss_ctc": 0.5207957029342651, "train/loss_error": 0.4658200144767761, "train/loss_total": 0.4768151640892029 }, { "epoch": 0.4320064119690088, "step": 1617, "train/loss_ctc": 0.7043005228042603, 
"train/loss_error": 0.505189061164856, "train/loss_total": 0.5450114011764526 }, { "epoch": 0.43227357734437616, "step": 1618, "train/loss_ctc": 0.8911440372467041, "train/loss_error": 0.4914940595626831, "train/loss_total": 0.5714240670204163 }, { "epoch": 0.4325407427197435, "step": 1619, "train/loss_ctc": 0.38085758686065674, "train/loss_error": 0.5117622017860413, "train/loss_total": 0.48558127880096436 }, { "epoch": 0.4328079080951109, "grad_norm": 1.1724547147750854, "learning_rate": 2.740796152818595e-05, "loss": 0.546, "step": 1620 }, { "epoch": 0.4328079080951109, "step": 1620, "train/loss_ctc": 1.440769910812378, "train/loss_error": 0.4729458689689636, "train/loss_total": 0.6665107011795044 }, { "epoch": 0.43307507347047824, "step": 1621, "train/loss_ctc": 0.9325155019760132, "train/loss_error": 0.46925750374794006, "train/loss_total": 0.5619090795516968 }, { "epoch": 0.43334223884584555, "step": 1622, "train/loss_ctc": 0.9014639854431152, "train/loss_error": 0.4888473153114319, "train/loss_total": 0.5713706612586975 }, { "epoch": 0.4336094042212129, "step": 1623, "train/loss_ctc": 0.9597283005714417, "train/loss_error": 0.5708000063896179, "train/loss_total": 0.6485856771469116 }, { "epoch": 0.4338765695965803, "step": 1624, "train/loss_ctc": 1.1455843448638916, "train/loss_error": 0.5330942869186401, "train/loss_total": 0.6555923223495483 }, { "epoch": 0.43414373497194764, "step": 1625, "train/loss_ctc": 0.3938498795032501, "train/loss_error": 0.49258479475975037, "train/loss_total": 0.4728378355503082 }, { "epoch": 0.434410900347315, "step": 1626, "train/loss_ctc": 0.40599989891052246, "train/loss_error": 0.45282912254333496, "train/loss_total": 0.4434632658958435 }, { "epoch": 0.43467806572268236, "step": 1627, "train/loss_ctc": 1.120137095451355, "train/loss_error": 0.5441192984580994, "train/loss_total": 0.6593228578567505 }, { "epoch": 0.43494523109804967, "step": 1628, "train/loss_ctc": 0.8629398345947266, "train/loss_error": 0.5282735824584961, 
"train/loss_total": 0.5952068567276001 }, { "epoch": 0.43521239647341703, "step": 1629, "train/loss_ctc": 0.9624820947647095, "train/loss_error": 0.43772825598716736, "train/loss_total": 0.5426790714263916 }, { "epoch": 0.4354795618487844, "grad_norm": 3.0199570655822754, "learning_rate": 2.7391931605663908e-05, "loss": 0.5817, "step": 1630 }, { "epoch": 0.4354795618487844, "step": 1630, "train/loss_ctc": 1.2502102851867676, "train/loss_error": 0.4957639276981354, "train/loss_total": 0.6466531753540039 }, { "epoch": 0.43574672722415175, "step": 1631, "train/loss_ctc": 0.7785703539848328, "train/loss_error": 0.46094048023223877, "train/loss_total": 0.5244664549827576 }, { "epoch": 0.4360138925995191, "step": 1632, "train/loss_ctc": 0.8009412884712219, "train/loss_error": 0.5585806965827942, "train/loss_total": 0.6070528030395508 }, { "epoch": 0.4362810579748865, "step": 1633, "train/loss_ctc": 0.4843606650829315, "train/loss_error": 0.5464047789573669, "train/loss_total": 0.5339959859848022 }, { "epoch": 0.4365482233502538, "step": 1634, "train/loss_ctc": 0.5243436694145203, "train/loss_error": 0.4217519164085388, "train/loss_total": 0.44227027893066406 }, { "epoch": 0.43681538872562115, "step": 1635, "train/loss_ctc": 0.814773678779602, "train/loss_error": 0.4968780279159546, "train/loss_total": 0.560457170009613 }, { "epoch": 0.4370825541009885, "step": 1636, "train/loss_ctc": 0.9781674146652222, "train/loss_error": 0.5370224118232727, "train/loss_total": 0.6252514123916626 }, { "epoch": 0.43734971947635587, "step": 1637, "train/loss_ctc": 0.6672288775444031, "train/loss_error": 0.45819199085235596, "train/loss_total": 0.49999940395355225 }, { "epoch": 0.43761688485172323, "step": 1638, "train/loss_ctc": 0.8048025369644165, "train/loss_error": 0.6232740879058838, "train/loss_total": 0.6595798134803772 }, { "epoch": 0.4378840502270906, "step": 1639, "train/loss_ctc": 0.6843622922897339, "train/loss_error": 0.6210084557533264, "train/loss_total": 0.633679211139679 
}, { "epoch": 0.4381512156024579, "grad_norm": 2.2956037521362305, "learning_rate": 2.7375901683141866e-05, "loss": 0.5733, "step": 1640 }, { "epoch": 0.4381512156024579, "step": 1640, "train/loss_ctc": 0.9878394603729248, "train/loss_error": 0.4791490435600281, "train/loss_total": 0.5808871388435364 }, { "epoch": 0.43841838097782526, "step": 1641, "train/loss_ctc": 1.6706767082214355, "train/loss_error": 0.49840685725212097, "train/loss_total": 0.732860803604126 }, { "epoch": 0.4386855463531926, "step": 1642, "train/loss_ctc": 0.37557119131088257, "train/loss_error": 0.4834074079990387, "train/loss_total": 0.4618401825428009 }, { "epoch": 0.43895271172856, "step": 1643, "train/loss_ctc": 0.7908281683921814, "train/loss_error": 0.5311196446418762, "train/loss_total": 0.5830613374710083 }, { "epoch": 0.43921987710392735, "step": 1644, "train/loss_ctc": 0.8653100728988647, "train/loss_error": 0.5226167440414429, "train/loss_total": 0.5911554098129272 }, { "epoch": 0.4394870424792947, "step": 1645, "train/loss_ctc": 0.6372666358947754, "train/loss_error": 0.5147604942321777, "train/loss_total": 0.5392616987228394 }, { "epoch": 0.439754207854662, "step": 1646, "train/loss_ctc": 1.0616358518600464, "train/loss_error": 0.5645161867141724, "train/loss_total": 0.6639401316642761 }, { "epoch": 0.4400213732300294, "step": 1647, "train/loss_ctc": 1.344377040863037, "train/loss_error": 0.47913920879364014, "train/loss_total": 0.6521867513656616 }, { "epoch": 0.44028853860539674, "step": 1648, "train/loss_ctc": 0.6121383905410767, "train/loss_error": 0.4784739315509796, "train/loss_total": 0.505206823348999 }, { "epoch": 0.4405557039807641, "step": 1649, "train/loss_ctc": 0.6947766542434692, "train/loss_error": 0.5254459381103516, "train/loss_total": 0.559312105178833 }, { "epoch": 0.44082286935613146, "grad_norm": 1.1238651275634766, "learning_rate": 2.7359871760619824e-05, "loss": 0.587, "step": 1650 }, { "epoch": 0.44082286935613146, "step": 1650, "train/loss_ctc": 
1.0063952207565308, "train/loss_error": 0.4721522629261017, "train/loss_total": 0.5790008306503296 }, { "epoch": 0.44109003473149877, "step": 1651, "train/loss_ctc": 0.8401203155517578, "train/loss_error": 0.4452649652957916, "train/loss_total": 0.5242360830307007 }, { "epoch": 0.44135720010686613, "step": 1652, "train/loss_ctc": 1.402177095413208, "train/loss_error": 0.4840855598449707, "train/loss_total": 0.6677038669586182 }, { "epoch": 0.4416243654822335, "step": 1653, "train/loss_ctc": 0.5890903472900391, "train/loss_error": 0.452528715133667, "train/loss_total": 0.47984105348587036 }, { "epoch": 0.44189153085760086, "step": 1654, "train/loss_ctc": 1.1576988697052002, "train/loss_error": 0.4624195396900177, "train/loss_total": 0.6014754176139832 }, { "epoch": 0.4421586962329682, "step": 1655, "train/loss_ctc": 1.3985118865966797, "train/loss_error": 0.5107211470603943, "train/loss_total": 0.6882793307304382 }, { "epoch": 0.4424258616083356, "step": 1656, "train/loss_ctc": 1.1442512273788452, "train/loss_error": 0.4507763981819153, "train/loss_total": 0.5894713401794434 }, { "epoch": 0.4426930269837029, "step": 1657, "train/loss_ctc": 0.9224789142608643, "train/loss_error": 0.5704823136329651, "train/loss_total": 0.6408816576004028 }, { "epoch": 0.44296019235907025, "step": 1658, "train/loss_ctc": 0.7253264784812927, "train/loss_error": 0.494891881942749, "train/loss_total": 0.5409787893295288 }, { "epoch": 0.4432273577344376, "step": 1659, "train/loss_ctc": 0.786207377910614, "train/loss_error": 0.49804091453552246, "train/loss_total": 0.5556741952896118 }, { "epoch": 0.443494523109805, "grad_norm": 0.9904990792274475, "learning_rate": 2.7343841838097783e-05, "loss": 0.5868, "step": 1660 }, { "epoch": 0.443494523109805, "step": 1660, "train/loss_ctc": 0.4244973659515381, "train/loss_error": 0.4805246889591217, "train/loss_total": 0.46931925415992737 }, { "epoch": 0.44376168848517233, "step": 1661, "train/loss_ctc": 1.3410556316375732, "train/loss_error": 
0.5156322717666626, "train/loss_total": 0.6807169914245605 }, { "epoch": 0.4440288538605397, "step": 1662, "train/loss_ctc": 0.48429015278816223, "train/loss_error": 0.5446457266807556, "train/loss_total": 0.5325746536254883 }, { "epoch": 0.444296019235907, "step": 1663, "train/loss_ctc": 0.6613962054252625, "train/loss_error": 0.5023707151412964, "train/loss_total": 0.5341758131980896 }, { "epoch": 0.44456318461127436, "step": 1664, "train/loss_ctc": 1.0116682052612305, "train/loss_error": 0.49600955843925476, "train/loss_total": 0.5991412997245789 }, { "epoch": 0.4448303499866417, "step": 1665, "train/loss_ctc": 0.7889575958251953, "train/loss_error": 0.4321030378341675, "train/loss_total": 0.5034739971160889 }, { "epoch": 0.4450975153620091, "step": 1666, "train/loss_ctc": 0.6686984300613403, "train/loss_error": 0.47562724351882935, "train/loss_total": 0.5142415165901184 }, { "epoch": 0.44536468073737645, "step": 1667, "train/loss_ctc": 1.3821232318878174, "train/loss_error": 0.5620048642158508, "train/loss_total": 0.726028561592102 }, { "epoch": 0.4456318461127438, "step": 1668, "train/loss_ctc": 0.8848749399185181, "train/loss_error": 0.5181623697280884, "train/loss_total": 0.5915048718452454 }, { "epoch": 0.4458990114881111, "step": 1669, "train/loss_ctc": 0.5018500685691833, "train/loss_error": 0.42807653546333313, "train/loss_total": 0.44283124804496765 }, { "epoch": 0.4461661768634785, "grad_norm": 2.969097852706909, "learning_rate": 2.7327811915575744e-05, "loss": 0.5594, "step": 1670 }, { "epoch": 0.4461661768634785, "step": 1670, "train/loss_ctc": 0.7974900007247925, "train/loss_error": 0.510293185710907, "train/loss_total": 0.567732572555542 }, { "epoch": 0.44643334223884584, "step": 1671, "train/loss_ctc": 0.7308613657951355, "train/loss_error": 0.5327968001365662, "train/loss_total": 0.5724096894264221 }, { "epoch": 0.4467005076142132, "step": 1672, "train/loss_ctc": 0.7919473052024841, "train/loss_error": 0.49429285526275635, "train/loss_total": 
0.5538237690925598 }, { "epoch": 0.44696767298958057, "step": 1673, "train/loss_ctc": 0.4151173233985901, "train/loss_error": 0.4989554286003113, "train/loss_total": 0.48218780755996704 }, { "epoch": 0.44723483836494793, "step": 1674, "train/loss_ctc": 0.2634474039077759, "train/loss_error": 0.5012998580932617, "train/loss_total": 0.45372939109802246 }, { "epoch": 0.44750200374031524, "step": 1675, "train/loss_ctc": 0.6925363540649414, "train/loss_error": 0.5141341686248779, "train/loss_total": 0.5498145818710327 }, { "epoch": 0.4477691691156826, "step": 1676, "train/loss_ctc": 0.41001570224761963, "train/loss_error": 0.45043158531188965, "train/loss_total": 0.4423484206199646 }, { "epoch": 0.44803633449104996, "step": 1677, "train/loss_ctc": 0.7752453088760376, "train/loss_error": 0.4641382694244385, "train/loss_total": 0.5263596773147583 }, { "epoch": 0.4483034998664173, "step": 1678, "train/loss_ctc": 0.8268418908119202, "train/loss_error": 0.46256858110427856, "train/loss_total": 0.5354232788085938 }, { "epoch": 0.4485706652417847, "step": 1679, "train/loss_ctc": 0.6616332530975342, "train/loss_error": 0.4992407560348511, "train/loss_total": 0.5317192673683167 }, { "epoch": 0.448837830617152, "grad_norm": 2.3843295574188232, "learning_rate": 2.7311781993053702e-05, "loss": 0.5216, "step": 1680 }, { "epoch": 0.448837830617152, "step": 1680, "train/loss_ctc": 0.8632264137268066, "train/loss_error": 0.5055208802223206, "train/loss_total": 0.5770620107650757 }, { "epoch": 0.44910499599251935, "step": 1681, "train/loss_ctc": 0.26179808378219604, "train/loss_error": 0.4916072189807892, "train/loss_total": 0.44564539194107056 }, { "epoch": 0.4493721613678867, "step": 1682, "train/loss_ctc": 1.153418779373169, "train/loss_error": 0.5424520373344421, "train/loss_total": 0.6646453738212585 }, { "epoch": 0.4496393267432541, "step": 1683, "train/loss_ctc": 1.1348000764846802, "train/loss_error": 0.44386711716651917, "train/loss_total": 0.5820537209510803 }, { "epoch": 
0.44990649211862144, "step": 1684, "train/loss_ctc": 0.469556987285614, "train/loss_error": 0.5061347484588623, "train/loss_total": 0.4988192021846771 }, { "epoch": 0.4501736574939888, "step": 1685, "train/loss_ctc": 0.5193031430244446, "train/loss_error": 0.4702450931072235, "train/loss_total": 0.4800567030906677 }, { "epoch": 0.4504408228693561, "step": 1686, "train/loss_ctc": 1.115801215171814, "train/loss_error": 0.5065003633499146, "train/loss_total": 0.6283605694770813 }, { "epoch": 0.45070798824472347, "step": 1687, "train/loss_ctc": 0.7103747129440308, "train/loss_error": 0.5411005616188049, "train/loss_total": 0.574955403804779 }, { "epoch": 0.45097515362009083, "step": 1688, "train/loss_ctc": 0.6640626788139343, "train/loss_error": 0.48819366097450256, "train/loss_total": 0.5233674645423889 }, { "epoch": 0.4512423189954582, "step": 1689, "train/loss_ctc": 0.5161669254302979, "train/loss_error": 0.4822782278060913, "train/loss_total": 0.4890559911727905 }, { "epoch": 0.45150948437082555, "grad_norm": 0.7895596027374268, "learning_rate": 2.729575207053166e-05, "loss": 0.5464, "step": 1690 }, { "epoch": 0.45150948437082555, "step": 1690, "train/loss_ctc": 0.9117891192436218, "train/loss_error": 0.4727216958999634, "train/loss_total": 0.560535192489624 }, { "epoch": 0.4517766497461929, "step": 1691, "train/loss_ctc": 1.0060335397720337, "train/loss_error": 0.5295419096946716, "train/loss_total": 0.624840259552002 }, { "epoch": 0.4520438151215602, "step": 1692, "train/loss_ctc": 0.7798508405685425, "train/loss_error": 0.4935908615589142, "train/loss_total": 0.5508428812026978 }, { "epoch": 0.4523109804969276, "step": 1693, "train/loss_ctc": 0.7302679419517517, "train/loss_error": 0.49586203694343567, "train/loss_total": 0.5427432060241699 }, { "epoch": 0.45257814587229495, "step": 1694, "train/loss_ctc": 1.2562427520751953, "train/loss_error": 0.4507526755332947, "train/loss_total": 0.6118507385253906 }, { "epoch": 0.4528453112476623, "step": 1695, 
"train/loss_ctc": 0.780154287815094, "train/loss_error": 0.5548869371414185, "train/loss_total": 0.5999404191970825 }, { "epoch": 0.45311247662302967, "step": 1696, "train/loss_ctc": 1.0589656829833984, "train/loss_error": 0.45398056507110596, "train/loss_total": 0.5749775767326355 }, { "epoch": 0.45337964199839703, "step": 1697, "train/loss_ctc": 0.6764829158782959, "train/loss_error": 0.6005641222000122, "train/loss_total": 0.6157479286193848 }, { "epoch": 0.45364680737376434, "step": 1698, "train/loss_ctc": 0.22943024337291718, "train/loss_error": 0.4973394572734833, "train/loss_total": 0.4437576234340668 }, { "epoch": 0.4539139727491317, "step": 1699, "train/loss_ctc": 0.9261268973350525, "train/loss_error": 0.5028333067893982, "train/loss_total": 0.587492048740387 }, { "epoch": 0.45418113812449906, "grad_norm": 0.9508458375930786, "learning_rate": 2.7279722148009618e-05, "loss": 0.5713, "step": 1700 }, { "epoch": 0.45418113812449906, "step": 1700, "train/loss_ctc": 1.1482584476470947, "train/loss_error": 0.5057412385940552, "train/loss_total": 0.6342446804046631 }, { "epoch": 0.4544483034998664, "step": 1701, "train/loss_ctc": 0.7570159435272217, "train/loss_error": 0.49217545986175537, "train/loss_total": 0.5451436042785645 }, { "epoch": 0.4547154688752338, "step": 1702, "train/loss_ctc": 1.5314809083938599, "train/loss_error": 0.45060646533966064, "train/loss_total": 0.6667813658714294 }, { "epoch": 0.45498263425060115, "step": 1703, "train/loss_ctc": 0.8509970903396606, "train/loss_error": 0.4628923535346985, "train/loss_total": 0.540513277053833 }, { "epoch": 0.45524979962596845, "step": 1704, "train/loss_ctc": 2.841970920562744, "train/loss_error": 0.5289221405982971, "train/loss_total": 0.9915319085121155 }, { "epoch": 0.4555169650013358, "step": 1705, "train/loss_ctc": 0.53803551197052, "train/loss_error": 0.5114611983299255, "train/loss_total": 0.5167760848999023 }, { "epoch": 0.4557841303767032, "step": 1706, "train/loss_ctc": 0.6357517838478088, 
"train/loss_error": 0.4982590973377228, "train/loss_total": 0.5257576704025269 }, { "epoch": 0.45605129575207054, "step": 1707, "train/loss_ctc": 0.7695362567901611, "train/loss_error": 0.46189185976982117, "train/loss_total": 0.5234207510948181 }, { "epoch": 0.4563184611274379, "step": 1708, "train/loss_ctc": 0.9706466794013977, "train/loss_error": 0.40381094813346863, "train/loss_total": 0.5171781182289124 }, { "epoch": 0.45658562650280526, "step": 1709, "train/loss_ctc": 0.981142520904541, "train/loss_error": 0.4942055940628052, "train/loss_total": 0.5915930271148682 }, { "epoch": 0.45685279187817257, "grad_norm": 1.1177860498428345, "learning_rate": 2.7263692225487576e-05, "loss": 0.6053, "step": 1710 }, { "epoch": 0.45685279187817257, "step": 1710, "train/loss_ctc": 0.6543451547622681, "train/loss_error": 0.5171199440956116, "train/loss_total": 0.544564962387085 }, { "epoch": 0.45711995725353993, "step": 1711, "train/loss_ctc": 1.0909831523895264, "train/loss_error": 0.49025437235832214, "train/loss_total": 0.6104001402854919 }, { "epoch": 0.4573871226289073, "step": 1712, "train/loss_ctc": 1.238537311553955, "train/loss_error": 0.4827468991279602, "train/loss_total": 0.6339049935340881 }, { "epoch": 0.45765428800427466, "step": 1713, "train/loss_ctc": 0.5544184446334839, "train/loss_error": 0.4905470609664917, "train/loss_total": 0.5033213496208191 }, { "epoch": 0.457921453379642, "step": 1714, "train/loss_ctc": 0.7934234142303467, "train/loss_error": 0.4433436989784241, "train/loss_total": 0.5133596658706665 }, { "epoch": 0.4581886187550093, "step": 1715, "train/loss_ctc": 0.7957836389541626, "train/loss_error": 0.4755556285381317, "train/loss_total": 0.53960120677948 }, { "epoch": 0.4584557841303767, "step": 1716, "train/loss_ctc": 0.5991537570953369, "train/loss_error": 0.4888817071914673, "train/loss_total": 0.5109361410140991 }, { "epoch": 0.45872294950574405, "step": 1717, "train/loss_ctc": 0.6260542869567871, "train/loss_error": 0.41802582144737244, 
"train/loss_total": 0.4596315026283264 }, { "epoch": 0.4589901148811114, "step": 1718, "train/loss_ctc": 0.6991853713989258, "train/loss_error": 0.4620843827724457, "train/loss_total": 0.5095045566558838 }, { "epoch": 0.4592572802564788, "step": 1719, "train/loss_ctc": 0.4246464967727661, "train/loss_error": 0.5263217687606812, "train/loss_total": 0.505986750125885 }, { "epoch": 0.45952444563184613, "grad_norm": 5.470366477966309, "learning_rate": 2.7247662302965534e-05, "loss": 0.5331, "step": 1720 }, { "epoch": 0.45952444563184613, "step": 1720, "train/loss_ctc": 0.5074010491371155, "train/loss_error": 0.5411202311515808, "train/loss_total": 0.5343763828277588 }, { "epoch": 0.45979161100721344, "step": 1721, "train/loss_ctc": 0.882624626159668, "train/loss_error": 0.5017759799957275, "train/loss_total": 0.5779457092285156 }, { "epoch": 0.4600587763825808, "step": 1722, "train/loss_ctc": 1.713371753692627, "train/loss_error": 0.49475452303886414, "train/loss_total": 0.7384779453277588 }, { "epoch": 0.46032594175794816, "step": 1723, "train/loss_ctc": 1.0988824367523193, "train/loss_error": 0.44514143466949463, "train/loss_total": 0.5758896470069885 }, { "epoch": 0.4605931071333155, "step": 1724, "train/loss_ctc": 0.643949568271637, "train/loss_error": 0.5532346963882446, "train/loss_total": 0.571377694606781 }, { "epoch": 0.4608602725086829, "step": 1725, "train/loss_ctc": 0.6055353283882141, "train/loss_error": 0.4920353889465332, "train/loss_total": 0.5147354006767273 }, { "epoch": 0.46112743788405025, "step": 1726, "train/loss_ctc": 1.0720473527908325, "train/loss_error": 0.43185529112815857, "train/loss_total": 0.5598937273025513 }, { "epoch": 0.46139460325941756, "step": 1727, "train/loss_ctc": 1.0258923768997192, "train/loss_error": 0.450204998254776, "train/loss_total": 0.5653424859046936 }, { "epoch": 0.4616617686347849, "step": 1728, "train/loss_ctc": 0.8305425643920898, "train/loss_error": 0.5151402354240417, "train/loss_total": 0.5782207250595093 }, { 
"epoch": 0.4619289340101523, "step": 1729, "train/loss_ctc": 0.6506708860397339, "train/loss_error": 0.5079531073570251, "train/loss_total": 0.5364966988563538 }, { "epoch": 0.46219609938551964, "grad_norm": 1.3203837871551514, "learning_rate": 2.7231632380443496e-05, "loss": 0.5753, "step": 1730 }, { "epoch": 0.46219609938551964, "step": 1730, "train/loss_ctc": 1.2436189651489258, "train/loss_error": 0.47139737010002136, "train/loss_total": 0.6258416771888733 }, { "epoch": 0.462463264760887, "step": 1731, "train/loss_ctc": 0.8484674096107483, "train/loss_error": 0.5668227672576904, "train/loss_total": 0.6231517195701599 }, { "epoch": 0.46273043013625437, "step": 1732, "train/loss_ctc": 1.1588211059570312, "train/loss_error": 0.4068731665611267, "train/loss_total": 0.5572627782821655 }, { "epoch": 0.4629975955116217, "step": 1733, "train/loss_ctc": 0.7283199429512024, "train/loss_error": 0.4688059687614441, "train/loss_total": 0.5207087993621826 }, { "epoch": 0.46326476088698904, "step": 1734, "train/loss_ctc": 0.5654900074005127, "train/loss_error": 0.40951719880104065, "train/loss_total": 0.44071176648139954 }, { "epoch": 0.4635319262623564, "step": 1735, "train/loss_ctc": 0.7074934244155884, "train/loss_error": 0.5008411407470703, "train/loss_total": 0.5421715974807739 }, { "epoch": 0.46379909163772376, "step": 1736, "train/loss_ctc": 1.2992949485778809, "train/loss_error": 0.5463101863861084, "train/loss_total": 0.6969071626663208 }, { "epoch": 0.4640662570130911, "step": 1737, "train/loss_ctc": 0.9403249025344849, "train/loss_error": 0.4549694061279297, "train/loss_total": 0.5520405173301697 }, { "epoch": 0.4643334223884585, "step": 1738, "train/loss_ctc": 0.6357536315917969, "train/loss_error": 0.4672413468360901, "train/loss_total": 0.5009438395500183 }, { "epoch": 0.4646005877638258, "step": 1739, "train/loss_ctc": 1.175382137298584, "train/loss_error": 0.4393025040626526, "train/loss_total": 0.586518406867981 }, { "epoch": 0.46486775313919315, "grad_norm": 
1.301230549812317, "learning_rate": 2.7215602457921454e-05, "loss": 0.5646, "step": 1740 }, { "epoch": 0.46486775313919315, "step": 1740, "train/loss_ctc": 1.9970439672470093, "train/loss_error": 0.47436946630477905, "train/loss_total": 0.778904378414154 }, { "epoch": 0.4651349185145605, "step": 1741, "train/loss_ctc": 0.7291949987411499, "train/loss_error": 0.4580974876880646, "train/loss_total": 0.5123170018196106 }, { "epoch": 0.4654020838899279, "step": 1742, "train/loss_ctc": 0.5881571769714355, "train/loss_error": 0.5127154588699341, "train/loss_total": 0.5278037786483765 }, { "epoch": 0.46566924926529524, "step": 1743, "train/loss_ctc": 0.5980949401855469, "train/loss_error": 0.46564197540283203, "train/loss_total": 0.4921325743198395 }, { "epoch": 0.46593641464066254, "step": 1744, "train/loss_ctc": 1.0830682516098022, "train/loss_error": 0.4548795819282532, "train/loss_total": 0.5805172920227051 }, { "epoch": 0.4662035800160299, "step": 1745, "train/loss_ctc": 0.5420658588409424, "train/loss_error": 0.5176218748092651, "train/loss_total": 0.5225107073783875 }, { "epoch": 0.46647074539139727, "step": 1746, "train/loss_ctc": 0.8185349702835083, "train/loss_error": 0.5397334098815918, "train/loss_total": 0.595493733882904 }, { "epoch": 0.46673791076676463, "step": 1747, "train/loss_ctc": 1.185870885848999, "train/loss_error": 0.4539656639099121, "train/loss_total": 0.6003466844558716 }, { "epoch": 0.467005076142132, "step": 1748, "train/loss_ctc": 0.7527841329574585, "train/loss_error": 0.4880644977092743, "train/loss_total": 0.541008472442627 }, { "epoch": 0.46727224151749935, "step": 1749, "train/loss_ctc": 0.44164836406707764, "train/loss_error": 0.45066726207733154, "train/loss_total": 0.4488634765148163 }, { "epoch": 0.46753940689286666, "grad_norm": 2.9262571334838867, "learning_rate": 2.7199572535399412e-05, "loss": 0.56, "step": 1750 }, { "epoch": 0.46753940689286666, "step": 1750, "train/loss_ctc": 1.1576862335205078, "train/loss_error": 
0.5002511739730835, "train/loss_total": 0.6317381858825684 }, { "epoch": 0.467806572268234, "step": 1751, "train/loss_ctc": 0.7992799282073975, "train/loss_error": 0.5537449717521667, "train/loss_total": 0.6028519868850708 }, { "epoch": 0.4680737376436014, "step": 1752, "train/loss_ctc": 0.5616744756698608, "train/loss_error": 0.5068115592002869, "train/loss_total": 0.5177841186523438 }, { "epoch": 0.46834090301896875, "step": 1753, "train/loss_ctc": 0.9465431571006775, "train/loss_error": 0.49742117524147034, "train/loss_total": 0.5872455835342407 }, { "epoch": 0.4686080683943361, "step": 1754, "train/loss_ctc": 1.0606486797332764, "train/loss_error": 0.4643481373786926, "train/loss_total": 0.5836082696914673 }, { "epoch": 0.46887523376970347, "step": 1755, "train/loss_ctc": 0.7250447273254395, "train/loss_error": 0.4373994469642639, "train/loss_total": 0.4949285089969635 }, { "epoch": 0.4691423991450708, "step": 1756, "train/loss_ctc": 0.38860639929771423, "train/loss_error": 0.4624653160572052, "train/loss_total": 0.44769352674484253 }, { "epoch": 0.46940956452043814, "step": 1757, "train/loss_ctc": 0.7523423433303833, "train/loss_error": 0.5495008826255798, "train/loss_total": 0.5900691747665405 }, { "epoch": 0.4696767298958055, "step": 1758, "train/loss_ctc": 0.37484464049339294, "train/loss_error": 0.44169196486473083, "train/loss_total": 0.42832252383232117 }, { "epoch": 0.46994389527117286, "step": 1759, "train/loss_ctc": 0.8560441732406616, "train/loss_error": 0.5172264575958252, "train/loss_total": 0.5849900245666504 }, { "epoch": 0.4702110606465402, "grad_norm": 1.5664658546447754, "learning_rate": 2.718354261287737e-05, "loss": 0.5469, "step": 1760 }, { "epoch": 0.4702110606465402, "step": 1760, "train/loss_ctc": 0.837590754032135, "train/loss_error": 0.4236561954021454, "train/loss_total": 0.5064431428909302 }, { "epoch": 0.4704782260219076, "step": 1761, "train/loss_ctc": 0.7018814086914062, "train/loss_error": 0.5172407031059265, "train/loss_total": 
0.5541688799858093 }, { "epoch": 0.4707453913972749, "step": 1762, "train/loss_ctc": 1.1282727718353271, "train/loss_error": 0.5245018005371094, "train/loss_total": 0.645255982875824 }, { "epoch": 0.47101255677264225, "step": 1763, "train/loss_ctc": 0.9414933323860168, "train/loss_error": 0.4903305470943451, "train/loss_total": 0.5805631279945374 }, { "epoch": 0.4712797221480096, "step": 1764, "train/loss_ctc": 1.4340243339538574, "train/loss_error": 0.4978736340999603, "train/loss_total": 0.6851037740707397 }, { "epoch": 0.471546887523377, "step": 1765, "train/loss_ctc": 0.9344233274459839, "train/loss_error": 0.537434458732605, "train/loss_total": 0.6168322563171387 }, { "epoch": 0.47181405289874434, "step": 1766, "train/loss_ctc": 0.6236850619316101, "train/loss_error": 0.537715494632721, "train/loss_total": 0.5549094080924988 }, { "epoch": 0.4720812182741117, "step": 1767, "train/loss_ctc": 0.616009533405304, "train/loss_error": 0.5033923983573914, "train/loss_total": 0.5259158611297607 }, { "epoch": 0.472348383649479, "step": 1768, "train/loss_ctc": 1.0930310487747192, "train/loss_error": 0.4724769592285156, "train/loss_total": 0.5965877771377563 }, { "epoch": 0.47261554902484637, "step": 1769, "train/loss_ctc": 0.5621228218078613, "train/loss_error": 0.4902477264404297, "train/loss_total": 0.504622757434845 }, { "epoch": 0.47288271440021373, "grad_norm": 1.6456459760665894, "learning_rate": 2.7167512690355328e-05, "loss": 0.577, "step": 1770 }, { "epoch": 0.47288271440021373, "step": 1770, "train/loss_ctc": 0.6468148827552795, "train/loss_error": 0.4491572380065918, "train/loss_total": 0.48868876695632935 }, { "epoch": 0.4731498797755811, "step": 1771, "train/loss_ctc": 0.603725790977478, "train/loss_error": 0.5197580456733704, "train/loss_total": 0.5365515947341919 }, { "epoch": 0.47341704515094846, "step": 1772, "train/loss_ctc": 0.8288331031799316, "train/loss_error": 0.4857035279273987, "train/loss_total": 0.5543294548988342 }, { "epoch": 
0.47368421052631576, "step": 1773, "train/loss_ctc": 0.288287490606308, "train/loss_error": 0.4643990099430084, "train/loss_total": 0.4291767179965973 }, { "epoch": 0.4739513759016831, "step": 1774, "train/loss_ctc": 0.5997623205184937, "train/loss_error": 0.4808163642883301, "train/loss_total": 0.5046055316925049 }, { "epoch": 0.4742185412770505, "step": 1775, "train/loss_ctc": 0.7354345321655273, "train/loss_error": 0.4396246671676636, "train/loss_total": 0.4987866282463074 }, { "epoch": 0.47448570665241785, "step": 1776, "train/loss_ctc": 0.4692818522453308, "train/loss_error": 0.4759175777435303, "train/loss_total": 0.4745904207229614 }, { "epoch": 0.4747528720277852, "step": 1777, "train/loss_ctc": 1.3089817762374878, "train/loss_error": 0.46236109733581543, "train/loss_total": 0.6316852569580078 }, { "epoch": 0.4750200374031526, "step": 1778, "train/loss_ctc": 0.9436511397361755, "train/loss_error": 0.5331679582595825, "train/loss_total": 0.6152645945549011 }, { "epoch": 0.4752872027785199, "step": 1779, "train/loss_ctc": 1.0475975275039673, "train/loss_error": 0.5078719258308411, "train/loss_total": 0.6158170700073242 }, { "epoch": 0.47555436815388724, "grad_norm": 1.4465914964675903, "learning_rate": 2.715148276783329e-05, "loss": 0.5349, "step": 1780 }, { "epoch": 0.47555436815388724, "step": 1780, "train/loss_ctc": 0.9574860334396362, "train/loss_error": 0.5130298137664795, "train/loss_total": 0.6019210815429688 }, { "epoch": 0.4758215335292546, "step": 1781, "train/loss_ctc": 0.6648732423782349, "train/loss_error": 0.5252945423126221, "train/loss_total": 0.5532102584838867 }, { "epoch": 0.47608869890462197, "step": 1782, "train/loss_ctc": 1.2923811674118042, "train/loss_error": 0.5070573687553406, "train/loss_total": 0.6641221046447754 }, { "epoch": 0.4763558642799893, "step": 1783, "train/loss_ctc": 0.7788630723953247, "train/loss_error": 0.44402891397476196, "train/loss_total": 0.5109957456588745 }, { "epoch": 0.4766230296553567, "step": 1784, 
"train/loss_ctc": 0.6000111699104309, "train/loss_error": 0.4150258004665375, "train/loss_total": 0.45202288031578064 }, { "epoch": 0.476890195030724, "step": 1785, "train/loss_ctc": 1.6050565242767334, "train/loss_error": 0.48338064551353455, "train/loss_total": 0.7077158689498901 }, { "epoch": 0.47715736040609136, "step": 1786, "train/loss_ctc": 0.6678625345230103, "train/loss_error": 0.48445355892181396, "train/loss_total": 0.5211353302001953 }, { "epoch": 0.4774245257814587, "step": 1787, "train/loss_ctc": 0.8231152296066284, "train/loss_error": 0.46228599548339844, "train/loss_total": 0.5344518423080444 }, { "epoch": 0.4776916911568261, "step": 1788, "train/loss_ctc": 0.6799290180206299, "train/loss_error": 0.4805927276611328, "train/loss_total": 0.5204600095748901 }, { "epoch": 0.47795885653219344, "step": 1789, "train/loss_ctc": 0.9173966646194458, "train/loss_error": 0.4986879527568817, "train/loss_total": 0.5824297070503235 }, { "epoch": 0.4782260219075608, "grad_norm": 7.0782647132873535, "learning_rate": 2.7135452845311248e-05, "loss": 0.5648, "step": 1790 }, { "epoch": 0.4782260219075608, "step": 1790, "train/loss_ctc": 0.4712675213813782, "train/loss_error": 0.48652908205986023, "train/loss_total": 0.48347678780555725 }, { "epoch": 0.4784931872829281, "step": 1791, "train/loss_ctc": 0.9362298250198364, "train/loss_error": 0.492887020111084, "train/loss_total": 0.5815556049346924 }, { "epoch": 0.4787603526582955, "step": 1792, "train/loss_ctc": 1.0403789281845093, "train/loss_error": 0.5341442823410034, "train/loss_total": 0.6353912353515625 }, { "epoch": 0.47902751803366284, "step": 1793, "train/loss_ctc": 1.1768367290496826, "train/loss_error": 0.49693506956100464, "train/loss_total": 0.6329153776168823 }, { "epoch": 0.4792946834090302, "step": 1794, "train/loss_ctc": 0.5784932374954224, "train/loss_error": 0.5748878717422485, "train/loss_total": 0.5756089687347412 }, { "epoch": 0.47956184878439756, "step": 1795, "train/loss_ctc": 0.39691030979156494, 
"train/loss_error": 0.4426940083503723, "train/loss_total": 0.4335372745990753 }, { "epoch": 0.4798290141597649, "step": 1796, "train/loss_ctc": 0.5896270275115967, "train/loss_error": 0.4916689395904541, "train/loss_total": 0.5112605690956116 }, { "epoch": 0.48009617953513223, "step": 1797, "train/loss_ctc": 0.8303544521331787, "train/loss_error": 0.47872328758239746, "train/loss_total": 0.5490495562553406 }, { "epoch": 0.4803633449104996, "step": 1798, "train/loss_ctc": 1.7440769672393799, "train/loss_error": 0.4914371371269226, "train/loss_total": 0.741965115070343 }, { "epoch": 0.48063051028586695, "step": 1799, "train/loss_ctc": 0.9518206119537354, "train/loss_error": 0.5299512147903442, "train/loss_total": 0.6143251061439514 }, { "epoch": 0.4808976756612343, "grad_norm": 1.0624645948410034, "learning_rate": 2.711942292278921e-05, "loss": 0.5759, "step": 1800 }, { "epoch": 0.4808976756612343, "step": 1800, "train/loss_ctc": 0.9552634954452515, "train/loss_error": 0.4590194821357727, "train/loss_total": 0.5582683086395264 }, { "epoch": 0.4811648410366017, "step": 1801, "train/loss_ctc": 1.822455883026123, "train/loss_error": 0.4294767677783966, "train/loss_total": 0.7080726027488708 }, { "epoch": 0.481432006411969, "step": 1802, "train/loss_ctc": 0.3729633688926697, "train/loss_error": 0.46813008189201355, "train/loss_total": 0.4490967392921448 }, { "epoch": 0.48169917178733634, "step": 1803, "train/loss_ctc": 0.3990657329559326, "train/loss_error": 0.552547037601471, "train/loss_total": 0.5218507647514343 }, { "epoch": 0.4819663371627037, "step": 1804, "train/loss_ctc": 0.5073525905609131, "train/loss_error": 0.4122399389743805, "train/loss_total": 0.4312624931335449 }, { "epoch": 0.48223350253807107, "step": 1805, "train/loss_ctc": 0.8042616844177246, "train/loss_error": 0.48038771748542786, "train/loss_total": 0.545162558555603 }, { "epoch": 0.48250066791343843, "step": 1806, "train/loss_ctc": 0.8393641710281372, "train/loss_error": 0.5176782608032227, 
"train/loss_total": 0.5820154547691345 }, { "epoch": 0.4827678332888058, "step": 1807, "train/loss_ctc": 0.7909981608390808, "train/loss_error": 0.5383379459381104, "train/loss_total": 0.5888699889183044 }, { "epoch": 0.4830349986641731, "step": 1808, "train/loss_ctc": 1.1110827922821045, "train/loss_error": 0.5242317914962769, "train/loss_total": 0.6416019797325134 }, { "epoch": 0.48330216403954046, "step": 1809, "train/loss_ctc": 0.5545113682746887, "train/loss_error": 0.41156598925590515, "train/loss_total": 0.4401550889015198 }, { "epoch": 0.4835693294149078, "grad_norm": 2.7740731239318848, "learning_rate": 2.7103393000267167e-05, "loss": 0.5466, "step": 1810 }, { "epoch": 0.4835693294149078, "step": 1810, "train/loss_ctc": 0.9552551507949829, "train/loss_error": 0.49487292766571045, "train/loss_total": 0.586949348449707 }, { "epoch": 0.4838364947902752, "step": 1811, "train/loss_ctc": 0.8858271241188049, "train/loss_error": 0.44313740730285645, "train/loss_total": 0.5316753387451172 }, { "epoch": 0.48410366016564255, "step": 1812, "train/loss_ctc": 0.5651500821113586, "train/loss_error": 0.5037713646888733, "train/loss_total": 0.5160471200942993 }, { "epoch": 0.4843708255410099, "step": 1813, "train/loss_ctc": 0.827977180480957, "train/loss_error": 0.4711848795413971, "train/loss_total": 0.542543351650238 }, { "epoch": 0.4846379909163772, "step": 1814, "train/loss_ctc": 0.8184492588043213, "train/loss_error": 0.49997609853744507, "train/loss_total": 0.5636707544326782 }, { "epoch": 0.4849051562917446, "step": 1815, "train/loss_ctc": 1.1935956478118896, "train/loss_error": 0.5249979496002197, "train/loss_total": 0.6587175130844116 }, { "epoch": 0.48517232166711194, "step": 1816, "train/loss_ctc": 0.614086925983429, "train/loss_error": 0.5381088256835938, "train/loss_total": 0.5533044338226318 }, { "epoch": 0.4854394870424793, "step": 1817, "train/loss_ctc": 0.7761174440383911, "train/loss_error": 0.5458945631980896, "train/loss_total": 0.5919391512870789 }, { 
"epoch": 0.48570665241784666, "step": 1818, "train/loss_ctc": 0.5804386734962463, "train/loss_error": 0.471801221370697, "train/loss_total": 0.49352872371673584 }, { "epoch": 0.485973817793214, "step": 1819, "train/loss_ctc": 0.827298104763031, "train/loss_error": 0.5151201486587524, "train/loss_total": 0.5775557160377502 }, { "epoch": 0.48624098316858133, "grad_norm": 1.6534467935562134, "learning_rate": 2.7087363077745125e-05, "loss": 0.5616, "step": 1820 }, { "epoch": 0.48624098316858133, "step": 1820, "train/loss_ctc": 2.1053671836853027, "train/loss_error": 0.49619197845458984, "train/loss_total": 0.8180270195007324 }, { "epoch": 0.4865081485439487, "step": 1821, "train/loss_ctc": 0.7533798813819885, "train/loss_error": 0.4561360776424408, "train/loss_total": 0.5155848264694214 }, { "epoch": 0.48677531391931605, "step": 1822, "train/loss_ctc": 0.8641993999481201, "train/loss_error": 0.5219951868057251, "train/loss_total": 0.5904360413551331 }, { "epoch": 0.4870424792946834, "step": 1823, "train/loss_ctc": 1.1897222995758057, "train/loss_error": 0.5194106698036194, "train/loss_total": 0.6534730195999146 }, { "epoch": 0.4873096446700508, "step": 1824, "train/loss_ctc": 0.7873276472091675, "train/loss_error": 0.46556878089904785, "train/loss_total": 0.5299205780029297 }, { "epoch": 0.48757681004541814, "step": 1825, "train/loss_ctc": 0.6696836948394775, "train/loss_error": 0.4618970453739166, "train/loss_total": 0.5034543871879578 }, { "epoch": 0.48784397542078545, "step": 1826, "train/loss_ctc": 0.801548421382904, "train/loss_error": 0.4899500608444214, "train/loss_total": 0.5522697567939758 }, { "epoch": 0.4881111407961528, "step": 1827, "train/loss_ctc": 0.9967836141586304, "train/loss_error": 0.48155125975608826, "train/loss_total": 0.5845977067947388 }, { "epoch": 0.48837830617152017, "step": 1828, "train/loss_ctc": 0.9812539219856262, "train/loss_error": 0.5038117170333862, "train/loss_total": 0.5993001461029053 }, { "epoch": 0.48864547154688753, "step": 
1829, "train/loss_ctc": 1.2848875522613525, "train/loss_error": 0.502532958984375, "train/loss_total": 0.6590039134025574 }, { "epoch": 0.4889126369222549, "grad_norm": 1.4187167882919312, "learning_rate": 2.7071333155223083e-05, "loss": 0.6006, "step": 1830 }, { "epoch": 0.4889126369222549, "step": 1830, "train/loss_ctc": 0.9462878704071045, "train/loss_error": 0.5266033411026001, "train/loss_total": 0.6105402708053589 }, { "epoch": 0.4891798022976222, "step": 1831, "train/loss_ctc": 1.7588318586349487, "train/loss_error": 0.5003484487533569, "train/loss_total": 0.7520451545715332 }, { "epoch": 0.48944696767298956, "step": 1832, "train/loss_ctc": 0.9776418209075928, "train/loss_error": 0.4720655381679535, "train/loss_total": 0.5731807947158813 }, { "epoch": 0.4897141330483569, "step": 1833, "train/loss_ctc": 0.9414768218994141, "train/loss_error": 0.4868476688861847, "train/loss_total": 0.5777735114097595 }, { "epoch": 0.4899812984237243, "step": 1834, "train/loss_ctc": 1.7956751585006714, "train/loss_error": 0.5274688005447388, "train/loss_total": 0.7811100482940674 }, { "epoch": 0.49024846379909165, "step": 1835, "train/loss_ctc": 0.7281171083450317, "train/loss_error": 0.45678281784057617, "train/loss_total": 0.5110496878623962 }, { "epoch": 0.490515629174459, "step": 1836, "train/loss_ctc": 1.0461770296096802, "train/loss_error": 0.4848487079143524, "train/loss_total": 0.5971143841743469 }, { "epoch": 0.4907827945498263, "step": 1837, "train/loss_ctc": 0.35364270210266113, "train/loss_error": 0.4536632001399994, "train/loss_total": 0.4336591064929962 }, { "epoch": 0.4910499599251937, "step": 1838, "train/loss_ctc": 0.5247647166252136, "train/loss_error": 0.5541324615478516, "train/loss_total": 0.548258900642395 }, { "epoch": 0.49131712530056104, "step": 1839, "train/loss_ctc": 0.3559379577636719, "train/loss_error": 0.5066416263580322, "train/loss_total": 0.47650089859962463 }, { "epoch": 0.4915842906759284, "grad_norm": 2.3224363327026367, "learning_rate": 
2.7055303232701045e-05, "loss": 0.5861, "step": 1840 }, { "epoch": 0.4915842906759284, "step": 1840, "train/loss_ctc": 0.43788859248161316, "train/loss_error": 0.531840980052948, "train/loss_total": 0.5130504965782166 }, { "epoch": 0.49185145605129577, "step": 1841, "train/loss_ctc": 0.5686877965927124, "train/loss_error": 0.5214748978614807, "train/loss_total": 0.5309174656867981 }, { "epoch": 0.4921186214266631, "step": 1842, "train/loss_ctc": 0.8542982339859009, "train/loss_error": 0.44536691904067993, "train/loss_total": 0.5271531939506531 }, { "epoch": 0.49238578680203043, "step": 1843, "train/loss_ctc": 0.5244426131248474, "train/loss_error": 0.4443272352218628, "train/loss_total": 0.4603503346443176 }, { "epoch": 0.4926529521773978, "step": 1844, "train/loss_ctc": 0.5149096250534058, "train/loss_error": 0.47425439953804016, "train/loss_total": 0.48238545656204224 }, { "epoch": 0.49292011755276516, "step": 1845, "train/loss_ctc": 0.7087442278862, "train/loss_error": 0.4626535177230835, "train/loss_total": 0.5118716955184937 }, { "epoch": 0.4931872829281325, "step": 1846, "train/loss_ctc": 0.8213683366775513, "train/loss_error": 0.49222272634506226, "train/loss_total": 0.5580518245697021 }, { "epoch": 0.4934544483034999, "step": 1847, "train/loss_ctc": 0.47252166271209717, "train/loss_error": 0.4207402765750885, "train/loss_total": 0.43109655380249023 }, { "epoch": 0.49372161367886724, "step": 1848, "train/loss_ctc": 0.6689860820770264, "train/loss_error": 0.4347796142101288, "train/loss_total": 0.4816209077835083 }, { "epoch": 0.49398877905423455, "step": 1849, "train/loss_ctc": 0.8230946063995361, "train/loss_error": 0.48747363686561584, "train/loss_total": 0.5545978546142578 }, { "epoch": 0.4942559444296019, "grad_norm": 0.8551931977272034, "learning_rate": 2.7039273310179003e-05, "loss": 0.5051, "step": 1850 }, { "epoch": 0.4942559444296019, "step": 1850, "train/loss_ctc": 0.8067273497581482, "train/loss_error": 0.5296904444694519, "train/loss_total": 
0.5850978493690491 }, { "epoch": 0.4945231098049693, "step": 1851, "train/loss_ctc": 1.34049654006958, "train/loss_error": 0.535771906375885, "train/loss_total": 0.696716845035553 }, { "epoch": 0.49479027518033664, "step": 1852, "train/loss_ctc": 0.8454016447067261, "train/loss_error": 0.5150414109230042, "train/loss_total": 0.5811134576797485 }, { "epoch": 0.495057440555704, "step": 1853, "train/loss_ctc": 1.0591408014297485, "train/loss_error": 0.5218778252601624, "train/loss_total": 0.6293303966522217 }, { "epoch": 0.49532460593107136, "step": 1854, "train/loss_ctc": 0.7503117322921753, "train/loss_error": 0.4753243625164032, "train/loss_total": 0.5303218364715576 }, { "epoch": 0.49559177130643867, "step": 1855, "train/loss_ctc": 0.5480915307998657, "train/loss_error": 0.47147178649902344, "train/loss_total": 0.4867957532405853 }, { "epoch": 0.49585893668180603, "step": 1856, "train/loss_ctc": 0.4291495680809021, "train/loss_error": 0.4819944202899933, "train/loss_total": 0.47142544388771057 }, { "epoch": 0.4961261020571734, "step": 1857, "train/loss_ctc": 0.8201133012771606, "train/loss_error": 0.47835999727249146, "train/loss_total": 0.5467106699943542 }, { "epoch": 0.49639326743254075, "step": 1858, "train/loss_ctc": 1.2260470390319824, "train/loss_error": 0.4936199486255646, "train/loss_total": 0.6401053667068481 }, { "epoch": 0.4966604328079081, "step": 1859, "train/loss_ctc": 0.7129860520362854, "train/loss_error": 0.5179611444473267, "train/loss_total": 0.5569661259651184 }, { "epoch": 0.4969275981832754, "grad_norm": 3.43810772895813, "learning_rate": 2.702324338765696e-05, "loss": 0.5725, "step": 1860 }, { "epoch": 0.4969275981832754, "step": 1860, "train/loss_ctc": 0.7191165685653687, "train/loss_error": 0.4522503912448883, "train/loss_total": 0.5056236386299133 }, { "epoch": 0.4971947635586428, "step": 1861, "train/loss_ctc": 0.8961750268936157, "train/loss_error": 0.47651639580726624, "train/loss_total": 0.560448169708252 }, { "epoch": 
0.49746192893401014, "step": 1862, "train/loss_ctc": 1.1584956645965576, "train/loss_error": 0.5267400741577148, "train/loss_total": 0.6530911922454834 }, { "epoch": 0.4977290943093775, "step": 1863, "train/loss_ctc": 0.5475234389305115, "train/loss_error": 0.49520689249038696, "train/loss_total": 0.5056701898574829 }, { "epoch": 0.49799625968474487, "step": 1864, "train/loss_ctc": 0.5946962833404541, "train/loss_error": 0.47742557525634766, "train/loss_total": 0.50087970495224 }, { "epoch": 0.49826342506011223, "step": 1865, "train/loss_ctc": 0.5680558681488037, "train/loss_error": 0.5171670913696289, "train/loss_total": 0.5273448824882507 }, { "epoch": 0.49853059043547954, "step": 1866, "train/loss_ctc": 0.6935060620307922, "train/loss_error": 0.4774961471557617, "train/loss_total": 0.5206981301307678 }, { "epoch": 0.4987977558108469, "step": 1867, "train/loss_ctc": 0.5682988166809082, "train/loss_error": 0.44290846586227417, "train/loss_total": 0.4679865539073944 }, { "epoch": 0.49906492118621426, "step": 1868, "train/loss_ctc": 0.5501537322998047, "train/loss_error": 0.5168723464012146, "train/loss_total": 0.5235286355018616 }, { "epoch": 0.4993320865615816, "step": 1869, "train/loss_ctc": 1.3125391006469727, "train/loss_error": 0.4319438934326172, "train/loss_total": 0.6080629825592041 }, { "epoch": 0.499599251936949, "grad_norm": 1.3216900825500488, "learning_rate": 2.700721346513492e-05, "loss": 0.5373, "step": 1870 }, { "epoch": 0.499599251936949, "step": 1870, "train/loss_ctc": 1.6480451822280884, "train/loss_error": 0.4674752354621887, "train/loss_total": 0.7035892009735107 }, { "epoch": 0.49986641731231635, "step": 1871, "train/loss_ctc": 0.5855549573898315, "train/loss_error": 0.4639819264411926, "train/loss_total": 0.4882965385913849 }, { "epoch": 0.5001335826876837, "step": 1872, "train/loss_ctc": 0.4678175449371338, "train/loss_error": 0.47314944863319397, "train/loss_total": 0.47208309173583984 }, { "epoch": 0.500400748063051, "step": 1873, 
"train/loss_ctc": 0.9891514778137207, "train/loss_error": 0.489696204662323, "train/loss_total": 0.5895872712135315 }, { "epoch": 0.5006679134384184, "step": 1874, "train/loss_ctc": 1.1056948900222778, "train/loss_error": 0.5030297636985779, "train/loss_total": 0.6235628128051758 }, { "epoch": 0.5009350788137857, "step": 1875, "train/loss_ctc": 0.9738263487815857, "train/loss_error": 0.4942035675048828, "train/loss_total": 0.5901281237602234 }, { "epoch": 0.501202244189153, "step": 1876, "train/loss_ctc": 0.6563993692398071, "train/loss_error": 0.43654873967170715, "train/loss_total": 0.4805188775062561 }, { "epoch": 0.5014694095645205, "step": 1877, "train/loss_ctc": 0.6960849761962891, "train/loss_error": 0.4733874797821045, "train/loss_total": 0.5179269909858704 }, { "epoch": 0.5017365749398878, "step": 1878, "train/loss_ctc": 1.1359548568725586, "train/loss_error": 0.4871528744697571, "train/loss_total": 0.6169133186340332 }, { "epoch": 0.5020037403152552, "step": 1879, "train/loss_ctc": 0.3709595203399658, "train/loss_error": 0.4813421368598938, "train/loss_total": 0.4592656195163727 }, { "epoch": 0.5022709056906225, "grad_norm": 1.005273461341858, "learning_rate": 2.6991183542612877e-05, "loss": 0.5542, "step": 1880 }, { "epoch": 0.5022709056906225, "step": 1880, "train/loss_ctc": 0.797405481338501, "train/loss_error": 0.4683902859687805, "train/loss_total": 0.5341933369636536 }, { "epoch": 0.5025380710659898, "step": 1881, "train/loss_ctc": 0.6927933692932129, "train/loss_error": 0.5499777793884277, "train/loss_total": 0.5785409212112427 }, { "epoch": 0.5028052364413572, "step": 1882, "train/loss_ctc": 0.5328855514526367, "train/loss_error": 0.48149624466896057, "train/loss_total": 0.49177414178848267 }, { "epoch": 0.5030724018167245, "step": 1883, "train/loss_ctc": 0.5334036350250244, "train/loss_error": 0.5050088763237, "train/loss_total": 0.5106878280639648 }, { "epoch": 0.5033395671920919, "step": 1884, "train/loss_ctc": 0.7730911374092102, 
"train/loss_error": 0.4303254187107086, "train/loss_total": 0.4988785684108734 }, { "epoch": 0.5036067325674592, "step": 1885, "train/loss_ctc": 0.5708945393562317, "train/loss_error": 0.5374658107757568, "train/loss_total": 0.5441515445709229 }, { "epoch": 0.5038738979428267, "step": 1886, "train/loss_ctc": 0.9499059915542603, "train/loss_error": 0.4978616535663605, "train/loss_total": 0.5882705450057983 }, { "epoch": 0.504141063318194, "step": 1887, "train/loss_ctc": 0.23869089782238007, "train/loss_error": 0.4944849908351898, "train/loss_total": 0.4433261752128601 }, { "epoch": 0.5044082286935613, "step": 1888, "train/loss_ctc": 0.33567988872528076, "train/loss_error": 0.45452681183815, "train/loss_total": 0.43075743317604065 }, { "epoch": 0.5046753940689287, "step": 1889, "train/loss_ctc": 0.42657941579818726, "train/loss_error": 0.4855883717536926, "train/loss_total": 0.4737865924835205 }, { "epoch": 0.504942559444296, "grad_norm": 0.9720871448516846, "learning_rate": 2.6975153620090835e-05, "loss": 0.5094, "step": 1890 }, { "epoch": 0.504942559444296, "step": 1890, "train/loss_ctc": 0.5472267866134644, "train/loss_error": 0.5062466859817505, "train/loss_total": 0.5144426822662354 }, { "epoch": 0.5052097248196634, "step": 1891, "train/loss_ctc": 0.8955749273300171, "train/loss_error": 0.45778244733810425, "train/loss_total": 0.5453409552574158 }, { "epoch": 0.5054768901950307, "step": 1892, "train/loss_ctc": 0.2943224012851715, "train/loss_error": 0.4291927218437195, "train/loss_total": 0.40221866965293884 }, { "epoch": 0.505744055570398, "step": 1893, "train/loss_ctc": 0.6666575074195862, "train/loss_error": 0.5916845202445984, "train/loss_total": 0.6066791415214539 }, { "epoch": 0.5060112209457654, "step": 1894, "train/loss_ctc": 0.758023202419281, "train/loss_error": 0.4400634765625, "train/loss_total": 0.5036554336547852 }, { "epoch": 0.5062783863211328, "step": 1895, "train/loss_ctc": 1.4239121675491333, "train/loss_error": 0.49159374833106995, 
"train/loss_total": 0.6780574321746826 }, { "epoch": 0.5065455516965002, "step": 1896, "train/loss_ctc": 0.5971482396125793, "train/loss_error": 0.4569992423057556, "train/loss_total": 0.48502904176712036 }, { "epoch": 0.5068127170718675, "step": 1897, "train/loss_ctc": 0.9670853614807129, "train/loss_error": 0.4851764738559723, "train/loss_total": 0.5815582275390625 }, { "epoch": 0.5070798824472348, "step": 1898, "train/loss_ctc": 0.6691323518753052, "train/loss_error": 0.46422290802001953, "train/loss_total": 0.5052047967910767 }, { "epoch": 0.5073470478226022, "step": 1899, "train/loss_ctc": 0.7330787777900696, "train/loss_error": 0.46071872115135193, "train/loss_total": 0.5151907205581665 }, { "epoch": 0.5076142131979695, "grad_norm": 1.5303609371185303, "learning_rate": 2.6959123697568797e-05, "loss": 0.5337, "step": 1900 }, { "epoch": 0.5076142131979695, "step": 1900, "train/loss_ctc": 0.680156946182251, "train/loss_error": 0.4773719012737274, "train/loss_total": 0.517928957939148 }, { "epoch": 0.5078813785733369, "step": 1901, "train/loss_ctc": 1.5298328399658203, "train/loss_error": 0.4651075601577759, "train/loss_total": 0.6780526638031006 }, { "epoch": 0.5081485439487042, "step": 1902, "train/loss_ctc": 1.2416577339172363, "train/loss_error": 0.5934969782829285, "train/loss_total": 0.723129153251648 }, { "epoch": 0.5084157093240717, "step": 1903, "train/loss_ctc": 1.5896720886230469, "train/loss_error": 0.5303773283958435, "train/loss_total": 0.7422362565994263 }, { "epoch": 0.508682874699439, "step": 1904, "train/loss_ctc": 0.972557783126831, "train/loss_error": 0.4720276892185211, "train/loss_total": 0.5721337199211121 }, { "epoch": 0.5089500400748063, "step": 1905, "train/loss_ctc": 0.7896476984024048, "train/loss_error": 0.47556638717651367, "train/loss_total": 0.5383826494216919 }, { "epoch": 0.5092172054501737, "step": 1906, "train/loss_ctc": 0.5347006320953369, "train/loss_error": 0.49888768792152405, "train/loss_total": 0.5060502886772156 }, { 
"epoch": 0.509484370825541, "step": 1907, "train/loss_ctc": 0.8251707553863525, "train/loss_error": 0.49262920022010803, "train/loss_total": 0.5591375231742859 }, { "epoch": 0.5097515362009084, "step": 1908, "train/loss_ctc": 1.0134291648864746, "train/loss_error": 0.5675564408302307, "train/loss_total": 0.6567310094833374 }, { "epoch": 0.5100187015762757, "step": 1909, "train/loss_ctc": 0.7398759722709656, "train/loss_error": 0.46960699558258057, "train/loss_total": 0.5236607789993286 }, { "epoch": 0.510285866951643, "grad_norm": 1.2313345670700073, "learning_rate": 2.6943093775046755e-05, "loss": 0.6017, "step": 1910 }, { "epoch": 0.510285866951643, "step": 1910, "train/loss_ctc": 1.2560310363769531, "train/loss_error": 0.5339027047157288, "train/loss_total": 0.6783283948898315 }, { "epoch": 0.5105530323270104, "step": 1911, "train/loss_ctc": 0.7186596393585205, "train/loss_error": 0.4918704926967621, "train/loss_total": 0.5372283458709717 }, { "epoch": 0.5108201977023777, "step": 1912, "train/loss_ctc": 1.339608073234558, "train/loss_error": 0.5416676998138428, "train/loss_total": 0.7012557983398438 }, { "epoch": 0.5110873630777452, "step": 1913, "train/loss_ctc": 1.2117472887039185, "train/loss_error": 0.5195788741111755, "train/loss_total": 0.6580125689506531 }, { "epoch": 0.5113545284531125, "step": 1914, "train/loss_ctc": 1.0020297765731812, "train/loss_error": 0.5289512276649475, "train/loss_total": 0.6235669851303101 }, { "epoch": 0.5116216938284799, "step": 1915, "train/loss_ctc": 0.8460618257522583, "train/loss_error": 0.4707832336425781, "train/loss_total": 0.5458389520645142 }, { "epoch": 0.5118888592038472, "step": 1916, "train/loss_ctc": 1.438169002532959, "train/loss_error": 0.5372126698493958, "train/loss_total": 0.7174039483070374 }, { "epoch": 0.5121560245792145, "step": 1917, "train/loss_ctc": 1.0432703495025635, "train/loss_error": 0.5284384489059448, "train/loss_total": 0.6314048767089844 }, { "epoch": 0.5124231899545819, "step": 1918, 
"train/loss_ctc": 1.1575462818145752, "train/loss_error": 0.5230383276939392, "train/loss_total": 0.6499398946762085 }, { "epoch": 0.5126903553299492, "step": 1919, "train/loss_ctc": 0.7891975045204163, "train/loss_error": 0.49980196356773376, "train/loss_total": 0.5576810836791992 }, { "epoch": 0.5129575207053166, "grad_norm": 2.1877784729003906, "learning_rate": 2.6927063852524713e-05, "loss": 0.6301, "step": 1920 }, { "epoch": 0.5129575207053166, "step": 1920, "train/loss_ctc": 0.8942033052444458, "train/loss_error": 0.4535484313964844, "train/loss_total": 0.5416793823242188 }, { "epoch": 0.513224686080684, "step": 1921, "train/loss_ctc": 0.656969428062439, "train/loss_error": 0.43574631214141846, "train/loss_total": 0.47999095916748047 }, { "epoch": 0.5134918514560513, "step": 1922, "train/loss_ctc": 0.5992405414581299, "train/loss_error": 0.5309561491012573, "train/loss_total": 0.5446130633354187 }, { "epoch": 0.5137590168314187, "step": 1923, "train/loss_ctc": 0.3325810432434082, "train/loss_error": 0.4412883222103119, "train/loss_total": 0.41954687237739563 }, { "epoch": 0.514026182206786, "step": 1924, "train/loss_ctc": 0.9956830739974976, "train/loss_error": 0.5266477465629578, "train/loss_total": 0.6204547882080078 }, { "epoch": 0.5142933475821534, "step": 1925, "train/loss_ctc": 1.5135042667388916, "train/loss_error": 0.4955643117427826, "train/loss_total": 0.6991522908210754 }, { "epoch": 0.5145605129575207, "step": 1926, "train/loss_ctc": 0.9657490849494934, "train/loss_error": 0.5284769535064697, "train/loss_total": 0.6159313917160034 }, { "epoch": 0.514827678332888, "step": 1927, "train/loss_ctc": 0.8265988826751709, "train/loss_error": 0.4337790012359619, "train/loss_total": 0.5123429894447327 }, { "epoch": 0.5150948437082554, "step": 1928, "train/loss_ctc": 0.9689902663230896, "train/loss_error": 0.4963253438472748, "train/loss_total": 0.5908583402633667 }, { "epoch": 0.5153620090836227, "step": 1929, "train/loss_ctc": 0.747669517993927, 
"train/loss_error": 0.4902884066104889, "train/loss_total": 0.5417646169662476 }, { "epoch": 0.5156291744589901, "grad_norm": 1.528369426727295, "learning_rate": 2.691103393000267e-05, "loss": 0.5566, "step": 1930 }, { "epoch": 0.5156291744589901, "step": 1930, "train/loss_ctc": 0.572281002998352, "train/loss_error": 0.4713387191295624, "train/loss_total": 0.4915271997451782 }, { "epoch": 0.5158963398343575, "step": 1931, "train/loss_ctc": 0.6252992153167725, "train/loss_error": 0.5383346676826477, "train/loss_total": 0.5557276010513306 }, { "epoch": 0.5161635052097249, "step": 1932, "train/loss_ctc": 0.5372791886329651, "train/loss_error": 0.49751633405685425, "train/loss_total": 0.5054689049720764 }, { "epoch": 0.5164306705850922, "step": 1933, "train/loss_ctc": 0.609021008014679, "train/loss_error": 0.5459429621696472, "train/loss_total": 0.5585585832595825 }, { "epoch": 0.5166978359604595, "step": 1934, "train/loss_ctc": 1.0411510467529297, "train/loss_error": 0.5040030479431152, "train/loss_total": 0.611432671546936 }, { "epoch": 0.5169650013358269, "step": 1935, "train/loss_ctc": 0.7798787355422974, "train/loss_error": 0.4849705696105957, "train/loss_total": 0.543952226638794 }, { "epoch": 0.5172321667111942, "step": 1936, "train/loss_ctc": 1.0258283615112305, "train/loss_error": 0.5263725519180298, "train/loss_total": 0.6262637376785278 }, { "epoch": 0.5174993320865616, "step": 1937, "train/loss_ctc": 1.2037417888641357, "train/loss_error": 0.4783872067928314, "train/loss_total": 0.6234581470489502 }, { "epoch": 0.5177664974619289, "step": 1938, "train/loss_ctc": 1.279390811920166, "train/loss_error": 0.48199886083602905, "train/loss_total": 0.6414772868156433 }, { "epoch": 0.5180336628372962, "step": 1939, "train/loss_ctc": 0.6353675127029419, "train/loss_error": 0.4544248580932617, "train/loss_total": 0.4906134009361267 }, { "epoch": 0.5183008282126637, "grad_norm": 1.0716285705566406, "learning_rate": 2.689500400748063e-05, "loss": 0.5648, "step": 1940 }, 
{ "epoch": 0.5183008282126637, "step": 1940, "train/loss_ctc": 1.1464605331420898, "train/loss_error": 0.47385770082473755, "train/loss_total": 0.6083782911300659 }, { "epoch": 0.518567993588031, "step": 1941, "train/loss_ctc": 1.035651683807373, "train/loss_error": 0.473992258310318, "train/loss_total": 0.586324155330658 }, { "epoch": 0.5188351589633984, "step": 1942, "train/loss_ctc": 0.6350387334823608, "train/loss_error": 0.42393437027931213, "train/loss_total": 0.4661552608013153 }, { "epoch": 0.5191023243387657, "step": 1943, "train/loss_ctc": 0.7041726112365723, "train/loss_error": 0.4689725339412689, "train/loss_total": 0.5160125494003296 }, { "epoch": 0.5193694897141331, "step": 1944, "train/loss_ctc": 1.2314153909683228, "train/loss_error": 0.5933098793029785, "train/loss_total": 0.7209309935569763 }, { "epoch": 0.5196366550895004, "step": 1945, "train/loss_ctc": 0.6845033168792725, "train/loss_error": 0.5070317387580872, "train/loss_total": 0.5425260663032532 }, { "epoch": 0.5199038204648677, "step": 1946, "train/loss_ctc": 0.770793080329895, "train/loss_error": 0.4649742841720581, "train/loss_total": 0.5261380672454834 }, { "epoch": 0.5201709858402351, "step": 1947, "train/loss_ctc": 0.6653980016708374, "train/loss_error": 0.457326203584671, "train/loss_total": 0.4989405870437622 }, { "epoch": 0.5204381512156024, "step": 1948, "train/loss_ctc": 0.6226885318756104, "train/loss_error": 0.5205296277999878, "train/loss_total": 0.5409613847732544 }, { "epoch": 0.5207053165909699, "step": 1949, "train/loss_ctc": 0.990873396396637, "train/loss_error": 0.4815137982368469, "train/loss_total": 0.583385705947876 }, { "epoch": 0.5209724819663372, "grad_norm": 1.4721667766571045, "learning_rate": 2.6878974084958587e-05, "loss": 0.559, "step": 1950 }, { "epoch": 0.5209724819663372, "step": 1950, "train/loss_ctc": 0.7659547328948975, "train/loss_error": 0.45434561371803284, "train/loss_total": 0.5166674256324768 }, { "epoch": 0.5212396473417045, "step": 1951, 
"train/loss_ctc": 0.7704789638519287, "train/loss_error": 0.4535634517669678, "train/loss_total": 0.51694655418396 }, { "epoch": 0.5215068127170719, "step": 1952, "train/loss_ctc": 1.125368595123291, "train/loss_error": 0.4666937589645386, "train/loss_total": 0.5984287261962891 }, { "epoch": 0.5217739780924392, "step": 1953, "train/loss_ctc": 0.9064292907714844, "train/loss_error": 0.5368151664733887, "train/loss_total": 0.6107380390167236 }, { "epoch": 0.5220411434678066, "step": 1954, "train/loss_ctc": 1.1951898336410522, "train/loss_error": 0.4594871699810028, "train/loss_total": 0.6066277027130127 }, { "epoch": 0.5223083088431739, "step": 1955, "train/loss_ctc": 0.5588418841362, "train/loss_error": 0.4573252201080322, "train/loss_total": 0.47762858867645264 }, { "epoch": 0.5225754742185412, "step": 1956, "train/loss_ctc": 0.40681588649749756, "train/loss_error": 0.4785325229167938, "train/loss_total": 0.46418920159339905 }, { "epoch": 0.5228426395939086, "step": 1957, "train/loss_ctc": 0.4287513494491577, "train/loss_error": 0.4836755394935608, "train/loss_total": 0.47269073128700256 }, { "epoch": 0.523109804969276, "step": 1958, "train/loss_ctc": 0.8071886897087097, "train/loss_error": 0.4738166630268097, "train/loss_total": 0.5404910445213318 }, { "epoch": 0.5233769703446434, "step": 1959, "train/loss_ctc": 0.5763117074966431, "train/loss_error": 0.5115600824356079, "train/loss_total": 0.5245104432106018 }, { "epoch": 0.5236441357200107, "grad_norm": 2.0690855979919434, "learning_rate": 2.686294416243655e-05, "loss": 0.5329, "step": 1960 }, { "epoch": 0.5236441357200107, "step": 1960, "train/loss_ctc": 0.6920216679573059, "train/loss_error": 0.5068231821060181, "train/loss_total": 0.5438628792762756 }, { "epoch": 0.5239113010953781, "step": 1961, "train/loss_ctc": 0.413145512342453, "train/loss_error": 0.48177751898765564, "train/loss_total": 0.46805113554000854 }, { "epoch": 0.5241784664707454, "step": 1962, "train/loss_ctc": 1.1653342247009277, 
"train/loss_error": 0.5334453582763672, "train/loss_total": 0.6598231196403503 }, { "epoch": 0.5244456318461127, "step": 1963, "train/loss_ctc": 0.614316463470459, "train/loss_error": 0.48179665207862854, "train/loss_total": 0.5083006620407104 }, { "epoch": 0.5247127972214801, "step": 1964, "train/loss_ctc": 0.7054312229156494, "train/loss_error": 0.4933790862560272, "train/loss_total": 0.5357894897460938 }, { "epoch": 0.5249799625968474, "step": 1965, "train/loss_ctc": 1.0857346057891846, "train/loss_error": 0.4901261329650879, "train/loss_total": 0.6092478036880493 }, { "epoch": 0.5252471279722148, "step": 1966, "train/loss_ctc": 0.8357124328613281, "train/loss_error": 0.5057715177536011, "train/loss_total": 0.5717597007751465 }, { "epoch": 0.5255142933475822, "step": 1967, "train/loss_ctc": 0.5246742963790894, "train/loss_error": 0.49282363057136536, "train/loss_total": 0.49919378757476807 }, { "epoch": 0.5257814587229495, "step": 1968, "train/loss_ctc": 2.3163902759552, "train/loss_error": 0.5077959895133972, "train/loss_total": 0.8695148229598999 }, { "epoch": 0.5260486240983169, "step": 1969, "train/loss_ctc": 1.0986051559448242, "train/loss_error": 0.4851747155189514, "train/loss_total": 0.607860803604126 }, { "epoch": 0.5263157894736842, "grad_norm": 5.90981912612915, "learning_rate": 2.6846914239914507e-05, "loss": 0.5873, "step": 1970 }, { "epoch": 0.5263157894736842, "step": 1970, "train/loss_ctc": 0.9216995239257812, "train/loss_error": 0.48674166202545166, "train/loss_total": 0.5737332105636597 }, { "epoch": 0.5265829548490516, "step": 1971, "train/loss_ctc": 0.5782532691955566, "train/loss_error": 0.42226654291152954, "train/loss_total": 0.4534638822078705 }, { "epoch": 0.5268501202244189, "step": 1972, "train/loss_ctc": 0.8999265432357788, "train/loss_error": 0.49924376606941223, "train/loss_total": 0.5793803334236145 }, { "epoch": 0.5271172855997863, "step": 1973, "train/loss_ctc": 0.7826344966888428, "train/loss_error": 0.4988285303115845, 
"train/loss_total": 0.5555897355079651 }, { "epoch": 0.5273844509751536, "step": 1974, "train/loss_ctc": 0.5452723503112793, "train/loss_error": 0.5690402984619141, "train/loss_total": 0.5642867088317871 }, { "epoch": 0.5276516163505209, "step": 1975, "train/loss_ctc": 0.761962354183197, "train/loss_error": 0.44024744629859924, "train/loss_total": 0.5045904517173767 }, { "epoch": 0.5279187817258884, "step": 1976, "train/loss_ctc": 0.6582666635513306, "train/loss_error": 0.5339187383651733, "train/loss_total": 0.5587882995605469 }, { "epoch": 0.5281859471012557, "step": 1977, "train/loss_ctc": 0.9201760292053223, "train/loss_error": 0.5093547105789185, "train/loss_total": 0.5915189981460571 }, { "epoch": 0.5284531124766231, "step": 1978, "train/loss_ctc": 0.8094145059585571, "train/loss_error": 0.44926130771636963, "train/loss_total": 0.521291971206665 }, { "epoch": 0.5287202778519904, "step": 1979, "train/loss_ctc": 1.4119458198547363, "train/loss_error": 0.48508021235466003, "train/loss_total": 0.6704533100128174 }, { "epoch": 0.5289874432273577, "grad_norm": 2.1921374797821045, "learning_rate": 2.6830884317392468e-05, "loss": 0.5573, "step": 1980 }, { "epoch": 0.5289874432273577, "step": 1980, "train/loss_ctc": 0.743240475654602, "train/loss_error": 0.46761754155158997, "train/loss_total": 0.5227421522140503 }, { "epoch": 0.5292546086027251, "step": 1981, "train/loss_ctc": 0.6311296820640564, "train/loss_error": 0.5202569365501404, "train/loss_total": 0.5424314737319946 }, { "epoch": 0.5295217739780924, "step": 1982, "train/loss_ctc": 0.6461219787597656, "train/loss_error": 0.4931618273258209, "train/loss_total": 0.5237538814544678 }, { "epoch": 0.5297889393534598, "step": 1983, "train/loss_ctc": 0.8532775640487671, "train/loss_error": 0.4792827367782593, "train/loss_total": 0.5540816783905029 }, { "epoch": 0.5300561047288271, "step": 1984, "train/loss_ctc": 0.4940182864665985, "train/loss_error": 0.47371673583984375, "train/loss_total": 0.47777706384658813 }, { 
"epoch": 0.5303232701041944, "step": 1985, "train/loss_ctc": 0.9267599582672119, "train/loss_error": 0.44668135046958923, "train/loss_total": 0.5426970720291138 }, { "epoch": 0.5305904354795619, "step": 1986, "train/loss_ctc": 0.6572375297546387, "train/loss_error": 0.5763318538665771, "train/loss_total": 0.5925130248069763 }, { "epoch": 0.5308576008549292, "step": 1987, "train/loss_ctc": 0.6009742617607117, "train/loss_error": 0.6288419961929321, "train/loss_total": 0.6232684850692749 }, { "epoch": 0.5311247662302966, "step": 1988, "train/loss_ctc": 0.9422667026519775, "train/loss_error": 0.45999324321746826, "train/loss_total": 0.5564479827880859 }, { "epoch": 0.5313919316056639, "step": 1989, "train/loss_ctc": 0.834075927734375, "train/loss_error": 0.45121142268180847, "train/loss_total": 0.5277843475341797 }, { "epoch": 0.5316590969810313, "grad_norm": 2.6554555892944336, "learning_rate": 2.6814854394870426e-05, "loss": 0.5463, "step": 1990 }, { "epoch": 0.5316590969810313, "step": 1990, "train/loss_ctc": 0.4500264525413513, "train/loss_error": 0.5161029100418091, "train/loss_total": 0.5028876066207886 }, { "epoch": 0.5319262623563986, "step": 1991, "train/loss_ctc": 0.8979281783103943, "train/loss_error": 0.47960859537124634, "train/loss_total": 0.5632724761962891 }, { "epoch": 0.5321934277317659, "step": 1992, "train/loss_ctc": 0.48892295360565186, "train/loss_error": 0.4825306534767151, "train/loss_total": 0.48380911350250244 }, { "epoch": 0.5324605931071333, "step": 1993, "train/loss_ctc": 0.34798768162727356, "train/loss_error": 0.49751678109169006, "train/loss_total": 0.4676109850406647 }, { "epoch": 0.5327277584825006, "step": 1994, "train/loss_ctc": 0.6581308245658875, "train/loss_error": 0.45247790217399597, "train/loss_total": 0.4936084747314453 }, { "epoch": 0.5329949238578681, "step": 1995, "train/loss_ctc": 0.600861668586731, "train/loss_error": 0.4924774765968323, "train/loss_total": 0.514154314994812 }, { "epoch": 0.5332620892332354, "step": 
1996, "train/loss_ctc": 0.5867658853530884, "train/loss_error": 0.48303863406181335, "train/loss_total": 0.5037841200828552 }, { "epoch": 0.5335292546086027, "step": 1997, "train/loss_ctc": 0.6171690225601196, "train/loss_error": 0.4778018891811371, "train/loss_total": 0.5056753158569336 }, { "epoch": 0.5337964199839701, "step": 1998, "train/loss_ctc": 0.5974406599998474, "train/loss_error": 0.4695379137992859, "train/loss_total": 0.49511849880218506 }, { "epoch": 0.5340635853593374, "step": 1999, "train/loss_ctc": 1.337720274925232, "train/loss_error": 0.4493355453014374, "train/loss_total": 0.6270124912261963 }, { "epoch": 0.5343307507347048, "grad_norm": 1.623711109161377, "learning_rate": 2.6798824472348384e-05, "loss": 0.5157, "step": 2000 }, { "epoch": 0.5343307507347048, "step": 2000, "train/loss_ctc": 0.8354886770248413, "train/loss_error": 0.4223324954509735, "train/loss_total": 0.504963755607605 }, { "epoch": 0.5345979161100721, "step": 2001, "train/loss_ctc": 0.8733242750167847, "train/loss_error": 0.4740966856479645, "train/loss_total": 0.5539422035217285 }, { "epoch": 0.5348650814854395, "step": 2002, "train/loss_ctc": 0.8117237091064453, "train/loss_error": 0.5472634434700012, "train/loss_total": 0.6001554727554321 }, { "epoch": 0.5351322468608068, "step": 2003, "train/loss_ctc": 0.6265661716461182, "train/loss_error": 0.4923900067806244, "train/loss_total": 0.5192252397537231 }, { "epoch": 0.5353994122361742, "step": 2004, "train/loss_ctc": 0.3961327373981476, "train/loss_error": 0.4715512990951538, "train/loss_total": 0.4564675986766815 }, { "epoch": 0.5356665776115416, "step": 2005, "train/loss_ctc": 1.2934281826019287, "train/loss_error": 0.506428062915802, "train/loss_total": 0.6638281345367432 }, { "epoch": 0.5359337429869089, "step": 2006, "train/loss_ctc": 0.6098083257675171, "train/loss_error": 0.45732325315475464, "train/loss_total": 0.48782026767730713 }, { "epoch": 0.5362009083622763, "step": 2007, "train/loss_ctc": 1.8562393188476562, 
"train/loss_error": 0.48497459292411804, "train/loss_total": 0.7592275142669678 }, { "epoch": 0.5364680737376436, "step": 2008, "train/loss_ctc": 0.8560696244239807, "train/loss_error": 0.4865931272506714, "train/loss_total": 0.5604884624481201 }, { "epoch": 0.5367352391130109, "step": 2009, "train/loss_ctc": 0.7800741195678711, "train/loss_error": 0.5282811522483826, "train/loss_total": 0.5786397457122803 }, { "epoch": 0.5370024044883783, "grad_norm": 1.0335036516189575, "learning_rate": 2.6782794549826343e-05, "loss": 0.5685, "step": 2010 }, { "epoch": 0.5370024044883783, "step": 2010, "train/loss_ctc": 1.2389384508132935, "train/loss_error": 0.5167080760002136, "train/loss_total": 0.6611541509628296 }, { "epoch": 0.5372695698637456, "step": 2011, "train/loss_ctc": 1.0936912298202515, "train/loss_error": 0.5111739039421082, "train/loss_total": 0.6276773810386658 }, { "epoch": 0.537536735239113, "step": 2012, "train/loss_ctc": 0.6915228366851807, "train/loss_error": 0.4937824010848999, "train/loss_total": 0.533330500125885 }, { "epoch": 0.5378039006144804, "step": 2013, "train/loss_ctc": 1.329609990119934, "train/loss_error": 0.4939765930175781, "train/loss_total": 0.6611032485961914 }, { "epoch": 0.5380710659898477, "step": 2014, "train/loss_ctc": 0.5962321162223816, "train/loss_error": 0.49672988057136536, "train/loss_total": 0.5166302919387817 }, { "epoch": 0.5383382313652151, "step": 2015, "train/loss_ctc": 2.18353271484375, "train/loss_error": 0.4394918382167816, "train/loss_total": 0.7883000373840332 }, { "epoch": 0.5386053967405824, "step": 2016, "train/loss_ctc": 0.9460763931274414, "train/loss_error": 0.4595204293727875, "train/loss_total": 0.5568316578865051 }, { "epoch": 0.5388725621159498, "step": 2017, "train/loss_ctc": 1.263925313949585, "train/loss_error": 0.4819708466529846, "train/loss_total": 0.6383617520332336 }, { "epoch": 0.5391397274913171, "step": 2018, "train/loss_ctc": 0.8693198561668396, "train/loss_error": 0.525047242641449, 
"train/loss_total": 0.5939017534255981 }, { "epoch": 0.5394068928666845, "step": 2019, "train/loss_ctc": 1.253156304359436, "train/loss_error": 0.4677087366580963, "train/loss_total": 0.6247982978820801 }, { "epoch": 0.5396740582420518, "grad_norm": 1.932521939277649, "learning_rate": 2.6766764627304304e-05, "loss": 0.6202, "step": 2020 }, { "epoch": 0.5396740582420518, "step": 2020, "train/loss_ctc": 0.8931070566177368, "train/loss_error": 0.44294631481170654, "train/loss_total": 0.5329784750938416 }, { "epoch": 0.5399412236174191, "step": 2021, "train/loss_ctc": 1.4457883834838867, "train/loss_error": 0.5363092422485352, "train/loss_total": 0.7182050943374634 }, { "epoch": 0.5402083889927866, "step": 2022, "train/loss_ctc": 0.6852400302886963, "train/loss_error": 0.482655793428421, "train/loss_total": 0.5231726169586182 }, { "epoch": 0.5404755543681539, "step": 2023, "train/loss_ctc": 0.9123678207397461, "train/loss_error": 0.42691537737846375, "train/loss_total": 0.5240058898925781 }, { "epoch": 0.5407427197435213, "step": 2024, "train/loss_ctc": 0.7689051032066345, "train/loss_error": 0.5067163705825806, "train/loss_total": 0.5591541528701782 }, { "epoch": 0.5410098851188886, "step": 2025, "train/loss_ctc": 0.4509468674659729, "train/loss_error": 0.4878584146499634, "train/loss_total": 0.48047611117362976 }, { "epoch": 0.5412770504942559, "step": 2026, "train/loss_ctc": 0.6737258434295654, "train/loss_error": 0.5986810326576233, "train/loss_total": 0.6136900186538696 }, { "epoch": 0.5415442158696233, "step": 2027, "train/loss_ctc": 1.2331585884094238, "train/loss_error": 0.43023836612701416, "train/loss_total": 0.5908223986625671 }, { "epoch": 0.5418113812449906, "step": 2028, "train/loss_ctc": 0.7560993432998657, "train/loss_error": 0.5035766363143921, "train/loss_total": 0.5540812015533447 }, { "epoch": 0.542078546620358, "step": 2029, "train/loss_ctc": 0.977408230304718, "train/loss_error": 0.45246100425720215, "train/loss_total": 0.5574504733085632 }, { 
"epoch": 0.5423457119957253, "grad_norm": 1.891918659210205, "learning_rate": 2.6750734704782262e-05, "loss": 0.5654, "step": 2030 }, { "epoch": 0.5423457119957253, "step": 2030, "train/loss_ctc": 0.585628867149353, "train/loss_error": 0.47933897376060486, "train/loss_total": 0.5005969405174255 }, { "epoch": 0.5426128773710928, "step": 2031, "train/loss_ctc": 0.7942174673080444, "train/loss_error": 0.49880900979042053, "train/loss_total": 0.5578907132148743 }, { "epoch": 0.5428800427464601, "step": 2032, "train/loss_ctc": 0.5881825685501099, "train/loss_error": 0.47920823097229004, "train/loss_total": 0.501003086566925 }, { "epoch": 0.5431472081218274, "step": 2033, "train/loss_ctc": 0.9310426712036133, "train/loss_error": 0.5317668914794922, "train/loss_total": 0.6116220355033875 }, { "epoch": 0.5434143734971948, "step": 2034, "train/loss_ctc": 0.5931129455566406, "train/loss_error": 0.49976587295532227, "train/loss_total": 0.5184352993965149 }, { "epoch": 0.5436815388725621, "step": 2035, "train/loss_ctc": 0.764299750328064, "train/loss_error": 0.4009582996368408, "train/loss_total": 0.47362661361694336 }, { "epoch": 0.5439487042479295, "step": 2036, "train/loss_ctc": 0.8337475061416626, "train/loss_error": 0.5106210708618164, "train/loss_total": 0.5752463340759277 }, { "epoch": 0.5442158696232968, "step": 2037, "train/loss_ctc": 1.0818992853164673, "train/loss_error": 0.5147864818572998, "train/loss_total": 0.6282090544700623 }, { "epoch": 0.5444830349986641, "step": 2038, "train/loss_ctc": 0.5102263689041138, "train/loss_error": 0.4706926941871643, "train/loss_total": 0.4785994291305542 }, { "epoch": 0.5447502003740315, "step": 2039, "train/loss_ctc": 0.9345664978027344, "train/loss_error": 0.497886061668396, "train/loss_total": 0.5852221250534058 }, { "epoch": 0.5450173657493989, "grad_norm": 1.5014595985412598, "learning_rate": 2.673470478226022e-05, "loss": 0.543, "step": 2040 }, { "epoch": 0.5450173657493989, "step": 2040, "train/loss_ctc": 
0.9923716187477112, "train/loss_error": 0.5066301822662354, "train/loss_total": 0.6037784814834595 }, { "epoch": 0.5452845311247663, "step": 2041, "train/loss_ctc": 1.3822999000549316, "train/loss_error": 0.54194176197052, "train/loss_total": 0.7100133895874023 }, { "epoch": 0.5455516965001336, "step": 2042, "train/loss_ctc": 1.117601990699768, "train/loss_error": 0.47166919708251953, "train/loss_total": 0.6008557677268982 }, { "epoch": 0.5458188618755009, "step": 2043, "train/loss_ctc": 2.213251829147339, "train/loss_error": 0.4766133725643158, "train/loss_total": 0.8239411115646362 }, { "epoch": 0.5460860272508683, "step": 2044, "train/loss_ctc": 0.7167199850082397, "train/loss_error": 0.5493921637535095, "train/loss_total": 0.5828577280044556 }, { "epoch": 0.5463531926262356, "step": 2045, "train/loss_ctc": 0.7184391021728516, "train/loss_error": 0.48453766107559204, "train/loss_total": 0.531317949295044 }, { "epoch": 0.546620358001603, "step": 2046, "train/loss_ctc": 0.7279472947120667, "train/loss_error": 0.4936128556728363, "train/loss_total": 0.5404797196388245 }, { "epoch": 0.5468875233769703, "step": 2047, "train/loss_ctc": 0.7249205112457275, "train/loss_error": 0.5209087133407593, "train/loss_total": 0.5617110729217529 }, { "epoch": 0.5471546887523377, "step": 2048, "train/loss_ctc": 0.9010422229766846, "train/loss_error": 0.4814365804195404, "train/loss_total": 0.5653576850891113 }, { "epoch": 0.547421854127705, "step": 2049, "train/loss_ctc": 0.9673395156860352, "train/loss_error": 0.4408149719238281, "train/loss_total": 0.5461198687553406 }, { "epoch": 0.5476890195030724, "grad_norm": 3.8465640544891357, "learning_rate": 2.6718674859738178e-05, "loss": 0.6066, "step": 2050 }, { "epoch": 0.5476890195030724, "step": 2050, "train/loss_ctc": 0.6780873537063599, "train/loss_error": 0.5094365477561951, "train/loss_total": 0.5431666970252991 }, { "epoch": 0.5479561848784398, "step": 2051, "train/loss_ctc": 1.240216612815857, "train/loss_error": 
0.48099374771118164, "train/loss_total": 0.6328383684158325 }, { "epoch": 0.5482233502538071, "step": 2052, "train/loss_ctc": 0.7306329607963562, "train/loss_error": 0.48356494307518005, "train/loss_total": 0.5329785346984863 }, { "epoch": 0.5484905156291745, "step": 2053, "train/loss_ctc": 1.1371338367462158, "train/loss_error": 0.4983110725879669, "train/loss_total": 0.6260756254196167 }, { "epoch": 0.5487576810045418, "step": 2054, "train/loss_ctc": 0.5082763433456421, "train/loss_error": 0.5032264590263367, "train/loss_total": 0.5042364597320557 }, { "epoch": 0.5490248463799091, "step": 2055, "train/loss_ctc": 1.3853719234466553, "train/loss_error": 0.4945301413536072, "train/loss_total": 0.6726984977722168 }, { "epoch": 0.5492920117552765, "step": 2056, "train/loss_ctc": 0.6585818529129028, "train/loss_error": 0.49381619691848755, "train/loss_total": 0.5267693400382996 }, { "epoch": 0.5495591771306438, "step": 2057, "train/loss_ctc": 0.9469307661056519, "train/loss_error": 0.49527618288993835, "train/loss_total": 0.58560711145401 }, { "epoch": 0.5498263425060113, "step": 2058, "train/loss_ctc": 0.5401862859725952, "train/loss_error": 0.5204377770423889, "train/loss_total": 0.5243874788284302 }, { "epoch": 0.5500935078813786, "step": 2059, "train/loss_ctc": 1.0754601955413818, "train/loss_error": 0.5025197863578796, "train/loss_total": 0.6171078681945801 }, { "epoch": 0.550360673256746, "grad_norm": 1.2379106283187866, "learning_rate": 2.6702644937216136e-05, "loss": 0.5766, "step": 2060 }, { "epoch": 0.550360673256746, "step": 2060, "train/loss_ctc": 0.9487025141716003, "train/loss_error": 0.4400499761104584, "train/loss_total": 0.5417804718017578 }, { "epoch": 0.5506278386321133, "step": 2061, "train/loss_ctc": 0.5012784004211426, "train/loss_error": 0.46377652883529663, "train/loss_total": 0.4712769389152527 }, { "epoch": 0.5508950040074806, "step": 2062, "train/loss_ctc": 0.5576305389404297, "train/loss_error": 0.5201663970947266, "train/loss_total": 
0.5276592373847961 }, { "epoch": 0.551162169382848, "step": 2063, "train/loss_ctc": 0.9546409845352173, "train/loss_error": 0.47070521116256714, "train/loss_total": 0.5674923658370972 }, { "epoch": 0.5514293347582153, "step": 2064, "train/loss_ctc": 1.477961778640747, "train/loss_error": 0.461721271276474, "train/loss_total": 0.6649693846702576 }, { "epoch": 0.5516965001335827, "step": 2065, "train/loss_ctc": 0.9124997854232788, "train/loss_error": 0.4710150957107544, "train/loss_total": 0.5593120455741882 }, { "epoch": 0.55196366550895, "step": 2066, "train/loss_ctc": 0.7180376648902893, "train/loss_error": 0.5182104706764221, "train/loss_total": 0.5581759214401245 }, { "epoch": 0.5522308308843173, "step": 2067, "train/loss_ctc": 0.5509564876556396, "train/loss_error": 0.4689348638057709, "train/loss_total": 0.4853392243385315 }, { "epoch": 0.5524979962596848, "step": 2068, "train/loss_ctc": 1.7771854400634766, "train/loss_error": 0.49877211451530457, "train/loss_total": 0.7544547915458679 }, { "epoch": 0.5527651616350521, "step": 2069, "train/loss_ctc": 1.211262822151184, "train/loss_error": 0.49870720505714417, "train/loss_total": 0.641218364238739 }, { "epoch": 0.5530323270104195, "grad_norm": 1.6024302244186401, "learning_rate": 2.6686615014694098e-05, "loss": 0.5772, "step": 2070 }, { "epoch": 0.5530323270104195, "step": 2070, "train/loss_ctc": 0.7122435569763184, "train/loss_error": 0.5521694421768188, "train/loss_total": 0.5841842889785767 }, { "epoch": 0.5532994923857868, "step": 2071, "train/loss_ctc": 0.9570258855819702, "train/loss_error": 0.5252595543861389, "train/loss_total": 0.6116127967834473 }, { "epoch": 0.5535666577611541, "step": 2072, "train/loss_ctc": 1.0158936977386475, "train/loss_error": 0.5173759460449219, "train/loss_total": 0.617079496383667 }, { "epoch": 0.5538338231365215, "step": 2073, "train/loss_ctc": 0.7045053243637085, "train/loss_error": 0.45117178559303284, "train/loss_total": 0.5018385052680969 }, { "epoch": 
0.5541009885118888, "step": 2074, "train/loss_ctc": 0.9183162450790405, "train/loss_error": 0.4643233120441437, "train/loss_total": 0.555121898651123 }, { "epoch": 0.5543681538872562, "step": 2075, "train/loss_ctc": 0.8246582746505737, "train/loss_error": 0.5000776648521423, "train/loss_total": 0.5649937987327576 }, { "epoch": 0.5546353192626235, "step": 2076, "train/loss_ctc": 0.9583225250244141, "train/loss_error": 0.4714324176311493, "train/loss_total": 0.5688104629516602 }, { "epoch": 0.554902484637991, "step": 2077, "train/loss_ctc": 0.7855226993560791, "train/loss_error": 0.4977206289768219, "train/loss_total": 0.5552810430526733 }, { "epoch": 0.5551696500133583, "step": 2078, "train/loss_ctc": 1.052082896232605, "train/loss_error": 0.427165150642395, "train/loss_total": 0.552148699760437 }, { "epoch": 0.5554368153887256, "step": 2079, "train/loss_ctc": 0.6962456107139587, "train/loss_error": 0.44099482893943787, "train/loss_total": 0.49204498529434204 }, { "epoch": 0.555703980764093, "grad_norm": 5.024156093597412, "learning_rate": 2.6670585092172056e-05, "loss": 0.5603, "step": 2080 }, { "epoch": 0.555703980764093, "step": 2080, "train/loss_ctc": 0.6906258463859558, "train/loss_error": 0.5138686299324036, "train/loss_total": 0.549220085144043 }, { "epoch": 0.5559711461394603, "step": 2081, "train/loss_ctc": 1.0028164386749268, "train/loss_error": 0.47295302152633667, "train/loss_total": 0.5789257287979126 }, { "epoch": 0.5562383115148277, "step": 2082, "train/loss_ctc": 1.0776681900024414, "train/loss_error": 0.47767412662506104, "train/loss_total": 0.5976729393005371 }, { "epoch": 0.556505476890195, "step": 2083, "train/loss_ctc": 1.026405692100525, "train/loss_error": 0.5023743510246277, "train/loss_total": 0.6071805953979492 }, { "epoch": 0.5567726422655623, "step": 2084, "train/loss_ctc": 0.4310619533061981, "train/loss_error": 0.5469431281089783, "train/loss_total": 0.5237668752670288 }, { "epoch": 0.5570398076409298, "step": 2085, "train/loss_ctc": 
0.8409071564674377, "train/loss_error": 0.49424484372138977, "train/loss_total": 0.5635772943496704 }, { "epoch": 0.5573069730162971, "step": 2086, "train/loss_ctc": 1.2747995853424072, "train/loss_error": 0.4353903830051422, "train/loss_total": 0.6032721996307373 }, { "epoch": 0.5575741383916645, "step": 2087, "train/loss_ctc": 0.5146745443344116, "train/loss_error": 0.4946117699146271, "train/loss_total": 0.498624324798584 }, { "epoch": 0.5578413037670318, "step": 2088, "train/loss_ctc": 0.7374007701873779, "train/loss_error": 0.48779240250587463, "train/loss_total": 0.5377141237258911 }, { "epoch": 0.5581084691423992, "step": 2089, "train/loss_ctc": 0.7457935810089111, "train/loss_error": 0.47795620560646057, "train/loss_total": 0.5315237045288086 }, { "epoch": 0.5583756345177665, "grad_norm": 1.1762932538986206, "learning_rate": 2.6654555169650014e-05, "loss": 0.5591, "step": 2090 }, { "epoch": 0.5583756345177665, "step": 2090, "train/loss_ctc": 0.9101541638374329, "train/loss_error": 0.525845468044281, "train/loss_total": 0.6027072072029114 }, { "epoch": 0.5586427998931338, "step": 2091, "train/loss_ctc": 1.4881516695022583, "train/loss_error": 0.47143620252609253, "train/loss_total": 0.6747792959213257 }, { "epoch": 0.5589099652685012, "step": 2092, "train/loss_ctc": 0.6921308040618896, "train/loss_error": 0.5139007568359375, "train/loss_total": 0.5495467782020569 }, { "epoch": 0.5591771306438685, "step": 2093, "train/loss_ctc": 1.6022558212280273, "train/loss_error": 0.4893026351928711, "train/loss_total": 0.7118933200836182 }, { "epoch": 0.559444296019236, "step": 2094, "train/loss_ctc": 1.6045814752578735, "train/loss_error": 0.4729921817779541, "train/loss_total": 0.6993100643157959 }, { "epoch": 0.5597114613946033, "step": 2095, "train/loss_ctc": 0.8320499658584595, "train/loss_error": 0.49975305795669556, "train/loss_total": 0.5662124156951904 }, { "epoch": 0.5599786267699706, "step": 2096, "train/loss_ctc": 0.7060467004776001, "train/loss_error": 
0.4903002679347992, "train/loss_total": 0.5334495902061462 }, { "epoch": 0.560245792145338, "step": 2097, "train/loss_ctc": 1.072692632675171, "train/loss_error": 0.513351321220398, "train/loss_total": 0.6252195835113525 }, { "epoch": 0.5605129575207053, "step": 2098, "train/loss_ctc": 1.1515285968780518, "train/loss_error": 0.5025210380554199, "train/loss_total": 0.6323225498199463 }, { "epoch": 0.5607801228960727, "step": 2099, "train/loss_ctc": 0.6182887554168701, "train/loss_error": 0.44107404351234436, "train/loss_total": 0.476516991853714 }, { "epoch": 0.56104728827144, "grad_norm": 1.2060102224349976, "learning_rate": 2.6638525247127972e-05, "loss": 0.6072, "step": 2100 }, { "epoch": 0.56104728827144, "step": 2100, "train/loss_ctc": 0.7254340052604675, "train/loss_error": 0.4779283106327057, "train/loss_total": 0.527429461479187 }, { "epoch": 0.5613144536468073, "step": 2101, "train/loss_ctc": 0.9219603538513184, "train/loss_error": 0.5318010449409485, "train/loss_total": 0.6098329424858093 }, { "epoch": 0.5615816190221747, "step": 2102, "train/loss_ctc": 0.4744037389755249, "train/loss_error": 0.47492823004722595, "train/loss_total": 0.47482335567474365 }, { "epoch": 0.561848784397542, "step": 2103, "train/loss_ctc": 1.0853924751281738, "train/loss_error": 0.48357176780700684, "train/loss_total": 0.6039358973503113 }, { "epoch": 0.5621159497729095, "step": 2104, "train/loss_ctc": 0.8182965517044067, "train/loss_error": 0.5649858117103577, "train/loss_total": 0.6156479716300964 }, { "epoch": 0.5623831151482768, "step": 2105, "train/loss_ctc": 1.2528367042541504, "train/loss_error": 0.5423412919044495, "train/loss_total": 0.6844403743743896 }, { "epoch": 0.5626502805236442, "step": 2106, "train/loss_ctc": 1.1824671030044556, "train/loss_error": 0.4370366036891937, "train/loss_total": 0.5861226916313171 }, { "epoch": 0.5629174458990115, "step": 2107, "train/loss_ctc": 0.4793630838394165, "train/loss_error": 0.5209736227989197, "train/loss_total": 
0.5126515030860901 }, { "epoch": 0.5631846112743788, "step": 2108, "train/loss_ctc": 0.6811686158180237, "train/loss_error": 0.46322405338287354, "train/loss_total": 0.5068129897117615 }, { "epoch": 0.5634517766497462, "step": 2109, "train/loss_ctc": 0.7343512773513794, "train/loss_error": 0.46896129846572876, "train/loss_total": 0.5220392942428589 }, { "epoch": 0.5637189420251135, "grad_norm": 1.6258058547973633, "learning_rate": 2.662249532460593e-05, "loss": 0.5644, "step": 2110 }, { "epoch": 0.5637189420251135, "step": 2110, "train/loss_ctc": 0.6770434379577637, "train/loss_error": 0.4418451189994812, "train/loss_total": 0.4888848066329956 }, { "epoch": 0.5639861074004809, "step": 2111, "train/loss_ctc": 0.5438105463981628, "train/loss_error": 0.5786232352256775, "train/loss_total": 0.5716606974601746 }, { "epoch": 0.5642532727758482, "step": 2112, "train/loss_ctc": 1.0372238159179688, "train/loss_error": 0.45387428998947144, "train/loss_total": 0.5705441832542419 }, { "epoch": 0.5645204381512156, "step": 2113, "train/loss_ctc": 0.6650502681732178, "train/loss_error": 0.5736234188079834, "train/loss_total": 0.5919088125228882 }, { "epoch": 0.564787603526583, "step": 2114, "train/loss_ctc": 1.0421416759490967, "train/loss_error": 0.47839686274528503, "train/loss_total": 0.5911458134651184 }, { "epoch": 0.5650547689019503, "step": 2115, "train/loss_ctc": 0.6998612880706787, "train/loss_error": 0.46406126022338867, "train/loss_total": 0.5112212896347046 }, { "epoch": 0.5653219342773177, "step": 2116, "train/loss_ctc": 0.6571709513664246, "train/loss_error": 0.5153689980506897, "train/loss_total": 0.5437293648719788 }, { "epoch": 0.565589099652685, "step": 2117, "train/loss_ctc": 1.4793941974639893, "train/loss_error": 0.5130859017372131, "train/loss_total": 0.7063475847244263 }, { "epoch": 0.5658562650280524, "step": 2118, "train/loss_ctc": 0.7895385026931763, "train/loss_error": 0.4844292104244232, "train/loss_total": 0.5454511046409607 }, { "epoch": 
0.5661234304034197, "step": 2119, "train/loss_ctc": 0.5118681192398071, "train/loss_error": 0.4382275640964508, "train/loss_total": 0.4529556930065155 }, { "epoch": 0.566390595778787, "grad_norm": 1.1685065031051636, "learning_rate": 2.6606465402083888e-05, "loss": 0.5574, "step": 2120 }, { "epoch": 0.566390595778787, "step": 2120, "train/loss_ctc": 0.9081968069076538, "train/loss_error": 0.42012158036231995, "train/loss_total": 0.5177366137504578 }, { "epoch": 0.5666577611541544, "step": 2121, "train/loss_ctc": 0.9177202582359314, "train/loss_error": 0.497224897146225, "train/loss_total": 0.5813239812850952 }, { "epoch": 0.5669249265295218, "step": 2122, "train/loss_ctc": 0.6404843330383301, "train/loss_error": 0.4806753695011139, "train/loss_total": 0.5126371383666992 }, { "epoch": 0.5671920919048892, "step": 2123, "train/loss_ctc": 0.5387067794799805, "train/loss_error": 0.5401576161384583, "train/loss_total": 0.5398674607276917 }, { "epoch": 0.5674592572802565, "step": 2124, "train/loss_ctc": 1.379588007926941, "train/loss_error": 0.4327670633792877, "train/loss_total": 0.6221312880516052 }, { "epoch": 0.5677264226556238, "step": 2125, "train/loss_ctc": 0.6609417200088501, "train/loss_error": 0.4863208830356598, "train/loss_total": 0.5212450623512268 }, { "epoch": 0.5679935880309912, "step": 2126, "train/loss_ctc": 0.5449415445327759, "train/loss_error": 0.49704769253730774, "train/loss_total": 0.5066264867782593 }, { "epoch": 0.5682607534063585, "step": 2127, "train/loss_ctc": 1.1036841869354248, "train/loss_error": 0.5185655355453491, "train/loss_total": 0.6355893015861511 }, { "epoch": 0.5685279187817259, "step": 2128, "train/loss_ctc": 1.5823487043380737, "train/loss_error": 0.4390590190887451, "train/loss_total": 0.6677169799804688 }, { "epoch": 0.5687950841570932, "step": 2129, "train/loss_ctc": 0.5900733470916748, "train/loss_error": 0.531589150428772, "train/loss_total": 0.5432860255241394 }, { "epoch": 0.5690622495324605, "grad_norm": 
1.5261350870132446, "learning_rate": 2.659043547956185e-05, "loss": 0.5648, "step": 2130 }, { "epoch": 0.5690622495324605, "step": 2130, "train/loss_ctc": 1.5309994220733643, "train/loss_error": 0.4744245707988739, "train/loss_total": 0.6857395172119141 }, { "epoch": 0.569329414907828, "step": 2131, "train/loss_ctc": 0.5070352554321289, "train/loss_error": 0.470867782831192, "train/loss_total": 0.47810128331184387 }, { "epoch": 0.5695965802831953, "step": 2132, "train/loss_ctc": 1.5661982297897339, "train/loss_error": 0.4422180950641632, "train/loss_total": 0.6670141220092773 }, { "epoch": 0.5698637456585627, "step": 2133, "train/loss_ctc": 0.6815997362136841, "train/loss_error": 0.4511076509952545, "train/loss_total": 0.49720609188079834 }, { "epoch": 0.57013091103393, "step": 2134, "train/loss_ctc": 0.4560670852661133, "train/loss_error": 0.49774977564811707, "train/loss_total": 0.4894132614135742 }, { "epoch": 0.5703980764092974, "step": 2135, "train/loss_ctc": 0.9821045398712158, "train/loss_error": 0.5421224236488342, "train/loss_total": 0.6301188468933105 }, { "epoch": 0.5706652417846647, "step": 2136, "train/loss_ctc": 0.47254911065101624, "train/loss_error": 0.5164642930030823, "train/loss_total": 0.5076812505722046 }, { "epoch": 0.570932407160032, "step": 2137, "train/loss_ctc": 0.45578789710998535, "train/loss_error": 0.4711044430732727, "train/loss_total": 0.46804115176200867 }, { "epoch": 0.5711995725353994, "step": 2138, "train/loss_ctc": 1.103456974029541, "train/loss_error": 0.5008626580238342, "train/loss_total": 0.6213815212249756 }, { "epoch": 0.5714667379107667, "step": 2139, "train/loss_ctc": 0.8269535303115845, "train/loss_error": 0.43805935978889465, "train/loss_total": 0.5158382058143616 }, { "epoch": 0.5717339032861342, "grad_norm": 2.361992359161377, "learning_rate": 2.6574405557039808e-05, "loss": 0.5561, "step": 2140 }, { "epoch": 0.5717339032861342, "step": 2140, "train/loss_ctc": 0.5378776788711548, "train/loss_error": 
0.45646485686302185, "train/loss_total": 0.47274741530418396 }, { "epoch": 0.5720010686615015, "step": 2141, "train/loss_ctc": 0.48011499643325806, "train/loss_error": 0.4850044250488281, "train/loss_total": 0.48402655124664307 }, { "epoch": 0.5722682340368688, "step": 2142, "train/loss_ctc": 1.3461412191390991, "train/loss_error": 0.4603053331375122, "train/loss_total": 0.6374725103378296 }, { "epoch": 0.5725353994122362, "step": 2143, "train/loss_ctc": 1.0846238136291504, "train/loss_error": 0.502878725528717, "train/loss_total": 0.6192277669906616 }, { "epoch": 0.5728025647876035, "step": 2144, "train/loss_ctc": 1.1870096921920776, "train/loss_error": 0.49129438400268555, "train/loss_total": 0.630437433719635 }, { "epoch": 0.5730697301629709, "step": 2145, "train/loss_ctc": 1.0210915803909302, "train/loss_error": 0.5378831028938293, "train/loss_total": 0.6345248222351074 }, { "epoch": 0.5733368955383382, "step": 2146, "train/loss_ctc": 1.6050442457199097, "train/loss_error": 0.5019673705101013, "train/loss_total": 0.7225827574729919 }, { "epoch": 0.5736040609137056, "step": 2147, "train/loss_ctc": 0.3850361108779907, "train/loss_error": 0.46769317984580994, "train/loss_total": 0.45116177201271057 }, { "epoch": 0.5738712262890729, "step": 2148, "train/loss_ctc": 0.6238149404525757, "train/loss_error": 0.4468061327934265, "train/loss_total": 0.48220789432525635 }, { "epoch": 0.5741383916644403, "step": 2149, "train/loss_ctc": 0.658394992351532, "train/loss_error": 0.4468427002429962, "train/loss_total": 0.4891531765460968 }, { "epoch": 0.5744055570398077, "grad_norm": 5.48142147064209, "learning_rate": 2.6558375634517766e-05, "loss": 0.5624, "step": 2150 }, { "epoch": 0.5744055570398077, "step": 2150, "train/loss_ctc": 1.0732636451721191, "train/loss_error": 0.459231972694397, "train/loss_total": 0.5820383429527283 }, { "epoch": 0.574672722415175, "step": 2151, "train/loss_ctc": 1.523300051689148, "train/loss_error": 0.4652267098426819, "train/loss_total": 
0.6768413782119751 }, { "epoch": 0.5749398877905424, "step": 2152, "train/loss_ctc": 0.5478709936141968, "train/loss_error": 0.44724106788635254, "train/loss_total": 0.4673670530319214 }, { "epoch": 0.5752070531659097, "step": 2153, "train/loss_ctc": 0.840298056602478, "train/loss_error": 0.47001227736473083, "train/loss_total": 0.5440694093704224 }, { "epoch": 0.575474218541277, "step": 2154, "train/loss_ctc": 0.5829248428344727, "train/loss_error": 0.51016765832901, "train/loss_total": 0.5247191190719604 }, { "epoch": 0.5757413839166444, "step": 2155, "train/loss_ctc": 1.4440783262252808, "train/loss_error": 0.4762042462825775, "train/loss_total": 0.6697790622711182 }, { "epoch": 0.5760085492920117, "step": 2156, "train/loss_ctc": 0.7128185033798218, "train/loss_error": 0.48976844549179077, "train/loss_total": 0.5343784689903259 }, { "epoch": 0.5762757146673791, "step": 2157, "train/loss_ctc": 0.42014363408088684, "train/loss_error": 0.44505247473716736, "train/loss_total": 0.4400707185268402 }, { "epoch": 0.5765428800427465, "step": 2158, "train/loss_ctc": 0.3029025197029114, "train/loss_error": 0.5244237184524536, "train/loss_total": 0.4801194667816162 }, { "epoch": 0.5768100454181138, "step": 2159, "train/loss_ctc": 0.6290205717086792, "train/loss_error": 0.5246527791023254, "train/loss_total": 0.5455263257026672 }, { "epoch": 0.5770772107934812, "grad_norm": 1.0827232599258423, "learning_rate": 2.6542345711995727e-05, "loss": 0.5465, "step": 2160 }, { "epoch": 0.5770772107934812, "step": 2160, "train/loss_ctc": 0.889362096786499, "train/loss_error": 0.46488267183303833, "train/loss_total": 0.5497785806655884 }, { "epoch": 0.5773443761688485, "step": 2161, "train/loss_ctc": 0.9079715013504028, "train/loss_error": 0.45142078399658203, "train/loss_total": 0.5427309274673462 }, { "epoch": 0.5776115415442159, "step": 2162, "train/loss_ctc": 1.1118018627166748, "train/loss_error": 0.5086410641670227, "train/loss_total": 0.6292732357978821 }, { "epoch": 
0.5778787069195832, "step": 2163, "train/loss_ctc": 0.34649503231048584, "train/loss_error": 0.43782204389572144, "train/loss_total": 0.4195566475391388 }, { "epoch": 0.5781458722949506, "step": 2164, "train/loss_ctc": 0.6568470001220703, "train/loss_error": 0.46058908104896545, "train/loss_total": 0.4998406767845154 }, { "epoch": 0.5784130376703179, "step": 2165, "train/loss_ctc": 1.3894176483154297, "train/loss_error": 0.49273478984832764, "train/loss_total": 0.6720713376998901 }, { "epoch": 0.5786802030456852, "step": 2166, "train/loss_ctc": 0.4181373119354248, "train/loss_error": 0.4741908013820648, "train/loss_total": 0.46298012137413025 }, { "epoch": 0.5789473684210527, "step": 2167, "train/loss_ctc": 1.5024921894073486, "train/loss_error": 0.4548436403274536, "train/loss_total": 0.6643733978271484 }, { "epoch": 0.57921453379642, "step": 2168, "train/loss_ctc": 0.30002978444099426, "train/loss_error": 0.47575312852859497, "train/loss_total": 0.4406084716320038 }, { "epoch": 0.5794816991717874, "step": 2169, "train/loss_ctc": 1.2392477989196777, "train/loss_error": 0.53310626745224, "train/loss_total": 0.6743345856666565 }, { "epoch": 0.5797488645471547, "grad_norm": 2.7916030883789062, "learning_rate": 2.6526315789473685e-05, "loss": 0.5556, "step": 2170 }, { "epoch": 0.5797488645471547, "step": 2170, "train/loss_ctc": 1.0003457069396973, "train/loss_error": 0.4715019762516022, "train/loss_total": 0.5772707462310791 }, { "epoch": 0.580016029922522, "step": 2171, "train/loss_ctc": 1.1128573417663574, "train/loss_error": 0.45701661705970764, "train/loss_total": 0.5881847739219666 }, { "epoch": 0.5802831952978894, "step": 2172, "train/loss_ctc": 0.7533919811248779, "train/loss_error": 0.46347421407699585, "train/loss_total": 0.5214577913284302 }, { "epoch": 0.5805503606732567, "step": 2173, "train/loss_ctc": 0.5586832761764526, "train/loss_error": 0.46889007091522217, "train/loss_total": 0.48684871196746826 }, { "epoch": 0.5808175260486241, "step": 2174, 
"train/loss_ctc": 0.5808306336402893, "train/loss_error": 0.44571051001548767, "train/loss_total": 0.47273457050323486 }, { "epoch": 0.5810846914239914, "step": 2175, "train/loss_ctc": 0.8027843832969666, "train/loss_error": 0.49355199933052063, "train/loss_total": 0.5553984642028809 }, { "epoch": 0.5813518567993589, "step": 2176, "train/loss_ctc": 0.47054582834243774, "train/loss_error": 0.5370692014694214, "train/loss_total": 0.5237645506858826 }, { "epoch": 0.5816190221747262, "step": 2177, "train/loss_ctc": 0.8110271692276001, "train/loss_error": 0.5986390113830566, "train/loss_total": 0.6411166787147522 }, { "epoch": 0.5818861875500935, "step": 2178, "train/loss_ctc": 1.1128971576690674, "train/loss_error": 0.46425920724868774, "train/loss_total": 0.5939868092536926 }, { "epoch": 0.5821533529254609, "step": 2179, "train/loss_ctc": 0.5327073335647583, "train/loss_error": 0.4985540211200714, "train/loss_total": 0.5053846836090088 }, { "epoch": 0.5824205183008282, "grad_norm": 0.8076666593551636, "learning_rate": 2.6510285866951644e-05, "loss": 0.5466, "step": 2180 }, { "epoch": 0.5824205183008282, "step": 2180, "train/loss_ctc": 0.42071831226348877, "train/loss_error": 0.43106627464294434, "train/loss_total": 0.4289966821670532 }, { "epoch": 0.5826876836761956, "step": 2181, "train/loss_ctc": 0.930173933506012, "train/loss_error": 0.5002850890159607, "train/loss_total": 0.5862628817558289 }, { "epoch": 0.5829548490515629, "step": 2182, "train/loss_ctc": 0.9503912925720215, "train/loss_error": 0.5161027312278748, "train/loss_total": 0.602960467338562 }, { "epoch": 0.5832220144269302, "step": 2183, "train/loss_ctc": 0.9877041578292847, "train/loss_error": 0.4926507771015167, "train/loss_total": 0.5916614532470703 }, { "epoch": 0.5834891798022976, "step": 2184, "train/loss_ctc": 0.8013324737548828, "train/loss_error": 0.5044583678245544, "train/loss_total": 0.5638331770896912 }, { "epoch": 0.583756345177665, "step": 2185, "train/loss_ctc": 1.08810293674469, 
"train/loss_error": 0.4751730263233185, "train/loss_total": 0.5977590084075928 }, { "epoch": 0.5840235105530324, "step": 2186, "train/loss_ctc": 0.8845372200012207, "train/loss_error": 0.4901497960090637, "train/loss_total": 0.569027304649353 }, { "epoch": 0.5842906759283997, "step": 2187, "train/loss_ctc": 0.4614570438861847, "train/loss_error": 0.5175899863243103, "train/loss_total": 0.5063633918762207 }, { "epoch": 0.584557841303767, "step": 2188, "train/loss_ctc": 0.8282309770584106, "train/loss_error": 0.4976848065853119, "train/loss_total": 0.5637940168380737 }, { "epoch": 0.5848250066791344, "step": 2189, "train/loss_ctc": 0.9735565781593323, "train/loss_error": 0.4855285882949829, "train/loss_total": 0.5831341743469238 }, { "epoch": 0.5850921720545017, "grad_norm": 2.839047431945801, "learning_rate": 2.6494255944429605e-05, "loss": 0.5594, "step": 2190 }, { "epoch": 0.5850921720545017, "step": 2190, "train/loss_ctc": 0.39022296667099, "train/loss_error": 0.4153304994106293, "train/loss_total": 0.41030898690223694 }, { "epoch": 0.5853593374298691, "step": 2191, "train/loss_ctc": 1.110123634338379, "train/loss_error": 0.5532162189483643, "train/loss_total": 0.6645976901054382 }, { "epoch": 0.5856265028052364, "step": 2192, "train/loss_ctc": 1.2278269529342651, "train/loss_error": 0.5365294218063354, "train/loss_total": 0.6747889518737793 }, { "epoch": 0.5858936681806038, "step": 2193, "train/loss_ctc": 0.6651476621627808, "train/loss_error": 0.48933231830596924, "train/loss_total": 0.5244954228401184 }, { "epoch": 0.5861608335559712, "step": 2194, "train/loss_ctc": 0.9006833434104919, "train/loss_error": 0.4513368606567383, "train/loss_total": 0.5412061810493469 }, { "epoch": 0.5864279989313385, "step": 2195, "train/loss_ctc": 0.561204195022583, "train/loss_error": 0.4315290153026581, "train/loss_total": 0.4574640691280365 }, { "epoch": 0.5866951643067059, "step": 2196, "train/loss_ctc": 0.5717536211013794, "train/loss_error": 0.42920300364494324, 
"train/loss_total": 0.45771315693855286 }, { "epoch": 0.5869623296820732, "step": 2197, "train/loss_ctc": 0.6797307133674622, "train/loss_error": 0.49552956223487854, "train/loss_total": 0.5323697924613953 }, { "epoch": 0.5872294950574406, "step": 2198, "train/loss_ctc": 0.8661384582519531, "train/loss_error": 0.5364223122596741, "train/loss_total": 0.6023655533790588 }, { "epoch": 0.5874966604328079, "step": 2199, "train/loss_ctc": 0.7760909795761108, "train/loss_error": 0.44329673051834106, "train/loss_total": 0.5098555684089661 }, { "epoch": 0.5877638258081752, "grad_norm": 1.0255602598190308, "learning_rate": 2.6478226021907563e-05, "loss": 0.5375, "step": 2200 }, { "epoch": 0.5877638258081752, "step": 2200, "train/loss_ctc": 0.8233547210693359, "train/loss_error": 0.40531936287879944, "train/loss_total": 0.4889264404773712 }, { "epoch": 0.5880309911835426, "step": 2201, "train/loss_ctc": 1.0466080904006958, "train/loss_error": 0.44088858366012573, "train/loss_total": 0.5620324611663818 }, { "epoch": 0.5882981565589099, "step": 2202, "train/loss_ctc": 0.4062798023223877, "train/loss_error": 0.4210664629936218, "train/loss_total": 0.41810914874076843 }, { "epoch": 0.5885653219342774, "step": 2203, "train/loss_ctc": 0.46531862020492554, "train/loss_error": 0.47827017307281494, "train/loss_total": 0.475679874420166 }, { "epoch": 0.5888324873096447, "step": 2204, "train/loss_ctc": 1.2918623685836792, "train/loss_error": 0.4861218333244324, "train/loss_total": 0.6472699642181396 }, { "epoch": 0.5890996526850121, "step": 2205, "train/loss_ctc": 0.7663664817810059, "train/loss_error": 0.45934000611305237, "train/loss_total": 0.5207453370094299 }, { "epoch": 0.5893668180603794, "step": 2206, "train/loss_ctc": 0.9877809882164001, "train/loss_error": 0.5095193386077881, "train/loss_total": 0.6051716804504395 }, { "epoch": 0.5896339834357467, "step": 2207, "train/loss_ctc": 0.6681714653968811, "train/loss_error": 0.4842776656150818, "train/loss_total": 0.5210564136505127 
}, { "epoch": 0.5899011488111141, "step": 2208, "train/loss_ctc": 0.5427515506744385, "train/loss_error": 0.49450355768203735, "train/loss_total": 0.5041531324386597 }, { "epoch": 0.5901683141864814, "step": 2209, "train/loss_ctc": 1.7191834449768066, "train/loss_error": 0.5047954320907593, "train/loss_total": 0.7476730346679688 }, { "epoch": 0.5904354795618488, "grad_norm": 1.79416823387146, "learning_rate": 2.646219609938552e-05, "loss": 0.5491, "step": 2210 }, { "epoch": 0.5904354795618488, "step": 2210, "train/loss_ctc": 1.2272403240203857, "train/loss_error": 0.5387282967567444, "train/loss_total": 0.6764307022094727 }, { "epoch": 0.5907026449372161, "step": 2211, "train/loss_ctc": 1.1664767265319824, "train/loss_error": 0.5035008788108826, "train/loss_total": 0.6360960602760315 }, { "epoch": 0.5909698103125834, "step": 2212, "train/loss_ctc": 0.8917574882507324, "train/loss_error": 0.5027098059654236, "train/loss_total": 0.5805193781852722 }, { "epoch": 0.5912369756879509, "step": 2213, "train/loss_ctc": 0.6867469549179077, "train/loss_error": 0.498999685049057, "train/loss_total": 0.5365491509437561 }, { "epoch": 0.5915041410633182, "step": 2214, "train/loss_ctc": 1.610248327255249, "train/loss_error": 0.526045560836792, "train/loss_total": 0.7428861260414124 }, { "epoch": 0.5917713064386856, "step": 2215, "train/loss_ctc": 1.2849575281143188, "train/loss_error": 0.4809918999671936, "train/loss_total": 0.6417850255966187 }, { "epoch": 0.5920384718140529, "step": 2216, "train/loss_ctc": 1.1914042234420776, "train/loss_error": 0.47311368584632874, "train/loss_total": 0.6167718172073364 }, { "epoch": 0.5923056371894202, "step": 2217, "train/loss_ctc": 1.119689702987671, "train/loss_error": 0.46736249327659607, "train/loss_total": 0.5978279709815979 }, { "epoch": 0.5925728025647876, "step": 2218, "train/loss_ctc": 1.0814303159713745, "train/loss_error": 0.49513036012649536, "train/loss_total": 0.612390398979187 }, { "epoch": 0.5928399679401549, "step": 2219, 
"train/loss_ctc": 0.3052120804786682, "train/loss_error": 0.4446727931499481, "train/loss_total": 0.4167806804180145 }, { "epoch": 0.5931071333155223, "grad_norm": 1.9039855003356934, "learning_rate": 2.644616617686348e-05, "loss": 0.6058, "step": 2220 }, { "epoch": 0.5931071333155223, "step": 2220, "train/loss_ctc": 1.2611931562423706, "train/loss_error": 0.47886228561401367, "train/loss_total": 0.635328471660614 }, { "epoch": 0.5933742986908896, "step": 2221, "train/loss_ctc": 0.8820614814758301, "train/loss_error": 0.4696084260940552, "train/loss_total": 0.5520990490913391 }, { "epoch": 0.5936414640662571, "step": 2222, "train/loss_ctc": 0.5888262987136841, "train/loss_error": 0.44584962725639343, "train/loss_total": 0.4744449853897095 }, { "epoch": 0.5939086294416244, "step": 2223, "train/loss_ctc": 0.6740909218788147, "train/loss_error": 0.46569275856018066, "train/loss_total": 0.5073723793029785 }, { "epoch": 0.5941757948169917, "step": 2224, "train/loss_ctc": 1.6265308856964111, "train/loss_error": 0.4652986526489258, "train/loss_total": 0.6975451111793518 }, { "epoch": 0.5944429601923591, "step": 2225, "train/loss_ctc": 0.7415665984153748, "train/loss_error": 0.5433998107910156, "train/loss_total": 0.5830332040786743 }, { "epoch": 0.5947101255677264, "step": 2226, "train/loss_ctc": 0.8790251016616821, "train/loss_error": 0.493044376373291, "train/loss_total": 0.5702404975891113 }, { "epoch": 0.5949772909430938, "step": 2227, "train/loss_ctc": 0.8535575270652771, "train/loss_error": 0.5806581974029541, "train/loss_total": 0.6352380514144897 }, { "epoch": 0.5952444563184611, "step": 2228, "train/loss_ctc": 0.6227012872695923, "train/loss_error": 0.5025441646575928, "train/loss_total": 0.5265755653381348 }, { "epoch": 0.5955116216938284, "step": 2229, "train/loss_ctc": 1.4863967895507812, "train/loss_error": 0.4995620846748352, "train/loss_total": 0.6969290375709534 }, { "epoch": 0.5957787870691958, "grad_norm": 1.2245343923568726, "learning_rate": 
2.6430136254341437e-05, "loss": 0.5879, "step": 2230 }, { "epoch": 0.5957787870691958, "step": 2230, "train/loss_ctc": 0.5578579902648926, "train/loss_error": 0.4961877465248108, "train/loss_total": 0.5085217952728271 }, { "epoch": 0.5960459524445632, "step": 2231, "train/loss_ctc": 0.41919058561325073, "train/loss_error": 0.44926297664642334, "train/loss_total": 0.4432485103607178 }, { "epoch": 0.5963131178199306, "step": 2232, "train/loss_ctc": 0.9001880884170532, "train/loss_error": 0.4457862079143524, "train/loss_total": 0.5366666316986084 }, { "epoch": 0.5965802831952979, "step": 2233, "train/loss_ctc": 0.6641249060630798, "train/loss_error": 0.4908280670642853, "train/loss_total": 0.5254874229431152 }, { "epoch": 0.5968474485706653, "step": 2234, "train/loss_ctc": 0.9522308111190796, "train/loss_error": 0.5196747779846191, "train/loss_total": 0.606186032295227 }, { "epoch": 0.5971146139460326, "step": 2235, "train/loss_ctc": 0.8723039627075195, "train/loss_error": 0.519293487071991, "train/loss_total": 0.5898956060409546 }, { "epoch": 0.5973817793213999, "step": 2236, "train/loss_ctc": 0.812897801399231, "train/loss_error": 0.4786069095134735, "train/loss_total": 0.5454651117324829 }, { "epoch": 0.5976489446967673, "step": 2237, "train/loss_ctc": 0.9600305557250977, "train/loss_error": 0.49204811453819275, "train/loss_total": 0.5856446027755737 }, { "epoch": 0.5979161100721346, "step": 2238, "train/loss_ctc": 1.027125597000122, "train/loss_error": 0.5188949704170227, "train/loss_total": 0.6205410957336426 }, { "epoch": 0.598183275447502, "step": 2239, "train/loss_ctc": 0.8720567226409912, "train/loss_error": 0.46637263894081116, "train/loss_total": 0.5475094318389893 }, { "epoch": 0.5984504408228694, "grad_norm": 0.851109504699707, "learning_rate": 2.6414106331819395e-05, "loss": 0.5509, "step": 2240 }, { "epoch": 0.5984504408228694, "step": 2240, "train/loss_ctc": 0.6387491226196289, "train/loss_error": 0.44794341921806335, "train/loss_total": 
0.4861045777797699 }, { "epoch": 0.5987176061982367, "step": 2241, "train/loss_ctc": 1.2863612174987793, "train/loss_error": 0.4849529266357422, "train/loss_total": 0.6452345848083496 }, { "epoch": 0.5989847715736041, "step": 2242, "train/loss_ctc": 0.8231836557388306, "train/loss_error": 0.5013755559921265, "train/loss_total": 0.5657371878623962 }, { "epoch": 0.5992519369489714, "step": 2243, "train/loss_ctc": 0.6020219922065735, "train/loss_error": 0.500228226184845, "train/loss_total": 0.5205869674682617 }, { "epoch": 0.5995191023243388, "step": 2244, "train/loss_ctc": 0.7953977584838867, "train/loss_error": 0.5329187512397766, "train/loss_total": 0.5854145288467407 }, { "epoch": 0.5997862676997061, "step": 2245, "train/loss_ctc": 1.2975425720214844, "train/loss_error": 0.46113908290863037, "train/loss_total": 0.6284197568893433 }, { "epoch": 0.6000534330750734, "step": 2246, "train/loss_ctc": 0.8728671073913574, "train/loss_error": 0.4519593417644501, "train/loss_total": 0.5361409187316895 }, { "epoch": 0.6003205984504408, "step": 2247, "train/loss_ctc": 0.8779610991477966, "train/loss_error": 0.4797467589378357, "train/loss_total": 0.5593896508216858 }, { "epoch": 0.6005877638258081, "step": 2248, "train/loss_ctc": 0.49395570158958435, "train/loss_error": 0.6108476519584656, "train/loss_total": 0.5874692797660828 }, { "epoch": 0.6008549292011756, "step": 2249, "train/loss_ctc": 1.8666090965270996, "train/loss_error": 0.4092453718185425, "train/loss_total": 0.7007181644439697 }, { "epoch": 0.6011220945765429, "grad_norm": 2.36305832862854, "learning_rate": 2.6398076409297357e-05, "loss": 0.5815, "step": 2250 }, { "epoch": 0.6011220945765429, "step": 2250, "train/loss_ctc": 0.6635403633117676, "train/loss_error": 0.49436429142951965, "train/loss_total": 0.5281995534896851 }, { "epoch": 0.6013892599519103, "step": 2251, "train/loss_ctc": 1.4237308502197266, "train/loss_error": 0.5069358348846436, "train/loss_total": 0.6902948617935181 }, { "epoch": 
0.6016564253272776, "step": 2252, "train/loss_ctc": 1.1960535049438477, "train/loss_error": 0.5367001295089722, "train/loss_total": 0.6685708165168762 }, { "epoch": 0.6019235907026449, "step": 2253, "train/loss_ctc": 1.1016989946365356, "train/loss_error": 0.5665633678436279, "train/loss_total": 0.6735905408859253 }, { "epoch": 0.6021907560780123, "step": 2254, "train/loss_ctc": 0.9026604294776917, "train/loss_error": 0.529344379901886, "train/loss_total": 0.6040076017379761 }, { "epoch": 0.6024579214533796, "step": 2255, "train/loss_ctc": 0.5014845132827759, "train/loss_error": 0.47897985577583313, "train/loss_total": 0.4834808111190796 }, { "epoch": 0.602725086828747, "step": 2256, "train/loss_ctc": 0.926037609577179, "train/loss_error": 0.4706220328807831, "train/loss_total": 0.5617051720619202 }, { "epoch": 0.6029922522041143, "step": 2257, "train/loss_ctc": 0.46201974153518677, "train/loss_error": 0.49523019790649414, "train/loss_total": 0.4885881245136261 }, { "epoch": 0.6032594175794816, "step": 2258, "train/loss_ctc": 0.5677938461303711, "train/loss_error": 0.38730353116989136, "train/loss_total": 0.4234015941619873 }, { "epoch": 0.6035265829548491, "step": 2259, "train/loss_ctc": 0.9976428747177124, "train/loss_error": 0.5007451176643372, "train/loss_total": 0.600124716758728 }, { "epoch": 0.6037937483302164, "grad_norm": 1.3893650770187378, "learning_rate": 2.6382046486775315e-05, "loss": 0.5722, "step": 2260 }, { "epoch": 0.6037937483302164, "step": 2260, "train/loss_ctc": 0.7775940299034119, "train/loss_error": 0.4802249073982239, "train/loss_total": 0.5396987199783325 }, { "epoch": 0.6040609137055838, "step": 2261, "train/loss_ctc": 1.2502517700195312, "train/loss_error": 0.4494081437587738, "train/loss_total": 0.6095768809318542 }, { "epoch": 0.6043280790809511, "step": 2262, "train/loss_ctc": 1.1412843465805054, "train/loss_error": 0.5551050901412964, "train/loss_total": 0.6723409295082092 }, { "epoch": 0.6045952444563185, "step": 2263, 
"train/loss_ctc": 1.494592547416687, "train/loss_error": 0.46173110604286194, "train/loss_total": 0.668303370475769 }, { "epoch": 0.6048624098316858, "step": 2264, "train/loss_ctc": 0.5179508924484253, "train/loss_error": 0.49532848596572876, "train/loss_total": 0.4998529851436615 }, { "epoch": 0.6051295752070531, "step": 2265, "train/loss_ctc": 1.3286292552947998, "train/loss_error": 0.4970014691352844, "train/loss_total": 0.6633270382881165 }, { "epoch": 0.6053967405824205, "step": 2266, "train/loss_ctc": 1.2165496349334717, "train/loss_error": 0.4955030083656311, "train/loss_total": 0.6397123336791992 }, { "epoch": 0.6056639059577879, "step": 2267, "train/loss_ctc": 0.3360533118247986, "train/loss_error": 0.5029158592224121, "train/loss_total": 0.46954336762428284 }, { "epoch": 0.6059310713331553, "step": 2268, "train/loss_ctc": 1.3783938884735107, "train/loss_error": 0.4699597656726837, "train/loss_total": 0.651646614074707 }, { "epoch": 0.6061982367085226, "step": 2269, "train/loss_ctc": 0.7713393568992615, "train/loss_error": 0.4400237798690796, "train/loss_total": 0.5062869191169739 }, { "epoch": 0.6064654020838899, "grad_norm": 1.1992305517196655, "learning_rate": 2.6366016564253273e-05, "loss": 0.592, "step": 2270 }, { "epoch": 0.6064654020838899, "step": 2270, "train/loss_ctc": 0.770691990852356, "train/loss_error": 0.5309169292449951, "train/loss_total": 0.5788719654083252 }, { "epoch": 0.6067325674592573, "step": 2271, "train/loss_ctc": 0.5047787427902222, "train/loss_error": 0.5010212659835815, "train/loss_total": 0.5017727613449097 }, { "epoch": 0.6069997328346246, "step": 2272, "train/loss_ctc": 0.5699714422225952, "train/loss_error": 0.498250812292099, "train/loss_total": 0.5125949382781982 }, { "epoch": 0.607266898209992, "step": 2273, "train/loss_ctc": 1.1589325666427612, "train/loss_error": 0.4575062394142151, "train/loss_total": 0.5977915525436401 }, { "epoch": 0.6075340635853593, "step": 2274, "train/loss_ctc": 0.6704059839248657, 
"train/loss_error": 0.5201942324638367, "train/loss_total": 0.5502365827560425 }, { "epoch": 0.6078012289607266, "step": 2275, "train/loss_ctc": 0.5215646028518677, "train/loss_error": 0.5146567821502686, "train/loss_total": 0.5160383582115173 }, { "epoch": 0.608068394336094, "step": 2276, "train/loss_ctc": 1.5142616033554077, "train/loss_error": 0.4954632818698883, "train/loss_total": 0.6992229223251343 }, { "epoch": 0.6083355597114614, "step": 2277, "train/loss_ctc": 0.860169529914856, "train/loss_error": 0.5159441232681274, "train/loss_total": 0.5847892165184021 }, { "epoch": 0.6086027250868288, "step": 2278, "train/loss_ctc": 0.7808136940002441, "train/loss_error": 0.5355674624443054, "train/loss_total": 0.5846167206764221 }, { "epoch": 0.6088698904621961, "step": 2279, "train/loss_ctc": 0.7765761613845825, "train/loss_error": 0.5367499589920044, "train/loss_total": 0.5847151875495911 }, { "epoch": 0.6091370558375635, "grad_norm": 1.199464201927185, "learning_rate": 2.634998664173123e-05, "loss": 0.5711, "step": 2280 }, { "epoch": 0.6091370558375635, "step": 2280, "train/loss_ctc": 0.6271153688430786, "train/loss_error": 0.48962730169296265, "train/loss_total": 0.5171248912811279 }, { "epoch": 0.6094042212129308, "step": 2281, "train/loss_ctc": 1.2271827459335327, "train/loss_error": 0.4415063261985779, "train/loss_total": 0.5986416339874268 }, { "epoch": 0.6096713865882981, "step": 2282, "train/loss_ctc": 1.0801067352294922, "train/loss_error": 0.4944172203540802, "train/loss_total": 0.6115550994873047 }, { "epoch": 0.6099385519636655, "step": 2283, "train/loss_ctc": 0.9728536605834961, "train/loss_error": 0.46487873792648315, "train/loss_total": 0.5664737224578857 }, { "epoch": 0.6102057173390328, "step": 2284, "train/loss_ctc": 0.6356879472732544, "train/loss_error": 0.5460349917411804, "train/loss_total": 0.5639655590057373 }, { "epoch": 0.6104728827144003, "step": 2285, "train/loss_ctc": 1.0340156555175781, "train/loss_error": 0.458058625459671, 
"train/loss_total": 0.5732500553131104 }, { "epoch": 0.6107400480897676, "step": 2286, "train/loss_ctc": 0.9658154845237732, "train/loss_error": 0.4807622730731964, "train/loss_total": 0.5777729153633118 }, { "epoch": 0.6110072134651349, "step": 2287, "train/loss_ctc": 1.0375399589538574, "train/loss_error": 0.4450801610946655, "train/loss_total": 0.5635721683502197 }, { "epoch": 0.6112743788405023, "step": 2288, "train/loss_ctc": 0.6682193279266357, "train/loss_error": 0.5422667264938354, "train/loss_total": 0.5674572587013245 }, { "epoch": 0.6115415442158696, "step": 2289, "train/loss_ctc": 0.7790536880493164, "train/loss_error": 0.4712006747722626, "train/loss_total": 0.5327712893486023 }, { "epoch": 0.611808709591237, "grad_norm": 1.4645674228668213, "learning_rate": 2.633395671920919e-05, "loss": 0.5673, "step": 2290 }, { "epoch": 0.611808709591237, "step": 2290, "train/loss_ctc": 0.25899165868759155, "train/loss_error": 0.39624685049057007, "train/loss_total": 0.36879584193229675 }, { "epoch": 0.6120758749666043, "step": 2291, "train/loss_ctc": 0.5956171154975891, "train/loss_error": 0.5162267088890076, "train/loss_total": 0.5321047902107239 }, { "epoch": 0.6123430403419717, "step": 2292, "train/loss_ctc": 0.6524993181228638, "train/loss_error": 0.5887505412101746, "train/loss_total": 0.6015002727508545 }, { "epoch": 0.612610205717339, "step": 2293, "train/loss_ctc": 0.8475629687309265, "train/loss_error": 0.4352017343044281, "train/loss_total": 0.5176739692687988 }, { "epoch": 0.6128773710927063, "step": 2294, "train/loss_ctc": 0.7702466249465942, "train/loss_error": 0.5855514407157898, "train/loss_total": 0.6224904656410217 }, { "epoch": 0.6131445364680738, "step": 2295, "train/loss_ctc": 0.9774993062019348, "train/loss_error": 0.47574156522750854, "train/loss_total": 0.5760931372642517 }, { "epoch": 0.6134117018434411, "step": 2296, "train/loss_ctc": 1.0204696655273438, "train/loss_error": 0.493028849363327, "train/loss_total": 0.5985170602798462 }, { 
"epoch": 0.6136788672188085, "step": 2297, "train/loss_ctc": 1.1029833555221558, "train/loss_error": 0.497486412525177, "train/loss_total": 0.6185858249664307 }, { "epoch": 0.6139460325941758, "step": 2298, "train/loss_ctc": 0.8165880441665649, "train/loss_error": 0.47762593626976013, "train/loss_total": 0.545418381690979 }, { "epoch": 0.6142131979695431, "step": 2299, "train/loss_ctc": 1.1581692695617676, "train/loss_error": 0.5050579309463501, "train/loss_total": 0.6356801986694336 }, { "epoch": 0.6144803633449105, "grad_norm": 1.2001508474349976, "learning_rate": 2.6317926796687147e-05, "loss": 0.5617, "step": 2300 }, { "epoch": 0.6144803633449105, "step": 2300, "train/loss_ctc": 0.6469178199768066, "train/loss_error": 0.4360533654689789, "train/loss_total": 0.47822627425193787 }, { "epoch": 0.6147475287202778, "step": 2301, "train/loss_ctc": 1.2525789737701416, "train/loss_error": 0.5867936015129089, "train/loss_total": 0.7199506759643555 }, { "epoch": 0.6150146940956452, "step": 2302, "train/loss_ctc": 0.685813307762146, "train/loss_error": 0.44889721274375916, "train/loss_total": 0.4962804317474365 }, { "epoch": 0.6152818594710125, "step": 2303, "train/loss_ctc": 0.6587910652160645, "train/loss_error": 0.46543997526168823, "train/loss_total": 0.5041102170944214 }, { "epoch": 0.6155490248463799, "step": 2304, "train/loss_ctc": 1.0120301246643066, "train/loss_error": 0.5045881271362305, "train/loss_total": 0.6060765385627747 }, { "epoch": 0.6158161902217473, "step": 2305, "train/loss_ctc": 0.45459887385368347, "train/loss_error": 0.5544295310974121, "train/loss_total": 0.5344634056091309 }, { "epoch": 0.6160833555971146, "step": 2306, "train/loss_ctc": 1.0646326541900635, "train/loss_error": 0.5104012489318848, "train/loss_total": 0.6212475299835205 }, { "epoch": 0.616350520972482, "step": 2307, "train/loss_ctc": 1.1117290258407593, "train/loss_error": 0.4224845767021179, "train/loss_total": 0.5603334903717041 }, { "epoch": 0.6166176863478493, "step": 2308, 
"train/loss_ctc": 0.9593875408172607, "train/loss_error": 0.5787988901138306, "train/loss_total": 0.6549166440963745 }, { "epoch": 0.6168848517232167, "step": 2309, "train/loss_ctc": 0.5905020236968994, "train/loss_error": 0.4169614017009735, "train/loss_total": 0.4516695439815521 }, { "epoch": 0.617152017098584, "grad_norm": 0.8421879410743713, "learning_rate": 2.630189687416511e-05, "loss": 0.5627, "step": 2310 }, { "epoch": 0.617152017098584, "step": 2310, "train/loss_ctc": 0.8526383638381958, "train/loss_error": 0.4884262681007385, "train/loss_total": 0.56126868724823 }, { "epoch": 0.6174191824739513, "step": 2311, "train/loss_ctc": 0.3486315906047821, "train/loss_error": 0.42636245489120483, "train/loss_total": 0.4108162820339203 }, { "epoch": 0.6176863478493188, "step": 2312, "train/loss_ctc": 0.8288956880569458, "train/loss_error": 0.44964396953582764, "train/loss_total": 0.5254943370819092 }, { "epoch": 0.6179535132246861, "step": 2313, "train/loss_ctc": 1.1115422248840332, "train/loss_error": 0.5740723609924316, "train/loss_total": 0.6815663576126099 }, { "epoch": 0.6182206786000535, "step": 2314, "train/loss_ctc": 0.6730567812919617, "train/loss_error": 0.44874560832977295, "train/loss_total": 0.4936078190803528 }, { "epoch": 0.6184878439754208, "step": 2315, "train/loss_ctc": 1.308251142501831, "train/loss_error": 0.4601929485797882, "train/loss_total": 0.6298046112060547 }, { "epoch": 0.6187550093507881, "step": 2316, "train/loss_ctc": 0.8527747392654419, "train/loss_error": 0.5061174035072327, "train/loss_total": 0.5754488706588745 }, { "epoch": 0.6190221747261555, "step": 2317, "train/loss_ctc": 0.9701160192489624, "train/loss_error": 0.6081376075744629, "train/loss_total": 0.6805332899093628 }, { "epoch": 0.6192893401015228, "step": 2318, "train/loss_ctc": 1.0394188165664673, "train/loss_error": 0.4403746426105499, "train/loss_total": 0.5601834654808044 }, { "epoch": 0.6195565054768902, "step": 2319, "train/loss_ctc": 0.45220160484313965, 
"train/loss_error": 0.4769887924194336, "train/loss_total": 0.4720313847064972 }, { "epoch": 0.6198236708522575, "grad_norm": 1.1929234266281128, "learning_rate": 2.6285866951643067e-05, "loss": 0.5591, "step": 2320 }, { "epoch": 0.6198236708522575, "step": 2320, "train/loss_ctc": 0.4542778432369232, "train/loss_error": 0.4685603380203247, "train/loss_total": 0.4657038450241089 }, { "epoch": 0.620090836227625, "step": 2321, "train/loss_ctc": 0.6045666933059692, "train/loss_error": 0.5000658631324768, "train/loss_total": 0.5209660530090332 }, { "epoch": 0.6203580016029923, "step": 2322, "train/loss_ctc": 1.3964290618896484, "train/loss_error": 0.48164182901382446, "train/loss_total": 0.6645992994308472 }, { "epoch": 0.6206251669783596, "step": 2323, "train/loss_ctc": 0.5656213760375977, "train/loss_error": 0.4576672315597534, "train/loss_total": 0.47925806045532227 }, { "epoch": 0.620892332353727, "step": 2324, "train/loss_ctc": 0.7966647148132324, "train/loss_error": 0.4915432333946228, "train/loss_total": 0.5525675415992737 }, { "epoch": 0.6211594977290943, "step": 2325, "train/loss_ctc": 0.35312405228614807, "train/loss_error": 0.4699268043041229, "train/loss_total": 0.44656628370285034 }, { "epoch": 0.6214266631044617, "step": 2326, "train/loss_ctc": 0.5337169170379639, "train/loss_error": 0.4611978828907013, "train/loss_total": 0.4757017195224762 }, { "epoch": 0.621693828479829, "step": 2327, "train/loss_ctc": 1.1815234422683716, "train/loss_error": 0.48496317863464355, "train/loss_total": 0.6242752075195312 }, { "epoch": 0.6219609938551963, "step": 2328, "train/loss_ctc": 0.9731522798538208, "train/loss_error": 0.5131111741065979, "train/loss_total": 0.6051194071769714 }, { "epoch": 0.6222281592305637, "step": 2329, "train/loss_ctc": 0.5598769187927246, "train/loss_error": 0.45775270462036133, "train/loss_total": 0.478177547454834 }, { "epoch": 0.622495324605931, "grad_norm": 1.64352548122406, "learning_rate": 2.6269837029121025e-05, "loss": 0.5313, "step": 
2330 }, { "epoch": 0.622495324605931, "step": 2330, "train/loss_ctc": 0.33170267939567566, "train/loss_error": 0.47180551290512085, "train/loss_total": 0.4437849521636963 }, { "epoch": 0.6227624899812985, "step": 2331, "train/loss_ctc": 0.7578302621841431, "train/loss_error": 0.44523516297340393, "train/loss_total": 0.5077542066574097 }, { "epoch": 0.6230296553566658, "step": 2332, "train/loss_ctc": 0.8791429996490479, "train/loss_error": 0.5050249099731445, "train/loss_total": 0.5798485279083252 }, { "epoch": 0.6232968207320331, "step": 2333, "train/loss_ctc": 0.8272342681884766, "train/loss_error": 0.5182866454124451, "train/loss_total": 0.5800761580467224 }, { "epoch": 0.6235639861074005, "step": 2334, "train/loss_ctc": 0.778944730758667, "train/loss_error": 0.550838828086853, "train/loss_total": 0.5964599847793579 }, { "epoch": 0.6238311514827678, "step": 2335, "train/loss_ctc": 0.8947319984436035, "train/loss_error": 0.47461995482444763, "train/loss_total": 0.5586423873901367 }, { "epoch": 0.6240983168581352, "step": 2336, "train/loss_ctc": 1.1774170398712158, "train/loss_error": 0.5186691284179688, "train/loss_total": 0.650418758392334 }, { "epoch": 0.6243654822335025, "step": 2337, "train/loss_ctc": 0.3547133505344391, "train/loss_error": 0.4846084713935852, "train/loss_total": 0.45862945914268494 }, { "epoch": 0.6246326476088699, "step": 2338, "train/loss_ctc": 0.7603763341903687, "train/loss_error": 0.5383266806602478, "train/loss_total": 0.582736611366272 }, { "epoch": 0.6248998129842372, "step": 2339, "train/loss_ctc": 1.1118108034133911, "train/loss_error": 0.5334917902946472, "train/loss_total": 0.6491556167602539 }, { "epoch": 0.6251669783596046, "grad_norm": 1.9396289587020874, "learning_rate": 2.6253807106598986e-05, "loss": 0.5608, "step": 2340 }, { "epoch": 0.6251669783596046, "step": 2340, "train/loss_ctc": 0.7394474148750305, "train/loss_error": 0.4856887757778168, "train/loss_total": 0.5364404916763306 }, { "epoch": 0.625434143734972, "step": 
2341, "train/loss_ctc": 0.8807168006896973, "train/loss_error": 0.4922690987586975, "train/loss_total": 0.5699586272239685 }, { "epoch": 0.6257013091103393, "step": 2342, "train/loss_ctc": 0.7650191783905029, "train/loss_error": 0.518962562084198, "train/loss_total": 0.568173885345459 }, { "epoch": 0.6259684744857067, "step": 2343, "train/loss_ctc": 0.8199267983436584, "train/loss_error": 0.4918576776981354, "train/loss_total": 0.557471513748169 }, { "epoch": 0.626235639861074, "step": 2344, "train/loss_ctc": 1.0367658138275146, "train/loss_error": 0.5019578337669373, "train/loss_total": 0.6089194416999817 }, { "epoch": 0.6265028052364413, "step": 2345, "train/loss_ctc": 0.5637894868850708, "train/loss_error": 0.5039218664169312, "train/loss_total": 0.515895426273346 }, { "epoch": 0.6267699706118087, "step": 2346, "train/loss_ctc": 0.9742390513420105, "train/loss_error": 0.48878443241119385, "train/loss_total": 0.5858753323554993 }, { "epoch": 0.627037135987176, "step": 2347, "train/loss_ctc": 0.7879047393798828, "train/loss_error": 0.5179222822189331, "train/loss_total": 0.571918785572052 }, { "epoch": 0.6273043013625434, "step": 2348, "train/loss_ctc": 1.019777536392212, "train/loss_error": 0.5114968419075012, "train/loss_total": 0.6131529808044434 }, { "epoch": 0.6275714667379108, "step": 2349, "train/loss_ctc": 0.5062547922134399, "train/loss_error": 0.5039989352226257, "train/loss_total": 0.5044501423835754 }, { "epoch": 0.6278386321132782, "grad_norm": 2.710197687149048, "learning_rate": 2.6237777184076944e-05, "loss": 0.5632, "step": 2350 }, { "epoch": 0.6278386321132782, "step": 2350, "train/loss_ctc": 0.6163191795349121, "train/loss_error": 0.44823095202445984, "train/loss_total": 0.4818485975265503 }, { "epoch": 0.6281057974886455, "step": 2351, "train/loss_ctc": 1.142878770828247, "train/loss_error": 0.4505455195903778, "train/loss_total": 0.5890121459960938 }, { "epoch": 0.6283729628640128, "step": 2352, "train/loss_ctc": 0.9981396198272705, 
"train/loss_error": 0.5213640332221985, "train/loss_total": 0.616719126701355 }, { "epoch": 0.6286401282393802, "step": 2353, "train/loss_ctc": 0.5557378530502319, "train/loss_error": 0.5879606008529663, "train/loss_total": 0.5815160274505615 }, { "epoch": 0.6289072936147475, "step": 2354, "train/loss_ctc": 0.4839939773082733, "train/loss_error": 0.49840593338012695, "train/loss_total": 0.4955235719680786 }, { "epoch": 0.6291744589901149, "step": 2355, "train/loss_ctc": 0.7095690965652466, "train/loss_error": 0.5364603400230408, "train/loss_total": 0.5710821151733398 }, { "epoch": 0.6294416243654822, "step": 2356, "train/loss_ctc": 1.1621577739715576, "train/loss_error": 0.5306060314178467, "train/loss_total": 0.6569163799285889 }, { "epoch": 0.6297087897408495, "step": 2357, "train/loss_ctc": 0.8174052834510803, "train/loss_error": 0.4956868886947632, "train/loss_total": 0.5600305795669556 }, { "epoch": 0.629975955116217, "step": 2358, "train/loss_ctc": 0.8793708086013794, "train/loss_error": 0.44330278038978577, "train/loss_total": 0.5305163860321045 }, { "epoch": 0.6302431204915843, "step": 2359, "train/loss_ctc": 0.6477584838867188, "train/loss_error": 0.6407091617584229, "train/loss_total": 0.6421190500259399 }, { "epoch": 0.6305102858669517, "grad_norm": 4.579220771789551, "learning_rate": 2.6221747261554906e-05, "loss": 0.5725, "step": 2360 }, { "epoch": 0.6305102858669517, "step": 2360, "train/loss_ctc": 0.808509886264801, "train/loss_error": 0.470486581325531, "train/loss_total": 0.538091242313385 }, { "epoch": 0.630777451242319, "step": 2361, "train/loss_ctc": 0.8467395305633545, "train/loss_error": 0.5897144079208374, "train/loss_total": 0.6411194801330566 }, { "epoch": 0.6310446166176863, "step": 2362, "train/loss_ctc": 1.5176676511764526, "train/loss_error": 0.5555058717727661, "train/loss_total": 0.7479382157325745 }, { "epoch": 0.6313117819930537, "step": 2363, "train/loss_ctc": 0.48371559381484985, "train/loss_error": 0.48117080330848694, 
"train/loss_total": 0.4816797971725464 }, { "epoch": 0.631578947368421, "step": 2364, "train/loss_ctc": 0.8431240916252136, "train/loss_error": 0.5621076226234436, "train/loss_total": 0.6183109283447266 }, { "epoch": 0.6318461127437884, "step": 2365, "train/loss_ctc": 0.5198838710784912, "train/loss_error": 0.4866909384727478, "train/loss_total": 0.49332955479621887 }, { "epoch": 0.6321132781191557, "step": 2366, "train/loss_ctc": 0.7500802874565125, "train/loss_error": 0.5356675386428833, "train/loss_total": 0.5785501003265381 }, { "epoch": 0.6323804434945232, "step": 2367, "train/loss_ctc": 1.0507949590682983, "train/loss_error": 0.5051873326301575, "train/loss_total": 0.6143088340759277 }, { "epoch": 0.6326476088698905, "step": 2368, "train/loss_ctc": 1.0600597858428955, "train/loss_error": 0.4608299136161804, "train/loss_total": 0.5806758999824524 }, { "epoch": 0.6329147742452578, "step": 2369, "train/loss_ctc": 0.8154696226119995, "train/loss_error": 0.4652898609638214, "train/loss_total": 0.535325825214386 }, { "epoch": 0.6331819396206252, "grad_norm": 1.0985885858535767, "learning_rate": 2.6205717339032864e-05, "loss": 0.5829, "step": 2370 }, { "epoch": 0.6331819396206252, "step": 2370, "train/loss_ctc": 0.6530369520187378, "train/loss_error": 0.5186419486999512, "train/loss_total": 0.5455209612846375 }, { "epoch": 0.6334491049959925, "step": 2371, "train/loss_ctc": 1.0018227100372314, "train/loss_error": 0.46789130568504333, "train/loss_total": 0.574677586555481 }, { "epoch": 0.6337162703713599, "step": 2372, "train/loss_ctc": 0.38964778184890747, "train/loss_error": 0.4975046217441559, "train/loss_total": 0.4759332537651062 }, { "epoch": 0.6339834357467272, "step": 2373, "train/loss_ctc": 0.4946310520172119, "train/loss_error": 0.4592236876487732, "train/loss_total": 0.4663051664829254 }, { "epoch": 0.6342506011220945, "step": 2374, "train/loss_ctc": 0.558488667011261, "train/loss_error": 0.42293280363082886, "train/loss_total": 0.4500439763069153 }, { 
"epoch": 0.6345177664974619, "step": 2375, "train/loss_ctc": 2.0057835578918457, "train/loss_error": 0.5121253132820129, "train/loss_total": 0.8108569383621216 }, { "epoch": 0.6347849318728293, "step": 2376, "train/loss_ctc": 0.8043289184570312, "train/loss_error": 0.467501699924469, "train/loss_total": 0.5348671674728394 }, { "epoch": 0.6350520972481967, "step": 2377, "train/loss_ctc": 1.000180721282959, "train/loss_error": 0.5040245652198792, "train/loss_total": 0.6032558083534241 }, { "epoch": 0.635319262623564, "step": 2378, "train/loss_ctc": 0.46644169092178345, "train/loss_error": 0.4878126084804535, "train/loss_total": 0.483538419008255 }, { "epoch": 0.6355864279989314, "step": 2379, "train/loss_ctc": 1.0355432033538818, "train/loss_error": 0.49637115001678467, "train/loss_total": 0.6042056083679199 }, { "epoch": 0.6358535933742987, "grad_norm": 3.283704996109009, "learning_rate": 2.6189687416510822e-05, "loss": 0.5549, "step": 2380 }, { "epoch": 0.6358535933742987, "step": 2380, "train/loss_ctc": 0.740120530128479, "train/loss_error": 0.47837552428245544, "train/loss_total": 0.5307245254516602 }, { "epoch": 0.636120758749666, "step": 2381, "train/loss_ctc": 1.2124252319335938, "train/loss_error": 0.578639805316925, "train/loss_total": 0.7053968906402588 }, { "epoch": 0.6363879241250334, "step": 2382, "train/loss_ctc": 0.38528361916542053, "train/loss_error": 0.49943608045578003, "train/loss_total": 0.4766055941581726 }, { "epoch": 0.6366550895004007, "step": 2383, "train/loss_ctc": 1.0403302907943726, "train/loss_error": 0.499502956867218, "train/loss_total": 0.6076684594154358 }, { "epoch": 0.6369222548757681, "step": 2384, "train/loss_ctc": 0.633922815322876, "train/loss_error": 0.4691855311393738, "train/loss_total": 0.5021330118179321 }, { "epoch": 0.6371894202511355, "step": 2385, "train/loss_ctc": 1.09590744972229, "train/loss_error": 0.47420522570610046, "train/loss_total": 0.5985456705093384 }, { "epoch": 0.6374565856265028, "step": 2386, 
"train/loss_ctc": 0.540597677230835, "train/loss_error": 0.4814092814922333, "train/loss_total": 0.49324697256088257 }, { "epoch": 0.6377237510018702, "step": 2387, "train/loss_ctc": 1.0261285305023193, "train/loss_error": 0.5030264258384705, "train/loss_total": 0.6076468229293823 }, { "epoch": 0.6379909163772375, "step": 2388, "train/loss_ctc": 1.1415789127349854, "train/loss_error": 0.5358133316040039, "train/loss_total": 0.6569664478302002 }, { "epoch": 0.6382580817526049, "step": 2389, "train/loss_ctc": 0.5302240252494812, "train/loss_error": 0.503493070602417, "train/loss_total": 0.5088392496109009 }, { "epoch": 0.6385252471279722, "grad_norm": 2.2670648097991943, "learning_rate": 2.617365749398878e-05, "loss": 0.5688, "step": 2390 }, { "epoch": 0.6385252471279722, "step": 2390, "train/loss_ctc": 1.4699101448059082, "train/loss_error": 0.49829092621803284, "train/loss_total": 0.6926147937774658 }, { "epoch": 0.6387924125033395, "step": 2391, "train/loss_ctc": 0.369430810213089, "train/loss_error": 0.5186086297035217, "train/loss_total": 0.48877307772636414 }, { "epoch": 0.6390595778787069, "step": 2392, "train/loss_ctc": 0.49431538581848145, "train/loss_error": 0.4465271532535553, "train/loss_total": 0.4560847878456116 }, { "epoch": 0.6393267432540742, "step": 2393, "train/loss_ctc": 0.7725009918212891, "train/loss_error": 0.527470052242279, "train/loss_total": 0.5764762759208679 }, { "epoch": 0.6395939086294417, "step": 2394, "train/loss_ctc": 0.6958761215209961, "train/loss_error": 0.4495473802089691, "train/loss_total": 0.4988131523132324 }, { "epoch": 0.639861074004809, "step": 2395, "train/loss_ctc": 0.6353195309638977, "train/loss_error": 0.5192769765853882, "train/loss_total": 0.5424854755401611 }, { "epoch": 0.6401282393801764, "step": 2396, "train/loss_ctc": 0.4890398681163788, "train/loss_error": 0.4860471189022064, "train/loss_total": 0.4866456687450409 }, { "epoch": 0.6403954047555437, "step": 2397, "train/loss_ctc": 1.560211181640625, 
"train/loss_error": 0.4897086024284363, "train/loss_total": 0.7038091421127319 }, { "epoch": 0.640662570130911, "step": 2398, "train/loss_ctc": 0.2892211675643921, "train/loss_error": 0.4758084714412689, "train/loss_total": 0.43849101662635803 }, { "epoch": 0.6409297355062784, "step": 2399, "train/loss_ctc": 1.0566136837005615, "train/loss_error": 0.43957090377807617, "train/loss_total": 0.5629794597625732 }, { "epoch": 0.6411969008816457, "grad_norm": 1.6172327995300293, "learning_rate": 2.6157627571466738e-05, "loss": 0.5447, "step": 2400 }, { "epoch": 0.6411969008816457, "step": 2400, "train/loss_ctc": 0.8305121064186096, "train/loss_error": 0.429708331823349, "train/loss_total": 0.5098690986633301 }, { "epoch": 0.6414640662570131, "step": 2401, "train/loss_ctc": 0.8108018636703491, "train/loss_error": 0.47882628440856934, "train/loss_total": 0.5452213883399963 }, { "epoch": 0.6417312316323804, "step": 2402, "train/loss_ctc": 1.151652455329895, "train/loss_error": 0.5245584845542908, "train/loss_total": 0.6499773263931274 }, { "epoch": 0.6419983970077477, "step": 2403, "train/loss_ctc": 1.8562686443328857, "train/loss_error": 0.4748743772506714, "train/loss_total": 0.7511532306671143 }, { "epoch": 0.6422655623831152, "step": 2404, "train/loss_ctc": 0.5527773499488831, "train/loss_error": 0.46401447057724, "train/loss_total": 0.48176705837249756 }, { "epoch": 0.6425327277584825, "step": 2405, "train/loss_ctc": 1.4640792608261108, "train/loss_error": 0.49789679050445557, "train/loss_total": 0.6911332607269287 }, { "epoch": 0.6427998931338499, "step": 2406, "train/loss_ctc": 1.1840184926986694, "train/loss_error": 0.4921551048755646, "train/loss_total": 0.6305277943611145 }, { "epoch": 0.6430670585092172, "step": 2407, "train/loss_ctc": 0.7751659154891968, "train/loss_error": 0.4497108459472656, "train/loss_total": 0.5148018598556519 }, { "epoch": 0.6433342238845846, "step": 2408, "train/loss_ctc": 0.8549221754074097, "train/loss_error": 0.5675807595252991, 
"train/loss_total": 0.6250490546226501 }, { "epoch": 0.6436013892599519, "step": 2409, "train/loss_ctc": 0.3739016354084015, "train/loss_error": 0.4491247236728668, "train/loss_total": 0.4340801239013672 }, { "epoch": 0.6438685546353192, "grad_norm": 1.0398857593536377, "learning_rate": 2.6141597648944696e-05, "loss": 0.5834, "step": 2410 }, { "epoch": 0.6438685546353192, "step": 2410, "train/loss_ctc": 0.5642654895782471, "train/loss_error": 0.4848458170890808, "train/loss_total": 0.5007297396659851 }, { "epoch": 0.6441357200106866, "step": 2411, "train/loss_ctc": 0.872653067111969, "train/loss_error": 0.5228753089904785, "train/loss_total": 0.5928308367729187 }, { "epoch": 0.644402885386054, "step": 2412, "train/loss_ctc": 0.7540321350097656, "train/loss_error": 0.45291805267333984, "train/loss_total": 0.5131409168243408 }, { "epoch": 0.6446700507614214, "step": 2413, "train/loss_ctc": 0.506367027759552, "train/loss_error": 0.4820006787776947, "train/loss_total": 0.48687395453453064 }, { "epoch": 0.6449372161367887, "step": 2414, "train/loss_ctc": 0.7545442581176758, "train/loss_error": 0.5265989303588867, "train/loss_total": 0.5721880197525024 }, { "epoch": 0.645204381512156, "step": 2415, "train/loss_ctc": 0.6293585896492004, "train/loss_error": 0.4715864956378937, "train/loss_total": 0.503140926361084 }, { "epoch": 0.6454715468875234, "step": 2416, "train/loss_ctc": 0.7196609377861023, "train/loss_error": 0.42143386602401733, "train/loss_total": 0.4810792803764343 }, { "epoch": 0.6457387122628907, "step": 2417, "train/loss_ctc": 1.3464256525039673, "train/loss_error": 0.5473387241363525, "train/loss_total": 0.7071561217308044 }, { "epoch": 0.6460058776382581, "step": 2418, "train/loss_ctc": 0.7104256749153137, "train/loss_error": 0.5657211542129517, "train/loss_total": 0.594662070274353 }, { "epoch": 0.6462730430136254, "step": 2419, "train/loss_ctc": 1.1305012702941895, "train/loss_error": 0.4311360716819763, "train/loss_total": 0.57100909948349 }, { "epoch": 
0.6465402083889927, "grad_norm": 1.7292025089263916, "learning_rate": 2.6125567726422658e-05, "loss": 0.5523, "step": 2420 }, { "epoch": 0.6465402083889927, "step": 2420, "train/loss_ctc": 0.26971977949142456, "train/loss_error": 0.46563082933425903, "train/loss_total": 0.42644864320755005 }, { "epoch": 0.6468073737643601, "step": 2421, "train/loss_ctc": 0.7563420534133911, "train/loss_error": 0.42699795961380005, "train/loss_total": 0.49286675453186035 }, { "epoch": 0.6470745391397275, "step": 2422, "train/loss_ctc": 0.6891849040985107, "train/loss_error": 0.45798036456108093, "train/loss_total": 0.5042212605476379 }, { "epoch": 0.6473417045150949, "step": 2423, "train/loss_ctc": 0.7406444549560547, "train/loss_error": 0.4838251769542694, "train/loss_total": 0.5351890325546265 }, { "epoch": 0.6476088698904622, "step": 2424, "train/loss_ctc": 0.8638713955879211, "train/loss_error": 0.4810316562652588, "train/loss_total": 0.5575996041297913 }, { "epoch": 0.6478760352658296, "step": 2425, "train/loss_ctc": 0.5915875434875488, "train/loss_error": 0.4786660075187683, "train/loss_total": 0.5012503266334534 }, { "epoch": 0.6481432006411969, "step": 2426, "train/loss_ctc": 1.1192246675491333, "train/loss_error": 0.517722487449646, "train/loss_total": 0.6380228996276855 }, { "epoch": 0.6484103660165642, "step": 2427, "train/loss_ctc": 0.3709845244884491, "train/loss_error": 0.516623854637146, "train/loss_total": 0.4874959886074066 }, { "epoch": 0.6486775313919316, "step": 2428, "train/loss_ctc": 1.7390552759170532, "train/loss_error": 0.5251469016075134, "train/loss_total": 0.7679286003112793 }, { "epoch": 0.6489446967672989, "step": 2429, "train/loss_ctc": 0.28003278374671936, "train/loss_error": 0.493278443813324, "train/loss_total": 0.450629323720932 }, { "epoch": 0.6492118621426664, "grad_norm": 2.3158812522888184, "learning_rate": 2.6109537803900616e-05, "loss": 0.5362, "step": 2430 }, { "epoch": 0.6492118621426664, "step": 2430, "train/loss_ctc": 0.8214490413665771, 
"train/loss_error": 0.5283877849578857, "train/loss_total": 0.5870000720024109 }, { "epoch": 0.6494790275180337, "step": 2431, "train/loss_ctc": 1.6836977005004883, "train/loss_error": 0.48213285207748413, "train/loss_total": 0.7224458456039429 }, { "epoch": 0.649746192893401, "step": 2432, "train/loss_ctc": 0.8759747743606567, "train/loss_error": 0.466356098651886, "train/loss_total": 0.5482798218727112 }, { "epoch": 0.6500133582687684, "step": 2433, "train/loss_ctc": 1.0267994403839111, "train/loss_error": 0.43279120326042175, "train/loss_total": 0.5515928864479065 }, { "epoch": 0.6502805236441357, "step": 2434, "train/loss_ctc": 1.3952802419662476, "train/loss_error": 0.45601001381874084, "train/loss_total": 0.6438640356063843 }, { "epoch": 0.6505476890195031, "step": 2435, "train/loss_ctc": 1.3331595659255981, "train/loss_error": 0.4547666609287262, "train/loss_total": 0.6304452419281006 }, { "epoch": 0.6508148543948704, "step": 2436, "train/loss_ctc": 0.5714223980903625, "train/loss_error": 0.4881964325904846, "train/loss_total": 0.5048416256904602 }, { "epoch": 0.6510820197702378, "step": 2437, "train/loss_ctc": 0.7312445640563965, "train/loss_error": 0.5455858707427979, "train/loss_total": 0.5827175974845886 }, { "epoch": 0.6513491851456051, "step": 2438, "train/loss_ctc": 0.3196544051170349, "train/loss_error": 0.4537937045097351, "train/loss_total": 0.4269658327102661 }, { "epoch": 0.6516163505209724, "step": 2439, "train/loss_ctc": 1.2538659572601318, "train/loss_error": 0.45793336629867554, "train/loss_total": 0.6171199083328247 }, { "epoch": 0.6518835158963399, "grad_norm": 1.6693085432052612, "learning_rate": 2.6093507881378574e-05, "loss": 0.5815, "step": 2440 }, { "epoch": 0.6518835158963399, "step": 2440, "train/loss_ctc": 0.9544434547424316, "train/loss_error": 0.5002941489219666, "train/loss_total": 0.5911239981651306 }, { "epoch": 0.6521506812717072, "step": 2441, "train/loss_ctc": 0.8627065420150757, "train/loss_error": 0.518591582775116, 
"train/loss_total": 0.587414562702179 }, { "epoch": 0.6524178466470746, "step": 2442, "train/loss_ctc": 1.324676275253296, "train/loss_error": 0.48809605836868286, "train/loss_total": 0.6554120779037476 }, { "epoch": 0.6526850120224419, "step": 2443, "train/loss_ctc": 0.9585050344467163, "train/loss_error": 0.3955121338367462, "train/loss_total": 0.5081107020378113 }, { "epoch": 0.6529521773978092, "step": 2444, "train/loss_ctc": 0.6882954835891724, "train/loss_error": 0.46427860856056213, "train/loss_total": 0.5090819597244263 }, { "epoch": 0.6532193427731766, "step": 2445, "train/loss_ctc": 1.3485519886016846, "train/loss_error": 0.4701414108276367, "train/loss_total": 0.6458235383033752 }, { "epoch": 0.6534865081485439, "step": 2446, "train/loss_ctc": 1.0893689393997192, "train/loss_error": 0.5395680665969849, "train/loss_total": 0.6495282649993896 }, { "epoch": 0.6537536735239113, "step": 2447, "train/loss_ctc": 0.7282141447067261, "train/loss_error": 0.48811352252960205, "train/loss_total": 0.5361336469650269 }, { "epoch": 0.6540208388992786, "step": 2448, "train/loss_ctc": 0.9973967671394348, "train/loss_error": 0.4795180857181549, "train/loss_total": 0.5830938220024109 }, { "epoch": 0.654288004274646, "step": 2449, "train/loss_ctc": 0.6272554397583008, "train/loss_error": 0.44733288884162903, "train/loss_total": 0.48331740498542786 }, { "epoch": 0.6545551696500134, "grad_norm": 2.880495548248291, "learning_rate": 2.6077477958856532e-05, "loss": 0.5749, "step": 2450 }, { "epoch": 0.6545551696500134, "step": 2450, "train/loss_ctc": 0.9384908676147461, "train/loss_error": 0.5145816206932068, "train/loss_total": 0.5993634462356567 }, { "epoch": 0.6548223350253807, "step": 2451, "train/loss_ctc": 0.6292911767959595, "train/loss_error": 0.4956827163696289, "train/loss_total": 0.5224044322967529 }, { "epoch": 0.6550895004007481, "step": 2452, "train/loss_ctc": 1.004127025604248, "train/loss_error": 0.480000764131546, "train/loss_total": 0.5848260521888733 }, { 
"epoch": 0.6553566657761154, "step": 2453, "train/loss_ctc": 1.3800251483917236, "train/loss_error": 0.457317054271698, "train/loss_total": 0.641858696937561 }, { "epoch": 0.6556238311514828, "step": 2454, "train/loss_ctc": 1.2801387310028076, "train/loss_error": 0.54136061668396, "train/loss_total": 0.6891162395477295 }, { "epoch": 0.6558909965268501, "step": 2455, "train/loss_ctc": 1.4063352346420288, "train/loss_error": 0.4333558976650238, "train/loss_total": 0.6279517412185669 }, { "epoch": 0.6561581619022174, "step": 2456, "train/loss_ctc": 1.2946885824203491, "train/loss_error": 0.4605935215950012, "train/loss_total": 0.6274125576019287 }, { "epoch": 0.6564253272775848, "step": 2457, "train/loss_ctc": 0.4974789023399353, "train/loss_error": 0.48070138692855835, "train/loss_total": 0.48405689001083374 }, { "epoch": 0.6566924926529522, "step": 2458, "train/loss_ctc": 0.9469785690307617, "train/loss_error": 0.4738551676273346, "train/loss_total": 0.5684798359870911 }, { "epoch": 0.6569596580283196, "step": 2459, "train/loss_ctc": 0.8438612818717957, "train/loss_error": 0.4847458600997925, "train/loss_total": 0.55656898021698 }, { "epoch": 0.6572268234036869, "grad_norm": 1.0331507921218872, "learning_rate": 2.606144803633449e-05, "loss": 0.5902, "step": 2460 }, { "epoch": 0.6572268234036869, "step": 2460, "train/loss_ctc": 0.6032195091247559, "train/loss_error": 0.48136216402053833, "train/loss_total": 0.5057336091995239 }, { "epoch": 0.6574939887790542, "step": 2461, "train/loss_ctc": 0.46392005681991577, "train/loss_error": 0.5067206025123596, "train/loss_total": 0.4981605112552643 }, { "epoch": 0.6577611541544216, "step": 2462, "train/loss_ctc": 1.3323490619659424, "train/loss_error": 0.43449866771698, "train/loss_total": 0.6140687465667725 }, { "epoch": 0.6580283195297889, "step": 2463, "train/loss_ctc": 0.9268487095832825, "train/loss_error": 0.4230923652648926, "train/loss_total": 0.5238436460494995 }, { "epoch": 0.6582954849051563, "step": 2464, 
"train/loss_ctc": 0.7529875040054321, "train/loss_error": 0.4553987979888916, "train/loss_total": 0.5149165391921997 }, { "epoch": 0.6585626502805236, "step": 2465, "train/loss_ctc": 0.8155443668365479, "train/loss_error": 0.47619712352752686, "train/loss_total": 0.5440665483474731 }, { "epoch": 0.658829815655891, "step": 2466, "train/loss_ctc": 0.9557648301124573, "train/loss_error": 0.5421391129493713, "train/loss_total": 0.6248642802238464 }, { "epoch": 0.6590969810312584, "step": 2467, "train/loss_ctc": 0.8657554984092712, "train/loss_error": 0.42686301469802856, "train/loss_total": 0.514641523361206 }, { "epoch": 0.6593641464066257, "step": 2468, "train/loss_ctc": 0.6200259923934937, "train/loss_error": 0.4962165653705597, "train/loss_total": 0.5209784507751465 }, { "epoch": 0.6596313117819931, "step": 2469, "train/loss_ctc": 0.9813040494918823, "train/loss_error": 0.511356770992279, "train/loss_total": 0.6053462028503418 }, { "epoch": 0.6598984771573604, "grad_norm": 1.758487582206726, "learning_rate": 2.6045418113812448e-05, "loss": 0.5467, "step": 2470 }, { "epoch": 0.6598984771573604, "step": 2470, "train/loss_ctc": 0.3723706007003784, "train/loss_error": 0.5493146181106567, "train/loss_total": 0.5139257907867432 }, { "epoch": 0.6601656425327278, "step": 2471, "train/loss_ctc": 0.6661413908004761, "train/loss_error": 0.4599279761314392, "train/loss_total": 0.5011706948280334 }, { "epoch": 0.6604328079080951, "step": 2472, "train/loss_ctc": 0.6114945411682129, "train/loss_error": 0.533406138420105, "train/loss_total": 0.5490238070487976 }, { "epoch": 0.6606999732834624, "step": 2473, "train/loss_ctc": 1.0991787910461426, "train/loss_error": 0.44792652130126953, "train/loss_total": 0.5781769752502441 }, { "epoch": 0.6609671386588298, "step": 2474, "train/loss_ctc": 1.4447063207626343, "train/loss_error": 0.5606310367584229, "train/loss_total": 0.7374460697174072 }, { "epoch": 0.6612343040341971, "step": 2475, "train/loss_ctc": 0.6076755523681641, 
"train/loss_error": 0.42672333121299744, "train/loss_total": 0.46291378140449524 }, { "epoch": 0.6615014694095646, "step": 2476, "train/loss_ctc": 0.6319173574447632, "train/loss_error": 0.44874441623687744, "train/loss_total": 0.4853789806365967 }, { "epoch": 0.6617686347849319, "step": 2477, "train/loss_ctc": 1.2245140075683594, "train/loss_error": 0.4328053593635559, "train/loss_total": 0.5911471247673035 }, { "epoch": 0.6620358001602992, "step": 2478, "train/loss_ctc": 0.6534522771835327, "train/loss_error": 0.5069893002510071, "train/loss_total": 0.536281943321228 }, { "epoch": 0.6623029655356666, "step": 2479, "train/loss_ctc": 1.6881531476974487, "train/loss_error": 0.47085171937942505, "train/loss_total": 0.7143120169639587 }, { "epoch": 0.6625701309110339, "grad_norm": 1.1870168447494507, "learning_rate": 2.602938819129041e-05, "loss": 0.567, "step": 2480 }, { "epoch": 0.6625701309110339, "step": 2480, "train/loss_ctc": 0.6191748380661011, "train/loss_error": 0.5039449334144592, "train/loss_total": 0.5269908905029297 }, { "epoch": 0.6628372962864013, "step": 2481, "train/loss_ctc": 0.8385828733444214, "train/loss_error": 0.43003055453300476, "train/loss_total": 0.511741042137146 }, { "epoch": 0.6631044616617686, "step": 2482, "train/loss_ctc": 0.9482558965682983, "train/loss_error": 0.4293338358402252, "train/loss_total": 0.5331182479858398 }, { "epoch": 0.663371627037136, "step": 2483, "train/loss_ctc": 1.8547064065933228, "train/loss_error": 0.4923431873321533, "train/loss_total": 0.7648158073425293 }, { "epoch": 0.6636387924125033, "step": 2484, "train/loss_ctc": 0.8062237501144409, "train/loss_error": 0.5981716513633728, "train/loss_total": 0.6397820711135864 }, { "epoch": 0.6639059577878706, "step": 2485, "train/loss_ctc": 0.6840248703956604, "train/loss_error": 0.46788129210472107, "train/loss_total": 0.5111100077629089 }, { "epoch": 0.6641731231632381, "step": 2486, "train/loss_ctc": 1.2260222434997559, "train/loss_error": 0.5047054290771484, 
"train/loss_total": 0.6489688158035278 }, { "epoch": 0.6644402885386054, "step": 2487, "train/loss_ctc": 0.6080583930015564, "train/loss_error": 0.4505705237388611, "train/loss_total": 0.48206812143325806 }, { "epoch": 0.6647074539139728, "step": 2488, "train/loss_ctc": 0.7090607285499573, "train/loss_error": 0.4550221264362335, "train/loss_total": 0.5058298110961914 }, { "epoch": 0.6649746192893401, "step": 2489, "train/loss_ctc": 0.5828854441642761, "train/loss_error": 0.48124441504478455, "train/loss_total": 0.5015726089477539 }, { "epoch": 0.6652417846647074, "grad_norm": 1.0979173183441162, "learning_rate": 2.6013358268768368e-05, "loss": 0.5626, "step": 2490 }, { "epoch": 0.6652417846647074, "step": 2490, "train/loss_ctc": 0.9154868125915527, "train/loss_error": 0.5355636477470398, "train/loss_total": 0.6115483045578003 }, { "epoch": 0.6655089500400748, "step": 2491, "train/loss_ctc": 0.6822959184646606, "train/loss_error": 0.49521106481552124, "train/loss_total": 0.532628059387207 }, { "epoch": 0.6657761154154421, "step": 2492, "train/loss_ctc": 0.3911342918872833, "train/loss_error": 0.434621125459671, "train/loss_total": 0.42592376470565796 }, { "epoch": 0.6660432807908095, "step": 2493, "train/loss_ctc": 0.4275742471218109, "train/loss_error": 0.4743730127811432, "train/loss_total": 0.4650132656097412 }, { "epoch": 0.6663104461661769, "step": 2494, "train/loss_ctc": 0.994009792804718, "train/loss_error": 0.448971152305603, "train/loss_total": 0.5579788684844971 }, { "epoch": 0.6665776115415443, "step": 2495, "train/loss_ctc": 1.0737159252166748, "train/loss_error": 0.48171934485435486, "train/loss_total": 0.6001186370849609 }, { "epoch": 0.6668447769169116, "step": 2496, "train/loss_ctc": 1.4382834434509277, "train/loss_error": 0.4591459035873413, "train/loss_total": 0.6549733877182007 }, { "epoch": 0.6671119422922789, "step": 2497, "train/loss_ctc": 1.2971487045288086, "train/loss_error": 0.423718124628067, "train/loss_total": 0.5984042882919312 }, { 
"epoch": 0.6673791076676463, "step": 2498, "train/loss_ctc": 0.8170950412750244, "train/loss_error": 0.4803590476512909, "train/loss_total": 0.5477062463760376 }, { "epoch": 0.6676462730430136, "step": 2499, "train/loss_ctc": 0.4268971085548401, "train/loss_error": 0.4526873230934143, "train/loss_total": 0.44752928614616394 }, { "epoch": 0.667913438418381, "grad_norm": 1.0593785047531128, "learning_rate": 2.5997328346246326e-05, "loss": 0.5442, "step": 2500 }, { "epoch": 0.667913438418381, "step": 2500, "train/loss_ctc": 1.1780290603637695, "train/loss_error": 0.45736703276634216, "train/loss_total": 0.6014994382858276 }, { "epoch": 0.6681806037937483, "step": 2501, "train/loss_ctc": 1.1913405656814575, "train/loss_error": 0.5237951278686523, "train/loss_total": 0.6573042273521423 }, { "epoch": 0.6684477691691156, "step": 2502, "train/loss_ctc": 1.0209665298461914, "train/loss_error": 0.476516991853714, "train/loss_total": 0.5854068994522095 }, { "epoch": 0.668714934544483, "step": 2503, "train/loss_ctc": 0.7818716764450073, "train/loss_error": 0.4689525067806244, "train/loss_total": 0.531536340713501 }, { "epoch": 0.6689820999198504, "step": 2504, "train/loss_ctc": 1.3750277757644653, "train/loss_error": 0.43308019638061523, "train/loss_total": 0.6214697360992432 }, { "epoch": 0.6692492652952178, "step": 2505, "train/loss_ctc": 0.8984801769256592, "train/loss_error": 0.5079973340034485, "train/loss_total": 0.5860939025878906 }, { "epoch": 0.6695164306705851, "step": 2506, "train/loss_ctc": 0.830041229724884, "train/loss_error": 0.44435617327690125, "train/loss_total": 0.5214931964874268 }, { "epoch": 0.6697835960459524, "step": 2507, "train/loss_ctc": 1.7888689041137695, "train/loss_error": 0.44231024384498596, "train/loss_total": 0.7116219997406006 }, { "epoch": 0.6700507614213198, "step": 2508, "train/loss_ctc": 0.9996908903121948, "train/loss_error": 0.5705764889717102, "train/loss_total": 0.6563993692398071 }, { "epoch": 0.6703179267966871, "step": 2509, 
"train/loss_ctc": 1.2313590049743652, "train/loss_error": 0.5671404004096985, "train/loss_total": 0.6999841332435608 }, { "epoch": 0.6705850921720545, "grad_norm": 2.3418147563934326, "learning_rate": 2.5981298423724287e-05, "loss": 0.6173, "step": 2510 }, { "epoch": 0.6705850921720545, "step": 2510, "train/loss_ctc": 0.6740537285804749, "train/loss_error": 0.48297613859176636, "train/loss_total": 0.5211916565895081 }, { "epoch": 0.6708522575474218, "step": 2511, "train/loss_ctc": 0.6517959833145142, "train/loss_error": 0.505392849445343, "train/loss_total": 0.5346734523773193 }, { "epoch": 0.6711194229227893, "step": 2512, "train/loss_ctc": 0.9358398914337158, "train/loss_error": 0.4545433521270752, "train/loss_total": 0.5508026480674744 }, { "epoch": 0.6713865882981566, "step": 2513, "train/loss_ctc": 1.197960376739502, "train/loss_error": 0.534764289855957, "train/loss_total": 0.667403519153595 }, { "epoch": 0.6716537536735239, "step": 2514, "train/loss_ctc": 1.1058759689331055, "train/loss_error": 0.4792984127998352, "train/loss_total": 0.6046139001846313 }, { "epoch": 0.6719209190488913, "step": 2515, "train/loss_ctc": 1.4410263299942017, "train/loss_error": 0.4246121048927307, "train/loss_total": 0.6278949975967407 }, { "epoch": 0.6721880844242586, "step": 2516, "train/loss_ctc": 0.4030989408493042, "train/loss_error": 0.4307352602481842, "train/loss_total": 0.4252080023288727 }, { "epoch": 0.672455249799626, "step": 2517, "train/loss_ctc": 0.7372463941574097, "train/loss_error": 0.4482431411743164, "train/loss_total": 0.5060437917709351 }, { "epoch": 0.6727224151749933, "step": 2518, "train/loss_ctc": 0.4619552493095398, "train/loss_error": 0.48187577724456787, "train/loss_total": 0.4778916835784912 }, { "epoch": 0.6729895805503606, "step": 2519, "train/loss_ctc": 0.5550915002822876, "train/loss_error": 0.48561713099479675, "train/loss_total": 0.4995120167732239 }, { "epoch": 0.673256745925728, "grad_norm": 1.3145917654037476, "learning_rate": 
2.5965268501202245e-05, "loss": 0.5415, "step": 2520 }, { "epoch": 0.673256745925728, "step": 2520, "train/loss_ctc": 1.0215195417404175, "train/loss_error": 0.46703946590423584, "train/loss_total": 0.5779354572296143 }, { "epoch": 0.6735239113010953, "step": 2521, "train/loss_ctc": 0.5720057487487793, "train/loss_error": 0.4448806941509247, "train/loss_total": 0.4703057110309601 }, { "epoch": 0.6737910766764628, "step": 2522, "train/loss_ctc": 0.6532067060470581, "train/loss_error": 0.47406482696533203, "train/loss_total": 0.5098931789398193 }, { "epoch": 0.6740582420518301, "step": 2523, "train/loss_ctc": 1.0365471839904785, "train/loss_error": 0.49557897448539734, "train/loss_total": 0.6037726402282715 }, { "epoch": 0.6743254074271975, "step": 2524, "train/loss_ctc": 0.5872087478637695, "train/loss_error": 0.5437229871749878, "train/loss_total": 0.5524201393127441 }, { "epoch": 0.6745925728025648, "step": 2525, "train/loss_ctc": 1.0166270732879639, "train/loss_error": 0.5339509844779968, "train/loss_total": 0.630486249923706 }, { "epoch": 0.6748597381779321, "step": 2526, "train/loss_ctc": 0.6305155754089355, "train/loss_error": 0.4608859121799469, "train/loss_total": 0.4948118329048157 }, { "epoch": 0.6751269035532995, "step": 2527, "train/loss_ctc": 0.4327871799468994, "train/loss_error": 0.5233645439147949, "train/loss_total": 0.5052490830421448 }, { "epoch": 0.6753940689286668, "step": 2528, "train/loss_ctc": 1.4505889415740967, "train/loss_error": 0.5289775729179382, "train/loss_total": 0.7132998704910278 }, { "epoch": 0.6756612343040342, "step": 2529, "train/loss_ctc": 1.3795788288116455, "train/loss_error": 0.49002620577812195, "train/loss_total": 0.6679367423057556 }, { "epoch": 0.6759283996794015, "grad_norm": 1.1306899785995483, "learning_rate": 2.5949238578680204e-05, "loss": 0.5726, "step": 2530 }, { "epoch": 0.6759283996794015, "step": 2530, "train/loss_ctc": 0.9894024729728699, "train/loss_error": 0.5174568295478821, "train/loss_total": 
0.6118459701538086 }, { "epoch": 0.6761955650547689, "step": 2531, "train/loss_ctc": 1.0195770263671875, "train/loss_error": 0.5463733673095703, "train/loss_total": 0.6410140991210938 }, { "epoch": 0.6764627304301363, "step": 2532, "train/loss_ctc": 0.8460643291473389, "train/loss_error": 0.5527092814445496, "train/loss_total": 0.6113802790641785 }, { "epoch": 0.6767298958055036, "step": 2533, "train/loss_ctc": 1.056012511253357, "train/loss_error": 0.4860871136188507, "train/loss_total": 0.6000722050666809 }, { "epoch": 0.676997061180871, "step": 2534, "train/loss_ctc": 0.7552390694618225, "train/loss_error": 0.4301227033138275, "train/loss_total": 0.4951459765434265 }, { "epoch": 0.6772642265562383, "step": 2535, "train/loss_ctc": 1.0127562284469604, "train/loss_error": 0.40340089797973633, "train/loss_total": 0.525272011756897 }, { "epoch": 0.6775313919316056, "step": 2536, "train/loss_ctc": 0.8144130706787109, "train/loss_error": 0.5579736828804016, "train/loss_total": 0.6092615723609924 }, { "epoch": 0.677798557306973, "step": 2537, "train/loss_ctc": 0.7076586484909058, "train/loss_error": 0.5005475282669067, "train/loss_total": 0.5419697761535645 }, { "epoch": 0.6780657226823403, "step": 2538, "train/loss_ctc": 0.6701695322990417, "train/loss_error": 0.4387844204902649, "train/loss_total": 0.4850614666938782 }, { "epoch": 0.6783328880577078, "step": 2539, "train/loss_ctc": 0.9305233955383301, "train/loss_error": 0.48800358176231384, "train/loss_total": 0.576507568359375 }, { "epoch": 0.6786000534330751, "grad_norm": 4.862504482269287, "learning_rate": 2.5933208656158165e-05, "loss": 0.5698, "step": 2540 }, { "epoch": 0.6786000534330751, "step": 2540, "train/loss_ctc": 0.9305868148803711, "train/loss_error": 0.4558608829975128, "train/loss_total": 0.5508061051368713 }, { "epoch": 0.6788672188084425, "step": 2541, "train/loss_ctc": 1.150562047958374, "train/loss_error": 0.49024027585983276, "train/loss_total": 0.6223046183586121 }, { "epoch": 
0.6791343841838098, "step": 2542, "train/loss_ctc": 0.5420478582382202, "train/loss_error": 0.503508985042572, "train/loss_total": 0.5112167596817017 }, { "epoch": 0.6794015495591771, "step": 2543, "train/loss_ctc": 0.5752859115600586, "train/loss_error": 0.5171982645988464, "train/loss_total": 0.5288158059120178 }, { "epoch": 0.6796687149345445, "step": 2544, "train/loss_ctc": 1.0389206409454346, "train/loss_error": 0.5188641548156738, "train/loss_total": 0.622875452041626 }, { "epoch": 0.6799358803099118, "step": 2545, "train/loss_ctc": 0.6636767387390137, "train/loss_error": 0.40593400597572327, "train/loss_total": 0.45748257637023926 }, { "epoch": 0.6802030456852792, "step": 2546, "train/loss_ctc": 0.9721450209617615, "train/loss_error": 0.513859748840332, "train/loss_total": 0.605516791343689 }, { "epoch": 0.6804702110606465, "step": 2547, "train/loss_ctc": 0.7661432027816772, "train/loss_error": 0.506755530834198, "train/loss_total": 0.5586330890655518 }, { "epoch": 0.6807373764360138, "step": 2548, "train/loss_ctc": 0.39601051807403564, "train/loss_error": 0.5010442137718201, "train/loss_total": 0.48003748059272766 }, { "epoch": 0.6810045418113813, "step": 2549, "train/loss_ctc": 0.9764049053192139, "train/loss_error": 0.4326944947242737, "train/loss_total": 0.5414366126060486 }, { "epoch": 0.6812717071867486, "grad_norm": 1.2442086935043335, "learning_rate": 2.5917178733636123e-05, "loss": 0.5479, "step": 2550 }, { "epoch": 0.6812717071867486, "step": 2550, "train/loss_ctc": 0.7455668449401855, "train/loss_error": 0.5014264583587646, "train/loss_total": 0.5502545237541199 }, { "epoch": 0.681538872562116, "step": 2551, "train/loss_ctc": 0.8284452557563782, "train/loss_error": 0.5102340579032898, "train/loss_total": 0.5738762617111206 }, { "epoch": 0.6818060379374833, "step": 2552, "train/loss_ctc": 0.8416971564292908, "train/loss_error": 0.5317692160606384, "train/loss_total": 0.593754768371582 }, { "epoch": 0.6820732033128507, "step": 2553, 
"train/loss_ctc": 0.8379570841789246, "train/loss_error": 0.49187350273132324, "train/loss_total": 0.5610902309417725 }, { "epoch": 0.682340368688218, "step": 2554, "train/loss_ctc": 1.0440082550048828, "train/loss_error": 0.5090702772140503, "train/loss_total": 0.6160578727722168 }, { "epoch": 0.6826075340635853, "step": 2555, "train/loss_ctc": 1.0021822452545166, "train/loss_error": 0.4763428568840027, "train/loss_total": 0.5815107226371765 }, { "epoch": 0.6828746994389527, "step": 2556, "train/loss_ctc": 1.3926784992218018, "train/loss_error": 0.46616607904434204, "train/loss_total": 0.6514685750007629 }, { "epoch": 0.68314186481432, "step": 2557, "train/loss_ctc": 0.6864548921585083, "train/loss_error": 0.4878089427947998, "train/loss_total": 0.5275381803512573 }, { "epoch": 0.6834090301896875, "step": 2558, "train/loss_ctc": 0.7820098400115967, "train/loss_error": 0.4812341332435608, "train/loss_total": 0.5413892865180969 }, { "epoch": 0.6836761955650548, "step": 2559, "train/loss_ctc": 0.712270975112915, "train/loss_error": 0.5043826699256897, "train/loss_total": 0.5459603071212769 }, { "epoch": 0.6839433609404221, "grad_norm": 1.144493818283081, "learning_rate": 2.590114881111408e-05, "loss": 0.5743, "step": 2560 }, { "epoch": 0.6839433609404221, "step": 2560, "train/loss_ctc": 0.6178959608078003, "train/loss_error": 0.44864168763160706, "train/loss_total": 0.4824925661087036 }, { "epoch": 0.6842105263157895, "step": 2561, "train/loss_ctc": 0.8260366916656494, "train/loss_error": 0.4770333170890808, "train/loss_total": 0.5468339920043945 }, { "epoch": 0.6844776916911568, "step": 2562, "train/loss_ctc": 1.836467981338501, "train/loss_error": 0.46755245327949524, "train/loss_total": 0.7413355708122253 }, { "epoch": 0.6847448570665242, "step": 2563, "train/loss_ctc": 0.9641164541244507, "train/loss_error": 0.5448708534240723, "train/loss_total": 0.6287199854850769 }, { "epoch": 0.6850120224418915, "step": 2564, "train/loss_ctc": 0.6308044195175171, 
"train/loss_error": 0.43578749895095825, "train/loss_total": 0.47479090094566345 }, { "epoch": 0.6852791878172588, "step": 2565, "train/loss_ctc": 0.5670613050460815, "train/loss_error": 0.44739171862602234, "train/loss_total": 0.4713256359100342 }, { "epoch": 0.6855463531926262, "step": 2566, "train/loss_ctc": 0.7666748762130737, "train/loss_error": 0.4865773320198059, "train/loss_total": 0.5425968170166016 }, { "epoch": 0.6858135185679936, "step": 2567, "train/loss_ctc": 0.5904679894447327, "train/loss_error": 0.47225087881088257, "train/loss_total": 0.49589431285858154 }, { "epoch": 0.686080683943361, "step": 2568, "train/loss_ctc": 0.8673580288887024, "train/loss_error": 0.5420198440551758, "train/loss_total": 0.6070874929428101 }, { "epoch": 0.6863478493187283, "step": 2569, "train/loss_ctc": 0.9461674094200134, "train/loss_error": 0.46029698848724365, "train/loss_total": 0.5574710369110107 }, { "epoch": 0.6866150146940957, "grad_norm": 1.607961654663086, "learning_rate": 2.588511888859204e-05, "loss": 0.5549, "step": 2570 }, { "epoch": 0.6866150146940957, "step": 2570, "train/loss_ctc": 0.9646512866020203, "train/loss_error": 0.4284529387950897, "train/loss_total": 0.5356926321983337 }, { "epoch": 0.686882180069463, "step": 2571, "train/loss_ctc": 1.1551538705825806, "train/loss_error": 0.49533218145370483, "train/loss_total": 0.627296507358551 }, { "epoch": 0.6871493454448303, "step": 2572, "train/loss_ctc": 0.6025792956352234, "train/loss_error": 0.5590171813964844, "train/loss_total": 0.5677295923233032 }, { "epoch": 0.6874165108201977, "step": 2573, "train/loss_ctc": 0.4392877221107483, "train/loss_error": 0.5759618878364563, "train/loss_total": 0.5486270189285278 }, { "epoch": 0.687683676195565, "step": 2574, "train/loss_ctc": 0.3999435305595398, "train/loss_error": 0.45576977729797363, "train/loss_total": 0.4446045458316803 }, { "epoch": 0.6879508415709324, "step": 2575, "train/loss_ctc": 0.681121289730072, "train/loss_error": 0.5174136161804199, 
"train/loss_total": 0.5501551628112793 }, { "epoch": 0.6882180069462998, "step": 2576, "train/loss_ctc": 1.5347490310668945, "train/loss_error": 0.45031943917274475, "train/loss_total": 0.6672053933143616 }, { "epoch": 0.6884851723216671, "step": 2577, "train/loss_ctc": 0.6846322417259216, "train/loss_error": 0.516963005065918, "train/loss_total": 0.5504968762397766 }, { "epoch": 0.6887523376970345, "step": 2578, "train/loss_ctc": 0.734021782875061, "train/loss_error": 0.49454736709594727, "train/loss_total": 0.542442262172699 }, { "epoch": 0.6890195030724018, "step": 2579, "train/loss_ctc": 0.989861786365509, "train/loss_error": 0.4309813976287842, "train/loss_total": 0.542757511138916 }, { "epoch": 0.6892866684477692, "grad_norm": 1.3894771337509155, "learning_rate": 2.5869088966069997e-05, "loss": 0.5577, "step": 2580 }, { "epoch": 0.6892866684477692, "step": 2580, "train/loss_ctc": 0.43232759833335876, "train/loss_error": 0.45691606402397156, "train/loss_total": 0.45199835300445557 }, { "epoch": 0.6895538338231365, "step": 2581, "train/loss_ctc": 0.95152747631073, "train/loss_error": 0.4812045395374298, "train/loss_total": 0.5752691030502319 }, { "epoch": 0.6898209991985039, "step": 2582, "train/loss_ctc": 0.6365368366241455, "train/loss_error": 0.5186759829521179, "train/loss_total": 0.5422481894493103 }, { "epoch": 0.6900881645738712, "step": 2583, "train/loss_ctc": 1.431794285774231, "train/loss_error": 0.4794868230819702, "train/loss_total": 0.6699483394622803 }, { "epoch": 0.6903553299492385, "step": 2584, "train/loss_ctc": 0.7060854434967041, "train/loss_error": 0.5126379728317261, "train/loss_total": 0.5513274669647217 }, { "epoch": 0.690622495324606, "step": 2585, "train/loss_ctc": 0.5422642827033997, "train/loss_error": 0.43254029750823975, "train/loss_total": 0.45448508858680725 }, { "epoch": 0.6908896606999733, "step": 2586, "train/loss_ctc": 1.0722838640213013, "train/loss_error": 0.5551311373710632, "train/loss_total": 0.6585617065429688 }, { 
"epoch": 0.6911568260753407, "step": 2587, "train/loss_ctc": 1.2592471837997437, "train/loss_error": 0.44892293214797974, "train/loss_total": 0.6109877824783325 }, { "epoch": 0.691423991450708, "step": 2588, "train/loss_ctc": 0.7100571393966675, "train/loss_error": 0.4516891539096832, "train/loss_total": 0.503362774848938 }, { "epoch": 0.6916911568260753, "step": 2589, "train/loss_ctc": 0.7983582019805908, "train/loss_error": 0.509207010269165, "train/loss_total": 0.5670372843742371 }, { "epoch": 0.6919583222014427, "grad_norm": 1.4403420686721802, "learning_rate": 2.5853059043547955e-05, "loss": 0.5585, "step": 2590 }, { "epoch": 0.6919583222014427, "step": 2590, "train/loss_ctc": 0.7149513959884644, "train/loss_error": 0.4697486460208893, "train/loss_total": 0.5187891721725464 }, { "epoch": 0.69222548757681, "step": 2591, "train/loss_ctc": 1.468965768814087, "train/loss_error": 0.4931904375553131, "train/loss_total": 0.6883455514907837 }, { "epoch": 0.6924926529521774, "step": 2592, "train/loss_ctc": 0.35730162262916565, "train/loss_error": 0.4830484390258789, "train/loss_total": 0.4578990936279297 }, { "epoch": 0.6927598183275447, "step": 2593, "train/loss_ctc": 0.5070045590400696, "train/loss_error": 0.46317264437675476, "train/loss_total": 0.4719390273094177 }, { "epoch": 0.693026983702912, "step": 2594, "train/loss_ctc": 0.8793703317642212, "train/loss_error": 0.4482158124446869, "train/loss_total": 0.5344467163085938 }, { "epoch": 0.6932941490782795, "step": 2595, "train/loss_ctc": 0.7536181211471558, "train/loss_error": 0.5733103156089783, "train/loss_total": 0.6093719005584717 }, { "epoch": 0.6935613144536468, "step": 2596, "train/loss_ctc": 0.9155168533325195, "train/loss_error": 0.5125977993011475, "train/loss_total": 0.5931816101074219 }, { "epoch": 0.6938284798290142, "step": 2597, "train/loss_ctc": 1.1072101593017578, "train/loss_error": 0.43913450837135315, "train/loss_total": 0.5727496147155762 }, { "epoch": 0.6940956452043815, "step": 2598, 
"train/loss_ctc": 0.9085113406181335, "train/loss_error": 0.5217204689979553, "train/loss_total": 0.5990786552429199 }, { "epoch": 0.6943628105797489, "step": 2599, "train/loss_ctc": 1.0864427089691162, "train/loss_error": 0.49828672409057617, "train/loss_total": 0.6159179210662842 }, { "epoch": 0.6946299759551162, "grad_norm": 1.0286213159561157, "learning_rate": 2.5837029121025917e-05, "loss": 0.5662, "step": 2600 }, { "epoch": 0.6946299759551162, "step": 2600, "train/loss_ctc": 0.9827986359596252, "train/loss_error": 0.5068913102149963, "train/loss_total": 0.6020727753639221 }, { "epoch": 0.6948971413304835, "step": 2601, "train/loss_ctc": 0.6444897055625916, "train/loss_error": 0.45472484827041626, "train/loss_total": 0.49267780780792236 }, { "epoch": 0.6951643067058509, "step": 2602, "train/loss_ctc": 0.5820432901382446, "train/loss_error": 0.46112197637557983, "train/loss_total": 0.4853062629699707 }, { "epoch": 0.6954314720812182, "step": 2603, "train/loss_ctc": 1.3276281356811523, "train/loss_error": 0.4346195161342621, "train/loss_total": 0.613221287727356 }, { "epoch": 0.6956986374565857, "step": 2604, "train/loss_ctc": 0.9944577217102051, "train/loss_error": 0.5103681087493896, "train/loss_total": 0.6071860790252686 }, { "epoch": 0.695965802831953, "step": 2605, "train/loss_ctc": 0.6599452495574951, "train/loss_error": 0.5132240056991577, "train/loss_total": 0.5425682663917542 }, { "epoch": 0.6962329682073203, "step": 2606, "train/loss_ctc": 0.5139213800430298, "train/loss_error": 0.5055660009384155, "train/loss_total": 0.5072370767593384 }, { "epoch": 0.6965001335826877, "step": 2607, "train/loss_ctc": 0.7429623007774353, "train/loss_error": 0.44647908210754395, "train/loss_total": 0.5057757496833801 }, { "epoch": 0.696767298958055, "step": 2608, "train/loss_ctc": 0.9030333757400513, "train/loss_error": 0.4541546404361725, "train/loss_total": 0.5439304113388062 }, { "epoch": 0.6970344643334224, "step": 2609, "train/loss_ctc": 0.5655683279037476, 
"train/loss_error": 0.47608834505081177, "train/loss_total": 0.4939843416213989 }, { "epoch": 0.6973016297087897, "grad_norm": 1.8134703636169434, "learning_rate": 2.5820999198503875e-05, "loss": 0.5394, "step": 2610 }, { "epoch": 0.6973016297087897, "step": 2610, "train/loss_ctc": 0.29049497842788696, "train/loss_error": 0.5326456427574158, "train/loss_total": 0.48421552777290344 }, { "epoch": 0.6975687950841571, "step": 2611, "train/loss_ctc": 1.9192986488342285, "train/loss_error": 0.5305281281471252, "train/loss_total": 0.8082822561264038 }, { "epoch": 0.6978359604595245, "step": 2612, "train/loss_ctc": 1.5121407508850098, "train/loss_error": 0.5483718514442444, "train/loss_total": 0.7411256432533264 }, { "epoch": 0.6981031258348918, "step": 2613, "train/loss_ctc": 1.1458275318145752, "train/loss_error": 0.5222749710083008, "train/loss_total": 0.6469854712486267 }, { "epoch": 0.6983702912102592, "step": 2614, "train/loss_ctc": 0.7028664350509644, "train/loss_error": 0.4905301630496979, "train/loss_total": 0.5329974293708801 }, { "epoch": 0.6986374565856265, "step": 2615, "train/loss_ctc": 0.8989790678024292, "train/loss_error": 0.5045611262321472, "train/loss_total": 0.5834447145462036 }, { "epoch": 0.6989046219609939, "step": 2616, "train/loss_ctc": 1.452038288116455, "train/loss_error": 0.5057896375656128, "train/loss_total": 0.6950393915176392 }, { "epoch": 0.6991717873363612, "step": 2617, "train/loss_ctc": 1.1983063220977783, "train/loss_error": 0.4695352613925934, "train/loss_total": 0.6152894496917725 }, { "epoch": 0.6994389527117285, "step": 2618, "train/loss_ctc": 1.0946431159973145, "train/loss_error": 0.5026064515113831, "train/loss_total": 0.6210137605667114 }, { "epoch": 0.6997061180870959, "step": 2619, "train/loss_ctc": 0.6148894429206848, "train/loss_error": 0.4419574737548828, "train/loss_total": 0.4765438735485077 }, { "epoch": 0.6999732834624632, "grad_norm": 1.7971566915512085, "learning_rate": 2.5804969275981833e-05, "loss": 0.6205, "step": 
2620 }, { "epoch": 0.6999732834624632, "step": 2620, "train/loss_ctc": 0.631123423576355, "train/loss_error": 0.4852696359157562, "train/loss_total": 0.5144404172897339 }, { "epoch": 0.7002404488378307, "step": 2621, "train/loss_ctc": 0.4308655560016632, "train/loss_error": 0.48507434129714966, "train/loss_total": 0.47423258423805237 }, { "epoch": 0.700507614213198, "step": 2622, "train/loss_ctc": 0.5291327238082886, "train/loss_error": 0.49410542845726013, "train/loss_total": 0.5011109113693237 }, { "epoch": 0.7007747795885654, "step": 2623, "train/loss_ctc": 0.42746201157569885, "train/loss_error": 0.5002033710479736, "train/loss_total": 0.4856550991535187 }, { "epoch": 0.7010419449639327, "step": 2624, "train/loss_ctc": 0.780429482460022, "train/loss_error": 0.4950554072856903, "train/loss_total": 0.5521302223205566 }, { "epoch": 0.7013091103393, "step": 2625, "train/loss_ctc": 0.844994306564331, "train/loss_error": 0.5102598071098328, "train/loss_total": 0.5772067308425903 }, { "epoch": 0.7015762757146674, "step": 2626, "train/loss_ctc": 0.995629072189331, "train/loss_error": 0.49701932072639465, "train/loss_total": 0.596741259098053 }, { "epoch": 0.7018434410900347, "step": 2627, "train/loss_ctc": 0.7657487392425537, "train/loss_error": 0.4636271893978119, "train/loss_total": 0.5240515470504761 }, { "epoch": 0.7021106064654021, "step": 2628, "train/loss_ctc": 0.7076935768127441, "train/loss_error": 0.5078601241111755, "train/loss_total": 0.5478268265724182 }, { "epoch": 0.7023777718407694, "step": 2629, "train/loss_ctc": 1.195228934288025, "train/loss_error": 0.48578208684921265, "train/loss_total": 0.627671480178833 }, { "epoch": 0.7026449372161367, "grad_norm": 1.9766870737075806, "learning_rate": 2.578893935345979e-05, "loss": 0.5401, "step": 2630 }, { "epoch": 0.7026449372161367, "step": 2630, "train/loss_ctc": 0.5632088780403137, "train/loss_error": 0.4823732078075409, "train/loss_total": 0.49854034185409546 }, { "epoch": 0.7029121025915042, "step": 2631, 
"train/loss_ctc": 0.4752054810523987, "train/loss_error": 0.5463098287582397, "train/loss_total": 0.5320889949798584 }, { "epoch": 0.7031792679668715, "step": 2632, "train/loss_ctc": 0.9575069546699524, "train/loss_error": 0.4866405427455902, "train/loss_total": 0.5808138251304626 }, { "epoch": 0.7034464333422389, "step": 2633, "train/loss_ctc": 0.4419788122177124, "train/loss_error": 0.4563291668891907, "train/loss_total": 0.45345911383628845 }, { "epoch": 0.7037135987176062, "step": 2634, "train/loss_ctc": 0.9168477058410645, "train/loss_error": 0.43027812242507935, "train/loss_total": 0.5275920629501343 }, { "epoch": 0.7039807640929735, "step": 2635, "train/loss_ctc": 1.4119226932525635, "train/loss_error": 0.42712244391441345, "train/loss_total": 0.6240825057029724 }, { "epoch": 0.7042479294683409, "step": 2636, "train/loss_ctc": 0.5540160536766052, "train/loss_error": 0.5572744011878967, "train/loss_total": 0.5566227436065674 }, { "epoch": 0.7045150948437082, "step": 2637, "train/loss_ctc": 0.7060579061508179, "train/loss_error": 0.5264275670051575, "train/loss_total": 0.5623536705970764 }, { "epoch": 0.7047822602190756, "step": 2638, "train/loss_ctc": 0.5100686550140381, "train/loss_error": 0.4788352847099304, "train/loss_total": 0.4850819706916809 }, { "epoch": 0.705049425594443, "step": 2639, "train/loss_ctc": 0.7245206832885742, "train/loss_error": 0.46091964840888977, "train/loss_total": 0.5136398673057556 }, { "epoch": 0.7053165909698104, "grad_norm": 3.7476601600646973, "learning_rate": 2.577290943093775e-05, "loss": 0.5334, "step": 2640 }, { "epoch": 0.7053165909698104, "step": 2640, "train/loss_ctc": 0.5278118848800659, "train/loss_error": 0.5334793329238892, "train/loss_total": 0.5323458313941956 }, { "epoch": 0.7055837563451777, "step": 2641, "train/loss_ctc": 0.7083044052124023, "train/loss_error": 0.45889636874198914, "train/loss_total": 0.5087779760360718 }, { "epoch": 0.705850921720545, "step": 2642, "train/loss_ctc": 0.5313864946365356, 
"train/loss_error": 0.4957403540611267, "train/loss_total": 0.5028696060180664 }, { "epoch": 0.7061180870959124, "step": 2643, "train/loss_ctc": 0.6914355754852295, "train/loss_error": 0.5740267634391785, "train/loss_total": 0.5975085496902466 }, { "epoch": 0.7063852524712797, "step": 2644, "train/loss_ctc": 1.1143020391464233, "train/loss_error": 0.492064505815506, "train/loss_total": 0.6165120005607605 }, { "epoch": 0.7066524178466471, "step": 2645, "train/loss_ctc": 1.285711407661438, "train/loss_error": 0.4491161108016968, "train/loss_total": 0.616435170173645 }, { "epoch": 0.7069195832220144, "step": 2646, "train/loss_ctc": 1.3006508350372314, "train/loss_error": 0.5229028463363647, "train/loss_total": 0.6784524917602539 }, { "epoch": 0.7071867485973817, "step": 2647, "train/loss_ctc": 1.1659257411956787, "train/loss_error": 0.4999197721481323, "train/loss_total": 0.6331209540367126 }, { "epoch": 0.7074539139727491, "step": 2648, "train/loss_ctc": 1.0535252094268799, "train/loss_error": 0.45576491951942444, "train/loss_total": 0.5753170251846313 }, { "epoch": 0.7077210793481165, "step": 2649, "train/loss_ctc": 0.8515058159828186, "train/loss_error": 0.5455483794212341, "train/loss_total": 0.60673987865448 }, { "epoch": 0.7079882447234839, "grad_norm": 1.2647167444229126, "learning_rate": 2.575687950841571e-05, "loss": 0.5868, "step": 2650 }, { "epoch": 0.7079882447234839, "step": 2650, "train/loss_ctc": 0.6440526247024536, "train/loss_error": 0.4936375319957733, "train/loss_total": 0.5237205624580383 }, { "epoch": 0.7082554100988512, "step": 2651, "train/loss_ctc": 0.9041551351547241, "train/loss_error": 0.518317461013794, "train/loss_total": 0.5954850316047668 }, { "epoch": 0.7085225754742186, "step": 2652, "train/loss_ctc": 1.0410183668136597, "train/loss_error": 0.5454316735267639, "train/loss_total": 0.6445490121841431 }, { "epoch": 0.7087897408495859, "step": 2653, "train/loss_ctc": 0.4268152415752411, "train/loss_error": 0.4341315031051636, 
"train/loss_total": 0.4326682686805725 }, { "epoch": 0.7090569062249532, "step": 2654, "train/loss_ctc": 0.9149739146232605, "train/loss_error": 0.47934573888778687, "train/loss_total": 0.5664713382720947 }, { "epoch": 0.7093240716003206, "step": 2655, "train/loss_ctc": 0.3646131753921509, "train/loss_error": 0.45990121364593506, "train/loss_total": 0.4408436119556427 }, { "epoch": 0.7095912369756879, "step": 2656, "train/loss_ctc": 0.917701244354248, "train/loss_error": 0.5226210355758667, "train/loss_total": 0.6016371250152588 }, { "epoch": 0.7098584023510554, "step": 2657, "train/loss_ctc": 0.7368061542510986, "train/loss_error": 0.5043574571609497, "train/loss_total": 0.5508472323417664 }, { "epoch": 0.7101255677264227, "step": 2658, "train/loss_ctc": 0.8581317663192749, "train/loss_error": 0.4552048444747925, "train/loss_total": 0.535790205001831 }, { "epoch": 0.71039273310179, "step": 2659, "train/loss_ctc": 0.6335192322731018, "train/loss_error": 0.49508702754974365, "train/loss_total": 0.5227734446525574 }, { "epoch": 0.7106598984771574, "grad_norm": 3.309849739074707, "learning_rate": 2.574084958589367e-05, "loss": 0.5415, "step": 2660 }, { "epoch": 0.7106598984771574, "step": 2660, "train/loss_ctc": 0.5593665838241577, "train/loss_error": 0.43576741218566895, "train/loss_total": 0.4604872763156891 }, { "epoch": 0.7109270638525247, "step": 2661, "train/loss_ctc": 0.7263132929801941, "train/loss_error": 0.5738204121589661, "train/loss_total": 0.6043189764022827 }, { "epoch": 0.7111942292278921, "step": 2662, "train/loss_ctc": 1.0266679525375366, "train/loss_error": 0.4465697705745697, "train/loss_total": 0.5625894069671631 }, { "epoch": 0.7114613946032594, "step": 2663, "train/loss_ctc": 1.048006534576416, "train/loss_error": 0.5202106237411499, "train/loss_total": 0.625769853591919 }, { "epoch": 0.7117285599786267, "step": 2664, "train/loss_ctc": 0.45139026641845703, "train/loss_error": 0.5083231925964355, "train/loss_total": 0.4969366192817688 }, { 
"epoch": 0.7119957253539941, "step": 2665, "train/loss_ctc": 1.1574313640594482, "train/loss_error": 0.48837167024612427, "train/loss_total": 0.622183620929718 }, { "epoch": 0.7122628907293614, "step": 2666, "train/loss_ctc": 0.4136643409729004, "train/loss_error": 0.4441351890563965, "train/loss_total": 0.4380410313606262 }, { "epoch": 0.7125300561047289, "step": 2667, "train/loss_ctc": 0.4489552974700928, "train/loss_error": 0.4959641695022583, "train/loss_total": 0.4865624010562897 }, { "epoch": 0.7127972214800962, "step": 2668, "train/loss_ctc": 0.7948243021965027, "train/loss_error": 0.4336887300014496, "train/loss_total": 0.5059158205986023 }, { "epoch": 0.7130643868554636, "step": 2669, "train/loss_ctc": 0.7952561974525452, "train/loss_error": 0.4650004506111145, "train/loss_total": 0.5310516357421875 }, { "epoch": 0.7133315522308309, "grad_norm": 1.9132879972457886, "learning_rate": 2.5726422655623832e-05, "loss": 0.5334, "step": 2670 }, { "epoch": 0.7133315522308309, "step": 2670, "train/loss_ctc": 1.6323999166488647, "train/loss_error": 0.43585342168807983, "train/loss_total": 0.6751627326011658 }, { "epoch": 0.7135987176061982, "step": 2671, "train/loss_ctc": 0.5631102323532104, "train/loss_error": 0.5598209500312805, "train/loss_total": 0.5604788064956665 }, { "epoch": 0.7138658829815656, "step": 2672, "train/loss_ctc": 0.9759939908981323, "train/loss_error": 0.5199669003486633, "train/loss_total": 0.6111723184585571 }, { "epoch": 0.7141330483569329, "step": 2673, "train/loss_ctc": 1.0727134943008423, "train/loss_error": 0.49507859349250793, "train/loss_total": 0.6106055974960327 }, { "epoch": 0.7144002137323003, "step": 2674, "train/loss_ctc": 0.9987678527832031, "train/loss_error": 0.4700198769569397, "train/loss_total": 0.5757694840431213 }, { "epoch": 0.7146673791076676, "step": 2675, "train/loss_ctc": 0.5563375353813171, "train/loss_error": 0.47700053453445435, "train/loss_total": 0.49286794662475586 }, { "epoch": 0.714934544483035, "step": 2676, 
"train/loss_ctc": 0.26868975162506104, "train/loss_error": 0.5110005140304565, "train/loss_total": 0.46253836154937744 }, { "epoch": 0.7152017098584024, "step": 2677, "train/loss_ctc": 1.2922767400741577, "train/loss_error": 0.47306379675865173, "train/loss_total": 0.6369063854217529 }, { "epoch": 0.7154688752337697, "step": 2678, "train/loss_ctc": 0.5437994003295898, "train/loss_error": 0.43800902366638184, "train/loss_total": 0.45916709303855896 }, { "epoch": 0.7157360406091371, "step": 2679, "train/loss_ctc": 0.7419921159744263, "train/loss_error": 0.48792290687561035, "train/loss_total": 0.5387367606163025 }, { "epoch": 0.7160032059845044, "grad_norm": 1.9961812496185303, "learning_rate": 2.571039273310179e-05, "loss": 0.5623, "step": 2680 }, { "epoch": 0.7160032059845044, "step": 2680, "train/loss_ctc": 0.7183911800384521, "train/loss_error": 0.44821709394454956, "train/loss_total": 0.502251923084259 }, { "epoch": 0.7162703713598718, "step": 2681, "train/loss_ctc": 0.5829969048500061, "train/loss_error": 0.4821273684501648, "train/loss_total": 0.5023012757301331 }, { "epoch": 0.7165375367352391, "step": 2682, "train/loss_ctc": 0.8400017023086548, "train/loss_error": 0.4865318834781647, "train/loss_total": 0.5572258234024048 }, { "epoch": 0.7168047021106064, "step": 2683, "train/loss_ctc": 1.285093903541565, "train/loss_error": 0.4862358868122101, "train/loss_total": 0.6460074782371521 }, { "epoch": 0.7170718674859738, "step": 2684, "train/loss_ctc": 0.8242408633232117, "train/loss_error": 0.46624284982681274, "train/loss_total": 0.5378424525260925 }, { "epoch": 0.7173390328613412, "step": 2685, "train/loss_ctc": 1.9166440963745117, "train/loss_error": 0.49828413128852844, "train/loss_total": 0.781956136226654 }, { "epoch": 0.7176061982367086, "step": 2686, "train/loss_ctc": 0.6825617551803589, "train/loss_error": 0.5254571437835693, "train/loss_total": 0.5568780899047852 }, { "epoch": 0.7178733636120759, "step": 2687, "train/loss_ctc": 0.3784879744052887, 
"train/loss_error": 0.42327240109443665, "train/loss_total": 0.41431552171707153 }, { "epoch": 0.7181405289874432, "step": 2688, "train/loss_ctc": 0.6283009052276611, "train/loss_error": 0.5790145993232727, "train/loss_total": 0.5888718366622925 }, { "epoch": 0.7184076943628106, "step": 2689, "train/loss_ctc": 1.050879716873169, "train/loss_error": 0.4493909776210785, "train/loss_total": 0.5696887373924255 }, { "epoch": 0.7186748597381779, "grad_norm": 1.376671552658081, "learning_rate": 2.5694362810579748e-05, "loss": 0.5657, "step": 2690 }, { "epoch": 0.7186748597381779, "step": 2690, "train/loss_ctc": 0.7040772438049316, "train/loss_error": 0.5760311484336853, "train/loss_total": 0.6016404032707214 }, { "epoch": 0.7189420251135453, "step": 2691, "train/loss_ctc": 0.8125452995300293, "train/loss_error": 0.45447948575019836, "train/loss_total": 0.5260926485061646 }, { "epoch": 0.7192091904889126, "step": 2692, "train/loss_ctc": 1.0304620265960693, "train/loss_error": 0.4998290240764618, "train/loss_total": 0.6059556007385254 }, { "epoch": 0.7194763558642799, "step": 2693, "train/loss_ctc": 0.7611021399497986, "train/loss_error": 0.514858067035675, "train/loss_total": 0.5641068816184998 }, { "epoch": 0.7197435212396474, "step": 2694, "train/loss_ctc": 0.7783827185630798, "train/loss_error": 0.519137978553772, "train/loss_total": 0.5709869265556335 }, { "epoch": 0.7200106866150147, "step": 2695, "train/loss_ctc": 1.2267704010009766, "train/loss_error": 0.49039292335510254, "train/loss_total": 0.6376684308052063 }, { "epoch": 0.7202778519903821, "step": 2696, "train/loss_ctc": 0.5940906405448914, "train/loss_error": 0.43290239572525024, "train/loss_total": 0.46514004468917847 }, { "epoch": 0.7205450173657494, "step": 2697, "train/loss_ctc": 0.56638503074646, "train/loss_error": 0.5397458672523499, "train/loss_total": 0.5450736880302429 }, { "epoch": 0.7208121827411168, "step": 2698, "train/loss_ctc": 1.4355684518814087, "train/loss_error": 0.45421072840690613, 
"train/loss_total": 0.6504822969436646 }, { "epoch": 0.7210793481164841, "step": 2699, "train/loss_ctc": 0.8730201721191406, "train/loss_error": 0.4628674387931824, "train/loss_total": 0.5448979735374451 }, { "epoch": 0.7213465134918514, "grad_norm": 1.2441009283065796, "learning_rate": 2.567833288805771e-05, "loss": 0.5712, "step": 2700 }, { "epoch": 0.7213465134918514, "step": 2700, "train/loss_ctc": 0.9500322341918945, "train/loss_error": 0.41683632135391235, "train/loss_total": 0.5234755277633667 }, { "epoch": 0.7216136788672188, "step": 2701, "train/loss_ctc": 1.6582701206207275, "train/loss_error": 0.5062658190727234, "train/loss_total": 0.7366666793823242 }, { "epoch": 0.7218808442425861, "step": 2702, "train/loss_ctc": 0.7322770953178406, "train/loss_error": 0.5037050843238831, "train/loss_total": 0.5494195222854614 }, { "epoch": 0.7221480096179536, "step": 2703, "train/loss_ctc": 0.9195876717567444, "train/loss_error": 0.4369967579841614, "train/loss_total": 0.5335149765014648 }, { "epoch": 0.7224151749933209, "step": 2704, "train/loss_ctc": 1.0534148216247559, "train/loss_error": 0.4834340810775757, "train/loss_total": 0.5974302291870117 }, { "epoch": 0.7226823403686882, "step": 2705, "train/loss_ctc": 1.153625249862671, "train/loss_error": 0.5492234230041504, "train/loss_total": 0.6701037883758545 }, { "epoch": 0.7229495057440556, "step": 2706, "train/loss_ctc": 0.9388760328292847, "train/loss_error": 0.5069648027420044, "train/loss_total": 0.5933470726013184 }, { "epoch": 0.7232166711194229, "step": 2707, "train/loss_ctc": 0.7457977533340454, "train/loss_error": 0.5176142454147339, "train/loss_total": 0.5632509589195251 }, { "epoch": 0.7234838364947903, "step": 2708, "train/loss_ctc": 0.6042523384094238, "train/loss_error": 0.519562840461731, "train/loss_total": 0.5365007519721985 }, { "epoch": 0.7237510018701576, "step": 2709, "train/loss_ctc": 0.8423149585723877, "train/loss_error": 0.5108963251113892, "train/loss_total": 0.577180027961731 }, { 
"epoch": 0.724018167245525, "grad_norm": 1.5159220695495605, "learning_rate": 2.5662302965535667e-05, "loss": 0.5881, "step": 2710 }, { "epoch": 0.724018167245525, "step": 2710, "train/loss_ctc": 0.8369960784912109, "train/loss_error": 0.5218878388404846, "train/loss_total": 0.5849094986915588 }, { "epoch": 0.7242853326208923, "step": 2711, "train/loss_ctc": 1.7643685340881348, "train/loss_error": 0.5614086985588074, "train/loss_total": 0.8020006418228149 }, { "epoch": 0.7245524979962596, "step": 2712, "train/loss_ctc": 1.1281561851501465, "train/loss_error": 0.4942775368690491, "train/loss_total": 0.6210532784461975 }, { "epoch": 0.7248196633716271, "step": 2713, "train/loss_ctc": 0.5769991874694824, "train/loss_error": 0.44352293014526367, "train/loss_total": 0.4702181816101074 }, { "epoch": 0.7250868287469944, "step": 2714, "train/loss_ctc": 0.8310166001319885, "train/loss_error": 0.4669439494609833, "train/loss_total": 0.5397584438323975 }, { "epoch": 0.7253539941223618, "step": 2715, "train/loss_ctc": 0.8262060880661011, "train/loss_error": 0.5539775490760803, "train/loss_total": 0.6084232926368713 }, { "epoch": 0.7256211594977291, "step": 2716, "train/loss_ctc": 0.6975442171096802, "train/loss_error": 0.4535015821456909, "train/loss_total": 0.5023101568222046 }, { "epoch": 0.7258883248730964, "step": 2717, "train/loss_ctc": 0.698157787322998, "train/loss_error": 0.42582622170448303, "train/loss_total": 0.48029255867004395 }, { "epoch": 0.7261554902484638, "step": 2718, "train/loss_ctc": 0.5493003726005554, "train/loss_error": 0.48884227871894836, "train/loss_total": 0.5009338855743408 }, { "epoch": 0.7264226556238311, "step": 2719, "train/loss_ctc": 0.7527559995651245, "train/loss_error": 0.46265286207199097, "train/loss_total": 0.5206735134124756 }, { "epoch": 0.7266898209991985, "grad_norm": 1.9003429412841797, "learning_rate": 2.5646273043013626e-05, "loss": 0.5631, "step": 2720 }, { "epoch": 0.7266898209991985, "step": 2720, "train/loss_ctc": 
1.039198875427246, "train/loss_error": 0.4274648427963257, "train/loss_total": 0.5498116612434387 }, { "epoch": 0.7269569863745659, "step": 2721, "train/loss_ctc": 0.809440016746521, "train/loss_error": 0.48542100191116333, "train/loss_total": 0.550224781036377 }, { "epoch": 0.7272241517499332, "step": 2722, "train/loss_ctc": 0.9968704581260681, "train/loss_error": 0.5032002925872803, "train/loss_total": 0.6019343137741089 }, { "epoch": 0.7274913171253006, "step": 2723, "train/loss_ctc": 0.7781301736831665, "train/loss_error": 0.5030971765518188, "train/loss_total": 0.5581037998199463 }, { "epoch": 0.7277584825006679, "step": 2724, "train/loss_ctc": 1.0770589113235474, "train/loss_error": 0.45904669165611267, "train/loss_total": 0.5826491117477417 }, { "epoch": 0.7280256478760353, "step": 2725, "train/loss_ctc": 0.6953113079071045, "train/loss_error": 0.4834158718585968, "train/loss_total": 0.5257949829101562 }, { "epoch": 0.7282928132514026, "step": 2726, "train/loss_ctc": 0.7984694242477417, "train/loss_error": 0.464747816324234, "train/loss_total": 0.5314921140670776 }, { "epoch": 0.72855997862677, "step": 2727, "train/loss_ctc": 0.8015584945678711, "train/loss_error": 0.4757069945335388, "train/loss_total": 0.5408773422241211 }, { "epoch": 0.7288271440021373, "step": 2728, "train/loss_ctc": 0.9070132374763489, "train/loss_error": 0.5016589164733887, "train/loss_total": 0.5827298164367676 }, { "epoch": 0.7290943093775046, "step": 2729, "train/loss_ctc": 0.8135918378829956, "train/loss_error": 0.45453935861587524, "train/loss_total": 0.5263498425483704 }, { "epoch": 0.729361474752872, "grad_norm": 2.0762085914611816, "learning_rate": 2.5630243120491584e-05, "loss": 0.555, "step": 2730 }, { "epoch": 0.729361474752872, "step": 2730, "train/loss_ctc": 0.5450376272201538, "train/loss_error": 0.486397922039032, "train/loss_total": 0.4981258809566498 }, { "epoch": 0.7296286401282394, "step": 2731, "train/loss_ctc": 0.5438028573989868, "train/loss_error": 
0.4226585626602173, "train/loss_total": 0.44688743352890015 }, { "epoch": 0.7298958055036068, "step": 2732, "train/loss_ctc": 0.9315769672393799, "train/loss_error": 0.4953593611717224, "train/loss_total": 0.5826029181480408 }, { "epoch": 0.7301629708789741, "step": 2733, "train/loss_ctc": 0.9625345468521118, "train/loss_error": 0.43129387497901917, "train/loss_total": 0.5375419855117798 }, { "epoch": 0.7304301362543414, "step": 2734, "train/loss_ctc": 1.056265115737915, "train/loss_error": 0.49566060304641724, "train/loss_total": 0.6077815294265747 }, { "epoch": 0.7306973016297088, "step": 2735, "train/loss_ctc": 1.5725486278533936, "train/loss_error": 0.45790895819664, "train/loss_total": 0.6808369159698486 }, { "epoch": 0.7309644670050761, "step": 2736, "train/loss_ctc": 0.9130187034606934, "train/loss_error": 0.522178053855896, "train/loss_total": 0.6003462076187134 }, { "epoch": 0.7312316323804435, "step": 2737, "train/loss_ctc": 1.354169249534607, "train/loss_error": 0.43175971508026123, "train/loss_total": 0.6162416338920593 }, { "epoch": 0.7314987977558108, "step": 2738, "train/loss_ctc": 0.8893327116966248, "train/loss_error": 0.5687814354896545, "train/loss_total": 0.6328917145729065 }, { "epoch": 0.7317659631311783, "step": 2739, "train/loss_ctc": 0.6613316535949707, "train/loss_error": 0.4776781499385834, "train/loss_total": 0.5144088268280029 }, { "epoch": 0.7320331285065456, "grad_norm": 1.3477274179458618, "learning_rate": 2.561421319796954e-05, "loss": 0.5718, "step": 2740 }, { "epoch": 0.7320331285065456, "step": 2740, "train/loss_ctc": 1.3236467838287354, "train/loss_error": 0.4799494445323944, "train/loss_total": 0.6486889123916626 }, { "epoch": 0.7323002938819129, "step": 2741, "train/loss_ctc": 1.3102985620498657, "train/loss_error": 0.5299725532531738, "train/loss_total": 0.6860377788543701 }, { "epoch": 0.7325674592572803, "step": 2742, "train/loss_ctc": 0.5268514156341553, "train/loss_error": 0.48644816875457764, "train/loss_total": 
0.4945288300514221 }, { "epoch": 0.7328346246326476, "step": 2743, "train/loss_ctc": 0.8416736125946045, "train/loss_error": 0.5220305919647217, "train/loss_total": 0.5859591960906982 }, { "epoch": 0.733101790008015, "step": 2744, "train/loss_ctc": 0.7225388884544373, "train/loss_error": 0.4117582440376282, "train/loss_total": 0.47391438484191895 }, { "epoch": 0.7333689553833823, "step": 2745, "train/loss_ctc": 1.2933157682418823, "train/loss_error": 0.47214025259017944, "train/loss_total": 0.636375367641449 }, { "epoch": 0.7336361207587496, "step": 2746, "train/loss_ctc": 0.4721686840057373, "train/loss_error": 0.45199644565582275, "train/loss_total": 0.4560309052467346 }, { "epoch": 0.733903286134117, "step": 2747, "train/loss_ctc": 0.633289098739624, "train/loss_error": 0.4796009361743927, "train/loss_total": 0.5103386044502258 }, { "epoch": 0.7341704515094843, "step": 2748, "train/loss_ctc": 0.43724536895751953, "train/loss_error": 0.5408366918563843, "train/loss_total": 0.5201184749603271 }, { "epoch": 0.7344376168848518, "step": 2749, "train/loss_ctc": 0.4083949327468872, "train/loss_error": 0.47381141781806946, "train/loss_total": 0.46072813868522644 }, { "epoch": 0.7347047822602191, "grad_norm": 0.9838333129882812, "learning_rate": 2.5598183275447503e-05, "loss": 0.5473, "step": 2750 }, { "epoch": 0.7347047822602191, "step": 2750, "train/loss_ctc": 0.7749465703964233, "train/loss_error": 0.44293344020843506, "train/loss_total": 0.5093360543251038 }, { "epoch": 0.7349719476355864, "step": 2751, "train/loss_ctc": 1.0788949728012085, "train/loss_error": 0.4464435577392578, "train/loss_total": 0.5729338526725769 }, { "epoch": 0.7352391130109538, "step": 2752, "train/loss_ctc": 1.0144269466400146, "train/loss_error": 0.49250897765159607, "train/loss_total": 0.5968925952911377 }, { "epoch": 0.7355062783863211, "step": 2753, "train/loss_ctc": 0.6621227264404297, "train/loss_error": 0.48815667629241943, "train/loss_total": 0.5229498744010925 }, { "epoch": 
0.7357734437616885, "step": 2754, "train/loss_ctc": 0.8308068513870239, "train/loss_error": 0.4137805998325348, "train/loss_total": 0.4971858859062195 }, { "epoch": 0.7360406091370558, "step": 2755, "train/loss_ctc": 0.9075816869735718, "train/loss_error": 0.48300355672836304, "train/loss_total": 0.5679191946983337 }, { "epoch": 0.7363077745124232, "step": 2756, "train/loss_ctc": 1.2200586795806885, "train/loss_error": 0.518044114112854, "train/loss_total": 0.6584470272064209 }, { "epoch": 0.7365749398877905, "step": 2757, "train/loss_ctc": 1.340896487236023, "train/loss_error": 0.4825912117958069, "train/loss_total": 0.654252290725708 }, { "epoch": 0.7368421052631579, "step": 2758, "train/loss_ctc": 1.0280359983444214, "train/loss_error": 0.48502039909362793, "train/loss_total": 0.5936235189437866 }, { "epoch": 0.7371092706385253, "step": 2759, "train/loss_ctc": 0.664863646030426, "train/loss_error": 0.4481537640094757, "train/loss_total": 0.4914957284927368 }, { "epoch": 0.7373764360138926, "grad_norm": 0.9747987985610962, "learning_rate": 2.558215335292546e-05, "loss": 0.5665, "step": 2760 }, { "epoch": 0.7373764360138926, "step": 2760, "train/loss_ctc": 0.7897271513938904, "train/loss_error": 0.44373008608818054, "train/loss_total": 0.5129294991493225 }, { "epoch": 0.73764360138926, "step": 2761, "train/loss_ctc": 1.4504704475402832, "train/loss_error": 0.45339342951774597, "train/loss_total": 0.6528088450431824 }, { "epoch": 0.7379107667646273, "step": 2762, "train/loss_ctc": 1.5306612253189087, "train/loss_error": 0.5872883200645447, "train/loss_total": 0.7759629487991333 }, { "epoch": 0.7381779321399946, "step": 2763, "train/loss_ctc": 1.5255728960037231, "train/loss_error": 0.46467140316963196, "train/loss_total": 0.676851749420166 }, { "epoch": 0.738445097515362, "step": 2764, "train/loss_ctc": 1.1039812564849854, "train/loss_error": 0.45974576473236084, "train/loss_total": 0.5885928869247437 }, { "epoch": 0.7387122628907293, "step": 2765, 
"train/loss_ctc": 0.7870160937309265, "train/loss_error": 0.5390300154685974, "train/loss_total": 0.5886272192001343 }, { "epoch": 0.7389794282660967, "step": 2766, "train/loss_ctc": 0.7991995811462402, "train/loss_error": 0.4593068063259125, "train/loss_total": 0.5272853374481201 }, { "epoch": 0.7392465936414641, "step": 2767, "train/loss_ctc": 0.4854481816291809, "train/loss_error": 0.45716536045074463, "train/loss_total": 0.46282193064689636 }, { "epoch": 0.7395137590168315, "step": 2768, "train/loss_ctc": 0.8021690845489502, "train/loss_error": 0.43762338161468506, "train/loss_total": 0.5105324983596802 }, { "epoch": 0.7397809243921988, "step": 2769, "train/loss_ctc": 1.2218008041381836, "train/loss_error": 0.518047571182251, "train/loss_total": 0.6587982177734375 }, { "epoch": 0.7400480897675661, "grad_norm": 1.339622139930725, "learning_rate": 2.556612343040342e-05, "loss": 0.5955, "step": 2770 }, { "epoch": 0.7400480897675661, "step": 2770, "train/loss_ctc": 0.9632636308670044, "train/loss_error": 0.4952751100063324, "train/loss_total": 0.5888727903366089 }, { "epoch": 0.7403152551429335, "step": 2771, "train/loss_ctc": 1.168730616569519, "train/loss_error": 0.4436470866203308, "train/loss_total": 0.5886638164520264 }, { "epoch": 0.7405824205183008, "step": 2772, "train/loss_ctc": 1.3521111011505127, "train/loss_error": 0.4595550298690796, "train/loss_total": 0.638066291809082 }, { "epoch": 0.7408495858936682, "step": 2773, "train/loss_ctc": 0.6168003678321838, "train/loss_error": 0.5015687942504883, "train/loss_total": 0.5246151089668274 }, { "epoch": 0.7411167512690355, "step": 2774, "train/loss_ctc": 0.5754146575927734, "train/loss_error": 0.437282919883728, "train/loss_total": 0.46490925550460815 }, { "epoch": 0.7413839166444028, "step": 2775, "train/loss_ctc": 0.4578116834163666, "train/loss_error": 0.4832155108451843, "train/loss_total": 0.47813475131988525 }, { "epoch": 0.7416510820197703, "step": 2776, "train/loss_ctc": 0.8894003629684448, 
"train/loss_error": 0.46067970991134644, "train/loss_total": 0.5464238524436951 }, { "epoch": 0.7419182473951376, "step": 2777, "train/loss_ctc": 1.1520274877548218, "train/loss_error": 0.41175752878189087, "train/loss_total": 0.559811532497406 }, { "epoch": 0.742185412770505, "step": 2778, "train/loss_ctc": 0.5642439723014832, "train/loss_error": 0.45998239517211914, "train/loss_total": 0.4808347225189209 }, { "epoch": 0.7424525781458723, "step": 2779, "train/loss_ctc": 1.1828972101211548, "train/loss_error": 0.47471246123313904, "train/loss_total": 0.616349458694458 }, { "epoch": 0.7427197435212396, "grad_norm": 1.4911508560180664, "learning_rate": 2.555009350788138e-05, "loss": 0.5487, "step": 2780 }, { "epoch": 0.7427197435212396, "step": 2780, "train/loss_ctc": 1.31227707862854, "train/loss_error": 0.49571743607521057, "train/loss_total": 0.6590293645858765 }, { "epoch": 0.742986908896607, "step": 2781, "train/loss_ctc": 1.460876703262329, "train/loss_error": 0.5256471037864685, "train/loss_total": 0.7126930356025696 }, { "epoch": 0.7432540742719743, "step": 2782, "train/loss_ctc": 0.5708717107772827, "train/loss_error": 0.48927873373031616, "train/loss_total": 0.5055973529815674 }, { "epoch": 0.7435212396473417, "step": 2783, "train/loss_ctc": 1.3309468030929565, "train/loss_error": 0.4859834611415863, "train/loss_total": 0.6549761295318604 }, { "epoch": 0.743788405022709, "step": 2784, "train/loss_ctc": 0.9210115671157837, "train/loss_error": 0.5352439880371094, "train/loss_total": 0.6123975515365601 }, { "epoch": 0.7440555703980765, "step": 2785, "train/loss_ctc": 1.4099781513214111, "train/loss_error": 0.5271509885787964, "train/loss_total": 0.7037163972854614 }, { "epoch": 0.7443227357734438, "step": 2786, "train/loss_ctc": 0.9264206290245056, "train/loss_error": 0.4294421076774597, "train/loss_total": 0.5288378000259399 }, { "epoch": 0.7445899011488111, "step": 2787, "train/loss_ctc": 0.9709098935127258, "train/loss_error": 0.4886144995689392, 
"train/loss_total": 0.5850735902786255 }, { "epoch": 0.7448570665241785, "step": 2788, "train/loss_ctc": 0.6891555786132812, "train/loss_error": 0.4965354800224304, "train/loss_total": 0.5350595116615295 }, { "epoch": 0.7451242318995458, "step": 2789, "train/loss_ctc": 0.5209981203079224, "train/loss_error": 0.5358750820159912, "train/loss_total": 0.5328996777534485 }, { "epoch": 0.7453913972749132, "grad_norm": 3.6621735095977783, "learning_rate": 2.553406358535934e-05, "loss": 0.603, "step": 2790 }, { "epoch": 0.7453913972749132, "step": 2790, "train/loss_ctc": 0.7904669046401978, "train/loss_error": 0.4528926908969879, "train/loss_total": 0.5204075574874878 }, { "epoch": 0.7456585626502805, "step": 2791, "train/loss_ctc": 0.5573225617408752, "train/loss_error": 0.5124659538269043, "train/loss_total": 0.5214372873306274 }, { "epoch": 0.7459257280256478, "step": 2792, "train/loss_ctc": 1.1213607788085938, "train/loss_error": 0.4617865979671478, "train/loss_total": 0.5937014818191528 }, { "epoch": 0.7461928934010152, "step": 2793, "train/loss_ctc": 0.997186005115509, "train/loss_error": 0.46378758549690247, "train/loss_total": 0.5704672336578369 }, { "epoch": 0.7464600587763826, "step": 2794, "train/loss_ctc": 0.6677525043487549, "train/loss_error": 0.484538197517395, "train/loss_total": 0.521181046962738 }, { "epoch": 0.74672722415175, "step": 2795, "train/loss_ctc": 0.8062849640846252, "train/loss_error": 0.5285837054252625, "train/loss_total": 0.584123969078064 }, { "epoch": 0.7469943895271173, "step": 2796, "train/loss_ctc": 0.5343136787414551, "train/loss_error": 0.48371395468711853, "train/loss_total": 0.49383389949798584 }, { "epoch": 0.7472615549024847, "step": 2797, "train/loss_ctc": 0.9570383429527283, "train/loss_error": 0.5008483529090881, "train/loss_total": 0.5920863747596741 }, { "epoch": 0.747528720277852, "step": 2798, "train/loss_ctc": 1.2445560693740845, "train/loss_error": 0.5077685117721558, "train/loss_total": 0.6551260352134705 }, { "epoch": 
0.7477958856532193, "step": 2799, "train/loss_ctc": 0.790429413318634, "train/loss_error": 0.4706468880176544, "train/loss_total": 0.5346033573150635 }, { "epoch": 0.7480630510285867, "grad_norm": 1.5797109603881836, "learning_rate": 2.5518033662837297e-05, "loss": 0.5587, "step": 2800 }, { "epoch": 0.7480630510285867, "step": 2800, "train/loss_ctc": 0.7185239791870117, "train/loss_error": 0.5237248539924622, "train/loss_total": 0.5626846551895142 }, { "epoch": 0.748330216403954, "step": 2801, "train/loss_ctc": 1.48448646068573, "train/loss_error": 0.47236397862434387, "train/loss_total": 0.6747884750366211 }, { "epoch": 0.7485973817793214, "step": 2802, "train/loss_ctc": 2.059607982635498, "train/loss_error": 0.46129071712493896, "train/loss_total": 0.7809541821479797 }, { "epoch": 0.7488645471546888, "step": 2803, "train/loss_ctc": 0.5894603729248047, "train/loss_error": 0.5000107288360596, "train/loss_total": 0.5179006457328796 }, { "epoch": 0.7491317125300561, "step": 2804, "train/loss_ctc": 1.2264764308929443, "train/loss_error": 0.47444045543670654, "train/loss_total": 0.6248476505279541 }, { "epoch": 0.7493988779054235, "step": 2805, "train/loss_ctc": 1.3597052097320557, "train/loss_error": 0.484283447265625, "train/loss_total": 0.6593677997589111 }, { "epoch": 0.7496660432807908, "step": 2806, "train/loss_ctc": 0.6178431510925293, "train/loss_error": 0.5143651962280273, "train/loss_total": 0.5350608229637146 }, { "epoch": 0.7499332086561582, "step": 2807, "train/loss_ctc": 0.5378137230873108, "train/loss_error": 0.484167218208313, "train/loss_total": 0.4948965311050415 }, { "epoch": 0.7502003740315255, "step": 2808, "train/loss_ctc": 1.7892141342163086, "train/loss_error": 0.4649474620819092, "train/loss_total": 0.729800820350647 }, { "epoch": 0.7504675394068928, "step": 2809, "train/loss_ctc": 1.2980210781097412, "train/loss_error": 0.47851064801216125, "train/loss_total": 0.6424127817153931 }, { "epoch": 0.7507347047822602, "grad_norm": 2.108881950378418, 
"learning_rate": 2.550200374031526e-05, "loss": 0.6223, "step": 2810 }, { "epoch": 0.7507347047822602, "step": 2810, "train/loss_ctc": 0.755272388458252, "train/loss_error": 0.4794755280017853, "train/loss_total": 0.5346348881721497 }, { "epoch": 0.7510018701576275, "step": 2811, "train/loss_ctc": 0.6524398326873779, "train/loss_error": 0.45378023386001587, "train/loss_total": 0.4935121536254883 }, { "epoch": 0.751269035532995, "step": 2812, "train/loss_ctc": 0.9824814200401306, "train/loss_error": 0.491935133934021, "train/loss_total": 0.590044379234314 }, { "epoch": 0.7515362009083623, "step": 2813, "train/loss_ctc": 0.5727787017822266, "train/loss_error": 0.5267048478126526, "train/loss_total": 0.5359196066856384 }, { "epoch": 0.7518033662837297, "step": 2814, "train/loss_ctc": 1.1009854078292847, "train/loss_error": 0.4192959666252136, "train/loss_total": 0.5556339025497437 }, { "epoch": 0.752070531659097, "step": 2815, "train/loss_ctc": 1.5664880275726318, "train/loss_error": 0.514176607131958, "train/loss_total": 0.7246388792991638 }, { "epoch": 0.7523376970344643, "step": 2816, "train/loss_ctc": 0.7883267402648926, "train/loss_error": 0.4718513488769531, "train/loss_total": 0.5351464152336121 }, { "epoch": 0.7526048624098317, "step": 2817, "train/loss_ctc": 1.4437782764434814, "train/loss_error": 0.4803704619407654, "train/loss_total": 0.6730520725250244 }, { "epoch": 0.752872027785199, "step": 2818, "train/loss_ctc": 1.0086108446121216, "train/loss_error": 0.4783637225627899, "train/loss_total": 0.5844131708145142 }, { "epoch": 0.7531391931605664, "step": 2819, "train/loss_ctc": 0.40634891390800476, "train/loss_error": 0.4871426820755005, "train/loss_total": 0.47098392248153687 }, { "epoch": 0.7534063585359337, "grad_norm": 3.2100954055786133, "learning_rate": 2.5485973817793217e-05, "loss": 0.5698, "step": 2820 }, { "epoch": 0.7534063585359337, "step": 2820, "train/loss_ctc": 0.678429901599884, "train/loss_error": 0.4880954623222351, "train/loss_total": 
0.5261623859405518 }, { "epoch": 0.753673523911301, "step": 2821, "train/loss_ctc": 0.6963273286819458, "train/loss_error": 0.4526810348033905, "train/loss_total": 0.5014103055000305 }, { "epoch": 0.7539406892866685, "step": 2822, "train/loss_ctc": 0.5278711318969727, "train/loss_error": 0.44912898540496826, "train/loss_total": 0.4648774266242981 }, { "epoch": 0.7542078546620358, "step": 2823, "train/loss_ctc": 1.1522610187530518, "train/loss_error": 0.41784247756004333, "train/loss_total": 0.5647262334823608 }, { "epoch": 0.7544750200374032, "step": 2824, "train/loss_ctc": 1.3051623106002808, "train/loss_error": 0.49161508679389954, "train/loss_total": 0.6543245315551758 }, { "epoch": 0.7547421854127705, "step": 2825, "train/loss_ctc": 1.3393867015838623, "train/loss_error": 0.5641698241233826, "train/loss_total": 0.7192132472991943 }, { "epoch": 0.7550093507881379, "step": 2826, "train/loss_ctc": 0.653393030166626, "train/loss_error": 0.461882084608078, "train/loss_total": 0.5001842975616455 }, { "epoch": 0.7552765161635052, "step": 2827, "train/loss_ctc": 1.4748836755752563, "train/loss_error": 0.46950820088386536, "train/loss_total": 0.6705833077430725 }, { "epoch": 0.7555436815388725, "step": 2828, "train/loss_ctc": 0.6406719088554382, "train/loss_error": 0.5125159025192261, "train/loss_total": 0.5381470918655396 }, { "epoch": 0.7558108469142399, "step": 2829, "train/loss_ctc": 0.608370304107666, "train/loss_error": 0.45893892645835876, "train/loss_total": 0.4888252019882202 }, { "epoch": 0.7560780122896072, "grad_norm": 1.351702332496643, "learning_rate": 2.5469943895271175e-05, "loss": 0.5628, "step": 2830 }, { "epoch": 0.7560780122896072, "step": 2830, "train/loss_ctc": 0.38143742084503174, "train/loss_error": 0.49050405621528625, "train/loss_total": 0.4686907231807709 }, { "epoch": 0.7563451776649747, "step": 2831, "train/loss_ctc": 0.5316367745399475, "train/loss_error": 0.4706031084060669, "train/loss_total": 0.482809841632843 }, { "epoch": 
0.756612343040342, "step": 2832, "train/loss_ctc": 1.019726276397705, "train/loss_error": 0.47751283645629883, "train/loss_total": 0.5859555602073669 }, { "epoch": 0.7568795084157093, "step": 2833, "train/loss_ctc": 0.8456522226333618, "train/loss_error": 0.48476773500442505, "train/loss_total": 0.5569446086883545 }, { "epoch": 0.7571466737910767, "step": 2834, "train/loss_ctc": 1.0157800912857056, "train/loss_error": 0.49067261815071106, "train/loss_total": 0.5956941246986389 }, { "epoch": 0.757413839166444, "step": 2835, "train/loss_ctc": 1.2595469951629639, "train/loss_error": 0.5689107179641724, "train/loss_total": 0.7070379853248596 }, { "epoch": 0.7576810045418114, "step": 2836, "train/loss_ctc": 0.7877746820449829, "train/loss_error": 0.4715598225593567, "train/loss_total": 0.5348027944564819 }, { "epoch": 0.7579481699171787, "step": 2837, "train/loss_ctc": 0.8751943707466125, "train/loss_error": 0.44999438524246216, "train/loss_total": 0.5350344181060791 }, { "epoch": 0.758215335292546, "step": 2838, "train/loss_ctc": 1.0913957357406616, "train/loss_error": 0.5244659781455994, "train/loss_total": 0.6378519535064697 }, { "epoch": 0.7584825006679135, "step": 2839, "train/loss_ctc": 1.1406937837600708, "train/loss_error": 0.4932492971420288, "train/loss_total": 0.6227381825447083 }, { "epoch": 0.7587496660432808, "grad_norm": 1.1025673151016235, "learning_rate": 2.5453913972749133e-05, "loss": 0.5728, "step": 2840 }, { "epoch": 0.7587496660432808, "step": 2840, "train/loss_ctc": 0.7257410287857056, "train/loss_error": 0.4958555996417999, "train/loss_total": 0.541832685470581 }, { "epoch": 0.7590168314186482, "step": 2841, "train/loss_ctc": 0.9269928932189941, "train/loss_error": 0.48327624797821045, "train/loss_total": 0.5720195770263672 }, { "epoch": 0.7592839967940155, "step": 2842, "train/loss_ctc": 1.30784010887146, "train/loss_error": 0.46272069215774536, "train/loss_total": 0.6317446231842041 }, { "epoch": 0.7595511621693829, "step": 2843, 
"train/loss_ctc": 0.5917739272117615, "train/loss_error": 0.4639025330543518, "train/loss_total": 0.4894768297672272 }, { "epoch": 0.7598183275447502, "step": 2844, "train/loss_ctc": 0.6071369647979736, "train/loss_error": 0.4866223931312561, "train/loss_total": 0.5107253193855286 }, { "epoch": 0.7600854929201175, "step": 2845, "train/loss_ctc": 0.959352433681488, "train/loss_error": 0.5707422494888306, "train/loss_total": 0.6484643220901489 }, { "epoch": 0.7603526582954849, "step": 2846, "train/loss_ctc": 0.765897810459137, "train/loss_error": 0.5689176321029663, "train/loss_total": 0.6083136796951294 }, { "epoch": 0.7606198236708522, "step": 2847, "train/loss_ctc": 1.107689380645752, "train/loss_error": 0.5235095620155334, "train/loss_total": 0.6403455138206482 }, { "epoch": 0.7608869890462197, "step": 2848, "train/loss_ctc": 1.0043938159942627, "train/loss_error": 0.4293970763683319, "train/loss_total": 0.5443964004516602 }, { "epoch": 0.761154154421587, "step": 2849, "train/loss_ctc": 0.8562831878662109, "train/loss_error": 0.48723119497299194, "train/loss_total": 0.5610415935516357 }, { "epoch": 0.7614213197969543, "grad_norm": 2.059457540512085, "learning_rate": 2.543788405022709e-05, "loss": 0.5748, "step": 2850 }, { "epoch": 0.7614213197969543, "step": 2850, "train/loss_ctc": 0.4022904336452484, "train/loss_error": 0.47683945298194885, "train/loss_total": 0.46192967891693115 }, { "epoch": 0.7616884851723217, "step": 2851, "train/loss_ctc": 1.3165147304534912, "train/loss_error": 0.49340641498565674, "train/loss_total": 0.6580281257629395 }, { "epoch": 0.761955650547689, "step": 2852, "train/loss_ctc": 0.8431208729743958, "train/loss_error": 0.554177463054657, "train/loss_total": 0.6119661331176758 }, { "epoch": 0.7622228159230564, "step": 2853, "train/loss_ctc": 0.597220778465271, "train/loss_error": 0.4945931136608124, "train/loss_total": 0.5151186585426331 }, { "epoch": 0.7624899812984237, "step": 2854, "train/loss_ctc": 1.5648581981658936, 
"train/loss_error": 0.4910534620285034, "train/loss_total": 0.7058144211769104 }, { "epoch": 0.7627571466737911, "step": 2855, "train/loss_ctc": 1.0228271484375, "train/loss_error": 0.5087015628814697, "train/loss_total": 0.6115267276763916 }, { "epoch": 0.7630243120491584, "step": 2856, "train/loss_ctc": 1.3364607095718384, "train/loss_error": 0.38957732915878296, "train/loss_total": 0.5789539813995361 }, { "epoch": 0.7632914774245257, "step": 2857, "train/loss_ctc": 0.5776749849319458, "train/loss_error": 0.49548041820526123, "train/loss_total": 0.5119193196296692 }, { "epoch": 0.7635586427998932, "step": 2858, "train/loss_ctc": 0.7957867383956909, "train/loss_error": 0.4875258505344391, "train/loss_total": 0.5491780638694763 }, { "epoch": 0.7638258081752605, "step": 2859, "train/loss_ctc": 1.2595936059951782, "train/loss_error": 0.48034897446632385, "train/loss_total": 0.6361979246139526 }, { "epoch": 0.7640929735506279, "grad_norm": 1.1406278610229492, "learning_rate": 2.542185412770505e-05, "loss": 0.5841, "step": 2860 }, { "epoch": 0.7640929735506279, "step": 2860, "train/loss_ctc": 1.3571382761001587, "train/loss_error": 0.46474963426589966, "train/loss_total": 0.6432273387908936 }, { "epoch": 0.7643601389259952, "step": 2861, "train/loss_ctc": 1.2649424076080322, "train/loss_error": 0.44324320554733276, "train/loss_total": 0.6075830459594727 }, { "epoch": 0.7646273043013625, "step": 2862, "train/loss_ctc": 1.3193159103393555, "train/loss_error": 0.45438405871391296, "train/loss_total": 0.6273704171180725 }, { "epoch": 0.7648944696767299, "step": 2863, "train/loss_ctc": 1.0209753513336182, "train/loss_error": 0.49881723523139954, "train/loss_total": 0.6032488346099854 }, { "epoch": 0.7651616350520972, "step": 2864, "train/loss_ctc": 0.8509142398834229, "train/loss_error": 0.4449528753757477, "train/loss_total": 0.5261451601982117 }, { "epoch": 0.7654288004274646, "step": 2865, "train/loss_ctc": 0.8115207552909851, "train/loss_error": 0.47129586338996887, 
"train/loss_total": 0.5393408536911011 }, { "epoch": 0.765695965802832, "step": 2866, "train/loss_ctc": 0.7245516777038574, "train/loss_error": 0.4645002484321594, "train/loss_total": 0.516510546207428 }, { "epoch": 0.7659631311781993, "step": 2867, "train/loss_ctc": 0.7010155916213989, "train/loss_error": 0.5915414094924927, "train/loss_total": 0.613436222076416 }, { "epoch": 0.7662302965535667, "step": 2868, "train/loss_ctc": 0.976650059223175, "train/loss_error": 0.45742395520210266, "train/loss_total": 0.5612691640853882 }, { "epoch": 0.766497461928934, "step": 2869, "train/loss_ctc": 0.7326571345329285, "train/loss_error": 0.48512861132621765, "train/loss_total": 0.5346343517303467 }, { "epoch": 0.7667646273043014, "grad_norm": 2.061750888824463, "learning_rate": 2.540582420518301e-05, "loss": 0.5773, "step": 2870 }, { "epoch": 0.7667646273043014, "step": 2870, "train/loss_ctc": 0.2910827696323395, "train/loss_error": 0.4980148673057556, "train/loss_total": 0.4566284418106079 }, { "epoch": 0.7670317926796687, "step": 2871, "train/loss_ctc": 0.6241620779037476, "train/loss_error": 0.48820045590400696, "train/loss_total": 0.5153927803039551 }, { "epoch": 0.7672989580550361, "step": 2872, "train/loss_ctc": 0.8754935264587402, "train/loss_error": 0.46901968121528625, "train/loss_total": 0.5503144264221191 }, { "epoch": 0.7675661234304034, "step": 2873, "train/loss_ctc": 1.4082257747650146, "train/loss_error": 0.5187916159629822, "train/loss_total": 0.6966784596443176 }, { "epoch": 0.7678332888057707, "step": 2874, "train/loss_ctc": 0.8845229148864746, "train/loss_error": 0.5327427983283997, "train/loss_total": 0.6030988693237305 }, { "epoch": 0.7681004541811381, "step": 2875, "train/loss_ctc": 0.7272083163261414, "train/loss_error": 0.48654383420944214, "train/loss_total": 0.534676730632782 }, { "epoch": 0.7683676195565055, "step": 2876, "train/loss_ctc": 1.3163175582885742, "train/loss_error": 0.46038690209388733, "train/loss_total": 0.6315730810165405 }, { 
"epoch": 0.7686347849318729, "step": 2877, "train/loss_ctc": 0.5205628275871277, "train/loss_error": 0.434457391500473, "train/loss_total": 0.45167848467826843 }, { "epoch": 0.7689019503072402, "step": 2878, "train/loss_ctc": 0.3852612376213074, "train/loss_error": 0.5136277675628662, "train/loss_total": 0.4879544973373413 }, { "epoch": 0.7691691156826075, "step": 2879, "train/loss_ctc": 0.797982394695282, "train/loss_error": 0.5122520923614502, "train/loss_total": 0.5693981647491455 }, { "epoch": 0.7694362810579749, "grad_norm": 2.736323595046997, "learning_rate": 2.538979428266097e-05, "loss": 0.5497, "step": 2880 }, { "epoch": 0.7694362810579749, "step": 2880, "train/loss_ctc": 1.3487722873687744, "train/loss_error": 0.4958040118217468, "train/loss_total": 0.6663976907730103 }, { "epoch": 0.7697034464333422, "step": 2881, "train/loss_ctc": 0.8841726779937744, "train/loss_error": 0.46785852313041687, "train/loss_total": 0.5511213541030884 }, { "epoch": 0.7699706118087096, "step": 2882, "train/loss_ctc": 0.581521213054657, "train/loss_error": 0.4724479615688324, "train/loss_total": 0.4942626357078552 }, { "epoch": 0.7702377771840769, "step": 2883, "train/loss_ctc": 0.9759037494659424, "train/loss_error": 0.49781325459480286, "train/loss_total": 0.5934313535690308 }, { "epoch": 0.7705049425594444, "step": 2884, "train/loss_ctc": 0.634151816368103, "train/loss_error": 0.5026249885559082, "train/loss_total": 0.5289303660392761 }, { "epoch": 0.7707721079348117, "step": 2885, "train/loss_ctc": 0.8604115843772888, "train/loss_error": 0.46930986642837524, "train/loss_total": 0.5475302338600159 }, { "epoch": 0.771039273310179, "step": 2886, "train/loss_ctc": 0.6734257340431213, "train/loss_error": 0.47265467047691345, "train/loss_total": 0.5128088593482971 }, { "epoch": 0.7713064386855464, "step": 2887, "train/loss_ctc": 1.058080792427063, "train/loss_error": 0.5303115248680115, "train/loss_total": 0.6358653903007507 }, { "epoch": 0.7715736040609137, "step": 2888, 
"train/loss_ctc": 0.9647094011306763, "train/loss_error": 0.5103135108947754, "train/loss_total": 0.6011927127838135 }, { "epoch": 0.7718407694362811, "step": 2889, "train/loss_ctc": 0.8584362268447876, "train/loss_error": 0.4869881570339203, "train/loss_total": 0.5612777471542358 }, { "epoch": 0.7721079348116484, "grad_norm": 1.5491769313812256, "learning_rate": 2.5373764360138926e-05, "loss": 0.5693, "step": 2890 }, { "epoch": 0.7721079348116484, "step": 2890, "train/loss_ctc": 0.5839791893959045, "train/loss_error": 0.5544435977935791, "train/loss_total": 0.5603507161140442 }, { "epoch": 0.7723751001870157, "step": 2891, "train/loss_ctc": 1.0439414978027344, "train/loss_error": 0.450641930103302, "train/loss_total": 0.5693018436431885 }, { "epoch": 0.7726422655623831, "step": 2892, "train/loss_ctc": 1.455543875694275, "train/loss_error": 0.5026268362998962, "train/loss_total": 0.693210244178772 }, { "epoch": 0.7729094309377504, "step": 2893, "train/loss_ctc": 1.13679838180542, "train/loss_error": 0.5017544627189636, "train/loss_total": 0.6287632584571838 }, { "epoch": 0.7731765963131179, "step": 2894, "train/loss_ctc": 0.5640848278999329, "train/loss_error": 0.4058927297592163, "train/loss_total": 0.43753114342689514 }, { "epoch": 0.7734437616884852, "step": 2895, "train/loss_ctc": 0.8093373775482178, "train/loss_error": 0.5036401748657227, "train/loss_total": 0.5647796392440796 }, { "epoch": 0.7737109270638525, "step": 2896, "train/loss_ctc": 0.9112488031387329, "train/loss_error": 0.5316348671913147, "train/loss_total": 0.6075576543807983 }, { "epoch": 0.7739780924392199, "step": 2897, "train/loss_ctc": 0.5024813413619995, "train/loss_error": 0.5102041959762573, "train/loss_total": 0.5086596608161926 }, { "epoch": 0.7742452578145872, "step": 2898, "train/loss_ctc": 0.6254774332046509, "train/loss_error": 0.413657546043396, "train/loss_total": 0.4560215175151825 }, { "epoch": 0.7745124231899546, "step": 2899, "train/loss_ctc": 0.7387146949768066, 
"train/loss_error": 0.43402299284935, "train/loss_total": 0.49496132135391235 }, { "epoch": 0.7747795885653219, "grad_norm": 1.8147391080856323, "learning_rate": 2.5357734437616885e-05, "loss": 0.5521, "step": 2900 }, { "epoch": 0.7747795885653219, "step": 2900, "train/loss_ctc": 1.3503706455230713, "train/loss_error": 0.45189225673675537, "train/loss_total": 0.6315879821777344 }, { "epoch": 0.7750467539406893, "step": 2901, "train/loss_ctc": 0.4244428277015686, "train/loss_error": 0.3940699100494385, "train/loss_total": 0.4001445174217224 }, { "epoch": 0.7753139193160566, "step": 2902, "train/loss_ctc": 0.8444951176643372, "train/loss_error": 0.49112340807914734, "train/loss_total": 0.5617977380752563 }, { "epoch": 0.775581084691424, "step": 2903, "train/loss_ctc": 0.5140201449394226, "train/loss_error": 0.5417603254318237, "train/loss_total": 0.5362122654914856 }, { "epoch": 0.7758482500667914, "step": 2904, "train/loss_ctc": 0.665488600730896, "train/loss_error": 0.4695959985256195, "train/loss_total": 0.5087745189666748 }, { "epoch": 0.7761154154421587, "step": 2905, "train/loss_ctc": 1.2565593719482422, "train/loss_error": 0.45350322127342224, "train/loss_total": 0.6141144633293152 }, { "epoch": 0.7763825808175261, "step": 2906, "train/loss_ctc": 0.7851846218109131, "train/loss_error": 0.4897587299346924, "train/loss_total": 0.5488439202308655 }, { "epoch": 0.7766497461928934, "step": 2907, "train/loss_ctc": 0.9213924407958984, "train/loss_error": 0.45549651980400085, "train/loss_total": 0.5486757159233093 }, { "epoch": 0.7769169115682607, "step": 2908, "train/loss_ctc": 0.5949634313583374, "train/loss_error": 0.505241870880127, "train/loss_total": 0.523186206817627 }, { "epoch": 0.7771840769436281, "step": 2909, "train/loss_ctc": 0.5457055568695068, "train/loss_error": 0.4690084159374237, "train/loss_total": 0.4843478500843048 }, { "epoch": 0.7774512423189954, "grad_norm": 1.4335858821868896, "learning_rate": 2.5341704515094843e-05, "loss": 0.5358, "step": 
2910 }, { "epoch": 0.7774512423189954, "step": 2910, "train/loss_ctc": 1.0521676540374756, "train/loss_error": 0.39856216311454773, "train/loss_total": 0.5292832851409912 }, { "epoch": 0.7777184076943628, "step": 2911, "train/loss_ctc": 0.6894010901451111, "train/loss_error": 0.5171873569488525, "train/loss_total": 0.5516301393508911 }, { "epoch": 0.7779855730697302, "step": 2912, "train/loss_ctc": 0.4941064715385437, "train/loss_error": 0.42611056566238403, "train/loss_total": 0.43970978260040283 }, { "epoch": 0.7782527384450976, "step": 2913, "train/loss_ctc": 1.0744271278381348, "train/loss_error": 0.4941549301147461, "train/loss_total": 0.6102094054222107 }, { "epoch": 0.7785199038204649, "step": 2914, "train/loss_ctc": 0.8322430849075317, "train/loss_error": 0.4284674823284149, "train/loss_total": 0.5092226266860962 }, { "epoch": 0.7787870691958322, "step": 2915, "train/loss_ctc": 0.8894956707954407, "train/loss_error": 0.4263649880886078, "train/loss_total": 0.5189911127090454 }, { "epoch": 0.7790542345711996, "step": 2916, "train/loss_ctc": 0.7826955914497375, "train/loss_error": 0.5042135119438171, "train/loss_total": 0.5599099397659302 }, { "epoch": 0.7793213999465669, "step": 2917, "train/loss_ctc": 0.9341332912445068, "train/loss_error": 0.5186201930046082, "train/loss_total": 0.6017228364944458 }, { "epoch": 0.7795885653219343, "step": 2918, "train/loss_ctc": 1.7706091403961182, "train/loss_error": 0.4604412913322449, "train/loss_total": 0.7224748730659485 }, { "epoch": 0.7798557306973016, "step": 2919, "train/loss_ctc": 0.8419076800346375, "train/loss_error": 0.5064656734466553, "train/loss_total": 0.5735540986061096 }, { "epoch": 0.7801228960726689, "grad_norm": 3.036142349243164, "learning_rate": 2.53256745925728e-05, "loss": 0.5617, "step": 2920 }, { "epoch": 0.7801228960726689, "step": 2920, "train/loss_ctc": 0.9221644401550293, "train/loss_error": 0.4936615824699402, "train/loss_total": 0.579362154006958 }, { "epoch": 0.7803900614480364, "step": 
2921, "train/loss_ctc": 0.5919752717018127, "train/loss_error": 0.5602602958679199, "train/loss_total": 0.5666033029556274 }, { "epoch": 0.7806572268234037, "step": 2922, "train/loss_ctc": 0.567247748374939, "train/loss_error": 0.5597525238990784, "train/loss_total": 0.5612515807151794 }, { "epoch": 0.7809243921987711, "step": 2923, "train/loss_ctc": 0.8058344721794128, "train/loss_error": 0.4714694917201996, "train/loss_total": 0.5383424758911133 }, { "epoch": 0.7811915575741384, "step": 2924, "train/loss_ctc": 0.9348213076591492, "train/loss_error": 0.47410935163497925, "train/loss_total": 0.5662517547607422 }, { "epoch": 0.7814587229495057, "step": 2925, "train/loss_ctc": 1.5385308265686035, "train/loss_error": 0.5555839538574219, "train/loss_total": 0.7521733045578003 }, { "epoch": 0.7817258883248731, "step": 2926, "train/loss_ctc": 0.45449304580688477, "train/loss_error": 0.4556189477443695, "train/loss_total": 0.4553937613964081 }, { "epoch": 0.7819930537002404, "step": 2927, "train/loss_ctc": 0.7902371287345886, "train/loss_error": 0.5477641820907593, "train/loss_total": 0.5962587594985962 }, { "epoch": 0.7822602190756078, "step": 2928, "train/loss_ctc": 0.7875915765762329, "train/loss_error": 0.4600055515766144, "train/loss_total": 0.525522768497467 }, { "epoch": 0.7825273844509751, "step": 2929, "train/loss_ctc": 0.7187546491622925, "train/loss_error": 0.4576740860939026, "train/loss_total": 0.5098901987075806 }, { "epoch": 0.7827945498263426, "grad_norm": 3.4484944343566895, "learning_rate": 2.5309644670050762e-05, "loss": 0.5651, "step": 2930 }, { "epoch": 0.7827945498263426, "step": 2930, "train/loss_ctc": 0.9284870028495789, "train/loss_error": 0.48733484745025635, "train/loss_total": 0.5755652785301208 }, { "epoch": 0.7830617152017099, "step": 2931, "train/loss_ctc": 1.1994813680648804, "train/loss_error": 0.4990137219429016, "train/loss_total": 0.6391072869300842 }, { "epoch": 0.7833288805770772, "step": 2932, "train/loss_ctc": 1.1669161319732666, 
"train/loss_error": 0.5394877195358276, "train/loss_total": 0.6649733781814575 }, { "epoch": 0.7835960459524446, "step": 2933, "train/loss_ctc": 1.1596903800964355, "train/loss_error": 0.534494161605835, "train/loss_total": 0.6595334410667419 }, { "epoch": 0.7838632113278119, "step": 2934, "train/loss_ctc": 1.3300520181655884, "train/loss_error": 0.5060741901397705, "train/loss_total": 0.670869767665863 }, { "epoch": 0.7841303767031793, "step": 2935, "train/loss_ctc": 0.6597011089324951, "train/loss_error": 0.44035494327545166, "train/loss_total": 0.48422420024871826 }, { "epoch": 0.7843975420785466, "step": 2936, "train/loss_ctc": 0.6984867453575134, "train/loss_error": 0.43593907356262207, "train/loss_total": 0.4884486198425293 }, { "epoch": 0.7846647074539139, "step": 2937, "train/loss_ctc": 1.250440001487732, "train/loss_error": 0.4751819670200348, "train/loss_total": 0.6302335858345032 }, { "epoch": 0.7849318728292813, "step": 2938, "train/loss_ctc": 0.9352893829345703, "train/loss_error": 0.4553404450416565, "train/loss_total": 0.5513302087783813 }, { "epoch": 0.7851990382046486, "step": 2939, "train/loss_ctc": 0.6900678873062134, "train/loss_error": 0.482766330242157, "train/loss_total": 0.5242266654968262 }, { "epoch": 0.7854662035800161, "grad_norm": 2.1201252937316895, "learning_rate": 2.529361474752872e-05, "loss": 0.5889, "step": 2940 }, { "epoch": 0.7854662035800161, "step": 2940, "train/loss_ctc": 0.6174479126930237, "train/loss_error": 0.43726179003715515, "train/loss_total": 0.4732990264892578 }, { "epoch": 0.7857333689553834, "step": 2941, "train/loss_ctc": 0.43704622983932495, "train/loss_error": 0.47573187947273254, "train/loss_total": 0.4679947793483734 }, { "epoch": 0.7860005343307508, "step": 2942, "train/loss_ctc": 0.740275502204895, "train/loss_error": 0.47814491391181946, "train/loss_total": 0.5305710434913635 }, { "epoch": 0.7862676997061181, "step": 2943, "train/loss_ctc": 0.3315761983394623, "train/loss_error": 0.4505220353603363, 
"train/loss_total": 0.4267328679561615 }, { "epoch": 0.7865348650814854, "step": 2944, "train/loss_ctc": 0.5778076648712158, "train/loss_error": 0.514043390750885, "train/loss_total": 0.5267962217330933 }, { "epoch": 0.7868020304568528, "step": 2945, "train/loss_ctc": 0.45237037539482117, "train/loss_error": 0.47988182306289673, "train/loss_total": 0.4743795394897461 }, { "epoch": 0.7870691958322201, "step": 2946, "train/loss_ctc": 0.6387772560119629, "train/loss_error": 0.4401954710483551, "train/loss_total": 0.47991180419921875 }, { "epoch": 0.7873363612075875, "step": 2947, "train/loss_ctc": 0.7970544099807739, "train/loss_error": 0.5009592175483704, "train/loss_total": 0.560178279876709 }, { "epoch": 0.7876035265829548, "step": 2948, "train/loss_ctc": 0.7733163237571716, "train/loss_error": 0.4766940474510193, "train/loss_total": 0.5360184907913208 }, { "epoch": 0.7878706919583222, "step": 2949, "train/loss_ctc": 0.5701862573623657, "train/loss_error": 0.461474746465683, "train/loss_total": 0.4832170605659485 }, { "epoch": 0.7881378573336896, "grad_norm": 1.3762012720108032, "learning_rate": 2.527758482500668e-05, "loss": 0.4959, "step": 2950 }, { "epoch": 0.7881378573336896, "step": 2950, "train/loss_ctc": 1.3820152282714844, "train/loss_error": 0.4148898720741272, "train/loss_total": 0.6083149313926697 }, { "epoch": 0.7884050227090569, "step": 2951, "train/loss_ctc": 1.0459308624267578, "train/loss_error": 0.47117939591407776, "train/loss_total": 0.5861297249794006 }, { "epoch": 0.7886721880844243, "step": 2952, "train/loss_ctc": 1.1207728385925293, "train/loss_error": 0.4695951044559479, "train/loss_total": 0.599830687046051 }, { "epoch": 0.7889393534597916, "step": 2953, "train/loss_ctc": 0.8719353675842285, "train/loss_error": 0.4870811998844147, "train/loss_total": 0.5640520453453064 }, { "epoch": 0.7892065188351589, "step": 2954, "train/loss_ctc": 1.3221492767333984, "train/loss_error": 0.5042250752449036, "train/loss_total": 0.6678099632263184 }, { 
"epoch": 0.7894736842105263, "step": 2955, "train/loss_ctc": 0.5093162059783936, "train/loss_error": 0.4422370493412018, "train/loss_total": 0.4556528925895691 }, { "epoch": 0.7897408495858936, "step": 2956, "train/loss_ctc": 0.6955406665802002, "train/loss_error": 0.4383202791213989, "train/loss_total": 0.48976439237594604 }, { "epoch": 0.790008014961261, "step": 2957, "train/loss_ctc": 0.42835739254951477, "train/loss_error": 0.5124672651290894, "train/loss_total": 0.49564531445503235 }, { "epoch": 0.7902751803366284, "step": 2958, "train/loss_ctc": 0.8250206708908081, "train/loss_error": 0.5342557430267334, "train/loss_total": 0.5924087762832642 }, { "epoch": 0.7905423457119958, "step": 2959, "train/loss_ctc": 0.6774476170539856, "train/loss_error": 0.4786161780357361, "train/loss_total": 0.5183824300765991 }, { "epoch": 0.7908095110873631, "grad_norm": 1.4514628648757935, "learning_rate": 2.526155490248464e-05, "loss": 0.5578, "step": 2960 }, { "epoch": 0.7908095110873631, "step": 2960, "train/loss_ctc": 0.9218015074729919, "train/loss_error": 0.4483262002468109, "train/loss_total": 0.5430212616920471 }, { "epoch": 0.7910766764627304, "step": 2961, "train/loss_ctc": 0.5961170196533203, "train/loss_error": 0.4634678363800049, "train/loss_total": 0.4899976849555969 }, { "epoch": 0.7913438418380978, "step": 2962, "train/loss_ctc": 0.8762989044189453, "train/loss_error": 0.49887675046920776, "train/loss_total": 0.5743612051010132 }, { "epoch": 0.7916110072134651, "step": 2963, "train/loss_ctc": 2.248333215713501, "train/loss_error": 0.4890454411506653, "train/loss_total": 0.8409030437469482 }, { "epoch": 0.7918781725888325, "step": 2964, "train/loss_ctc": 0.608677864074707, "train/loss_error": 0.46651989221572876, "train/loss_total": 0.4949514865875244 }, { "epoch": 0.7921453379641998, "step": 2965, "train/loss_ctc": 0.6198924779891968, "train/loss_error": 0.41820576786994934, "train/loss_total": 0.4585431218147278 }, { "epoch": 0.7924125033395671, "step": 2966, 
"train/loss_ctc": 0.7542701959609985, "train/loss_error": 0.45864593982696533, "train/loss_total": 0.5177707672119141 }, { "epoch": 0.7926796687149346, "step": 2967, "train/loss_ctc": 0.6857596039772034, "train/loss_error": 0.4740677773952484, "train/loss_total": 0.5164061784744263 }, { "epoch": 0.7929468340903019, "step": 2968, "train/loss_ctc": 0.2916373014450073, "train/loss_error": 0.5096649527549744, "train/loss_total": 0.46605944633483887 }, { "epoch": 0.7932139994656693, "step": 2969, "train/loss_ctc": 0.9917741417884827, "train/loss_error": 0.4565945863723755, "train/loss_total": 0.5636305212974548 }, { "epoch": 0.7934811648410366, "grad_norm": 1.7182483673095703, "learning_rate": 2.5245524979962598e-05, "loss": 0.5466, "step": 2970 }, { "epoch": 0.7934811648410366, "step": 2970, "train/loss_ctc": 0.8186730146408081, "train/loss_error": 0.5490197539329529, "train/loss_total": 0.602950394153595 }, { "epoch": 0.793748330216404, "step": 2971, "train/loss_ctc": 0.2583041787147522, "train/loss_error": 0.43357887864112854, "train/loss_total": 0.3985239565372467 }, { "epoch": 0.7940154955917713, "step": 2972, "train/loss_ctc": 1.048496961593628, "train/loss_error": 0.5020980834960938, "train/loss_total": 0.6113778352737427 }, { "epoch": 0.7942826609671386, "step": 2973, "train/loss_ctc": 0.7629705667495728, "train/loss_error": 0.5098650455474854, "train/loss_total": 0.5604861974716187 }, { "epoch": 0.794549826342506, "step": 2974, "train/loss_ctc": 1.1289434432983398, "train/loss_error": 0.48062995076179504, "train/loss_total": 0.6102926731109619 }, { "epoch": 0.7948169917178733, "step": 2975, "train/loss_ctc": 2.000392436981201, "train/loss_error": 0.46533599495887756, "train/loss_total": 0.7723473310470581 }, { "epoch": 0.7950841570932408, "step": 2976, "train/loss_ctc": 1.1606955528259277, "train/loss_error": 0.5235596299171448, "train/loss_total": 0.6509867906570435 }, { "epoch": 0.7953513224686081, "step": 2977, "train/loss_ctc": 0.4199904501438141, 
"train/loss_error": 0.4213620722293854, "train/loss_total": 0.42108774185180664 }, { "epoch": 0.7956184878439754, "step": 2978, "train/loss_ctc": 1.6371351480484009, "train/loss_error": 0.493961900472641, "train/loss_total": 0.7225965261459351 }, { "epoch": 0.7958856532193428, "step": 2979, "train/loss_ctc": 1.1148486137390137, "train/loss_error": 0.5062313675880432, "train/loss_total": 0.6279548406600952 }, { "epoch": 0.7961528185947101, "grad_norm": 1.4374194145202637, "learning_rate": 2.5229495057440556e-05, "loss": 0.5979, "step": 2980 }, { "epoch": 0.7961528185947101, "step": 2980, "train/loss_ctc": 0.7940276861190796, "train/loss_error": 0.48985859751701355, "train/loss_total": 0.5506924390792847 }, { "epoch": 0.7964199839700775, "step": 2981, "train/loss_ctc": 0.5580230355262756, "train/loss_error": 0.5235405564308167, "train/loss_total": 0.5304370522499084 }, { "epoch": 0.7966871493454448, "step": 2982, "train/loss_ctc": 0.544400155544281, "train/loss_error": 0.4880465865135193, "train/loss_total": 0.49931731820106506 }, { "epoch": 0.7969543147208121, "step": 2983, "train/loss_ctc": 0.82317054271698, "train/loss_error": 0.5474562048912048, "train/loss_total": 0.6025990843772888 }, { "epoch": 0.7972214800961795, "step": 2984, "train/loss_ctc": 0.592171847820282, "train/loss_error": 0.5071735382080078, "train/loss_total": 0.5241732001304626 }, { "epoch": 0.7974886454715469, "step": 2985, "train/loss_ctc": 1.0956196784973145, "train/loss_error": 0.5775182247161865, "train/loss_total": 0.6811385154724121 }, { "epoch": 0.7977558108469143, "step": 2986, "train/loss_ctc": 1.0768300294876099, "train/loss_error": 0.442941278219223, "train/loss_total": 0.5697190761566162 }, { "epoch": 0.7980229762222816, "step": 2987, "train/loss_ctc": 0.8191194534301758, "train/loss_error": 0.46984171867370605, "train/loss_total": 0.5396972894668579 }, { "epoch": 0.798290141597649, "step": 2988, "train/loss_ctc": 0.34391283988952637, "train/loss_error": 0.4907379150390625, 
"train/loss_total": 0.46137291193008423 }, { "epoch": 0.7985573069730163, "step": 2989, "train/loss_ctc": 1.0213556289672852, "train/loss_error": 0.4873637557029724, "train/loss_total": 0.594162106513977 }, { "epoch": 0.7988244723483836, "grad_norm": 1.177858829498291, "learning_rate": 2.5213465134918517e-05, "loss": 0.5553, "step": 2990 }, { "epoch": 0.7988244723483836, "step": 2990, "train/loss_ctc": 1.0331120491027832, "train/loss_error": 0.49936020374298096, "train/loss_total": 0.6061105728149414 }, { "epoch": 0.799091637723751, "step": 2991, "train/loss_ctc": 0.6420161724090576, "train/loss_error": 0.4582092761993408, "train/loss_total": 0.4949706792831421 }, { "epoch": 0.7993588030991183, "step": 2992, "train/loss_ctc": 0.782766580581665, "train/loss_error": 0.4473693370819092, "train/loss_total": 0.5144487619400024 }, { "epoch": 0.7996259684744857, "step": 2993, "train/loss_ctc": 0.8742583394050598, "train/loss_error": 0.4728156328201294, "train/loss_total": 0.5531041622161865 }, { "epoch": 0.799893133849853, "step": 2994, "train/loss_ctc": 1.067089557647705, "train/loss_error": 0.4817747175693512, "train/loss_total": 0.5988377332687378 }, { "epoch": 0.8001602992252204, "step": 2995, "train/loss_ctc": 0.44054776430130005, "train/loss_error": 0.4881775677204132, "train/loss_total": 0.47865161299705505 }, { "epoch": 0.8004274646005878, "step": 2996, "train/loss_ctc": 0.6164044141769409, "train/loss_error": 0.4191884398460388, "train/loss_total": 0.45863163471221924 }, { "epoch": 0.8006946299759551, "step": 2997, "train/loss_ctc": 0.7597132325172424, "train/loss_error": 0.506535530090332, "train/loss_total": 0.557171106338501 }, { "epoch": 0.8009617953513225, "step": 2998, "train/loss_ctc": 0.7632400989532471, "train/loss_error": 0.4950227737426758, "train/loss_total": 0.54866623878479 }, { "epoch": 0.8012289607266898, "step": 2999, "train/loss_ctc": 1.2999656200408936, "train/loss_error": 0.5661563277244568, "train/loss_total": 0.7129181623458862 }, { "epoch": 
0.8014961261020572, "grad_norm": 1.6761879920959473, "learning_rate": 2.5197435212396476e-05, "loss": 0.5524, "step": 3000 }, { "epoch": 0.8014961261020572, "step": 3000, "train/loss_ctc": 0.9111810922622681, "train/loss_error": 0.44194233417510986, "train/loss_total": 0.5357900857925415 }, { "epoch": 0.8017632914774245, "step": 3001, "train/loss_ctc": 1.4157342910766602, "train/loss_error": 0.5080885291099548, "train/loss_total": 0.6896176934242249 }, { "epoch": 0.8020304568527918, "step": 3002, "train/loss_ctc": 0.3527597486972809, "train/loss_error": 0.42081400752067566, "train/loss_total": 0.40720316767692566 }, { "epoch": 0.8022976222281593, "step": 3003, "train/loss_ctc": 0.5378677845001221, "train/loss_error": 0.4515215754508972, "train/loss_total": 0.46879082918167114 }, { "epoch": 0.8025647876035266, "step": 3004, "train/loss_ctc": 1.256931185722351, "train/loss_error": 0.4877832531929016, "train/loss_total": 0.6416128873825073 }, { "epoch": 0.802831952978894, "step": 3005, "train/loss_ctc": 0.9590218663215637, "train/loss_error": 0.432712197303772, "train/loss_total": 0.5379741191864014 }, { "epoch": 0.8030991183542613, "step": 3006, "train/loss_ctc": 0.8964119553565979, "train/loss_error": 0.520325779914856, "train/loss_total": 0.5955430269241333 }, { "epoch": 0.8033662837296286, "step": 3007, "train/loss_ctc": 0.6747466921806335, "train/loss_error": 0.4454132616519928, "train/loss_total": 0.4912799596786499 }, { "epoch": 0.803633449104996, "step": 3008, "train/loss_ctc": 1.2055011987686157, "train/loss_error": 0.5170679688453674, "train/loss_total": 0.654754638671875 }, { "epoch": 0.8039006144803633, "step": 3009, "train/loss_ctc": 1.988965630531311, "train/loss_error": 0.4787771999835968, "train/loss_total": 0.7808148860931396 }, { "epoch": 0.8041677798557307, "grad_norm": 2.71976375579834, "learning_rate": 2.5181405289874434e-05, "loss": 0.5803, "step": 3010 }, { "epoch": 0.8041677798557307, "step": 3010, "train/loss_ctc": 1.3309872150421143, 
"train/loss_error": 0.4553990364074707, "train/loss_total": 0.6305166482925415 }, { "epoch": 0.804434945231098, "step": 3011, "train/loss_ctc": 1.0749473571777344, "train/loss_error": 0.46465083956718445, "train/loss_total": 0.5867101550102234 }, { "epoch": 0.8047021106064653, "step": 3012, "train/loss_ctc": 2.3468947410583496, "train/loss_error": 0.4978320896625519, "train/loss_total": 0.8676446676254272 }, { "epoch": 0.8049692759818328, "step": 3013, "train/loss_ctc": 1.01107919216156, "train/loss_error": 0.48580536246299744, "train/loss_total": 0.59086012840271 }, { "epoch": 0.8052364413572001, "step": 3014, "train/loss_ctc": 0.8030689358711243, "train/loss_error": 0.5272881388664246, "train/loss_total": 0.5824443101882935 }, { "epoch": 0.8055036067325675, "step": 3015, "train/loss_ctc": 0.45724597573280334, "train/loss_error": 0.504291832447052, "train/loss_total": 0.4948826730251312 }, { "epoch": 0.8057707721079348, "step": 3016, "train/loss_ctc": 0.772964358329773, "train/loss_error": 0.4414122998714447, "train/loss_total": 0.5077227354049683 }, { "epoch": 0.8060379374833022, "step": 3017, "train/loss_ctc": 0.6931480765342712, "train/loss_error": 0.4621143937110901, "train/loss_total": 0.5083211660385132 }, { "epoch": 0.8063051028586695, "step": 3018, "train/loss_ctc": 1.3698277473449707, "train/loss_error": 0.5138036608695984, "train/loss_total": 0.6850085258483887 }, { "epoch": 0.8065722682340368, "step": 3019, "train/loss_ctc": 1.342149257659912, "train/loss_error": 0.46706774830818176, "train/loss_total": 0.6420840620994568 }, { "epoch": 0.8068394336094042, "grad_norm": 1.6200084686279297, "learning_rate": 2.5165375367352392e-05, "loss": 0.6096, "step": 3020 }, { "epoch": 0.8068394336094042, "step": 3020, "train/loss_ctc": 0.8941131830215454, "train/loss_error": 0.5443651080131531, "train/loss_total": 0.6143147349357605 }, { "epoch": 0.8071065989847716, "step": 3021, "train/loss_ctc": 1.1679316759109497, "train/loss_error": 0.44395676255226135, 
"train/loss_total": 0.5887517929077148 }, { "epoch": 0.807373764360139, "step": 3022, "train/loss_ctc": 0.6039267778396606, "train/loss_error": 0.3966637849807739, "train/loss_total": 0.4381164014339447 }, { "epoch": 0.8076409297355063, "step": 3023, "train/loss_ctc": 0.7881558537483215, "train/loss_error": 0.44278156757354736, "train/loss_total": 0.5118564367294312 }, { "epoch": 0.8079080951108736, "step": 3024, "train/loss_ctc": 0.37730133533477783, "train/loss_error": 0.43550413846969604, "train/loss_total": 0.42386358976364136 }, { "epoch": 0.808175260486241, "step": 3025, "train/loss_ctc": 1.0540920495986938, "train/loss_error": 0.4839685261249542, "train/loss_total": 0.5979932546615601 }, { "epoch": 0.8084424258616083, "step": 3026, "train/loss_ctc": 0.8757997155189514, "train/loss_error": 0.5597696900367737, "train/loss_total": 0.6229757070541382 }, { "epoch": 0.8087095912369757, "step": 3027, "train/loss_ctc": 0.5394589900970459, "train/loss_error": 0.4492727816104889, "train/loss_total": 0.4673100411891937 }, { "epoch": 0.808976756612343, "step": 3028, "train/loss_ctc": 0.5293406248092651, "train/loss_error": 0.5295188426971436, "train/loss_total": 0.5294831991195679 }, { "epoch": 0.8092439219877104, "step": 3029, "train/loss_ctc": 0.8108111619949341, "train/loss_error": 0.41485080122947693, "train/loss_total": 0.49404287338256836 }, { "epoch": 0.8095110873630778, "grad_norm": 1.7230430841445923, "learning_rate": 2.514934544483035e-05, "loss": 0.5289, "step": 3030 }, { "epoch": 0.8095110873630778, "step": 3030, "train/loss_ctc": 0.40967825055122375, "train/loss_error": 0.5076941251754761, "train/loss_total": 0.48809096217155457 }, { "epoch": 0.8097782527384451, "step": 3031, "train/loss_ctc": 0.5301638841629028, "train/loss_error": 0.48331624269485474, "train/loss_total": 0.49268579483032227 }, { "epoch": 0.8100454181138125, "step": 3032, "train/loss_ctc": 0.736260175704956, "train/loss_error": 0.44563186168670654, "train/loss_total": 0.5037575364112854 }, 
{ "epoch": 0.8103125834891798, "step": 3033, "train/loss_ctc": 0.7123754620552063, "train/loss_error": 0.4227343201637268, "train/loss_total": 0.4806625545024872 }, { "epoch": 0.8105797488645472, "step": 3034, "train/loss_ctc": 1.5474092960357666, "train/loss_error": 0.5120089054107666, "train/loss_total": 0.7190890312194824 }, { "epoch": 0.8108469142399145, "step": 3035, "train/loss_ctc": 0.7174692749977112, "train/loss_error": 0.4789159893989563, "train/loss_total": 0.5266266465187073 }, { "epoch": 0.8111140796152818, "step": 3036, "train/loss_ctc": 1.070904016494751, "train/loss_error": 0.4767478406429291, "train/loss_total": 0.5955790877342224 }, { "epoch": 0.8113812449906492, "step": 3037, "train/loss_ctc": 0.9940497279167175, "train/loss_error": 0.4804721176624298, "train/loss_total": 0.5831876397132874 }, { "epoch": 0.8116484103660165, "step": 3038, "train/loss_ctc": 0.7133729457855225, "train/loss_error": 0.4611652195453644, "train/loss_total": 0.5116068124771118 }, { "epoch": 0.811915575741384, "step": 3039, "train/loss_ctc": 0.7433927059173584, "train/loss_error": 0.5334884524345398, "train/loss_total": 0.5754693150520325 }, { "epoch": 0.8121827411167513, "grad_norm": 2.45930814743042, "learning_rate": 2.513331552230831e-05, "loss": 0.5477, "step": 3040 }, { "epoch": 0.8121827411167513, "step": 3040, "train/loss_ctc": 0.9540420174598694, "train/loss_error": 0.46437743306159973, "train/loss_total": 0.5623103380203247 }, { "epoch": 0.8124499064921186, "step": 3041, "train/loss_ctc": 0.663831353187561, "train/loss_error": 0.4319707453250885, "train/loss_total": 0.4783428907394409 }, { "epoch": 0.812717071867486, "step": 3042, "train/loss_ctc": 0.7330929040908813, "train/loss_error": 0.48083382844924927, "train/loss_total": 0.5312856435775757 }, { "epoch": 0.8129842372428533, "step": 3043, "train/loss_ctc": 1.1896998882293701, "train/loss_error": 0.4892534613609314, "train/loss_total": 0.629342794418335 }, { "epoch": 0.8132514026182207, "step": 3044, 
"train/loss_ctc": 0.41473662853240967, "train/loss_error": 0.5056504607200623, "train/loss_total": 0.4874677062034607 }, { "epoch": 0.813518567993588, "step": 3045, "train/loss_ctc": 0.6522220373153687, "train/loss_error": 0.45695796608924866, "train/loss_total": 0.49601078033447266 }, { "epoch": 0.8137857333689554, "step": 3046, "train/loss_ctc": 1.0514724254608154, "train/loss_error": 0.4927571415901184, "train/loss_total": 0.6045001745223999 }, { "epoch": 0.8140528987443227, "step": 3047, "train/loss_ctc": 0.8109334707260132, "train/loss_error": 0.48707225918769836, "train/loss_total": 0.5518445372581482 }, { "epoch": 0.81432006411969, "step": 3048, "train/loss_ctc": 0.8062158823013306, "train/loss_error": 0.45127037167549133, "train/loss_total": 0.5222594738006592 }, { "epoch": 0.8145872294950575, "step": 3049, "train/loss_ctc": 1.1585487127304077, "train/loss_error": 0.4068465828895569, "train/loss_total": 0.557187020778656 }, { "epoch": 0.8148543948704248, "grad_norm": 1.7192081212997437, "learning_rate": 2.511728559978627e-05, "loss": 0.5421, "step": 3050 }, { "epoch": 0.8148543948704248, "step": 3050, "train/loss_ctc": 0.7121910452842712, "train/loss_error": 0.480686217546463, "train/loss_total": 0.5269871950149536 }, { "epoch": 0.8151215602457922, "step": 3051, "train/loss_ctc": 0.8129500150680542, "train/loss_error": 0.5271231532096863, "train/loss_total": 0.5842885375022888 }, { "epoch": 0.8153887256211595, "step": 3052, "train/loss_ctc": 1.1727306842803955, "train/loss_error": 0.4470920264720917, "train/loss_total": 0.5922197699546814 }, { "epoch": 0.8156558909965268, "step": 3053, "train/loss_ctc": 0.9831794500350952, "train/loss_error": 0.45654061436653137, "train/loss_total": 0.5618683695793152 }, { "epoch": 0.8159230563718942, "step": 3054, "train/loss_ctc": 0.7334644198417664, "train/loss_error": 0.5079213976860046, "train/loss_total": 0.5530300140380859 }, { "epoch": 0.8161902217472615, "step": 3055, "train/loss_ctc": 1.7526054382324219, 
"train/loss_error": 0.5209723711013794, "train/loss_total": 0.7672989964485168 }, { "epoch": 0.8164573871226289, "step": 3056, "train/loss_ctc": 0.8100409507751465, "train/loss_error": 0.5463852286338806, "train/loss_total": 0.5991163849830627 }, { "epoch": 0.8167245524979962, "step": 3057, "train/loss_ctc": 0.6394535303115845, "train/loss_error": 0.4679091274738312, "train/loss_total": 0.5022180080413818 }, { "epoch": 0.8169917178733637, "step": 3058, "train/loss_ctc": 0.5378938913345337, "train/loss_error": 0.5037491917610168, "train/loss_total": 0.5105781555175781 }, { "epoch": 0.817258883248731, "step": 3059, "train/loss_ctc": 0.6434801816940308, "train/loss_error": 0.5047841668128967, "train/loss_total": 0.5325233936309814 }, { "epoch": 0.8175260486240983, "grad_norm": 1.130591630935669, "learning_rate": 2.5101255677264227e-05, "loss": 0.573, "step": 3060 }, { "epoch": 0.8175260486240983, "step": 3060, "train/loss_ctc": 1.5501819849014282, "train/loss_error": 0.5759367346763611, "train/loss_total": 0.7707858085632324 }, { "epoch": 0.8177932139994657, "step": 3061, "train/loss_ctc": 0.6457768082618713, "train/loss_error": 0.425510048866272, "train/loss_total": 0.46956342458724976 }, { "epoch": 0.818060379374833, "step": 3062, "train/loss_ctc": 0.7771015167236328, "train/loss_error": 0.49250438809394836, "train/loss_total": 0.5494238138198853 }, { "epoch": 0.8183275447502004, "step": 3063, "train/loss_ctc": 0.8317627906799316, "train/loss_error": 0.49745509028434753, "train/loss_total": 0.5643166303634644 }, { "epoch": 0.8185947101255677, "step": 3064, "train/loss_ctc": 1.3525240421295166, "train/loss_error": 0.5201199650764465, "train/loss_total": 0.6866008043289185 }, { "epoch": 0.818861875500935, "step": 3065, "train/loss_ctc": 0.5912970304489136, "train/loss_error": 0.4787025451660156, "train/loss_total": 0.5012214779853821 }, { "epoch": 0.8191290408763024, "step": 3066, "train/loss_ctc": 0.8100953102111816, "train/loss_error": 0.4469284415245056, 
"train/loss_total": 0.5195618271827698 }, { "epoch": 0.8193962062516698, "step": 3067, "train/loss_ctc": 0.9376887083053589, "train/loss_error": 0.44162699580192566, "train/loss_total": 0.5408393740653992 }, { "epoch": 0.8196633716270372, "step": 3068, "train/loss_ctc": 0.9675512909889221, "train/loss_error": 0.43784794211387634, "train/loss_total": 0.5437886118888855 }, { "epoch": 0.8199305370024045, "step": 3069, "train/loss_ctc": 0.45098140835762024, "train/loss_error": 0.43623724579811096, "train/loss_total": 0.43918609619140625 }, { "epoch": 0.8201977023777718, "grad_norm": 0.9735845327377319, "learning_rate": 2.5085225754742186e-05, "loss": 0.5585, "step": 3070 }, { "epoch": 0.8201977023777718, "step": 3070, "train/loss_ctc": 0.8497644662857056, "train/loss_error": 0.47923794388771057, "train/loss_total": 0.5533432960510254 }, { "epoch": 0.8204648677531392, "step": 3071, "train/loss_ctc": 0.8797729015350342, "train/loss_error": 0.41746780276298523, "train/loss_total": 0.509928822517395 }, { "epoch": 0.8207320331285065, "step": 3072, "train/loss_ctc": 1.080685019493103, "train/loss_error": 0.48571085929870605, "train/loss_total": 0.6047056913375854 }, { "epoch": 0.8209991985038739, "step": 3073, "train/loss_ctc": 0.9666090607643127, "train/loss_error": 0.5214123129844666, "train/loss_total": 0.6104516386985779 }, { "epoch": 0.8212663638792412, "step": 3074, "train/loss_ctc": 0.640471875667572, "train/loss_error": 0.5347372889518738, "train/loss_total": 0.5558842420578003 }, { "epoch": 0.8215335292546087, "step": 3075, "train/loss_ctc": 0.6217960119247437, "train/loss_error": 0.45799368619918823, "train/loss_total": 0.4907541871070862 }, { "epoch": 0.821800694629976, "step": 3076, "train/loss_ctc": 1.262169361114502, "train/loss_error": 0.4326717257499695, "train/loss_total": 0.598571240901947 }, { "epoch": 0.8220678600053433, "step": 3077, "train/loss_ctc": 1.2118693590164185, "train/loss_error": 0.5379030108451843, "train/loss_total": 0.6726962924003601 }, { 
"epoch": 0.8223350253807107, "step": 3078, "train/loss_ctc": 1.0121055841445923, "train/loss_error": 0.4848836064338684, "train/loss_total": 0.5903279781341553 }, { "epoch": 0.822602190756078, "step": 3079, "train/loss_ctc": 0.6095095276832581, "train/loss_error": 0.48383790254592896, "train/loss_total": 0.5089722275733948 }, { "epoch": 0.8228693561314454, "grad_norm": 1.5932711362838745, "learning_rate": 2.5069195832220144e-05, "loss": 0.5696, "step": 3080 }, { "epoch": 0.8228693561314454, "step": 3080, "train/loss_ctc": 1.0909924507141113, "train/loss_error": 0.5309956073760986, "train/loss_total": 0.6429949998855591 }, { "epoch": 0.8231365215068127, "step": 3081, "train/loss_ctc": 1.7180403470993042, "train/loss_error": 0.4242052435874939, "train/loss_total": 0.6829723119735718 }, { "epoch": 0.82340368688218, "step": 3082, "train/loss_ctc": 1.6389646530151367, "train/loss_error": 0.5236515998840332, "train/loss_total": 0.7467142343521118 }, { "epoch": 0.8236708522575474, "step": 3083, "train/loss_ctc": 0.39406952261924744, "train/loss_error": 0.4770497679710388, "train/loss_total": 0.46045371890068054 }, { "epoch": 0.8239380176329147, "step": 3084, "train/loss_ctc": 1.1079505681991577, "train/loss_error": 0.4892684817314148, "train/loss_total": 0.6130049228668213 }, { "epoch": 0.8242051830082822, "step": 3085, "train/loss_ctc": 0.6932357549667358, "train/loss_error": 0.4420970678329468, "train/loss_total": 0.4923248291015625 }, { "epoch": 0.8244723483836495, "step": 3086, "train/loss_ctc": 0.30751484632492065, "train/loss_error": 0.4557674527168274, "train/loss_total": 0.426116943359375 }, { "epoch": 0.8247395137590169, "step": 3087, "train/loss_ctc": 0.8225938081741333, "train/loss_error": 0.49511662125587463, "train/loss_total": 0.5606120824813843 }, { "epoch": 0.8250066791343842, "step": 3088, "train/loss_ctc": 0.7887676954269409, "train/loss_error": 0.46210575103759766, "train/loss_total": 0.5274381637573242 }, { "epoch": 0.8252738445097515, "step": 3089, 
"train/loss_ctc": 0.9410986304283142, "train/loss_error": 0.4471260905265808, "train/loss_total": 0.5459206104278564 }, { "epoch": 0.8255410098851189, "grad_norm": 1.8749364614486694, "learning_rate": 2.50531659096981e-05, "loss": 0.5699, "step": 3090 }, { "epoch": 0.8255410098851189, "step": 3090, "train/loss_ctc": 0.6068027019500732, "train/loss_error": 0.501832902431488, "train/loss_total": 0.5228269100189209 }, { "epoch": 0.8258081752604862, "step": 3091, "train/loss_ctc": 1.6008687019348145, "train/loss_error": 0.4875745177268982, "train/loss_total": 0.7102333307266235 }, { "epoch": 0.8260753406358536, "step": 3092, "train/loss_ctc": 0.8732237815856934, "train/loss_error": 0.47389283776283264, "train/loss_total": 0.5537590384483337 }, { "epoch": 0.826342506011221, "step": 3093, "train/loss_ctc": 0.6411046981811523, "train/loss_error": 0.4332534372806549, "train/loss_total": 0.4748237133026123 }, { "epoch": 0.8266096713865883, "step": 3094, "train/loss_ctc": 0.7122423648834229, "train/loss_error": 0.444709837436676, "train/loss_total": 0.49821633100509644 }, { "epoch": 0.8268768367619557, "step": 3095, "train/loss_ctc": 0.8068487048149109, "train/loss_error": 0.5494336485862732, "train/loss_total": 0.6009166240692139 }, { "epoch": 0.827144002137323, "step": 3096, "train/loss_ctc": 1.0386781692504883, "train/loss_error": 0.43468719720840454, "train/loss_total": 0.5554854273796082 }, { "epoch": 0.8274111675126904, "step": 3097, "train/loss_ctc": 0.9152788519859314, "train/loss_error": 0.4850517809391022, "train/loss_total": 0.571097195148468 }, { "epoch": 0.8276783328880577, "step": 3098, "train/loss_ctc": 0.5233585238456726, "train/loss_error": 0.43120256066322327, "train/loss_total": 0.44963377714157104 }, { "epoch": 0.827945498263425, "step": 3099, "train/loss_ctc": 1.340078592300415, "train/loss_error": 0.4100600779056549, "train/loss_total": 0.5960637927055359 }, { "epoch": 0.8282126636387924, "grad_norm": 2.8949923515319824, "learning_rate": 
2.5037135987176063e-05, "loss": 0.5533, "step": 3100 }, { "epoch": 0.8282126636387924, "step": 3100, "train/loss_ctc": 0.8128536939620972, "train/loss_error": 0.5176162719726562, "train/loss_total": 0.5766637325286865 }, { "epoch": 0.8284798290141597, "step": 3101, "train/loss_ctc": 1.6801029443740845, "train/loss_error": 0.49897560477256775, "train/loss_total": 0.7352011203765869 }, { "epoch": 0.8287469943895271, "step": 3102, "train/loss_ctc": 0.8123148083686829, "train/loss_error": 0.4031837284564972, "train/loss_total": 0.48500996828079224 }, { "epoch": 0.8290141597648945, "step": 3103, "train/loss_ctc": 0.9756402373313904, "train/loss_error": 0.4948101341724396, "train/loss_total": 0.5909761786460876 }, { "epoch": 0.8292813251402619, "step": 3104, "train/loss_ctc": 0.8421967625617981, "train/loss_error": 0.4525336027145386, "train/loss_total": 0.5304662585258484 }, { "epoch": 0.8295484905156292, "step": 3105, "train/loss_ctc": 0.9576398134231567, "train/loss_error": 0.4733046889305115, "train/loss_total": 0.5701717138290405 }, { "epoch": 0.8298156558909965, "step": 3106, "train/loss_ctc": 1.5263481140136719, "train/loss_error": 0.49850961565971375, "train/loss_total": 0.7040773630142212 }, { "epoch": 0.8300828212663639, "step": 3107, "train/loss_ctc": 1.101283311843872, "train/loss_error": 0.5307483077049255, "train/loss_total": 0.6448553204536438 }, { "epoch": 0.8303499866417312, "step": 3108, "train/loss_ctc": 1.0607599020004272, "train/loss_error": 0.48916247487068176, "train/loss_total": 0.6034819483757019 }, { "epoch": 0.8306171520170986, "step": 3109, "train/loss_ctc": 0.837665319442749, "train/loss_error": 0.4675319790840149, "train/loss_total": 0.5415586233139038 }, { "epoch": 0.8308843173924659, "grad_norm": 1.026451826095581, "learning_rate": 2.502110606465402e-05, "loss": 0.5982, "step": 3110 }, { "epoch": 0.8308843173924659, "step": 3110, "train/loss_ctc": 1.2701168060302734, "train/loss_error": 0.4488508701324463, "train/loss_total": 
0.6131041049957275 }, { "epoch": 0.8311514827678332, "step": 3111, "train/loss_ctc": 0.19880008697509766, "train/loss_error": 0.4529251456260681, "train/loss_total": 0.402100145816803 }, { "epoch": 0.8314186481432007, "step": 3112, "train/loss_ctc": 0.8599897623062134, "train/loss_error": 0.3972751200199127, "train/loss_total": 0.4898180365562439 }, { "epoch": 0.831685813518568, "step": 3113, "train/loss_ctc": 0.9094061851501465, "train/loss_error": 0.382596880197525, "train/loss_total": 0.48795872926712036 }, { "epoch": 0.8319529788939354, "step": 3114, "train/loss_ctc": 1.5217809677124023, "train/loss_error": 0.5459675192832947, "train/loss_total": 0.7411302328109741 }, { "epoch": 0.8322201442693027, "step": 3115, "train/loss_ctc": 0.8099494576454163, "train/loss_error": 0.43918532133102417, "train/loss_total": 0.5133381485939026 }, { "epoch": 0.8324873096446701, "step": 3116, "train/loss_ctc": 0.42500078678131104, "train/loss_error": 0.49688050150871277, "train/loss_total": 0.48250457644462585 }, { "epoch": 0.8327544750200374, "step": 3117, "train/loss_ctc": 1.2225087881088257, "train/loss_error": 0.4608660936355591, "train/loss_total": 0.6131946444511414 }, { "epoch": 0.8330216403954047, "step": 3118, "train/loss_ctc": 0.5333720445632935, "train/loss_error": 0.487640380859375, "train/loss_total": 0.4967867136001587 }, { "epoch": 0.8332888057707721, "step": 3119, "train/loss_ctc": 0.6566418409347534, "train/loss_error": 0.5242244005203247, "train/loss_total": 0.5507079362869263 }, { "epoch": 0.8335559711461394, "grad_norm": 2.09527850151062, "learning_rate": 2.500507614213198e-05, "loss": 0.5391, "step": 3120 }, { "epoch": 0.8335559711461394, "step": 3120, "train/loss_ctc": 0.6743537187576294, "train/loss_error": 0.4896302819252014, "train/loss_total": 0.526574969291687 }, { "epoch": 0.8338231365215069, "step": 3121, "train/loss_ctc": 0.9270197749137878, "train/loss_error": 0.5314350128173828, "train/loss_total": 0.6105519533157349 }, { "epoch": 
0.8340903018968742, "step": 3122, "train/loss_ctc": 0.741458535194397, "train/loss_error": 0.49873071908950806, "train/loss_total": 0.5472762584686279 }, { "epoch": 0.8343574672722415, "step": 3123, "train/loss_ctc": 1.32424795627594, "train/loss_error": 0.5202418565750122, "train/loss_total": 0.6810430884361267 }, { "epoch": 0.8346246326476089, "step": 3124, "train/loss_ctc": 1.496250867843628, "train/loss_error": 0.5133601427078247, "train/loss_total": 0.7099382877349854 }, { "epoch": 0.8348917980229762, "step": 3125, "train/loss_ctc": 0.5674343705177307, "train/loss_error": 0.5251197814941406, "train/loss_total": 0.5335826873779297 }, { "epoch": 0.8351589633983436, "step": 3126, "train/loss_ctc": 0.7524260878562927, "train/loss_error": 0.3821237087249756, "train/loss_total": 0.45618417859077454 }, { "epoch": 0.8354261287737109, "step": 3127, "train/loss_ctc": 0.7185573577880859, "train/loss_error": 0.5202137231826782, "train/loss_total": 0.5598824620246887 }, { "epoch": 0.8356932941490782, "step": 3128, "train/loss_ctc": 0.9277684688568115, "train/loss_error": 0.4324312210083008, "train/loss_total": 0.5314986705780029 }, { "epoch": 0.8359604595244456, "step": 3129, "train/loss_ctc": 0.5303979516029358, "train/loss_error": 0.5019356608390808, "train/loss_total": 0.5076281428337097 }, { "epoch": 0.836227624899813, "grad_norm": 1.3385242223739624, "learning_rate": 2.4989046219609937e-05, "loss": 0.5664, "step": 3130 }, { "epoch": 0.836227624899813, "step": 3130, "train/loss_ctc": 0.6625877618789673, "train/loss_error": 0.49619096517562866, "train/loss_total": 0.5294703245162964 }, { "epoch": 0.8364947902751804, "step": 3131, "train/loss_ctc": 1.6275826692581177, "train/loss_error": 0.4829957187175751, "train/loss_total": 0.7119131088256836 }, { "epoch": 0.8367619556505477, "step": 3132, "train/loss_ctc": 0.5581399202346802, "train/loss_error": 0.4453180432319641, "train/loss_total": 0.4678824245929718 }, { "epoch": 0.8370291210259151, "step": 3133, 
"train/loss_ctc": 1.240214467048645, "train/loss_error": 0.4151928424835205, "train/loss_total": 0.5801971554756165 }, { "epoch": 0.8372962864012824, "step": 3134, "train/loss_ctc": 0.6169732809066772, "train/loss_error": 0.5449830293655396, "train/loss_total": 0.5593810677528381 }, { "epoch": 0.8375634517766497, "step": 3135, "train/loss_ctc": 0.9199112057685852, "train/loss_error": 0.5209255218505859, "train/loss_total": 0.6007226705551147 }, { "epoch": 0.8378306171520171, "step": 3136, "train/loss_ctc": 0.6785036325454712, "train/loss_error": 0.5915108919143677, "train/loss_total": 0.6089094877243042 }, { "epoch": 0.8380977825273844, "step": 3137, "train/loss_ctc": 0.717205286026001, "train/loss_error": 0.4745017886161804, "train/loss_total": 0.5230425000190735 }, { "epoch": 0.8383649479027518, "step": 3138, "train/loss_ctc": 0.9462090730667114, "train/loss_error": 0.4909067749977112, "train/loss_total": 0.5819672346115112 }, { "epoch": 0.8386321132781192, "step": 3139, "train/loss_ctc": 1.0245089530944824, "train/loss_error": 0.4545653164386749, "train/loss_total": 0.5685540437698364 }, { "epoch": 0.8388992786534865, "grad_norm": 1.117599606513977, "learning_rate": 2.49730162970879e-05, "loss": 0.5732, "step": 3140 }, { "epoch": 0.8388992786534865, "step": 3140, "train/loss_ctc": 0.569128155708313, "train/loss_error": 0.5007115006446838, "train/loss_total": 0.5143948197364807 }, { "epoch": 0.8391664440288539, "step": 3141, "train/loss_ctc": 0.6288563013076782, "train/loss_error": 0.4392307698726654, "train/loss_total": 0.477155864238739 }, { "epoch": 0.8394336094042212, "step": 3142, "train/loss_ctc": 0.48492392897605896, "train/loss_error": 0.4477376937866211, "train/loss_total": 0.45517492294311523 }, { "epoch": 0.8397007747795886, "step": 3143, "train/loss_ctc": 1.071408748626709, "train/loss_error": 0.520402193069458, "train/loss_total": 0.6306034922599792 }, { "epoch": 0.8399679401549559, "step": 3144, "train/loss_ctc": 1.3241175413131714, 
"train/loss_error": 0.5384109020233154, "train/loss_total": 0.6955522298812866 }, { "epoch": 0.8402351055303233, "step": 3145, "train/loss_ctc": 1.094294548034668, "train/loss_error": 0.47553732991218567, "train/loss_total": 0.5992887616157532 }, { "epoch": 0.8405022709056906, "step": 3146, "train/loss_ctc": 0.8903539776802063, "train/loss_error": 0.4949767291545868, "train/loss_total": 0.5740522146224976 }, { "epoch": 0.8407694362810579, "step": 3147, "train/loss_ctc": 1.1134406328201294, "train/loss_error": 0.4583921432495117, "train/loss_total": 0.5894018411636353 }, { "epoch": 0.8410366016564254, "step": 3148, "train/loss_ctc": 0.9021286964416504, "train/loss_error": 0.4585905075073242, "train/loss_total": 0.5472981333732605 }, { "epoch": 0.8413037670317927, "step": 3149, "train/loss_ctc": 0.9124491214752197, "train/loss_error": 0.4842967689037323, "train/loss_total": 0.5699272751808167 }, { "epoch": 0.8415709324071601, "grad_norm": 4.247948169708252, "learning_rate": 2.4956986374565857e-05, "loss": 0.5653, "step": 3150 }, { "epoch": 0.8415709324071601, "step": 3150, "train/loss_ctc": 0.774744987487793, "train/loss_error": 0.4552391767501831, "train/loss_total": 0.519140362739563 }, { "epoch": 0.8418380977825274, "step": 3151, "train/loss_ctc": 1.674710988998413, "train/loss_error": 0.41115882992744446, "train/loss_total": 0.6638692617416382 }, { "epoch": 0.8421052631578947, "step": 3152, "train/loss_ctc": 0.6772514581680298, "train/loss_error": 0.4908643960952759, "train/loss_total": 0.5281417965888977 }, { "epoch": 0.8423724285332621, "step": 3153, "train/loss_ctc": 1.7656927108764648, "train/loss_error": 0.437004029750824, "train/loss_total": 0.7027417421340942 }, { "epoch": 0.8426395939086294, "step": 3154, "train/loss_ctc": 0.8557385206222534, "train/loss_error": 0.5349134206771851, "train/loss_total": 0.5990784168243408 }, { "epoch": 0.8429067592839968, "step": 3155, "train/loss_ctc": 1.0660743713378906, "train/loss_error": 0.4820585250854492, 
"train/loss_total": 0.5988616943359375 }, { "epoch": 0.8431739246593641, "step": 3156, "train/loss_ctc": 1.0305548906326294, "train/loss_error": 0.4699240028858185, "train/loss_total": 0.5820502042770386 }, { "epoch": 0.8434410900347314, "step": 3157, "train/loss_ctc": 0.9048017859458923, "train/loss_error": 0.482820600271225, "train/loss_total": 0.5672168731689453 }, { "epoch": 0.8437082554100989, "step": 3158, "train/loss_ctc": 0.7534753680229187, "train/loss_error": 0.45365071296691895, "train/loss_total": 0.5136156678199768 }, { "epoch": 0.8439754207854662, "step": 3159, "train/loss_ctc": 0.8161032795906067, "train/loss_error": 0.49473556876182556, "train/loss_total": 0.5590091347694397 }, { "epoch": 0.8442425861608336, "grad_norm": 1.7089091539382935, "learning_rate": 2.494095645204382e-05, "loss": 0.5834, "step": 3160 }, { "epoch": 0.8442425861608336, "step": 3160, "train/loss_ctc": 0.800256609916687, "train/loss_error": 0.48825323581695557, "train/loss_total": 0.5506539344787598 }, { "epoch": 0.8445097515362009, "step": 3161, "train/loss_ctc": 0.826768159866333, "train/loss_error": 0.4713718593120575, "train/loss_total": 0.5424511432647705 }, { "epoch": 0.8447769169115683, "step": 3162, "train/loss_ctc": 0.43142667412757874, "train/loss_error": 0.4663635492324829, "train/loss_total": 0.45937615633010864 }, { "epoch": 0.8450440822869356, "step": 3163, "train/loss_ctc": 1.0974042415618896, "train/loss_error": 0.4757233262062073, "train/loss_total": 0.6000595092773438 }, { "epoch": 0.8453112476623029, "step": 3164, "train/loss_ctc": 1.279136300086975, "train/loss_error": 0.4781532883644104, "train/loss_total": 0.6383498907089233 }, { "epoch": 0.8455784130376703, "step": 3165, "train/loss_ctc": 0.9767631888389587, "train/loss_error": 0.48061567544937134, "train/loss_total": 0.5798451900482178 }, { "epoch": 0.8458455784130376, "step": 3166, "train/loss_ctc": 0.6170594096183777, "train/loss_error": 0.4092544615268707, "train/loss_total": 0.45081546902656555 }, { 
"epoch": 0.8461127437884051, "step": 3167, "train/loss_ctc": 0.4542815685272217, "train/loss_error": 0.50669926404953, "train/loss_total": 0.49621573090553284 }, { "epoch": 0.8463799091637724, "step": 3168, "train/loss_ctc": 0.7943823933601379, "train/loss_error": 0.5091138482093811, "train/loss_total": 0.5661675930023193 }, { "epoch": 0.8466470745391397, "step": 3169, "train/loss_ctc": 0.7659265398979187, "train/loss_error": 0.44804421067237854, "train/loss_total": 0.5116206407546997 }, { "epoch": 0.8469142399145071, "grad_norm": 2.6520020961761475, "learning_rate": 2.4924926529521777e-05, "loss": 0.5396, "step": 3170 }, { "epoch": 0.8469142399145071, "step": 3170, "train/loss_ctc": 0.8212599158287048, "train/loss_error": 0.5023250579833984, "train/loss_total": 0.5661120414733887 }, { "epoch": 0.8471814052898744, "step": 3171, "train/loss_ctc": 0.6138189435005188, "train/loss_error": 0.47591111063957214, "train/loss_total": 0.5034927129745483 }, { "epoch": 0.8474485706652418, "step": 3172, "train/loss_ctc": 1.5930769443511963, "train/loss_error": 0.4272899031639099, "train/loss_total": 0.660447359085083 }, { "epoch": 0.8477157360406091, "step": 3173, "train/loss_ctc": 1.13057541847229, "train/loss_error": 0.4952455461025238, "train/loss_total": 0.622311532497406 }, { "epoch": 0.8479829014159765, "step": 3174, "train/loss_ctc": 0.8736135959625244, "train/loss_error": 0.47808346152305603, "train/loss_total": 0.5571894645690918 }, { "epoch": 0.8482500667913438, "step": 3175, "train/loss_ctc": 0.7379328012466431, "train/loss_error": 0.47727933526039124, "train/loss_total": 0.5294100642204285 }, { "epoch": 0.8485172321667112, "step": 3176, "train/loss_ctc": 1.2655360698699951, "train/loss_error": 0.4945752024650574, "train/loss_total": 0.648767352104187 }, { "epoch": 0.8487843975420786, "step": 3177, "train/loss_ctc": 1.0437211990356445, "train/loss_error": 0.5170341730117798, "train/loss_total": 0.6223716139793396 }, { "epoch": 0.8490515629174459, "step": 3178, 
"train/loss_ctc": 0.34974586963653564, "train/loss_error": 0.4941524565219879, "train/loss_total": 0.46527114510536194 }, { "epoch": 0.8493187282928133, "step": 3179, "train/loss_ctc": 0.8908352851867676, "train/loss_error": 0.5546354651451111, "train/loss_total": 0.6218754649162292 }, { "epoch": 0.8495858936681806, "grad_norm": 1.6328353881835938, "learning_rate": 2.4908896606999735e-05, "loss": 0.5797, "step": 3180 }, { "epoch": 0.8495858936681806, "step": 3180, "train/loss_ctc": 1.3305039405822754, "train/loss_error": 0.4632515609264374, "train/loss_total": 0.6367020606994629 }, { "epoch": 0.8498530590435479, "step": 3181, "train/loss_ctc": 0.5080212354660034, "train/loss_error": 0.4634629487991333, "train/loss_total": 0.4723746180534363 }, { "epoch": 0.8501202244189153, "step": 3182, "train/loss_ctc": 0.6936469078063965, "train/loss_error": 0.45466920733451843, "train/loss_total": 0.502464771270752 }, { "epoch": 0.8503873897942826, "step": 3183, "train/loss_ctc": 0.7709975242614746, "train/loss_error": 0.49981147050857544, "train/loss_total": 0.5540486574172974 }, { "epoch": 0.85065455516965, "step": 3184, "train/loss_ctc": 0.6871329545974731, "train/loss_error": 0.4972696900367737, "train/loss_total": 0.5352423787117004 }, { "epoch": 0.8509217205450174, "step": 3185, "train/loss_ctc": 0.16039735078811646, "train/loss_error": 0.3783019483089447, "train/loss_total": 0.33472102880477905 }, { "epoch": 0.8511888859203847, "step": 3186, "train/loss_ctc": 0.9011149406433105, "train/loss_error": 0.4530975818634033, "train/loss_total": 0.5427010655403137 }, { "epoch": 0.8514560512957521, "step": 3187, "train/loss_ctc": 0.9755969047546387, "train/loss_error": 0.5075110793113708, "train/loss_total": 0.6011282205581665 }, { "epoch": 0.8517232166711194, "step": 3188, "train/loss_ctc": 0.9333105683326721, "train/loss_error": 0.46125924587249756, "train/loss_total": 0.5556695461273193 }, { "epoch": 0.8519903820464868, "step": 3189, "train/loss_ctc": 0.6815544366836548, 
"train/loss_error": 0.5154176950454712, "train/loss_total": 0.5486450791358948 }, { "epoch": 0.8522575474218541, "grad_norm": 2.5178112983703613, "learning_rate": 2.4892866684477693e-05, "loss": 0.5284, "step": 3190 }, { "epoch": 0.8522575474218541, "step": 3190, "train/loss_ctc": 1.250740647315979, "train/loss_error": 0.5119808912277222, "train/loss_total": 0.6597328782081604 }, { "epoch": 0.8525247127972215, "step": 3191, "train/loss_ctc": 0.9959869384765625, "train/loss_error": 0.4504943788051605, "train/loss_total": 0.5595929026603699 }, { "epoch": 0.8527918781725888, "step": 3192, "train/loss_ctc": 0.816687822341919, "train/loss_error": 0.4529876708984375, "train/loss_total": 0.5257276892662048 }, { "epoch": 0.8530590435479561, "step": 3193, "train/loss_ctc": 0.5984222888946533, "train/loss_error": 0.4471464157104492, "train/loss_total": 0.47740158438682556 }, { "epoch": 0.8533262089233236, "step": 3194, "train/loss_ctc": 0.6911954283714294, "train/loss_error": 0.4412747025489807, "train/loss_total": 0.4912588596343994 }, { "epoch": 0.8535933742986909, "step": 3195, "train/loss_ctc": 0.8898682594299316, "train/loss_error": 0.5522764325141907, "train/loss_total": 0.6197948455810547 }, { "epoch": 0.8538605396740583, "step": 3196, "train/loss_ctc": 0.9346420168876648, "train/loss_error": 0.45846161246299744, "train/loss_total": 0.5536977052688599 }, { "epoch": 0.8541277050494256, "step": 3197, "train/loss_ctc": 0.5465682148933411, "train/loss_error": 0.48138976097106934, "train/loss_total": 0.4944254457950592 }, { "epoch": 0.8543948704247929, "step": 3198, "train/loss_ctc": 0.8057968616485596, "train/loss_error": 0.42948397994041443, "train/loss_total": 0.5047465562820435 }, { "epoch": 0.8546620358001603, "step": 3199, "train/loss_ctc": 0.651007890701294, "train/loss_error": 0.543313205242157, "train/loss_total": 0.5648521184921265 }, { "epoch": 0.8549292011755276, "grad_norm": 2.184727430343628, "learning_rate": 2.487683676195565e-05, "loss": 0.5451, "step": 
3200 }, { "epoch": 0.8549292011755276, "step": 3200, "train/loss_ctc": 1.3228685855865479, "train/loss_error": 0.4723585247993469, "train/loss_total": 0.6424605846405029 }, { "epoch": 0.855196366550895, "step": 3201, "train/loss_ctc": 1.432997703552246, "train/loss_error": 0.4778701066970825, "train/loss_total": 0.6688956022262573 }, { "epoch": 0.8554635319262623, "step": 3202, "train/loss_ctc": 0.5356141924858093, "train/loss_error": 0.4554630219936371, "train/loss_total": 0.47149327397346497 }, { "epoch": 0.8557306973016298, "step": 3203, "train/loss_ctc": 1.761958122253418, "train/loss_error": 0.4743218421936035, "train/loss_total": 0.7318490743637085 }, { "epoch": 0.8559978626769971, "step": 3204, "train/loss_ctc": 0.7452456951141357, "train/loss_error": 0.4689018428325653, "train/loss_total": 0.5241706371307373 }, { "epoch": 0.8562650280523644, "step": 3205, "train/loss_ctc": 0.5363925099372864, "train/loss_error": 0.42170190811157227, "train/loss_total": 0.44464004039764404 }, { "epoch": 0.8565321934277318, "step": 3206, "train/loss_ctc": 0.7935597896575928, "train/loss_error": 0.4473503828048706, "train/loss_total": 0.516592264175415 }, { "epoch": 0.8567993588030991, "step": 3207, "train/loss_ctc": 0.8960939049720764, "train/loss_error": 0.4287090003490448, "train/loss_total": 0.5221859812736511 }, { "epoch": 0.8570665241784665, "step": 3208, "train/loss_ctc": 0.6452332735061646, "train/loss_error": 0.49647897481918335, "train/loss_total": 0.5262298583984375 }, { "epoch": 0.8573336895538338, "step": 3209, "train/loss_ctc": 1.0500280857086182, "train/loss_error": 0.4560733735561371, "train/loss_total": 0.5748643279075623 }, { "epoch": 0.8576008549292011, "grad_norm": 1.3955662250518799, "learning_rate": 2.486080683943361e-05, "loss": 0.5623, "step": 3210 }, { "epoch": 0.8576008549292011, "step": 3210, "train/loss_ctc": 0.82066410779953, "train/loss_error": 0.5242272019386292, "train/loss_total": 0.5835145711898804 }, { "epoch": 0.8578680203045685, "step": 
3211, "train/loss_ctc": 0.6942819356918335, "train/loss_error": 0.4901849329471588, "train/loss_total": 0.5310043692588806 }, { "epoch": 0.8581351856799359, "step": 3212, "train/loss_ctc": 0.5998343229293823, "train/loss_error": 0.4762273132801056, "train/loss_total": 0.5009487271308899 }, { "epoch": 0.8584023510553033, "step": 3213, "train/loss_ctc": 0.6150535941123962, "train/loss_error": 0.46663233637809753, "train/loss_total": 0.4963166117668152 }, { "epoch": 0.8586695164306706, "step": 3214, "train/loss_ctc": 0.5561748147010803, "train/loss_error": 0.4553254544734955, "train/loss_total": 0.4754953384399414 }, { "epoch": 0.8589366818060379, "step": 3215, "train/loss_ctc": 1.4168450832366943, "train/loss_error": 0.4247235059738159, "train/loss_total": 0.6231478452682495 }, { "epoch": 0.8592038471814053, "step": 3216, "train/loss_ctc": 0.8072898387908936, "train/loss_error": 0.5078240633010864, "train/loss_total": 0.5677172541618347 }, { "epoch": 0.8594710125567726, "step": 3217, "train/loss_ctc": 0.4057512879371643, "train/loss_error": 0.4325670897960663, "train/loss_total": 0.4272039532661438 }, { "epoch": 0.85973817793214, "step": 3218, "train/loss_ctc": 0.5789376497268677, "train/loss_error": 0.48974329233169556, "train/loss_total": 0.5075821876525879 }, { "epoch": 0.8600053433075073, "step": 3219, "train/loss_ctc": 0.797553539276123, "train/loss_error": 0.505774736404419, "train/loss_total": 0.5641304850578308 }, { "epoch": 0.8602725086828747, "grad_norm": 1.3359863758087158, "learning_rate": 2.484477691691157e-05, "loss": 0.5277, "step": 3220 }, { "epoch": 0.8602725086828747, "step": 3220, "train/loss_ctc": 0.8559557795524597, "train/loss_error": 0.4746069014072418, "train/loss_total": 0.5508766770362854 }, { "epoch": 0.860539674058242, "step": 3221, "train/loss_ctc": 0.30122822523117065, "train/loss_error": 0.42252886295318604, "train/loss_total": 0.3982687294483185 }, { "epoch": 0.8608068394336094, "step": 3222, "train/loss_ctc": 1.579446792602539, 
"train/loss_error": 0.4955827295780182, "train/loss_total": 0.7123555541038513 }, { "epoch": 0.8610740048089768, "step": 3223, "train/loss_ctc": 0.623897910118103, "train/loss_error": 0.505693256855011, "train/loss_total": 0.5293341875076294 }, { "epoch": 0.8613411701843441, "step": 3224, "train/loss_ctc": 0.7188682556152344, "train/loss_error": 0.46801459789276123, "train/loss_total": 0.5181853175163269 }, { "epoch": 0.8616083355597115, "step": 3225, "train/loss_ctc": 1.077892541885376, "train/loss_error": 0.4057387113571167, "train/loss_total": 0.5401694774627686 }, { "epoch": 0.8618755009350788, "step": 3226, "train/loss_ctc": 1.0402779579162598, "train/loss_error": 0.5245161652565002, "train/loss_total": 0.627668559551239 }, { "epoch": 0.8621426663104461, "step": 3227, "train/loss_ctc": 1.1522958278656006, "train/loss_error": 0.5140130519866943, "train/loss_total": 0.6416696310043335 }, { "epoch": 0.8624098316858135, "step": 3228, "train/loss_ctc": 1.4738199710845947, "train/loss_error": 0.5097883343696594, "train/loss_total": 0.7025946974754333 }, { "epoch": 0.8626769970611808, "step": 3229, "train/loss_ctc": 1.2736260890960693, "train/loss_error": 0.46671441197395325, "train/loss_total": 0.6280967593193054 }, { "epoch": 0.8629441624365483, "grad_norm": 1.0406551361083984, "learning_rate": 2.482874699438953e-05, "loss": 0.5849, "step": 3230 }, { "epoch": 0.8629441624365483, "step": 3230, "train/loss_ctc": 0.6384764909744263, "train/loss_error": 0.5102254748344421, "train/loss_total": 0.535875678062439 }, { "epoch": 0.8632113278119156, "step": 3231, "train/loss_ctc": 0.9089609384536743, "train/loss_error": 0.48182594776153564, "train/loss_total": 0.5672529339790344 }, { "epoch": 0.863478493187283, "step": 3232, "train/loss_ctc": 0.5038744211196899, "train/loss_error": 0.47546395659446716, "train/loss_total": 0.48114606738090515 }, { "epoch": 0.8637456585626503, "step": 3233, "train/loss_ctc": 1.0071982145309448, "train/loss_error": 0.3712397515773773, 
"train/loss_total": 0.4984314441680908 }, { "epoch": 0.8640128239380176, "step": 3234, "train/loss_ctc": 1.0454721450805664, "train/loss_error": 0.4895525574684143, "train/loss_total": 0.6007364988327026 }, { "epoch": 0.864279989313385, "step": 3235, "train/loss_ctc": 1.0505465269088745, "train/loss_error": 0.44800201058387756, "train/loss_total": 0.5685109496116638 }, { "epoch": 0.8645471546887523, "step": 3236, "train/loss_ctc": 0.6152941584587097, "train/loss_error": 0.48959246277809143, "train/loss_total": 0.514732837677002 }, { "epoch": 0.8648143200641197, "step": 3237, "train/loss_ctc": 0.7215101718902588, "train/loss_error": 0.45408719778060913, "train/loss_total": 0.507571816444397 }, { "epoch": 0.865081485439487, "step": 3238, "train/loss_ctc": 0.5390958189964294, "train/loss_error": 0.5031912922859192, "train/loss_total": 0.5103722214698792 }, { "epoch": 0.8653486508148543, "step": 3239, "train/loss_ctc": 0.8290675282478333, "train/loss_error": 0.4578694999217987, "train/loss_total": 0.5321091413497925 }, { "epoch": 0.8656158161902218, "grad_norm": 3.623600959777832, "learning_rate": 2.4812717071867486e-05, "loss": 0.5317, "step": 3240 }, { "epoch": 0.8656158161902218, "step": 3240, "train/loss_ctc": 0.79111248254776, "train/loss_error": 0.4742026627063751, "train/loss_total": 0.537584662437439 }, { "epoch": 0.8658829815655891, "step": 3241, "train/loss_ctc": 1.6637284755706787, "train/loss_error": 0.447637677192688, "train/loss_total": 0.690855860710144 }, { "epoch": 0.8661501469409565, "step": 3242, "train/loss_ctc": 0.9199137687683105, "train/loss_error": 0.5266895890235901, "train/loss_total": 0.6053344011306763 }, { "epoch": 0.8664173123163238, "step": 3243, "train/loss_ctc": 0.359025239944458, "train/loss_error": 0.4220704138278961, "train/loss_total": 0.4094614088535309 }, { "epoch": 0.8666844776916911, "step": 3244, "train/loss_ctc": 1.2478995323181152, "train/loss_error": 0.5515834093093872, "train/loss_total": 0.6908466815948486 }, { "epoch": 
0.8669516430670585, "step": 3245, "train/loss_ctc": 1.0738705396652222, "train/loss_error": 0.49403417110443115, "train/loss_total": 0.6100014448165894 }, { "epoch": 0.8672188084424258, "step": 3246, "train/loss_ctc": 0.5624485015869141, "train/loss_error": 0.43318894505500793, "train/loss_total": 0.4590408504009247 }, { "epoch": 0.8674859738177932, "step": 3247, "train/loss_ctc": 0.8540573120117188, "train/loss_error": 0.45653095841407776, "train/loss_total": 0.5360362529754639 }, { "epoch": 0.8677531391931605, "step": 3248, "train/loss_ctc": 0.40077394247055054, "train/loss_error": 0.43874773383140564, "train/loss_total": 0.43115299940109253 }, { "epoch": 0.868020304568528, "step": 3249, "train/loss_ctc": 1.1160178184509277, "train/loss_error": 0.4828187823295593, "train/loss_total": 0.6094585657119751 }, { "epoch": 0.8682874699438953, "grad_norm": 1.4366375207901, "learning_rate": 2.4796687149345445e-05, "loss": 0.558, "step": 3250 }, { "epoch": 0.8682874699438953, "step": 3250, "train/loss_ctc": 1.0483585596084595, "train/loss_error": 0.48948556184768677, "train/loss_total": 0.6012601852416992 }, { "epoch": 0.8685546353192626, "step": 3251, "train/loss_ctc": 0.6026918888092041, "train/loss_error": 0.5072348713874817, "train/loss_total": 0.5263262987136841 }, { "epoch": 0.86882180069463, "step": 3252, "train/loss_ctc": 0.8606607913970947, "train/loss_error": 0.4966026842594147, "train/loss_total": 0.5694143176078796 }, { "epoch": 0.8690889660699973, "step": 3253, "train/loss_ctc": 0.6702669262886047, "train/loss_error": 0.40975940227508545, "train/loss_total": 0.46186089515686035 }, { "epoch": 0.8693561314453647, "step": 3254, "train/loss_ctc": 0.6911590099334717, "train/loss_error": 0.4148692488670349, "train/loss_total": 0.4701272249221802 }, { "epoch": 0.869623296820732, "step": 3255, "train/loss_ctc": 0.5420863628387451, "train/loss_error": 0.4550267159938812, "train/loss_total": 0.47243866324424744 }, { "epoch": 0.8698904621960993, "step": 3256, 
"train/loss_ctc": 0.4632501006126404, "train/loss_error": 0.4810810685157776, "train/loss_total": 0.4775148928165436 }, { "epoch": 0.8701576275714668, "step": 3257, "train/loss_ctc": 0.9386894106864929, "train/loss_error": 0.5302446484565735, "train/loss_total": 0.6119335889816284 }, { "epoch": 0.8704247929468341, "step": 3258, "train/loss_ctc": 0.7840349674224854, "train/loss_error": 0.48364609479904175, "train/loss_total": 0.5437238812446594 }, { "epoch": 0.8706919583222015, "step": 3259, "train/loss_ctc": 1.0688999891281128, "train/loss_error": 0.55096435546875, "train/loss_total": 0.6545515060424805 }, { "epoch": 0.8709591236975688, "grad_norm": 1.8053230047225952, "learning_rate": 2.4780657226823403e-05, "loss": 0.5389, "step": 3260 }, { "epoch": 0.8709591236975688, "step": 3260, "train/loss_ctc": 0.7803812026977539, "train/loss_error": 0.47635746002197266, "train/loss_total": 0.537162184715271 }, { "epoch": 0.8712262890729362, "step": 3261, "train/loss_ctc": 0.7718378305435181, "train/loss_error": 0.5016964077949524, "train/loss_total": 0.5557247400283813 }, { "epoch": 0.8714934544483035, "step": 3262, "train/loss_ctc": 0.6770038604736328, "train/loss_error": 0.4526674151420593, "train/loss_total": 0.49753472208976746 }, { "epoch": 0.8717606198236708, "step": 3263, "train/loss_ctc": 0.6659913063049316, "train/loss_error": 0.5725822448730469, "train/loss_total": 0.5912640690803528 }, { "epoch": 0.8720277851990382, "step": 3264, "train/loss_ctc": 0.789483368396759, "train/loss_error": 0.5679417252540588, "train/loss_total": 0.6122500896453857 }, { "epoch": 0.8722949505744055, "step": 3265, "train/loss_ctc": 0.5676129460334778, "train/loss_error": 0.4933619797229767, "train/loss_total": 0.5082122087478638 }, { "epoch": 0.872562115949773, "step": 3266, "train/loss_ctc": 1.0420209169387817, "train/loss_error": 0.45963001251220703, "train/loss_total": 0.5761082172393799 }, { "epoch": 0.8728292813251403, "step": 3267, "train/loss_ctc": 0.9294723868370056, 
"train/loss_error": 0.4932272136211395, "train/loss_total": 0.5804762244224548 }, { "epoch": 0.8730964467005076, "step": 3268, "train/loss_ctc": 1.3334131240844727, "train/loss_error": 0.5395970344543457, "train/loss_total": 0.6983602643013 }, { "epoch": 0.873363612075875, "step": 3269, "train/loss_ctc": 0.7758563756942749, "train/loss_error": 0.44979986548423767, "train/loss_total": 0.515011191368103 }, { "epoch": 0.8736307774512423, "grad_norm": 3.0737216472625732, "learning_rate": 2.476462730430136e-05, "loss": 0.5672, "step": 3270 }, { "epoch": 0.8736307774512423, "step": 3270, "train/loss_ctc": 1.0079963207244873, "train/loss_error": 0.4950129985809326, "train/loss_total": 0.5976096391677856 }, { "epoch": 0.8738979428266097, "step": 3271, "train/loss_ctc": 0.3717257082462311, "train/loss_error": 0.4660215973854065, "train/loss_total": 0.4471624195575714 }, { "epoch": 0.874165108201977, "step": 3272, "train/loss_ctc": 0.7443214654922485, "train/loss_error": 0.5179699063301086, "train/loss_total": 0.5632402300834656 }, { "epoch": 0.8744322735773443, "step": 3273, "train/loss_ctc": 0.3428748846054077, "train/loss_error": 0.4473317861557007, "train/loss_total": 0.42644041776657104 }, { "epoch": 0.8746994389527117, "step": 3274, "train/loss_ctc": 0.41028279066085815, "train/loss_error": 0.47009313106536865, "train/loss_total": 0.4581310749053955 }, { "epoch": 0.874966604328079, "step": 3275, "train/loss_ctc": 0.6192250847816467, "train/loss_error": 0.486271470785141, "train/loss_total": 0.5128622055053711 }, { "epoch": 0.8752337697034465, "step": 3276, "train/loss_ctc": 0.6313298940658569, "train/loss_error": 0.4623478353023529, "train/loss_total": 0.49614423513412476 }, { "epoch": 0.8755009350788138, "step": 3277, "train/loss_ctc": 0.9709841012954712, "train/loss_error": 0.532330334186554, "train/loss_total": 0.6200610995292664 }, { "epoch": 0.8757681004541812, "step": 3278, "train/loss_ctc": 0.5724344253540039, "train/loss_error": 0.4670272469520569, 
"train/loss_total": 0.48810869455337524 }, { "epoch": 0.8760352658295485, "step": 3279, "train/loss_ctc": 0.8957648277282715, "train/loss_error": 0.4747563600540161, "train/loss_total": 0.5589580535888672 }, { "epoch": 0.8763024312049158, "grad_norm": 3.3033602237701416, "learning_rate": 2.4748597381779322e-05, "loss": 0.5169, "step": 3280 }, { "epoch": 0.8763024312049158, "step": 3280, "train/loss_ctc": 1.3774747848510742, "train/loss_error": 0.5236810445785522, "train/loss_total": 0.6944397687911987 }, { "epoch": 0.8765695965802832, "step": 3281, "train/loss_ctc": 0.6645187139511108, "train/loss_error": 0.5412089228630066, "train/loss_total": 0.5658708810806274 }, { "epoch": 0.8768367619556505, "step": 3282, "train/loss_ctc": 0.8895927667617798, "train/loss_error": 0.48579999804496765, "train/loss_total": 0.5665585994720459 }, { "epoch": 0.8771039273310179, "step": 3283, "train/loss_ctc": 0.5431087017059326, "train/loss_error": 0.4707886278629303, "train/loss_total": 0.48525264859199524 }, { "epoch": 0.8773710927063852, "step": 3284, "train/loss_ctc": 0.9945969581604004, "train/loss_error": 0.5042879581451416, "train/loss_total": 0.6023497581481934 }, { "epoch": 0.8776382580817526, "step": 3285, "train/loss_ctc": 0.8325635194778442, "train/loss_error": 0.5004280805587769, "train/loss_total": 0.5668551921844482 }, { "epoch": 0.87790542345712, "step": 3286, "train/loss_ctc": 0.3161351680755615, "train/loss_error": 0.4199959635734558, "train/loss_total": 0.39922380447387695 }, { "epoch": 0.8781725888324873, "step": 3287, "train/loss_ctc": 1.1136977672576904, "train/loss_error": 0.47907811403274536, "train/loss_total": 0.6060020327568054 }, { "epoch": 0.8784397542078547, "step": 3288, "train/loss_ctc": 0.6073254346847534, "train/loss_error": 0.4894351363182068, "train/loss_total": 0.5130132436752319 }, { "epoch": 0.878706919583222, "step": 3289, "train/loss_ctc": 0.5122302770614624, "train/loss_error": 0.5422742366790771, "train/loss_total": 0.5362654328346252 }, { 
"epoch": 0.8789740849585894, "grad_norm": 1.7531330585479736, "learning_rate": 2.473256745925728e-05, "loss": 0.5536, "step": 3290 }, { "epoch": 0.8789740849585894, "step": 3290, "train/loss_ctc": 0.6217741966247559, "train/loss_error": 0.42407163977622986, "train/loss_total": 0.4636121690273285 }, { "epoch": 0.8792412503339567, "step": 3291, "train/loss_ctc": 0.6267963647842407, "train/loss_error": 0.4868428409099579, "train/loss_total": 0.5148335695266724 }, { "epoch": 0.879508415709324, "step": 3292, "train/loss_ctc": 0.514979362487793, "train/loss_error": 0.41893789172172546, "train/loss_total": 0.4381462037563324 }, { "epoch": 0.8797755810846914, "step": 3293, "train/loss_ctc": 1.053723931312561, "train/loss_error": 0.46675747632980347, "train/loss_total": 0.5841507911682129 }, { "epoch": 0.8800427464600588, "step": 3294, "train/loss_ctc": 0.9281941652297974, "train/loss_error": 0.5382204651832581, "train/loss_total": 0.6162152290344238 }, { "epoch": 0.8803099118354262, "step": 3295, "train/loss_ctc": 0.40197330713272095, "train/loss_error": 0.49210089445114136, "train/loss_total": 0.4740753769874573 }, { "epoch": 0.8805770772107935, "step": 3296, "train/loss_ctc": 0.8967203497886658, "train/loss_error": 0.5441153645515442, "train/loss_total": 0.6146363615989685 }, { "epoch": 0.8808442425861608, "step": 3297, "train/loss_ctc": 1.1492468118667603, "train/loss_error": 0.43267345428466797, "train/loss_total": 0.5759881734848022 }, { "epoch": 0.8811114079615282, "step": 3298, "train/loss_ctc": 0.49732476472854614, "train/loss_error": 0.3704291582107544, "train/loss_total": 0.39580827951431274 }, { "epoch": 0.8813785733368955, "step": 3299, "train/loss_ctc": 1.1049718856811523, "train/loss_error": 0.5236292481422424, "train/loss_total": 0.6398978233337402 }, { "epoch": 0.8816457387122629, "grad_norm": 2.3421497344970703, "learning_rate": 2.471653753673524e-05, "loss": 0.5317, "step": 3300 }, { "epoch": 0.8816457387122629, "step": 3300, "train/loss_ctc": 
0.8147406578063965, "train/loss_error": 0.5189582705497742, "train/loss_total": 0.5781147480010986 }, { "epoch": 0.8819129040876302, "step": 3301, "train/loss_ctc": 0.38150861859321594, "train/loss_error": 0.5193927884101868, "train/loss_total": 0.4918159544467926 }, { "epoch": 0.8821800694629975, "step": 3302, "train/loss_ctc": 0.9835702180862427, "train/loss_error": 0.4439678490161896, "train/loss_total": 0.5518883466720581 }, { "epoch": 0.882447234838365, "step": 3303, "train/loss_ctc": 0.6967076659202576, "train/loss_error": 0.46721529960632324, "train/loss_total": 0.5131137371063232 }, { "epoch": 0.8827144002137323, "step": 3304, "train/loss_ctc": 0.7455540895462036, "train/loss_error": 0.45091837644577026, "train/loss_total": 0.509845495223999 }, { "epoch": 0.8829815655890997, "step": 3305, "train/loss_ctc": 1.0798466205596924, "train/loss_error": 0.5325914025306702, "train/loss_total": 0.6420424580574036 }, { "epoch": 0.883248730964467, "step": 3306, "train/loss_ctc": 0.7551547288894653, "train/loss_error": 0.4662017822265625, "train/loss_total": 0.5239923596382141 }, { "epoch": 0.8835158963398344, "step": 3307, "train/loss_ctc": 0.43271660804748535, "train/loss_error": 0.4845793545246124, "train/loss_total": 0.474206805229187 }, { "epoch": 0.8837830617152017, "step": 3308, "train/loss_ctc": 0.6597588658332825, "train/loss_error": 0.44931092858314514, "train/loss_total": 0.4914005398750305 }, { "epoch": 0.884050227090569, "step": 3309, "train/loss_ctc": 0.36401206254959106, "train/loss_error": 0.4797859787940979, "train/loss_total": 0.45663121342658997 }, { "epoch": 0.8843173924659364, "grad_norm": 1.1462656259536743, "learning_rate": 2.4700507614213196e-05, "loss": 0.5233, "step": 3310 }, { "epoch": 0.8843173924659364, "step": 3310, "train/loss_ctc": 0.36992672085762024, "train/loss_error": 0.4111216962337494, "train/loss_total": 0.4028826951980591 }, { "epoch": 0.8845845578413037, "step": 3311, "train/loss_ctc": 0.7094646692276001, "train/loss_error": 
0.49708420038223267, "train/loss_total": 0.5395603179931641 }, { "epoch": 0.8848517232166712, "step": 3312, "train/loss_ctc": 0.7959470748901367, "train/loss_error": 0.4610859155654907, "train/loss_total": 0.5280581712722778 }, { "epoch": 0.8851188885920385, "step": 3313, "train/loss_ctc": 0.6320904493331909, "train/loss_error": 0.46180567145347595, "train/loss_total": 0.4958626627922058 }, { "epoch": 0.8853860539674058, "step": 3314, "train/loss_ctc": 1.5405638217926025, "train/loss_error": 0.49653589725494385, "train/loss_total": 0.7053414583206177 }, { "epoch": 0.8856532193427732, "step": 3315, "train/loss_ctc": 0.770024299621582, "train/loss_error": 0.5100765824317932, "train/loss_total": 0.5620661377906799 }, { "epoch": 0.8859203847181405, "step": 3316, "train/loss_ctc": 0.7674458026885986, "train/loss_error": 0.47889184951782227, "train/loss_total": 0.5366026163101196 }, { "epoch": 0.8861875500935079, "step": 3317, "train/loss_ctc": 0.6397322416305542, "train/loss_error": 0.470998615026474, "train/loss_total": 0.504745364189148 }, { "epoch": 0.8864547154688752, "step": 3318, "train/loss_ctc": 0.7084599137306213, "train/loss_error": 0.5031605362892151, "train/loss_total": 0.5442204475402832 }, { "epoch": 0.8867218808442426, "step": 3319, "train/loss_ctc": 0.5138479471206665, "train/loss_error": 0.574337363243103, "train/loss_total": 0.5622394680976868 }, { "epoch": 0.88698904621961, "grad_norm": 2.619608163833618, "learning_rate": 2.4684477691691158e-05, "loss": 0.5382, "step": 3320 }, { "epoch": 0.88698904621961, "step": 3320, "train/loss_ctc": 0.3213087320327759, "train/loss_error": 0.46769100427627563, "train/loss_total": 0.4384145736694336 }, { "epoch": 0.8872562115949773, "step": 3321, "train/loss_ctc": 1.168924331665039, "train/loss_error": 0.48604628443717957, "train/loss_total": 0.6226218938827515 }, { "epoch": 0.8875233769703447, "step": 3322, "train/loss_ctc": 0.603694498538971, "train/loss_error": 0.48048263788223267, "train/loss_total": 
0.5051250457763672 }, { "epoch": 0.887790542345712, "step": 3323, "train/loss_ctc": 0.6589060425758362, "train/loss_error": 0.5555368065834045, "train/loss_total": 0.5762106776237488 }, { "epoch": 0.8880577077210794, "step": 3324, "train/loss_ctc": 1.3392589092254639, "train/loss_error": 0.4373631775379181, "train/loss_total": 0.6177423596382141 }, { "epoch": 0.8883248730964467, "step": 3325, "train/loss_ctc": 1.290363073348999, "train/loss_error": 0.457657128572464, "train/loss_total": 0.624198317527771 }, { "epoch": 0.888592038471814, "step": 3326, "train/loss_ctc": 0.6032229661941528, "train/loss_error": 0.4662169814109802, "train/loss_total": 0.4936181902885437 }, { "epoch": 0.8888592038471814, "step": 3327, "train/loss_ctc": 0.6622551083564758, "train/loss_error": 0.4942096769809723, "train/loss_total": 0.5278187990188599 }, { "epoch": 0.8891263692225487, "step": 3328, "train/loss_ctc": 0.9433482885360718, "train/loss_error": 0.49678415060043335, "train/loss_total": 0.586097002029419 }, { "epoch": 0.8893935345979161, "step": 3329, "train/loss_ctc": 0.9038457870483398, "train/loss_error": 0.5126225352287292, "train/loss_total": 0.5908672213554382 }, { "epoch": 0.8896606999732835, "grad_norm": 1.1853793859481812, "learning_rate": 2.466844776916912e-05, "loss": 0.5583, "step": 3330 }, { "epoch": 0.8896606999732835, "step": 3330, "train/loss_ctc": 0.935968279838562, "train/loss_error": 0.4874541461467743, "train/loss_total": 0.5771570205688477 }, { "epoch": 0.8899278653486508, "step": 3331, "train/loss_ctc": 0.487009197473526, "train/loss_error": 0.47253066301345825, "train/loss_total": 0.4754263758659363 }, { "epoch": 0.8901950307240182, "step": 3332, "train/loss_ctc": 0.9246155023574829, "train/loss_error": 0.4648340046405792, "train/loss_total": 0.556790292263031 }, { "epoch": 0.8904621960993855, "step": 3333, "train/loss_ctc": 1.3561443090438843, "train/loss_error": 0.4368847906589508, "train/loss_total": 0.6207367181777954 }, { "epoch": 0.8907293614747529, 
"step": 3334, "train/loss_ctc": 1.08652663230896, "train/loss_error": 0.4661654233932495, "train/loss_total": 0.5902376770973206 }, { "epoch": 0.8909965268501202, "step": 3335, "train/loss_ctc": 0.784156084060669, "train/loss_error": 0.49730610847473145, "train/loss_total": 0.5546761155128479 }, { "epoch": 0.8912636922254876, "step": 3336, "train/loss_ctc": 3.2174360752105713, "train/loss_error": 0.5196065306663513, "train/loss_total": 1.0591723918914795 }, { "epoch": 0.8915308576008549, "step": 3337, "train/loss_ctc": 1.6350336074829102, "train/loss_error": 0.4859484136104584, "train/loss_total": 0.7157654762268066 }, { "epoch": 0.8917980229762222, "step": 3338, "train/loss_ctc": 0.7775758504867554, "train/loss_error": 0.4463478624820709, "train/loss_total": 0.5125934481620789 }, { "epoch": 0.8920651883515897, "step": 3339, "train/loss_ctc": 0.8789438009262085, "train/loss_error": 0.45484423637390137, "train/loss_total": 0.5396641492843628 }, { "epoch": 0.892332353726957, "grad_norm": 1.9004477262496948, "learning_rate": 2.4652417846647077e-05, "loss": 0.6202, "step": 3340 }, { "epoch": 0.892332353726957, "step": 3340, "train/loss_ctc": 0.22994650900363922, "train/loss_error": 0.48042675852775574, "train/loss_total": 0.43033072352409363 }, { "epoch": 0.8925995191023244, "step": 3341, "train/loss_ctc": 1.3901567459106445, "train/loss_error": 0.5265464186668396, "train/loss_total": 0.6992684602737427 }, { "epoch": 0.8928666844776917, "step": 3342, "train/loss_ctc": 0.5434172749519348, "train/loss_error": 0.5202376842498779, "train/loss_total": 0.5248736143112183 }, { "epoch": 0.893133849853059, "step": 3343, "train/loss_ctc": 0.8966229557991028, "train/loss_error": 0.41275307536125183, "train/loss_total": 0.5095270872116089 }, { "epoch": 0.8934010152284264, "step": 3344, "train/loss_ctc": 0.8391170501708984, "train/loss_error": 0.5003804564476013, "train/loss_total": 0.5681278109550476 }, { "epoch": 0.8936681806037937, "step": 3345, "train/loss_ctc": 
0.8629227876663208, "train/loss_error": 0.4823722839355469, "train/loss_total": 0.5584824085235596 }, { "epoch": 0.8939353459791611, "step": 3346, "train/loss_ctc": 0.6964774131774902, "train/loss_error": 0.43958091735839844, "train/loss_total": 0.4909602403640747 }, { "epoch": 0.8942025113545284, "step": 3347, "train/loss_ctc": 1.1979563236236572, "train/loss_error": 0.4295197129249573, "train/loss_total": 0.5832070112228394 }, { "epoch": 0.8944696767298959, "step": 3348, "train/loss_ctc": 0.9524686336517334, "train/loss_error": 0.5455309152603149, "train/loss_total": 0.6269184350967407 }, { "epoch": 0.8947368421052632, "step": 3349, "train/loss_ctc": 0.6072737574577332, "train/loss_error": 0.5552489757537842, "train/loss_total": 0.565653920173645 }, { "epoch": 0.8950040074806305, "grad_norm": 1.8594268560409546, "learning_rate": 2.4636387924125036e-05, "loss": 0.5557, "step": 3350 }, { "epoch": 0.8950040074806305, "step": 3350, "train/loss_ctc": 0.6926593780517578, "train/loss_error": 0.5682445168495178, "train/loss_total": 0.5931274890899658 }, { "epoch": 0.8952711728559979, "step": 3351, "train/loss_ctc": 0.5475561618804932, "train/loss_error": 0.5156329870223999, "train/loss_total": 0.5220176577568054 }, { "epoch": 0.8955383382313652, "step": 3352, "train/loss_ctc": 1.483647108078003, "train/loss_error": 0.46765127778053284, "train/loss_total": 0.6708504557609558 }, { "epoch": 0.8958055036067326, "step": 3353, "train/loss_ctc": 1.7452523708343506, "train/loss_error": 0.456162691116333, "train/loss_total": 0.7139806747436523 }, { "epoch": 0.8960726689820999, "step": 3354, "train/loss_ctc": 1.2620738744735718, "train/loss_error": 0.5077790021896362, "train/loss_total": 0.6586380004882812 }, { "epoch": 0.8963398343574672, "step": 3355, "train/loss_ctc": 0.9689384698867798, "train/loss_error": 0.5161939859390259, "train/loss_total": 0.6067428588867188 }, { "epoch": 0.8966069997328346, "step": 3356, "train/loss_ctc": 0.5560733079910278, "train/loss_error": 
0.4838523268699646, "train/loss_total": 0.4982965290546417 }, { "epoch": 0.896874165108202, "step": 3357, "train/loss_ctc": 0.9574555158615112, "train/loss_error": 0.4690059423446655, "train/loss_total": 0.5666958689689636 }, { "epoch": 0.8971413304835694, "step": 3358, "train/loss_ctc": 1.3362736701965332, "train/loss_error": 0.40914463996887207, "train/loss_total": 0.5945704579353333 }, { "epoch": 0.8974084958589367, "step": 3359, "train/loss_ctc": 0.892714262008667, "train/loss_error": 0.44076260924339294, "train/loss_total": 0.5311529636383057 }, { "epoch": 0.897675661234304, "grad_norm": 0.8574635982513428, "learning_rate": 2.4620358001602994e-05, "loss": 0.5956, "step": 3360 }, { "epoch": 0.897675661234304, "step": 3360, "train/loss_ctc": 1.166532278060913, "train/loss_error": 0.5368314981460571, "train/loss_total": 0.6627716422080994 }, { "epoch": 0.8979428266096714, "step": 3361, "train/loss_ctc": 0.9338124990463257, "train/loss_error": 0.5032956600189209, "train/loss_total": 0.5893990397453308 }, { "epoch": 0.8982099919850387, "step": 3362, "train/loss_ctc": 0.3246608078479767, "train/loss_error": 0.546002209186554, "train/loss_total": 0.5017338991165161 }, { "epoch": 0.8984771573604061, "step": 3363, "train/loss_ctc": 1.267413854598999, "train/loss_error": 0.562779426574707, "train/loss_total": 0.7037063241004944 }, { "epoch": 0.8987443227357734, "step": 3364, "train/loss_ctc": 1.1885347366333008, "train/loss_error": 0.48290595412254333, "train/loss_total": 0.6240317225456238 }, { "epoch": 0.8990114881111408, "step": 3365, "train/loss_ctc": 0.8893055319786072, "train/loss_error": 0.4649439752101898, "train/loss_total": 0.5498163104057312 }, { "epoch": 0.8992786534865082, "step": 3366, "train/loss_ctc": 0.7901560068130493, "train/loss_error": 0.43124324083328247, "train/loss_total": 0.5030258297920227 }, { "epoch": 0.8995458188618755, "step": 3367, "train/loss_ctc": 0.4195953607559204, "train/loss_error": 0.45165273547172546, "train/loss_total": 
0.4452412724494934 }, { "epoch": 0.8998129842372429, "step": 3368, "train/loss_ctc": 0.5382916927337646, "train/loss_error": 0.445697158575058, "train/loss_total": 0.46421605348587036 }, { "epoch": 0.9000801496126102, "step": 3369, "train/loss_ctc": 1.3667629957199097, "train/loss_error": 0.4648316502571106, "train/loss_total": 0.6452178955078125 }, { "epoch": 0.9003473149879776, "grad_norm": 7.2755351066589355, "learning_rate": 2.4604328079080952e-05, "loss": 0.5689, "step": 3370 }, { "epoch": 0.9003473149879776, "step": 3370, "train/loss_ctc": 0.49527645111083984, "train/loss_error": 0.45847558975219727, "train/loss_total": 0.4658357799053192 }, { "epoch": 0.9006144803633449, "step": 3371, "train/loss_ctc": 0.6643853783607483, "train/loss_error": 0.44314801692962646, "train/loss_total": 0.4873954951763153 }, { "epoch": 0.9008816457387122, "step": 3372, "train/loss_ctc": 0.44014066457748413, "train/loss_error": 0.5216938853263855, "train/loss_total": 0.5053832530975342 }, { "epoch": 0.9011488111140796, "step": 3373, "train/loss_ctc": 1.0720473527908325, "train/loss_error": 0.5077056884765625, "train/loss_total": 0.6205739974975586 }, { "epoch": 0.9014159764894469, "step": 3374, "train/loss_ctc": 0.7404699325561523, "train/loss_error": 0.49189871549606323, "train/loss_total": 0.541612982749939 }, { "epoch": 0.9016831418648144, "step": 3375, "train/loss_ctc": 0.7318744659423828, "train/loss_error": 0.4992069900035858, "train/loss_total": 0.5457404851913452 }, { "epoch": 0.9019503072401817, "step": 3376, "train/loss_ctc": 0.8994971513748169, "train/loss_error": 0.4503374993801117, "train/loss_total": 0.5401694178581238 }, { "epoch": 0.9022174726155491, "step": 3377, "train/loss_ctc": 0.7830135822296143, "train/loss_error": 0.45345959067344666, "train/loss_total": 0.5193703770637512 }, { "epoch": 0.9024846379909164, "step": 3378, "train/loss_ctc": 1.386165738105774, "train/loss_error": 0.5262821912765503, "train/loss_total": 0.6982588768005371 }, { "epoch": 
0.9027518033662837, "step": 3379, "train/loss_ctc": 1.297654628753662, "train/loss_error": 0.4660951495170593, "train/loss_total": 0.6324070692062378 }, { "epoch": 0.9030189687416511, "grad_norm": 1.4904083013534546, "learning_rate": 2.458829815655891e-05, "loss": 0.5557, "step": 3380 }, { "epoch": 0.9030189687416511, "step": 3380, "train/loss_ctc": 0.8409029245376587, "train/loss_error": 0.513373076915741, "train/loss_total": 0.5788790583610535 }, { "epoch": 0.9032861341170184, "step": 3381, "train/loss_ctc": 0.4796462655067444, "train/loss_error": 0.516353189945221, "train/loss_total": 0.5090118050575256 }, { "epoch": 0.9035532994923858, "step": 3382, "train/loss_ctc": 1.4681806564331055, "train/loss_error": 0.45071926712989807, "train/loss_total": 0.6542115211486816 }, { "epoch": 0.9038204648677531, "step": 3383, "train/loss_ctc": 0.8778011798858643, "train/loss_error": 0.5134565830230713, "train/loss_total": 0.5863255262374878 }, { "epoch": 0.9040876302431204, "step": 3384, "train/loss_ctc": 0.6792441010475159, "train/loss_error": 0.5020376443862915, "train/loss_total": 0.5374789237976074 }, { "epoch": 0.9043547956184879, "step": 3385, "train/loss_ctc": 0.8866318464279175, "train/loss_error": 0.49143922328948975, "train/loss_total": 0.5704777240753174 }, { "epoch": 0.9046219609938552, "step": 3386, "train/loss_ctc": 0.8891377449035645, "train/loss_error": 0.49673324823379517, "train/loss_total": 0.575214147567749 }, { "epoch": 0.9048891263692226, "step": 3387, "train/loss_ctc": 0.7065724730491638, "train/loss_error": 0.5018807649612427, "train/loss_total": 0.542819082736969 }, { "epoch": 0.9051562917445899, "step": 3388, "train/loss_ctc": 1.165587306022644, "train/loss_error": 0.5139285922050476, "train/loss_total": 0.6442603468894958 }, { "epoch": 0.9054234571199573, "step": 3389, "train/loss_ctc": 0.8307275176048279, "train/loss_error": 0.5028313398361206, "train/loss_total": 0.5684105753898621 }, { "epoch": 0.9056906224953246, "grad_norm": 
1.2463074922561646, "learning_rate": 2.457226823403687e-05, "loss": 0.5767, "step": 3390 }, { "epoch": 0.9056906224953246, "step": 3390, "train/loss_ctc": 0.7257765531539917, "train/loss_error": 0.4403577148914337, "train/loss_total": 0.49744150042533875 }, { "epoch": 0.9059577878706919, "step": 3391, "train/loss_ctc": 0.8162412047386169, "train/loss_error": 0.45079052448272705, "train/loss_total": 0.523880660533905 }, { "epoch": 0.9062249532460593, "step": 3392, "train/loss_ctc": 0.8699325323104858, "train/loss_error": 0.5184797644615173, "train/loss_total": 0.58877032995224 }, { "epoch": 0.9064921186214266, "step": 3393, "train/loss_ctc": 0.9109047055244446, "train/loss_error": 0.5171363353729248, "train/loss_total": 0.5958900451660156 }, { "epoch": 0.9067592839967941, "step": 3394, "train/loss_ctc": 0.5456739068031311, "train/loss_error": 0.5822296142578125, "train/loss_total": 0.5749184489250183 }, { "epoch": 0.9070264493721614, "step": 3395, "train/loss_ctc": 1.1545909643173218, "train/loss_error": 0.47086045145988464, "train/loss_total": 0.6076065301895142 }, { "epoch": 0.9072936147475287, "step": 3396, "train/loss_ctc": 0.759467601776123, "train/loss_error": 0.4620480537414551, "train/loss_total": 0.5215319395065308 }, { "epoch": 0.9075607801228961, "step": 3397, "train/loss_ctc": 1.0132710933685303, "train/loss_error": 0.47390851378440857, "train/loss_total": 0.5817810297012329 }, { "epoch": 0.9078279454982634, "step": 3398, "train/loss_ctc": 1.0951974391937256, "train/loss_error": 0.5173656940460205, "train/loss_total": 0.6329320669174194 }, { "epoch": 0.9080951108736308, "step": 3399, "train/loss_ctc": 0.7235309481620789, "train/loss_error": 0.4708389937877655, "train/loss_total": 0.5213773846626282 }, { "epoch": 0.9083622762489981, "grad_norm": 1.7847402095794678, "learning_rate": 2.455623831151483e-05, "loss": 0.5646, "step": 3400 }, { "epoch": 0.9083622762489981, "step": 3400, "train/loss_ctc": 1.3383525609970093, "train/loss_error": 
0.4881841838359833, "train/loss_total": 0.6582179069519043 }, { "epoch": 0.9086294416243654, "step": 3401, "train/loss_ctc": 0.3609795570373535, "train/loss_error": 0.44923466444015503, "train/loss_total": 0.4315836429595947 }, { "epoch": 0.9088966069997328, "step": 3402, "train/loss_ctc": 0.6951619386672974, "train/loss_error": 0.48175230622291565, "train/loss_total": 0.5244342088699341 }, { "epoch": 0.9091637723751002, "step": 3403, "train/loss_ctc": 0.8833766579627991, "train/loss_error": 0.4770660996437073, "train/loss_total": 0.5583282113075256 }, { "epoch": 0.9094309377504676, "step": 3404, "train/loss_ctc": 0.9326575994491577, "train/loss_error": 0.4429357051849365, "train/loss_total": 0.5408800840377808 }, { "epoch": 0.9096981031258349, "step": 3405, "train/loss_ctc": 1.3480339050292969, "train/loss_error": 0.4966800808906555, "train/loss_total": 0.6669508814811707 }, { "epoch": 0.9099652685012023, "step": 3406, "train/loss_ctc": 0.8963239192962646, "train/loss_error": 0.4827163815498352, "train/loss_total": 0.565437912940979 }, { "epoch": 0.9102324338765696, "step": 3407, "train/loss_ctc": 0.742943286895752, "train/loss_error": 0.4587017297744751, "train/loss_total": 0.5155500173568726 }, { "epoch": 0.9104995992519369, "step": 3408, "train/loss_ctc": 0.2446107268333435, "train/loss_error": 0.48999112844467163, "train/loss_total": 0.440915048122406 }, { "epoch": 0.9107667646273043, "step": 3409, "train/loss_ctc": 1.5975209474563599, "train/loss_error": 0.4545794427394867, "train/loss_total": 0.6831677556037903 }, { "epoch": 0.9110339300026716, "grad_norm": 1.6234992742538452, "learning_rate": 2.4540208388992787e-05, "loss": 0.5585, "step": 3410 }, { "epoch": 0.9110339300026716, "step": 3410, "train/loss_ctc": 0.780453085899353, "train/loss_error": 0.4969489276409149, "train/loss_total": 0.5536497831344604 }, { "epoch": 0.911301095378039, "step": 3411, "train/loss_ctc": 0.7801978588104248, "train/loss_error": 0.5062515139579773, "train/loss_total": 
0.5610408186912537 }, { "epoch": 0.9115682607534064, "step": 3412, "train/loss_ctc": 0.896430492401123, "train/loss_error": 0.4507591724395752, "train/loss_total": 0.5398934483528137 }, { "epoch": 0.9118354261287737, "step": 3413, "train/loss_ctc": 0.4581610858440399, "train/loss_error": 0.4140303134918213, "train/loss_total": 0.42285647988319397 }, { "epoch": 0.9121025915041411, "step": 3414, "train/loss_ctc": 0.2595197260379791, "train/loss_error": 0.4949053227901459, "train/loss_total": 0.4478282034397125 }, { "epoch": 0.9123697568795084, "step": 3415, "train/loss_ctc": 0.3195077180862427, "train/loss_error": 0.41985541582107544, "train/loss_total": 0.3997858762741089 }, { "epoch": 0.9126369222548758, "step": 3416, "train/loss_ctc": 0.8463519811630249, "train/loss_error": 0.4473753571510315, "train/loss_total": 0.5271706581115723 }, { "epoch": 0.9129040876302431, "step": 3417, "train/loss_ctc": 0.9734652638435364, "train/loss_error": 0.4700004756450653, "train/loss_total": 0.5706934332847595 }, { "epoch": 0.9131712530056105, "step": 3418, "train/loss_ctc": 0.8967537879943848, "train/loss_error": 0.42984065413475037, "train/loss_total": 0.5232232809066772 }, { "epoch": 0.9134384183809778, "step": 3419, "train/loss_ctc": 0.8292235136032104, "train/loss_error": 0.4969872236251831, "train/loss_total": 0.5634344816207886 }, { "epoch": 0.9137055837563451, "grad_norm": 1.3532096147537231, "learning_rate": 2.4524178466470746e-05, "loss": 0.511, "step": 3420 }, { "epoch": 0.9137055837563451, "step": 3420, "train/loss_ctc": 0.9598293304443359, "train/loss_error": 0.5077037811279297, "train/loss_total": 0.5981289148330688 }, { "epoch": 0.9139727491317126, "step": 3421, "train/loss_ctc": 1.4507941007614136, "train/loss_error": 0.47306379675865173, "train/loss_total": 0.6686098575592041 }, { "epoch": 0.9142399145070799, "step": 3422, "train/loss_ctc": 0.9019536972045898, "train/loss_error": 0.4297815263271332, "train/loss_total": 0.5242159366607666 }, { "epoch": 
0.9145070798824473, "step": 3423, "train/loss_ctc": 0.7548458576202393, "train/loss_error": 0.47427868843078613, "train/loss_total": 0.5303921699523926 }, { "epoch": 0.9147742452578146, "step": 3424, "train/loss_ctc": 0.4960431456565857, "train/loss_error": 0.4435916244983673, "train/loss_total": 0.4540819227695465 }, { "epoch": 0.9150414106331819, "step": 3425, "train/loss_ctc": 0.9160598516464233, "train/loss_error": 0.5229177474975586, "train/loss_total": 0.6015461683273315 }, { "epoch": 0.9153085760085493, "step": 3426, "train/loss_ctc": 1.8343935012817383, "train/loss_error": 0.44315528869628906, "train/loss_total": 0.7214029431343079 }, { "epoch": 0.9155757413839166, "step": 3427, "train/loss_ctc": 0.5100948214530945, "train/loss_error": 0.4803423583507538, "train/loss_total": 0.48629283905029297 }, { "epoch": 0.915842906759284, "step": 3428, "train/loss_ctc": 1.0767223834991455, "train/loss_error": 0.48826172947883606, "train/loss_total": 0.6059538722038269 }, { "epoch": 0.9161100721346513, "step": 3429, "train/loss_ctc": 0.5045132637023926, "train/loss_error": 0.42072978615760803, "train/loss_total": 0.437486469745636 }, { "epoch": 0.9163772375100186, "grad_norm": 4.148726940155029, "learning_rate": 2.4508148543948704e-05, "loss": 0.5628, "step": 3430 }, { "epoch": 0.9163772375100186, "step": 3430, "train/loss_ctc": 1.2978501319885254, "train/loss_error": 0.5320425629615784, "train/loss_total": 0.6852040886878967 }, { "epoch": 0.9166444028853861, "step": 3431, "train/loss_ctc": 0.9417965412139893, "train/loss_error": 0.5128879547119141, "train/loss_total": 0.5986696481704712 }, { "epoch": 0.9169115682607534, "step": 3432, "train/loss_ctc": 0.7896242141723633, "train/loss_error": 0.5272542238235474, "train/loss_total": 0.5797282457351685 }, { "epoch": 0.9171787336361208, "step": 3433, "train/loss_ctc": 0.6089869737625122, "train/loss_error": 0.5271985530853271, "train/loss_total": 0.543556272983551 }, { "epoch": 0.9174458990114881, "step": 3434, 
"train/loss_ctc": 0.8721272349357605, "train/loss_error": 0.4302854537963867, "train/loss_total": 0.5186538100242615 }, { "epoch": 0.9177130643868555, "step": 3435, "train/loss_ctc": 0.44947725534439087, "train/loss_error": 0.4900006353855133, "train/loss_total": 0.48189598321914673 }, { "epoch": 0.9179802297622228, "step": 3436, "train/loss_ctc": 0.7677585482597351, "train/loss_error": 0.514275074005127, "train/loss_total": 0.5649718046188354 }, { "epoch": 0.9182473951375901, "step": 3437, "train/loss_ctc": 0.6389090418815613, "train/loss_error": 0.5162510871887207, "train/loss_total": 0.5407826900482178 }, { "epoch": 0.9185145605129575, "step": 3438, "train/loss_ctc": 1.014725685119629, "train/loss_error": 0.5132436752319336, "train/loss_total": 0.6135400533676147 }, { "epoch": 0.9187817258883249, "step": 3439, "train/loss_ctc": 0.7269742488861084, "train/loss_error": 0.4534088671207428, "train/loss_total": 0.5081219673156738 }, { "epoch": 0.9190488912636923, "grad_norm": 21.88617515563965, "learning_rate": 2.4492118621426662e-05, "loss": 0.5635, "step": 3440 }, { "epoch": 0.9190488912636923, "step": 3440, "train/loss_ctc": 0.5811458826065063, "train/loss_error": 0.418517142534256, "train/loss_total": 0.45104289054870605 }, { "epoch": 0.9193160566390596, "step": 3441, "train/loss_ctc": 1.1826601028442383, "train/loss_error": 0.4378117620944977, "train/loss_total": 0.5867814421653748 }, { "epoch": 0.9195832220144269, "step": 3442, "train/loss_ctc": 1.0182254314422607, "train/loss_error": 0.488595575094223, "train/loss_total": 0.5945215821266174 }, { "epoch": 0.9198503873897943, "step": 3443, "train/loss_ctc": 1.672121524810791, "train/loss_error": 0.4766959249973297, "train/loss_total": 0.7157810926437378 }, { "epoch": 0.9201175527651616, "step": 3444, "train/loss_ctc": 0.7235927581787109, "train/loss_error": 0.47121405601501465, "train/loss_total": 0.521689772605896 }, { "epoch": 0.920384718140529, "step": 3445, "train/loss_ctc": 0.9276315569877625, 
"train/loss_error": 0.513503909111023, "train/loss_total": 0.5963294506072998 }, { "epoch": 0.9206518835158963, "step": 3446, "train/loss_ctc": 0.5219827890396118, "train/loss_error": 0.47061219811439514, "train/loss_total": 0.480886310338974 }, { "epoch": 0.9209190488912637, "step": 3447, "train/loss_ctc": 1.2932226657867432, "train/loss_error": 0.4776308834552765, "train/loss_total": 0.6407492756843567 }, { "epoch": 0.921186214266631, "step": 3448, "train/loss_ctc": 0.6993082761764526, "train/loss_error": 0.5431489944458008, "train/loss_total": 0.5743808746337891 }, { "epoch": 0.9214533796419984, "step": 3449, "train/loss_ctc": 0.6030756831169128, "train/loss_error": 0.5120809674263, "train/loss_total": 0.5302799344062805 }, { "epoch": 0.9217205450173658, "grad_norm": 1.8805906772613525, "learning_rate": 2.4476088698904623e-05, "loss": 0.5692, "step": 3450 }, { "epoch": 0.9217205450173658, "step": 3450, "train/loss_ctc": 0.42219382524490356, "train/loss_error": 0.4333401620388031, "train/loss_total": 0.4311109185218811 }, { "epoch": 0.9219877103927331, "step": 3451, "train/loss_ctc": 1.0206055641174316, "train/loss_error": 0.49787452816963196, "train/loss_total": 0.6024207472801208 }, { "epoch": 0.9222548757681005, "step": 3452, "train/loss_ctc": 0.6684680581092834, "train/loss_error": 0.4654586911201477, "train/loss_total": 0.5060606002807617 }, { "epoch": 0.9225220411434678, "step": 3453, "train/loss_ctc": 0.6635431051254272, "train/loss_error": 0.4690178632736206, "train/loss_total": 0.5079229474067688 }, { "epoch": 0.9227892065188351, "step": 3454, "train/loss_ctc": 0.5157069563865662, "train/loss_error": 0.5642732381820679, "train/loss_total": 0.5545600056648254 }, { "epoch": 0.9230563718942025, "step": 3455, "train/loss_ctc": 0.4564465880393982, "train/loss_error": 0.38205018639564514, "train/loss_total": 0.39692947268486023 }, { "epoch": 0.9233235372695698, "step": 3456, "train/loss_ctc": 1.0508222579956055, "train/loss_error": 0.4542921185493469, 
"train/loss_total": 0.5735981464385986 }, { "epoch": 0.9235907026449373, "step": 3457, "train/loss_ctc": 0.5318403244018555, "train/loss_error": 0.47391802072525024, "train/loss_total": 0.4855024814605713 }, { "epoch": 0.9238578680203046, "step": 3458, "train/loss_ctc": 0.6624482870101929, "train/loss_error": 0.47409430146217346, "train/loss_total": 0.5117651224136353 }, { "epoch": 0.9241250333956719, "step": 3459, "train/loss_ctc": 0.4398803412914276, "train/loss_error": 0.49075525999069214, "train/loss_total": 0.48058027029037476 }, { "epoch": 0.9243921987710393, "grad_norm": 2.540977716445923, "learning_rate": 2.446005877638258e-05, "loss": 0.505, "step": 3460 }, { "epoch": 0.9243921987710393, "step": 3460, "train/loss_ctc": 1.157395839691162, "train/loss_error": 0.4839162826538086, "train/loss_total": 0.6186121702194214 }, { "epoch": 0.9246593641464066, "step": 3461, "train/loss_ctc": 1.2313902378082275, "train/loss_error": 0.46820178627967834, "train/loss_total": 0.6208394765853882 }, { "epoch": 0.924926529521774, "step": 3462, "train/loss_ctc": 1.0578675270080566, "train/loss_error": 0.47453105449676514, "train/loss_total": 0.5911983251571655 }, { "epoch": 0.9251936948971413, "step": 3463, "train/loss_ctc": 1.1404472589492798, "train/loss_error": 0.43710023164749146, "train/loss_total": 0.5777696371078491 }, { "epoch": 0.9254608602725087, "step": 3464, "train/loss_ctc": 1.0008265972137451, "train/loss_error": 0.46532800793647766, "train/loss_total": 0.5724277496337891 }, { "epoch": 0.925728025647876, "step": 3465, "train/loss_ctc": 1.5057417154312134, "train/loss_error": 0.471383661031723, "train/loss_total": 0.6782553195953369 }, { "epoch": 0.9259951910232433, "step": 3466, "train/loss_ctc": 0.641379714012146, "train/loss_error": 0.4968814253807068, "train/loss_total": 0.5257810950279236 }, { "epoch": 0.9262623563986108, "step": 3467, "train/loss_ctc": 0.7479777336120605, "train/loss_error": 0.4873473644256592, "train/loss_total": 0.5394734144210815 }, { 
"epoch": 0.9265295217739781, "step": 3468, "train/loss_ctc": 1.6838146448135376, "train/loss_error": 0.47943252325057983, "train/loss_total": 0.7203089594841003 }, { "epoch": 0.9267966871493455, "step": 3469, "train/loss_ctc": 0.46828731894493103, "train/loss_error": 0.4116942584514618, "train/loss_total": 0.4230128824710846 }, { "epoch": 0.9270638525247128, "grad_norm": 1.8960540294647217, "learning_rate": 2.444402885386054e-05, "loss": 0.5868, "step": 3470 }, { "epoch": 0.9270638525247128, "step": 3470, "train/loss_ctc": 0.8654427528381348, "train/loss_error": 0.4821591377258301, "train/loss_total": 0.5588158369064331 }, { "epoch": 0.9273310179000801, "step": 3471, "train/loss_ctc": 0.6040160059928894, "train/loss_error": 0.5031249523162842, "train/loss_total": 0.5233031511306763 }, { "epoch": 0.9275981832754475, "step": 3472, "train/loss_ctc": 1.0789320468902588, "train/loss_error": 0.49328941106796265, "train/loss_total": 0.6104179620742798 }, { "epoch": 0.9278653486508148, "step": 3473, "train/loss_ctc": 0.9979490041732788, "train/loss_error": 0.4362955689430237, "train/loss_total": 0.5486262440681458 }, { "epoch": 0.9281325140261822, "step": 3474, "train/loss_ctc": 0.4198561906814575, "train/loss_error": 0.5117581486701965, "train/loss_total": 0.4933777451515198 }, { "epoch": 0.9283996794015495, "step": 3475, "train/loss_ctc": 0.7907875180244446, "train/loss_error": 0.4572061002254486, "train/loss_total": 0.5239223837852478 }, { "epoch": 0.928666844776917, "step": 3476, "train/loss_ctc": 0.48020535707473755, "train/loss_error": 0.5161042213439941, "train/loss_total": 0.5089244246482849 }, { "epoch": 0.9289340101522843, "step": 3477, "train/loss_ctc": 1.311869740486145, "train/loss_error": 0.5061720013618469, "train/loss_total": 0.6673115491867065 }, { "epoch": 0.9292011755276516, "step": 3478, "train/loss_ctc": 0.5481843948364258, "train/loss_error": 0.39164912700653076, "train/loss_total": 0.4229561686515808 }, { "epoch": 0.929468340903019, "step": 3479, 
"train/loss_ctc": 1.3471051454544067, "train/loss_error": 0.5316687822341919, "train/loss_total": 0.694756031036377 }, { "epoch": 0.9297355062783863, "grad_norm": 1.6087114810943604, "learning_rate": 2.4427998931338497e-05, "loss": 0.5552, "step": 3480 }, { "epoch": 0.9297355062783863, "step": 3480, "train/loss_ctc": 0.5559735298156738, "train/loss_error": 0.4505469799041748, "train/loss_total": 0.47163230180740356 }, { "epoch": 0.9300026716537537, "step": 3481, "train/loss_ctc": 1.3150534629821777, "train/loss_error": 0.5098294019699097, "train/loss_total": 0.6708742380142212 }, { "epoch": 0.930269837029121, "step": 3482, "train/loss_ctc": 0.8281481862068176, "train/loss_error": 0.4219728708267212, "train/loss_total": 0.5032079219818115 }, { "epoch": 0.9305370024044883, "step": 3483, "train/loss_ctc": 0.7901828289031982, "train/loss_error": 0.4174014925956726, "train/loss_total": 0.49195778369903564 }, { "epoch": 0.9308041677798558, "step": 3484, "train/loss_ctc": 0.6499196887016296, "train/loss_error": 0.5157840847969055, "train/loss_total": 0.5426112413406372 }, { "epoch": 0.9310713331552231, "step": 3485, "train/loss_ctc": 1.2510197162628174, "train/loss_error": 0.41387614607810974, "train/loss_total": 0.5813048481941223 }, { "epoch": 0.9313384985305905, "step": 3486, "train/loss_ctc": 0.4515354037284851, "train/loss_error": 0.46927493810653687, "train/loss_total": 0.4657270312309265 }, { "epoch": 0.9316056639059578, "step": 3487, "train/loss_ctc": 1.1526035070419312, "train/loss_error": 0.4799220860004425, "train/loss_total": 0.6144583821296692 }, { "epoch": 0.9318728292813251, "step": 3488, "train/loss_ctc": 1.1064010858535767, "train/loss_error": 0.46713167428970337, "train/loss_total": 0.5949856042861938 }, { "epoch": 0.9321399946566925, "step": 3489, "train/loss_ctc": 1.1018662452697754, "train/loss_error": 0.44816410541534424, "train/loss_total": 0.5789045691490173 }, { "epoch": 0.9324071600320598, "grad_norm": 1.3838632106781006, "learning_rate": 
2.4411969008816456e-05, "loss": 0.5516, "step": 3490 }, { "epoch": 0.9324071600320598, "step": 3490, "train/loss_ctc": 1.470228672027588, "train/loss_error": 0.43898653984069824, "train/loss_total": 0.6452349424362183 }, { "epoch": 0.9326743254074272, "step": 3491, "train/loss_ctc": 0.9679989218711853, "train/loss_error": 0.43970707058906555, "train/loss_total": 0.5453654527664185 }, { "epoch": 0.9329414907827945, "step": 3492, "train/loss_ctc": 0.9240432977676392, "train/loss_error": 0.516839861869812, "train/loss_total": 0.5982805490493774 }, { "epoch": 0.933208656158162, "step": 3493, "train/loss_ctc": 0.5208671689033508, "train/loss_error": 0.44468623399734497, "train/loss_total": 0.4599224328994751 }, { "epoch": 0.9334758215335293, "step": 3494, "train/loss_ctc": 1.2871720790863037, "train/loss_error": 0.5072513818740845, "train/loss_total": 0.6632355451583862 }, { "epoch": 0.9337429869088966, "step": 3495, "train/loss_ctc": 1.0999318361282349, "train/loss_error": 0.488004207611084, "train/loss_total": 0.6103897094726562 }, { "epoch": 0.934010152284264, "step": 3496, "train/loss_ctc": 0.5185856819152832, "train/loss_error": 0.45718052983283997, "train/loss_total": 0.469461590051651 }, { "epoch": 0.9342773176596313, "step": 3497, "train/loss_ctc": 0.662216305732727, "train/loss_error": 0.4509694576263428, "train/loss_total": 0.4932188391685486 }, { "epoch": 0.9345444830349987, "step": 3498, "train/loss_ctc": 0.8341490030288696, "train/loss_error": 0.48977938294410706, "train/loss_total": 0.5586532950401306 }, { "epoch": 0.934811648410366, "step": 3499, "train/loss_ctc": 1.3119332790374756, "train/loss_error": 0.5965220332145691, "train/loss_total": 0.7396042943000793 }, { "epoch": 0.9350788137857333, "grad_norm": 2.954911947250366, "learning_rate": 2.4395939086294417e-05, "loss": 0.5783, "step": 3500 }, { "epoch": 0.9350788137857333, "step": 3500, "train/loss_ctc": 1.0918447971343994, "train/loss_error": 0.5616689920425415, "train/loss_total": 0.667704164981842 
}, { "epoch": 0.9353459791611007, "step": 3501, "train/loss_ctc": 1.0671498775482178, "train/loss_error": 0.4396892189979553, "train/loss_total": 0.5651813745498657 }, { "epoch": 0.935613144536468, "step": 3502, "train/loss_ctc": 0.6185129880905151, "train/loss_error": 0.4528614580631256, "train/loss_total": 0.48599177598953247 }, { "epoch": 0.9358803099118355, "step": 3503, "train/loss_ctc": 0.6180313229560852, "train/loss_error": 0.46759307384490967, "train/loss_total": 0.4976807236671448 }, { "epoch": 0.9361474752872028, "step": 3504, "train/loss_ctc": 1.1647446155548096, "train/loss_error": 0.48987817764282227, "train/loss_total": 0.6248514652252197 }, { "epoch": 0.9364146406625702, "step": 3505, "train/loss_ctc": 1.1724096536636353, "train/loss_error": 0.456557035446167, "train/loss_total": 0.5997275710105896 }, { "epoch": 0.9366818060379375, "step": 3506, "train/loss_ctc": 0.719284176826477, "train/loss_error": 0.4476121664047241, "train/loss_total": 0.5019465684890747 }, { "epoch": 0.9369489714133048, "step": 3507, "train/loss_ctc": 0.6408095359802246, "train/loss_error": 0.501064658164978, "train/loss_total": 0.5290136337280273 }, { "epoch": 0.9372161367886722, "step": 3508, "train/loss_ctc": 0.7375044822692871, "train/loss_error": 0.5298808217048645, "train/loss_total": 0.5714055299758911 }, { "epoch": 0.9374833021640395, "step": 3509, "train/loss_ctc": 0.729225754737854, "train/loss_error": 0.4867127537727356, "train/loss_total": 0.5352153778076172 }, { "epoch": 0.9377504675394069, "grad_norm": 1.701902985572815, "learning_rate": 2.437990916377238e-05, "loss": 0.5579, "step": 3510 }, { "epoch": 0.9377504675394069, "step": 3510, "train/loss_ctc": 0.9813209772109985, "train/loss_error": 0.482461154460907, "train/loss_total": 0.5822331309318542 }, { "epoch": 0.9380176329147742, "step": 3511, "train/loss_ctc": 0.5932587385177612, "train/loss_error": 0.44673243165016174, "train/loss_total": 0.4760377109050751 }, { "epoch": 0.9382847982901416, "step": 3512, 
"train/loss_ctc": 0.6067032814025879, "train/loss_error": 0.531701385974884, "train/loss_total": 0.5467017889022827 }, { "epoch": 0.938551963665509, "step": 3513, "train/loss_ctc": 1.088606357574463, "train/loss_error": 0.5597662329673767, "train/loss_total": 0.665534257888794 }, { "epoch": 0.9388191290408763, "step": 3514, "train/loss_ctc": 0.6842907667160034, "train/loss_error": 0.3766138553619385, "train/loss_total": 0.43814921379089355 }, { "epoch": 0.9390862944162437, "step": 3515, "train/loss_ctc": 0.41728460788726807, "train/loss_error": 0.5480818748474121, "train/loss_total": 0.5219224095344543 }, { "epoch": 0.939353459791611, "step": 3516, "train/loss_ctc": 0.7342770099639893, "train/loss_error": 0.4660634696483612, "train/loss_total": 0.5197061896324158 }, { "epoch": 0.9396206251669783, "step": 3517, "train/loss_ctc": 0.370981365442276, "train/loss_error": 0.4152052104473114, "train/loss_total": 0.4063604474067688 }, { "epoch": 0.9398877905423457, "step": 3518, "train/loss_ctc": 1.206092119216919, "train/loss_error": 0.4946918785572052, "train/loss_total": 0.6369719505310059 }, { "epoch": 0.940154955917713, "step": 3519, "train/loss_ctc": 0.5747841596603394, "train/loss_error": 0.5029003024101257, "train/loss_total": 0.5172770619392395 }, { "epoch": 0.9404221212930804, "grad_norm": 1.119053602218628, "learning_rate": 2.4363879241250337e-05, "loss": 0.5311, "step": 3520 }, { "epoch": 0.9404221212930804, "step": 3520, "train/loss_ctc": 1.3056979179382324, "train/loss_error": 0.516110897064209, "train/loss_total": 0.6740283370018005 }, { "epoch": 0.9406892866684478, "step": 3521, "train/loss_ctc": 0.8414530754089355, "train/loss_error": 0.4861939251422882, "train/loss_total": 0.5572457313537598 }, { "epoch": 0.9409564520438152, "step": 3522, "train/loss_ctc": 0.9047218561172485, "train/loss_error": 0.4723462462425232, "train/loss_total": 0.5588213801383972 }, { "epoch": 0.9412236174191825, "step": 3523, "train/loss_ctc": 0.527671217918396, 
"train/loss_error": 0.4425772726535797, "train/loss_total": 0.45959606766700745 }, { "epoch": 0.9414907827945498, "step": 3524, "train/loss_ctc": 0.7199193239212036, "train/loss_error": 0.5438113212585449, "train/loss_total": 0.5790328979492188 }, { "epoch": 0.9417579481699172, "step": 3525, "train/loss_ctc": 0.34730294346809387, "train/loss_error": 0.4688054025173187, "train/loss_total": 0.44450491666793823 }, { "epoch": 0.9420251135452845, "step": 3526, "train/loss_ctc": 0.2237713634967804, "train/loss_error": 0.4654683470726013, "train/loss_total": 0.41712895035743713 }, { "epoch": 0.9422922789206519, "step": 3527, "train/loss_ctc": 0.586525559425354, "train/loss_error": 0.4928368926048279, "train/loss_total": 0.5115746259689331 }, { "epoch": 0.9425594442960192, "step": 3528, "train/loss_ctc": 0.8644538521766663, "train/loss_error": 0.41971340775489807, "train/loss_total": 0.5086615085601807 }, { "epoch": 0.9428266096713865, "step": 3529, "train/loss_ctc": 0.714561939239502, "train/loss_error": 0.43239647150039673, "train/loss_total": 0.4888295829296112 }, { "epoch": 0.943093775046754, "grad_norm": 2.0244014263153076, "learning_rate": 2.4347849318728295e-05, "loss": 0.5199, "step": 3530 }, { "epoch": 0.943093775046754, "step": 3530, "train/loss_ctc": 0.8247090578079224, "train/loss_error": 0.4859597980976105, "train/loss_total": 0.5537096261978149 }, { "epoch": 0.9433609404221213, "step": 3531, "train/loss_ctc": 1.3161065578460693, "train/loss_error": 0.46879643201828003, "train/loss_total": 0.6382584571838379 }, { "epoch": 0.9436281057974887, "step": 3532, "train/loss_ctc": 0.8928426504135132, "train/loss_error": 0.4581795632839203, "train/loss_total": 0.5451121926307678 }, { "epoch": 0.943895271172856, "step": 3533, "train/loss_ctc": 0.8839648962020874, "train/loss_error": 0.48369863629341125, "train/loss_total": 0.5637519359588623 }, { "epoch": 0.9441624365482234, "step": 3534, "train/loss_ctc": 0.5824025869369507, "train/loss_error": 0.5220387578010559, 
"train/loss_total": 0.534111499786377 }, { "epoch": 0.9444296019235907, "step": 3535, "train/loss_ctc": 0.6503121852874756, "train/loss_error": 0.4517728090286255, "train/loss_total": 0.4914807081222534 }, { "epoch": 0.944696767298958, "step": 3536, "train/loss_ctc": 1.5745413303375244, "train/loss_error": 0.42785999178886414, "train/loss_total": 0.6571962833404541 }, { "epoch": 0.9449639326743254, "step": 3537, "train/loss_ctc": 0.7851674556732178, "train/loss_error": 0.461735337972641, "train/loss_total": 0.5264217853546143 }, { "epoch": 0.9452310980496927, "step": 3538, "train/loss_ctc": 0.7639051079750061, "train/loss_error": 0.5132654905319214, "train/loss_total": 0.5633934140205383 }, { "epoch": 0.9454982634250602, "step": 3539, "train/loss_ctc": 0.7625647783279419, "train/loss_error": 0.5186635255813599, "train/loss_total": 0.5674437880516052 }, { "epoch": 0.9457654288004275, "grad_norm": 2.1657559871673584, "learning_rate": 2.4331819396206253e-05, "loss": 0.5641, "step": 3540 }, { "epoch": 0.9457654288004275, "step": 3540, "train/loss_ctc": 1.3172171115875244, "train/loss_error": 0.5065015554428101, "train/loss_total": 0.6686446666717529 }, { "epoch": 0.9460325941757948, "step": 3541, "train/loss_ctc": 1.0733959674835205, "train/loss_error": 0.4585513472557068, "train/loss_total": 0.5815202593803406 }, { "epoch": 0.9462997595511622, "step": 3542, "train/loss_ctc": 1.8981703519821167, "train/loss_error": 0.49357178807258606, "train/loss_total": 0.774491548538208 }, { "epoch": 0.9465669249265295, "step": 3543, "train/loss_ctc": 0.4219155013561249, "train/loss_error": 0.42433521151542664, "train/loss_total": 0.42385128140449524 }, { "epoch": 0.9468340903018969, "step": 3544, "train/loss_ctc": 0.837031364440918, "train/loss_error": 0.49139976501464844, "train/loss_total": 0.5605260729789734 }, { "epoch": 0.9471012556772642, "step": 3545, "train/loss_ctc": 0.537888765335083, "train/loss_error": 0.5602786540985107, "train/loss_total": 0.5558006763458252 }, { 
"epoch": 0.9473684210526315, "step": 3546, "train/loss_ctc": 0.7400788068771362, "train/loss_error": 0.5401682257652283, "train/loss_total": 0.5801503658294678 }, { "epoch": 0.9476355864279989, "step": 3547, "train/loss_ctc": 0.840261697769165, "train/loss_error": 0.5036677718162537, "train/loss_total": 0.5709865689277649 }, { "epoch": 0.9479027518033663, "step": 3548, "train/loss_ctc": 0.7012356519699097, "train/loss_error": 0.47091206908226013, "train/loss_total": 0.5169768333435059 }, { "epoch": 0.9481699171787337, "step": 3549, "train/loss_ctc": 0.9233070015907288, "train/loss_error": 0.4428565800189972, "train/loss_total": 0.5389466881752014 }, { "epoch": 0.948437082554101, "grad_norm": 1.3823477029800415, "learning_rate": 2.431578947368421e-05, "loss": 0.5772, "step": 3550 }, { "epoch": 0.948437082554101, "step": 3550, "train/loss_ctc": 1.5605864524841309, "train/loss_error": 0.4386001229286194, "train/loss_total": 0.6629974246025085 }, { "epoch": 0.9487042479294684, "step": 3551, "train/loss_ctc": 0.962056577205658, "train/loss_error": 0.41228368878364563, "train/loss_total": 0.5222382545471191 }, { "epoch": 0.9489714133048357, "step": 3552, "train/loss_ctc": 1.084665298461914, "train/loss_error": 0.5292890071868896, "train/loss_total": 0.6403642892837524 }, { "epoch": 0.949238578680203, "step": 3553, "train/loss_ctc": 1.326075792312622, "train/loss_error": 0.4526687562465668, "train/loss_total": 0.6273502111434937 }, { "epoch": 0.9495057440555704, "step": 3554, "train/loss_ctc": 0.5918725728988647, "train/loss_error": 0.4751342833042145, "train/loss_total": 0.49848195910453796 }, { "epoch": 0.9497729094309377, "step": 3555, "train/loss_ctc": 0.6447357535362244, "train/loss_error": 0.5374247431755066, "train/loss_total": 0.5588869452476501 }, { "epoch": 0.9500400748063051, "step": 3556, "train/loss_ctc": 0.6099054217338562, "train/loss_error": 0.4774196445941925, "train/loss_total": 0.5039168000221252 }, { "epoch": 0.9503072401816725, "step": 3557, 
"train/loss_ctc": 0.6123560667037964, "train/loss_error": 0.4335711598396301, "train/loss_total": 0.4693281352519989 }, { "epoch": 0.9505744055570398, "step": 3558, "train/loss_ctc": 0.7008876800537109, "train/loss_error": 0.48911044001579285, "train/loss_total": 0.5314658880233765 }, { "epoch": 0.9508415709324072, "step": 3559, "train/loss_ctc": 1.3742852210998535, "train/loss_error": 0.4710635840892792, "train/loss_total": 0.6517078876495361 }, { "epoch": 0.9511087363077745, "grad_norm": 1.5743591785430908, "learning_rate": 2.429975955116217e-05, "loss": 0.5667, "step": 3560 }, { "epoch": 0.9511087363077745, "step": 3560, "train/loss_ctc": 1.07756769657135, "train/loss_error": 0.5035180449485779, "train/loss_total": 0.6183279752731323 }, { "epoch": 0.9513759016831419, "step": 3561, "train/loss_ctc": 1.4964509010314941, "train/loss_error": 0.5139135122299194, "train/loss_total": 0.7104209661483765 }, { "epoch": 0.9516430670585092, "step": 3562, "train/loss_ctc": 1.3948365449905396, "train/loss_error": 0.4593004882335663, "train/loss_total": 0.6464077234268188 }, { "epoch": 0.9519102324338766, "step": 3563, "train/loss_ctc": 0.44160938262939453, "train/loss_error": 0.4519486129283905, "train/loss_total": 0.44988077878952026 }, { "epoch": 0.9521773978092439, "step": 3564, "train/loss_ctc": 0.47506290674209595, "train/loss_error": 0.40075814723968506, "train/loss_total": 0.4156191051006317 }, { "epoch": 0.9524445631846112, "step": 3565, "train/loss_ctc": 0.6770831346511841, "train/loss_error": 0.5041428208351135, "train/loss_total": 0.5387308597564697 }, { "epoch": 0.9527117285599787, "step": 3566, "train/loss_ctc": 1.0999462604522705, "train/loss_error": 0.5255690217018127, "train/loss_total": 0.6404444575309753 }, { "epoch": 0.952978893935346, "step": 3567, "train/loss_ctc": 1.5097756385803223, "train/loss_error": 0.4929899275302887, "train/loss_total": 0.6963471174240112 }, { "epoch": 0.9532460593107134, "step": 3568, "train/loss_ctc": 0.5362870097160339, 
"train/loss_error": 0.4450371563434601, "train/loss_total": 0.4632871150970459 }, { "epoch": 0.9535132246860807, "step": 3569, "train/loss_ctc": 0.9997315406799316, "train/loss_error": 0.4028279781341553, "train/loss_total": 0.5222086906433105 }, { "epoch": 0.953780390061448, "grad_norm": 1.5191932916641235, "learning_rate": 2.428372962864013e-05, "loss": 0.5702, "step": 3570 }, { "epoch": 0.953780390061448, "step": 3570, "train/loss_ctc": 1.5983611345291138, "train/loss_error": 0.4600861072540283, "train/loss_total": 0.6877411603927612 }, { "epoch": 0.9540475554368154, "step": 3571, "train/loss_ctc": 0.6618959903717041, "train/loss_error": 0.4696590006351471, "train/loss_total": 0.5081064105033875 }, { "epoch": 0.9543147208121827, "step": 3572, "train/loss_ctc": 0.5210601091384888, "train/loss_error": 0.49390703439712524, "train/loss_total": 0.49933764338493347 }, { "epoch": 0.9545818861875501, "step": 3573, "train/loss_ctc": 0.7401055097579956, "train/loss_error": 0.4313472807407379, "train/loss_total": 0.4930989444255829 }, { "epoch": 0.9548490515629174, "step": 3574, "train/loss_ctc": 0.6724354028701782, "train/loss_error": 0.47244253754615784, "train/loss_total": 0.512441098690033 }, { "epoch": 0.9551162169382847, "step": 3575, "train/loss_ctc": 0.6270970106124878, "train/loss_error": 0.47915875911712646, "train/loss_total": 0.5087463855743408 }, { "epoch": 0.9553833823136522, "step": 3576, "train/loss_ctc": 0.3061321973800659, "train/loss_error": 0.4883533716201782, "train/loss_total": 0.4519091248512268 }, { "epoch": 0.9556505476890195, "step": 3577, "train/loss_ctc": 0.5480928421020508, "train/loss_error": 0.46477118134498596, "train/loss_total": 0.48143553733825684 }, { "epoch": 0.9559177130643869, "step": 3578, "train/loss_ctc": 1.3419170379638672, "train/loss_error": 0.4394846558570862, "train/loss_total": 0.6199711561203003 }, { "epoch": 0.9561848784397542, "step": 3579, "train/loss_ctc": 1.225292444229126, "train/loss_error": 0.5415983200073242, 
"train/loss_total": 0.6783371567726135 }, { "epoch": 0.9564520438151216, "grad_norm": 2.0246994495391846, "learning_rate": 2.426769970611809e-05, "loss": 0.5441, "step": 3580 }, { "epoch": 0.9564520438151216, "step": 3580, "train/loss_ctc": 0.7003951072692871, "train/loss_error": 0.5237777829170227, "train/loss_total": 0.5591012239456177 }, { "epoch": 0.9567192091904889, "step": 3581, "train/loss_ctc": 0.6158161759376526, "train/loss_error": 0.4750303328037262, "train/loss_total": 0.5031875371932983 }, { "epoch": 0.9569863745658562, "step": 3582, "train/loss_ctc": 0.5346684455871582, "train/loss_error": 0.4563118517398834, "train/loss_total": 0.4719831645488739 }, { "epoch": 0.9572535399412236, "step": 3583, "train/loss_ctc": 0.8019940257072449, "train/loss_error": 0.44339045882225037, "train/loss_total": 0.5151112079620361 }, { "epoch": 0.957520705316591, "step": 3584, "train/loss_ctc": 1.4313206672668457, "train/loss_error": 0.4371066689491272, "train/loss_total": 0.6359494924545288 }, { "epoch": 0.9577878706919584, "step": 3585, "train/loss_ctc": 0.6053365468978882, "train/loss_error": 0.4711320996284485, "train/loss_total": 0.4979729950428009 }, { "epoch": 0.9580550360673257, "step": 3586, "train/loss_ctc": 0.16293680667877197, "train/loss_error": 0.4519561231136322, "train/loss_total": 0.3941522538661957 }, { "epoch": 0.958322201442693, "step": 3587, "train/loss_ctc": 0.7985769510269165, "train/loss_error": 0.4973079562187195, "train/loss_total": 0.5575617551803589 }, { "epoch": 0.9585893668180604, "step": 3588, "train/loss_ctc": 1.0730763673782349, "train/loss_error": 0.5464974045753479, "train/loss_total": 0.6518132090568542 }, { "epoch": 0.9588565321934277, "step": 3589, "train/loss_ctc": 0.905291736125946, "train/loss_error": 0.44162479043006897, "train/loss_total": 0.5343581438064575 }, { "epoch": 0.9591236975687951, "grad_norm": 1.3916069269180298, "learning_rate": 2.4251669783596047e-05, "loss": 0.5321, "step": 3590 }, { "epoch": 0.9591236975687951, 
"step": 3590, "train/loss_ctc": 0.42478352785110474, "train/loss_error": 0.4232303500175476, "train/loss_total": 0.42354097962379456 }, { "epoch": 0.9593908629441624, "step": 3591, "train/loss_ctc": 1.4719223976135254, "train/loss_error": 0.5075427293777466, "train/loss_total": 0.7004187107086182 }, { "epoch": 0.9596580283195298, "step": 3592, "train/loss_ctc": 1.0207420587539673, "train/loss_error": 0.45902732014656067, "train/loss_total": 0.5713702440261841 }, { "epoch": 0.9599251936948971, "step": 3593, "train/loss_ctc": 0.659548819065094, "train/loss_error": 0.4410279393196106, "train/loss_total": 0.48473215103149414 }, { "epoch": 0.9601923590702645, "step": 3594, "train/loss_ctc": 0.9214853644371033, "train/loss_error": 0.5306939482688904, "train/loss_total": 0.608852207660675 }, { "epoch": 0.9604595244456319, "step": 3595, "train/loss_ctc": 0.8675329089164734, "train/loss_error": 0.4748983383178711, "train/loss_total": 0.5534252524375916 }, { "epoch": 0.9607266898209992, "step": 3596, "train/loss_ctc": 0.5585525035858154, "train/loss_error": 0.4200592041015625, "train/loss_total": 0.44775789976119995 }, { "epoch": 0.9609938551963666, "step": 3597, "train/loss_ctc": 1.090782880783081, "train/loss_error": 0.4642941653728485, "train/loss_total": 0.589591920375824 }, { "epoch": 0.9612610205717339, "step": 3598, "train/loss_ctc": 0.7117000818252563, "train/loss_error": 0.45949211716651917, "train/loss_total": 0.5099337100982666 }, { "epoch": 0.9615281859471012, "step": 3599, "train/loss_ctc": 0.5586119890213013, "train/loss_error": 0.5295820832252502, "train/loss_total": 0.5353880524635315 }, { "epoch": 0.9617953513224686, "grad_norm": 2.9232282638549805, "learning_rate": 2.4235639861074005e-05, "loss": 0.5425, "step": 3600 }, { "epoch": 0.9617953513224686, "step": 3600, "train/loss_ctc": 0.7934017181396484, "train/loss_error": 0.47914236783981323, "train/loss_total": 0.5419942140579224 }, { "epoch": 0.9620625166978359, "step": 3601, "train/loss_ctc": 
1.2169246673583984, "train/loss_error": 0.503657877445221, "train/loss_total": 0.6463112235069275 }, { "epoch": 0.9623296820732034, "step": 3602, "train/loss_ctc": 0.6308504939079285, "train/loss_error": 0.46262964606285095, "train/loss_total": 0.49627381563186646 }, { "epoch": 0.9625968474485707, "step": 3603, "train/loss_ctc": 1.3921645879745483, "train/loss_error": 0.4615439772605896, "train/loss_total": 0.6476681232452393 }, { "epoch": 0.962864012823938, "step": 3604, "train/loss_ctc": 1.189175009727478, "train/loss_error": 0.46468883752822876, "train/loss_total": 0.6095860600471497 }, { "epoch": 0.9631311781993054, "step": 3605, "train/loss_ctc": 0.6841964721679688, "train/loss_error": 0.5065358877182007, "train/loss_total": 0.5420680046081543 }, { "epoch": 0.9633983435746727, "step": 3606, "train/loss_ctc": 0.8929882645606995, "train/loss_error": 0.46617981791496277, "train/loss_total": 0.5515415072441101 }, { "epoch": 0.9636655089500401, "step": 3607, "train/loss_ctc": 0.7986168265342712, "train/loss_error": 0.5032637715339661, "train/loss_total": 0.562334418296814 }, { "epoch": 0.9639326743254074, "step": 3608, "train/loss_ctc": 1.0881502628326416, "train/loss_error": 0.5337350964546204, "train/loss_total": 0.6446181535720825 }, { "epoch": 0.9641998397007748, "step": 3609, "train/loss_ctc": 1.5979535579681396, "train/loss_error": 0.4884597957134247, "train/loss_total": 0.7103585600852966 }, { "epoch": 0.9644670050761421, "grad_norm": 2.621793031692505, "learning_rate": 2.4219609938551963e-05, "loss": 0.5953, "step": 3610 }, { "epoch": 0.9644670050761421, "step": 3610, "train/loss_ctc": 0.9421782493591309, "train/loss_error": 0.5166330933570862, "train/loss_total": 0.601742148399353 }, { "epoch": 0.9647341704515094, "step": 3611, "train/loss_ctc": 1.1913633346557617, "train/loss_error": 0.46421465277671814, "train/loss_total": 0.6096444129943848 }, { "epoch": 0.9650013358268769, "step": 3612, "train/loss_ctc": 0.8571532964706421, "train/loss_error": 
0.5832118391990662, "train/loss_total": 0.6380001306533813 }, { "epoch": 0.9652685012022442, "step": 3613, "train/loss_ctc": 0.6893675327301025, "train/loss_error": 0.414002925157547, "train/loss_total": 0.46907585859298706 }, { "epoch": 0.9655356665776116, "step": 3614, "train/loss_ctc": 1.6983113288879395, "train/loss_error": 0.4822472929954529, "train/loss_total": 0.7254601120948792 }, { "epoch": 0.9658028319529789, "step": 3615, "train/loss_ctc": 1.5347583293914795, "train/loss_error": 0.5761570930480957, "train/loss_total": 0.7678773403167725 }, { "epoch": 0.9660699973283462, "step": 3616, "train/loss_ctc": 0.6837830543518066, "train/loss_error": 0.4366667866706848, "train/loss_total": 0.4860900640487671 }, { "epoch": 0.9663371627037136, "step": 3617, "train/loss_ctc": 0.6411715745925903, "train/loss_error": 0.4993136525154114, "train/loss_total": 0.5276852250099182 }, { "epoch": 0.9666043280790809, "step": 3618, "train/loss_ctc": 0.7318233251571655, "train/loss_error": 0.5082970857620239, "train/loss_total": 0.5530023574829102 }, { "epoch": 0.9668714934544483, "step": 3619, "train/loss_ctc": 0.7987982034683228, "train/loss_error": 0.42739149928092957, "train/loss_total": 0.5016728639602661 }, { "epoch": 0.9671386588298156, "grad_norm": 1.9986977577209473, "learning_rate": 2.4203580016029924e-05, "loss": 0.588, "step": 3620 }, { "epoch": 0.9671386588298156, "step": 3620, "train/loss_ctc": 0.5150967836380005, "train/loss_error": 0.5008513927459717, "train/loss_total": 0.5037004947662354 }, { "epoch": 0.9674058242051831, "step": 3621, "train/loss_ctc": 1.4351117610931396, "train/loss_error": 0.504712700843811, "train/loss_total": 0.6907925605773926 }, { "epoch": 0.9676729895805504, "step": 3622, "train/loss_ctc": 0.3352503776550293, "train/loss_error": 0.5121802091598511, "train/loss_total": 0.4767942428588867 }, { "epoch": 0.9679401549559177, "step": 3623, "train/loss_ctc": 0.5495129823684692, "train/loss_error": 0.5150138735771179, "train/loss_total": 
0.5219137072563171 }, { "epoch": 0.9682073203312851, "step": 3624, "train/loss_ctc": 1.0727643966674805, "train/loss_error": 0.48513898253440857, "train/loss_total": 0.6026641130447388 }, { "epoch": 0.9684744857066524, "step": 3625, "train/loss_ctc": 0.9473322629928589, "train/loss_error": 0.481611430644989, "train/loss_total": 0.5747556090354919 }, { "epoch": 0.9687416510820198, "step": 3626, "train/loss_ctc": 1.4376780986785889, "train/loss_error": 0.536483108997345, "train/loss_total": 0.7167221307754517 }, { "epoch": 0.9690088164573871, "step": 3627, "train/loss_ctc": 0.32441940903663635, "train/loss_error": 0.48806360363960266, "train/loss_total": 0.45533478260040283 }, { "epoch": 0.9692759818327544, "step": 3628, "train/loss_ctc": 1.2856471538543701, "train/loss_error": 0.4879048466682434, "train/loss_total": 0.6474533081054688 }, { "epoch": 0.9695431472081218, "step": 3629, "train/loss_ctc": 0.829481303691864, "train/loss_error": 0.41986000537872314, "train/loss_total": 0.5017842650413513 }, { "epoch": 0.9698103125834892, "grad_norm": 2.275308132171631, "learning_rate": 2.4187550093507882e-05, "loss": 0.5692, "step": 3630 }, { "epoch": 0.9698103125834892, "step": 3630, "train/loss_ctc": 0.4447995722293854, "train/loss_error": 0.529486358165741, "train/loss_total": 0.5125489830970764 }, { "epoch": 0.9700774779588566, "step": 3631, "train/loss_ctc": 0.6857032179832458, "train/loss_error": 0.48156508803367615, "train/loss_total": 0.522392749786377 }, { "epoch": 0.9703446433342239, "step": 3632, "train/loss_ctc": 0.4762127101421356, "train/loss_error": 0.4812159836292267, "train/loss_total": 0.48021531105041504 }, { "epoch": 0.9706118087095912, "step": 3633, "train/loss_ctc": 1.1414210796356201, "train/loss_error": 0.49313876032829285, "train/loss_total": 0.6227952241897583 }, { "epoch": 0.9708789740849586, "step": 3634, "train/loss_ctc": 0.6694636940956116, "train/loss_error": 0.43670347332954407, "train/loss_total": 0.483255535364151 }, { "epoch": 
0.9711461394603259, "step": 3635, "train/loss_ctc": 0.7957303524017334, "train/loss_error": 0.39559781551361084, "train/loss_total": 0.47562432289123535 }, { "epoch": 0.9714133048356933, "step": 3636, "train/loss_ctc": 0.5392587184906006, "train/loss_error": 0.42568039894104004, "train/loss_total": 0.44839605689048767 }, { "epoch": 0.9716804702110606, "step": 3637, "train/loss_ctc": 0.6990938782691956, "train/loss_error": 0.47271549701690674, "train/loss_total": 0.5179911851882935 }, { "epoch": 0.971947635586428, "step": 3638, "train/loss_ctc": 0.6805181503295898, "train/loss_error": 0.4926875829696655, "train/loss_total": 0.5302537083625793 }, { "epoch": 0.9722148009617954, "step": 3639, "train/loss_ctc": 0.5349355340003967, "train/loss_error": 0.5367154479026794, "train/loss_total": 0.5363594889640808 }, { "epoch": 0.9724819663371627, "grad_norm": 2.716036796569824, "learning_rate": 2.417152017098584e-05, "loss": 0.513, "step": 3640 }, { "epoch": 0.9724819663371627, "step": 3640, "train/loss_ctc": 0.5502064228057861, "train/loss_error": 0.5314725637435913, "train/loss_total": 0.5352193117141724 }, { "epoch": 0.9727491317125301, "step": 3641, "train/loss_ctc": 1.3377187252044678, "train/loss_error": 0.5493111610412598, "train/loss_total": 0.7069926857948303 }, { "epoch": 0.9730162970878974, "step": 3642, "train/loss_ctc": 0.6238168478012085, "train/loss_error": 0.4259561002254486, "train/loss_total": 0.4655282497406006 }, { "epoch": 0.9732834624632648, "step": 3643, "train/loss_ctc": 0.9526464343070984, "train/loss_error": 0.465654581785202, "train/loss_total": 0.5630529522895813 }, { "epoch": 0.9735506278386321, "step": 3644, "train/loss_ctc": 0.778344452381134, "train/loss_error": 0.46831369400024414, "train/loss_total": 0.53031986951828 }, { "epoch": 0.9738177932139994, "step": 3645, "train/loss_ctc": 1.1610257625579834, "train/loss_error": 0.485316663980484, "train/loss_total": 0.6204584836959839 }, { "epoch": 0.9740849585893668, "step": 3646, 
"train/loss_ctc": 0.9893320798873901, "train/loss_error": 0.460464209318161, "train/loss_total": 0.5662378072738647 }, { "epoch": 0.9743521239647341, "step": 3647, "train/loss_ctc": 0.5925207138061523, "train/loss_error": 0.5643618702888489, "train/loss_total": 0.5699936747550964 }, { "epoch": 0.9746192893401016, "step": 3648, "train/loss_ctc": 0.5202575922012329, "train/loss_error": 0.4666767716407776, "train/loss_total": 0.47739294171333313 }, { "epoch": 0.9748864547154689, "step": 3649, "train/loss_ctc": 0.5855245590209961, "train/loss_error": 0.5366030931472778, "train/loss_total": 0.5463873744010925 }, { "epoch": 0.9751536200908363, "grad_norm": 2.002140998840332, "learning_rate": 2.41554902484638e-05, "loss": 0.5582, "step": 3650 }, { "epoch": 0.9751536200908363, "step": 3650, "train/loss_ctc": 0.8125630617141724, "train/loss_error": 0.48201075196266174, "train/loss_total": 0.5481212139129639 }, { "epoch": 0.9754207854662036, "step": 3651, "train/loss_ctc": 0.8378018140792847, "train/loss_error": 0.5017126798629761, "train/loss_total": 0.5689305067062378 }, { "epoch": 0.9756879508415709, "step": 3652, "train/loss_ctc": 1.1438250541687012, "train/loss_error": 0.4518336057662964, "train/loss_total": 0.5902318954467773 }, { "epoch": 0.9759551162169383, "step": 3653, "train/loss_ctc": 0.4193516969680786, "train/loss_error": 0.4617287516593933, "train/loss_total": 0.4532533586025238 }, { "epoch": 0.9762222815923056, "step": 3654, "train/loss_ctc": 0.5214437246322632, "train/loss_error": 0.5095190405845642, "train/loss_total": 0.5119040012359619 }, { "epoch": 0.976489446967673, "step": 3655, "train/loss_ctc": 0.5404855608940125, "train/loss_error": 0.4778842329978943, "train/loss_total": 0.49040448665618896 }, { "epoch": 0.9767566123430403, "step": 3656, "train/loss_ctc": 0.48459377884864807, "train/loss_error": 0.5231141448020935, "train/loss_total": 0.5154100656509399 }, { "epoch": 0.9770237777184076, "step": 3657, "train/loss_ctc": 1.127035140991211, 
"train/loss_error": 0.4658545255661011, "train/loss_total": 0.598090648651123 }, { "epoch": 0.9772909430937751, "step": 3658, "train/loss_ctc": 0.4383711814880371, "train/loss_error": 0.44862109422683716, "train/loss_total": 0.44657111167907715 }, { "epoch": 0.9775581084691424, "step": 3659, "train/loss_ctc": 0.8042476177215576, "train/loss_error": 0.5035569667816162, "train/loss_total": 0.5636951327323914 }, { "epoch": 0.9778252738445098, "grad_norm": 1.5016952753067017, "learning_rate": 2.4139460325941756e-05, "loss": 0.5287, "step": 3660 }, { "epoch": 0.9778252738445098, "step": 3660, "train/loss_ctc": 0.825756847858429, "train/loss_error": 0.4829649329185486, "train/loss_total": 0.5515233278274536 }, { "epoch": 0.9780924392198771, "step": 3661, "train/loss_ctc": 0.6465924382209778, "train/loss_error": 0.5703026652336121, "train/loss_total": 0.5855606198310852 }, { "epoch": 0.9783596045952444, "step": 3662, "train/loss_ctc": 0.8303977251052856, "train/loss_error": 0.47830620408058167, "train/loss_total": 0.5487245321273804 }, { "epoch": 0.9786267699706118, "step": 3663, "train/loss_ctc": 0.8534298539161682, "train/loss_error": 0.4620731770992279, "train/loss_total": 0.5403445363044739 }, { "epoch": 0.9788939353459791, "step": 3664, "train/loss_ctc": 0.48138612508773804, "train/loss_error": 0.456743061542511, "train/loss_total": 0.4616716802120209 }, { "epoch": 0.9791611007213465, "step": 3665, "train/loss_ctc": 0.6678726673126221, "train/loss_error": 0.43566271662712097, "train/loss_total": 0.48210471868515015 }, { "epoch": 0.9794282660967139, "step": 3666, "train/loss_ctc": 0.6599935293197632, "train/loss_error": 0.48940813541412354, "train/loss_total": 0.5235252380371094 }, { "epoch": 0.9796954314720813, "step": 3667, "train/loss_ctc": 1.4339334964752197, "train/loss_error": 0.48831140995025635, "train/loss_total": 0.6774358749389648 }, { "epoch": 0.9799625968474486, "step": 3668, "train/loss_ctc": 1.023255705833435, "train/loss_error": 0.5243828296661377, 
"train/loss_total": 0.6241574287414551 }, { "epoch": 0.9802297622228159, "step": 3669, "train/loss_ctc": 1.115569829940796, "train/loss_error": 0.4195927381515503, "train/loss_total": 0.5587881803512573 }, { "epoch": 0.9804969275981833, "grad_norm": 2.2825865745544434, "learning_rate": 2.4123430403419715e-05, "loss": 0.5554, "step": 3670 }, { "epoch": 0.9804969275981833, "step": 3670, "train/loss_ctc": 0.6298229098320007, "train/loss_error": 0.47772783041000366, "train/loss_total": 0.5081468820571899 }, { "epoch": 0.9807640929735506, "step": 3671, "train/loss_ctc": 0.779216468334198, "train/loss_error": 0.5136445164680481, "train/loss_total": 0.566758930683136 }, { "epoch": 0.981031258348918, "step": 3672, "train/loss_ctc": 1.146583080291748, "train/loss_error": 0.5158827900886536, "train/loss_total": 0.6420228481292725 }, { "epoch": 0.9812984237242853, "step": 3673, "train/loss_ctc": 1.1768052577972412, "train/loss_error": 0.4615383744239807, "train/loss_total": 0.6045917868614197 }, { "epoch": 0.9815655890996526, "step": 3674, "train/loss_ctc": 1.5036171674728394, "train/loss_error": 0.5200534462928772, "train/loss_total": 0.7167662382125854 }, { "epoch": 0.98183275447502, "step": 3675, "train/loss_ctc": 0.6632317304611206, "train/loss_error": 0.45250844955444336, "train/loss_total": 0.4946531057357788 }, { "epoch": 0.9820999198503874, "step": 3676, "train/loss_ctc": 0.923117995262146, "train/loss_error": 0.4609510600566864, "train/loss_total": 0.5533844232559204 }, { "epoch": 0.9823670852257548, "step": 3677, "train/loss_ctc": 1.5000905990600586, "train/loss_error": 0.48871198296546936, "train/loss_total": 0.6909877061843872 }, { "epoch": 0.9826342506011221, "step": 3678, "train/loss_ctc": 0.7138301134109497, "train/loss_error": 0.49874040484428406, "train/loss_total": 0.5417583584785461 }, { "epoch": 0.9829014159764895, "step": 3679, "train/loss_ctc": 0.41933125257492065, "train/loss_error": 0.5030674934387207, "train/loss_total": 0.48632025718688965 }, { 
"epoch": 0.9831685813518568, "grad_norm": 1.6944286823272705, "learning_rate": 2.410740048089768e-05, "loss": 0.5805, "step": 3680 }, { "epoch": 0.9831685813518568, "step": 3680, "train/loss_ctc": 0.762305736541748, "train/loss_error": 0.5116881132125854, "train/loss_total": 0.561811625957489 }, { "epoch": 0.9834357467272241, "step": 3681, "train/loss_ctc": 0.23977583646774292, "train/loss_error": 0.4571402668952942, "train/loss_total": 0.41366738080978394 }, { "epoch": 0.9837029121025915, "step": 3682, "train/loss_ctc": 0.7808812856674194, "train/loss_error": 0.4855750501155853, "train/loss_total": 0.5446363091468811 }, { "epoch": 0.9839700774779588, "step": 3683, "train/loss_ctc": 0.9457278847694397, "train/loss_error": 0.45525407791137695, "train/loss_total": 0.5533488392829895 }, { "epoch": 0.9842372428533263, "step": 3684, "train/loss_ctc": 1.2516454458236694, "train/loss_error": 0.5180346369743347, "train/loss_total": 0.6647568345069885 }, { "epoch": 0.9845044082286936, "step": 3685, "train/loss_ctc": 0.7088863849639893, "train/loss_error": 0.4642132520675659, "train/loss_total": 0.5131478905677795 }, { "epoch": 0.9847715736040609, "step": 3686, "train/loss_ctc": 1.3440220355987549, "train/loss_error": 0.5096138715744019, "train/loss_total": 0.6764954924583435 }, { "epoch": 0.9850387389794283, "step": 3687, "train/loss_ctc": 0.7306962013244629, "train/loss_error": 0.5144610404968262, "train/loss_total": 0.5577080845832825 }, { "epoch": 0.9853059043547956, "step": 3688, "train/loss_ctc": 1.4330990314483643, "train/loss_error": 0.42858025431632996, "train/loss_total": 0.6294840574264526 }, { "epoch": 0.985573069730163, "step": 3689, "train/loss_ctc": 0.9688518047332764, "train/loss_error": 0.5517217516899109, "train/loss_total": 0.635147750377655 }, { "epoch": 0.9858402351055303, "grad_norm": 1.7593579292297363, "learning_rate": 2.4091370558375638e-05, "loss": 0.575, "step": 3690 }, { "epoch": 0.9858402351055303, "step": 3690, "train/loss_ctc": 
0.8902039527893066, "train/loss_error": 0.5033462047576904, "train/loss_total": 0.5807177424430847 }, { "epoch": 0.9861074004808976, "step": 3691, "train/loss_ctc": 0.7567342519760132, "train/loss_error": 0.4422156810760498, "train/loss_total": 0.5051193833351135 }, { "epoch": 0.986374565856265, "step": 3692, "train/loss_ctc": 1.0663261413574219, "train/loss_error": 0.5556537508964539, "train/loss_total": 0.6577882170677185 }, { "epoch": 0.9866417312316323, "step": 3693, "train/loss_ctc": 0.6413911581039429, "train/loss_error": 0.5115825533866882, "train/loss_total": 0.537544310092926 }, { "epoch": 0.9869088966069998, "step": 3694, "train/loss_ctc": 0.9616836309432983, "train/loss_error": 0.4381249248981476, "train/loss_total": 0.5428366661071777 }, { "epoch": 0.9871760619823671, "step": 3695, "train/loss_ctc": 0.8860788941383362, "train/loss_error": 0.4369466304779053, "train/loss_total": 0.5267730951309204 }, { "epoch": 0.9874432273577345, "step": 3696, "train/loss_ctc": 0.5993409156799316, "train/loss_error": 0.4117979407310486, "train/loss_total": 0.44930654764175415 }, { "epoch": 0.9877103927331018, "step": 3697, "train/loss_ctc": 0.6598078012466431, "train/loss_error": 0.5098426938056946, "train/loss_total": 0.5398357510566711 }, { "epoch": 0.9879775581084691, "step": 3698, "train/loss_ctc": 0.6036102175712585, "train/loss_error": 0.41762790083885193, "train/loss_total": 0.45482438802719116 }, { "epoch": 0.9882447234838365, "step": 3699, "train/loss_ctc": 1.457806944847107, "train/loss_error": 0.5254242420196533, "train/loss_total": 0.7119008302688599 }, { "epoch": 0.9885118888592038, "grad_norm": 2.675985097885132, "learning_rate": 2.4075340635853596e-05, "loss": 0.5507, "step": 3700 }, { "epoch": 0.9885118888592038, "step": 3700, "train/loss_ctc": 0.5443578958511353, "train/loss_error": 0.4427555203437805, "train/loss_total": 0.46307599544525146 }, { "epoch": 0.9887790542345712, "step": 3701, "train/loss_ctc": 0.47146788239479065, "train/loss_error": 
0.4499428868293762, "train/loss_total": 0.4542478919029236 }, { "epoch": 0.9890462196099385, "step": 3702, "train/loss_ctc": 0.6368746757507324, "train/loss_error": 0.4166319668292999, "train/loss_total": 0.4606804847717285 }, { "epoch": 0.9893133849853059, "step": 3703, "train/loss_ctc": 0.6794888377189636, "train/loss_error": 0.4614664912223816, "train/loss_total": 0.5050709843635559 }, { "epoch": 0.9895805503606733, "step": 3704, "train/loss_ctc": 0.24354495108127594, "train/loss_error": 0.4548807442188263, "train/loss_total": 0.41261357069015503 }, { "epoch": 0.9898477157360406, "step": 3705, "train/loss_ctc": 0.7122829556465149, "train/loss_error": 0.4089302718639374, "train/loss_total": 0.4696008265018463 }, { "epoch": 0.990114881111408, "step": 3706, "train/loss_ctc": 0.5242364406585693, "train/loss_error": 0.45567113161087036, "train/loss_total": 0.46938419342041016 }, { "epoch": 0.9903820464867753, "step": 3707, "train/loss_ctc": 0.5998107194900513, "train/loss_error": 0.48876845836639404, "train/loss_total": 0.5109769105911255 }, { "epoch": 0.9906492118621427, "step": 3708, "train/loss_ctc": 0.5595529079437256, "train/loss_error": 0.4869895875453949, "train/loss_total": 0.501502275466919 }, { "epoch": 0.99091637723751, "step": 3709, "train/loss_ctc": 0.8065559267997742, "train/loss_error": 0.4346969723701477, "train/loss_total": 0.5090687870979309 }, { "epoch": 0.9911835426128773, "grad_norm": 1.365391731262207, "learning_rate": 2.4059310713331554e-05, "loss": 0.4756, "step": 3710 }, { "epoch": 0.9911835426128773, "step": 3710, "train/loss_ctc": 0.9487364292144775, "train/loss_error": 0.4674866497516632, "train/loss_total": 0.563736617565155 }, { "epoch": 0.9914507079882448, "step": 3711, "train/loss_ctc": 0.529752790927887, "train/loss_error": 0.47133564949035645, "train/loss_total": 0.483019083738327 }, { "epoch": 0.9917178733636121, "step": 3712, "train/loss_ctc": 0.8395970463752747, "train/loss_error": 0.5374472737312317, "train/loss_total": 
0.5978772640228271 }, { "epoch": 0.9919850387389795, "step": 3713, "train/loss_ctc": 0.583590030670166, "train/loss_error": 0.46038779616355896, "train/loss_total": 0.4850282669067383 }, { "epoch": 0.9922522041143468, "step": 3714, "train/loss_ctc": 1.032325267791748, "train/loss_error": 0.47890087962150574, "train/loss_total": 0.5895857810974121 }, { "epoch": 0.9925193694897141, "step": 3715, "train/loss_ctc": 0.8497927188873291, "train/loss_error": 0.48677605390548706, "train/loss_total": 0.5593793988227844 }, { "epoch": 0.9927865348650815, "step": 3716, "train/loss_ctc": 0.8597954511642456, "train/loss_error": 0.5613788962364197, "train/loss_total": 0.6210622191429138 }, { "epoch": 0.9930537002404488, "step": 3717, "train/loss_ctc": 0.3722435235977173, "train/loss_error": 0.5619121789932251, "train/loss_total": 0.5239784717559814 }, { "epoch": 0.9933208656158162, "step": 3718, "train/loss_ctc": 1.5593957901000977, "train/loss_error": 0.5080621242523193, "train/loss_total": 0.7183288335800171 }, { "epoch": 0.9935880309911835, "step": 3719, "train/loss_ctc": 0.49042683839797974, "train/loss_error": 0.4664606750011444, "train/loss_total": 0.4712539315223694 }, { "epoch": 0.9938551963665508, "grad_norm": 3.0108609199523926, "learning_rate": 2.4043280790809512e-05, "loss": 0.5613, "step": 3720 }, { "epoch": 0.9938551963665508, "step": 3720, "train/loss_ctc": 0.9124356508255005, "train/loss_error": 0.47288092970848083, "train/loss_total": 0.5607918500900269 }, { "epoch": 0.9941223617419183, "step": 3721, "train/loss_ctc": 1.0213702917099, "train/loss_error": 0.5405425429344177, "train/loss_total": 0.63670814037323 }, { "epoch": 0.9943895271172856, "step": 3722, "train/loss_ctc": 1.1643518209457397, "train/loss_error": 0.48123639822006226, "train/loss_total": 0.6178594827651978 }, { "epoch": 0.994656692492653, "step": 3723, "train/loss_ctc": 1.0836153030395508, "train/loss_error": 0.459759920835495, "train/loss_total": 0.5845310091972351 }, { "epoch": 
0.9949238578680203, "step": 3724, "train/loss_ctc": 0.6664578914642334, "train/loss_error": 0.5132892727851868, "train/loss_total": 0.543923020362854 }, { "epoch": 0.9951910232433877, "step": 3725, "train/loss_ctc": 0.42283663153648376, "train/loss_error": 0.5031220316886902, "train/loss_total": 0.4870649576187134 }, { "epoch": 0.995458188618755, "step": 3726, "train/loss_ctc": 0.8238855004310608, "train/loss_error": 0.45370733737945557, "train/loss_total": 0.5277429819107056 }, { "epoch": 0.9957253539941223, "step": 3727, "train/loss_ctc": 0.47270143032073975, "train/loss_error": 0.49129003286361694, "train/loss_total": 0.4875723421573639 }, { "epoch": 0.9959925193694897, "step": 3728, "train/loss_ctc": 0.7382230758666992, "train/loss_error": 0.45326343178749084, "train/loss_total": 0.5102553963661194 }, { "epoch": 0.996259684744857, "step": 3729, "train/loss_ctc": 0.6343204975128174, "train/loss_error": 0.4845033288002014, "train/loss_total": 0.5144667625427246 }, { "epoch": 0.9965268501202245, "grad_norm": 6.580362319946289, "learning_rate": 2.402725086828747e-05, "loss": 0.5471, "step": 3730 }, { "epoch": 0.9965268501202245, "step": 3730, "train/loss_ctc": 1.1174691915512085, "train/loss_error": 0.47077903151512146, "train/loss_total": 0.6001170873641968 }, { "epoch": 0.9967940154955918, "step": 3731, "train/loss_ctc": 0.6133954524993896, "train/loss_error": 0.4448604881763458, "train/loss_total": 0.4785674810409546 }, { "epoch": 0.9970611808709591, "step": 3732, "train/loss_ctc": 0.7400922775268555, "train/loss_error": 0.426338791847229, "train/loss_total": 0.4890894889831543 }, { "epoch": 0.9973283462463265, "step": 3733, "train/loss_ctc": 1.083162546157837, "train/loss_error": 0.49027174711227417, "train/loss_total": 0.6088498830795288 }, { "epoch": 0.9975955116216938, "step": 3734, "train/loss_ctc": 0.4347202777862549, "train/loss_error": 0.45094218850135803, "train/loss_total": 0.44769781827926636 }, { "epoch": 0.9978626769970612, "step": 3735, 
"train/loss_ctc": 1.5834288597106934, "train/loss_error": 0.4649876356124878, "train/loss_total": 0.6886758804321289 }, { "epoch": 0.9981298423724285, "step": 3736, "train/loss_ctc": 1.1907074451446533, "train/loss_error": 0.5406822562217712, "train/loss_total": 0.6706873178482056 }, { "epoch": 0.9983970077477959, "step": 3737, "train/loss_ctc": 0.6599445343017578, "train/loss_error": 0.5214180946350098, "train/loss_total": 0.5491234064102173 }, { "epoch": 0.9986641731231632, "step": 3738, "train/loss_ctc": 0.5127624273300171, "train/loss_error": 0.45245203375816345, "train/loss_total": 0.4645141363143921 }, { "epoch": 0.9989313384985306, "step": 3739, "train/loss_ctc": 0.5206364393234253, "train/loss_error": 0.4608645439147949, "train/loss_total": 0.47281894087791443 }, { "epoch": 0.999198503873898, "grad_norm": 1.1436834335327148, "learning_rate": 2.401122094576543e-05, "loss": 0.547, "step": 3740 }, { "epoch": 0.999198503873898, "step": 3740, "train/loss_ctc": 0.7677963972091675, "train/loss_error": 0.5099321603775024, "train/loss_total": 0.5615050196647644 }, { "epoch": 0.9994656692492653, "step": 3741, "train/loss_ctc": 0.6928520202636719, "train/loss_error": 0.5342695713043213, "train/loss_total": 0.5659860968589783 }, { "epoch": 0.9997328346246327, "step": 3742, "train/loss_ctc": 0.7967663407325745, "train/loss_error": 0.5341121554374695, "train/loss_total": 0.5866429805755615 }, { "epoch": 1.0, "eval_eval/f1_0": 0.5716429352760315, "eval_eval/f1_1": 0.803674042224884, "eval_eval/precision_0": 0.7310855388641357, "eval_eval/precision_1": 0.7306421399116516, "eval_eval/recall_0": 0.46929439902305603, "eval_eval/recall_1": 0.8929272890090942, "eval_eval/wer": 0.1701705964789083, "eval_runtime": 40.6776, "eval_samples_per_second": 11.284, "eval_steps_per_second": 11.284, "step": 3743 }, { "epoch": 1.0, "step": 3743, "train/loss_ctc": 0.45580965280532837, "train/loss_error": 0.5381885170936584, "train/loss_total": 0.5217127203941345 }, { "epoch": 
1.0002671653753674, "step": 3744, "train/loss_ctc": 0.7694880962371826, "train/loss_error": 0.46100184321403503, "train/loss_total": 0.5226991176605225 }, { "epoch": 1.0005343307507346, "step": 3745, "train/loss_ctc": 0.5638374090194702, "train/loss_error": 0.44868698716163635, "train/loss_total": 0.47171708941459656 }, { "epoch": 1.000801496126102, "step": 3746, "train/loss_ctc": 0.8209260702133179, "train/loss_error": 0.4422755241394043, "train/loss_total": 0.5180056095123291 }, { "epoch": 1.0010686615014694, "step": 3747, "train/loss_ctc": 1.0962741374969482, "train/loss_error": 0.4515840411186218, "train/loss_total": 0.5805220603942871 }, { "epoch": 1.0013358268768369, "step": 3748, "train/loss_ctc": 0.7776880264282227, "train/loss_error": 0.458482950925827, "train/loss_total": 0.5223239660263062 }, { "epoch": 1.001602992252204, "step": 3749, "train/loss_ctc": 0.9871833324432373, "train/loss_error": 0.5018702745437622, "train/loss_total": 0.5989329218864441 }, { "epoch": 1.0018701576275715, "grad_norm": 1.6502267122268677, "learning_rate": 2.399519102324339e-05, "loss": 0.545, "step": 3750 }, { "epoch": 1.0018701576275715, "step": 3750, "train/loss_ctc": 0.7196980118751526, "train/loss_error": 0.5006952285766602, "train/loss_total": 0.5444957613945007 }, { "epoch": 1.002137323002939, "step": 3751, "train/loss_ctc": 0.48419812321662903, "train/loss_error": 0.5319424271583557, "train/loss_total": 0.5223935842514038 }, { "epoch": 1.002404488378306, "step": 3752, "train/loss_ctc": 1.4989821910858154, "train/loss_error": 0.4467562139034271, "train/loss_total": 0.6572014093399048 }, { "epoch": 1.0026716537536735, "step": 3753, "train/loss_ctc": 0.7774584293365479, "train/loss_error": 0.47346559166908264, "train/loss_total": 0.5342641472816467 }, { "epoch": 1.002938819129041, "step": 3754, "train/loss_ctc": 0.8976605534553528, "train/loss_error": 0.5187057852745056, "train/loss_total": 0.5944967269897461 }, { "epoch": 1.0032059845044081, "step": 3755, 
"train/loss_ctc": 0.7903514504432678, "train/loss_error": 0.5013747215270996, "train/loss_total": 0.5591700673103333 }, { "epoch": 1.0034731498797755, "step": 3756, "train/loss_ctc": 0.3303086757659912, "train/loss_error": 0.41725045442581177, "train/loss_total": 0.3998621106147766 }, { "epoch": 1.003740315255143, "step": 3757, "train/loss_ctc": 0.5317478179931641, "train/loss_error": 0.406688928604126, "train/loss_total": 0.4317007064819336 }, { "epoch": 1.0040074806305104, "step": 3758, "train/loss_ctc": 0.598755955696106, "train/loss_error": 0.454839289188385, "train/loss_total": 0.48362261056900024 }, { "epoch": 1.0042746460058776, "step": 3759, "train/loss_ctc": 0.9128444194793701, "train/loss_error": 0.5339495539665222, "train/loss_total": 0.6097285151481628 }, { "epoch": 1.004541811381245, "grad_norm": 4.613402366638184, "learning_rate": 2.3979161100721347e-05, "loss": 0.5337, "step": 3760 }, { "epoch": 1.004541811381245, "step": 3760, "train/loss_ctc": 1.0428142547607422, "train/loss_error": 0.525585949420929, "train/loss_total": 0.6290316581726074 }, { "epoch": 1.0048089767566124, "step": 3761, "train/loss_ctc": 0.9303702116012573, "train/loss_error": 0.4189651310443878, "train/loss_total": 0.5212461948394775 }, { "epoch": 1.0050761421319796, "step": 3762, "train/loss_ctc": 0.7165793180465698, "train/loss_error": 0.44714221358299255, "train/loss_total": 0.5010296702384949 }, { "epoch": 1.005343307507347, "step": 3763, "train/loss_ctc": 0.7789567708969116, "train/loss_error": 0.4309159219264984, "train/loss_total": 0.50052410364151 }, { "epoch": 1.0056104728827144, "step": 3764, "train/loss_ctc": 0.37368759512901306, "train/loss_error": 0.549833357334137, "train/loss_total": 0.5146042108535767 }, { "epoch": 1.0058776382580819, "step": 3765, "train/loss_ctc": 1.2319133281707764, "train/loss_error": 0.4003099799156189, "train/loss_total": 0.5666306614875793 }, { "epoch": 1.006144803633449, "step": 3766, "train/loss_ctc": 1.2208718061447144, 
"train/loss_error": 0.4369432330131531, "train/loss_total": 0.5937289595603943 }, { "epoch": 1.0064119690088165, "step": 3767, "train/loss_ctc": 0.5656775236129761, "train/loss_error": 0.4587152600288391, "train/loss_total": 0.48010772466659546 }, { "epoch": 1.0066791343841839, "step": 3768, "train/loss_ctc": 0.8725504875183105, "train/loss_error": 0.5147557854652405, "train/loss_total": 0.5863147377967834 }, { "epoch": 1.006946299759551, "step": 3769, "train/loss_ctc": 1.1253197193145752, "train/loss_error": 0.47035330533981323, "train/loss_total": 0.6013466119766235 }, { "epoch": 1.0072134651349185, "grad_norm": 2.3907630443573, "learning_rate": 2.396473417045151e-05, "loss": 0.5495, "step": 3770 }, { "epoch": 1.0072134651349185, "step": 3770, "train/loss_ctc": 0.4259914755821228, "train/loss_error": 0.48958897590637207, "train/loss_total": 0.47686946392059326 }, { "epoch": 1.007480630510286, "step": 3771, "train/loss_ctc": 0.4355773329734802, "train/loss_error": 0.40326714515686035, "train/loss_total": 0.4097291827201843 }, { "epoch": 1.007747795885653, "step": 3772, "train/loss_ctc": 0.8071613311767578, "train/loss_error": 0.49742361903190613, "train/loss_total": 0.5593711733818054 }, { "epoch": 1.0080149612610205, "step": 3773, "train/loss_ctc": 0.4635499119758606, "train/loss_error": 0.4976217746734619, "train/loss_total": 0.4908074140548706 }, { "epoch": 1.008282126636388, "step": 3774, "train/loss_ctc": 0.6589210033416748, "train/loss_error": 0.4723466634750366, "train/loss_total": 0.5096615552902222 }, { "epoch": 1.0085492920117554, "step": 3775, "train/loss_ctc": 0.4077252745628357, "train/loss_error": 0.39323890209198, "train/loss_total": 0.39613619446754456 }, { "epoch": 1.0088164573871226, "step": 3776, "train/loss_ctc": 0.4717223346233368, "train/loss_error": 0.5141833424568176, "train/loss_total": 0.5056911706924438 }, { "epoch": 1.00908362276249, "step": 3777, "train/loss_ctc": 0.45515069365501404, "train/loss_error": 0.5219895839691162, 
"train/loss_total": 0.5086218118667603 }, { "epoch": 1.0093507881378574, "step": 3778, "train/loss_ctc": 0.48121377825737, "train/loss_error": 0.5530360341072083, "train/loss_total": 0.538671612739563 }, { "epoch": 1.0096179535132246, "step": 3779, "train/loss_ctc": 1.0517446994781494, "train/loss_error": 0.503086507320404, "train/loss_total": 0.61281818151474 }, { "epoch": 1.009885118888592, "grad_norm": 1.3840856552124023, "learning_rate": 2.3948704247929472e-05, "loss": 0.5008, "step": 3780 }, { "epoch": 1.009885118888592, "step": 3780, "train/loss_ctc": 0.9346080422401428, "train/loss_error": 0.4382610023021698, "train/loss_total": 0.5375304222106934 }, { "epoch": 1.0101522842639594, "step": 3781, "train/loss_ctc": 0.8502447009086609, "train/loss_error": 0.5116174817085266, "train/loss_total": 0.5793429017066956 }, { "epoch": 1.0104194496393268, "step": 3782, "train/loss_ctc": 1.5233488082885742, "train/loss_error": 0.521632194519043, "train/loss_total": 0.721975564956665 }, { "epoch": 1.010686615014694, "step": 3783, "train/loss_ctc": 1.4376168251037598, "train/loss_error": 0.5115651488304138, "train/loss_total": 0.696775496006012 }, { "epoch": 1.0109537803900615, "step": 3784, "train/loss_ctc": 0.6448431015014648, "train/loss_error": 0.4345046281814575, "train/loss_total": 0.47657233476638794 }, { "epoch": 1.0112209457654289, "step": 3785, "train/loss_ctc": 0.7111561894416809, "train/loss_error": 0.47299444675445557, "train/loss_total": 0.5206267833709717 }, { "epoch": 1.011488111140796, "step": 3786, "train/loss_ctc": 0.8288694024085999, "train/loss_error": 0.5273630023002625, "train/loss_total": 0.5876643061637878 }, { "epoch": 1.0117552765161635, "step": 3787, "train/loss_ctc": 0.44979673624038696, "train/loss_error": 0.5186256766319275, "train/loss_total": 0.5048598647117615 }, { "epoch": 1.012022441891531, "step": 3788, "train/loss_ctc": 0.7636124491691589, "train/loss_error": 0.44501182436943054, "train/loss_total": 0.5087319612503052 }, { "epoch": 
1.0122896072668983, "step": 3789, "train/loss_ctc": 0.6251295208930969, "train/loss_error": 0.5306793451309204, "train/loss_total": 0.5495693683624268 }, { "epoch": 1.0125567726422655, "grad_norm": 1.4915121793746948, "learning_rate": 2.393267432540743e-05, "loss": 0.5684, "step": 3790 }, { "epoch": 1.0125567726422655, "step": 3790, "train/loss_ctc": 1.1122907400131226, "train/loss_error": 0.5546818375587463, "train/loss_total": 0.6662036180496216 }, { "epoch": 1.012823938017633, "step": 3791, "train/loss_ctc": 0.8745155334472656, "train/loss_error": 0.3869136571884155, "train/loss_total": 0.4844340682029724 }, { "epoch": 1.0130911033930003, "step": 3792, "train/loss_ctc": 0.4613833427429199, "train/loss_error": 0.47647422552108765, "train/loss_total": 0.4734560549259186 }, { "epoch": 1.0133582687683675, "step": 3793, "train/loss_ctc": 0.936573326587677, "train/loss_error": 0.4593687653541565, "train/loss_total": 0.5548096895217896 }, { "epoch": 1.013625434143735, "step": 3794, "train/loss_ctc": 1.427729845046997, "train/loss_error": 0.49261975288391113, "train/loss_total": 0.6796417832374573 }, { "epoch": 1.0138925995191024, "step": 3795, "train/loss_ctc": 1.1708099842071533, "train/loss_error": 0.4908417761325836, "train/loss_total": 0.6268354654312134 }, { "epoch": 1.0141597648944696, "step": 3796, "train/loss_ctc": 0.5556793808937073, "train/loss_error": 0.4199094772338867, "train/loss_total": 0.4470634460449219 }, { "epoch": 1.014426930269837, "step": 3797, "train/loss_ctc": 1.2348308563232422, "train/loss_error": 0.4496152400970459, "train/loss_total": 0.6066583395004272 }, { "epoch": 1.0146940956452044, "step": 3798, "train/loss_ctc": 1.1808624267578125, "train/loss_error": 0.45007193088531494, "train/loss_total": 0.5962300300598145 }, { "epoch": 1.0149612610205718, "step": 3799, "train/loss_ctc": 1.2569034099578857, "train/loss_error": 0.4610322415828705, "train/loss_total": 0.6202064752578735 }, { "epoch": 1.015228426395939, "grad_norm": 1.144295573234558, 
"learning_rate": 2.3916644402885388e-05, "loss": 0.5756, "step": 3800 }, { "epoch": 1.015228426395939, "step": 3800, "train/loss_ctc": 1.0090267658233643, "train/loss_error": 0.45175233483314514, "train/loss_total": 0.5632072687149048 }, { "epoch": 1.0154955917713064, "step": 3801, "train/loss_ctc": 0.9358128309249878, "train/loss_error": 0.5045774579048157, "train/loss_total": 0.590824544429779 }, { "epoch": 1.0157627571466739, "step": 3802, "train/loss_ctc": 0.47147849202156067, "train/loss_error": 0.4617239534854889, "train/loss_total": 0.4636748731136322 }, { "epoch": 1.016029922522041, "step": 3803, "train/loss_ctc": 0.6777307987213135, "train/loss_error": 0.539901614189148, "train/loss_total": 0.567467451095581 }, { "epoch": 1.0162970878974085, "step": 3804, "train/loss_ctc": 0.7911843657493591, "train/loss_error": 0.4990663230419159, "train/loss_total": 0.5574899315834045 }, { "epoch": 1.0165642532727759, "step": 3805, "train/loss_ctc": 0.7534880638122559, "train/loss_error": 0.528003990650177, "train/loss_total": 0.5731008052825928 }, { "epoch": 1.0168314186481433, "step": 3806, "train/loss_ctc": 1.0739386081695557, "train/loss_error": 0.48601463437080383, "train/loss_total": 0.6035994291305542 }, { "epoch": 1.0170985840235105, "step": 3807, "train/loss_ctc": 1.2442668676376343, "train/loss_error": 0.4685540199279785, "train/loss_total": 0.6236965656280518 }, { "epoch": 1.017365749398878, "step": 3808, "train/loss_ctc": 0.5290172100067139, "train/loss_error": 0.4434371292591095, "train/loss_total": 0.4605531692504883 }, { "epoch": 1.0176329147742453, "step": 3809, "train/loss_ctc": 1.1296722888946533, "train/loss_error": 0.4745122194290161, "train/loss_total": 0.6055442690849304 }, { "epoch": 1.0179000801496125, "grad_norm": 1.4140270948410034, "learning_rate": 2.3900614480363346e-05, "loss": 0.5609, "step": 3810 }, { "epoch": 1.0179000801496125, "step": 3810, "train/loss_ctc": 1.1995854377746582, "train/loss_error": 0.4486510753631592, "train/loss_total": 
0.5988379716873169 }, { "epoch": 1.01816724552498, "step": 3811, "train/loss_ctc": 0.586775004863739, "train/loss_error": 0.41179028153419495, "train/loss_total": 0.4467872381210327 }, { "epoch": 1.0184344109003474, "step": 3812, "train/loss_ctc": 0.35640227794647217, "train/loss_error": 0.5179896354675293, "train/loss_total": 0.4856721758842468 }, { "epoch": 1.0187015762757146, "step": 3813, "train/loss_ctc": 0.813119113445282, "train/loss_error": 0.4758244752883911, "train/loss_total": 0.5432834029197693 }, { "epoch": 1.018968741651082, "step": 3814, "train/loss_ctc": 0.7481849193572998, "train/loss_error": 0.41897356510162354, "train/loss_total": 0.4848158359527588 }, { "epoch": 1.0192359070264494, "step": 3815, "train/loss_ctc": 0.5144761800765991, "train/loss_error": 0.4584893584251404, "train/loss_total": 0.46968671679496765 }, { "epoch": 1.0195030724018168, "step": 3816, "train/loss_ctc": 0.7299771308898926, "train/loss_error": 0.4725426733493805, "train/loss_total": 0.524029552936554 }, { "epoch": 1.019770237777184, "step": 3817, "train/loss_ctc": 0.43776679039001465, "train/loss_error": 0.4870569407939911, "train/loss_total": 0.47719889879226685 }, { "epoch": 1.0200374031525514, "step": 3818, "train/loss_ctc": 0.9848060607910156, "train/loss_error": 0.503010094165802, "train/loss_total": 0.5993692874908447 }, { "epoch": 1.0203045685279188, "step": 3819, "train/loss_ctc": 0.48361095786094666, "train/loss_error": 0.4783487617969513, "train/loss_total": 0.47940120100975037 }, { "epoch": 1.020571733903286, "grad_norm": 1.4238579273223877, "learning_rate": 2.3884584557841304e-05, "loss": 0.5109, "step": 3820 }, { "epoch": 1.020571733903286, "step": 3820, "train/loss_ctc": 0.6719026565551758, "train/loss_error": 0.5286048054695129, "train/loss_total": 0.5572643876075745 }, { "epoch": 1.0208388992786535, "step": 3821, "train/loss_ctc": 0.6636650562286377, "train/loss_error": 0.4266272783279419, "train/loss_total": 0.47403484582901 }, { "epoch": 
1.0211060646540209, "step": 3822, "train/loss_ctc": 1.3342368602752686, "train/loss_error": 0.46268412470817566, "train/loss_total": 0.6369947195053101 }, { "epoch": 1.0213732300293883, "step": 3823, "train/loss_ctc": 0.9311450123786926, "train/loss_error": 0.571437656879425, "train/loss_total": 0.6433791518211365 }, { "epoch": 1.0216403954047555, "step": 3824, "train/loss_ctc": 1.069040298461914, "train/loss_error": 0.48446446657180786, "train/loss_total": 0.6013796329498291 }, { "epoch": 1.021907560780123, "step": 3825, "train/loss_ctc": 0.9616842269897461, "train/loss_error": 0.4935400187969208, "train/loss_total": 0.5871688723564148 }, { "epoch": 1.0221747261554903, "step": 3826, "train/loss_ctc": 1.568145513534546, "train/loss_error": 0.5945439338684082, "train/loss_total": 0.7892642617225647 }, { "epoch": 1.0224418915308575, "step": 3827, "train/loss_ctc": 1.4596717357635498, "train/loss_error": 0.5124921798706055, "train/loss_total": 0.7019280791282654 }, { "epoch": 1.022709056906225, "step": 3828, "train/loss_ctc": 1.1815398931503296, "train/loss_error": 0.43111133575439453, "train/loss_total": 0.5811970233917236 }, { "epoch": 1.0229762222815924, "step": 3829, "train/loss_ctc": 0.8236463069915771, "train/loss_error": 0.49094897508621216, "train/loss_total": 0.5574884414672852 }, { "epoch": 1.0232433876569598, "grad_norm": 1.1664466857910156, "learning_rate": 2.3868554635319262e-05, "loss": 0.613, "step": 3830 }, { "epoch": 1.0232433876569598, "step": 3830, "train/loss_ctc": 0.7822795510292053, "train/loss_error": 0.4714638292789459, "train/loss_total": 0.5336269736289978 }, { "epoch": 1.023510553032327, "step": 3831, "train/loss_ctc": 0.4621928930282593, "train/loss_error": 0.45382609963417053, "train/loss_total": 0.45549947023391724 }, { "epoch": 1.0237777184076944, "step": 3832, "train/loss_ctc": 0.5817024111747742, "train/loss_error": 0.49857500195503235, "train/loss_total": 0.5152004957199097 }, { "epoch": 1.0240448837830618, "step": 3833, 
"train/loss_ctc": 0.5555782914161682, "train/loss_error": 0.5012218356132507, "train/loss_total": 0.5120931267738342 }, { "epoch": 1.024312049158429, "step": 3834, "train/loss_ctc": 1.028281569480896, "train/loss_error": 0.49712637066841125, "train/loss_total": 0.6033574342727661 }, { "epoch": 1.0245792145337964, "step": 3835, "train/loss_ctc": 0.9501603841781616, "train/loss_error": 0.49219706654548645, "train/loss_total": 0.5837897658348083 }, { "epoch": 1.0248463799091638, "step": 3836, "train/loss_ctc": 1.0797337293624878, "train/loss_error": 0.4386402666568756, "train/loss_total": 0.5668589472770691 }, { "epoch": 1.025113545284531, "step": 3837, "train/loss_ctc": 0.6890125274658203, "train/loss_error": 0.47163125872612, "train/loss_total": 0.5151075124740601 }, { "epoch": 1.0253807106598984, "step": 3838, "train/loss_ctc": 1.0030158758163452, "train/loss_error": 0.5272190570831299, "train/loss_total": 0.622378408908844 }, { "epoch": 1.0256478760352659, "step": 3839, "train/loss_ctc": 0.7141278982162476, "train/loss_error": 0.44395411014556885, "train/loss_total": 0.49798887968063354 }, { "epoch": 1.0259150414106333, "grad_norm": 1.8661375045776367, "learning_rate": 2.3852524712797224e-05, "loss": 0.5406, "step": 3840 }, { "epoch": 1.0259150414106333, "step": 3840, "train/loss_ctc": 1.4138872623443604, "train/loss_error": 0.5439372062683105, "train/loss_total": 0.7179272174835205 }, { "epoch": 1.0261822067860005, "step": 3841, "train/loss_ctc": 0.9025692939758301, "train/loss_error": 0.5158510208129883, "train/loss_total": 0.5931947231292725 }, { "epoch": 1.026449372161368, "step": 3842, "train/loss_ctc": 0.39333510398864746, "train/loss_error": 0.5262902975082397, "train/loss_total": 0.49969926476478577 }, { "epoch": 1.0267165375367353, "step": 3843, "train/loss_ctc": 0.3728488087654114, "train/loss_error": 0.424772709608078, "train/loss_total": 0.41438794136047363 }, { "epoch": 1.0269837029121025, "step": 3844, "train/loss_ctc": 1.004296898841858, 
"train/loss_error": 0.505790650844574, "train/loss_total": 0.6054919362068176 }, { "epoch": 1.02725086828747, "step": 3845, "train/loss_ctc": 0.7669129371643066, "train/loss_error": 0.4853951930999756, "train/loss_total": 0.5416987538337708 }, { "epoch": 1.0275180336628373, "step": 3846, "train/loss_ctc": 0.9773377180099487, "train/loss_error": 0.44104209542274475, "train/loss_total": 0.5483012199401855 }, { "epoch": 1.0277851990382048, "step": 3847, "train/loss_ctc": 0.5295485854148865, "train/loss_error": 0.4856789708137512, "train/loss_total": 0.49445289373397827 }, { "epoch": 1.028052364413572, "step": 3848, "train/loss_ctc": 0.7762313485145569, "train/loss_error": 0.46115174889564514, "train/loss_total": 0.5241676568984985 }, { "epoch": 1.0283195297889394, "step": 3849, "train/loss_ctc": 0.6002363562583923, "train/loss_error": 0.4964142441749573, "train/loss_total": 0.5171786546707153 }, { "epoch": 1.0285866951643068, "grad_norm": 1.7496501207351685, "learning_rate": 2.3836494790275182e-05, "loss": 0.5457, "step": 3850 }, { "epoch": 1.0285866951643068, "step": 3850, "train/loss_ctc": 0.9135147333145142, "train/loss_error": 0.5374115109443665, "train/loss_total": 0.612632155418396 }, { "epoch": 1.028853860539674, "step": 3851, "train/loss_ctc": 2.2172536849975586, "train/loss_error": 0.4723137617111206, "train/loss_total": 0.8213017582893372 }, { "epoch": 1.0291210259150414, "step": 3852, "train/loss_ctc": 0.5337480902671814, "train/loss_error": 0.43160319328308105, "train/loss_total": 0.4520321786403656 }, { "epoch": 1.0293881912904088, "step": 3853, "train/loss_ctc": 0.9721407890319824, "train/loss_error": 0.478187620639801, "train/loss_total": 0.5769782662391663 }, { "epoch": 1.029655356665776, "step": 3854, "train/loss_ctc": 0.7279649972915649, "train/loss_error": 0.4173530340194702, "train/loss_total": 0.4794754385948181 }, { "epoch": 1.0299225220411434, "step": 3855, "train/loss_ctc": 0.8414298295974731, "train/loss_error": 0.48756662011146545, 
"train/loss_total": 0.5583392381668091 }, { "epoch": 1.0301896874165108, "step": 3856, "train/loss_ctc": 0.6511398553848267, "train/loss_error": 0.4108843505382538, "train/loss_total": 0.4589354395866394 }, { "epoch": 1.0304568527918783, "step": 3857, "train/loss_ctc": 0.7119500041007996, "train/loss_error": 0.4296739101409912, "train/loss_total": 0.48612910509109497 }, { "epoch": 1.0307240181672455, "step": 3858, "train/loss_ctc": 0.8694126605987549, "train/loss_error": 0.47343212366104126, "train/loss_total": 0.552628219127655 }, { "epoch": 1.0309911835426129, "step": 3859, "train/loss_ctc": 0.5577421188354492, "train/loss_error": 0.4770880341529846, "train/loss_total": 0.49321886897087097 }, { "epoch": 1.0312583489179803, "grad_norm": 2.026101589202881, "learning_rate": 2.382046486775314e-05, "loss": 0.5492, "step": 3860 }, { "epoch": 1.0312583489179803, "step": 3860, "train/loss_ctc": 0.6345400810241699, "train/loss_error": 0.5519282817840576, "train/loss_total": 0.5684506297111511 }, { "epoch": 1.0315255142933475, "step": 3861, "train/loss_ctc": 1.2318336963653564, "train/loss_error": 0.47278517484664917, "train/loss_total": 0.6245949268341064 }, { "epoch": 1.031792679668715, "step": 3862, "train/loss_ctc": 0.3687434196472168, "train/loss_error": 0.5203135013580322, "train/loss_total": 0.4899994730949402 }, { "epoch": 1.0320598450440823, "step": 3863, "train/loss_ctc": 1.7344835996627808, "train/loss_error": 0.47974419593811035, "train/loss_total": 0.7306920886039734 }, { "epoch": 1.0323270104194497, "step": 3864, "train/loss_ctc": 0.6149645447731018, "train/loss_error": 0.41610634326934814, "train/loss_total": 0.45587798953056335 }, { "epoch": 1.032594175794817, "step": 3865, "train/loss_ctc": 0.8428536057472229, "train/loss_error": 0.48677390813827515, "train/loss_total": 0.5579898357391357 }, { "epoch": 1.0328613411701844, "step": 3866, "train/loss_ctc": 0.7916213870048523, "train/loss_error": 0.46479466557502747, "train/loss_total": 0.5301600098609924 }, { 
"epoch": 1.0331285065455518, "step": 3867, "train/loss_ctc": 0.6180012822151184, "train/loss_error": 0.506364643573761, "train/loss_total": 0.5286920070648193 }, { "epoch": 1.033395671920919, "step": 3868, "train/loss_ctc": 0.49450644850730896, "train/loss_error": 0.500817060470581, "train/loss_total": 0.49955496191978455 }, { "epoch": 1.0336628372962864, "step": 3869, "train/loss_ctc": 0.5196385383605957, "train/loss_error": 0.41271570324897766, "train/loss_total": 0.43410027027130127 }, { "epoch": 1.0339300026716538, "grad_norm": 2.4841554164886475, "learning_rate": 2.3804434945231098e-05, "loss": 0.542, "step": 3870 }, { "epoch": 1.0339300026716538, "step": 3870, "train/loss_ctc": 1.250922679901123, "train/loss_error": 0.45126333832740784, "train/loss_total": 0.6111952066421509 }, { "epoch": 1.034197168047021, "step": 3871, "train/loss_ctc": 0.7515795826911926, "train/loss_error": 0.5130211114883423, "train/loss_total": 0.5607328414916992 }, { "epoch": 1.0344643334223884, "step": 3872, "train/loss_ctc": 1.2253150939941406, "train/loss_error": 0.4645387530326843, "train/loss_total": 0.6166940331459045 }, { "epoch": 1.0347314987977558, "step": 3873, "train/loss_ctc": 1.0808144807815552, "train/loss_error": 0.4647468328475952, "train/loss_total": 0.5879603624343872 }, { "epoch": 1.0349986641731233, "step": 3874, "train/loss_ctc": 0.5884749889373779, "train/loss_error": 0.5556614995002747, "train/loss_total": 0.5622242093086243 }, { "epoch": 1.0352658295484904, "step": 3875, "train/loss_ctc": 1.3464949131011963, "train/loss_error": 0.4655783474445343, "train/loss_total": 0.6417616605758667 }, { "epoch": 1.0355329949238579, "step": 3876, "train/loss_ctc": 0.46378105878829956, "train/loss_error": 0.5164836645126343, "train/loss_total": 0.5059431791305542 }, { "epoch": 1.0358001602992253, "step": 3877, "train/loss_ctc": 1.0526379346847534, "train/loss_error": 0.45742982625961304, "train/loss_total": 0.5764714479446411 }, { "epoch": 1.0360673256745925, "step": 3878, 
"train/loss_ctc": 0.6346207857131958, "train/loss_error": 0.48684683442115784, "train/loss_total": 0.5164016485214233 }, { "epoch": 1.03633449104996, "step": 3879, "train/loss_ctc": 0.6337834596633911, "train/loss_error": 0.4911864697933197, "train/loss_total": 0.5197058916091919 }, { "epoch": 1.0366016564253273, "grad_norm": 14.688573837280273, "learning_rate": 2.3788405022709056e-05, "loss": 0.5699, "step": 3880 }, { "epoch": 1.0366016564253273, "step": 3880, "train/loss_ctc": 0.4650380611419678, "train/loss_error": 0.4483996629714966, "train/loss_total": 0.45172736048698425 }, { "epoch": 1.0368688218006947, "step": 3881, "train/loss_ctc": 1.162193775177002, "train/loss_error": 0.4887031316757202, "train/loss_total": 0.6234012842178345 }, { "epoch": 1.037135987176062, "step": 3882, "train/loss_ctc": 0.41575250029563904, "train/loss_error": 0.4473116099834442, "train/loss_total": 0.4409998059272766 }, { "epoch": 1.0374031525514293, "step": 3883, "train/loss_ctc": 1.0451596975326538, "train/loss_error": 0.4686640799045563, "train/loss_total": 0.5839632153511047 }, { "epoch": 1.0376703179267968, "step": 3884, "train/loss_ctc": 0.6089056730270386, "train/loss_error": 0.4797690510749817, "train/loss_total": 0.505596399307251 }, { "epoch": 1.037937483302164, "step": 3885, "train/loss_ctc": 0.8278604745864868, "train/loss_error": 0.5307732820510864, "train/loss_total": 0.5901907086372375 }, { "epoch": 1.0382046486775314, "step": 3886, "train/loss_ctc": 0.7804652452468872, "train/loss_error": 0.41674187779426575, "train/loss_total": 0.48948657512664795 }, { "epoch": 1.0384718140528988, "step": 3887, "train/loss_ctc": 0.8831983208656311, "train/loss_error": 0.5069752335548401, "train/loss_total": 0.5822198390960693 }, { "epoch": 1.038738979428266, "step": 3888, "train/loss_ctc": 0.48573341965675354, "train/loss_error": 0.4675135612487793, "train/loss_total": 0.4711575508117676 }, { "epoch": 1.0390061448036334, "step": 3889, "train/loss_ctc": 0.5342159271240234, 
"train/loss_error": 0.48771175742149353, "train/loss_total": 0.4970126152038574 }, { "epoch": 1.0392733101790008, "grad_norm": 1.7224938869476318, "learning_rate": 2.3772375100187014e-05, "loss": 0.5236, "step": 3890 }, { "epoch": 1.0392733101790008, "step": 3890, "train/loss_ctc": 1.092110514640808, "train/loss_error": 0.536490261554718, "train/loss_total": 0.6476143002510071 }, { "epoch": 1.0395404755543682, "step": 3891, "train/loss_ctc": 0.8016855120658875, "train/loss_error": 0.42122969031333923, "train/loss_total": 0.49732089042663574 }, { "epoch": 1.0398076409297354, "step": 3892, "train/loss_ctc": 0.5756088495254517, "train/loss_error": 0.46696826815605164, "train/loss_total": 0.4886963963508606 }, { "epoch": 1.0400748063051029, "step": 3893, "train/loss_ctc": 1.3140493631362915, "train/loss_error": 0.4856036901473999, "train/loss_total": 0.6512928009033203 }, { "epoch": 1.0403419716804703, "step": 3894, "train/loss_ctc": 0.3586786389350891, "train/loss_error": 0.5486457943916321, "train/loss_total": 0.5106523633003235 }, { "epoch": 1.0406091370558375, "step": 3895, "train/loss_ctc": 1.1632559299468994, "train/loss_error": 0.4976773262023926, "train/loss_total": 0.630793035030365 }, { "epoch": 1.0408763024312049, "step": 3896, "train/loss_ctc": 0.9195078015327454, "train/loss_error": 0.4201526641845703, "train/loss_total": 0.5200237035751343 }, { "epoch": 1.0411434678065723, "step": 3897, "train/loss_ctc": 0.8982474207878113, "train/loss_error": 0.43514227867126465, "train/loss_total": 0.527763307094574 }, { "epoch": 1.0414106331819397, "step": 3898, "train/loss_ctc": 0.3865181505680084, "train/loss_error": 0.4391293227672577, "train/loss_total": 0.42860710620880127 }, { "epoch": 1.041677798557307, "step": 3899, "train/loss_ctc": 0.6155557632446289, "train/loss_error": 0.4568370282649994, "train/loss_total": 0.4885807931423187 }, { "epoch": 1.0419449639326743, "grad_norm": 1.1544773578643799, "learning_rate": 2.3756345177664976e-05, "loss": 0.5391, "step": 
3900 }, { "epoch": 1.0419449639326743, "step": 3900, "train/loss_ctc": 0.37244778871536255, "train/loss_error": 0.5131359100341797, "train/loss_total": 0.48499828577041626 }, { "epoch": 1.0422121293080417, "step": 3901, "train/loss_ctc": 0.5048367977142334, "train/loss_error": 0.399269700050354, "train/loss_total": 0.42038315534591675 }, { "epoch": 1.042479294683409, "step": 3902, "train/loss_ctc": 0.5093068480491638, "train/loss_error": 0.4394543468952179, "train/loss_total": 0.453424870967865 }, { "epoch": 1.0427464600587764, "step": 3903, "train/loss_ctc": 0.9933212399482727, "train/loss_error": 0.4532586932182312, "train/loss_total": 0.5612711906433105 }, { "epoch": 1.0430136254341438, "step": 3904, "train/loss_ctc": 0.7509257793426514, "train/loss_error": 0.5738065838813782, "train/loss_total": 0.6092303991317749 }, { "epoch": 1.0432807908095112, "step": 3905, "train/loss_ctc": 0.8644500374794006, "train/loss_error": 0.4463486075401306, "train/loss_total": 0.5299688577651978 }, { "epoch": 1.0435479561848784, "step": 3906, "train/loss_ctc": 0.7496657371520996, "train/loss_error": 0.48880186676979065, "train/loss_total": 0.5409746170043945 }, { "epoch": 1.0438151215602458, "step": 3907, "train/loss_ctc": 0.6875343322753906, "train/loss_error": 0.42810070514678955, "train/loss_total": 0.4799874424934387 }, { "epoch": 1.0440822869356132, "step": 3908, "train/loss_ctc": 0.9361450672149658, "train/loss_error": 0.4591033458709717, "train/loss_total": 0.5545117259025574 }, { "epoch": 1.0443494523109804, "step": 3909, "train/loss_ctc": 1.3639838695526123, "train/loss_error": 0.46314874291419983, "train/loss_total": 0.6433157920837402 }, { "epoch": 1.0446166176863478, "grad_norm": 1.8401010036468506, "learning_rate": 2.3740315255142934e-05, "loss": 0.5278, "step": 3910 }, { "epoch": 1.0446166176863478, "step": 3910, "train/loss_ctc": 0.8308601379394531, "train/loss_error": 0.4320076107978821, "train/loss_total": 0.5117781162261963 }, { "epoch": 1.0448837830617153, 
"step": 3911, "train/loss_ctc": 0.5497831702232361, "train/loss_error": 0.4739389419555664, "train/loss_total": 0.48910778760910034 }, { "epoch": 1.0451509484370824, "step": 3912, "train/loss_ctc": 1.0063213109970093, "train/loss_error": 0.43557560443878174, "train/loss_total": 0.5497247576713562 }, { "epoch": 1.0454181138124499, "step": 3913, "train/loss_ctc": 1.28811514377594, "train/loss_error": 0.5160667896270752, "train/loss_total": 0.670476496219635 }, { "epoch": 1.0456852791878173, "step": 3914, "train/loss_ctc": 0.522223949432373, "train/loss_error": 0.37415024638175964, "train/loss_total": 0.4037649929523468 }, { "epoch": 1.0459524445631847, "step": 3915, "train/loss_ctc": 1.068488597869873, "train/loss_error": 0.5605217218399048, "train/loss_total": 0.6621150970458984 }, { "epoch": 1.046219609938552, "step": 3916, "train/loss_ctc": 0.4384303689002991, "train/loss_error": 0.4908166229724884, "train/loss_total": 0.4803394079208374 }, { "epoch": 1.0464867753139193, "step": 3917, "train/loss_ctc": 1.2731730937957764, "train/loss_error": 0.4925443232059479, "train/loss_total": 0.6486700773239136 }, { "epoch": 1.0467539406892867, "step": 3918, "train/loss_ctc": 0.8322703838348389, "train/loss_error": 0.4113943576812744, "train/loss_total": 0.4955695569515228 }, { "epoch": 1.047021106064654, "step": 3919, "train/loss_ctc": 0.6769797205924988, "train/loss_error": 0.431878924369812, "train/loss_total": 0.4808990955352783 }, { "epoch": 1.0472882714400213, "grad_norm": 1.8285417556762695, "learning_rate": 2.3724285332620892e-05, "loss": 0.5392, "step": 3920 }, { "epoch": 1.0472882714400213, "step": 3920, "train/loss_ctc": 0.5813727378845215, "train/loss_error": 0.4589422047138214, "train/loss_total": 0.48342829942703247 }, { "epoch": 1.0475554368153888, "step": 3921, "train/loss_ctc": 0.9234471917152405, "train/loss_error": 0.4583703279495239, "train/loss_total": 0.5513857007026672 }, { "epoch": 1.0478226021907562, "step": 3922, "train/loss_ctc": 0.6416752338409424, 
"train/loss_error": 0.4342706501483917, "train/loss_total": 0.4757515788078308 }, { "epoch": 1.0480897675661234, "step": 3923, "train/loss_ctc": 0.5584193468093872, "train/loss_error": 0.49421849846839905, "train/loss_total": 0.5070586800575256 }, { "epoch": 1.0483569329414908, "step": 3924, "train/loss_ctc": 0.5400751829147339, "train/loss_error": 0.4448193907737732, "train/loss_total": 0.4638705551624298 }, { "epoch": 1.0486240983168582, "step": 3925, "train/loss_ctc": 0.9727007746696472, "train/loss_error": 0.5754210352897644, "train/loss_total": 0.6548770070075989 }, { "epoch": 1.0488912636922254, "step": 3926, "train/loss_ctc": 0.7647847533226013, "train/loss_error": 0.5099584460258484, "train/loss_total": 0.56092369556427 }, { "epoch": 1.0491584290675928, "step": 3927, "train/loss_ctc": 0.9280192852020264, "train/loss_error": 0.44174960255622864, "train/loss_total": 0.5390035510063171 }, { "epoch": 1.0494255944429602, "step": 3928, "train/loss_ctc": 0.5269968509674072, "train/loss_error": 0.553561806678772, "train/loss_total": 0.548248827457428 }, { "epoch": 1.0496927598183277, "step": 3929, "train/loss_ctc": 0.7302961349487305, "train/loss_error": 0.4691314101219177, "train/loss_total": 0.5213643908500671 }, { "epoch": 1.0499599251936949, "grad_norm": 1.5781995058059692, "learning_rate": 2.370825541009885e-05, "loss": 0.5306, "step": 3930 }, { "epoch": 1.0499599251936949, "step": 3930, "train/loss_ctc": 1.1465113162994385, "train/loss_error": 0.4603097140789032, "train/loss_total": 0.5975500345230103 }, { "epoch": 1.0502270905690623, "step": 3931, "train/loss_ctc": 0.9919294714927673, "train/loss_error": 0.44973263144493103, "train/loss_total": 0.5581719875335693 }, { "epoch": 1.0504942559444297, "step": 3932, "train/loss_ctc": 0.9857327342033386, "train/loss_error": 0.5007118582725525, "train/loss_total": 0.5977160334587097 }, { "epoch": 1.0507614213197969, "step": 3933, "train/loss_ctc": 0.741032063961029, "train/loss_error": 0.45230114459991455, 
"train/loss_total": 0.5100473165512085 }, { "epoch": 1.0510285866951643, "step": 3934, "train/loss_ctc": 0.8935285210609436, "train/loss_error": 0.5476299524307251, "train/loss_total": 0.6168096661567688 }, { "epoch": 1.0512957520705317, "step": 3935, "train/loss_ctc": 0.7363148927688599, "train/loss_error": 0.4576323628425598, "train/loss_total": 0.5133688449859619 }, { "epoch": 1.051562917445899, "step": 3936, "train/loss_ctc": 1.0343844890594482, "train/loss_error": 0.49880871176719666, "train/loss_total": 0.6059238910675049 }, { "epoch": 1.0518300828212663, "step": 3937, "train/loss_ctc": 1.6427109241485596, "train/loss_error": 0.47345104813575745, "train/loss_total": 0.7073030471801758 }, { "epoch": 1.0520972481966337, "step": 3938, "train/loss_ctc": 0.4353887736797333, "train/loss_error": 0.4584754705429077, "train/loss_total": 0.4538581371307373 }, { "epoch": 1.0523644135720012, "step": 3939, "train/loss_ctc": 0.47942298650741577, "train/loss_error": 0.48229044675827026, "train/loss_total": 0.48171696066856384 }, { "epoch": 1.0526315789473684, "grad_norm": 1.4998966455459595, "learning_rate": 2.3692225487576808e-05, "loss": 0.5642, "step": 3940 }, { "epoch": 1.0526315789473684, "step": 3940, "train/loss_ctc": 0.8331328630447388, "train/loss_error": 0.467129111289978, "train/loss_total": 0.5403298735618591 }, { "epoch": 1.0528987443227358, "step": 3941, "train/loss_ctc": 0.7316734194755554, "train/loss_error": 0.4201710522174835, "train/loss_total": 0.4824715256690979 }, { "epoch": 1.0531659096981032, "step": 3942, "train/loss_ctc": 1.4956990480422974, "train/loss_error": 0.464096337556839, "train/loss_total": 0.6704168915748596 }, { "epoch": 1.0534330750734704, "step": 3943, "train/loss_ctc": 0.9803268909454346, "train/loss_error": 0.43267595767974854, "train/loss_total": 0.5422061681747437 }, { "epoch": 1.0537002404488378, "step": 3944, "train/loss_ctc": 0.5026727914810181, "train/loss_error": 0.44495847821235657, "train/loss_total": 0.4565013349056244 }, { 
"epoch": 1.0539674058242052, "step": 3945, "train/loss_ctc": 0.7070684432983398, "train/loss_error": 0.49757012724876404, "train/loss_total": 0.539469838142395 }, { "epoch": 1.0542345711995726, "step": 3946, "train/loss_ctc": 0.4009840190410614, "train/loss_error": 0.5193074941635132, "train/loss_total": 0.4956428110599518 }, { "epoch": 1.0545017365749398, "step": 3947, "train/loss_ctc": 0.7737083435058594, "train/loss_error": 0.5086662769317627, "train/loss_total": 0.5616747140884399 }, { "epoch": 1.0547689019503073, "step": 3948, "train/loss_ctc": 0.5917870998382568, "train/loss_error": 0.5445682406425476, "train/loss_total": 0.5540120601654053 }, { "epoch": 1.0550360673256747, "step": 3949, "train/loss_ctc": 0.49380940198898315, "train/loss_error": 0.4612674117088318, "train/loss_total": 0.467775821685791 }, { "epoch": 1.0553032327010419, "grad_norm": 1.3144336938858032, "learning_rate": 2.367619556505477e-05, "loss": 0.5311, "step": 3950 }, { "epoch": 1.0553032327010419, "step": 3950, "train/loss_ctc": 1.5597171783447266, "train/loss_error": 0.5008944869041443, "train/loss_total": 0.7126590013504028 }, { "epoch": 1.0555703980764093, "step": 3951, "train/loss_ctc": 0.8611404299736023, "train/loss_error": 0.4333202540874481, "train/loss_total": 0.5188843011856079 }, { "epoch": 1.0558375634517767, "step": 3952, "train/loss_ctc": 0.29298174381256104, "train/loss_error": 0.48301032185554504, "train/loss_total": 0.4450046122074127 }, { "epoch": 1.056104728827144, "step": 3953, "train/loss_ctc": 0.5348240733146667, "train/loss_error": 0.49427229166030884, "train/loss_total": 0.5023826360702515 }, { "epoch": 1.0563718942025113, "step": 3954, "train/loss_ctc": 0.45826098322868347, "train/loss_error": 0.47487112879753113, "train/loss_total": 0.4715490937232971 }, { "epoch": 1.0566390595778787, "step": 3955, "train/loss_ctc": 0.7785174250602722, "train/loss_error": 0.45127588510513306, "train/loss_total": 0.5167242288589478 }, { "epoch": 1.0569062249532462, "step": 3956, 
"train/loss_ctc": 1.2001482248306274, "train/loss_error": 0.43921440839767456, "train/loss_total": 0.5914011597633362 }, { "epoch": 1.0571733903286133, "step": 3957, "train/loss_ctc": 0.881220817565918, "train/loss_error": 0.5084797143936157, "train/loss_total": 0.5830279588699341 }, { "epoch": 1.0574405557039808, "step": 3958, "train/loss_ctc": 0.6052147150039673, "train/loss_error": 0.45990389585494995, "train/loss_total": 0.48896604776382446 }, { "epoch": 1.0577077210793482, "step": 3959, "train/loss_ctc": 0.7284175753593445, "train/loss_error": 0.5065963268280029, "train/loss_total": 0.5509606003761292 }, { "epoch": 1.0579748864547154, "grad_norm": 4.343263626098633, "learning_rate": 2.366016564253273e-05, "loss": 0.5382, "step": 3960 }, { "epoch": 1.0579748864547154, "step": 3960, "train/loss_ctc": 0.8836780786514282, "train/loss_error": 0.4724520742893219, "train/loss_total": 0.5546972751617432 }, { "epoch": 1.0582420518300828, "step": 3961, "train/loss_ctc": 0.6507118940353394, "train/loss_error": 0.528822660446167, "train/loss_total": 0.5532004833221436 }, { "epoch": 1.0585092172054502, "step": 3962, "train/loss_ctc": 0.7683864235877991, "train/loss_error": 0.5131793022155762, "train/loss_total": 0.5642207264900208 }, { "epoch": 1.0587763825808176, "step": 3963, "train/loss_ctc": 0.4928221106529236, "train/loss_error": 0.49376803636550903, "train/loss_total": 0.49357885122299194 }, { "epoch": 1.0590435479561848, "step": 3964, "train/loss_ctc": 1.0454046726226807, "train/loss_error": 0.44277864694595337, "train/loss_total": 0.5633038282394409 }, { "epoch": 1.0593107133315522, "step": 3965, "train/loss_ctc": 0.35787802934646606, "train/loss_error": 0.41764023900032043, "train/loss_total": 0.4056878089904785 }, { "epoch": 1.0595778787069197, "step": 3966, "train/loss_ctc": 0.4980515241622925, "train/loss_error": 0.48805680871009827, "train/loss_total": 0.49005573987960815 }, { "epoch": 1.0598450440822869, "step": 3967, "train/loss_ctc": 0.7270998954772949, 
"train/loss_error": 0.46412917971611023, "train/loss_total": 0.5167233347892761 }, { "epoch": 1.0601122094576543, "step": 3968, "train/loss_ctc": 0.7458951473236084, "train/loss_error": 0.4638698995113373, "train/loss_total": 0.5202749371528625 }, { "epoch": 1.0603793748330217, "step": 3969, "train/loss_ctc": 0.38461053371429443, "train/loss_error": 0.49159878492355347, "train/loss_total": 0.47020116448402405 }, { "epoch": 1.0606465402083889, "grad_norm": 2.321599006652832, "learning_rate": 2.364413572001069e-05, "loss": 0.5132, "step": 3970 }, { "epoch": 1.0606465402083889, "step": 3970, "train/loss_ctc": 0.4937571585178375, "train/loss_error": 0.52993243932724, "train/loss_total": 0.522697389125824 }, { "epoch": 1.0609137055837563, "step": 3971, "train/loss_ctc": 0.517194390296936, "train/loss_error": 0.4975987374782562, "train/loss_total": 0.5015178918838501 }, { "epoch": 1.0611808709591237, "step": 3972, "train/loss_ctc": 0.9597811698913574, "train/loss_error": 0.4314647912979126, "train/loss_total": 0.5371280908584595 }, { "epoch": 1.0614480363344911, "step": 3973, "train/loss_ctc": 1.1210018396377563, "train/loss_error": 0.516346275806427, "train/loss_total": 0.637277364730835 }, { "epoch": 1.0617152017098583, "step": 3974, "train/loss_ctc": 0.8408898115158081, "train/loss_error": 0.4726808965206146, "train/loss_total": 0.5463227033615112 }, { "epoch": 1.0619823670852258, "step": 3975, "train/loss_ctc": 0.7682973146438599, "train/loss_error": 0.5500010251998901, "train/loss_total": 0.593660295009613 }, { "epoch": 1.0622495324605932, "step": 3976, "train/loss_ctc": 1.0721943378448486, "train/loss_error": 0.5340829491615295, "train/loss_total": 0.6417052149772644 }, { "epoch": 1.0625166978359604, "step": 3977, "train/loss_ctc": 0.5951440334320068, "train/loss_error": 0.5097681283950806, "train/loss_total": 0.5268433094024658 }, { "epoch": 1.0627838632113278, "step": 3978, "train/loss_ctc": 0.9760894775390625, "train/loss_error": 0.5268452763557434, 
"train/loss_total": 0.6166940927505493 }, { "epoch": 1.0630510285866952, "step": 3979, "train/loss_ctc": 0.8728110790252686, "train/loss_error": 0.44385603070259094, "train/loss_total": 0.5296470522880554 }, { "epoch": 1.0633181939620626, "grad_norm": 1.2986767292022705, "learning_rate": 2.3628105797488647e-05, "loss": 0.5653, "step": 3980 }, { "epoch": 1.0633181939620626, "step": 3980, "train/loss_ctc": 0.8921992182731628, "train/loss_error": 0.45065298676490784, "train/loss_total": 0.5389622449874878 }, { "epoch": 1.0635853593374298, "step": 3981, "train/loss_ctc": 0.7619007229804993, "train/loss_error": 0.46175509691238403, "train/loss_total": 0.521784245967865 }, { "epoch": 1.0638525247127972, "step": 3982, "train/loss_ctc": 0.4537259340286255, "train/loss_error": 0.4721260666847229, "train/loss_total": 0.4684460461139679 }, { "epoch": 1.0641196900881646, "step": 3983, "train/loss_ctc": 0.6304725408554077, "train/loss_error": 0.504491925239563, "train/loss_total": 0.5296880602836609 }, { "epoch": 1.0643868554635318, "step": 3984, "train/loss_ctc": 0.7686970829963684, "train/loss_error": 0.48812729120254517, "train/loss_total": 0.5442412495613098 }, { "epoch": 1.0646540208388993, "step": 3985, "train/loss_ctc": 1.1811800003051758, "train/loss_error": 0.4813980162143707, "train/loss_total": 0.6213544607162476 }, { "epoch": 1.0649211862142667, "step": 3986, "train/loss_ctc": 1.071630835533142, "train/loss_error": 0.5222695469856262, "train/loss_total": 0.6321418285369873 }, { "epoch": 1.0651883515896339, "step": 3987, "train/loss_ctc": 0.6393519639968872, "train/loss_error": 0.4422829747200012, "train/loss_total": 0.4816967844963074 }, { "epoch": 1.0654555169650013, "step": 3988, "train/loss_ctc": 0.7280234098434448, "train/loss_error": 0.49461793899536133, "train/loss_total": 0.541299045085907 }, { "epoch": 1.0657226823403687, "step": 3989, "train/loss_ctc": 0.7500318884849548, "train/loss_error": 0.4932331144809723, "train/loss_total": 0.5445928573608398 }, { 
"epoch": 1.0659898477157361, "grad_norm": 1.8530948162078857, "learning_rate": 2.3612075874966605e-05, "loss": 0.5424, "step": 3990 }, { "epoch": 1.0659898477157361, "step": 3990, "train/loss_ctc": 0.5825475454330444, "train/loss_error": 0.43535101413726807, "train/loss_total": 0.46479034423828125 }, { "epoch": 1.0662570130911033, "step": 3991, "train/loss_ctc": 2.7078001499176025, "train/loss_error": 0.46728360652923584, "train/loss_total": 0.9153869152069092 }, { "epoch": 1.0665241784664707, "step": 3992, "train/loss_ctc": 2.184520721435547, "train/loss_error": 0.48863619565963745, "train/loss_total": 0.8278131484985352 }, { "epoch": 1.0667913438418382, "step": 3993, "train/loss_ctc": 0.7373758554458618, "train/loss_error": 0.5479316711425781, "train/loss_total": 0.5858204960823059 }, { "epoch": 1.0670585092172054, "step": 3994, "train/loss_ctc": 0.7608085870742798, "train/loss_error": 0.45688408613204956, "train/loss_total": 0.5176689624786377 }, { "epoch": 1.0673256745925728, "step": 3995, "train/loss_ctc": 1.3901433944702148, "train/loss_error": 0.49695920944213867, "train/loss_total": 0.6755960583686829 }, { "epoch": 1.0675928399679402, "step": 3996, "train/loss_ctc": 0.719897985458374, "train/loss_error": 0.45624059438705444, "train/loss_total": 0.5089720487594604 }, { "epoch": 1.0678600053433076, "step": 3997, "train/loss_ctc": 0.6905839443206787, "train/loss_error": 0.42134177684783936, "train/loss_total": 0.4751902222633362 }, { "epoch": 1.0681271707186748, "step": 3998, "train/loss_ctc": 0.4309443235397339, "train/loss_error": 0.5128576159477234, "train/loss_total": 0.4964749813079834 }, { "epoch": 1.0683943360940422, "step": 3999, "train/loss_ctc": 0.8560190796852112, "train/loss_error": 0.4727603793144226, "train/loss_total": 0.5494121313095093 }, { "epoch": 1.0686615014694096, "grad_norm": 1.6373684406280518, "learning_rate": 2.3596045952444563e-05, "loss": 0.6017, "step": 4000 }, { "epoch": 1.0686615014694096, "step": 4000, "train/loss_ctc": 
0.6261571049690247, "train/loss_error": 0.46136474609375, "train/loss_total": 0.4943232536315918 }, { "epoch": 1.0689286668447768, "step": 4001, "train/loss_ctc": 0.5720916390419006, "train/loss_error": 0.532444179058075, "train/loss_total": 0.540373682975769 }, { "epoch": 1.0691958322201442, "step": 4002, "train/loss_ctc": 0.7767926454544067, "train/loss_error": 0.4530806243419647, "train/loss_total": 0.5178230404853821 }, { "epoch": 1.0694629975955117, "step": 4003, "train/loss_ctc": 1.1637606620788574, "train/loss_error": 0.4726529121398926, "train/loss_total": 0.6108744740486145 }, { "epoch": 1.0697301629708789, "step": 4004, "train/loss_ctc": 0.925413966178894, "train/loss_error": 0.4724333584308624, "train/loss_total": 0.5630295276641846 }, { "epoch": 1.0699973283462463, "step": 4005, "train/loss_ctc": 1.0683622360229492, "train/loss_error": 0.4811023771762848, "train/loss_total": 0.5985543727874756 }, { "epoch": 1.0702644937216137, "step": 4006, "train/loss_ctc": 0.4732780158519745, "train/loss_error": 0.4532766342163086, "train/loss_total": 0.4572769105434418 }, { "epoch": 1.0705316590969811, "step": 4007, "train/loss_ctc": 0.5497657060623169, "train/loss_error": 0.4559321999549866, "train/loss_total": 0.47469890117645264 }, { "epoch": 1.0707988244723483, "step": 4008, "train/loss_ctc": 0.4384874701499939, "train/loss_error": 0.5222132802009583, "train/loss_total": 0.5054681301116943 }, { "epoch": 1.0710659898477157, "step": 4009, "train/loss_ctc": 1.4350179433822632, "train/loss_error": 0.4788969159126282, "train/loss_total": 0.6701211333274841 }, { "epoch": 1.0713331552230831, "grad_norm": 1.0815507173538208, "learning_rate": 2.3580016029922525e-05, "loss": 0.5433, "step": 4010 }, { "epoch": 1.0713331552230831, "step": 4010, "train/loss_ctc": 0.32344380021095276, "train/loss_error": 0.38020071387290955, "train/loss_total": 0.36884933710098267 }, { "epoch": 1.0716003205984503, "step": 4011, "train/loss_ctc": 0.5747629404067993, "train/loss_error": 
0.4589361548423767, "train/loss_total": 0.48210152983665466 }, { "epoch": 1.0718674859738178, "step": 4012, "train/loss_ctc": 0.8133499026298523, "train/loss_error": 0.545925498008728, "train/loss_total": 0.5994104146957397 }, { "epoch": 1.0721346513491852, "step": 4013, "train/loss_ctc": 0.701468288898468, "train/loss_error": 0.5002686381340027, "train/loss_total": 0.5405085682868958 }, { "epoch": 1.0724018167245526, "step": 4014, "train/loss_ctc": 0.7087960243225098, "train/loss_error": 0.4738996624946594, "train/loss_total": 0.5208789110183716 }, { "epoch": 1.0726689820999198, "step": 4015, "train/loss_ctc": 1.5213745832443237, "train/loss_error": 0.4681934714317322, "train/loss_total": 0.6788296699523926 }, { "epoch": 1.0729361474752872, "step": 4016, "train/loss_ctc": 0.5385354161262512, "train/loss_error": 0.5560930371284485, "train/loss_total": 0.5525815486907959 }, { "epoch": 1.0732033128506546, "step": 4017, "train/loss_ctc": 0.9418811202049255, "train/loss_error": 0.5081849694252014, "train/loss_total": 0.5949242115020752 }, { "epoch": 1.0734704782260218, "step": 4018, "train/loss_ctc": 0.7823328971862793, "train/loss_error": 0.49128222465515137, "train/loss_total": 0.549492359161377 }, { "epoch": 1.0737376436013892, "step": 4019, "train/loss_ctc": 0.4265379011631012, "train/loss_error": 0.43075981736183167, "train/loss_total": 0.4299154281616211 }, { "epoch": 1.0740048089767567, "grad_norm": 0.9808691143989563, "learning_rate": 2.3563986107400483e-05, "loss": 0.5317, "step": 4020 }, { "epoch": 1.0740048089767567, "step": 4020, "train/loss_ctc": 0.9216658473014832, "train/loss_error": 0.4597131907939911, "train/loss_total": 0.5521037578582764 }, { "epoch": 1.0742719743521238, "step": 4021, "train/loss_ctc": 0.713151752948761, "train/loss_error": 0.4573914110660553, "train/loss_total": 0.5085434913635254 }, { "epoch": 1.0745391397274913, "step": 4022, "train/loss_ctc": 0.5504316091537476, "train/loss_error": 0.508115828037262, "train/loss_total": 
0.5165790319442749 }, { "epoch": 1.0748063051028587, "step": 4023, "train/loss_ctc": 0.42533406615257263, "train/loss_error": 0.5153716206550598, "train/loss_total": 0.4973641335964203 }, { "epoch": 1.075073470478226, "step": 4024, "train/loss_ctc": 0.34272223711013794, "train/loss_error": 0.43945348262786865, "train/loss_total": 0.42010724544525146 }, { "epoch": 1.0753406358535933, "step": 4025, "train/loss_ctc": 0.429839551448822, "train/loss_error": 0.5572441816329956, "train/loss_total": 0.5317632555961609 }, { "epoch": 1.0756078012289607, "step": 4026, "train/loss_ctc": 1.4075498580932617, "train/loss_error": 0.5161591172218323, "train/loss_total": 0.6944372653961182 }, { "epoch": 1.0758749666043281, "step": 4027, "train/loss_ctc": 1.0373530387878418, "train/loss_error": 0.5313642621040344, "train/loss_total": 0.6325620412826538 }, { "epoch": 1.0761421319796955, "step": 4028, "train/loss_ctc": 0.6214345693588257, "train/loss_error": 0.4799136817455292, "train/loss_total": 0.5082178711891174 }, { "epoch": 1.0764092973550627, "step": 4029, "train/loss_ctc": 0.6313496828079224, "train/loss_error": 0.45709434151649475, "train/loss_total": 0.49194541573524475 }, { "epoch": 1.0766764627304302, "grad_norm": 1.6109498739242554, "learning_rate": 2.354795618487844e-05, "loss": 0.5354, "step": 4030 }, { "epoch": 1.0766764627304302, "step": 4030, "train/loss_ctc": 1.122514009475708, "train/loss_error": 0.46850812435150146, "train/loss_total": 0.5993093252182007 }, { "epoch": 1.0769436281057976, "step": 4031, "train/loss_ctc": 0.7033841609954834, "train/loss_error": 0.43954890966415405, "train/loss_total": 0.49231594800949097 }, { "epoch": 1.0772107934811648, "step": 4032, "train/loss_ctc": 1.8002569675445557, "train/loss_error": 0.5159604549407959, "train/loss_total": 0.7728197574615479 }, { "epoch": 1.0774779588565322, "step": 4033, "train/loss_ctc": 0.6206268072128296, "train/loss_error": 0.5221405625343323, "train/loss_total": 0.5418378114700317 }, { "epoch": 
1.0777451242318996, "step": 4034, "train/loss_ctc": 0.8108556270599365, "train/loss_error": 0.4570103585720062, "train/loss_total": 0.5277794599533081 }, { "epoch": 1.0780122896072668, "step": 4035, "train/loss_ctc": 0.9985663890838623, "train/loss_error": 0.475261390209198, "train/loss_total": 0.5799223780632019 }, { "epoch": 1.0782794549826342, "step": 4036, "train/loss_ctc": 0.5592210292816162, "train/loss_error": 0.4304838180541992, "train/loss_total": 0.4562312662601471 }, { "epoch": 1.0785466203580016, "step": 4037, "train/loss_ctc": 0.559866189956665, "train/loss_error": 0.4571859836578369, "train/loss_total": 0.47772204875946045 }, { "epoch": 1.078813785733369, "step": 4038, "train/loss_ctc": 1.0571599006652832, "train/loss_error": 0.4199487566947937, "train/loss_total": 0.5473909974098206 }, { "epoch": 1.0790809511087363, "step": 4039, "train/loss_ctc": 0.7849235534667969, "train/loss_error": 0.4374513328075409, "train/loss_total": 0.506945788860321 }, { "epoch": 1.0793481164841037, "grad_norm": 1.983157753944397, "learning_rate": 2.35319262623564e-05, "loss": 0.5502, "step": 4040 }, { "epoch": 1.0793481164841037, "step": 4040, "train/loss_ctc": 0.5317868590354919, "train/loss_error": 0.43269404768943787, "train/loss_total": 0.45251262187957764 }, { "epoch": 1.079615281859471, "step": 4041, "train/loss_ctc": 0.6026750206947327, "train/loss_error": 0.48688095808029175, "train/loss_total": 0.510039746761322 }, { "epoch": 1.0798824472348383, "step": 4042, "train/loss_ctc": 1.7160804271697998, "train/loss_error": 0.4354530870914459, "train/loss_total": 0.6915785670280457 }, { "epoch": 1.0801496126102057, "step": 4043, "train/loss_ctc": 0.953964352607727, "train/loss_error": 0.48935893177986145, "train/loss_total": 0.5822800397872925 }, { "epoch": 1.0804167779855731, "step": 4044, "train/loss_ctc": 0.8823087215423584, "train/loss_error": 0.5042880773544312, "train/loss_total": 0.5798922181129456 }, { "epoch": 1.0806839433609405, "step": 4045, "train/loss_ctc": 
0.8792533874511719, "train/loss_error": 0.5233381986618042, "train/loss_total": 0.5945212244987488 }, { "epoch": 1.0809511087363077, "step": 4046, "train/loss_ctc": 1.2260067462921143, "train/loss_error": 0.4960973858833313, "train/loss_total": 0.64207923412323 }, { "epoch": 1.0812182741116751, "step": 4047, "train/loss_ctc": 0.7569225430488586, "train/loss_error": 0.509710431098938, "train/loss_total": 0.5591528415679932 }, { "epoch": 1.0814854394870426, "step": 4048, "train/loss_ctc": 0.31063640117645264, "train/loss_error": 0.5104585886001587, "train/loss_total": 0.47049418091773987 }, { "epoch": 1.0817526048624098, "step": 4049, "train/loss_ctc": 0.8818739652633667, "train/loss_error": 0.534212052822113, "train/loss_total": 0.6037444472312927 }, { "epoch": 1.0820197702377772, "grad_norm": 3.1239535808563232, "learning_rate": 2.3515896339834357e-05, "loss": 0.5686, "step": 4050 }, { "epoch": 1.0820197702377772, "step": 4050, "train/loss_ctc": 0.5110620260238647, "train/loss_error": 0.5006783604621887, "train/loss_total": 0.5027551054954529 }, { "epoch": 1.0822869356131446, "step": 4051, "train/loss_ctc": 0.5715576410293579, "train/loss_error": 0.527625560760498, "train/loss_total": 0.5364120006561279 }, { "epoch": 1.0825541009885118, "step": 4052, "train/loss_ctc": 0.5943875908851624, "train/loss_error": 0.49714210629463196, "train/loss_total": 0.5165911912918091 }, { "epoch": 1.0828212663638792, "step": 4053, "train/loss_ctc": 0.48176753520965576, "train/loss_error": 0.46932947635650635, "train/loss_total": 0.4718170762062073 }, { "epoch": 1.0830884317392466, "step": 4054, "train/loss_ctc": 1.356268048286438, "train/loss_error": 0.47293227910995483, "train/loss_total": 0.6495994329452515 }, { "epoch": 1.083355597114614, "step": 4055, "train/loss_ctc": 1.2667524814605713, "train/loss_error": 0.47844111919403076, "train/loss_total": 0.6361033916473389 }, { "epoch": 1.0836227624899812, "step": 4056, "train/loss_ctc": 0.7278517484664917, "train/loss_error": 
0.48789578676223755, "train/loss_total": 0.5358870029449463 }, { "epoch": 1.0838899278653487, "step": 4057, "train/loss_ctc": 1.6438381671905518, "train/loss_error": 0.4840584993362427, "train/loss_total": 0.7160144448280334 }, { "epoch": 1.084157093240716, "step": 4058, "train/loss_ctc": 0.690217912197113, "train/loss_error": 0.47056469321250916, "train/loss_total": 0.5144953727722168 }, { "epoch": 1.0844242586160833, "step": 4059, "train/loss_ctc": 0.28835970163345337, "train/loss_error": 0.5049881339073181, "train/loss_total": 0.4616624414920807 }, { "epoch": 1.0846914239914507, "grad_norm": 1.4330370426177979, "learning_rate": 2.3499866417312315e-05, "loss": 0.5541, "step": 4060 }, { "epoch": 1.0846914239914507, "step": 4060, "train/loss_ctc": 0.6762321591377258, "train/loss_error": 0.47566142678260803, "train/loss_total": 0.5157755613327026 }, { "epoch": 1.084958589366818, "step": 4061, "train/loss_ctc": 0.5310882329940796, "train/loss_error": 0.46194207668304443, "train/loss_total": 0.47577130794525146 }, { "epoch": 1.0852257547421855, "step": 4062, "train/loss_ctc": 0.4908253252506256, "train/loss_error": 0.47527623176574707, "train/loss_total": 0.4783860445022583 }, { "epoch": 1.0854929201175527, "step": 4063, "train/loss_ctc": 0.8473924398422241, "train/loss_error": 0.4892808198928833, "train/loss_total": 0.5609031319618225 }, { "epoch": 1.0857600854929201, "step": 4064, "train/loss_ctc": 0.6794527769088745, "train/loss_error": 0.4390558898448944, "train/loss_total": 0.48713529109954834 }, { "epoch": 1.0860272508682876, "step": 4065, "train/loss_ctc": 0.6167012453079224, "train/loss_error": 0.499001145362854, "train/loss_total": 0.5225411653518677 }, { "epoch": 1.0862944162436547, "step": 4066, "train/loss_ctc": 0.6985457539558411, "train/loss_error": 0.43406420946121216, "train/loss_total": 0.4869605302810669 }, { "epoch": 1.0865615816190222, "step": 4067, "train/loss_ctc": 0.83387291431427, "train/loss_error": 0.5135518908500671, "train/loss_total": 
0.5776160955429077 }, { "epoch": 1.0868287469943896, "step": 4068, "train/loss_ctc": 0.4180322289466858, "train/loss_error": 0.5033058524131775, "train/loss_total": 0.4862511456012726 }, { "epoch": 1.0870959123697568, "step": 4069, "train/loss_ctc": 0.786227285861969, "train/loss_error": 0.515293538570404, "train/loss_total": 0.569480299949646 }, { "epoch": 1.0873630777451242, "grad_norm": 4.454905033111572, "learning_rate": 2.3483836494790277e-05, "loss": 0.5161, "step": 4070 }, { "epoch": 1.0873630777451242, "step": 4070, "train/loss_ctc": 0.37937289476394653, "train/loss_error": 0.4326420724391937, "train/loss_total": 0.42198824882507324 }, { "epoch": 1.0876302431204916, "step": 4071, "train/loss_ctc": 0.43662673234939575, "train/loss_error": 0.4752483069896698, "train/loss_total": 0.467523992061615 }, { "epoch": 1.087897408495859, "step": 4072, "train/loss_ctc": 0.8140741586685181, "train/loss_error": 0.38896188139915466, "train/loss_total": 0.47398436069488525 }, { "epoch": 1.0881645738712262, "step": 4073, "train/loss_ctc": 0.9895976781845093, "train/loss_error": 0.5145998001098633, "train/loss_total": 0.6095993518829346 }, { "epoch": 1.0884317392465936, "step": 4074, "train/loss_ctc": 0.5661169290542603, "train/loss_error": 0.5352879166603088, "train/loss_total": 0.5414537191390991 }, { "epoch": 1.088698904621961, "step": 4075, "train/loss_ctc": 1.9910773038864136, "train/loss_error": 0.45410430431365967, "train/loss_total": 0.7614989280700684 }, { "epoch": 1.0889660699973283, "step": 4076, "train/loss_ctc": 1.259139060974121, "train/loss_error": 0.4698810875415802, "train/loss_total": 0.6277326941490173 }, { "epoch": 1.0892332353726957, "step": 4077, "train/loss_ctc": 0.5885052680969238, "train/loss_error": 0.43412500619888306, "train/loss_total": 0.46500107645988464 }, { "epoch": 1.089500400748063, "step": 4078, "train/loss_ctc": 1.3433306217193604, "train/loss_error": 0.5196145176887512, "train/loss_total": 0.684357762336731 }, { "epoch": 
1.0897675661234305, "step": 4079, "train/loss_ctc": 0.3689877986907959, "train/loss_error": 0.42836517095565796, "train/loss_total": 0.41648969054222107 }, { "epoch": 1.0900347314987977, "grad_norm": 2.3479902744293213, "learning_rate": 2.3467806572268235e-05, "loss": 0.547, "step": 4080 }, { "epoch": 1.0900347314987977, "step": 4080, "train/loss_ctc": 0.39489811658859253, "train/loss_error": 0.5425366163253784, "train/loss_total": 0.5130089521408081 }, { "epoch": 1.0903018968741651, "step": 4081, "train/loss_ctc": 0.9281152486801147, "train/loss_error": 0.4423232972621918, "train/loss_total": 0.5394816994667053 }, { "epoch": 1.0905690622495325, "step": 4082, "train/loss_ctc": 2.1691653728485107, "train/loss_error": 0.49407023191452026, "train/loss_total": 0.8290892839431763 }, { "epoch": 1.0908362276248997, "step": 4083, "train/loss_ctc": 0.9243786334991455, "train/loss_error": 0.4972345530986786, "train/loss_total": 0.5826634168624878 }, { "epoch": 1.0911033930002672, "step": 4084, "train/loss_ctc": 0.5556583404541016, "train/loss_error": 0.46533599495887756, "train/loss_total": 0.48340046405792236 }, { "epoch": 1.0913705583756346, "step": 4085, "train/loss_ctc": 0.741701602935791, "train/loss_error": 0.44191086292266846, "train/loss_total": 0.5018690228462219 }, { "epoch": 1.0916377237510018, "step": 4086, "train/loss_ctc": 0.5629839301109314, "train/loss_error": 0.4654320180416107, "train/loss_total": 0.48494240641593933 }, { "epoch": 1.0919048891263692, "step": 4087, "train/loss_ctc": 0.7525678873062134, "train/loss_error": 0.4319656789302826, "train/loss_total": 0.49608612060546875 }, { "epoch": 1.0921720545017366, "step": 4088, "train/loss_ctc": 1.104546070098877, "train/loss_error": 0.41147053241729736, "train/loss_total": 0.5500856637954712 }, { "epoch": 1.092439219877104, "step": 4089, "train/loss_ctc": 3.0052995681762695, "train/loss_error": 0.5133993029594421, "train/loss_total": 1.0117793083190918 }, { "epoch": 1.0927063852524712, "grad_norm": 
2.9422719478607178, "learning_rate": 2.3451776649746193e-05, "loss": 0.5992, "step": 4090 }, { "epoch": 1.0927063852524712, "step": 4090, "train/loss_ctc": 1.263988971710205, "train/loss_error": 0.48548635840415955, "train/loss_total": 0.6411868929862976 }, { "epoch": 1.0929735506278386, "step": 4091, "train/loss_ctc": 0.7756132483482361, "train/loss_error": 0.4828137755393982, "train/loss_total": 0.5413736701011658 }, { "epoch": 1.093240716003206, "step": 4092, "train/loss_ctc": 0.6743909120559692, "train/loss_error": 0.5074986219406128, "train/loss_total": 0.540877103805542 }, { "epoch": 1.0935078813785732, "step": 4093, "train/loss_ctc": 1.2732939720153809, "train/loss_error": 0.47058212757110596, "train/loss_total": 0.6311244964599609 }, { "epoch": 1.0937750467539407, "step": 4094, "train/loss_ctc": 0.5439249277114868, "train/loss_error": 0.5220687389373779, "train/loss_total": 0.5264399647712708 }, { "epoch": 1.094042212129308, "step": 4095, "train/loss_ctc": 0.2783878445625305, "train/loss_error": 0.4377533793449402, "train/loss_total": 0.40588027238845825 }, { "epoch": 1.0943093775046755, "step": 4096, "train/loss_ctc": 0.43739408254623413, "train/loss_error": 0.4977499544620514, "train/loss_total": 0.4856787919998169 }, { "epoch": 1.0945765428800427, "step": 4097, "train/loss_ctc": 0.5093803405761719, "train/loss_error": 0.44016167521476746, "train/loss_total": 0.4540054202079773 }, { "epoch": 1.09484370825541, "step": 4098, "train/loss_ctc": 0.7072322368621826, "train/loss_error": 0.4930780231952667, "train/loss_total": 0.5359088778495789 }, { "epoch": 1.0951108736307775, "step": 4099, "train/loss_ctc": 1.3672423362731934, "train/loss_error": 0.4893355965614319, "train/loss_total": 0.6649169921875 }, { "epoch": 1.0953780390061447, "grad_norm": 2.0622925758361816, "learning_rate": 2.343574672722415e-05, "loss": 0.5427, "step": 4100 }, { "epoch": 1.0953780390061447, "step": 4100, "train/loss_ctc": 0.6088579297065735, "train/loss_error": 0.5177466869354248, 
"train/loss_total": 0.5359689593315125 }, { "epoch": 1.0956452043815121, "step": 4101, "train/loss_ctc": 0.5195111036300659, "train/loss_error": 0.47504010796546936, "train/loss_total": 0.48393431305885315 }, { "epoch": 1.0959123697568796, "step": 4102, "train/loss_ctc": 1.3251655101776123, "train/loss_error": 0.4702300727367401, "train/loss_total": 0.6412171721458435 }, { "epoch": 1.0961795351322468, "step": 4103, "train/loss_ctc": 0.5754446387290955, "train/loss_error": 0.4616551399230957, "train/loss_total": 0.4844130575656891 }, { "epoch": 1.0964467005076142, "step": 4104, "train/loss_ctc": 0.5152000188827515, "train/loss_error": 0.5406096577644348, "train/loss_total": 0.5355277061462402 }, { "epoch": 1.0967138658829816, "step": 4105, "train/loss_ctc": 0.5844569206237793, "train/loss_error": 0.47803258895874023, "train/loss_total": 0.499317467212677 }, { "epoch": 1.096981031258349, "step": 4106, "train/loss_ctc": 1.0545768737792969, "train/loss_error": 0.46741724014282227, "train/loss_total": 0.5848491787910461 }, { "epoch": 1.0972481966337162, "step": 4107, "train/loss_ctc": 1.2254836559295654, "train/loss_error": 0.4646807312965393, "train/loss_total": 0.6168413162231445 }, { "epoch": 1.0975153620090836, "step": 4108, "train/loss_ctc": 1.039105772972107, "train/loss_error": 0.47548040747642517, "train/loss_total": 0.5882054567337036 }, { "epoch": 1.097782527384451, "step": 4109, "train/loss_ctc": 1.6782288551330566, "train/loss_error": 0.5237209796905518, "train/loss_total": 0.7546225786209106 }, { "epoch": 1.0980496927598182, "grad_norm": 1.7408257722854614, "learning_rate": 2.341971680470211e-05, "loss": 0.5725, "step": 4110 }, { "epoch": 1.0980496927598182, "step": 4110, "train/loss_ctc": 0.7614063620567322, "train/loss_error": 0.440796822309494, "train/loss_total": 0.5049187541007996 }, { "epoch": 1.0983168581351856, "step": 4111, "train/loss_ctc": 1.0371787548065186, "train/loss_error": 0.5062233209609985, "train/loss_total": 0.6124144196510315 }, { 
"epoch": 1.098584023510553, "step": 4112, "train/loss_ctc": 1.2699320316314697, "train/loss_error": 0.4968164563179016, "train/loss_total": 0.6514395475387573 }, { "epoch": 1.0988511888859205, "step": 4113, "train/loss_ctc": 1.1360623836517334, "train/loss_error": 0.4525836408138275, "train/loss_total": 0.5892794132232666 }, { "epoch": 1.0991183542612877, "step": 4114, "train/loss_ctc": 0.775078296661377, "train/loss_error": 0.4803180396556854, "train/loss_total": 0.5392701029777527 }, { "epoch": 1.099385519636655, "step": 4115, "train/loss_ctc": 0.8776316046714783, "train/loss_error": 0.5094423890113831, "train/loss_total": 0.5830802321434021 }, { "epoch": 1.0996526850120225, "step": 4116, "train/loss_ctc": 1.59911048412323, "train/loss_error": 0.5225998759269714, "train/loss_total": 0.737902045249939 }, { "epoch": 1.0999198503873897, "step": 4117, "train/loss_ctc": 0.8790210485458374, "train/loss_error": 0.4906367361545563, "train/loss_total": 0.5683135986328125 }, { "epoch": 1.1001870157627571, "step": 4118, "train/loss_ctc": 0.46898701786994934, "train/loss_error": 0.4552317261695862, "train/loss_total": 0.4579828083515167 }, { "epoch": 1.1004541811381245, "step": 4119, "train/loss_ctc": 1.0044351816177368, "train/loss_error": 0.4461308717727661, "train/loss_total": 0.5577917695045471 }, { "epoch": 1.1007213465134917, "grad_norm": 1.0616759061813354, "learning_rate": 2.3403686882180067e-05, "loss": 0.5802, "step": 4120 }, { "epoch": 1.1007213465134917, "step": 4120, "train/loss_ctc": 0.40985608100891113, "train/loss_error": 0.4436219036579132, "train/loss_total": 0.4368687570095062 }, { "epoch": 1.1009885118888592, "step": 4121, "train/loss_ctc": 1.7554949522018433, "train/loss_error": 0.47742563486099243, "train/loss_total": 0.7330394983291626 }, { "epoch": 1.1012556772642266, "step": 4122, "train/loss_ctc": 0.6485175490379333, "train/loss_error": 0.4451455771923065, "train/loss_total": 0.4858199954032898 }, { "epoch": 1.101522842639594, "step": 4123, 
"train/loss_ctc": 0.3674405515193939, "train/loss_error": 0.5221195816993713, "train/loss_total": 0.4911837875843048 }, { "epoch": 1.1017900080149612, "step": 4124, "train/loss_ctc": 0.5030428767204285, "train/loss_error": 0.49962806701660156, "train/loss_total": 0.500311017036438 }, { "epoch": 1.1020571733903286, "step": 4125, "train/loss_ctc": 0.690933883190155, "train/loss_error": 0.42251020669937134, "train/loss_total": 0.47619494795799255 }, { "epoch": 1.102324338765696, "step": 4126, "train/loss_ctc": 1.6406946182250977, "train/loss_error": 0.5204333066940308, "train/loss_total": 0.7444855570793152 }, { "epoch": 1.1025915041410632, "step": 4127, "train/loss_ctc": 1.1232993602752686, "train/loss_error": 0.5136857032775879, "train/loss_total": 0.635608434677124 }, { "epoch": 1.1028586695164306, "step": 4128, "train/loss_ctc": 0.9727469682693481, "train/loss_error": 0.4229872226715088, "train/loss_total": 0.5329391956329346 }, { "epoch": 1.103125834891798, "step": 4129, "train/loss_ctc": 1.1004765033721924, "train/loss_error": 0.4764114320278168, "train/loss_total": 0.601224422454834 }, { "epoch": 1.1033930002671655, "grad_norm": 1.5485949516296387, "learning_rate": 2.3387656959658032e-05, "loss": 0.5638, "step": 4130 }, { "epoch": 1.1033930002671655, "step": 4130, "train/loss_ctc": 0.4291761815547943, "train/loss_error": 0.49235180020332336, "train/loss_total": 0.4797166883945465 }, { "epoch": 1.1036601656425327, "step": 4131, "train/loss_ctc": 0.394853800535202, "train/loss_error": 0.4673583507537842, "train/loss_total": 0.45285743474960327 }, { "epoch": 1.1039273310179, "step": 4132, "train/loss_ctc": 0.6069410443305969, "train/loss_error": 0.4464445114135742, "train/loss_total": 0.47854381799697876 }, { "epoch": 1.1041944963932675, "step": 4133, "train/loss_ctc": 0.5566999912261963, "train/loss_error": 0.5534191131591797, "train/loss_total": 0.554075300693512 }, { "epoch": 1.1044616617686347, "step": 4134, "train/loss_ctc": 0.8342275023460388, 
"train/loss_error": 0.4724768400192261, "train/loss_total": 0.5448269844055176 }, { "epoch": 1.1047288271440021, "step": 4135, "train/loss_ctc": 0.6184117197990417, "train/loss_error": 0.49050581455230713, "train/loss_total": 0.516086995601654 }, { "epoch": 1.1049959925193695, "step": 4136, "train/loss_ctc": 0.6911575794219971, "train/loss_error": 0.44556060433387756, "train/loss_total": 0.4946800172328949 }, { "epoch": 1.1052631578947367, "step": 4137, "train/loss_ctc": 1.012676477432251, "train/loss_error": 0.5773031115531921, "train/loss_total": 0.6643778085708618 }, { "epoch": 1.1055303232701041, "step": 4138, "train/loss_ctc": 0.5962692499160767, "train/loss_error": 0.4581282138824463, "train/loss_total": 0.48575642704963684 }, { "epoch": 1.1057974886454716, "step": 4139, "train/loss_ctc": 0.8303003311157227, "train/loss_error": 0.42850250005722046, "train/loss_total": 0.5088620781898499 }, { "epoch": 1.106064654020839, "grad_norm": 1.1051344871520996, "learning_rate": 2.337162703713599e-05, "loss": 0.518, "step": 4140 }, { "epoch": 1.106064654020839, "step": 4140, "train/loss_ctc": 0.46944478154182434, "train/loss_error": 0.44572535157203674, "train/loss_total": 0.4504692554473877 }, { "epoch": 1.1063318193962062, "step": 4141, "train/loss_ctc": 1.8306387662887573, "train/loss_error": 0.452465683221817, "train/loss_total": 0.7281002998352051 }, { "epoch": 1.1065989847715736, "step": 4142, "train/loss_ctc": 1.5569674968719482, "train/loss_error": 0.48704296350479126, "train/loss_total": 0.7010278701782227 }, { "epoch": 1.106866150146941, "step": 4143, "train/loss_ctc": 1.1791462898254395, "train/loss_error": 0.48110905289649963, "train/loss_total": 0.6207165122032166 }, { "epoch": 1.1071333155223084, "step": 4144, "train/loss_ctc": 0.5391550064086914, "train/loss_error": 0.42281752824783325, "train/loss_total": 0.44608503580093384 }, { "epoch": 1.1074004808976756, "step": 4145, "train/loss_ctc": 1.181970238685608, "train/loss_error": 0.4130021035671234, 
"train/loss_total": 0.5667957067489624 }, { "epoch": 1.107667646273043, "step": 4146, "train/loss_ctc": 0.8203201293945312, "train/loss_error": 0.6045330762863159, "train/loss_total": 0.64769047498703 }, { "epoch": 1.1079348116484105, "step": 4147, "train/loss_ctc": 1.0229171514511108, "train/loss_error": 0.42257797718048096, "train/loss_total": 0.5426458120346069 }, { "epoch": 1.1082019770237777, "step": 4148, "train/loss_ctc": 0.9070612788200378, "train/loss_error": 0.4571187496185303, "train/loss_total": 0.5471072793006897 }, { "epoch": 1.108469142399145, "step": 4149, "train/loss_ctc": 0.9253426194190979, "train/loss_error": 0.5201066136360168, "train/loss_total": 0.6011538505554199 }, { "epoch": 1.1087363077745125, "grad_norm": 1.834130048751831, "learning_rate": 2.3355597114613948e-05, "loss": 0.5852, "step": 4150 }, { "epoch": 1.1087363077745125, "step": 4150, "train/loss_ctc": 0.626744270324707, "train/loss_error": 0.44680508971214294, "train/loss_total": 0.4827929139137268 }, { "epoch": 1.1090034731498797, "step": 4151, "train/loss_ctc": 1.5161617994308472, "train/loss_error": 0.45000872015953064, "train/loss_total": 0.6632393598556519 }, { "epoch": 1.109270638525247, "step": 4152, "train/loss_ctc": 0.5956822633743286, "train/loss_error": 0.4129174053668976, "train/loss_total": 0.4494703710079193 }, { "epoch": 1.1095378039006145, "step": 4153, "train/loss_ctc": 0.4822978377342224, "train/loss_error": 0.4898049533367157, "train/loss_total": 0.488303542137146 }, { "epoch": 1.109804969275982, "step": 4154, "train/loss_ctc": 0.4696308970451355, "train/loss_error": 0.514621913433075, "train/loss_total": 0.5056236982345581 }, { "epoch": 1.1100721346513491, "step": 4155, "train/loss_ctc": 0.9492930769920349, "train/loss_error": 0.4241480529308319, "train/loss_total": 0.5291770696640015 }, { "epoch": 1.1103393000267165, "step": 4156, "train/loss_ctc": 0.9384521245956421, "train/loss_error": 0.5249155163764954, "train/loss_total": 0.6076228618621826 }, { "epoch": 
1.110606465402084, "step": 4157, "train/loss_ctc": 1.1318020820617676, "train/loss_error": 0.500741183757782, "train/loss_total": 0.6269533634185791 }, { "epoch": 1.1108736307774512, "step": 4158, "train/loss_ctc": 0.6018174886703491, "train/loss_error": 0.5202798247337341, "train/loss_total": 0.5365873575210571 }, { "epoch": 1.1111407961528186, "step": 4159, "train/loss_ctc": 0.7162251472473145, "train/loss_error": 0.4448534846305847, "train/loss_total": 0.4991278052330017 }, { "epoch": 1.111407961528186, "grad_norm": 1.6077983379364014, "learning_rate": 2.3339567192091906e-05, "loss": 0.5389, "step": 4160 }, { "epoch": 1.111407961528186, "step": 4160, "train/loss_ctc": 1.0212754011154175, "train/loss_error": 0.429276704788208, "train/loss_total": 0.5476764440536499 }, { "epoch": 1.1116751269035534, "step": 4161, "train/loss_ctc": 0.8138039112091064, "train/loss_error": 0.4667505621910095, "train/loss_total": 0.5361612439155579 }, { "epoch": 1.1119422922789206, "step": 4162, "train/loss_ctc": 0.9846152663230896, "train/loss_error": 0.4744719862937927, "train/loss_total": 0.576500654220581 }, { "epoch": 1.112209457654288, "step": 4163, "train/loss_ctc": 0.6593990325927734, "train/loss_error": 0.5130373239517212, "train/loss_total": 0.5423096418380737 }, { "epoch": 1.1124766230296554, "step": 4164, "train/loss_ctc": 0.7665528059005737, "train/loss_error": 0.49551594257354736, "train/loss_total": 0.5497233271598816 }, { "epoch": 1.1127437884050226, "step": 4165, "train/loss_ctc": 0.7226192951202393, "train/loss_error": 0.5568665266036987, "train/loss_total": 0.5900170803070068 }, { "epoch": 1.11301095378039, "step": 4166, "train/loss_ctc": 0.7595936059951782, "train/loss_error": 0.44658389687538147, "train/loss_total": 0.5091858506202698 }, { "epoch": 1.1132781191557575, "step": 4167, "train/loss_ctc": 0.6623225212097168, "train/loss_error": 0.5141140222549438, "train/loss_total": 0.5437557101249695 }, { "epoch": 1.1135452845311247, "step": 4168, "train/loss_ctc": 
0.8431301116943359, "train/loss_error": 0.5452471375465393, "train/loss_total": 0.6048237681388855 }, { "epoch": 1.113812449906492, "step": 4169, "train/loss_ctc": 0.4949721097946167, "train/loss_error": 0.4173976480960846, "train/loss_total": 0.43291255831718445 }, { "epoch": 1.1140796152818595, "grad_norm": 1.8326530456542969, "learning_rate": 2.3323537269569864e-05, "loss": 0.5433, "step": 4170 }, { "epoch": 1.1140796152818595, "step": 4170, "train/loss_ctc": 0.4749676585197449, "train/loss_error": 0.4465157091617584, "train/loss_total": 0.4522061049938202 }, { "epoch": 1.114346780657227, "step": 4171, "train/loss_ctc": 1.193343162536621, "train/loss_error": 0.5041970014572144, "train/loss_total": 0.6420262455940247 }, { "epoch": 1.1146139460325941, "step": 4172, "train/loss_ctc": 0.6001145243644714, "train/loss_error": 0.41212719678878784, "train/loss_total": 0.4497246742248535 }, { "epoch": 1.1148811114079615, "step": 4173, "train/loss_ctc": 0.978007972240448, "train/loss_error": 0.4373131990432739, "train/loss_total": 0.5454521775245667 }, { "epoch": 1.115148276783329, "step": 4174, "train/loss_ctc": 0.713455080986023, "train/loss_error": 0.4650687873363495, "train/loss_total": 0.5147460699081421 }, { "epoch": 1.1154154421586961, "step": 4175, "train/loss_ctc": 1.3037137985229492, "train/loss_error": 0.506574809551239, "train/loss_total": 0.666002631187439 }, { "epoch": 1.1156826075340636, "step": 4176, "train/loss_ctc": 0.7570719718933105, "train/loss_error": 0.41894230246543884, "train/loss_total": 0.48656824231147766 }, { "epoch": 1.115949772909431, "step": 4177, "train/loss_ctc": 1.0714747905731201, "train/loss_error": 0.4284859299659729, "train/loss_total": 0.5570837259292603 }, { "epoch": 1.1162169382847984, "step": 4178, "train/loss_ctc": 0.7221508026123047, "train/loss_error": 0.4587979316711426, "train/loss_total": 0.5114685297012329 }, { "epoch": 1.1164841036601656, "step": 4179, "train/loss_ctc": 1.0024300813674927, "train/loss_error": 
0.5086898803710938, "train/loss_total": 0.6074379086494446 }, { "epoch": 1.116751269035533, "grad_norm": 1.922996163368225, "learning_rate": 2.3307507347047822e-05, "loss": 0.5433, "step": 4180 }, { "epoch": 1.116751269035533, "step": 4180, "train/loss_ctc": 0.4825443625450134, "train/loss_error": 0.3967586159706116, "train/loss_total": 0.413915753364563 }, { "epoch": 1.1170184344109004, "step": 4181, "train/loss_ctc": 1.5914188623428345, "train/loss_error": 0.48247599601745605, "train/loss_total": 0.7042645812034607 }, { "epoch": 1.1172855997862676, "step": 4182, "train/loss_ctc": 1.1363370418548584, "train/loss_error": 0.4785618782043457, "train/loss_total": 0.6101169586181641 }, { "epoch": 1.117552765161635, "step": 4183, "train/loss_ctc": 0.6723377704620361, "train/loss_error": 0.5014400482177734, "train/loss_total": 0.5356196165084839 }, { "epoch": 1.1178199305370025, "step": 4184, "train/loss_ctc": 1.0955381393432617, "train/loss_error": 0.43191617727279663, "train/loss_total": 0.5646405816078186 }, { "epoch": 1.1180870959123697, "step": 4185, "train/loss_ctc": 0.8748855590820312, "train/loss_error": 0.4206335246562958, "train/loss_total": 0.511483907699585 }, { "epoch": 1.118354261287737, "step": 4186, "train/loss_ctc": 0.7662567496299744, "train/loss_error": 0.5289186835289001, "train/loss_total": 0.5763863325119019 }, { "epoch": 1.1186214266631045, "step": 4187, "train/loss_ctc": 0.6799755096435547, "train/loss_error": 0.5333389639854431, "train/loss_total": 0.5626662969589233 }, { "epoch": 1.118888592038472, "step": 4188, "train/loss_ctc": 0.5650357007980347, "train/loss_error": 0.446769654750824, "train/loss_total": 0.4704228639602661 }, { "epoch": 1.119155757413839, "step": 4189, "train/loss_ctc": 0.4371787905693054, "train/loss_error": 0.4258497953414917, "train/loss_total": 0.4281156063079834 }, { "epoch": 1.1194229227892065, "grad_norm": 1.688066005706787, "learning_rate": 2.3291477424525784e-05, "loss": 0.5378, "step": 4190 }, { "epoch": 
1.1194229227892065, "step": 4190, "train/loss_ctc": 0.3650788366794586, "train/loss_error": 0.41543564200401306, "train/loss_total": 0.4053643047809601 }, { "epoch": 1.119690088164574, "step": 4191, "train/loss_ctc": 1.06241774559021, "train/loss_error": 0.4452294111251831, "train/loss_total": 0.5686670541763306 }, { "epoch": 1.1199572535399411, "step": 4192, "train/loss_ctc": 0.9922195672988892, "train/loss_error": 0.48174208402633667, "train/loss_total": 0.583837628364563 }, { "epoch": 1.1202244189153086, "step": 4193, "train/loss_ctc": 0.9032274484634399, "train/loss_error": 0.4762781262397766, "train/loss_total": 0.5616680383682251 }, { "epoch": 1.120491584290676, "step": 4194, "train/loss_ctc": 0.4986848533153534, "train/loss_error": 0.4912053346633911, "train/loss_total": 0.4927012324333191 }, { "epoch": 1.1207587496660434, "step": 4195, "train/loss_ctc": 1.2535759210586548, "train/loss_error": 0.5207884311676025, "train/loss_total": 0.6673459410667419 }, { "epoch": 1.1210259150414106, "step": 4196, "train/loss_ctc": 0.4554184079170227, "train/loss_error": 0.49426138401031494, "train/loss_total": 0.486492782831192 }, { "epoch": 1.121293080416778, "step": 4197, "train/loss_ctc": 0.7271476984024048, "train/loss_error": 0.5280622839927673, "train/loss_total": 0.5678793787956238 }, { "epoch": 1.1215602457921454, "step": 4198, "train/loss_ctc": 0.7785462737083435, "train/loss_error": 0.4869374632835388, "train/loss_total": 0.5452592372894287 }, { "epoch": 1.1218274111675126, "step": 4199, "train/loss_ctc": 0.6838666200637817, "train/loss_error": 0.4730164706707001, "train/loss_total": 0.5151864886283875 }, { "epoch": 1.12209457654288, "grad_norm": 3.5419907569885254, "learning_rate": 2.3275447502003742e-05, "loss": 0.5394, "step": 4200 }, { "epoch": 1.12209457654288, "step": 4200, "train/loss_ctc": 0.6455404758453369, "train/loss_error": 0.5034217238426208, "train/loss_total": 0.5318454504013062 }, { "epoch": 1.1223617419182474, "step": 4201, "train/loss_ctc": 
0.4776611030101776, "train/loss_error": 0.42023229598999023, "train/loss_total": 0.43171805143356323 }, { "epoch": 1.1226289072936146, "step": 4202, "train/loss_ctc": 0.46724367141723633, "train/loss_error": 0.4477772116661072, "train/loss_total": 0.45167049765586853 }, { "epoch": 1.122896072668982, "step": 4203, "train/loss_ctc": 0.8794301748275757, "train/loss_error": 0.5006127953529358, "train/loss_total": 0.5763763189315796 }, { "epoch": 1.1231632380443495, "step": 4204, "train/loss_ctc": 1.2057584524154663, "train/loss_error": 0.5170941352844238, "train/loss_total": 0.6548269987106323 }, { "epoch": 1.123430403419717, "step": 4205, "train/loss_ctc": 0.689227819442749, "train/loss_error": 0.4565238356590271, "train/loss_total": 0.5030646324157715 }, { "epoch": 1.123697568795084, "step": 4206, "train/loss_ctc": 1.192626953125, "train/loss_error": 0.46766284108161926, "train/loss_total": 0.6126556396484375 }, { "epoch": 1.1239647341704515, "step": 4207, "train/loss_ctc": 0.9353954195976257, "train/loss_error": 0.5030803084373474, "train/loss_total": 0.589543342590332 }, { "epoch": 1.124231899545819, "step": 4208, "train/loss_ctc": 0.6532875299453735, "train/loss_error": 0.443053275346756, "train/loss_total": 0.4851001501083374 }, { "epoch": 1.1244990649211861, "step": 4209, "train/loss_ctc": 0.7397526502609253, "train/loss_error": 0.5090292692184448, "train/loss_total": 0.5551739931106567 }, { "epoch": 1.1247662302965535, "grad_norm": 1.4463286399841309, "learning_rate": 2.32594175794817e-05, "loss": 0.5392, "step": 4210 }, { "epoch": 1.1247662302965535, "step": 4210, "train/loss_ctc": 0.8144327402114868, "train/loss_error": 0.5064849257469177, "train/loss_total": 0.5680744647979736 }, { "epoch": 1.125033395671921, "step": 4211, "train/loss_ctc": 0.8458113670349121, "train/loss_error": 0.5100510716438293, "train/loss_total": 0.5772031545639038 }, { "epoch": 1.1253005610472884, "step": 4212, "train/loss_ctc": 0.666166365146637, "train/loss_error": 
0.4340866506099701, "train/loss_total": 0.4805026054382324 }, { "epoch": 1.1255677264226556, "step": 4213, "train/loss_ctc": 0.5893436074256897, "train/loss_error": 0.48112261295318604, "train/loss_total": 0.5027668476104736 }, { "epoch": 1.125834891798023, "step": 4214, "train/loss_ctc": 0.6887127161026001, "train/loss_error": 0.42116180062294006, "train/loss_total": 0.47467198967933655 }, { "epoch": 1.1261020571733904, "step": 4215, "train/loss_ctc": 0.8326309323310852, "train/loss_error": 0.4207172393798828, "train/loss_total": 0.5030999779701233 }, { "epoch": 1.1263692225487576, "step": 4216, "train/loss_ctc": 0.7855366468429565, "train/loss_error": 0.40472397208213806, "train/loss_total": 0.4808865189552307 }, { "epoch": 1.126636387924125, "step": 4217, "train/loss_ctc": 0.45639294385910034, "train/loss_error": 0.46432730555534363, "train/loss_total": 0.462740421295166 }, { "epoch": 1.1269035532994924, "step": 4218, "train/loss_ctc": 0.46392783522605896, "train/loss_error": 0.4678824543952942, "train/loss_total": 0.46709153056144714 }, { "epoch": 1.1271707186748596, "step": 4219, "train/loss_ctc": 0.48480886220932007, "train/loss_error": 0.4706672430038452, "train/loss_total": 0.47349557280540466 }, { "epoch": 1.127437884050227, "grad_norm": 2.566448926925659, "learning_rate": 2.3243387656959658e-05, "loss": 0.4991, "step": 4220 }, { "epoch": 1.127437884050227, "step": 4220, "train/loss_ctc": 0.4985434114933014, "train/loss_error": 0.4491255283355713, "train/loss_total": 0.4590091109275818 }, { "epoch": 1.1277050494255945, "step": 4221, "train/loss_ctc": 1.8418571949005127, "train/loss_error": 0.5242326855659485, "train/loss_total": 0.7877576351165771 }, { "epoch": 1.1279722148009619, "step": 4222, "train/loss_ctc": 0.818480372428894, "train/loss_error": 0.46505749225616455, "train/loss_total": 0.5357420444488525 }, { "epoch": 1.128239380176329, "step": 4223, "train/loss_ctc": 0.6629320383071899, "train/loss_error": 0.47834283113479614, "train/loss_total": 
0.5152606964111328 }, { "epoch": 1.1285065455516965, "step": 4224, "train/loss_ctc": 0.4224907159805298, "train/loss_error": 0.5005621314048767, "train/loss_total": 0.4849478602409363 }, { "epoch": 1.128773710927064, "step": 4225, "train/loss_ctc": 0.4440518021583557, "train/loss_error": 0.5447088479995728, "train/loss_total": 0.5245774388313293 }, { "epoch": 1.1290408763024313, "step": 4226, "train/loss_ctc": 0.8027639389038086, "train/loss_error": 0.4168049693107605, "train/loss_total": 0.4939967393875122 }, { "epoch": 1.1293080416777985, "step": 4227, "train/loss_ctc": 0.6721876263618469, "train/loss_error": 0.4630230665206909, "train/loss_total": 0.5048559904098511 }, { "epoch": 1.129575207053166, "step": 4228, "train/loss_ctc": 1.225753903388977, "train/loss_error": 0.45387983322143555, "train/loss_total": 0.6082546710968018 }, { "epoch": 1.1298423724285334, "step": 4229, "train/loss_ctc": 1.1384625434875488, "train/loss_error": 0.5191647410392761, "train/loss_total": 0.6430243253707886 }, { "epoch": 1.1301095378039006, "grad_norm": 2.4072861671447754, "learning_rate": 2.3227357734437616e-05, "loss": 0.5557, "step": 4230 }, { "epoch": 1.1301095378039006, "step": 4230, "train/loss_ctc": 0.9026353359222412, "train/loss_error": 0.43370845913887024, "train/loss_total": 0.5274938344955444 }, { "epoch": 1.130376703179268, "step": 4231, "train/loss_ctc": 0.8425662517547607, "train/loss_error": 0.5059311389923096, "train/loss_total": 0.5732581615447998 }, { "epoch": 1.1306438685546354, "step": 4232, "train/loss_ctc": 0.6278966665267944, "train/loss_error": 0.414035826921463, "train/loss_total": 0.45680803060531616 }, { "epoch": 1.1309110339300026, "step": 4233, "train/loss_ctc": 0.3465101718902588, "train/loss_error": 0.4227524399757385, "train/loss_total": 0.40750402212142944 }, { "epoch": 1.13117819930537, "step": 4234, "train/loss_ctc": 0.6990871429443359, "train/loss_error": 0.5306312441825867, "train/loss_total": 0.5643224120140076 }, { "epoch": 
1.1314453646807374, "step": 4235, "train/loss_ctc": 1.7023706436157227, "train/loss_error": 0.5087997317314148, "train/loss_total": 0.7475138902664185 }, { "epoch": 1.1317125300561046, "step": 4236, "train/loss_ctc": 0.5254238843917847, "train/loss_error": 0.4514743685722351, "train/loss_total": 0.4662642776966095 }, { "epoch": 1.131979695431472, "step": 4237, "train/loss_ctc": 0.8292598724365234, "train/loss_error": 0.5196078419685364, "train/loss_total": 0.5815382599830627 }, { "epoch": 1.1322468608068395, "step": 4238, "train/loss_ctc": 1.3660682439804077, "train/loss_error": 0.5312836766242981, "train/loss_total": 0.6982406377792358 }, { "epoch": 1.1325140261822069, "step": 4239, "train/loss_ctc": 1.1754032373428345, "train/loss_error": 0.4403538107872009, "train/loss_total": 0.5873637199401855 }, { "epoch": 1.132781191557574, "grad_norm": 2.4142167568206787, "learning_rate": 2.3211327811915574e-05, "loss": 0.561, "step": 4240 }, { "epoch": 1.132781191557574, "step": 4240, "train/loss_ctc": 0.7191441059112549, "train/loss_error": 0.46038419008255005, "train/loss_total": 0.5121361613273621 }, { "epoch": 1.1330483569329415, "step": 4241, "train/loss_ctc": 1.2418100833892822, "train/loss_error": 0.42863044142723083, "train/loss_total": 0.591266393661499 }, { "epoch": 1.133315522308309, "step": 4242, "train/loss_ctc": 0.9776533842086792, "train/loss_error": 0.4695185124874115, "train/loss_total": 0.5711455345153809 }, { "epoch": 1.1335826876836763, "step": 4243, "train/loss_ctc": 0.4117206633090973, "train/loss_error": 0.46482810378074646, "train/loss_total": 0.454206645488739 }, { "epoch": 1.1338498530590435, "step": 4244, "train/loss_ctc": 1.314321517944336, "train/loss_error": 0.45804303884506226, "train/loss_total": 0.629298746585846 }, { "epoch": 1.134117018434411, "step": 4245, "train/loss_ctc": 1.113457202911377, "train/loss_error": 0.4884467124938965, "train/loss_total": 0.6134488582611084 }, { "epoch": 1.1343841838097783, "step": 4246, "train/loss_ctc": 
0.8385369777679443, "train/loss_error": 0.5166900157928467, "train/loss_total": 0.5810593962669373 }, { "epoch": 1.1346513491851455, "step": 4247, "train/loss_ctc": 1.1698429584503174, "train/loss_error": 0.531740665435791, "train/loss_total": 0.6593611240386963 }, { "epoch": 1.134918514560513, "step": 4248, "train/loss_ctc": 1.281088948249817, "train/loss_error": 0.504479706287384, "train/loss_total": 0.6598016023635864 }, { "epoch": 1.1351856799358804, "step": 4249, "train/loss_ctc": 0.4333285093307495, "train/loss_error": 0.47395747900009155, "train/loss_total": 0.4658316969871521 }, { "epoch": 1.1354528453112476, "grad_norm": 1.6409084796905518, "learning_rate": 2.3195297889393536e-05, "loss": 0.5738, "step": 4250 }, { "epoch": 1.1354528453112476, "step": 4250, "train/loss_ctc": 1.2473902702331543, "train/loss_error": 0.44456589221954346, "train/loss_total": 0.6051307916641235 }, { "epoch": 1.135720010686615, "step": 4251, "train/loss_ctc": 1.3427064418792725, "train/loss_error": 0.4520816504955292, "train/loss_total": 0.6302066445350647 }, { "epoch": 1.1359871760619824, "step": 4252, "train/loss_ctc": 0.3552575707435608, "train/loss_error": 0.5128607749938965, "train/loss_total": 0.4813401401042938 }, { "epoch": 1.1362543414373496, "step": 4253, "train/loss_ctc": 0.3362007737159729, "train/loss_error": 0.4841958284378052, "train/loss_total": 0.4545968174934387 }, { "epoch": 1.136521506812717, "step": 4254, "train/loss_ctc": 0.8702576756477356, "train/loss_error": 0.5085973143577576, "train/loss_total": 0.5809293985366821 }, { "epoch": 1.1367886721880844, "step": 4255, "train/loss_ctc": 0.9861218929290771, "train/loss_error": 0.47787925601005554, "train/loss_total": 0.5795277953147888 }, { "epoch": 1.1370558375634519, "step": 4256, "train/loss_ctc": 0.6336764693260193, "train/loss_error": 0.4240899682044983, "train/loss_total": 0.4660072922706604 }, { "epoch": 1.137323002938819, "step": 4257, "train/loss_ctc": 0.8649113178253174, "train/loss_error": 
0.4562395513057709, "train/loss_total": 0.5379738807678223 }, { "epoch": 1.1375901683141865, "step": 4258, "train/loss_ctc": 0.7689163684844971, "train/loss_error": 0.46707361936569214, "train/loss_total": 0.5274421572685242 }, { "epoch": 1.1378573336895539, "step": 4259, "train/loss_ctc": 0.4255714416503906, "train/loss_error": 0.5226573944091797, "train/loss_total": 0.5032402276992798 }, { "epoch": 1.1381244990649213, "grad_norm": 1.290278434753418, "learning_rate": 2.3179267966871494e-05, "loss": 0.5366, "step": 4260 }, { "epoch": 1.1381244990649213, "step": 4260, "train/loss_ctc": 0.8960558176040649, "train/loss_error": 0.48265230655670166, "train/loss_total": 0.5653330087661743 }, { "epoch": 1.1383916644402885, "step": 4261, "train/loss_ctc": 0.8666713237762451, "train/loss_error": 0.4478740394115448, "train/loss_total": 0.5316334962844849 }, { "epoch": 1.138658829815656, "step": 4262, "train/loss_ctc": 0.6889834403991699, "train/loss_error": 0.44160258769989014, "train/loss_total": 0.4910787343978882 }, { "epoch": 1.1389259951910233, "step": 4263, "train/loss_ctc": 1.0222246646881104, "train/loss_error": 0.5038446187973022, "train/loss_total": 0.6075206398963928 }, { "epoch": 1.1391931605663905, "step": 4264, "train/loss_ctc": 0.30836063623428345, "train/loss_error": 0.408255010843277, "train/loss_total": 0.3882761299610138 }, { "epoch": 1.139460325941758, "step": 4265, "train/loss_ctc": 1.3123877048492432, "train/loss_error": 0.485431432723999, "train/loss_total": 0.6508226990699768 }, { "epoch": 1.1397274913171254, "step": 4266, "train/loss_ctc": 1.0407227277755737, "train/loss_error": 0.4123753011226654, "train/loss_total": 0.538044810295105 }, { "epoch": 1.1399946566924926, "step": 4267, "train/loss_ctc": 0.3996044993400574, "train/loss_error": 0.4879881739616394, "train/loss_total": 0.4703114628791809 }, { "epoch": 1.14026182206786, "step": 4268, "train/loss_ctc": 0.33147552609443665, "train/loss_error": 0.4414961040019989, "train/loss_total": 
0.4194920063018799 }, { "epoch": 1.1405289874432274, "step": 4269, "train/loss_ctc": 1.2814961671829224, "train/loss_error": 0.48957428336143494, "train/loss_total": 0.6479586362838745 }, { "epoch": 1.1407961528185946, "grad_norm": 1.3619343042373657, "learning_rate": 2.3163238044349452e-05, "loss": 0.531, "step": 4270 }, { "epoch": 1.1407961528185946, "step": 4270, "train/loss_ctc": 2.5788121223449707, "train/loss_error": 0.4716262221336365, "train/loss_total": 0.8930634260177612 }, { "epoch": 1.141063318193962, "step": 4271, "train/loss_ctc": 1.0525450706481934, "train/loss_error": 0.48813682794570923, "train/loss_total": 0.601018488407135 }, { "epoch": 1.1413304835693294, "step": 4272, "train/loss_ctc": 0.904769778251648, "train/loss_error": 0.4214257001876831, "train/loss_total": 0.518094539642334 }, { "epoch": 1.1415976489446968, "step": 4273, "train/loss_ctc": 1.0209643840789795, "train/loss_error": 0.47823962569236755, "train/loss_total": 0.5867846012115479 }, { "epoch": 1.141864814320064, "step": 4274, "train/loss_ctc": 0.6033394932746887, "train/loss_error": 0.5171422958374023, "train/loss_total": 0.5343817472457886 }, { "epoch": 1.1421319796954315, "step": 4275, "train/loss_ctc": 0.9456713795661926, "train/loss_error": 0.49404504895210266, "train/loss_total": 0.5843703150749207 }, { "epoch": 1.1423991450707989, "step": 4276, "train/loss_ctc": 0.8454432487487793, "train/loss_error": 0.4701063930988312, "train/loss_total": 0.5451737642288208 }, { "epoch": 1.1426663104461663, "step": 4277, "train/loss_ctc": 1.3991281986236572, "train/loss_error": 0.5101927518844604, "train/loss_total": 0.6879798769950867 }, { "epoch": 1.1429334758215335, "step": 4278, "train/loss_ctc": 0.5063991546630859, "train/loss_error": 0.4333024322986603, "train/loss_total": 0.4479217827320099 }, { "epoch": 1.143200641196901, "step": 4279, "train/loss_ctc": 0.8924951553344727, "train/loss_error": 0.5148165225982666, "train/loss_total": 0.5903522372245789 }, { "epoch": 
1.1434678065722683, "grad_norm": 1.7438766956329346, "learning_rate": 2.314720812182741e-05, "loss": 0.5989, "step": 4280 }, { "epoch": 1.1434678065722683, "step": 4280, "train/loss_ctc": 1.0670497417449951, "train/loss_error": 0.4614414572715759, "train/loss_total": 0.5825631022453308 }, { "epoch": 1.1437349719476355, "step": 4281, "train/loss_ctc": 0.8535818457603455, "train/loss_error": 0.46743273735046387, "train/loss_total": 0.544662594795227 }, { "epoch": 1.144002137323003, "step": 4282, "train/loss_ctc": 0.7493365406990051, "train/loss_error": 0.4617334008216858, "train/loss_total": 0.5192540287971497 }, { "epoch": 1.1442693026983703, "step": 4283, "train/loss_ctc": 0.4524550437927246, "train/loss_error": 0.4319375455379486, "train/loss_total": 0.43604105710983276 }, { "epoch": 1.1445364680737375, "step": 4284, "train/loss_ctc": 0.9835717678070068, "train/loss_error": 0.4838433861732483, "train/loss_total": 0.583789050579071 }, { "epoch": 1.144803633449105, "step": 4285, "train/loss_ctc": 0.8221145868301392, "train/loss_error": 0.4199656844139099, "train/loss_total": 0.5003954768180847 }, { "epoch": 1.1450707988244724, "step": 4286, "train/loss_ctc": 0.9315028190612793, "train/loss_error": 0.4297851324081421, "train/loss_total": 0.5301286578178406 }, { "epoch": 1.1453379641998398, "step": 4287, "train/loss_ctc": 0.6110705137252808, "train/loss_error": 0.43991827964782715, "train/loss_total": 0.4741487503051758 }, { "epoch": 1.145605129575207, "step": 4288, "train/loss_ctc": 0.4755539298057556, "train/loss_error": 0.4464671015739441, "train/loss_total": 0.45228445529937744 }, { "epoch": 1.1458722949505744, "step": 4289, "train/loss_ctc": 0.7581698894500732, "train/loss_error": 0.47508928179740906, "train/loss_total": 0.531705379486084 }, { "epoch": 1.1461394603259418, "grad_norm": 1.6575164794921875, "learning_rate": 2.3131178199305368e-05, "loss": 0.5155, "step": 4290 }, { "epoch": 1.1461394603259418, "step": 4290, "train/loss_ctc": 0.6527992486953735, 
"train/loss_error": 0.4243543744087219, "train/loss_total": 0.4700433611869812 }, { "epoch": 1.146406625701309, "step": 4291, "train/loss_ctc": 0.6606371998786926, "train/loss_error": 0.467465877532959, "train/loss_total": 0.5061001777648926 }, { "epoch": 1.1466737910766764, "step": 4292, "train/loss_ctc": 0.5548581480979919, "train/loss_error": 0.5319148898124695, "train/loss_total": 0.5365035533905029 }, { "epoch": 1.1469409564520439, "step": 4293, "train/loss_ctc": 0.7049809694290161, "train/loss_error": 0.46087342500686646, "train/loss_total": 0.5096949338912964 }, { "epoch": 1.1472081218274113, "step": 4294, "train/loss_ctc": 0.8346328735351562, "train/loss_error": 0.5212016701698303, "train/loss_total": 0.5838879346847534 }, { "epoch": 1.1474752872027785, "step": 4295, "train/loss_ctc": 0.5267793536186218, "train/loss_error": 0.49626684188842773, "train/loss_total": 0.5023693442344666 }, { "epoch": 1.1477424525781459, "step": 4296, "train/loss_ctc": 1.2093219757080078, "train/loss_error": 0.5516161322593689, "train/loss_total": 0.6831573247909546 }, { "epoch": 1.1480096179535133, "step": 4297, "train/loss_ctc": 0.9374259114265442, "train/loss_error": 0.4511745572090149, "train/loss_total": 0.5484248399734497 }, { "epoch": 1.1482767833288805, "step": 4298, "train/loss_ctc": 0.626072883605957, "train/loss_error": 0.5248172283172607, "train/loss_total": 0.5450683832168579 }, { "epoch": 1.148543948704248, "step": 4299, "train/loss_ctc": 0.39395928382873535, "train/loss_error": 0.4966329336166382, "train/loss_total": 0.4760982096195221 }, { "epoch": 1.1488111140796153, "grad_norm": 1.702111005783081, "learning_rate": 2.311514827678333e-05, "loss": 0.5361, "step": 4300 }, { "epoch": 1.1488111140796153, "step": 4300, "train/loss_ctc": 0.718821108341217, "train/loss_error": 0.42952772974967957, "train/loss_total": 0.48738640546798706 }, { "epoch": 1.1490782794549825, "step": 4301, "train/loss_ctc": 1.0094637870788574, "train/loss_error": 0.4351136386394501, 
"train/loss_total": 0.5499836802482605 }, { "epoch": 1.14934544483035, "step": 4302, "train/loss_ctc": 0.3984375, "train/loss_error": 0.4817585349082947, "train/loss_total": 0.46509432792663574 }, { "epoch": 1.1496126102057174, "step": 4303, "train/loss_ctc": 0.6937534809112549, "train/loss_error": 0.4757421612739563, "train/loss_total": 0.5193444490432739 }, { "epoch": 1.1498797755810848, "step": 4304, "train/loss_ctc": 0.47023463249206543, "train/loss_error": 0.49535006284713745, "train/loss_total": 0.49032697081565857 }, { "epoch": 1.150146940956452, "step": 4305, "train/loss_ctc": 0.9725378751754761, "train/loss_error": 0.4617094397544861, "train/loss_total": 0.563875138759613 }, { "epoch": 1.1504141063318194, "step": 4306, "train/loss_ctc": 0.6722403764724731, "train/loss_error": 0.47446462512016296, "train/loss_total": 0.514019787311554 }, { "epoch": 1.1506812717071868, "step": 4307, "train/loss_ctc": 0.5055966377258301, "train/loss_error": 0.4401471018791199, "train/loss_total": 0.45323702692985535 }, { "epoch": 1.150948437082554, "step": 4308, "train/loss_ctc": 1.0057415962219238, "train/loss_error": 0.45230329036712646, "train/loss_total": 0.5629909634590149 }, { "epoch": 1.1512156024579214, "step": 4309, "train/loss_ctc": 0.41965413093566895, "train/loss_error": 0.4803115129470825, "train/loss_total": 0.4681800305843353 }, { "epoch": 1.1514827678332888, "grad_norm": 2.1475558280944824, "learning_rate": 2.309911835426129e-05, "loss": 0.5074, "step": 4310 }, { "epoch": 1.1514827678332888, "step": 4310, "train/loss_ctc": 0.6884384155273438, "train/loss_error": 0.483267605304718, "train/loss_total": 0.5243017673492432 }, { "epoch": 1.1517499332086563, "step": 4311, "train/loss_ctc": 0.9185556173324585, "train/loss_error": 0.42119064927101135, "train/loss_total": 0.5206636786460876 }, { "epoch": 1.1520170985840235, "step": 4312, "train/loss_ctc": 0.9917096495628357, "train/loss_error": 0.4473647177219391, "train/loss_total": 0.5562337040901184 }, { "epoch": 
1.1522842639593909, "step": 4313, "train/loss_ctc": 0.3588871657848358, "train/loss_error": 0.5415079593658447, "train/loss_total": 0.5049837827682495 }, { "epoch": 1.1525514293347583, "step": 4314, "train/loss_ctc": 0.401959627866745, "train/loss_error": 0.4647744297981262, "train/loss_total": 0.45221149921417236 }, { "epoch": 1.1528185947101255, "step": 4315, "train/loss_ctc": 0.34313201904296875, "train/loss_error": 0.4806692898273468, "train/loss_total": 0.4531618356704712 }, { "epoch": 1.153085760085493, "step": 4316, "train/loss_ctc": 0.6068448424339294, "train/loss_error": 0.5066114664077759, "train/loss_total": 0.5266581177711487 }, { "epoch": 1.1533529254608603, "step": 4317, "train/loss_ctc": 0.8785425424575806, "train/loss_error": 0.493733286857605, "train/loss_total": 0.570695161819458 }, { "epoch": 1.1536200908362275, "step": 4318, "train/loss_ctc": 0.6521238088607788, "train/loss_error": 0.4314914047718048, "train/loss_total": 0.4756178855895996 }, { "epoch": 1.153887256211595, "step": 4319, "train/loss_ctc": 0.45947355031967163, "train/loss_error": 0.42674654722213745, "train/loss_total": 0.4332919716835022 }, { "epoch": 1.1541544215869624, "grad_norm": 2.073499917984009, "learning_rate": 2.308308843173925e-05, "loss": 0.5018, "step": 4320 }, { "epoch": 1.1541544215869624, "step": 4320, "train/loss_ctc": 0.5456308126449585, "train/loss_error": 0.45468688011169434, "train/loss_total": 0.4728756546974182 }, { "epoch": 1.1544215869623298, "step": 4321, "train/loss_ctc": 0.7288445830345154, "train/loss_error": 0.47177356481552124, "train/loss_total": 0.5231877565383911 }, { "epoch": 1.154688752337697, "step": 4322, "train/loss_ctc": 0.8531579971313477, "train/loss_error": 0.4381192922592163, "train/loss_total": 0.5211270451545715 }, { "epoch": 1.1549559177130644, "step": 4323, "train/loss_ctc": 0.8403780460357666, "train/loss_error": 0.5252922177314758, "train/loss_total": 0.5883094072341919 }, { "epoch": 1.1552230830884318, "step": 4324, 
"train/loss_ctc": 1.1829452514648438, "train/loss_error": 0.5520369410514832, "train/loss_total": 0.6782186031341553 }, { "epoch": 1.155490248463799, "step": 4325, "train/loss_ctc": 1.0599639415740967, "train/loss_error": 0.47361281514167786, "train/loss_total": 0.5908830165863037 }, { "epoch": 1.1557574138391664, "step": 4326, "train/loss_ctc": 1.143149495124817, "train/loss_error": 0.4480856955051422, "train/loss_total": 0.5870984792709351 }, { "epoch": 1.1560245792145338, "step": 4327, "train/loss_ctc": 0.5179988145828247, "train/loss_error": 0.42873871326446533, "train/loss_total": 0.44659072160720825 }, { "epoch": 1.1562917445899012, "step": 4328, "train/loss_ctc": 0.49556964635849, "train/loss_error": 0.43437302112579346, "train/loss_total": 0.4466123580932617 }, { "epoch": 1.1565589099652684, "step": 4329, "train/loss_ctc": 0.4982498288154602, "train/loss_error": 0.43275249004364014, "train/loss_total": 0.4458519518375397 }, { "epoch": 1.1568260753406359, "grad_norm": 2.2540628910064697, "learning_rate": 2.3067058509217207e-05, "loss": 0.5301, "step": 4330 }, { "epoch": 1.1568260753406359, "step": 4330, "train/loss_ctc": 2.4683756828308105, "train/loss_error": 0.5043168067932129, "train/loss_total": 0.8971285820007324 }, { "epoch": 1.1570932407160033, "step": 4331, "train/loss_ctc": 0.7227344512939453, "train/loss_error": 0.42124414443969727, "train/loss_total": 0.4815422296524048 }, { "epoch": 1.1573604060913705, "step": 4332, "train/loss_ctc": 1.1126773357391357, "train/loss_error": 0.5077779293060303, "train/loss_total": 0.6287578344345093 }, { "epoch": 1.157627571466738, "step": 4333, "train/loss_ctc": 0.5214765071868896, "train/loss_error": 0.4668765068054199, "train/loss_total": 0.4777965247631073 }, { "epoch": 1.1578947368421053, "step": 4334, "train/loss_ctc": 0.9149160385131836, "train/loss_error": 0.4240792393684387, "train/loss_total": 0.5222465991973877 }, { "epoch": 1.1581619022174725, "step": 4335, "train/loss_ctc": 0.7148690223693848, 
"train/loss_error": 0.5033802390098572, "train/loss_total": 0.5456780195236206 }, { "epoch": 1.15842906759284, "step": 4336, "train/loss_ctc": 0.7964400053024292, "train/loss_error": 0.5140530467033386, "train/loss_total": 0.5705304741859436 }, { "epoch": 1.1586962329682073, "step": 4337, "train/loss_ctc": 0.8118895292282104, "train/loss_error": 0.4473918080329895, "train/loss_total": 0.5202913880348206 }, { "epoch": 1.1589633983435748, "step": 4338, "train/loss_ctc": 0.5086831450462341, "train/loss_error": 0.46786779165267944, "train/loss_total": 0.4760308861732483 }, { "epoch": 1.159230563718942, "step": 4339, "train/loss_ctc": 0.48884183168411255, "train/loss_error": 0.5038496255874634, "train/loss_total": 0.5008480548858643 }, { "epoch": 1.1594977290943094, "grad_norm": 1.735332727432251, "learning_rate": 2.3051028586695165e-05, "loss": 0.5621, "step": 4340 }, { "epoch": 1.1594977290943094, "step": 4340, "train/loss_ctc": 0.7563616037368774, "train/loss_error": 0.4001758396625519, "train/loss_total": 0.4714130163192749 }, { "epoch": 1.1597648944696768, "step": 4341, "train/loss_ctc": 0.7984002232551575, "train/loss_error": 0.4178062677383423, "train/loss_total": 0.4939250946044922 }, { "epoch": 1.1600320598450442, "step": 4342, "train/loss_ctc": 0.7528734803199768, "train/loss_error": 0.5159289836883545, "train/loss_total": 0.5633178949356079 }, { "epoch": 1.1602992252204114, "step": 4343, "train/loss_ctc": 1.127309799194336, "train/loss_error": 0.4502602219581604, "train/loss_total": 0.5856701135635376 }, { "epoch": 1.1605663905957788, "step": 4344, "train/loss_ctc": 0.4100016951560974, "train/loss_error": 0.5235257148742676, "train/loss_total": 0.5008209347724915 }, { "epoch": 1.1608335559711462, "step": 4345, "train/loss_ctc": 0.6546095013618469, "train/loss_error": 0.45773231983184814, "train/loss_total": 0.49710777401924133 }, { "epoch": 1.1611007213465134, "step": 4346, "train/loss_ctc": 0.5078208446502686, "train/loss_error": 0.4491160213947296, 
"train/loss_total": 0.46085700392723083 }, { "epoch": 1.1613678867218808, "step": 4347, "train/loss_ctc": 1.053701639175415, "train/loss_error": 0.5514971613883972, "train/loss_total": 0.6519380807876587 }, { "epoch": 1.1616350520972483, "step": 4348, "train/loss_ctc": 0.39853066205978394, "train/loss_error": 0.37730222940444946, "train/loss_total": 0.3815479278564453 }, { "epoch": 1.1619022174726155, "step": 4349, "train/loss_ctc": 0.8300626873970032, "train/loss_error": 0.4155460000038147, "train/loss_total": 0.49844932556152344 }, { "epoch": 1.1621693828479829, "grad_norm": 1.4811336994171143, "learning_rate": 2.3034998664173123e-05, "loss": 0.5105, "step": 4350 }, { "epoch": 1.1621693828479829, "step": 4350, "train/loss_ctc": 0.5125853419303894, "train/loss_error": 0.40471652150154114, "train/loss_total": 0.4262903034687042 }, { "epoch": 1.1624365482233503, "step": 4351, "train/loss_ctc": 0.5439208745956421, "train/loss_error": 0.4914397597312927, "train/loss_total": 0.5019360184669495 }, { "epoch": 1.1627037135987175, "step": 4352, "train/loss_ctc": 0.397908478975296, "train/loss_error": 0.4332939088344574, "train/loss_total": 0.42621684074401855 }, { "epoch": 1.162970878974085, "step": 4353, "train/loss_ctc": 0.4718621075153351, "train/loss_error": 0.4670039415359497, "train/loss_total": 0.46797558665275574 }, { "epoch": 1.1632380443494523, "step": 4354, "train/loss_ctc": 0.24375776946544647, "train/loss_error": 0.4027467668056488, "train/loss_total": 0.3709489703178406 }, { "epoch": 1.1635052097248197, "step": 4355, "train/loss_ctc": 0.8785558938980103, "train/loss_error": 0.4545237123966217, "train/loss_total": 0.5393301248550415 }, { "epoch": 1.163772375100187, "step": 4356, "train/loss_ctc": 1.0179319381713867, "train/loss_error": 0.49196428060531616, "train/loss_total": 0.5971578359603882 }, { "epoch": 1.1640395404755544, "step": 4357, "train/loss_ctc": 1.3666272163391113, "train/loss_error": 0.49797776341438293, "train/loss_total": 0.6717076301574707 }, 
{ "epoch": 1.1643067058509218, "step": 4358, "train/loss_ctc": 0.8594512939453125, "train/loss_error": 0.5842317342758179, "train/loss_total": 0.6392756700515747 }, { "epoch": 1.1645738712262892, "step": 4359, "train/loss_ctc": 0.4315311014652252, "train/loss_error": 0.41308003664016724, "train/loss_total": 0.41677024960517883 }, { "epoch": 1.1648410366016564, "grad_norm": 1.7308201789855957, "learning_rate": 2.3018968741651085e-05, "loss": 0.5058, "step": 4360 }, { "epoch": 1.1648410366016564, "step": 4360, "train/loss_ctc": 0.7270104885101318, "train/loss_error": 0.4978448152542114, "train/loss_total": 0.5436779260635376 }, { "epoch": 1.1651082019770238, "step": 4361, "train/loss_ctc": 0.9003312587738037, "train/loss_error": 0.4413841962814331, "train/loss_total": 0.5331736207008362 }, { "epoch": 1.1653753673523912, "step": 4362, "train/loss_ctc": 1.0147521495819092, "train/loss_error": 0.4446074366569519, "train/loss_total": 0.5586363673210144 }, { "epoch": 1.1656425327277584, "step": 4363, "train/loss_ctc": 0.3909415304660797, "train/loss_error": 0.4844003915786743, "train/loss_total": 0.4657086133956909 }, { "epoch": 1.1659096981031258, "step": 4364, "train/loss_ctc": 1.317809820175171, "train/loss_error": 0.48369887471199036, "train/loss_total": 0.6505210399627686 }, { "epoch": 1.1661768634784933, "step": 4365, "train/loss_ctc": 1.0804321765899658, "train/loss_error": 0.5248159170150757, "train/loss_total": 0.6359391808509827 }, { "epoch": 1.1664440288538604, "step": 4366, "train/loss_ctc": 1.2431294918060303, "train/loss_error": 0.47577065229415894, "train/loss_total": 0.6292424201965332 }, { "epoch": 1.1667111942292279, "step": 4367, "train/loss_ctc": 0.46969351172447205, "train/loss_error": 0.4769645035266876, "train/loss_total": 0.4755103290081024 }, { "epoch": 1.1669783596045953, "step": 4368, "train/loss_ctc": 1.3665430545806885, "train/loss_error": 0.41408777236938477, "train/loss_total": 0.6045788526535034 }, { "epoch": 1.1672455249799625, "step": 
4369, "train/loss_ctc": 0.3285096287727356, "train/loss_error": 0.4785335063934326, "train/loss_total": 0.4485287368297577 }, { "epoch": 1.16751269035533, "grad_norm": 1.131839632987976, "learning_rate": 2.3002938819129043e-05, "loss": 0.5546, "step": 4370 }, { "epoch": 1.16751269035533, "step": 4370, "train/loss_ctc": 0.46191662549972534, "train/loss_error": 0.4475959539413452, "train/loss_total": 0.4504601061344147 }, { "epoch": 1.1677798557306973, "step": 4371, "train/loss_ctc": 0.9140598177909851, "train/loss_error": 0.42604881525039673, "train/loss_total": 0.5236510038375854 }, { "epoch": 1.1680470211060647, "step": 4372, "train/loss_ctc": 0.8135517835617065, "train/loss_error": 0.4415881633758545, "train/loss_total": 0.5159808993339539 }, { "epoch": 1.168314186481432, "step": 4373, "train/loss_ctc": 1.017152190208435, "train/loss_error": 0.5111751556396484, "train/loss_total": 0.6123706102371216 }, { "epoch": 1.1685813518567993, "step": 4374, "train/loss_ctc": 0.7462685704231262, "train/loss_error": 0.4544140100479126, "train/loss_total": 0.5127848982810974 }, { "epoch": 1.1688485172321668, "step": 4375, "train/loss_ctc": 1.4980106353759766, "train/loss_error": 0.4438602030277252, "train/loss_total": 0.6546902656555176 }, { "epoch": 1.1691156826075342, "step": 4376, "train/loss_ctc": 0.9836815595626831, "train/loss_error": 0.4746372103691101, "train/loss_total": 0.5764461159706116 }, { "epoch": 1.1693828479829014, "step": 4377, "train/loss_ctc": 0.4216786324977875, "train/loss_error": 0.5125100612640381, "train/loss_total": 0.49434375762939453 }, { "epoch": 1.1696500133582688, "step": 4378, "train/loss_ctc": 0.9911475777626038, "train/loss_error": 0.480133593082428, "train/loss_total": 0.58233642578125 }, { "epoch": 1.1699171787336362, "step": 4379, "train/loss_ctc": 0.5016868114471436, "train/loss_error": 0.5112260580062866, "train/loss_total": 0.5093182325363159 }, { "epoch": 1.1701843441090034, "grad_norm": 2.5704236030578613, "learning_rate": 
2.2986908896607e-05, "loss": 0.5432, "step": 4380 }, { "epoch": 1.1701843441090034, "step": 4380, "train/loss_ctc": 0.8832755088806152, "train/loss_error": 0.49229955673217773, "train/loss_total": 0.5704947710037231 }, { "epoch": 1.1704515094843708, "step": 4381, "train/loss_ctc": 0.6301989555358887, "train/loss_error": 0.4580576419830322, "train/loss_total": 0.4924858808517456 }, { "epoch": 1.1707186748597382, "step": 4382, "train/loss_ctc": 1.3216716051101685, "train/loss_error": 0.4215155839920044, "train/loss_total": 0.6015467643737793 }, { "epoch": 1.1709858402351054, "step": 4383, "train/loss_ctc": 0.6409716606140137, "train/loss_error": 0.42385604977607727, "train/loss_total": 0.4672791659832001 }, { "epoch": 1.1712530056104729, "step": 4384, "train/loss_ctc": 0.6468263864517212, "train/loss_error": 0.5184231996536255, "train/loss_total": 0.5441038608551025 }, { "epoch": 1.1715201709858403, "step": 4385, "train/loss_ctc": 0.8041530847549438, "train/loss_error": 0.45349952578544617, "train/loss_total": 0.5236302614212036 }, { "epoch": 1.1717873363612075, "step": 4386, "train/loss_ctc": 0.5085093379020691, "train/loss_error": 0.5027260780334473, "train/loss_total": 0.5038827657699585 }, { "epoch": 1.1720545017365749, "step": 4387, "train/loss_ctc": 1.1051931381225586, "train/loss_error": 0.467631071805954, "train/loss_total": 0.5951434969902039 }, { "epoch": 1.1723216671119423, "step": 4388, "train/loss_ctc": 0.8077647686004639, "train/loss_error": 0.4717654287815094, "train/loss_total": 0.5389652848243713 }, { "epoch": 1.1725888324873097, "step": 4389, "train/loss_ctc": 0.8386496305465698, "train/loss_error": 0.45899561047554016, "train/loss_total": 0.5349264144897461 }, { "epoch": 1.172855997862677, "grad_norm": 2.2822275161743164, "learning_rate": 2.297087897408496e-05, "loss": 0.5372, "step": 4390 }, { "epoch": 1.172855997862677, "step": 4390, "train/loss_ctc": 1.1543478965759277, "train/loss_error": 0.4310941994190216, "train/loss_total": 
0.5757449269294739 }, { "epoch": 1.1731231632380443, "step": 4391, "train/loss_ctc": 1.39139986038208, "train/loss_error": 0.5692038536071777, "train/loss_total": 0.7336430549621582 }, { "epoch": 1.1733903286134117, "step": 4392, "train/loss_ctc": 0.6338396072387695, "train/loss_error": 0.4805956482887268, "train/loss_total": 0.5112444162368774 }, { "epoch": 1.1736574939887792, "step": 4393, "train/loss_ctc": 1.3757696151733398, "train/loss_error": 0.4637281596660614, "train/loss_total": 0.646136462688446 }, { "epoch": 1.1739246593641464, "step": 4394, "train/loss_ctc": 0.5500805974006653, "train/loss_error": 0.45876559615135193, "train/loss_total": 0.47702860832214355 }, { "epoch": 1.1741918247395138, "step": 4395, "train/loss_ctc": 0.8274999260902405, "train/loss_error": 0.48674920201301575, "train/loss_total": 0.5548993349075317 }, { "epoch": 1.1744589901148812, "step": 4396, "train/loss_ctc": 0.40314599871635437, "train/loss_error": 0.5624000430107117, "train/loss_total": 0.5305492281913757 }, { "epoch": 1.1747261554902484, "step": 4397, "train/loss_ctc": 1.37582266330719, "train/loss_error": 0.47582170367240906, "train/loss_total": 0.6558219194412231 }, { "epoch": 1.1749933208656158, "step": 4398, "train/loss_ctc": 1.519850254058838, "train/loss_error": 0.4588862955570221, "train/loss_total": 0.6710790991783142 }, { "epoch": 1.1752604862409832, "step": 4399, "train/loss_ctc": 0.6906019449234009, "train/loss_error": 0.48022517561912537, "train/loss_total": 0.5223005414009094 }, { "epoch": 1.1755276516163504, "grad_norm": 1.4526753425598145, "learning_rate": 2.2954849051562917e-05, "loss": 0.5878, "step": 4400 }, { "epoch": 1.1755276516163504, "step": 4400, "train/loss_ctc": 0.5617451667785645, "train/loss_error": 0.45172935724258423, "train/loss_total": 0.47373253107070923 }, { "epoch": 1.1757948169917178, "step": 4401, "train/loss_ctc": 1.9297971725463867, "train/loss_error": 0.5094924569129944, "train/loss_total": 0.7935534119606018 }, { "epoch": 
1.1760619823670853, "step": 4402, "train/loss_ctc": 1.040653109550476, "train/loss_error": 0.5423812866210938, "train/loss_total": 0.6420356631278992 }, { "epoch": 1.1763291477424527, "step": 4403, "train/loss_ctc": 1.5045690536499023, "train/loss_error": 0.46247583627700806, "train/loss_total": 0.6708945035934448 }, { "epoch": 1.1765963131178199, "step": 4404, "train/loss_ctc": 1.4387328624725342, "train/loss_error": 0.5123506188392639, "train/loss_total": 0.697627067565918 }, { "epoch": 1.1768634784931873, "step": 4405, "train/loss_ctc": 0.7974420785903931, "train/loss_error": 0.45941248536109924, "train/loss_total": 0.5270184278488159 }, { "epoch": 1.1771306438685547, "step": 4406, "train/loss_ctc": 1.2091871500015259, "train/loss_error": 0.5477863550186157, "train/loss_total": 0.6800665259361267 }, { "epoch": 1.177397809243922, "step": 4407, "train/loss_ctc": 0.8893165588378906, "train/loss_error": 0.47118160128593445, "train/loss_total": 0.5548086166381836 }, { "epoch": 1.1776649746192893, "step": 4408, "train/loss_ctc": 0.3694359064102173, "train/loss_error": 0.5017080903053284, "train/loss_total": 0.4752536416053772 }, { "epoch": 1.1779321399946567, "step": 4409, "train/loss_ctc": 1.8735380172729492, "train/loss_error": 0.4823830723762512, "train/loss_total": 0.7606140375137329 }, { "epoch": 1.1781993053700242, "grad_norm": 2.1862146854400635, "learning_rate": 2.2938819129040875e-05, "loss": 0.6276, "step": 4410 }, { "epoch": 1.1781993053700242, "step": 4410, "train/loss_ctc": 0.6470720767974854, "train/loss_error": 0.48714184761047363, "train/loss_total": 0.5191279053688049 }, { "epoch": 1.1784664707453913, "step": 4411, "train/loss_ctc": 0.8736347556114197, "train/loss_error": 0.429476797580719, "train/loss_total": 0.5183084011077881 }, { "epoch": 1.1787336361207588, "step": 4412, "train/loss_ctc": 0.8661232590675354, "train/loss_error": 0.4700155258178711, "train/loss_total": 0.549237072467804 }, { "epoch": 1.1790008014961262, "step": 4413, 
"train/loss_ctc": 1.3159189224243164, "train/loss_error": 0.45132574439048767, "train/loss_total": 0.6242443919181824 }, { "epoch": 1.1792679668714934, "step": 4414, "train/loss_ctc": 1.0316808223724365, "train/loss_error": 0.4604284465312958, "train/loss_total": 0.574678897857666 }, { "epoch": 1.1795351322468608, "step": 4415, "train/loss_ctc": 0.6762087345123291, "train/loss_error": 0.48371338844299316, "train/loss_total": 0.5222125053405762 }, { "epoch": 1.1798022976222282, "step": 4416, "train/loss_ctc": 0.9606882333755493, "train/loss_error": 0.4712056517601013, "train/loss_total": 0.5691021680831909 }, { "epoch": 1.1800694629975954, "step": 4417, "train/loss_ctc": 1.3140208721160889, "train/loss_error": 0.39103585481643677, "train/loss_total": 0.5756328701972961 }, { "epoch": 1.1803366283729628, "step": 4418, "train/loss_ctc": 0.8384735584259033, "train/loss_error": 0.4174306094646454, "train/loss_total": 0.5016392469406128 }, { "epoch": 1.1806037937483302, "step": 4419, "train/loss_ctc": 1.1144756078720093, "train/loss_error": 0.48316410183906555, "train/loss_total": 0.6094264388084412 }, { "epoch": 1.1808709591236977, "grad_norm": 1.415931224822998, "learning_rate": 2.2922789206518837e-05, "loss": 0.5564, "step": 4420 }, { "epoch": 1.1808709591236977, "step": 4420, "train/loss_ctc": 0.4697962999343872, "train/loss_error": 0.4767700731754303, "train/loss_total": 0.47537532448768616 }, { "epoch": 1.1811381244990649, "step": 4421, "train/loss_ctc": 0.9863483905792236, "train/loss_error": 0.4415721297264099, "train/loss_total": 0.5505273938179016 }, { "epoch": 1.1814052898744323, "step": 4422, "train/loss_ctc": 0.893570601940155, "train/loss_error": 0.49036845564842224, "train/loss_total": 0.5710089206695557 }, { "epoch": 1.1816724552497997, "step": 4423, "train/loss_ctc": 0.9570088982582092, "train/loss_error": 0.4272667169570923, "train/loss_total": 0.5332151651382446 }, { "epoch": 1.1819396206251669, "step": 4424, "train/loss_ctc": 0.5323947668075562, 
"train/loss_error": 0.4948694109916687, "train/loss_total": 0.5023744702339172 }, { "epoch": 1.1822067860005343, "step": 4425, "train/loss_ctc": 1.0180811882019043, "train/loss_error": 0.4827406108379364, "train/loss_total": 0.5898087620735168 }, { "epoch": 1.1824739513759017, "step": 4426, "train/loss_ctc": 0.51173996925354, "train/loss_error": 0.43780818581581116, "train/loss_total": 0.4525945484638214 }, { "epoch": 1.1827411167512691, "step": 4427, "train/loss_ctc": 0.9103620052337646, "train/loss_error": 0.5214152336120605, "train/loss_total": 0.5992045998573303 }, { "epoch": 1.1830082821266363, "step": 4428, "train/loss_ctc": 0.743865430355072, "train/loss_error": 0.45552849769592285, "train/loss_total": 0.5131958723068237 }, { "epoch": 1.1832754475020038, "step": 4429, "train/loss_ctc": 1.3464487791061401, "train/loss_error": 0.48215726017951965, "train/loss_total": 0.6550155878067017 }, { "epoch": 1.1835426128773712, "grad_norm": 1.321975588798523, "learning_rate": 2.2906759283996795e-05, "loss": 0.5442, "step": 4430 }, { "epoch": 1.1835426128773712, "step": 4430, "train/loss_ctc": 0.7349336743354797, "train/loss_error": 0.42040619254112244, "train/loss_total": 0.4833117127418518 }, { "epoch": 1.1838097782527384, "step": 4431, "train/loss_ctc": 0.5336624979972839, "train/loss_error": 0.5018350481987, "train/loss_total": 0.5082005262374878 }, { "epoch": 1.1840769436281058, "step": 4432, "train/loss_ctc": 0.7937160134315491, "train/loss_error": 0.4551660120487213, "train/loss_total": 0.5228760242462158 }, { "epoch": 1.1843441090034732, "step": 4433, "train/loss_ctc": 0.8808243870735168, "train/loss_error": 0.5287840962409973, "train/loss_total": 0.5991921424865723 }, { "epoch": 1.1846112743788404, "step": 4434, "train/loss_ctc": 1.0015103816986084, "train/loss_error": 0.5272961258888245, "train/loss_total": 0.6221389770507812 }, { "epoch": 1.1848784397542078, "step": 4435, "train/loss_ctc": 0.8610119819641113, "train/loss_error": 0.4248964786529541, 
"train/loss_total": 0.5121195912361145 }, { "epoch": 1.1851456051295752, "step": 4436, "train/loss_ctc": 1.190102458000183, "train/loss_error": 0.4353979825973511, "train/loss_total": 0.5863388776779175 }, { "epoch": 1.1854127705049426, "step": 4437, "train/loss_ctc": 0.521593451499939, "train/loss_error": 0.40024736523628235, "train/loss_total": 0.42451661825180054 }, { "epoch": 1.1856799358803098, "step": 4438, "train/loss_ctc": 0.35115116834640503, "train/loss_error": 0.5128636360168457, "train/loss_total": 0.48052117228507996 }, { "epoch": 1.1859471012556773, "step": 4439, "train/loss_ctc": 0.7045682668685913, "train/loss_error": 0.42718595266342163, "train/loss_total": 0.4826624393463135 }, { "epoch": 1.1862142666310447, "grad_norm": 3.115896701812744, "learning_rate": 2.2890729361474753e-05, "loss": 0.5222, "step": 4440 }, { "epoch": 1.1862142666310447, "step": 4440, "train/loss_ctc": 0.5145984292030334, "train/loss_error": 0.5076347589492798, "train/loss_total": 0.5090274810791016 }, { "epoch": 1.1864814320064119, "step": 4441, "train/loss_ctc": 0.7199546694755554, "train/loss_error": 0.4326534867286682, "train/loss_total": 0.4901137351989746 }, { "epoch": 1.1867485973817793, "step": 4442, "train/loss_ctc": 1.0091698169708252, "train/loss_error": 0.4598618745803833, "train/loss_total": 0.5697234869003296 }, { "epoch": 1.1870157627571467, "step": 4443, "train/loss_ctc": 1.8038663864135742, "train/loss_error": 0.48336803913116455, "train/loss_total": 0.7474677562713623 }, { "epoch": 1.1872829281325141, "step": 4444, "train/loss_ctc": 0.3424009084701538, "train/loss_error": 0.46243757009506226, "train/loss_total": 0.4384302496910095 }, { "epoch": 1.1875500935078813, "step": 4445, "train/loss_ctc": 0.7446112036705017, "train/loss_error": 0.4884151220321655, "train/loss_total": 0.5396543741226196 }, { "epoch": 1.1878172588832487, "step": 4446, "train/loss_ctc": 0.7185209393501282, "train/loss_error": 0.45954546332359314, "train/loss_total": 0.5113405585289001 }, 
{ "epoch": 1.1880844242586162, "step": 4447, "train/loss_ctc": 0.9237741231918335, "train/loss_error": 0.4697762429714203, "train/loss_total": 0.5605758428573608 }, { "epoch": 1.1883515896339834, "step": 4448, "train/loss_ctc": 0.6027771830558777, "train/loss_error": 0.46481552720069885, "train/loss_total": 0.4924078583717346 }, { "epoch": 1.1886187550093508, "step": 4449, "train/loss_ctc": 0.8425659537315369, "train/loss_error": 0.44186562299728394, "train/loss_total": 0.5220056772232056 }, { "epoch": 1.1888859203847182, "grad_norm": 1.6971265077590942, "learning_rate": 2.287469943895271e-05, "loss": 0.5381, "step": 4450 }, { "epoch": 1.1888859203847182, "step": 4450, "train/loss_ctc": 0.4400433897972107, "train/loss_error": 0.534464418888092, "train/loss_total": 0.5155802369117737 }, { "epoch": 1.1891530857600854, "step": 4451, "train/loss_ctc": 0.8349545001983643, "train/loss_error": 0.4593382775783539, "train/loss_total": 0.534461498260498 }, { "epoch": 1.1894202511354528, "step": 4452, "train/loss_ctc": 0.8254882097244263, "train/loss_error": 0.45403411984443665, "train/loss_total": 0.5283249616622925 }, { "epoch": 1.1896874165108202, "step": 4453, "train/loss_ctc": 1.025757908821106, "train/loss_error": 0.49707451462745667, "train/loss_total": 0.6028112173080444 }, { "epoch": 1.1899545818861876, "step": 4454, "train/loss_ctc": 0.30764585733413696, "train/loss_error": 0.38957077264785767, "train/loss_total": 0.37318578362464905 }, { "epoch": 1.1902217472615548, "step": 4455, "train/loss_ctc": 1.0600322484970093, "train/loss_error": 0.45897814631462097, "train/loss_total": 0.5791889429092407 }, { "epoch": 1.1904889126369222, "step": 4456, "train/loss_ctc": 0.7817307114601135, "train/loss_error": 0.4521491527557373, "train/loss_total": 0.5180654525756836 }, { "epoch": 1.1907560780122897, "step": 4457, "train/loss_ctc": 0.2545962929725647, "train/loss_error": 0.38172996044158936, "train/loss_total": 0.35630324482917786 }, { "epoch": 1.191023243387657, "step": 
4458, "train/loss_ctc": 0.6124796867370605, "train/loss_error": 0.4261024296283722, "train/loss_total": 0.4633778929710388 }, { "epoch": 1.1912904087630243, "step": 4459, "train/loss_ctc": 0.5978041887283325, "train/loss_error": 0.45908305048942566, "train/loss_total": 0.4868272840976715 }, { "epoch": 1.1915575741383917, "grad_norm": 4.855085372924805, "learning_rate": 2.285866951643067e-05, "loss": 0.4958, "step": 4460 }, { "epoch": 1.1915575741383917, "step": 4460, "train/loss_ctc": 1.2830218076705933, "train/loss_error": 0.44247376918792725, "train/loss_total": 0.6105834245681763 }, { "epoch": 1.1918247395137591, "step": 4461, "train/loss_ctc": 0.2931157946586609, "train/loss_error": 0.5002764463424683, "train/loss_total": 0.4588443338871002 }, { "epoch": 1.1920919048891263, "step": 4462, "train/loss_ctc": 1.0668847560882568, "train/loss_error": 0.4329725205898285, "train/loss_total": 0.5597549676895142 }, { "epoch": 1.1923590702644937, "step": 4463, "train/loss_ctc": 1.2877404689788818, "train/loss_error": 0.458382785320282, "train/loss_total": 0.6242543458938599 }, { "epoch": 1.1926262356398611, "step": 4464, "train/loss_ctc": 1.282610297203064, "train/loss_error": 0.5797053575515747, "train/loss_total": 0.7202863693237305 }, { "epoch": 1.1928934010152283, "step": 4465, "train/loss_ctc": 0.9152080416679382, "train/loss_error": 0.5614407062530518, "train/loss_total": 0.6321941614151001 }, { "epoch": 1.1931605663905958, "step": 4466, "train/loss_ctc": 0.736578106880188, "train/loss_error": 0.5005114078521729, "train/loss_total": 0.547724723815918 }, { "epoch": 1.1934277317659632, "step": 4467, "train/loss_ctc": 1.0175641775131226, "train/loss_error": 0.4979105591773987, "train/loss_total": 0.6018412709236145 }, { "epoch": 1.1936948971413304, "step": 4468, "train/loss_ctc": 0.6528506278991699, "train/loss_error": 0.465092271566391, "train/loss_total": 0.5026439428329468 }, { "epoch": 1.1939620625166978, "step": 4469, "train/loss_ctc": 0.6198378205299377, 
"train/loss_error": 0.45711758732795715, "train/loss_total": 0.48966163396835327 }, { "epoch": 1.1942292278920652, "grad_norm": 1.4801260232925415, "learning_rate": 2.2842639593908627e-05, "loss": 0.5748, "step": 4470 }, { "epoch": 1.1942292278920652, "step": 4470, "train/loss_ctc": 0.9973570108413696, "train/loss_error": 0.45047882199287415, "train/loss_total": 0.5598544478416443 }, { "epoch": 1.1944963932674326, "step": 4471, "train/loss_ctc": 1.139167070388794, "train/loss_error": 0.5067166686058044, "train/loss_total": 0.6332067251205444 }, { "epoch": 1.1947635586427998, "step": 4472, "train/loss_ctc": 0.6749228239059448, "train/loss_error": 0.4707948863506317, "train/loss_total": 0.5116204619407654 }, { "epoch": 1.1950307240181672, "step": 4473, "train/loss_ctc": 1.2832664251327515, "train/loss_error": 0.47076931595802307, "train/loss_total": 0.6332687139511108 }, { "epoch": 1.1952978893935347, "step": 4474, "train/loss_ctc": 0.7890616059303284, "train/loss_error": 0.4659760296344757, "train/loss_total": 0.5305931568145752 }, { "epoch": 1.195565054768902, "step": 4475, "train/loss_ctc": 0.8785214424133301, "train/loss_error": 0.5135271549224854, "train/loss_total": 0.5865260362625122 }, { "epoch": 1.1958322201442693, "step": 4476, "train/loss_ctc": 1.300449013710022, "train/loss_error": 0.4949771761894226, "train/loss_total": 0.6560715436935425 }, { "epoch": 1.1960993855196367, "step": 4477, "train/loss_ctc": 0.5607971549034119, "train/loss_error": 0.4457513391971588, "train/loss_total": 0.46876052021980286 }, { "epoch": 1.196366550895004, "step": 4478, "train/loss_ctc": 1.32192063331604, "train/loss_error": 0.4669192135334015, "train/loss_total": 0.6379194855690002 }, { "epoch": 1.1966337162703713, "step": 4479, "train/loss_ctc": 1.1740142107009888, "train/loss_error": 0.44942206144332886, "train/loss_total": 0.5943405032157898 }, { "epoch": 1.1969008816457387, "grad_norm": 1.796962857246399, "learning_rate": 2.282660967138659e-05, "loss": 0.5812, "step": 
4480 }, { "epoch": 1.1969008816457387, "step": 4480, "train/loss_ctc": 0.8492260575294495, "train/loss_error": 0.52506023645401, "train/loss_total": 0.5898934006690979 }, { "epoch": 1.1971680470211061, "step": 4481, "train/loss_ctc": 0.4120126962661743, "train/loss_error": 0.48177534341812134, "train/loss_total": 0.4678228497505188 }, { "epoch": 1.1974352123964733, "step": 4482, "train/loss_ctc": 0.8356898427009583, "train/loss_error": 0.48538267612457275, "train/loss_total": 0.5554441213607788 }, { "epoch": 1.1977023777718407, "step": 4483, "train/loss_ctc": 0.599570631980896, "train/loss_error": 0.40830057859420776, "train/loss_total": 0.44655460119247437 }, { "epoch": 1.1979695431472082, "step": 4484, "train/loss_ctc": 0.7978104948997498, "train/loss_error": 0.4363747835159302, "train/loss_total": 0.5086619257926941 }, { "epoch": 1.1982367085225754, "step": 4485, "train/loss_ctc": 1.1070271730422974, "train/loss_error": 0.48520058393478394, "train/loss_total": 0.6095659136772156 }, { "epoch": 1.1985038738979428, "step": 4486, "train/loss_ctc": 1.089816689491272, "train/loss_error": 0.49390676617622375, "train/loss_total": 0.6130887269973755 }, { "epoch": 1.1987710392733102, "step": 4487, "train/loss_ctc": 0.7676712870597839, "train/loss_error": 0.45648086071014404, "train/loss_total": 0.518718957901001 }, { "epoch": 1.1990382046486776, "step": 4488, "train/loss_ctc": 0.4367813766002655, "train/loss_error": 0.4772889316082001, "train/loss_total": 0.4691874086856842 }, { "epoch": 1.1993053700240448, "step": 4489, "train/loss_ctc": 0.4077170491218567, "train/loss_error": 0.4382852017879486, "train/loss_total": 0.4321715831756592 }, { "epoch": 1.1995725353994122, "grad_norm": 1.7618967294692993, "learning_rate": 2.281057974886455e-05, "loss": 0.5211, "step": 4490 }, { "epoch": 1.1995725353994122, "step": 4490, "train/loss_ctc": 0.9480737447738647, "train/loss_error": 0.43881216645240784, "train/loss_total": 0.5406644940376282 }, { "epoch": 1.1998397007747796, 
"step": 4491, "train/loss_ctc": 0.6121370196342468, "train/loss_error": 0.49901753664016724, "train/loss_total": 0.5216414332389832 }, { "epoch": 1.200106866150147, "step": 4492, "train/loss_ctc": 0.7757480144500732, "train/loss_error": 0.5393695831298828, "train/loss_total": 0.586645245552063 }, { "epoch": 1.2003740315255143, "step": 4493, "train/loss_ctc": 0.6642510890960693, "train/loss_error": 0.47230514883995056, "train/loss_total": 0.5106943249702454 }, { "epoch": 1.2006411969008817, "step": 4494, "train/loss_ctc": 0.37680763006210327, "train/loss_error": 0.47389698028564453, "train/loss_total": 0.4544790983200073 }, { "epoch": 1.200908362276249, "step": 4495, "train/loss_ctc": 0.7435834407806396, "train/loss_error": 0.4397445619106293, "train/loss_total": 0.5005123615264893 }, { "epoch": 1.2011755276516163, "step": 4496, "train/loss_ctc": 1.1722768545150757, "train/loss_error": 0.4541057348251343, "train/loss_total": 0.5977399349212646 }, { "epoch": 1.2014426930269837, "step": 4497, "train/loss_ctc": 0.8936044573783875, "train/loss_error": 0.5373144745826721, "train/loss_total": 0.6085724830627441 }, { "epoch": 1.2017098584023511, "step": 4498, "train/loss_ctc": 1.025012493133545, "train/loss_error": 0.5012824535369873, "train/loss_total": 0.6060284972190857 }, { "epoch": 1.2019770237777183, "step": 4499, "train/loss_ctc": 0.4299057126045227, "train/loss_error": 0.5039462447166443, "train/loss_total": 0.489138126373291 }, { "epoch": 1.2022441891530857, "grad_norm": 1.7228684425354004, "learning_rate": 2.2794549826342508e-05, "loss": 0.5416, "step": 4500 }, { "epoch": 1.2022441891530857, "step": 4500, "train/loss_ctc": 0.8083196878433228, "train/loss_error": 0.508117139339447, "train/loss_total": 0.5681576728820801 }, { "epoch": 1.2025113545284531, "step": 4501, "train/loss_ctc": 0.6321817636489868, "train/loss_error": 0.44911012053489685, "train/loss_total": 0.48572444915771484 }, { "epoch": 1.2027785199038203, "step": 4502, "train/loss_ctc": 
0.5126659870147705, "train/loss_error": 0.40072792768478394, "train/loss_total": 0.4231155514717102 }, { "epoch": 1.2030456852791878, "step": 4503, "train/loss_ctc": 0.5812832713127136, "train/loss_error": 0.4799794852733612, "train/loss_total": 0.5002402067184448 }, { "epoch": 1.2033128506545552, "step": 4504, "train/loss_ctc": 1.112560510635376, "train/loss_error": 0.45701223611831665, "train/loss_total": 0.5881218910217285 }, { "epoch": 1.2035800160299226, "step": 4505, "train/loss_ctc": 0.7591171264648438, "train/loss_error": 0.5100389122962952, "train/loss_total": 0.5598545670509338 }, { "epoch": 1.2038471814052898, "step": 4506, "train/loss_ctc": 0.8543694019317627, "train/loss_error": 0.5065527558326721, "train/loss_total": 0.5761160850524902 }, { "epoch": 1.2041143467806572, "step": 4507, "train/loss_ctc": 0.9042279720306396, "train/loss_error": 0.47297537326812744, "train/loss_total": 0.5592259168624878 }, { "epoch": 1.2043815121560246, "step": 4508, "train/loss_ctc": 0.7030908465385437, "train/loss_error": 0.47757968306541443, "train/loss_total": 0.5226819515228271 }, { "epoch": 1.204648677531392, "step": 4509, "train/loss_ctc": 0.22266727685928345, "train/loss_error": 0.41890907287597656, "train/loss_total": 0.3796607255935669 }, { "epoch": 1.2049158429067592, "grad_norm": 6.495790481567383, "learning_rate": 2.2778519903820466e-05, "loss": 0.5163, "step": 4510 }, { "epoch": 1.2049158429067592, "step": 4510, "train/loss_ctc": 1.0712109804153442, "train/loss_error": 0.48610401153564453, "train/loss_total": 0.6031253933906555 }, { "epoch": 1.2051830082821267, "step": 4511, "train/loss_ctc": 1.0940370559692383, "train/loss_error": 0.45598340034484863, "train/loss_total": 0.5835941433906555 }, { "epoch": 1.205450173657494, "step": 4512, "train/loss_ctc": 1.5888699293136597, "train/loss_error": 0.4419761002063751, "train/loss_total": 0.6713548898696899 }, { "epoch": 1.2057173390328613, "step": 4513, "train/loss_ctc": 0.7046571969985962, "train/loss_error": 
0.4905497133731842, "train/loss_total": 0.5333712100982666 }, { "epoch": 1.2059845044082287, "step": 4514, "train/loss_ctc": 0.43749216198921204, "train/loss_error": 0.4536827802658081, "train/loss_total": 0.45044466853141785 }, { "epoch": 1.206251669783596, "step": 4515, "train/loss_ctc": 0.9140594005584717, "train/loss_error": 0.4701819121837616, "train/loss_total": 0.5589574575424194 }, { "epoch": 1.2065188351589633, "step": 4516, "train/loss_ctc": 1.1292288303375244, "train/loss_error": 0.4707182049751282, "train/loss_total": 0.6024203300476074 }, { "epoch": 1.2067860005343307, "step": 4517, "train/loss_ctc": 0.6767197847366333, "train/loss_error": 0.5365104675292969, "train/loss_total": 0.5645523071289062 }, { "epoch": 1.2070531659096981, "step": 4518, "train/loss_ctc": 1.0273733139038086, "train/loss_error": 0.42542764544487, "train/loss_total": 0.5458167791366577 }, { "epoch": 1.2073203312850656, "step": 4519, "train/loss_ctc": 0.49116307497024536, "train/loss_error": 0.4130561649799347, "train/loss_total": 0.4286775588989258 }, { "epoch": 1.2075874966604327, "grad_norm": 3.6505117416381836, "learning_rate": 2.2762489981298424e-05, "loss": 0.5542, "step": 4520 }, { "epoch": 1.2075874966604327, "step": 4520, "train/loss_ctc": 0.7002835869789124, "train/loss_error": 0.4715249538421631, "train/loss_total": 0.5172767043113708 }, { "epoch": 1.2078546620358002, "step": 4521, "train/loss_ctc": 0.6266557574272156, "train/loss_error": 0.405942440032959, "train/loss_total": 0.4500851035118103 }, { "epoch": 1.2081218274111676, "step": 4522, "train/loss_ctc": 0.9412095546722412, "train/loss_error": 0.4764062166213989, "train/loss_total": 0.5693668723106384 }, { "epoch": 1.2083889927865348, "step": 4523, "train/loss_ctc": 0.5752881765365601, "train/loss_error": 0.5175005793571472, "train/loss_total": 0.5290580987930298 }, { "epoch": 1.2086561581619022, "step": 4524, "train/loss_ctc": 0.8572118282318115, "train/loss_error": 0.48812052607536316, "train/loss_total": 
0.5619388222694397 }, { "epoch": 1.2089233235372696, "step": 4525, "train/loss_ctc": 0.6815072298049927, "train/loss_error": 0.504503071308136, "train/loss_total": 0.5399038791656494 }, { "epoch": 1.209190488912637, "step": 4526, "train/loss_ctc": 0.9910634756088257, "train/loss_error": 0.4834303855895996, "train/loss_total": 0.5849570035934448 }, { "epoch": 1.2094576542880042, "step": 4527, "train/loss_ctc": 0.9536769390106201, "train/loss_error": 0.4564119577407837, "train/loss_total": 0.5558649301528931 }, { "epoch": 1.2097248196633716, "step": 4528, "train/loss_ctc": 0.6762861609458923, "train/loss_error": 0.4211055338382721, "train/loss_total": 0.47214168310165405 }, { "epoch": 1.209991985038739, "step": 4529, "train/loss_ctc": 0.5390245914459229, "train/loss_error": 0.5262953042984009, "train/loss_total": 0.5288411378860474 }, { "epoch": 1.2102591504141063, "grad_norm": 3.3940868377685547, "learning_rate": 2.2746460058776382e-05, "loss": 0.5309, "step": 4530 }, { "epoch": 1.2102591504141063, "step": 4530, "train/loss_ctc": 0.9539922475814819, "train/loss_error": 0.3819866478443146, "train/loss_total": 0.496387779712677 }, { "epoch": 1.2105263157894737, "step": 4531, "train/loss_ctc": 1.1017624139785767, "train/loss_error": 0.49344438314437866, "train/loss_total": 0.6151080131530762 }, { "epoch": 1.210793481164841, "step": 4532, "train/loss_ctc": 0.8132738471031189, "train/loss_error": 0.49343472719192505, "train/loss_total": 0.5574025511741638 }, { "epoch": 1.2110606465402083, "step": 4533, "train/loss_ctc": 0.43664926290512085, "train/loss_error": 0.40909841656684875, "train/loss_total": 0.41460859775543213 }, { "epoch": 1.2113278119155757, "step": 4534, "train/loss_ctc": 0.7656291723251343, "train/loss_error": 0.4637746512889862, "train/loss_total": 0.5241455435752869 }, { "epoch": 1.2115949772909431, "step": 4535, "train/loss_ctc": 0.8673160672187805, "train/loss_error": 0.46835893392562866, "train/loss_total": 0.548150360584259 }, { "epoch": 
1.2118621426663105, "step": 4536, "train/loss_ctc": 0.7395429611206055, "train/loss_error": 0.5153554081916809, "train/loss_total": 0.5601929426193237 }, { "epoch": 1.2121293080416777, "step": 4537, "train/loss_ctc": 0.5713444948196411, "train/loss_error": 0.46484142541885376, "train/loss_total": 0.48614203929901123 }, { "epoch": 1.2123964734170452, "step": 4538, "train/loss_ctc": 1.0470998287200928, "train/loss_error": 0.43652188777923584, "train/loss_total": 0.5586374998092651 }, { "epoch": 1.2126636387924126, "step": 4539, "train/loss_ctc": 0.9456579685211182, "train/loss_error": 0.4202859699726105, "train/loss_total": 0.5253604054450989 }, { "epoch": 1.2129308041677798, "grad_norm": 7.437825679779053, "learning_rate": 2.2730430136254344e-05, "loss": 0.5286, "step": 4540 }, { "epoch": 1.2129308041677798, "step": 4540, "train/loss_ctc": 0.7992435693740845, "train/loss_error": 0.4640158414840698, "train/loss_total": 0.5310614109039307 }, { "epoch": 1.2131979695431472, "step": 4541, "train/loss_ctc": 0.7634129524230957, "train/loss_error": 0.40785807371139526, "train/loss_total": 0.4789690375328064 }, { "epoch": 1.2134651349185146, "step": 4542, "train/loss_ctc": 0.5723333358764648, "train/loss_error": 0.4950588643550873, "train/loss_total": 0.5105137825012207 }, { "epoch": 1.213732300293882, "step": 4543, "train/loss_ctc": 0.39488717913627625, "train/loss_error": 0.5813795328140259, "train/loss_total": 0.5440810918807983 }, { "epoch": 1.2139994656692492, "step": 4544, "train/loss_ctc": 1.0369834899902344, "train/loss_error": 0.44691693782806396, "train/loss_total": 0.564930260181427 }, { "epoch": 1.2142666310446166, "step": 4545, "train/loss_ctc": 0.5557984113693237, "train/loss_error": 0.41874632239341736, "train/loss_total": 0.44615674018859863 }, { "epoch": 1.214533796419984, "step": 4546, "train/loss_ctc": 0.37372469902038574, "train/loss_error": 0.42389655113220215, "train/loss_total": 0.4138621985912323 }, { "epoch": 1.2148009617953512, "step": 4547, 
"train/loss_ctc": 0.20303285121917725, "train/loss_error": 0.514197587966919, "train/loss_total": 0.4519646465778351 }, { "epoch": 1.2150681271707187, "step": 4548, "train/loss_ctc": 1.3030030727386475, "train/loss_error": 0.4724780023097992, "train/loss_total": 0.6385830640792847 }, { "epoch": 1.215335292546086, "step": 4549, "train/loss_ctc": 0.5326285362243652, "train/loss_error": 0.5593268871307373, "train/loss_total": 0.5539872050285339 }, { "epoch": 1.2156024579214533, "grad_norm": 1.8490403890609741, "learning_rate": 2.2714400213732302e-05, "loss": 0.5134, "step": 4550 }, { "epoch": 1.2156024579214533, "step": 4550, "train/loss_ctc": 1.0984033346176147, "train/loss_error": 0.472520649433136, "train/loss_total": 0.5976971983909607 }, { "epoch": 1.2158696232968207, "step": 4551, "train/loss_ctc": 0.8943423628807068, "train/loss_error": 0.4413926899433136, "train/loss_total": 0.5319826602935791 }, { "epoch": 1.216136788672188, "step": 4552, "train/loss_ctc": 0.9755224585533142, "train/loss_error": 0.5029322504997253, "train/loss_total": 0.597450315952301 }, { "epoch": 1.2164039540475555, "step": 4553, "train/loss_ctc": 0.5335069298744202, "train/loss_error": 0.47350287437438965, "train/loss_total": 0.4855036735534668 }, { "epoch": 1.2166711194229227, "step": 4554, "train/loss_ctc": 0.5627827644348145, "train/loss_error": 0.5088542699813843, "train/loss_total": 0.5196399688720703 }, { "epoch": 1.2169382847982901, "step": 4555, "train/loss_ctc": 0.7583087682723999, "train/loss_error": 0.47948601841926575, "train/loss_total": 0.5352505445480347 }, { "epoch": 1.2172054501736576, "step": 4556, "train/loss_ctc": 0.6076363325119019, "train/loss_error": 0.41945788264274597, "train/loss_total": 0.45709359645843506 }, { "epoch": 1.2174726155490248, "step": 4557, "train/loss_ctc": 0.5659648180007935, "train/loss_error": 0.46253544092178345, "train/loss_total": 0.4832213222980499 }, { "epoch": 1.2177397809243922, "step": 4558, "train/loss_ctc": 0.5613870620727539, 
"train/loss_error": 0.4914110600948334, "train/loss_total": 0.5054062604904175 }, { "epoch": 1.2180069462997596, "step": 4559, "train/loss_ctc": 0.37141865491867065, "train/loss_error": 0.3975364565849304, "train/loss_total": 0.3923128843307495 }, { "epoch": 1.218274111675127, "grad_norm": 2.6301255226135254, "learning_rate": 2.269837029121026e-05, "loss": 0.5106, "step": 4560 }, { "epoch": 1.218274111675127, "step": 4560, "train/loss_ctc": 0.5285245180130005, "train/loss_error": 0.4574340283870697, "train/loss_total": 0.4716521203517914 }, { "epoch": 1.2185412770504942, "step": 4561, "train/loss_ctc": 0.8416488170623779, "train/loss_error": 0.4793625771999359, "train/loss_total": 0.5518198013305664 }, { "epoch": 1.2188084424258616, "step": 4562, "train/loss_ctc": 0.48351454734802246, "train/loss_error": 0.4594828188419342, "train/loss_total": 0.4642891585826874 }, { "epoch": 1.219075607801229, "step": 4563, "train/loss_ctc": 0.4981992840766907, "train/loss_error": 0.4586546719074249, "train/loss_total": 0.4665636122226715 }, { "epoch": 1.2193427731765962, "step": 4564, "train/loss_ctc": 0.8340287208557129, "train/loss_error": 0.4498491883277893, "train/loss_total": 0.5266851186752319 }, { "epoch": 1.2196099385519636, "step": 4565, "train/loss_ctc": 0.6277492642402649, "train/loss_error": 0.47003084421157837, "train/loss_total": 0.5015745162963867 }, { "epoch": 1.219877103927331, "step": 4566, "train/loss_ctc": 0.9795418977737427, "train/loss_error": 0.5136744379997253, "train/loss_total": 0.6068479418754578 }, { "epoch": 1.2201442693026983, "step": 4567, "train/loss_ctc": 1.1855310201644897, "train/loss_error": 0.46003973484039307, "train/loss_total": 0.6051380038261414 }, { "epoch": 1.2204114346780657, "step": 4568, "train/loss_ctc": 0.538263738155365, "train/loss_error": 0.42065170407295227, "train/loss_total": 0.4441741108894348 }, { "epoch": 1.220678600053433, "step": 4569, "train/loss_ctc": 1.316528081893921, "train/loss_error": 0.487784206867218, 
"train/loss_total": 0.6535329818725586 }, { "epoch": 1.2209457654288005, "grad_norm": 2.607327699661255, "learning_rate": 2.2682340368688218e-05, "loss": 0.5292, "step": 4570 }, { "epoch": 1.2209457654288005, "step": 4570, "train/loss_ctc": 1.0717689990997314, "train/loss_error": 0.49379411339759827, "train/loss_total": 0.609389066696167 }, { "epoch": 1.2212129308041677, "step": 4571, "train/loss_ctc": 1.1500427722930908, "train/loss_error": 0.43838655948638916, "train/loss_total": 0.5807178020477295 }, { "epoch": 1.2214800961795351, "step": 4572, "train/loss_ctc": 0.6986046433448792, "train/loss_error": 0.47842323780059814, "train/loss_total": 0.5224595069885254 }, { "epoch": 1.2217472615549025, "step": 4573, "train/loss_ctc": 0.347849041223526, "train/loss_error": 0.4782070815563202, "train/loss_total": 0.45213550329208374 }, { "epoch": 1.22201442693027, "step": 4574, "train/loss_ctc": 1.0541958808898926, "train/loss_error": 0.4441760182380676, "train/loss_total": 0.5661799907684326 }, { "epoch": 1.2222815923056372, "step": 4575, "train/loss_ctc": 1.0102169513702393, "train/loss_error": 0.45504462718963623, "train/loss_total": 0.5660790801048279 }, { "epoch": 1.2225487576810046, "step": 4576, "train/loss_ctc": 0.5516095161437988, "train/loss_error": 0.4364997148513794, "train/loss_total": 0.45952168107032776 }, { "epoch": 1.222815923056372, "step": 4577, "train/loss_ctc": 0.614376425743103, "train/loss_error": 0.43677371740341187, "train/loss_total": 0.47229427099227905 }, { "epoch": 1.2230830884317392, "step": 4578, "train/loss_ctc": 1.007743000984192, "train/loss_error": 0.46841171383857727, "train/loss_total": 0.5762779712677002 }, { "epoch": 1.2233502538071066, "step": 4579, "train/loss_ctc": 0.7846139669418335, "train/loss_error": 0.5020450353622437, "train/loss_total": 0.5585588216781616 }, { "epoch": 1.223617419182474, "grad_norm": 1.590624213218689, "learning_rate": 2.2666310446166176e-05, "loss": 0.5364, "step": 4580 }, { "epoch": 1.223617419182474, 
"step": 4580, "train/loss_ctc": 0.4171895682811737, "train/loss_error": 0.4587678015232086, "train/loss_total": 0.45045217871665955 }, { "epoch": 1.2238845845578412, "step": 4581, "train/loss_ctc": 0.8142855167388916, "train/loss_error": 0.5385212898254395, "train/loss_total": 0.5936741232872009 }, { "epoch": 1.2241517499332086, "step": 4582, "train/loss_ctc": 0.8591575026512146, "train/loss_error": 0.5144897699356079, "train/loss_total": 0.5834233164787292 }, { "epoch": 1.224418915308576, "step": 4583, "train/loss_ctc": 1.7823898792266846, "train/loss_error": 0.4827873408794403, "train/loss_total": 0.7427078485488892 }, { "epoch": 1.2246860806839432, "step": 4584, "train/loss_ctc": 1.3992176055908203, "train/loss_error": 0.4295639991760254, "train/loss_total": 0.6234947443008423 }, { "epoch": 1.2249532460593107, "step": 4585, "train/loss_ctc": 0.7413709759712219, "train/loss_error": 0.45463237166404724, "train/loss_total": 0.5119801163673401 }, { "epoch": 1.225220411434678, "step": 4586, "train/loss_ctc": 0.6139674186706543, "train/loss_error": 0.5452965497970581, "train/loss_total": 0.5590307116508484 }, { "epoch": 1.2254875768100455, "step": 4587, "train/loss_ctc": 0.4636497497558594, "train/loss_error": 0.436151921749115, "train/loss_total": 0.44165149331092834 }, { "epoch": 1.2257547421854127, "step": 4588, "train/loss_ctc": 0.43885594606399536, "train/loss_error": 0.45454758405685425, "train/loss_total": 0.4514092803001404 }, { "epoch": 1.22602190756078, "step": 4589, "train/loss_ctc": 0.6026240587234497, "train/loss_error": 0.44440197944641113, "train/loss_total": 0.4760464131832123 }, { "epoch": 1.2262890729361475, "grad_norm": 2.478748321533203, "learning_rate": 2.2650280523644138e-05, "loss": 0.5434, "step": 4590 }, { "epoch": 1.2262890729361475, "step": 4590, "train/loss_ctc": 0.7247174978256226, "train/loss_error": 0.4295368790626526, "train/loss_total": 0.48857301473617554 }, { "epoch": 1.226556238311515, "step": 4591, "train/loss_ctc": 
1.698885202407837, "train/loss_error": 0.44217750430107117, "train/loss_total": 0.6935190558433533 }, { "epoch": 1.2268234036868821, "step": 4592, "train/loss_ctc": 1.161852240562439, "train/loss_error": 0.5039986968040466, "train/loss_total": 0.6355693936347961 }, { "epoch": 1.2270905690622496, "step": 4593, "train/loss_ctc": 1.1588741540908813, "train/loss_error": 0.4550745487213135, "train/loss_total": 0.595834493637085 }, { "epoch": 1.227357734437617, "step": 4594, "train/loss_ctc": 0.940994918346405, "train/loss_error": 0.42193812131881714, "train/loss_total": 0.5257494449615479 }, { "epoch": 1.2276248998129842, "step": 4595, "train/loss_ctc": 0.5459373593330383, "train/loss_error": 0.44350677728652954, "train/loss_total": 0.4639929234981537 }, { "epoch": 1.2278920651883516, "step": 4596, "train/loss_ctc": 0.5621012449264526, "train/loss_error": 0.45020753145217896, "train/loss_total": 0.4725863039493561 }, { "epoch": 1.228159230563719, "step": 4597, "train/loss_ctc": 0.4517301321029663, "train/loss_error": 0.42568153142929077, "train/loss_total": 0.4308912754058838 }, { "epoch": 1.2284263959390862, "step": 4598, "train/loss_ctc": 1.0479600429534912, "train/loss_error": 0.47930270433425903, "train/loss_total": 0.5930341482162476 }, { "epoch": 1.2286935613144536, "step": 4599, "train/loss_ctc": 0.37815186381340027, "train/loss_error": 0.4203867018222809, "train/loss_total": 0.41193974018096924 }, { "epoch": 1.228960726689821, "grad_norm": 1.7094707489013672, "learning_rate": 2.2634250601122096e-05, "loss": 0.5312, "step": 4600 }, { "epoch": 1.228960726689821, "step": 4600, "train/loss_ctc": 1.293009877204895, "train/loss_error": 0.44748154282569885, "train/loss_total": 0.616587221622467 }, { "epoch": 1.2292278920651882, "step": 4601, "train/loss_ctc": 0.6013059020042419, "train/loss_error": 0.455026239156723, "train/loss_total": 0.4842821955680847 }, { "epoch": 1.2294950574405556, "step": 4602, "train/loss_ctc": 0.9280760288238525, "train/loss_error": 
0.49275293946266174, "train/loss_total": 0.579817533493042 }, { "epoch": 1.229762222815923, "step": 4603, "train/loss_ctc": 0.7740234136581421, "train/loss_error": 0.4549825191497803, "train/loss_total": 0.5187907218933105 }, { "epoch": 1.2300293881912905, "step": 4604, "train/loss_ctc": 0.41505712270736694, "train/loss_error": 0.4661863148212433, "train/loss_total": 0.4559604823589325 }, { "epoch": 1.2302965535666577, "step": 4605, "train/loss_ctc": 0.6631678938865662, "train/loss_error": 0.4868070185184479, "train/loss_total": 0.5220792293548584 }, { "epoch": 1.230563718942025, "step": 4606, "train/loss_ctc": 0.2825363576412201, "train/loss_error": 0.39621075987815857, "train/loss_total": 0.3734758794307709 }, { "epoch": 1.2308308843173925, "step": 4607, "train/loss_ctc": 1.1488358974456787, "train/loss_error": 0.4553655683994293, "train/loss_total": 0.5940596461296082 }, { "epoch": 1.23109804969276, "step": 4608, "train/loss_ctc": 0.3506830036640167, "train/loss_error": 0.41851598024368286, "train/loss_total": 0.4049493968486786 }, { "epoch": 1.2313652150681271, "step": 4609, "train/loss_ctc": 0.540045976638794, "train/loss_error": 0.4980596899986267, "train/loss_total": 0.5064569711685181 }, { "epoch": 1.2316323804434945, "grad_norm": 1.1596218347549438, "learning_rate": 2.2618220678600054e-05, "loss": 0.5056, "step": 4610 }, { "epoch": 1.2316323804434945, "step": 4610, "train/loss_ctc": 0.5219502449035645, "train/loss_error": 0.5021218061447144, "train/loss_total": 0.5060874819755554 }, { "epoch": 1.231899545818862, "step": 4611, "train/loss_ctc": 0.7604732513427734, "train/loss_error": 0.4285103380680084, "train/loss_total": 0.49490290880203247 }, { "epoch": 1.2321667111942292, "step": 4612, "train/loss_ctc": 0.6604653000831604, "train/loss_error": 0.44931110739707947, "train/loss_total": 0.49154192209243774 }, { "epoch": 1.2324338765695966, "step": 4613, "train/loss_ctc": 0.6459534168243408, "train/loss_error": 0.45277079939842224, "train/loss_total": 
0.4914073348045349 }, { "epoch": 1.232701041944964, "step": 4614, "train/loss_ctc": 0.6244624257087708, "train/loss_error": 0.43040764331817627, "train/loss_total": 0.4692186117172241 }, { "epoch": 1.2329682073203312, "step": 4615, "train/loss_ctc": 0.6064674854278564, "train/loss_error": 0.4892542362213135, "train/loss_total": 0.5126969218254089 }, { "epoch": 1.2332353726956986, "step": 4616, "train/loss_ctc": 0.4969930052757263, "train/loss_error": 0.45163941383361816, "train/loss_total": 0.4607101380825043 }, { "epoch": 1.233502538071066, "step": 4617, "train/loss_ctc": 0.8313314914703369, "train/loss_error": 0.4597192704677582, "train/loss_total": 0.534041702747345 }, { "epoch": 1.2337697034464332, "step": 4618, "train/loss_ctc": 0.4476492702960968, "train/loss_error": 0.5089064836502075, "train/loss_total": 0.49665504693984985 }, { "epoch": 1.2340368688218006, "step": 4619, "train/loss_ctc": 1.5389277935028076, "train/loss_error": 0.466232031583786, "train/loss_total": 0.6807712316513062 }, { "epoch": 1.234304034197168, "grad_norm": 1.465822458267212, "learning_rate": 2.2602190756078012e-05, "loss": 0.5138, "step": 4620 }, { "epoch": 1.234304034197168, "step": 4620, "train/loss_ctc": 0.3273700475692749, "train/loss_error": 0.561084508895874, "train/loss_total": 0.5143416523933411 }, { "epoch": 1.2345711995725355, "step": 4621, "train/loss_ctc": 0.594296932220459, "train/loss_error": 0.45295411348342896, "train/loss_total": 0.4812226891517639 }, { "epoch": 1.2348383649479027, "step": 4622, "train/loss_ctc": 0.7046976089477539, "train/loss_error": 0.49018627405166626, "train/loss_total": 0.5330885648727417 }, { "epoch": 1.23510553032327, "step": 4623, "train/loss_ctc": 0.2413860559463501, "train/loss_error": 0.4768497347831726, "train/loss_total": 0.4297569990158081 }, { "epoch": 1.2353726956986375, "step": 4624, "train/loss_ctc": 0.47241121530532837, "train/loss_error": 0.48184284567832947, "train/loss_total": 0.4799565374851227 }, { "epoch": 1.235639861074005, 
"step": 4625, "train/loss_ctc": 0.904290497303009, "train/loss_error": 0.4799135625362396, "train/loss_total": 0.5647889375686646 }, { "epoch": 1.2359070264493721, "step": 4626, "train/loss_ctc": 0.7400909066200256, "train/loss_error": 0.45111748576164246, "train/loss_total": 0.508912205696106 }, { "epoch": 1.2361741918247395, "step": 4627, "train/loss_ctc": 1.328062891960144, "train/loss_error": 0.5358884930610657, "train/loss_total": 0.6943233609199524 }, { "epoch": 1.236441357200107, "step": 4628, "train/loss_ctc": 0.8170164823532104, "train/loss_error": 0.501240611076355, "train/loss_total": 0.5643957853317261 }, { "epoch": 1.2367085225754741, "step": 4629, "train/loss_ctc": 0.5641719102859497, "train/loss_error": 0.45707905292510986, "train/loss_total": 0.4784976541996002 }, { "epoch": 1.2369756879508416, "grad_norm": 1.2859647274017334, "learning_rate": 2.258616083355597e-05, "loss": 0.5249, "step": 4630 }, { "epoch": 1.2369756879508416, "step": 4630, "train/loss_ctc": 1.100504755973816, "train/loss_error": 0.4496239125728607, "train/loss_total": 0.5798000693321228 }, { "epoch": 1.237242853326209, "step": 4631, "train/loss_ctc": 0.45054230093955994, "train/loss_error": 0.43791571259498596, "train/loss_total": 0.4404410421848297 }, { "epoch": 1.2375100187015762, "step": 4632, "train/loss_ctc": 1.179215431213379, "train/loss_error": 0.5193477869033813, "train/loss_total": 0.651321291923523 }, { "epoch": 1.2377771840769436, "step": 4633, "train/loss_ctc": 0.7100318670272827, "train/loss_error": 0.4975139796733856, "train/loss_total": 0.5400175452232361 }, { "epoch": 1.238044349452311, "step": 4634, "train/loss_ctc": 0.755524754524231, "train/loss_error": 0.5136969089508057, "train/loss_total": 0.5620625019073486 }, { "epoch": 1.2383115148276784, "step": 4635, "train/loss_ctc": 0.5800527930259705, "train/loss_error": 0.46283888816833496, "train/loss_total": 0.48628169298171997 }, { "epoch": 1.2385786802030456, "step": 4636, "train/loss_ctc": 1.2253360748291016, 
"train/loss_error": 0.4994261562824249, "train/loss_total": 0.6446081399917603 }, { "epoch": 1.238845845578413, "step": 4637, "train/loss_ctc": 1.1383562088012695, "train/loss_error": 0.3958514630794525, "train/loss_total": 0.5443524122238159 }, { "epoch": 1.2391130109537805, "step": 4638, "train/loss_ctc": 0.7652048468589783, "train/loss_error": 0.5401241183280945, "train/loss_total": 0.5851402878761292 }, { "epoch": 1.2393801763291477, "step": 4639, "train/loss_ctc": 1.2819693088531494, "train/loss_error": 0.45506811141967773, "train/loss_total": 0.6204483509063721 }, { "epoch": 1.239647341704515, "grad_norm": 1.8974801301956177, "learning_rate": 2.2570130911033928e-05, "loss": 0.5654, "step": 4640 }, { "epoch": 1.239647341704515, "step": 4640, "train/loss_ctc": 0.8208044171333313, "train/loss_error": 0.5153013467788696, "train/loss_total": 0.576401948928833 }, { "epoch": 1.2399145070798825, "step": 4641, "train/loss_ctc": 0.5953839421272278, "train/loss_error": 0.40624237060546875, "train/loss_total": 0.4440706968307495 }, { "epoch": 1.24018167245525, "step": 4642, "train/loss_ctc": 1.1931610107421875, "train/loss_error": 0.4042923152446747, "train/loss_total": 0.5620660781860352 }, { "epoch": 1.240448837830617, "step": 4643, "train/loss_ctc": 1.0113227367401123, "train/loss_error": 0.5096771121025085, "train/loss_total": 0.6100062131881714 }, { "epoch": 1.2407160032059845, "step": 4644, "train/loss_ctc": 1.3603274822235107, "train/loss_error": 0.5129162669181824, "train/loss_total": 0.6823984980583191 }, { "epoch": 1.240983168581352, "step": 4645, "train/loss_ctc": 0.67913818359375, "train/loss_error": 0.5319223999977112, "train/loss_total": 0.56136554479599 }, { "epoch": 1.2412503339567191, "step": 4646, "train/loss_ctc": 0.7005602717399597, "train/loss_error": 0.43236005306243896, "train/loss_total": 0.486000120639801 }, { "epoch": 1.2415174993320865, "step": 4647, "train/loss_ctc": 0.35460004210472107, "train/loss_error": 0.4208697974681854, 
"train/loss_total": 0.40761587023735046 }, { "epoch": 1.241784664707454, "step": 4648, "train/loss_ctc": 0.5344613194465637, "train/loss_error": 0.5507838726043701, "train/loss_total": 0.5475193858146667 }, { "epoch": 1.2420518300828212, "step": 4649, "train/loss_ctc": 0.5179443359375, "train/loss_error": 0.46996238827705383, "train/loss_total": 0.4795587956905365 }, { "epoch": 1.2423189954581886, "grad_norm": 1.18218994140625, "learning_rate": 2.255410098851189e-05, "loss": 0.5357, "step": 4650 }, { "epoch": 1.2423189954581886, "step": 4650, "train/loss_ctc": 1.4543761014938354, "train/loss_error": 0.5428758859634399, "train/loss_total": 0.7251759767532349 }, { "epoch": 1.242586160833556, "step": 4651, "train/loss_ctc": 0.4664069414138794, "train/loss_error": 0.45689404010772705, "train/loss_total": 0.4587966203689575 }, { "epoch": 1.2428533262089234, "step": 4652, "train/loss_ctc": 0.848602831363678, "train/loss_error": 0.48281294107437134, "train/loss_total": 0.5559709072113037 }, { "epoch": 1.2431204915842906, "step": 4653, "train/loss_ctc": 1.3039674758911133, "train/loss_error": 0.4348808228969574, "train/loss_total": 0.6086981296539307 }, { "epoch": 1.243387656959658, "step": 4654, "train/loss_ctc": 1.2944166660308838, "train/loss_error": 0.421644926071167, "train/loss_total": 0.5961992740631104 }, { "epoch": 1.2436548223350254, "step": 4655, "train/loss_ctc": 0.6533520221710205, "train/loss_error": 0.4767010509967804, "train/loss_total": 0.5120312571525574 }, { "epoch": 1.2439219877103926, "step": 4656, "train/loss_ctc": 0.5492953062057495, "train/loss_error": 0.48989641666412354, "train/loss_total": 0.5017762184143066 }, { "epoch": 1.24418915308576, "step": 4657, "train/loss_ctc": 0.5326934456825256, "train/loss_error": 0.49525368213653564, "train/loss_total": 0.5027416348457336 }, { "epoch": 1.2444563184611275, "step": 4658, "train/loss_ctc": 0.41640937328338623, "train/loss_error": 0.44844967126846313, "train/loss_total": 0.4420416057109833 }, { "epoch": 
1.244723483836495, "step": 4659, "train/loss_ctc": 0.6691029071807861, "train/loss_error": 0.4463755786418915, "train/loss_total": 0.4909210205078125 }, { "epoch": 1.244990649211862, "grad_norm": 1.1869536638259888, "learning_rate": 2.2538071065989848e-05, "loss": 0.5394, "step": 4660 }, { "epoch": 1.244990649211862, "step": 4660, "train/loss_ctc": 1.0121783018112183, "train/loss_error": 0.4117933213710785, "train/loss_total": 0.5318703055381775 }, { "epoch": 1.2452578145872295, "step": 4661, "train/loss_ctc": 0.5156049132347107, "train/loss_error": 0.5281684398651123, "train/loss_total": 0.5256557464599609 }, { "epoch": 1.245524979962597, "step": 4662, "train/loss_ctc": 0.35014617443084717, "train/loss_error": 0.4456334412097931, "train/loss_total": 0.4265359938144684 }, { "epoch": 1.2457921453379641, "step": 4663, "train/loss_ctc": 0.6114411354064941, "train/loss_error": 0.44391950964927673, "train/loss_total": 0.47742384672164917 }, { "epoch": 1.2460593107133315, "step": 4664, "train/loss_ctc": 0.5108931064605713, "train/loss_error": 0.4157850742340088, "train/loss_total": 0.4348067045211792 }, { "epoch": 1.246326476088699, "step": 4665, "train/loss_ctc": 0.5658690333366394, "train/loss_error": 0.49003416299819946, "train/loss_total": 0.5052011609077454 }, { "epoch": 1.2465936414640661, "step": 4666, "train/loss_ctc": 0.7969842553138733, "train/loss_error": 0.45486485958099365, "train/loss_total": 0.5232887268066406 }, { "epoch": 1.2468608068394336, "step": 4667, "train/loss_ctc": 0.4073532819747925, "train/loss_error": 0.45277082920074463, "train/loss_total": 0.4436873495578766 }, { "epoch": 1.247127972214801, "step": 4668, "train/loss_ctc": 0.6339585185050964, "train/loss_error": 0.4273148477077484, "train/loss_total": 0.4686436057090759 }, { "epoch": 1.2473951375901684, "step": 4669, "train/loss_ctc": 0.8780130743980408, "train/loss_error": 0.519486665725708, "train/loss_total": 0.5911919474601746 }, { "epoch": 1.2476623029655356, "grad_norm": 
3.091667890548706, "learning_rate": 2.252204114346781e-05, "loss": 0.4928, "step": 4670 }, { "epoch": 1.2476623029655356, "step": 4670, "train/loss_ctc": 0.3743356168270111, "train/loss_error": 0.4522055685520172, "train/loss_total": 0.43663159012794495 }, { "epoch": 1.247929468340903, "step": 4671, "train/loss_ctc": 1.0601040124893188, "train/loss_error": 0.44184041023254395, "train/loss_total": 0.565493106842041 }, { "epoch": 1.2481966337162704, "step": 4672, "train/loss_ctc": 0.9097789525985718, "train/loss_error": 0.4823373258113861, "train/loss_total": 0.5678256750106812 }, { "epoch": 1.2484637990916376, "step": 4673, "train/loss_ctc": 0.6750757098197937, "train/loss_error": 0.4250214993953705, "train/loss_total": 0.47503232955932617 }, { "epoch": 1.248730964467005, "step": 4674, "train/loss_ctc": 0.6054915189743042, "train/loss_error": 0.5120274424552917, "train/loss_total": 0.5307202339172363 }, { "epoch": 1.2489981298423725, "step": 4675, "train/loss_ctc": 0.6427102088928223, "train/loss_error": 0.45564308762550354, "train/loss_total": 0.4930565357208252 }, { "epoch": 1.2492652952177399, "step": 4676, "train/loss_ctc": 1.208820104598999, "train/loss_error": 0.4505467414855957, "train/loss_total": 0.6022014021873474 }, { "epoch": 1.249532460593107, "step": 4677, "train/loss_ctc": 0.6861441135406494, "train/loss_error": 0.528786301612854, "train/loss_total": 0.5602578520774841 }, { "epoch": 1.2497996259684745, "step": 4678, "train/loss_ctc": 0.7288471460342407, "train/loss_error": 0.48012039065361023, "train/loss_total": 0.5298657417297363 }, { "epoch": 1.250066791343842, "step": 4679, "train/loss_ctc": 1.000663161277771, "train/loss_error": 0.38912710547447205, "train/loss_total": 0.5114343166351318 }, { "epoch": 1.250333956719209, "grad_norm": 1.7228959798812866, "learning_rate": 2.2506011220945767e-05, "loss": 0.5273, "step": 4680 }, { "epoch": 1.250333956719209, "step": 4680, "train/loss_ctc": 1.3645931482315063, "train/loss_error": 0.43384623527526855, 
"train/loss_total": 0.6199955940246582 }, { "epoch": 1.2506011220945765, "step": 4681, "train/loss_ctc": 0.5882939100265503, "train/loss_error": 0.44300195574760437, "train/loss_total": 0.47206035256385803 }, { "epoch": 1.250868287469944, "step": 4682, "train/loss_ctc": 0.47544923424720764, "train/loss_error": 0.4433708190917969, "train/loss_total": 0.449786514043808 }, { "epoch": 1.2511354528453111, "step": 4683, "train/loss_ctc": 0.5904799103736877, "train/loss_error": 0.48681941628456116, "train/loss_total": 0.5075514912605286 }, { "epoch": 1.2514026182206786, "step": 4684, "train/loss_ctc": 0.9888313412666321, "train/loss_error": 0.46368208527565, "train/loss_total": 0.5687119364738464 }, { "epoch": 1.251669783596046, "step": 4685, "train/loss_ctc": 0.39457470178604126, "train/loss_error": 0.4280734658241272, "train/loss_total": 0.42137372493743896 }, { "epoch": 1.2519369489714134, "step": 4686, "train/loss_ctc": 0.4050064980983734, "train/loss_error": 0.41834136843681335, "train/loss_total": 0.4156744182109833 }, { "epoch": 1.2522041143467806, "step": 4687, "train/loss_ctc": 1.0661677122116089, "train/loss_error": 0.4691472053527832, "train/loss_total": 0.5885513424873352 }, { "epoch": 1.252471279722148, "step": 4688, "train/loss_ctc": 0.7115901708602905, "train/loss_error": 0.4535592496395111, "train/loss_total": 0.5051654577255249 }, { "epoch": 1.2527384450975154, "step": 4689, "train/loss_ctc": 0.713434100151062, "train/loss_error": 0.4961312413215637, "train/loss_total": 0.5395918488502502 }, { "epoch": 1.2530056104728828, "grad_norm": 4.466556549072266, "learning_rate": 2.2489981298423725e-05, "loss": 0.5088, "step": 4690 }, { "epoch": 1.2530056104728828, "step": 4690, "train/loss_ctc": 1.2301609516143799, "train/loss_error": 0.4420939087867737, "train/loss_total": 0.599707305431366 }, { "epoch": 1.25327277584825, "step": 4691, "train/loss_ctc": 0.4734274446964264, "train/loss_error": 0.4435561001300812, "train/loss_total": 0.44953039288520813 }, { 
"epoch": 1.2535399412236174, "step": 4692, "train/loss_ctc": 0.8817245960235596, "train/loss_error": 0.4607081115245819, "train/loss_total": 0.5449113845825195 }, { "epoch": 1.2538071065989849, "step": 4693, "train/loss_ctc": 1.1300976276397705, "train/loss_error": 0.4669176936149597, "train/loss_total": 0.5995537042617798 }, { "epoch": 1.254074271974352, "step": 4694, "train/loss_ctc": 0.8179707527160645, "train/loss_error": 0.47301849722862244, "train/loss_total": 0.5420089960098267 }, { "epoch": 1.2543414373497195, "step": 4695, "train/loss_ctc": 1.124915361404419, "train/loss_error": 0.4472561776638031, "train/loss_total": 0.5827880501747131 }, { "epoch": 1.254608602725087, "step": 4696, "train/loss_ctc": 0.7617356777191162, "train/loss_error": 0.4865354895591736, "train/loss_total": 0.54157555103302 }, { "epoch": 1.254875768100454, "step": 4697, "train/loss_ctc": 1.1273767948150635, "train/loss_error": 0.4824906587600708, "train/loss_total": 0.6114678978919983 }, { "epoch": 1.2551429334758215, "step": 4698, "train/loss_ctc": 0.47312474250793457, "train/loss_error": 0.5800439715385437, "train/loss_total": 0.5586601495742798 }, { "epoch": 1.255410098851189, "step": 4699, "train/loss_ctc": 0.9213190078735352, "train/loss_error": 0.5084137916564941, "train/loss_total": 0.5909948348999023 }, { "epoch": 1.2556772642265561, "grad_norm": 2.508129358291626, "learning_rate": 2.2473951375901683e-05, "loss": 0.5621, "step": 4700 }, { "epoch": 1.2556772642265561, "step": 4700, "train/loss_ctc": 0.5951982140541077, "train/loss_error": 0.5265582799911499, "train/loss_total": 0.5402862429618835 }, { "epoch": 1.2559444296019235, "step": 4701, "train/loss_ctc": 0.477253258228302, "train/loss_error": 0.4175144135951996, "train/loss_total": 0.429462194442749 }, { "epoch": 1.256211594977291, "step": 4702, "train/loss_ctc": 0.5785408616065979, "train/loss_error": 0.497782826423645, "train/loss_total": 0.5139344334602356 }, { "epoch": 1.2564787603526584, "step": 4703, 
"train/loss_ctc": 0.6138666868209839, "train/loss_error": 0.45383238792419434, "train/loss_total": 0.48583927750587463 }, { "epoch": 1.2567459257280256, "step": 4704, "train/loss_ctc": 1.03102707862854, "train/loss_error": 0.5528635382652283, "train/loss_total": 0.6484962701797485 }, { "epoch": 1.257013091103393, "step": 4705, "train/loss_ctc": 0.3578416407108307, "train/loss_error": 0.43499282002449036, "train/loss_total": 0.41956260800361633 }, { "epoch": 1.2572802564787604, "step": 4706, "train/loss_ctc": 0.4942053258419037, "train/loss_error": 0.4379860460758209, "train/loss_total": 0.4492299258708954 }, { "epoch": 1.2575474218541278, "step": 4707, "train/loss_ctc": 0.8573526740074158, "train/loss_error": 0.4503704905509949, "train/loss_total": 0.531766951084137 }, { "epoch": 1.257814587229495, "step": 4708, "train/loss_ctc": 0.5933959484100342, "train/loss_error": 0.5525497794151306, "train/loss_total": 0.5607190132141113 }, { "epoch": 1.2580817526048624, "step": 4709, "train/loss_ctc": 0.686246395111084, "train/loss_error": 0.4419403672218323, "train/loss_total": 0.4908015727996826 }, { "epoch": 1.2583489179802299, "grad_norm": 1.3588483333587646, "learning_rate": 2.2457921453379645e-05, "loss": 0.507, "step": 4710 }, { "epoch": 1.2583489179802299, "step": 4710, "train/loss_ctc": 1.3738116025924683, "train/loss_error": 0.49315589666366577, "train/loss_total": 0.6692870855331421 }, { "epoch": 1.258616083355597, "step": 4711, "train/loss_ctc": 0.829607367515564, "train/loss_error": 0.44095560908317566, "train/loss_total": 0.5186859369277954 }, { "epoch": 1.2588832487309645, "step": 4712, "train/loss_ctc": 1.5676114559173584, "train/loss_error": 0.43931081891059875, "train/loss_total": 0.6649709939956665 }, { "epoch": 1.2591504141063319, "step": 4713, "train/loss_ctc": 0.5252797603607178, "train/loss_error": 0.38738003373146057, "train/loss_total": 0.41495999693870544 }, { "epoch": 1.259417579481699, "step": 4714, "train/loss_ctc": 0.6924283504486084, 
"train/loss_error": 0.5006529688835144, "train/loss_total": 0.5390080213546753 }, { "epoch": 1.2596847448570665, "step": 4715, "train/loss_ctc": 0.6660871505737305, "train/loss_error": 0.46286413073539734, "train/loss_total": 0.5035087466239929 }, { "epoch": 1.259951910232434, "step": 4716, "train/loss_ctc": 0.8961316347122192, "train/loss_error": 0.520164966583252, "train/loss_total": 0.5953583121299744 }, { "epoch": 1.260219075607801, "step": 4717, "train/loss_ctc": 0.37098050117492676, "train/loss_error": 0.49968644976615906, "train/loss_total": 0.4739452600479126 }, { "epoch": 1.2604862409831685, "step": 4718, "train/loss_ctc": 0.713101863861084, "train/loss_error": 0.5334005951881409, "train/loss_total": 0.5693408250808716 }, { "epoch": 1.260753406358536, "step": 4719, "train/loss_ctc": 0.7019304633140564, "train/loss_error": 0.46129223704338074, "train/loss_total": 0.509419858455658 }, { "epoch": 1.2610205717339034, "grad_norm": 1.7258362770080566, "learning_rate": 2.2441891530857603e-05, "loss": 0.5458, "step": 4720 }, { "epoch": 1.2610205717339034, "step": 4720, "train/loss_ctc": 1.1889621019363403, "train/loss_error": 0.48293355107307434, "train/loss_total": 0.6241392493247986 }, { "epoch": 1.2612877371092706, "step": 4721, "train/loss_ctc": 1.159026861190796, "train/loss_error": 0.4328955411911011, "train/loss_total": 0.5781217813491821 }, { "epoch": 1.261554902484638, "step": 4722, "train/loss_ctc": 0.7643430233001709, "train/loss_error": 0.456559419631958, "train/loss_total": 0.5181161761283875 }, { "epoch": 1.2618220678600054, "step": 4723, "train/loss_ctc": 1.0302611589431763, "train/loss_error": 0.4796832799911499, "train/loss_total": 0.5897988677024841 }, { "epoch": 1.2620892332353728, "step": 4724, "train/loss_ctc": 0.521868109703064, "train/loss_error": 0.36236438155174255, "train/loss_total": 0.39426514506340027 }, { "epoch": 1.26235639861074, "step": 4725, "train/loss_ctc": 0.8664378523826599, "train/loss_error": 0.45997920632362366, 
"train/loss_total": 0.5412709712982178 }, { "epoch": 1.2626235639861074, "step": 4726, "train/loss_ctc": 0.7553087472915649, "train/loss_error": 0.38803771138191223, "train/loss_total": 0.4614919424057007 }, { "epoch": 1.2628907293614748, "step": 4727, "train/loss_ctc": 0.9758375883102417, "train/loss_error": 0.41772153973579407, "train/loss_total": 0.5293447375297546 }, { "epoch": 1.263157894736842, "step": 4728, "train/loss_ctc": 1.1855500936508179, "train/loss_error": 0.5027522444725037, "train/loss_total": 0.6393117904663086 }, { "epoch": 1.2634250601122095, "step": 4729, "train/loss_ctc": 0.4633835554122925, "train/loss_error": 0.5101590156555176, "train/loss_total": 0.5008039474487305 }, { "epoch": 1.2636922254875769, "grad_norm": 2.294517755508423, "learning_rate": 2.242586160833556e-05, "loss": 0.5377, "step": 4730 }, { "epoch": 1.2636922254875769, "step": 4730, "train/loss_ctc": 0.5647523403167725, "train/loss_error": 0.46084266901016235, "train/loss_total": 0.4816246032714844 }, { "epoch": 1.263959390862944, "step": 4731, "train/loss_ctc": 0.7596425414085388, "train/loss_error": 0.45858529210090637, "train/loss_total": 0.5187967419624329 }, { "epoch": 1.2642265562383115, "step": 4732, "train/loss_ctc": 1.6111459732055664, "train/loss_error": 0.5379142165184021, "train/loss_total": 0.7525606155395508 }, { "epoch": 1.264493721613679, "step": 4733, "train/loss_ctc": 0.49822530150413513, "train/loss_error": 0.45004209876060486, "train/loss_total": 0.4596787691116333 }, { "epoch": 1.264760886989046, "step": 4734, "train/loss_ctc": 0.9705126285552979, "train/loss_error": 0.44144749641418457, "train/loss_total": 0.5472605228424072 }, { "epoch": 1.2650280523644135, "step": 4735, "train/loss_ctc": 0.9692372679710388, "train/loss_error": 0.45782020688056946, "train/loss_total": 0.5601036548614502 }, { "epoch": 1.265295217739781, "step": 4736, "train/loss_ctc": 0.5644770860671997, "train/loss_error": 0.5197328925132751, "train/loss_total": 0.528681755065918 }, { 
"epoch": 1.2655623831151483, "step": 4737, "train/loss_ctc": 1.1391369104385376, "train/loss_error": 0.4905771315097809, "train/loss_total": 0.6202890872955322 }, { "epoch": 1.2658295484905158, "step": 4738, "train/loss_ctc": 1.1385719776153564, "train/loss_error": 0.4589717984199524, "train/loss_total": 0.5948918461799622 }, { "epoch": 1.266096713865883, "step": 4739, "train/loss_ctc": 0.6467820405960083, "train/loss_error": 0.5306181907653809, "train/loss_total": 0.5538510084152222 }, { "epoch": 1.2663638792412504, "grad_norm": 1.6205828189849854, "learning_rate": 2.240983168581352e-05, "loss": 0.5618, "step": 4740 }, { "epoch": 1.2663638792412504, "step": 4740, "train/loss_ctc": 0.7214241623878479, "train/loss_error": 0.49623972177505493, "train/loss_total": 0.5412766337394714 }, { "epoch": 1.2666310446166178, "step": 4741, "train/loss_ctc": 0.526858925819397, "train/loss_error": 0.4351263642311096, "train/loss_total": 0.45347291231155396 }, { "epoch": 1.266898209991985, "step": 4742, "train/loss_ctc": 0.6148653030395508, "train/loss_error": 0.4235627353191376, "train/loss_total": 0.4618232548236847 }, { "epoch": 1.2671653753673524, "step": 4743, "train/loss_ctc": 0.5545579791069031, "train/loss_error": 0.4582234025001526, "train/loss_total": 0.4774903357028961 }, { "epoch": 1.2674325407427198, "step": 4744, "train/loss_ctc": 0.8395967483520508, "train/loss_error": 0.5189364552497864, "train/loss_total": 0.5830685496330261 }, { "epoch": 1.267699706118087, "step": 4745, "train/loss_ctc": 0.4814661741256714, "train/loss_error": 0.46057650446891785, "train/loss_total": 0.46475446224212646 }, { "epoch": 1.2679668714934544, "step": 4746, "train/loss_ctc": 0.9832582473754883, "train/loss_error": 0.5041452646255493, "train/loss_total": 0.599967896938324 }, { "epoch": 1.2682340368688219, "step": 4747, "train/loss_ctc": 1.1726807355880737, "train/loss_error": 0.47137513756752014, "train/loss_total": 0.6116362810134888 }, { "epoch": 1.268501202244189, "step": 4748, 
"train/loss_ctc": 0.40136992931365967, "train/loss_error": 0.47940793633461, "train/loss_total": 0.4638003408908844 }, { "epoch": 1.2687683676195565, "step": 4749, "train/loss_ctc": 0.8054472208023071, "train/loss_error": 0.4515759348869324, "train/loss_total": 0.5223501920700073 }, { "epoch": 1.2690355329949239, "grad_norm": 1.8885842561721802, "learning_rate": 2.2393801763291477e-05, "loss": 0.518, "step": 4750 }, { "epoch": 1.2690355329949239, "step": 4750, "train/loss_ctc": 0.5886160135269165, "train/loss_error": 0.4948493242263794, "train/loss_total": 0.5136026740074158 }, { "epoch": 1.269302698370291, "step": 4751, "train/loss_ctc": 0.38542354106903076, "train/loss_error": 0.4948687255382538, "train/loss_total": 0.47297969460487366 }, { "epoch": 1.2695698637456585, "step": 4752, "train/loss_ctc": 0.5547389388084412, "train/loss_error": 0.49904513359069824, "train/loss_total": 0.5101839303970337 }, { "epoch": 1.269837029121026, "step": 4753, "train/loss_ctc": 0.6006180047988892, "train/loss_error": 0.45231160521507263, "train/loss_total": 0.481972873210907 }, { "epoch": 1.2701041944963933, "step": 4754, "train/loss_ctc": 0.48995524644851685, "train/loss_error": 0.4562755525112152, "train/loss_total": 0.4630115032196045 }, { "epoch": 1.2703713598717608, "step": 4755, "train/loss_ctc": 0.6267480850219727, "train/loss_error": 0.5247963070869446, "train/loss_total": 0.5451866984367371 }, { "epoch": 1.270638525247128, "step": 4756, "train/loss_ctc": 0.8515296578407288, "train/loss_error": 0.47612881660461426, "train/loss_total": 0.5512089729309082 }, { "epoch": 1.2709056906224954, "step": 4757, "train/loss_ctc": 0.3488060235977173, "train/loss_error": 0.480598121881485, "train/loss_total": 0.45423972606658936 }, { "epoch": 1.2711728559978628, "step": 4758, "train/loss_ctc": 1.122709035873413, "train/loss_error": 0.5029646754264832, "train/loss_total": 0.6269135475158691 }, { "epoch": 1.27144002137323, "step": 4759, "train/loss_ctc": 1.2041471004486084, 
"train/loss_error": 0.5060598850250244, "train/loss_total": 0.6456773281097412 }, { "epoch": 1.2717071867485974, "grad_norm": 2.033411979675293, "learning_rate": 2.2377771840769435e-05, "loss": 0.5265, "step": 4760 }, { "epoch": 1.2717071867485974, "step": 4760, "train/loss_ctc": 0.963525652885437, "train/loss_error": 0.5581514835357666, "train/loss_total": 0.6392263174057007 }, { "epoch": 1.2719743521239648, "step": 4761, "train/loss_ctc": 0.8366016149520874, "train/loss_error": 0.45057395100593567, "train/loss_total": 0.5277795195579529 }, { "epoch": 1.272241517499332, "step": 4762, "train/loss_ctc": 0.7446876764297485, "train/loss_error": 0.47380077838897705, "train/loss_total": 0.5279781818389893 }, { "epoch": 1.2725086828746994, "step": 4763, "train/loss_ctc": 0.7483539581298828, "train/loss_error": 0.4357084333896637, "train/loss_total": 0.4982375502586365 }, { "epoch": 1.2727758482500668, "step": 4764, "train/loss_ctc": 0.6854677796363831, "train/loss_error": 0.4767586886882782, "train/loss_total": 0.5185005068778992 }, { "epoch": 1.273043013625434, "step": 4765, "train/loss_ctc": 0.699958324432373, "train/loss_error": 0.4315577745437622, "train/loss_total": 0.48523789644241333 }, { "epoch": 1.2733101790008015, "step": 4766, "train/loss_ctc": 0.3975749909877777, "train/loss_error": 0.4224180579185486, "train/loss_total": 0.4174494743347168 }, { "epoch": 1.2735773443761689, "step": 4767, "train/loss_ctc": 0.43876105546951294, "train/loss_error": 0.4576881229877472, "train/loss_total": 0.4539027214050293 }, { "epoch": 1.273844509751536, "step": 4768, "train/loss_ctc": 0.7770251035690308, "train/loss_error": 0.4549526870250702, "train/loss_total": 0.5193671584129333 }, { "epoch": 1.2741116751269035, "step": 4769, "train/loss_ctc": 0.5474015474319458, "train/loss_error": 0.5020623803138733, "train/loss_total": 0.5111302137374878 }, { "epoch": 1.274378840502271, "grad_norm": 2.470675230026245, "learning_rate": 2.2361741918247397e-05, "loss": 0.5099, "step": 4770 
}, { "epoch": 1.274378840502271, "step": 4770, "train/loss_ctc": 0.5632127523422241, "train/loss_error": 0.4047791659832001, "train/loss_total": 0.43646588921546936 }, { "epoch": 1.2746460058776383, "step": 4771, "train/loss_ctc": 0.7348291873931885, "train/loss_error": 0.4762888252735138, "train/loss_total": 0.5279968976974487 }, { "epoch": 1.2749131712530057, "step": 4772, "train/loss_ctc": 0.42114412784576416, "train/loss_error": 0.4658683240413666, "train/loss_total": 0.4569234848022461 }, { "epoch": 1.275180336628373, "step": 4773, "train/loss_ctc": 0.7032368779182434, "train/loss_error": 0.4600430130958557, "train/loss_total": 0.5086817741394043 }, { "epoch": 1.2754475020037404, "step": 4774, "train/loss_ctc": 1.159877061843872, "train/loss_error": 0.438258558511734, "train/loss_total": 0.5825822949409485 }, { "epoch": 1.2757146673791078, "step": 4775, "train/loss_ctc": 0.893828272819519, "train/loss_error": 0.4657653272151947, "train/loss_total": 0.5513778924942017 }, { "epoch": 1.275981832754475, "step": 4776, "train/loss_ctc": 0.336928129196167, "train/loss_error": 0.49005648493766785, "train/loss_total": 0.4594308137893677 }, { "epoch": 1.2762489981298424, "step": 4777, "train/loss_ctc": 0.6827058792114258, "train/loss_error": 0.475382924079895, "train/loss_total": 0.5168474912643433 }, { "epoch": 1.2765161635052098, "step": 4778, "train/loss_ctc": 0.6989758014678955, "train/loss_error": 0.4717588424682617, "train/loss_total": 0.5172022581100464 }, { "epoch": 1.276783328880577, "step": 4779, "train/loss_ctc": 0.8618059158325195, "train/loss_error": 0.4127928614616394, "train/loss_total": 0.5025954842567444 }, { "epoch": 1.2770504942559444, "grad_norm": 1.8959623575210571, "learning_rate": 2.2345711995725355e-05, "loss": 0.506, "step": 4780 }, { "epoch": 1.2770504942559444, "step": 4780, "train/loss_ctc": 1.263364553451538, "train/loss_error": 0.43385764956474304, "train/loss_total": 0.599759042263031 }, { "epoch": 1.2773176596313118, "step": 4781, 
"train/loss_ctc": 0.8483245372772217, "train/loss_error": 0.42265158891677856, "train/loss_total": 0.5077861547470093 }, { "epoch": 1.277584825006679, "step": 4782, "train/loss_ctc": 0.8384129405021667, "train/loss_error": 0.4531274735927582, "train/loss_total": 0.5301845669746399 }, { "epoch": 1.2778519903820464, "step": 4783, "train/loss_ctc": 0.45669132471084595, "train/loss_error": 0.43224793672561646, "train/loss_total": 0.43713662028312683 }, { "epoch": 1.2781191557574139, "step": 4784, "train/loss_ctc": 0.7221277952194214, "train/loss_error": 0.49672001600265503, "train/loss_total": 0.5418015718460083 }, { "epoch": 1.278386321132781, "step": 4785, "train/loss_ctc": 0.7342614531517029, "train/loss_error": 0.466461181640625, "train/loss_total": 0.5200212597846985 }, { "epoch": 1.2786534865081485, "step": 4786, "train/loss_ctc": 0.5583317875862122, "train/loss_error": 0.4279724061489105, "train/loss_total": 0.45404428243637085 }, { "epoch": 1.278920651883516, "step": 4787, "train/loss_ctc": 0.6956483721733093, "train/loss_error": 0.46268942952156067, "train/loss_total": 0.5092812180519104 }, { "epoch": 1.2791878172588833, "step": 4788, "train/loss_ctc": 0.6966539621353149, "train/loss_error": 0.4448682963848114, "train/loss_total": 0.4952254295349121 }, { "epoch": 1.2794549826342507, "step": 4789, "train/loss_ctc": 1.2545911073684692, "train/loss_error": 0.4782275855541229, "train/loss_total": 0.633500337600708 }, { "epoch": 1.279722148009618, "grad_norm": 8.199169158935547, "learning_rate": 2.2329682073203313e-05, "loss": 0.5229, "step": 4790 }, { "epoch": 1.279722148009618, "step": 4790, "train/loss_ctc": 0.6258813142776489, "train/loss_error": 0.514886200428009, "train/loss_total": 0.537085235118866 }, { "epoch": 1.2799893133849853, "step": 4791, "train/loss_ctc": 0.7543818354606628, "train/loss_error": 0.44458287954330444, "train/loss_total": 0.5065426826477051 }, { "epoch": 1.2802564787603528, "step": 4792, "train/loss_ctc": 0.22813060879707336, 
"train/loss_error": 0.36201298236846924, "train/loss_total": 0.335236519575119 }, { "epoch": 1.28052364413572, "step": 4793, "train/loss_ctc": 1.2300299406051636, "train/loss_error": 0.44835206866264343, "train/loss_total": 0.6046876311302185 }, { "epoch": 1.2807908095110874, "step": 4794, "train/loss_ctc": 0.39930951595306396, "train/loss_error": 0.46931910514831543, "train/loss_total": 0.4553171992301941 }, { "epoch": 1.2810579748864548, "step": 4795, "train/loss_ctc": 0.6095981597900391, "train/loss_error": 0.48296037316322327, "train/loss_total": 0.5082879066467285 }, { "epoch": 1.281325140261822, "step": 4796, "train/loss_ctc": 0.6158061027526855, "train/loss_error": 0.5356815457344055, "train/loss_total": 0.5517064332962036 }, { "epoch": 1.2815923056371894, "step": 4797, "train/loss_ctc": 0.49755924940109253, "train/loss_error": 0.4440087676048279, "train/loss_total": 0.4547188878059387 }, { "epoch": 1.2818594710125568, "step": 4798, "train/loss_ctc": 0.8140578866004944, "train/loss_error": 0.45848849415779114, "train/loss_total": 0.5296024084091187 }, { "epoch": 1.282126636387924, "step": 4799, "train/loss_ctc": 0.4076067805290222, "train/loss_error": 0.436271607875824, "train/loss_total": 0.4305386543273926 }, { "epoch": 1.2823938017632914, "grad_norm": 2.1939146518707275, "learning_rate": 2.231365215068127e-05, "loss": 0.4914, "step": 4800 }, { "epoch": 1.2823938017632914, "step": 4800, "train/loss_ctc": 1.4459712505340576, "train/loss_error": 0.59754478931427, "train/loss_total": 0.7672300934791565 }, { "epoch": 1.2826609671386588, "step": 4801, "train/loss_ctc": 0.3441999554634094, "train/loss_error": 0.4572492837905884, "train/loss_total": 0.43463942408561707 }, { "epoch": 1.2829281325140263, "step": 4802, "train/loss_ctc": 1.132253646850586, "train/loss_error": 0.4514894187450409, "train/loss_total": 0.587642252445221 }, { "epoch": 1.2831952978893935, "step": 4803, "train/loss_ctc": 0.5014981627464294, "train/loss_error": 0.43217530846595764, 
"train/loss_total": 0.4460398852825165 }, { "epoch": 1.2834624632647609, "step": 4804, "train/loss_ctc": 0.8432477116584778, "train/loss_error": 0.47343170642852783, "train/loss_total": 0.5473949313163757 }, { "epoch": 1.2837296286401283, "step": 4805, "train/loss_ctc": 0.8875089883804321, "train/loss_error": 0.4654645025730133, "train/loss_total": 0.549873411655426 }, { "epoch": 1.2839967940154957, "step": 4806, "train/loss_ctc": 1.0226569175720215, "train/loss_error": 0.4789537191390991, "train/loss_total": 0.5876943469047546 }, { "epoch": 1.284263959390863, "step": 4807, "train/loss_ctc": 1.0192192792892456, "train/loss_error": 0.4978809356689453, "train/loss_total": 0.6021486520767212 }, { "epoch": 1.2845311247662303, "step": 4808, "train/loss_ctc": 0.46848344802856445, "train/loss_error": 0.4395392835140228, "train/loss_total": 0.44532811641693115 }, { "epoch": 1.2847982901415977, "step": 4809, "train/loss_ctc": 0.6462591290473938, "train/loss_error": 0.43135491013526917, "train/loss_total": 0.4743357300758362 }, { "epoch": 1.285065455516965, "grad_norm": 1.4914331436157227, "learning_rate": 2.229762222815923e-05, "loss": 0.5442, "step": 4810 }, { "epoch": 1.285065455516965, "step": 4810, "train/loss_ctc": 0.8561850190162659, "train/loss_error": 0.41235122084617615, "train/loss_total": 0.501118004322052 }, { "epoch": 1.2853326208923324, "step": 4811, "train/loss_ctc": 0.9386801719665527, "train/loss_error": 0.4800246059894562, "train/loss_total": 0.5717557668685913 }, { "epoch": 1.2855997862676998, "step": 4812, "train/loss_ctc": 0.6240136027336121, "train/loss_error": 0.5079416036605835, "train/loss_total": 0.5311560034751892 }, { "epoch": 1.285866951643067, "step": 4813, "train/loss_ctc": 0.6837641000747681, "train/loss_error": 0.43718528747558594, "train/loss_total": 0.4865010380744934 }, { "epoch": 1.2861341170184344, "step": 4814, "train/loss_ctc": 1.2148487567901611, "train/loss_error": 0.4365644156932831, "train/loss_total": 0.5922212600708008 }, { 
"epoch": 1.2864012823938018, "step": 4815, "train/loss_ctc": 1.6886913776397705, "train/loss_error": 0.5190325379371643, "train/loss_total": 0.7529643177986145 }, { "epoch": 1.286668447769169, "step": 4816, "train/loss_ctc": 0.31963175535202026, "train/loss_error": 0.4434848725795746, "train/loss_total": 0.4187142848968506 }, { "epoch": 1.2869356131445364, "step": 4817, "train/loss_ctc": 0.5593659281730652, "train/loss_error": 0.477706640958786, "train/loss_total": 0.49403849244117737 }, { "epoch": 1.2872027785199038, "step": 4818, "train/loss_ctc": 0.5196706056594849, "train/loss_error": 0.4412384629249573, "train/loss_total": 0.4569249153137207 }, { "epoch": 1.2874699438952713, "step": 4819, "train/loss_ctc": 0.2389148771762848, "train/loss_error": 0.4583614468574524, "train/loss_total": 0.41447216272354126 }, { "epoch": 1.2877371092706384, "grad_norm": 2.235652208328247, "learning_rate": 2.2281592305637187e-05, "loss": 0.522, "step": 4820 }, { "epoch": 1.2877371092706384, "step": 4820, "train/loss_ctc": 0.6113812923431396, "train/loss_error": 0.5127315521240234, "train/loss_total": 0.5324615240097046 }, { "epoch": 1.2880042746460059, "step": 4821, "train/loss_ctc": 0.4836210310459137, "train/loss_error": 0.43236759305000305, "train/loss_total": 0.4426182806491852 }, { "epoch": 1.2882714400213733, "step": 4822, "train/loss_ctc": 1.1663243770599365, "train/loss_error": 0.46714895963668823, "train/loss_total": 0.6069840788841248 }, { "epoch": 1.2885386053967407, "step": 4823, "train/loss_ctc": 0.9659353494644165, "train/loss_error": 0.5151556730270386, "train/loss_total": 0.6053116321563721 }, { "epoch": 1.288805770772108, "step": 4824, "train/loss_ctc": 0.8275485634803772, "train/loss_error": 0.42895984649658203, "train/loss_total": 0.50867760181427 }, { "epoch": 1.2890729361474753, "step": 4825, "train/loss_ctc": 0.8335293531417847, "train/loss_error": 0.4409738779067993, "train/loss_total": 0.5194849967956543 }, { "epoch": 1.2893401015228427, "step": 4826, 
"train/loss_ctc": 0.9418703317642212, "train/loss_error": 0.4604891240596771, "train/loss_total": 0.5567653775215149 }, { "epoch": 1.28960726689821, "step": 4827, "train/loss_ctc": 0.5949119925498962, "train/loss_error": 0.5223280191421509, "train/loss_total": 0.536844789981842 }, { "epoch": 1.2898744322735773, "step": 4828, "train/loss_ctc": 0.502535343170166, "train/loss_error": 0.5330654382705688, "train/loss_total": 0.5269594192504883 }, { "epoch": 1.2901415976489448, "step": 4829, "train/loss_ctc": 0.7399886846542358, "train/loss_error": 0.4326639473438263, "train/loss_total": 0.49412891268730164 }, { "epoch": 1.290408763024312, "grad_norm": 1.9759820699691772, "learning_rate": 2.226556238311515e-05, "loss": 0.533, "step": 4830 }, { "epoch": 1.290408763024312, "step": 4830, "train/loss_ctc": 0.7977526187896729, "train/loss_error": 0.47817251086235046, "train/loss_total": 0.5420885682106018 }, { "epoch": 1.2906759283996794, "step": 4831, "train/loss_ctc": 0.8367249965667725, "train/loss_error": 0.4517925977706909, "train/loss_total": 0.5287790894508362 }, { "epoch": 1.2909430937750468, "step": 4832, "train/loss_ctc": 0.36042577028274536, "train/loss_error": 0.4875575006008148, "train/loss_total": 0.462131142616272 }, { "epoch": 1.291210259150414, "step": 4833, "train/loss_ctc": 0.7834212779998779, "train/loss_error": 0.5280977487564087, "train/loss_total": 0.5791624784469604 }, { "epoch": 1.2914774245257814, "step": 4834, "train/loss_ctc": 1.1906068325042725, "train/loss_error": 0.4475133717060089, "train/loss_total": 0.5961320996284485 }, { "epoch": 1.2917445899011488, "step": 4835, "train/loss_ctc": 0.39007478952407837, "train/loss_error": 0.5262917280197144, "train/loss_total": 0.4990483522415161 }, { "epoch": 1.2920117552765162, "step": 4836, "train/loss_ctc": 0.5880246758460999, "train/loss_error": 0.4962727427482605, "train/loss_total": 0.5146231055259705 }, { "epoch": 1.2922789206518834, "step": 4837, "train/loss_ctc": 0.23434661328792572, 
"train/loss_error": 0.521083652973175, "train/loss_total": 0.46373623609542847 }, { "epoch": 1.2925460860272509, "step": 4838, "train/loss_ctc": 1.0380778312683105, "train/loss_error": 0.5111743211746216, "train/loss_total": 0.6165550351142883 }, { "epoch": 1.2928132514026183, "step": 4839, "train/loss_ctc": 0.6335334181785583, "train/loss_error": 0.47759515047073364, "train/loss_total": 0.5087828040122986 }, { "epoch": 1.2930804167779857, "grad_norm": 1.3032194375991821, "learning_rate": 2.2249532460593107e-05, "loss": 0.5311, "step": 4840 }, { "epoch": 1.2930804167779857, "step": 4840, "train/loss_ctc": 1.29567551612854, "train/loss_error": 0.5063458681106567, "train/loss_total": 0.6642118096351624 }, { "epoch": 1.2933475821533529, "step": 4841, "train/loss_ctc": 0.7667253017425537, "train/loss_error": 0.43207550048828125, "train/loss_total": 0.4990054965019226 }, { "epoch": 1.2936147475287203, "step": 4842, "train/loss_ctc": 1.2725894451141357, "train/loss_error": 0.46587854623794556, "train/loss_total": 0.6272207498550415 }, { "epoch": 1.2938819129040877, "step": 4843, "train/loss_ctc": 0.8175567388534546, "train/loss_error": 0.45007437467575073, "train/loss_total": 0.5235708355903625 }, { "epoch": 1.294149078279455, "step": 4844, "train/loss_ctc": 0.5539332628250122, "train/loss_error": 0.47554782032966614, "train/loss_total": 0.49122491478919983 }, { "epoch": 1.2944162436548223, "step": 4845, "train/loss_ctc": 1.3169081211090088, "train/loss_error": 0.45672598481178284, "train/loss_total": 0.628762423992157 }, { "epoch": 1.2946834090301897, "step": 4846, "train/loss_ctc": 1.0876431465148926, "train/loss_error": 0.4658203125, "train/loss_total": 0.5901848673820496 }, { "epoch": 1.294950574405557, "step": 4847, "train/loss_ctc": 0.8315274715423584, "train/loss_error": 0.5118128657341003, "train/loss_total": 0.575755774974823 }, { "epoch": 1.2952177397809244, "step": 4848, "train/loss_ctc": 0.8052306771278381, "train/loss_error": 0.48608535528182983, 
"train/loss_total": 0.5499144196510315 }, { "epoch": 1.2954849051562918, "step": 4849, "train/loss_ctc": 0.4349735379219055, "train/loss_error": 0.48572272062301636, "train/loss_total": 0.4755728840827942 }, { "epoch": 1.295752070531659, "grad_norm": 2.9247090816497803, "learning_rate": 2.2233502538071068e-05, "loss": 0.5625, "step": 4850 }, { "epoch": 1.295752070531659, "step": 4850, "train/loss_ctc": 1.2601467370986938, "train/loss_error": 0.5004981160163879, "train/loss_total": 0.6524278521537781 }, { "epoch": 1.2960192359070264, "step": 4851, "train/loss_ctc": 0.6182799339294434, "train/loss_error": 0.38999906182289124, "train/loss_total": 0.43565523624420166 }, { "epoch": 1.2962864012823938, "step": 4852, "train/loss_ctc": 0.6155731678009033, "train/loss_error": 0.4934905171394348, "train/loss_total": 0.5179070234298706 }, { "epoch": 1.2965535666577612, "step": 4853, "train/loss_ctc": 1.6915395259857178, "train/loss_error": 0.4858107566833496, "train/loss_total": 0.7269564867019653 }, { "epoch": 1.2968207320331286, "step": 4854, "train/loss_ctc": 0.9493618607521057, "train/loss_error": 0.5108997821807861, "train/loss_total": 0.598592221736908 }, { "epoch": 1.2970878974084958, "step": 4855, "train/loss_ctc": 0.7458351850509644, "train/loss_error": 0.43973469734191895, "train/loss_total": 0.500954806804657 }, { "epoch": 1.2973550627838633, "step": 4856, "train/loss_ctc": 0.5119404196739197, "train/loss_error": 0.42177125811576843, "train/loss_total": 0.4398050904273987 }, { "epoch": 1.2976222281592307, "step": 4857, "train/loss_ctc": 0.44861918687820435, "train/loss_error": 0.42978283762931824, "train/loss_total": 0.4335501194000244 }, { "epoch": 1.2978893935345979, "step": 4858, "train/loss_ctc": 2.659895896911621, "train/loss_error": 0.5363575220108032, "train/loss_total": 0.9610652327537537 }, { "epoch": 1.2981565589099653, "step": 4859, "train/loss_ctc": 0.4558478891849518, "train/loss_error": 0.4878003001213074, "train/loss_total": 0.48140984773635864 }, { 
"epoch": 1.2984237242853327, "grad_norm": 1.6040035486221313, "learning_rate": 2.2217472615549026e-05, "loss": 0.5748, "step": 4860 }, { "epoch": 1.2984237242853327, "step": 4860, "train/loss_ctc": 0.40647774934768677, "train/loss_error": 0.47570064663887024, "train/loss_total": 0.46185606718063354 }, { "epoch": 1.2986908896607, "step": 4861, "train/loss_ctc": 0.6010632514953613, "train/loss_error": 0.3969945013523102, "train/loss_total": 0.43780824542045593 }, { "epoch": 1.2989580550360673, "step": 4862, "train/loss_ctc": 0.5672328472137451, "train/loss_error": 0.44883275032043457, "train/loss_total": 0.47251278162002563 }, { "epoch": 1.2992252204114347, "step": 4863, "train/loss_ctc": 1.095543622970581, "train/loss_error": 0.5251447558403015, "train/loss_total": 0.6392245292663574 }, { "epoch": 1.299492385786802, "step": 4864, "train/loss_ctc": 0.5669984817504883, "train/loss_error": 0.4937284588813782, "train/loss_total": 0.5083824992179871 }, { "epoch": 1.2997595511621693, "step": 4865, "train/loss_ctc": 0.6981499195098877, "train/loss_error": 0.42231428623199463, "train/loss_total": 0.4774814248085022 }, { "epoch": 1.3000267165375368, "step": 4866, "train/loss_ctc": 0.5570629239082336, "train/loss_error": 0.42292287945747375, "train/loss_total": 0.4497509002685547 }, { "epoch": 1.300293881912904, "step": 4867, "train/loss_ctc": 0.5636767148971558, "train/loss_error": 0.43022704124450684, "train/loss_total": 0.4569169878959656 }, { "epoch": 1.3005610472882714, "step": 4868, "train/loss_ctc": 0.9073141813278198, "train/loss_error": 0.4371754229068756, "train/loss_total": 0.5312032103538513 }, { "epoch": 1.3008282126636388, "step": 4869, "train/loss_ctc": 0.8772507905960083, "train/loss_error": 0.43138957023620605, "train/loss_total": 0.5205618143081665 }, { "epoch": 1.3010953780390062, "grad_norm": 1.8240678310394287, "learning_rate": 2.2201442693026984e-05, "loss": 0.4956, "step": 4870 }, { "epoch": 1.3010953780390062, "step": 4870, "train/loss_ctc": 
1.0888731479644775, "train/loss_error": 0.467648446559906, "train/loss_total": 0.5918934345245361 }, { "epoch": 1.3013625434143736, "step": 4871, "train/loss_ctc": 0.9085793495178223, "train/loss_error": 0.4154170751571655, "train/loss_total": 0.5140495300292969 }, { "epoch": 1.3016297087897408, "step": 4872, "train/loss_ctc": 0.8092265725135803, "train/loss_error": 0.4333306849002838, "train/loss_total": 0.5085098743438721 }, { "epoch": 1.3018968741651082, "step": 4873, "train/loss_ctc": 1.0688625574111938, "train/loss_error": 0.45237430930137634, "train/loss_total": 0.5756719708442688 }, { "epoch": 1.3021640395404757, "step": 4874, "train/loss_ctc": 0.5558586716651917, "train/loss_error": 0.5188804864883423, "train/loss_total": 0.5262761116027832 }, { "epoch": 1.3024312049158429, "step": 4875, "train/loss_ctc": 0.6438592672348022, "train/loss_error": 0.5395821928977966, "train/loss_total": 0.5604376196861267 }, { "epoch": 1.3026983702912103, "step": 4876, "train/loss_ctc": 1.6627799272537231, "train/loss_error": 0.4915180504322052, "train/loss_total": 0.7257704138755798 }, { "epoch": 1.3029655356665777, "step": 4877, "train/loss_ctc": 0.7813336253166199, "train/loss_error": 0.5123777389526367, "train/loss_total": 0.5661689043045044 }, { "epoch": 1.3032327010419449, "step": 4878, "train/loss_ctc": 0.7882164716720581, "train/loss_error": 0.4928758442401886, "train/loss_total": 0.5519439578056335 }, { "epoch": 1.3034998664173123, "step": 4879, "train/loss_ctc": 0.71019446849823, "train/loss_error": 0.43067049980163574, "train/loss_total": 0.48657530546188354 }, { "epoch": 1.3037670317926797, "grad_norm": 2.423353672027588, "learning_rate": 2.2185412770504946e-05, "loss": 0.5607, "step": 4880 }, { "epoch": 1.3037670317926797, "step": 4880, "train/loss_ctc": 0.9036810994148254, "train/loss_error": 0.4520542621612549, "train/loss_total": 0.54237961769104 }, { "epoch": 1.304034197168047, "step": 4881, "train/loss_ctc": 0.44512832164764404, "train/loss_error": 
0.4497787058353424, "train/loss_total": 0.4488486349582672 }, { "epoch": 1.3043013625434143, "step": 4882, "train/loss_ctc": 0.34508243203163147, "train/loss_error": 0.42846179008483887, "train/loss_total": 0.41178593039512634 }, { "epoch": 1.3045685279187818, "step": 4883, "train/loss_ctc": 0.7735588550567627, "train/loss_error": 0.47860148549079895, "train/loss_total": 0.5375929474830627 }, { "epoch": 1.304835693294149, "step": 4884, "train/loss_ctc": 0.4095889925956726, "train/loss_error": 0.4641363322734833, "train/loss_total": 0.45322686433792114 }, { "epoch": 1.3051028586695164, "step": 4885, "train/loss_ctc": 0.7566431760787964, "train/loss_error": 0.5144227147102356, "train/loss_total": 0.5628668069839478 }, { "epoch": 1.3053700240448838, "step": 4886, "train/loss_ctc": 0.47478634119033813, "train/loss_error": 0.41785991191864014, "train/loss_total": 0.4292452037334442 }, { "epoch": 1.3056371894202512, "step": 4887, "train/loss_ctc": 0.3462509512901306, "train/loss_error": 0.4301699697971344, "train/loss_total": 0.41338616609573364 }, { "epoch": 1.3059043547956186, "step": 4888, "train/loss_ctc": 0.6499398946762085, "train/loss_error": 0.4743826389312744, "train/loss_total": 0.5094940662384033 }, { "epoch": 1.3061715201709858, "step": 4889, "train/loss_ctc": 1.4358208179473877, "train/loss_error": 0.48943978548049927, "train/loss_total": 0.6787160038948059 }, { "epoch": 1.3064386855463532, "grad_norm": 1.8404911756515503, "learning_rate": 2.2169382847982904e-05, "loss": 0.4988, "step": 4890 }, { "epoch": 1.3064386855463532, "step": 4890, "train/loss_ctc": 0.5368350148200989, "train/loss_error": 0.4624951481819153, "train/loss_total": 0.47736313939094543 }, { "epoch": 1.3067058509217206, "step": 4891, "train/loss_ctc": 1.303941011428833, "train/loss_error": 0.41291913390159607, "train/loss_total": 0.5911235213279724 }, { "epoch": 1.3069730162970878, "step": 4892, "train/loss_ctc": 0.7289301156997681, "train/loss_error": 0.5102956891059875, 
"train/loss_total": 0.5540226101875305 }, { "epoch": 1.3072401816724553, "step": 4893, "train/loss_ctc": 0.7369956970214844, "train/loss_error": 0.43355873227119446, "train/loss_total": 0.49424612522125244 }, { "epoch": 1.3075073470478227, "step": 4894, "train/loss_ctc": 0.5775360465049744, "train/loss_error": 0.42610153555870056, "train/loss_total": 0.4563884437084198 }, { "epoch": 1.3077745124231899, "step": 4895, "train/loss_ctc": 0.6371586322784424, "train/loss_error": 0.4238496422767639, "train/loss_total": 0.46651142835617065 }, { "epoch": 1.3080416777985573, "step": 4896, "train/loss_ctc": 0.9946810007095337, "train/loss_error": 0.5160952210426331, "train/loss_total": 0.6118124127388 }, { "epoch": 1.3083088431739247, "step": 4897, "train/loss_ctc": 0.5744484663009644, "train/loss_error": 0.5049258470535278, "train/loss_total": 0.5188303589820862 }, { "epoch": 1.308576008549292, "step": 4898, "train/loss_ctc": 0.7445735931396484, "train/loss_error": 0.4230360984802246, "train/loss_total": 0.48734360933303833 }, { "epoch": 1.3088431739246593, "step": 4899, "train/loss_ctc": 0.5531271696090698, "train/loss_error": 0.5091849565505981, "train/loss_total": 0.5179734230041504 }, { "epoch": 1.3091103393000267, "grad_norm": 4.336625576019287, "learning_rate": 2.2153352925460862e-05, "loss": 0.5176, "step": 4900 }, { "epoch": 1.3091103393000267, "step": 4900, "train/loss_ctc": 0.5241191387176514, "train/loss_error": 0.42415696382522583, "train/loss_total": 0.4441494047641754 }, { "epoch": 1.309377504675394, "step": 4901, "train/loss_ctc": 0.7500846982002258, "train/loss_error": 0.4139763116836548, "train/loss_total": 0.4811980128288269 }, { "epoch": 1.3096446700507614, "step": 4902, "train/loss_ctc": 0.36229538917541504, "train/loss_error": 0.5159594416618347, "train/loss_total": 0.4852266311645508 }, { "epoch": 1.3099118354261288, "step": 4903, "train/loss_ctc": 1.0600030422210693, "train/loss_error": 0.4808410108089447, "train/loss_total": 0.5966734290122986 }, { 
"epoch": 1.3101790008014962, "step": 4904, "train/loss_ctc": 1.277327299118042, "train/loss_error": 0.5473276376724243, "train/loss_total": 0.6933276057243347 }, { "epoch": 1.3104461661768636, "step": 4905, "train/loss_ctc": 0.3934905529022217, "train/loss_error": 0.4450402855873108, "train/loss_total": 0.4347303509712219 }, { "epoch": 1.3107133315522308, "step": 4906, "train/loss_ctc": 0.8456864953041077, "train/loss_error": 0.4874977469444275, "train/loss_total": 0.5591354966163635 }, { "epoch": 1.3109804969275982, "step": 4907, "train/loss_ctc": 0.4546821117401123, "train/loss_error": 0.4405955374240875, "train/loss_total": 0.4434128701686859 }, { "epoch": 1.3112476623029656, "step": 4908, "train/loss_ctc": 0.7865071296691895, "train/loss_error": 0.40020620822906494, "train/loss_total": 0.4774664044380188 }, { "epoch": 1.3115148276783328, "step": 4909, "train/loss_ctc": 0.816137433052063, "train/loss_error": 0.46664613485336304, "train/loss_total": 0.5365443825721741 }, { "epoch": 1.3117819930537002, "grad_norm": 2.593886375427246, "learning_rate": 2.213732300293882e-05, "loss": 0.5152, "step": 4910 }, { "epoch": 1.3117819930537002, "step": 4910, "train/loss_ctc": 0.3675263822078705, "train/loss_error": 0.4464924931526184, "train/loss_total": 0.43069928884506226 }, { "epoch": 1.3120491584290677, "step": 4911, "train/loss_ctc": 1.092143177986145, "train/loss_error": 0.5078961849212646, "train/loss_total": 0.6247456073760986 }, { "epoch": 1.3123163238044349, "step": 4912, "train/loss_ctc": 0.8981883525848389, "train/loss_error": 0.5489863753318787, "train/loss_total": 0.6188267469406128 }, { "epoch": 1.3125834891798023, "step": 4913, "train/loss_ctc": 1.2007079124450684, "train/loss_error": 0.43196699023246765, "train/loss_total": 0.5857151746749878 }, { "epoch": 1.3128506545551697, "step": 4914, "train/loss_ctc": 0.6325483918190002, "train/loss_error": 0.45943665504455566, "train/loss_total": 0.4940590262413025 }, { "epoch": 1.313117819930537, "step": 4915, 
"train/loss_ctc": 0.536423921585083, "train/loss_error": 0.44745901226997375, "train/loss_total": 0.46525201201438904 }, { "epoch": 1.3133849853059043, "step": 4916, "train/loss_ctc": 0.7184616923332214, "train/loss_error": 0.4434398412704468, "train/loss_total": 0.49844422936439514 }, { "epoch": 1.3136521506812717, "step": 4917, "train/loss_ctc": 0.5195536017417908, "train/loss_error": 0.4811740517616272, "train/loss_total": 0.4888499677181244 }, { "epoch": 1.3139193160566391, "step": 4918, "train/loss_ctc": 0.6609610915184021, "train/loss_error": 0.44399669766426086, "train/loss_total": 0.48738959431648254 }, { "epoch": 1.3141864814320063, "step": 4919, "train/loss_ctc": 0.5725388526916504, "train/loss_error": 0.4111729860305786, "train/loss_total": 0.44344615936279297 }, { "epoch": 1.3144536468073738, "grad_norm": 2.2296178340911865, "learning_rate": 2.2121293080416778e-05, "loss": 0.5137, "step": 4920 }, { "epoch": 1.3144536468073738, "step": 4920, "train/loss_ctc": 0.6568161249160767, "train/loss_error": 0.5192703604698181, "train/loss_total": 0.5467795133590698 }, { "epoch": 1.3147208121827412, "step": 4921, "train/loss_ctc": 1.1235899925231934, "train/loss_error": 0.4435533881187439, "train/loss_total": 0.5795607566833496 }, { "epoch": 1.3149879775581086, "step": 4922, "train/loss_ctc": 0.7083429098129272, "train/loss_error": 0.5263928771018982, "train/loss_total": 0.562782883644104 }, { "epoch": 1.3152551429334758, "step": 4923, "train/loss_ctc": 1.0642902851104736, "train/loss_error": 0.5442844033241272, "train/loss_total": 0.6482855677604675 }, { "epoch": 1.3155223083088432, "step": 4924, "train/loss_ctc": 0.6374921798706055, "train/loss_error": 0.44071489572525024, "train/loss_total": 0.4800703525543213 }, { "epoch": 1.3157894736842106, "step": 4925, "train/loss_ctc": 0.9654702544212341, "train/loss_error": 0.5083039402961731, "train/loss_total": 0.5997372269630432 }, { "epoch": 1.3160566390595778, "step": 4926, "train/loss_ctc": 0.9255713224411011, 
"train/loss_error": 0.5102506875991821, "train/loss_total": 0.5933148264884949 }, { "epoch": 1.3163238044349452, "step": 4927, "train/loss_ctc": 0.921697735786438, "train/loss_error": 0.4447784721851349, "train/loss_total": 0.5401623249053955 }, { "epoch": 1.3165909698103127, "step": 4928, "train/loss_ctc": 0.7549611330032349, "train/loss_error": 0.5402466058731079, "train/loss_total": 0.5831895470619202 }, { "epoch": 1.3168581351856798, "step": 4929, "train/loss_ctc": 0.5454360842704773, "train/loss_error": 0.4426041543483734, "train/loss_total": 0.4631705582141876 }, { "epoch": 1.3171253005610473, "grad_norm": 1.7100204229354858, "learning_rate": 2.2105263157894736e-05, "loss": 0.5597, "step": 4930 }, { "epoch": 1.3171253005610473, "step": 4930, "train/loss_ctc": 1.3956397771835327, "train/loss_error": 0.4763471186161041, "train/loss_total": 0.6602056622505188 }, { "epoch": 1.3173924659364147, "step": 4931, "train/loss_ctc": 1.6202764511108398, "train/loss_error": 0.4861451983451843, "train/loss_total": 0.7129714488983154 }, { "epoch": 1.3176596313117819, "step": 4932, "train/loss_ctc": 1.1271902322769165, "train/loss_error": 0.5285729765892029, "train/loss_total": 0.6482964158058167 }, { "epoch": 1.3179267966871493, "step": 4933, "train/loss_ctc": 0.4549393951892853, "train/loss_error": 0.495138019323349, "train/loss_total": 0.4870983064174652 }, { "epoch": 1.3181939620625167, "step": 4934, "train/loss_ctc": 0.7365100383758545, "train/loss_error": 0.45009514689445496, "train/loss_total": 0.5073781609535217 }, { "epoch": 1.3184611274378841, "step": 4935, "train/loss_ctc": 0.6850682497024536, "train/loss_error": 0.45659735798835754, "train/loss_total": 0.5022915601730347 }, { "epoch": 1.3187282928132513, "step": 4936, "train/loss_ctc": 1.0032432079315186, "train/loss_error": 0.45678141713142395, "train/loss_total": 0.5660737752914429 }, { "epoch": 1.3189954581886187, "step": 4937, "train/loss_ctc": 0.40647247433662415, "train/loss_error": 0.5173149108886719, 
"train/loss_total": 0.4951464533805847 }, { "epoch": 1.3192626235639862, "step": 4938, "train/loss_ctc": 0.7069706916809082, "train/loss_error": 0.4991968274116516, "train/loss_total": 0.540751576423645 }, { "epoch": 1.3195297889393536, "step": 4939, "train/loss_ctc": 0.4287887513637543, "train/loss_error": 0.42950427532196045, "train/loss_total": 0.4293611943721771 }, { "epoch": 1.3197969543147208, "grad_norm": 5.208275318145752, "learning_rate": 2.2089233235372698e-05, "loss": 0.555, "step": 4940 }, { "epoch": 1.3197969543147208, "step": 4940, "train/loss_ctc": 0.7665749192237854, "train/loss_error": 0.4907703697681427, "train/loss_total": 0.5459312796592712 }, { "epoch": 1.3200641196900882, "step": 4941, "train/loss_ctc": 0.440833181142807, "train/loss_error": 0.4382766783237457, "train/loss_total": 0.4387879967689514 }, { "epoch": 1.3203312850654556, "step": 4942, "train/loss_ctc": 0.8244010210037231, "train/loss_error": 0.42653658986091614, "train/loss_total": 0.5061094760894775 }, { "epoch": 1.3205984504408228, "step": 4943, "train/loss_ctc": 1.1992615461349487, "train/loss_error": 0.46989601850509644, "train/loss_total": 0.6157691478729248 }, { "epoch": 1.3208656158161902, "step": 4944, "train/loss_ctc": 0.36643335223197937, "train/loss_error": 0.4979228079319, "train/loss_total": 0.4716249406337738 }, { "epoch": 1.3211327811915576, "step": 4945, "train/loss_ctc": 0.6993637084960938, "train/loss_error": 0.5726146697998047, "train/loss_total": 0.5979644656181335 }, { "epoch": 1.3213999465669248, "step": 4946, "train/loss_ctc": 0.9694247841835022, "train/loss_error": 0.5124106407165527, "train/loss_total": 0.6038134694099426 }, { "epoch": 1.3216671119422922, "step": 4947, "train/loss_ctc": 1.0626815557479858, "train/loss_error": 0.45948582887649536, "train/loss_total": 0.5801249742507935 }, { "epoch": 1.3219342773176597, "step": 4948, "train/loss_ctc": 0.9431982040405273, "train/loss_error": 0.45843735337257385, "train/loss_total": 0.5553895235061646 }, { 
"epoch": 1.3222014426930269, "step": 4949, "train/loss_ctc": 1.197709321975708, "train/loss_error": 0.4875968396663666, "train/loss_total": 0.6296193599700928 }, { "epoch": 1.3224686080683943, "grad_norm": 2.0252811908721924, "learning_rate": 2.2073203312850656e-05, "loss": 0.5545, "step": 4950 }, { "epoch": 1.3224686080683943, "step": 4950, "train/loss_ctc": 0.9311890602111816, "train/loss_error": 0.4678657054901123, "train/loss_total": 0.560530424118042 }, { "epoch": 1.3227357734437617, "step": 4951, "train/loss_ctc": 0.9674584865570068, "train/loss_error": 0.4932202100753784, "train/loss_total": 0.588067889213562 }, { "epoch": 1.3230029388191291, "step": 4952, "train/loss_ctc": 0.9876700639724731, "train/loss_error": 0.4425678253173828, "train/loss_total": 0.5515882968902588 }, { "epoch": 1.3232701041944963, "step": 4953, "train/loss_ctc": 0.9972817897796631, "train/loss_error": 0.5375449657440186, "train/loss_total": 0.6294923424720764 }, { "epoch": 1.3235372695698637, "step": 4954, "train/loss_ctc": 0.6799749732017517, "train/loss_error": 0.5114798545837402, "train/loss_total": 0.5451788902282715 }, { "epoch": 1.3238044349452311, "step": 4955, "train/loss_ctc": 1.5016765594482422, "train/loss_error": 0.4687519967556, "train/loss_total": 0.6753369569778442 }, { "epoch": 1.3240716003205986, "step": 4956, "train/loss_ctc": 1.0046510696411133, "train/loss_error": 0.4577604830265045, "train/loss_total": 0.5671386122703552 }, { "epoch": 1.3243387656959658, "step": 4957, "train/loss_ctc": 0.6659294366836548, "train/loss_error": 0.5199298858642578, "train/loss_total": 0.549129843711853 }, { "epoch": 1.3246059310713332, "step": 4958, "train/loss_ctc": 0.3393169641494751, "train/loss_error": 0.4423995912075043, "train/loss_total": 0.42178308963775635 }, { "epoch": 1.3248730964467006, "step": 4959, "train/loss_ctc": 1.4476419687271118, "train/loss_error": 0.48969894647598267, "train/loss_total": 0.6812875270843506 }, { "epoch": 1.3251402618220678, "grad_norm": 
3.2568602561950684, "learning_rate": 2.2057173390328614e-05, "loss": 0.577, "step": 4960 }, { "epoch": 1.3251402618220678, "step": 4960, "train/loss_ctc": 0.681602954864502, "train/loss_error": 0.4948088824748993, "train/loss_total": 0.5321676731109619 }, { "epoch": 1.3254074271974352, "step": 4961, "train/loss_ctc": 1.0889325141906738, "train/loss_error": 0.5132815837860107, "train/loss_total": 0.6284117698669434 }, { "epoch": 1.3256745925728026, "step": 4962, "train/loss_ctc": 0.7388172149658203, "train/loss_error": 0.478802353143692, "train/loss_total": 0.5308053493499756 }, { "epoch": 1.3259417579481698, "step": 4963, "train/loss_ctc": 0.9275641441345215, "train/loss_error": 0.404640793800354, "train/loss_total": 0.5092254877090454 }, { "epoch": 1.3262089233235372, "step": 4964, "train/loss_ctc": 0.8453792333602905, "train/loss_error": 0.5158767104148865, "train/loss_total": 0.5817772150039673 }, { "epoch": 1.3264760886989047, "step": 4965, "train/loss_ctc": 0.5110442638397217, "train/loss_error": 0.45840802788734436, "train/loss_total": 0.4689352810382843 }, { "epoch": 1.3267432540742718, "step": 4966, "train/loss_ctc": 0.5066049098968506, "train/loss_error": 0.4460008442401886, "train/loss_total": 0.458121657371521 }, { "epoch": 1.3270104194496393, "step": 4967, "train/loss_ctc": 1.5483381748199463, "train/loss_error": 0.5243215560913086, "train/loss_total": 0.729124903678894 }, { "epoch": 1.3272775848250067, "step": 4968, "train/loss_ctc": 0.8579908013343811, "train/loss_error": 0.5059296488761902, "train/loss_total": 0.5763418674468994 }, { "epoch": 1.327544750200374, "step": 4969, "train/loss_ctc": 0.6029270887374878, "train/loss_error": 0.4284740388393402, "train/loss_total": 0.4633646607398987 }, { "epoch": 1.3278119155757415, "grad_norm": 2.6367573738098145, "learning_rate": 2.2041143467806572e-05, "loss": 0.5478, "step": 4970 }, { "epoch": 1.3278119155757415, "step": 4970, "train/loss_ctc": 0.7379000186920166, "train/loss_error": 0.5074558258056641, 
"train/loss_total": 0.5535447001457214 }, { "epoch": 1.3280790809511087, "step": 4971, "train/loss_ctc": 0.9688830971717834, "train/loss_error": 0.4344210624694824, "train/loss_total": 0.5413134694099426 }, { "epoch": 1.3283462463264761, "step": 4972, "train/loss_ctc": 0.6327587366104126, "train/loss_error": 0.4611894488334656, "train/loss_total": 0.495503306388855 }, { "epoch": 1.3286134117018435, "step": 4973, "train/loss_ctc": 0.9327882528305054, "train/loss_error": 0.4580404460430145, "train/loss_total": 0.5529900193214417 }, { "epoch": 1.3288805770772107, "step": 4974, "train/loss_ctc": 0.4097711741924286, "train/loss_error": 0.47028157114982605, "train/loss_total": 0.4581795036792755 }, { "epoch": 1.3291477424525782, "step": 4975, "train/loss_ctc": 0.6106463670730591, "train/loss_error": 0.42549842596054077, "train/loss_total": 0.4625280499458313 }, { "epoch": 1.3294149078279456, "step": 4976, "train/loss_ctc": 0.5707379579544067, "train/loss_error": 0.46567878127098083, "train/loss_total": 0.4866906404495239 }, { "epoch": 1.3296820732033128, "step": 4977, "train/loss_ctc": 0.48647594451904297, "train/loss_error": 0.46355727314949036, "train/loss_total": 0.46814101934432983 }, { "epoch": 1.3299492385786802, "step": 4978, "train/loss_ctc": 0.5472063422203064, "train/loss_error": 0.45732182264328003, "train/loss_total": 0.4752987325191498 }, { "epoch": 1.3302164039540476, "step": 4979, "train/loss_ctc": 0.803708016872406, "train/loss_error": 0.4722919166088104, "train/loss_total": 0.5385751724243164 }, { "epoch": 1.3304835693294148, "grad_norm": 1.2687005996704102, "learning_rate": 2.202511354528453e-05, "loss": 0.5033, "step": 4980 }, { "epoch": 1.3304835693294148, "step": 4980, "train/loss_ctc": 1.0989257097244263, "train/loss_error": 0.5329940915107727, "train/loss_total": 0.6461803913116455 }, { "epoch": 1.3307507347047822, "step": 4981, "train/loss_ctc": 0.4973888397216797, "train/loss_error": 0.46727508306503296, "train/loss_total": 0.4732978343963623 }, 
{ "epoch": 1.3310179000801496, "step": 4982, "train/loss_ctc": 0.9560264348983765, "train/loss_error": 0.47969943284988403, "train/loss_total": 0.5749648809432983 }, { "epoch": 1.3312850654555168, "step": 4983, "train/loss_ctc": 0.48698464035987854, "train/loss_error": 0.4226757287979126, "train/loss_total": 0.43553751707077026 }, { "epoch": 1.3315522308308843, "step": 4984, "train/loss_ctc": 0.8896030187606812, "train/loss_error": 0.46956753730773926, "train/loss_total": 0.5535746812820435 }, { "epoch": 1.3318193962062517, "step": 4985, "train/loss_ctc": 0.5420058965682983, "train/loss_error": 0.4258124828338623, "train/loss_total": 0.449051171541214 }, { "epoch": 1.332086561581619, "step": 4986, "train/loss_ctc": 0.7342305779457092, "train/loss_error": 0.4045979380607605, "train/loss_total": 0.47052446007728577 }, { "epoch": 1.3323537269569865, "step": 4987, "train/loss_ctc": 0.8495804071426392, "train/loss_error": 0.39121562242507935, "train/loss_total": 0.4828885793685913 }, { "epoch": 1.3326208923323537, "step": 4988, "train/loss_ctc": 0.5112534761428833, "train/loss_error": 0.46133050322532654, "train/loss_total": 0.4713151156902313 }, { "epoch": 1.3328880577077211, "step": 4989, "train/loss_ctc": 1.260876178741455, "train/loss_error": 0.5169298648834229, "train/loss_total": 0.6657191514968872 }, { "epoch": 1.3331552230830885, "grad_norm": 1.4806383848190308, "learning_rate": 2.2009083622762488e-05, "loss": 0.5223, "step": 4990 }, { "epoch": 1.3331552230830885, "step": 4990, "train/loss_ctc": 0.7690868377685547, "train/loss_error": 0.45276105403900146, "train/loss_total": 0.5160261988639832 }, { "epoch": 1.3334223884584557, "step": 4991, "train/loss_ctc": 1.0306496620178223, "train/loss_error": 0.4503284692764282, "train/loss_total": 0.566392719745636 }, { "epoch": 1.3336895538338231, "step": 4992, "train/loss_ctc": 0.5659608244895935, "train/loss_error": 0.49895012378692627, "train/loss_total": 0.5123522877693176 }, { "epoch": 1.3339567192091906, "step": 
4993, "train/loss_ctc": 1.0842385292053223, "train/loss_error": 0.4016697108745575, "train/loss_total": 0.5381834506988525 }, { "epoch": 1.3342238845845578, "step": 4994, "train/loss_ctc": 0.8587936758995056, "train/loss_error": 0.4775850772857666, "train/loss_total": 0.5538268089294434 }, { "epoch": 1.3344910499599252, "step": 4995, "train/loss_ctc": 0.9336515665054321, "train/loss_error": 0.4286523759365082, "train/loss_total": 0.5296522378921509 }, { "epoch": 1.3347582153352926, "step": 4996, "train/loss_ctc": 1.0691702365875244, "train/loss_error": 0.4606565237045288, "train/loss_total": 0.5823593139648438 }, { "epoch": 1.3350253807106598, "step": 4997, "train/loss_ctc": 0.9276585578918457, "train/loss_error": 0.5228841304779053, "train/loss_total": 0.6038390398025513 }, { "epoch": 1.3352925460860272, "step": 4998, "train/loss_ctc": 0.6157269477844238, "train/loss_error": 0.49485698342323303, "train/loss_total": 0.5190309882164001 }, { "epoch": 1.3355597114613946, "step": 4999, "train/loss_ctc": 1.469657063484192, "train/loss_error": 0.4917365312576294, "train/loss_total": 0.6873206496238708 }, { "epoch": 1.3358268768367618, "grad_norm": 2.029376268386841, "learning_rate": 2.199305370024045e-05, "loss": 0.5609, "step": 5000 }, { "epoch": 1.3358268768367618, "step": 5000, "train/loss_ctc": 0.4293995201587677, "train/loss_error": 0.45422470569610596, "train/loss_total": 0.4492596983909607 }, { "epoch": 1.3360940422121292, "step": 5001, "train/loss_ctc": 0.42322129011154175, "train/loss_error": 0.4859665036201477, "train/loss_total": 0.4734174609184265 }, { "epoch": 1.3363612075874967, "step": 5002, "train/loss_ctc": 0.5954070091247559, "train/loss_error": 0.4353278875350952, "train/loss_total": 0.4673437178134918 }, { "epoch": 1.336628372962864, "step": 5003, "train/loss_ctc": 1.2825510501861572, "train/loss_error": 0.4843989610671997, "train/loss_total": 0.6440293788909912 }, { "epoch": 1.3368955383382315, "step": 5004, "train/loss_ctc": 0.9640653729438782, 
"train/loss_error": 0.4601409137248993, "train/loss_total": 0.5609258413314819 }, { "epoch": 1.3371627037135987, "step": 5005, "train/loss_ctc": 0.7187499403953552, "train/loss_error": 0.45413485169410706, "train/loss_total": 0.5070579051971436 }, { "epoch": 1.337429869088966, "step": 5006, "train/loss_ctc": 0.5084930658340454, "train/loss_error": 0.5093933343887329, "train/loss_total": 0.5092132687568665 }, { "epoch": 1.3376970344643335, "step": 5007, "train/loss_ctc": 0.7445857524871826, "train/loss_error": 0.5389600992202759, "train/loss_total": 0.5800852179527283 }, { "epoch": 1.3379641998397007, "step": 5008, "train/loss_ctc": 0.8849115371704102, "train/loss_error": 0.5225716233253479, "train/loss_total": 0.5950396060943604 }, { "epoch": 1.3382313652150681, "step": 5009, "train/loss_ctc": 0.9631091356277466, "train/loss_error": 0.3969666361808777, "train/loss_total": 0.5101951360702515 }, { "epoch": 1.3384985305904356, "grad_norm": 1.6807918548583984, "learning_rate": 2.1977023777718408e-05, "loss": 0.5297, "step": 5010 }, { "epoch": 1.3384985305904356, "step": 5010, "train/loss_ctc": 0.860977053642273, "train/loss_error": 0.4793388545513153, "train/loss_total": 0.5556665062904358 }, { "epoch": 1.3387656959658027, "step": 5011, "train/loss_ctc": 0.49422937631607056, "train/loss_error": 0.44999951124191284, "train/loss_total": 0.45884549617767334 }, { "epoch": 1.3390328613411702, "step": 5012, "train/loss_ctc": 1.147167682647705, "train/loss_error": 0.47895026206970215, "train/loss_total": 0.6125937700271606 }, { "epoch": 1.3393000267165376, "step": 5013, "train/loss_ctc": 0.657052218914032, "train/loss_error": 0.4878949522972107, "train/loss_total": 0.5217264294624329 }, { "epoch": 1.3395671920919048, "step": 5014, "train/loss_ctc": 0.6078237891197205, "train/loss_error": 0.5076206922531128, "train/loss_total": 0.5276613235473633 }, { "epoch": 1.3398343574672722, "step": 5015, "train/loss_ctc": 0.36105939745903015, "train/loss_error": 0.4482651650905609, 
"train/loss_total": 0.43082404136657715 }, { "epoch": 1.3401015228426396, "step": 5016, "train/loss_ctc": 0.5129947066307068, "train/loss_error": 0.43810197710990906, "train/loss_total": 0.45308053493499756 }, { "epoch": 1.3403686882180068, "step": 5017, "train/loss_ctc": 0.8349571228027344, "train/loss_error": 0.49222680926322937, "train/loss_total": 0.5607728958129883 }, { "epoch": 1.3406358535933742, "step": 5018, "train/loss_ctc": 0.7227669358253479, "train/loss_error": 0.42855989933013916, "train/loss_total": 0.4874013066291809 }, { "epoch": 1.3409030189687416, "step": 5019, "train/loss_ctc": 1.0904854536056519, "train/loss_error": 0.4729914963245392, "train/loss_total": 0.5964902639389038 }, { "epoch": 1.341170184344109, "grad_norm": 1.8894884586334229, "learning_rate": 2.196099385519637e-05, "loss": 0.5205, "step": 5020 }, { "epoch": 1.341170184344109, "step": 5020, "train/loss_ctc": 0.499880850315094, "train/loss_error": 0.45873355865478516, "train/loss_total": 0.4669630229473114 }, { "epoch": 1.3414373497194765, "step": 5021, "train/loss_ctc": 0.8931925892829895, "train/loss_error": 0.4110026955604553, "train/loss_total": 0.5074406862258911 }, { "epoch": 1.3417045150948437, "step": 5022, "train/loss_ctc": 1.3553965091705322, "train/loss_error": 0.4290279448032379, "train/loss_total": 0.6143016815185547 }, { "epoch": 1.341971680470211, "step": 5023, "train/loss_ctc": 0.8060858249664307, "train/loss_error": 0.5017616152763367, "train/loss_total": 0.5626264810562134 }, { "epoch": 1.3422388458455785, "step": 5024, "train/loss_ctc": 1.2266970872879028, "train/loss_error": 0.5313869714736938, "train/loss_total": 0.6704490184783936 }, { "epoch": 1.3425060112209457, "step": 5025, "train/loss_ctc": 0.9155534505844116, "train/loss_error": 0.4731220304965973, "train/loss_total": 0.5616083145141602 }, { "epoch": 1.3427731765963131, "step": 5026, "train/loss_ctc": 0.6892684698104858, "train/loss_error": 0.467797189950943, "train/loss_total": 0.5120914578437805 }, { 
"epoch": 1.3430403419716805, "step": 5027, "train/loss_ctc": 0.8057447075843811, "train/loss_error": 0.4516184628009796, "train/loss_total": 0.5224437117576599 }, { "epoch": 1.3433075073470477, "step": 5028, "train/loss_ctc": 0.8649570941925049, "train/loss_error": 0.5338906645774841, "train/loss_total": 0.6001039743423462 }, { "epoch": 1.3435746727224152, "step": 5029, "train/loss_ctc": 0.3712618947029114, "train/loss_error": 0.5016520023345947, "train/loss_total": 0.4755740165710449 }, { "epoch": 1.3438418380977826, "grad_norm": 1.6741095781326294, "learning_rate": 2.1944963932674327e-05, "loss": 0.5494, "step": 5030 }, { "epoch": 1.3438418380977826, "step": 5030, "train/loss_ctc": 0.634809136390686, "train/loss_error": 0.49415653944015503, "train/loss_total": 0.5222870707511902 }, { "epoch": 1.3441090034731498, "step": 5031, "train/loss_ctc": 0.7552304267883301, "train/loss_error": 0.48430097103118896, "train/loss_total": 0.5384868383407593 }, { "epoch": 1.3443761688485172, "step": 5032, "train/loss_ctc": 1.0593019723892212, "train/loss_error": 0.49661290645599365, "train/loss_total": 0.6091507077217102 }, { "epoch": 1.3446433342238846, "step": 5033, "train/loss_ctc": 0.9207947850227356, "train/loss_error": 0.4410562813282013, "train/loss_total": 0.5370039939880371 }, { "epoch": 1.344910499599252, "step": 5034, "train/loss_ctc": 1.2170767784118652, "train/loss_error": 0.3996545970439911, "train/loss_total": 0.5631390810012817 }, { "epoch": 1.3451776649746192, "step": 5035, "train/loss_ctc": 0.5519798398017883, "train/loss_error": 0.44076964259147644, "train/loss_total": 0.4630116820335388 }, { "epoch": 1.3454448303499866, "step": 5036, "train/loss_ctc": 1.0321331024169922, "train/loss_error": 0.42657172679901123, "train/loss_total": 0.5476840138435364 }, { "epoch": 1.345711995725354, "step": 5037, "train/loss_ctc": 0.5301856398582458, "train/loss_error": 0.5014965534210205, "train/loss_total": 0.5072343945503235 }, { "epoch": 1.3459791611007215, "step": 5038, 
"train/loss_ctc": 1.1021976470947266, "train/loss_error": 0.4725509285926819, "train/loss_total": 0.5984802842140198 }, { "epoch": 1.3462463264760887, "step": 5039, "train/loss_ctc": 0.37988704442977905, "train/loss_error": 0.4619097411632538, "train/loss_total": 0.44550520181655884 }, { "epoch": 1.346513491851456, "grad_norm": 1.6456564664840698, "learning_rate": 2.1928934010152285e-05, "loss": 0.5332, "step": 5040 }, { "epoch": 1.346513491851456, "step": 5040, "train/loss_ctc": 0.5879708528518677, "train/loss_error": 0.40568971633911133, "train/loss_total": 0.442145973443985 }, { "epoch": 1.3467806572268235, "step": 5041, "train/loss_ctc": 0.5956747531890869, "train/loss_error": 0.4456883370876312, "train/loss_total": 0.47568562626838684 }, { "epoch": 1.3470478226021907, "step": 5042, "train/loss_ctc": 0.8937011957168579, "train/loss_error": 0.5821986198425293, "train/loss_total": 0.6444991230964661 }, { "epoch": 1.347314987977558, "step": 5043, "train/loss_ctc": 0.9987350702285767, "train/loss_error": 0.4695903956890106, "train/loss_total": 0.5754193067550659 }, { "epoch": 1.3475821533529255, "step": 5044, "train/loss_ctc": 0.4669870436191559, "train/loss_error": 0.5385177731513977, "train/loss_total": 0.5242116451263428 }, { "epoch": 1.3478493187282927, "step": 5045, "train/loss_ctc": 0.8724182844161987, "train/loss_error": 0.45076924562454224, "train/loss_total": 0.5350990295410156 }, { "epoch": 1.3481164841036601, "step": 5046, "train/loss_ctc": 0.5281256437301636, "train/loss_error": 0.4843258857727051, "train/loss_total": 0.4930858314037323 }, { "epoch": 1.3483836494790276, "step": 5047, "train/loss_ctc": 0.48072218894958496, "train/loss_error": 0.44093021750450134, "train/loss_total": 0.4488886296749115 }, { "epoch": 1.3486508148543948, "step": 5048, "train/loss_ctc": 0.8195849657058716, "train/loss_error": 0.5137994885444641, "train/loss_total": 0.5749565958976746 }, { "epoch": 1.3489179802297622, "step": 5049, "train/loss_ctc": 2.293210744857788, 
"train/loss_error": 0.5232603549957275, "train/loss_total": 0.8772504329681396 }, { "epoch": 1.3491851456051296, "grad_norm": 2.9149880409240723, "learning_rate": 2.1912904087630243e-05, "loss": 0.5591, "step": 5050 }, { "epoch": 1.3491851456051296, "step": 5050, "train/loss_ctc": 0.45872098207473755, "train/loss_error": 0.48338934779167175, "train/loss_total": 0.47845566272735596 }, { "epoch": 1.349452310980497, "step": 5051, "train/loss_ctc": 0.7811420559883118, "train/loss_error": 0.45271027088165283, "train/loss_total": 0.5183966159820557 }, { "epoch": 1.3497194763558642, "step": 5052, "train/loss_ctc": 0.4442736506462097, "train/loss_error": 0.43691471219062805, "train/loss_total": 0.4383864998817444 }, { "epoch": 1.3499866417312316, "step": 5053, "train/loss_ctc": 1.0917303562164307, "train/loss_error": 0.47276225686073303, "train/loss_total": 0.5965558886528015 }, { "epoch": 1.350253807106599, "step": 5054, "train/loss_ctc": 0.3703233003616333, "train/loss_error": 0.4558163285255432, "train/loss_total": 0.4387177526950836 }, { "epoch": 1.3505209724819665, "step": 5055, "train/loss_ctc": 1.066613793373108, "train/loss_error": 0.47623252868652344, "train/loss_total": 0.5943087935447693 }, { "epoch": 1.3507881378573336, "step": 5056, "train/loss_ctc": 0.7937523126602173, "train/loss_error": 0.46616870164871216, "train/loss_total": 0.5316854119300842 }, { "epoch": 1.351055303232701, "step": 5057, "train/loss_ctc": 0.7663411498069763, "train/loss_error": 0.5179309844970703, "train/loss_total": 0.5676130056381226 }, { "epoch": 1.3513224686080685, "step": 5058, "train/loss_ctc": 0.28624510765075684, "train/loss_error": 0.487474262714386, "train/loss_total": 0.44722843170166016 }, { "epoch": 1.3515896339834357, "step": 5059, "train/loss_ctc": 0.6459155082702637, "train/loss_error": 0.45880386233329773, "train/loss_total": 0.4962261915206909 }, { "epoch": 1.351856799358803, "grad_norm": 3.984278678894043, "learning_rate": 2.1896874165108205e-05, "loss": 0.5108, 
"step": 5060 }, { "epoch": 1.351856799358803, "step": 5060, "train/loss_ctc": 0.8004312515258789, "train/loss_error": 0.44586291909217834, "train/loss_total": 0.5167766213417053 }, { "epoch": 1.3521239647341705, "step": 5061, "train/loss_ctc": 1.0513083934783936, "train/loss_error": 0.43973544239997864, "train/loss_total": 0.5620500445365906 }, { "epoch": 1.3523911301095377, "step": 5062, "train/loss_ctc": 1.0912238359451294, "train/loss_error": 0.4525964856147766, "train/loss_total": 0.5803219676017761 }, { "epoch": 1.3526582954849051, "step": 5063, "train/loss_ctc": 0.6160792708396912, "train/loss_error": 0.4835221767425537, "train/loss_total": 0.5100336074829102 }, { "epoch": 1.3529254608602725, "step": 5064, "train/loss_ctc": 0.6846541166305542, "train/loss_error": 0.47558125853538513, "train/loss_total": 0.5173958539962769 }, { "epoch": 1.3531926262356397, "step": 5065, "train/loss_ctc": 0.8823673129081726, "train/loss_error": 0.4852254092693329, "train/loss_total": 0.5646538138389587 }, { "epoch": 1.3534597916110072, "step": 5066, "train/loss_ctc": 0.5527557134628296, "train/loss_error": 0.5371876358985901, "train/loss_total": 0.5403012633323669 }, { "epoch": 1.3537269569863746, "step": 5067, "train/loss_ctc": 0.6712403297424316, "train/loss_error": 0.567537248134613, "train/loss_total": 0.5882778763771057 }, { "epoch": 1.353994122361742, "step": 5068, "train/loss_ctc": 1.3131072521209717, "train/loss_error": 0.42647847533226013, "train/loss_total": 0.6038042306900024 }, { "epoch": 1.3542612877371094, "step": 5069, "train/loss_ctc": 0.5382473468780518, "train/loss_error": 0.40188825130462646, "train/loss_total": 0.42916008830070496 }, { "epoch": 1.3545284531124766, "grad_norm": 1.3521051406860352, "learning_rate": 2.1880844242586163e-05, "loss": 0.5413, "step": 5070 }, { "epoch": 1.3545284531124766, "step": 5070, "train/loss_ctc": 0.7275819778442383, "train/loss_error": 0.5023413896560669, "train/loss_total": 0.5473895072937012 }, { "epoch": 
1.354795618487844, "step": 5071, "train/loss_ctc": 0.9883195161819458, "train/loss_error": 0.4625002145767212, "train/loss_total": 0.5676640868186951 }, { "epoch": 1.3550627838632114, "step": 5072, "train/loss_ctc": 0.7381730079650879, "train/loss_error": 0.48044782876968384, "train/loss_total": 0.5319928526878357 }, { "epoch": 1.3553299492385786, "step": 5073, "train/loss_ctc": 0.580173134803772, "train/loss_error": 0.42963874340057373, "train/loss_total": 0.4597456157207489 }, { "epoch": 1.355597114613946, "step": 5074, "train/loss_ctc": 0.5962401628494263, "train/loss_error": 0.5041924715042114, "train/loss_total": 0.5226020216941833 }, { "epoch": 1.3558642799893135, "step": 5075, "train/loss_ctc": 0.8681498765945435, "train/loss_error": 0.4890201985836029, "train/loss_total": 0.5648461580276489 }, { "epoch": 1.3561314453646807, "step": 5076, "train/loss_ctc": 0.8304315805435181, "train/loss_error": 0.4769759476184845, "train/loss_total": 0.5476670861244202 }, { "epoch": 1.356398610740048, "step": 5077, "train/loss_ctc": 1.3050379753112793, "train/loss_error": 0.4487590789794922, "train/loss_total": 0.6200149059295654 }, { "epoch": 1.3566657761154155, "step": 5078, "train/loss_ctc": 1.115366816520691, "train/loss_error": 0.4507278800010681, "train/loss_total": 0.5836557149887085 }, { "epoch": 1.3569329414907827, "step": 5079, "train/loss_ctc": 1.38756263256073, "train/loss_error": 0.40598663687705994, "train/loss_total": 0.602301836013794 }, { "epoch": 1.3572001068661501, "grad_norm": 2.789019823074341, "learning_rate": 2.186481432006412e-05, "loss": 0.5548, "step": 5080 }, { "epoch": 1.3572001068661501, "step": 5080, "train/loss_ctc": 1.5409307479858398, "train/loss_error": 0.4717567265033722, "train/loss_total": 0.6855915188789368 }, { "epoch": 1.3574672722415175, "step": 5081, "train/loss_ctc": 0.9934105277061462, "train/loss_error": 0.4284029006958008, "train/loss_total": 0.5414044260978699 }, { "epoch": 1.3577344376168847, "step": 5082, "train/loss_ctc": 
0.7886449098587036, "train/loss_error": 0.4938151240348816, "train/loss_total": 0.5527811050415039 }, { "epoch": 1.3580016029922521, "step": 5083, "train/loss_ctc": 1.367471694946289, "train/loss_error": 0.48271477222442627, "train/loss_total": 0.6596661806106567 }, { "epoch": 1.3582687683676196, "step": 5084, "train/loss_ctc": 0.5511672496795654, "train/loss_error": 0.5010460615158081, "train/loss_total": 0.5110703110694885 }, { "epoch": 1.358535933742987, "step": 5085, "train/loss_ctc": 0.4321487843990326, "train/loss_error": 0.4243755042552948, "train/loss_total": 0.4259301424026489 }, { "epoch": 1.3588030991183544, "step": 5086, "train/loss_ctc": 0.6167047023773193, "train/loss_error": 0.46878400444984436, "train/loss_total": 0.49836814403533936 }, { "epoch": 1.3590702644937216, "step": 5087, "train/loss_ctc": 0.5770546197891235, "train/loss_error": 0.5095363259315491, "train/loss_total": 0.5230399966239929 }, { "epoch": 1.359337429869089, "step": 5088, "train/loss_ctc": 0.9273906946182251, "train/loss_error": 0.4361353814601898, "train/loss_total": 0.5343864560127258 }, { "epoch": 1.3596045952444564, "step": 5089, "train/loss_ctc": 0.5800615549087524, "train/loss_error": 0.5201365947723389, "train/loss_total": 0.5321215987205505 }, { "epoch": 1.3598717606198236, "grad_norm": 1.9655227661132812, "learning_rate": 2.184878439754208e-05, "loss": 0.5464, "step": 5090 }, { "epoch": 1.3598717606198236, "step": 5090, "train/loss_ctc": 0.40721139311790466, "train/loss_error": 0.4124271273612976, "train/loss_total": 0.4113839864730835 }, { "epoch": 1.360138925995191, "step": 5091, "train/loss_ctc": 0.5204218626022339, "train/loss_error": 0.42404505610466003, "train/loss_total": 0.4433204233646393 }, { "epoch": 1.3604060913705585, "step": 5092, "train/loss_ctc": 0.60353022813797, "train/loss_error": 0.44178301095962524, "train/loss_total": 0.4741324782371521 }, { "epoch": 1.3606732567459257, "step": 5093, "train/loss_ctc": 0.5640963315963745, "train/loss_error": 
0.4478933811187744, "train/loss_total": 0.4711340069770813 }, { "epoch": 1.360940422121293, "step": 5094, "train/loss_ctc": 0.4962930679321289, "train/loss_error": 0.3986174166202545, "train/loss_total": 0.4181525707244873 }, { "epoch": 1.3612075874966605, "step": 5095, "train/loss_ctc": 0.519271969795227, "train/loss_error": 0.5473346710205078, "train/loss_total": 0.5417221188545227 }, { "epoch": 1.3614747528720277, "step": 5096, "train/loss_ctc": 0.7160415649414062, "train/loss_error": 0.5339518189430237, "train/loss_total": 0.5703697800636292 }, { "epoch": 1.361741918247395, "step": 5097, "train/loss_ctc": 0.5299159288406372, "train/loss_error": 0.4685979187488556, "train/loss_total": 0.4808615446090698 }, { "epoch": 1.3620090836227625, "step": 5098, "train/loss_ctc": 0.5280329585075378, "train/loss_error": 0.5463962554931641, "train/loss_total": 0.5427235960960388 }, { "epoch": 1.3622762489981297, "step": 5099, "train/loss_ctc": 0.751695990562439, "train/loss_error": 0.4142334461212158, "train/loss_total": 0.4817259907722473 }, { "epoch": 1.3625434143734971, "grad_norm": 1.818019151687622, "learning_rate": 2.1832754475020037e-05, "loss": 0.4836, "step": 5100 }, { "epoch": 1.3625434143734971, "step": 5100, "train/loss_ctc": 0.6346880793571472, "train/loss_error": 0.4155132472515106, "train/loss_total": 0.459348201751709 }, { "epoch": 1.3628105797488645, "step": 5101, "train/loss_ctc": 0.6904945373535156, "train/loss_error": 0.46107038855552673, "train/loss_total": 0.5069552063941956 }, { "epoch": 1.363077745124232, "step": 5102, "train/loss_ctc": 0.7911892533302307, "train/loss_error": 0.4551258087158203, "train/loss_total": 0.5223385095596313 }, { "epoch": 1.3633449104995994, "step": 5103, "train/loss_ctc": 1.0373435020446777, "train/loss_error": 0.42753246426582336, "train/loss_total": 0.5494946837425232 }, { "epoch": 1.3636120758749666, "step": 5104, "train/loss_ctc": 1.003524899482727, "train/loss_error": 0.42547252774238586, "train/loss_total": 
0.5410829782485962 }, { "epoch": 1.363879241250334, "step": 5105, "train/loss_ctc": 1.4080703258514404, "train/loss_error": 0.4364669620990753, "train/loss_total": 0.6307876110076904 }, { "epoch": 1.3641464066257014, "step": 5106, "train/loss_ctc": 0.7552152872085571, "train/loss_error": 0.4337591528892517, "train/loss_total": 0.49805039167404175 }, { "epoch": 1.3644135720010686, "step": 5107, "train/loss_ctc": 1.0966498851776123, "train/loss_error": 0.4090037941932678, "train/loss_total": 0.5465329885482788 }, { "epoch": 1.364680737376436, "step": 5108, "train/loss_ctc": 1.516533374786377, "train/loss_error": 0.4854084551334381, "train/loss_total": 0.6916334629058838 }, { "epoch": 1.3649479027518034, "step": 5109, "train/loss_ctc": 0.8639098405838013, "train/loss_error": 0.48274311423301697, "train/loss_total": 0.5589764714241028 }, { "epoch": 1.3652150681271706, "grad_norm": 4.443912029266357, "learning_rate": 2.1816724552497995e-05, "loss": 0.5505, "step": 5110 }, { "epoch": 1.3652150681271706, "step": 5110, "train/loss_ctc": 0.8300243616104126, "train/loss_error": 0.44588813185691833, "train/loss_total": 0.5227153897285461 }, { "epoch": 1.365482233502538, "step": 5111, "train/loss_ctc": 0.35033655166625977, "train/loss_error": 0.430319219827652, "train/loss_total": 0.41432270407676697 }, { "epoch": 1.3657493988779055, "step": 5112, "train/loss_ctc": 0.6929307579994202, "train/loss_error": 0.44565263390541077, "train/loss_total": 0.4951082468032837 }, { "epoch": 1.3660165642532727, "step": 5113, "train/loss_ctc": 0.6106889247894287, "train/loss_error": 0.5194026231765747, "train/loss_total": 0.5376598834991455 }, { "epoch": 1.36628372962864, "step": 5114, "train/loss_ctc": 0.759742259979248, "train/loss_error": 0.4930292069911957, "train/loss_total": 0.5463718175888062 }, { "epoch": 1.3665508950040075, "step": 5115, "train/loss_ctc": 1.1507823467254639, "train/loss_error": 0.5066272616386414, "train/loss_total": 0.6354582905769348 }, { "epoch": 
1.3668180603793747, "step": 5116, "train/loss_ctc": 0.6076228618621826, "train/loss_error": 0.42733827233314514, "train/loss_total": 0.46339520812034607 }, { "epoch": 1.3670852257547421, "step": 5117, "train/loss_ctc": 0.18658789992332458, "train/loss_error": 0.3844064176082611, "train/loss_total": 0.34484270215034485 }, { "epoch": 1.3673523911301095, "step": 5118, "train/loss_ctc": 0.6483687162399292, "train/loss_error": 0.4819987714290619, "train/loss_total": 0.5152727365493774 }, { "epoch": 1.367619556505477, "step": 5119, "train/loss_ctc": 0.655390739440918, "train/loss_error": 0.4969594478607178, "train/loss_total": 0.5286457538604736 }, { "epoch": 1.3678867218808444, "grad_norm": 2.506049156188965, "learning_rate": 2.1800694629975957e-05, "loss": 0.5004, "step": 5120 }, { "epoch": 1.3678867218808444, "step": 5120, "train/loss_ctc": 0.5724713802337646, "train/loss_error": 0.5139684677124023, "train/loss_total": 0.5256690382957458 }, { "epoch": 1.3681538872562116, "step": 5121, "train/loss_ctc": 1.2808151245117188, "train/loss_error": 0.3648257255554199, "train/loss_total": 0.5480235815048218 }, { "epoch": 1.368421052631579, "step": 5122, "train/loss_ctc": 1.556826114654541, "train/loss_error": 0.5440400838851929, "train/loss_total": 0.7465972900390625 }, { "epoch": 1.3686882180069464, "step": 5123, "train/loss_ctc": 0.6923689842224121, "train/loss_error": 0.5419418811798096, "train/loss_total": 0.572027325630188 }, { "epoch": 1.3689553833823136, "step": 5124, "train/loss_ctc": 0.4681994318962097, "train/loss_error": 0.406279593706131, "train/loss_total": 0.41866356134414673 }, { "epoch": 1.369222548757681, "step": 5125, "train/loss_ctc": 0.502341628074646, "train/loss_error": 0.49098846316337585, "train/loss_total": 0.49325910210609436 }, { "epoch": 1.3694897141330484, "step": 5126, "train/loss_ctc": 1.3677148818969727, "train/loss_error": 0.5111855864524841, "train/loss_total": 0.6824914216995239 }, { "epoch": 1.3697568795084156, "step": 5127, 
"train/loss_ctc": 0.6125552654266357, "train/loss_error": 0.514171838760376, "train/loss_total": 0.5338485240936279 }, { "epoch": 1.370024044883783, "step": 5128, "train/loss_ctc": 0.7142947316169739, "train/loss_error": 0.41152095794677734, "train/loss_total": 0.4720757305622101 }, { "epoch": 1.3702912102591505, "step": 5129, "train/loss_ctc": 0.7584511041641235, "train/loss_error": 0.4451126158237457, "train/loss_total": 0.5077803134918213 }, { "epoch": 1.3705583756345177, "grad_norm": 1.3716824054718018, "learning_rate": 2.1784664707453915e-05, "loss": 0.55, "step": 5130 }, { "epoch": 1.3705583756345177, "step": 5130, "train/loss_ctc": 0.6255660653114319, "train/loss_error": 0.47463664412498474, "train/loss_total": 0.5048225522041321 }, { "epoch": 1.370825541009885, "step": 5131, "train/loss_ctc": 0.5477678775787354, "train/loss_error": 0.49752697348594666, "train/loss_total": 0.5075751543045044 }, { "epoch": 1.3710927063852525, "step": 5132, "train/loss_ctc": 0.6677699089050293, "train/loss_error": 0.462170273065567, "train/loss_total": 0.5032901763916016 }, { "epoch": 1.3713598717606197, "step": 5133, "train/loss_ctc": 0.5018079280853271, "train/loss_error": 0.4094449579715729, "train/loss_total": 0.42791756987571716 }, { "epoch": 1.371627037135987, "step": 5134, "train/loss_ctc": 0.8810522556304932, "train/loss_error": 0.47886237502098083, "train/loss_total": 0.5593003630638123 }, { "epoch": 1.3718942025113545, "step": 5135, "train/loss_ctc": 0.6084520816802979, "train/loss_error": 0.5334842801094055, "train/loss_total": 0.548477828502655 }, { "epoch": 1.372161367886722, "step": 5136, "train/loss_ctc": 0.5754839181900024, "train/loss_error": 0.4547988176345825, "train/loss_total": 0.4789358377456665 }, { "epoch": 1.3724285332620894, "step": 5137, "train/loss_ctc": 0.7806345820426941, "train/loss_error": 0.4610484838485718, "train/loss_total": 0.5249657034873962 }, { "epoch": 1.3726956986374566, "step": 5138, "train/loss_ctc": 1.1992350816726685, 
"train/loss_error": 0.48610374331474304, "train/loss_total": 0.6287299990653992 }, { "epoch": 1.372962864012824, "step": 5139, "train/loss_ctc": 1.6547868251800537, "train/loss_error": 0.4612065553665161, "train/loss_total": 0.6999226212501526 }, { "epoch": 1.3732300293881914, "grad_norm": 2.8778140544891357, "learning_rate": 2.1768634784931873e-05, "loss": 0.5384, "step": 5140 }, { "epoch": 1.3732300293881914, "step": 5140, "train/loss_ctc": 0.40916043519973755, "train/loss_error": 0.5776593685150146, "train/loss_total": 0.5439596176147461 }, { "epoch": 1.3734971947635586, "step": 5141, "train/loss_ctc": 0.5961446762084961, "train/loss_error": 0.4937579929828644, "train/loss_total": 0.5142353177070618 }, { "epoch": 1.373764360138926, "step": 5142, "train/loss_ctc": 0.20918071269989014, "train/loss_error": 0.4564739465713501, "train/loss_total": 0.40701529383659363 }, { "epoch": 1.3740315255142934, "step": 5143, "train/loss_ctc": 0.6321224570274353, "train/loss_error": 0.46438685059547424, "train/loss_total": 0.4979339838027954 }, { "epoch": 1.3742986908896606, "step": 5144, "train/loss_ctc": 0.6158594489097595, "train/loss_error": 0.5026867985725403, "train/loss_total": 0.5253213047981262 }, { "epoch": 1.374565856265028, "step": 5145, "train/loss_ctc": 0.5043764114379883, "train/loss_error": 0.45142263174057007, "train/loss_total": 0.4620133936405182 }, { "epoch": 1.3748330216403954, "step": 5146, "train/loss_ctc": 1.7378497123718262, "train/loss_error": 0.541386067867279, "train/loss_total": 0.7806788086891174 }, { "epoch": 1.3751001870157626, "step": 5147, "train/loss_ctc": 0.9425755739212036, "train/loss_error": 0.5059489011764526, "train/loss_total": 0.5932742357254028 }, { "epoch": 1.37536735239113, "step": 5148, "train/loss_ctc": 0.5492414236068726, "train/loss_error": 0.45978379249572754, "train/loss_total": 0.47767531871795654 }, { "epoch": 1.3756345177664975, "step": 5149, "train/loss_ctc": 0.28564637899398804, "train/loss_error": 0.5068298578262329, 
"train/loss_total": 0.4625931680202484 }, { "epoch": 1.375901683141865, "grad_norm": 3.2258846759796143, "learning_rate": 2.175260486240983e-05, "loss": 0.5265, "step": 5150 }, { "epoch": 1.375901683141865, "step": 5150, "train/loss_ctc": 0.6682541966438293, "train/loss_error": 0.5400843024253845, "train/loss_total": 0.5657182931900024 }, { "epoch": 1.376168848517232, "step": 5151, "train/loss_ctc": 0.9879830479621887, "train/loss_error": 0.4793834388256073, "train/loss_total": 0.5811033248901367 }, { "epoch": 1.3764360138925995, "step": 5152, "train/loss_ctc": 1.6493295431137085, "train/loss_error": 0.46498221158981323, "train/loss_total": 0.7018516659736633 }, { "epoch": 1.376703179267967, "step": 5153, "train/loss_ctc": 0.38495802879333496, "train/loss_error": 0.3790968954563141, "train/loss_total": 0.3802691400051117 }, { "epoch": 1.3769703446433343, "step": 5154, "train/loss_ctc": 0.4338468909263611, "train/loss_error": 0.432029664516449, "train/loss_total": 0.4323931038379669 }, { "epoch": 1.3772375100187015, "step": 5155, "train/loss_ctc": 0.5455806255340576, "train/loss_error": 0.4628429114818573, "train/loss_total": 0.4793904721736908 }, { "epoch": 1.377504675394069, "step": 5156, "train/loss_ctc": 0.559482753276825, "train/loss_error": 0.40154364705085754, "train/loss_total": 0.43313145637512207 }, { "epoch": 1.3777718407694364, "step": 5157, "train/loss_ctc": 0.6611131429672241, "train/loss_error": 0.4611896872520447, "train/loss_total": 0.5011743903160095 }, { "epoch": 1.3780390061448036, "step": 5158, "train/loss_ctc": 0.44087815284729004, "train/loss_error": 0.4559676945209503, "train/loss_total": 0.45294979214668274 }, { "epoch": 1.378306171520171, "step": 5159, "train/loss_ctc": 0.9214466214179993, "train/loss_error": 0.4213639199733734, "train/loss_total": 0.5213804841041565 }, { "epoch": 1.3785733368955384, "grad_norm": 1.225403070449829, "learning_rate": 2.173657493988779e-05, "loss": 0.5049, "step": 5160 }, { "epoch": 1.3785733368955384, "step": 
5160, "train/loss_ctc": 0.9711699485778809, "train/loss_error": 0.5056519508361816, "train/loss_total": 0.5987555384635925 }, { "epoch": 1.3788405022709056, "step": 5161, "train/loss_ctc": 0.5752652883529663, "train/loss_error": 0.4594587981700897, "train/loss_total": 0.48262009024620056 }, { "epoch": 1.379107667646273, "step": 5162, "train/loss_ctc": 0.6469794511795044, "train/loss_error": 0.4999849498271942, "train/loss_total": 0.5293838381767273 }, { "epoch": 1.3793748330216404, "step": 5163, "train/loss_ctc": 0.8837049007415771, "train/loss_error": 0.570668637752533, "train/loss_total": 0.6332759261131287 }, { "epoch": 1.3796419983970076, "step": 5164, "train/loss_ctc": 0.8276888728141785, "train/loss_error": 0.4329947233200073, "train/loss_total": 0.5119335651397705 }, { "epoch": 1.379909163772375, "step": 5165, "train/loss_ctc": 1.1805815696716309, "train/loss_error": 0.4675917625427246, "train/loss_total": 0.6101897358894348 }, { "epoch": 1.3801763291477425, "step": 5166, "train/loss_ctc": 0.963132381439209, "train/loss_error": 0.4439649283885956, "train/loss_total": 0.5477983951568604 }, { "epoch": 1.3804434945231099, "step": 5167, "train/loss_ctc": 0.2750820517539978, "train/loss_error": 0.4584645926952362, "train/loss_total": 0.4217880666255951 }, { "epoch": 1.380710659898477, "step": 5168, "train/loss_ctc": 1.0520193576812744, "train/loss_error": 0.44445717334747314, "train/loss_total": 0.5659696459770203 }, { "epoch": 1.3809778252738445, "step": 5169, "train/loss_ctc": 1.373993158340454, "train/loss_error": 0.4813620448112488, "train/loss_total": 0.6598882675170898 }, { "epoch": 1.381244990649212, "grad_norm": 3.7650246620178223, "learning_rate": 2.172054501736575e-05, "loss": 0.5562, "step": 5170 }, { "epoch": 1.381244990649212, "step": 5170, "train/loss_ctc": 0.8950678110122681, "train/loss_error": 0.50310879945755, "train/loss_total": 0.5815005898475647 }, { "epoch": 1.3815121560245793, "step": 5171, "train/loss_ctc": 0.897392988204956, 
"train/loss_error": 0.4439070522785187, "train/loss_total": 0.5346042513847351 }, { "epoch": 1.3817793213999465, "step": 5172, "train/loss_ctc": 0.9839290976524353, "train/loss_error": 0.46394526958465576, "train/loss_total": 0.5679420232772827 }, { "epoch": 1.382046486775314, "step": 5173, "train/loss_ctc": 0.8707549571990967, "train/loss_error": 0.4599861800670624, "train/loss_total": 0.5421399474143982 }, { "epoch": 1.3823136521506814, "step": 5174, "train/loss_ctc": 1.4809536933898926, "train/loss_error": 0.49111050367355347, "train/loss_total": 0.6890791654586792 }, { "epoch": 1.3825808175260486, "step": 5175, "train/loss_ctc": 0.5020679235458374, "train/loss_error": 0.48942145705223083, "train/loss_total": 0.49195075035095215 }, { "epoch": 1.382847982901416, "step": 5176, "train/loss_ctc": 0.9628771543502808, "train/loss_error": 0.44004544615745544, "train/loss_total": 0.5446118116378784 }, { "epoch": 1.3831151482767834, "step": 5177, "train/loss_ctc": 0.48511719703674316, "train/loss_error": 0.452345073223114, "train/loss_total": 0.45889949798583984 }, { "epoch": 1.3833823136521506, "step": 5178, "train/loss_ctc": 0.5701109766960144, "train/loss_error": 0.45199817419052124, "train/loss_total": 0.47562074661254883 }, { "epoch": 1.383649479027518, "step": 5179, "train/loss_ctc": 0.44892358779907227, "train/loss_error": 0.46975380182266235, "train/loss_total": 0.4655877649784088 }, { "epoch": 1.3839166444028854, "grad_norm": 1.5723204612731934, "learning_rate": 2.170451509484371e-05, "loss": 0.5352, "step": 5180 }, { "epoch": 1.3839166444028854, "step": 5180, "train/loss_ctc": 0.6092981696128845, "train/loss_error": 0.42937877774238586, "train/loss_total": 0.46536266803741455 }, { "epoch": 1.3841838097782526, "step": 5181, "train/loss_ctc": 1.1197179555892944, "train/loss_error": 0.5836151838302612, "train/loss_total": 0.69083571434021 }, { "epoch": 1.38445097515362, "step": 5182, "train/loss_ctc": 1.0675581693649292, "train/loss_error": 0.5117480754852295, 
"train/loss_total": 0.6229100823402405 }, { "epoch": 1.3847181405289875, "step": 5183, "train/loss_ctc": 0.809783935546875, "train/loss_error": 0.4829714894294739, "train/loss_total": 0.548334002494812 }, { "epoch": 1.3849853059043549, "step": 5184, "train/loss_ctc": 2.032134532928467, "train/loss_error": 0.4930689036846161, "train/loss_total": 0.8008820414543152 }, { "epoch": 1.3852524712797223, "step": 5185, "train/loss_ctc": 1.8683210611343384, "train/loss_error": 0.5352690815925598, "train/loss_total": 0.8018795251846313 }, { "epoch": 1.3855196366550895, "step": 5186, "train/loss_ctc": 0.9058018922805786, "train/loss_error": 0.4841603636741638, "train/loss_total": 0.5684886574745178 }, { "epoch": 1.385786802030457, "step": 5187, "train/loss_ctc": 0.7424623966217041, "train/loss_error": 0.439175009727478, "train/loss_total": 0.49983251094818115 }, { "epoch": 1.3860539674058243, "step": 5188, "train/loss_ctc": 1.5526394844055176, "train/loss_error": 0.4392079710960388, "train/loss_total": 0.6618942618370056 }, { "epoch": 1.3863211327811915, "step": 5189, "train/loss_ctc": 0.6230635643005371, "train/loss_error": 0.4962487816810608, "train/loss_total": 0.521611750125885 }, { "epoch": 1.386588298156559, "grad_norm": 1.81016206741333, "learning_rate": 2.1688485172321667e-05, "loss": 0.6182, "step": 5190 }, { "epoch": 1.386588298156559, "step": 5190, "train/loss_ctc": 0.7955521941184998, "train/loss_error": 0.39676976203918457, "train/loss_total": 0.47652626037597656 }, { "epoch": 1.3868554635319263, "step": 5191, "train/loss_ctc": 1.0230005979537964, "train/loss_error": 0.5086615681648254, "train/loss_total": 0.6115293502807617 }, { "epoch": 1.3871226289072935, "step": 5192, "train/loss_ctc": 0.43761950731277466, "train/loss_error": 0.467373251914978, "train/loss_total": 0.46142250299453735 }, { "epoch": 1.387389794282661, "step": 5193, "train/loss_ctc": 0.7019270658493042, "train/loss_error": 0.4484221041202545, "train/loss_total": 0.49912309646606445 }, { "epoch": 
1.3876569596580284, "step": 5194, "train/loss_ctc": 0.5344380140304565, "train/loss_error": 0.47788840532302856, "train/loss_total": 0.48919832706451416 }, { "epoch": 1.3879241250333956, "step": 5195, "train/loss_ctc": 0.9723056554794312, "train/loss_error": 0.4117359519004822, "train/loss_total": 0.5238499045372009 }, { "epoch": 1.388191290408763, "step": 5196, "train/loss_ctc": 0.625835120677948, "train/loss_error": 0.40932953357696533, "train/loss_total": 0.4526306390762329 }, { "epoch": 1.3884584557841304, "step": 5197, "train/loss_ctc": 0.7804906964302063, "train/loss_error": 0.4532575011253357, "train/loss_total": 0.5187041759490967 }, { "epoch": 1.3887256211594976, "step": 5198, "train/loss_ctc": 0.9272131323814392, "train/loss_error": 0.42737212777137756, "train/loss_total": 0.527340292930603 }, { "epoch": 1.388992786534865, "step": 5199, "train/loss_ctc": 0.4687081575393677, "train/loss_error": 0.47101303935050964, "train/loss_total": 0.4705520570278168 }, { "epoch": 1.3892599519102324, "grad_norm": 2.0568952560424805, "learning_rate": 2.1672455249799628e-05, "loss": 0.5031, "step": 5200 }, { "epoch": 1.3892599519102324, "step": 5200, "train/loss_ctc": 0.7969313859939575, "train/loss_error": 0.4009653627872467, "train/loss_total": 0.48015856742858887 }, { "epoch": 1.3895271172855999, "step": 5201, "train/loss_ctc": 0.8718454837799072, "train/loss_error": 0.4727010428905487, "train/loss_total": 0.5525299310684204 }, { "epoch": 1.3897942826609673, "step": 5202, "train/loss_ctc": 1.6168984174728394, "train/loss_error": 0.4749016761779785, "train/loss_total": 0.7033010721206665 }, { "epoch": 1.3900614480363345, "step": 5203, "train/loss_ctc": 1.4444549083709717, "train/loss_error": 0.5233256220817566, "train/loss_total": 0.7075514793395996 }, { "epoch": 1.3903286134117019, "step": 5204, "train/loss_ctc": 0.6551883220672607, "train/loss_error": 0.446744441986084, "train/loss_total": 0.48843324184417725 }, { "epoch": 1.3905957787870693, "step": 5205, 
"train/loss_ctc": 0.4858020544052124, "train/loss_error": 0.42307043075561523, "train/loss_total": 0.43561679124832153 }, { "epoch": 1.3908629441624365, "step": 5206, "train/loss_ctc": 0.5306768417358398, "train/loss_error": 0.39631012082099915, "train/loss_total": 0.42318347096443176 }, { "epoch": 1.391130109537804, "step": 5207, "train/loss_ctc": 0.875541627407074, "train/loss_error": 0.46139851212501526, "train/loss_total": 0.544227123260498 }, { "epoch": 1.3913972749131713, "step": 5208, "train/loss_ctc": 0.9944572448730469, "train/loss_error": 0.5341184735298157, "train/loss_total": 0.6261862516403198 }, { "epoch": 1.3916644402885385, "step": 5209, "train/loss_ctc": 0.6969339847564697, "train/loss_error": 0.49596503376960754, "train/loss_total": 0.5361588001251221 }, { "epoch": 1.391931605663906, "grad_norm": 1.4362618923187256, "learning_rate": 2.1656425327277586e-05, "loss": 0.5497, "step": 5210 }, { "epoch": 1.391931605663906, "step": 5210, "train/loss_ctc": 0.5077357888221741, "train/loss_error": 0.4494275152683258, "train/loss_total": 0.461089164018631 }, { "epoch": 1.3921987710392734, "step": 5211, "train/loss_ctc": 1.6497145891189575, "train/loss_error": 0.5142583250999451, "train/loss_total": 0.7413495779037476 }, { "epoch": 1.3924659364146406, "step": 5212, "train/loss_ctc": 0.7833129167556763, "train/loss_error": 0.44113636016845703, "train/loss_total": 0.5095716714859009 }, { "epoch": 1.392733101790008, "step": 5213, "train/loss_ctc": 1.0356075763702393, "train/loss_error": 0.4608096182346344, "train/loss_total": 0.5757691860198975 }, { "epoch": 1.3930002671653754, "step": 5214, "train/loss_ctc": 1.1062829494476318, "train/loss_error": 0.46781960129737854, "train/loss_total": 0.5955122709274292 }, { "epoch": 1.3932674325407426, "step": 5215, "train/loss_ctc": 0.31036829948425293, "train/loss_error": 0.43315282464027405, "train/loss_total": 0.4085959196090698 }, { "epoch": 1.39353459791611, "step": 5216, "train/loss_ctc": 0.8859195709228516, 
"train/loss_error": 0.5048127174377441, "train/loss_total": 0.5810340642929077 }, { "epoch": 1.3938017632914774, "step": 5217, "train/loss_ctc": 0.6986603736877441, "train/loss_error": 0.4568448066711426, "train/loss_total": 0.5052079558372498 }, { "epoch": 1.3940689286668448, "step": 5218, "train/loss_ctc": 0.8070582151412964, "train/loss_error": 0.4255957305431366, "train/loss_total": 0.5018882751464844 }, { "epoch": 1.3943360940422123, "step": 5219, "train/loss_ctc": 0.5201412439346313, "train/loss_error": 0.4649335741996765, "train/loss_total": 0.4759751260280609 }, { "epoch": 1.3946032594175795, "grad_norm": 1.9681845903396606, "learning_rate": 2.1640395404755544e-05, "loss": 0.5356, "step": 5220 }, { "epoch": 1.3946032594175795, "step": 5220, "train/loss_ctc": 0.4684641659259796, "train/loss_error": 0.47390130162239075, "train/loss_total": 0.4728138744831085 }, { "epoch": 1.3948704247929469, "step": 5221, "train/loss_ctc": 0.7059310674667358, "train/loss_error": 0.39610832929611206, "train/loss_total": 0.4580729007720947 }, { "epoch": 1.3951375901683143, "step": 5222, "train/loss_ctc": 0.4547884166240692, "train/loss_error": 0.4839249551296234, "train/loss_total": 0.47809767723083496 }, { "epoch": 1.3954047555436815, "step": 5223, "train/loss_ctc": 0.3679261803627014, "train/loss_error": 0.5435471534729004, "train/loss_total": 0.5084229707717896 }, { "epoch": 1.395671920919049, "step": 5224, "train/loss_ctc": 0.42654016613960266, "train/loss_error": 0.4275587201118469, "train/loss_total": 0.427355021238327 }, { "epoch": 1.3959390862944163, "step": 5225, "train/loss_ctc": 1.1206390857696533, "train/loss_error": 0.4387601315975189, "train/loss_total": 0.5751359462738037 }, { "epoch": 1.3962062516697835, "step": 5226, "train/loss_ctc": 0.9298880696296692, "train/loss_error": 0.5241930484771729, "train/loss_total": 0.60533207654953 }, { "epoch": 1.396473417045151, "step": 5227, "train/loss_ctc": 0.8762221336364746, "train/loss_error": 0.46037545800209045, 
"train/loss_total": 0.5435448288917542 }, { "epoch": 1.3967405824205184, "step": 5228, "train/loss_ctc": 0.6784756183624268, "train/loss_error": 0.4988517463207245, "train/loss_total": 0.5347765684127808 }, { "epoch": 1.3970077477958855, "step": 5229, "train/loss_ctc": 0.9277552962303162, "train/loss_error": 0.49605417251586914, "train/loss_total": 0.5823944211006165 }, { "epoch": 1.397274913171253, "grad_norm": 2.849331855773926, "learning_rate": 2.1624365482233506e-05, "loss": 0.5186, "step": 5230 }, { "epoch": 1.397274913171253, "step": 5230, "train/loss_ctc": 1.3617937564849854, "train/loss_error": 0.46916502714157104, "train/loss_total": 0.6476907730102539 }, { "epoch": 1.3975420785466204, "step": 5231, "train/loss_ctc": 0.6050554513931274, "train/loss_error": 0.4941244125366211, "train/loss_total": 0.5163106322288513 }, { "epoch": 1.3978092439219876, "step": 5232, "train/loss_ctc": 0.47132834792137146, "train/loss_error": 0.45726868510246277, "train/loss_total": 0.460080623626709 }, { "epoch": 1.398076409297355, "step": 5233, "train/loss_ctc": 0.629313588142395, "train/loss_error": 0.4701174795627594, "train/loss_total": 0.5019567012786865 }, { "epoch": 1.3983435746727224, "step": 5234, "train/loss_ctc": 1.7480247020721436, "train/loss_error": 0.49814197421073914, "train/loss_total": 0.74811851978302 }, { "epoch": 1.3986107400480898, "step": 5235, "train/loss_ctc": 0.2837642431259155, "train/loss_error": 0.4292503297328949, "train/loss_total": 0.40015313029289246 }, { "epoch": 1.3988779054234572, "step": 5236, "train/loss_ctc": 0.5837754011154175, "train/loss_error": 0.4348366856575012, "train/loss_total": 0.46462446451187134 }, { "epoch": 1.3991450707988244, "step": 5237, "train/loss_ctc": 0.41112256050109863, "train/loss_error": 0.4801076352596283, "train/loss_total": 0.46631062030792236 }, { "epoch": 1.3994122361741919, "step": 5238, "train/loss_ctc": 0.8296369314193726, "train/loss_error": 0.40387487411499023, "train/loss_total": 0.48902732133865356 }, { 
"epoch": 1.3996794015495593, "step": 5239, "train/loss_ctc": 0.7684952020645142, "train/loss_error": 0.4573032855987549, "train/loss_total": 0.5195416808128357 }, { "epoch": 1.3999465669249265, "grad_norm": 6.646755695343018, "learning_rate": 2.1608335559711464e-05, "loss": 0.5214, "step": 5240 }, { "epoch": 1.3999465669249265, "step": 5240, "train/loss_ctc": 0.4746784567832947, "train/loss_error": 0.40891802310943604, "train/loss_total": 0.42207011580467224 }, { "epoch": 1.400213732300294, "step": 5241, "train/loss_ctc": 0.9598226547241211, "train/loss_error": 0.4680861234664917, "train/loss_total": 0.5664334297180176 }, { "epoch": 1.4004808976756613, "step": 5242, "train/loss_ctc": 1.3809581995010376, "train/loss_error": 0.4746682047843933, "train/loss_total": 0.6559262275695801 }, { "epoch": 1.4007480630510285, "step": 5243, "train/loss_ctc": 0.6485185623168945, "train/loss_error": 0.3895030915737152, "train/loss_total": 0.4413061738014221 }, { "epoch": 1.401015228426396, "step": 5244, "train/loss_ctc": 0.5165045857429504, "train/loss_error": 0.4835459291934967, "train/loss_total": 0.49013766646385193 }, { "epoch": 1.4012823938017633, "step": 5245, "train/loss_ctc": 0.8154330253601074, "train/loss_error": 0.47452351450920105, "train/loss_total": 0.5427054166793823 }, { "epoch": 1.4015495591771305, "step": 5246, "train/loss_ctc": 0.7576639652252197, "train/loss_error": 0.40744686126708984, "train/loss_total": 0.47749030590057373 }, { "epoch": 1.401816724552498, "step": 5247, "train/loss_ctc": 0.9584569931030273, "train/loss_error": 0.4839313328266144, "train/loss_total": 0.5788364410400391 }, { "epoch": 1.4020838899278654, "step": 5248, "train/loss_ctc": 0.8689666986465454, "train/loss_error": 0.5036548376083374, "train/loss_total": 0.5767172574996948 }, { "epoch": 1.4023510553032326, "step": 5249, "train/loss_ctc": 0.510699450969696, "train/loss_error": 0.48370361328125, "train/loss_total": 0.4891027808189392 }, { "epoch": 1.4026182206786, "grad_norm": 
2.9458022117614746, "learning_rate": 2.1592305637189422e-05, "loss": 0.5241, "step": 5250 }, { "epoch": 1.4026182206786, "step": 5250, "train/loss_ctc": 0.6048679947853088, "train/loss_error": 0.45420658588409424, "train/loss_total": 0.4843388795852661 }, { "epoch": 1.4028853860539674, "step": 5251, "train/loss_ctc": 0.5508760213851929, "train/loss_error": 0.43782877922058105, "train/loss_total": 0.46043825149536133 }, { "epoch": 1.4031525514293348, "step": 5252, "train/loss_ctc": 0.8835536241531372, "train/loss_error": 0.48502519726753235, "train/loss_total": 0.5647308826446533 }, { "epoch": 1.4034197168047022, "step": 5253, "train/loss_ctc": 0.9419876337051392, "train/loss_error": 0.5220508575439453, "train/loss_total": 0.6060382127761841 }, { "epoch": 1.4036868821800694, "step": 5254, "train/loss_ctc": 0.8142350912094116, "train/loss_error": 0.536482572555542, "train/loss_total": 0.5920330882072449 }, { "epoch": 1.4039540475554368, "step": 5255, "train/loss_ctc": 0.6145775318145752, "train/loss_error": 0.5117567777633667, "train/loss_total": 0.5323209762573242 }, { "epoch": 1.4042212129308043, "step": 5256, "train/loss_ctc": 0.9574795365333557, "train/loss_error": 0.4857224225997925, "train/loss_total": 0.5800738334655762 }, { "epoch": 1.4044883783061715, "step": 5257, "train/loss_ctc": 1.2250405550003052, "train/loss_error": 0.4499971568584442, "train/loss_total": 0.6050058603286743 }, { "epoch": 1.4047555436815389, "step": 5258, "train/loss_ctc": 0.5979390144348145, "train/loss_error": 0.4766901135444641, "train/loss_total": 0.5009399056434631 }, { "epoch": 1.4050227090569063, "step": 5259, "train/loss_ctc": 0.4779195785522461, "train/loss_error": 0.48422712087631226, "train/loss_total": 0.4829656183719635 }, { "epoch": 1.4052898744322735, "grad_norm": 2.2284798622131348, "learning_rate": 2.157627571466738e-05, "loss": 0.5409, "step": 5260 }, { "epoch": 1.4052898744322735, "step": 5260, "train/loss_ctc": 0.5207028388977051, "train/loss_error": 
0.5287600755691528, "train/loss_total": 0.5271486043930054 }, { "epoch": 1.405557039807641, "step": 5261, "train/loss_ctc": 0.9353870749473572, "train/loss_error": 0.4764402508735657, "train/loss_total": 0.568229615688324 }, { "epoch": 1.4058242051830083, "step": 5262, "train/loss_ctc": 0.5704389810562134, "train/loss_error": 0.4621334373950958, "train/loss_total": 0.48379454016685486 }, { "epoch": 1.4060913705583755, "step": 5263, "train/loss_ctc": 0.8192225098609924, "train/loss_error": 0.4730367660522461, "train/loss_total": 0.5422739386558533 }, { "epoch": 1.406358535933743, "step": 5264, "train/loss_ctc": 1.0018963813781738, "train/loss_error": 0.5186611413955688, "train/loss_total": 0.6153081655502319 }, { "epoch": 1.4066257013091104, "step": 5265, "train/loss_ctc": 0.4913521409034729, "train/loss_error": 0.4844670593738556, "train/loss_total": 0.48584407567977905 }, { "epoch": 1.4068928666844778, "step": 5266, "train/loss_ctc": 0.28445520997047424, "train/loss_error": 0.3773458003997803, "train/loss_total": 0.35876768827438354 }, { "epoch": 1.407160032059845, "step": 5267, "train/loss_ctc": 0.5379481911659241, "train/loss_error": 0.513582706451416, "train/loss_total": 0.5184558033943176 }, { "epoch": 1.4074271974352124, "step": 5268, "train/loss_ctc": 1.1493022441864014, "train/loss_error": 0.5708321928977966, "train/loss_total": 0.6865261793136597 }, { "epoch": 1.4076943628105798, "step": 5269, "train/loss_ctc": 1.4452941417694092, "train/loss_error": 0.5121324062347412, "train/loss_total": 0.6987648010253906 }, { "epoch": 1.4079615281859472, "grad_norm": 1.7856614589691162, "learning_rate": 2.1560245792145338e-05, "loss": 0.5485, "step": 5270 }, { "epoch": 1.4079615281859472, "step": 5270, "train/loss_ctc": 0.5335569977760315, "train/loss_error": 0.518317699432373, "train/loss_total": 0.5213655829429626 }, { "epoch": 1.4082286935613144, "step": 5271, "train/loss_ctc": 0.48602211475372314, "train/loss_error": 0.47879132628440857, "train/loss_total": 
0.4802374839782715 }, { "epoch": 1.4084958589366818, "step": 5272, "train/loss_ctc": 1.0968317985534668, "train/loss_error": 0.38125160336494446, "train/loss_total": 0.52436763048172 }, { "epoch": 1.4087630243120493, "step": 5273, "train/loss_ctc": 0.9443777799606323, "train/loss_error": 0.4637051522731781, "train/loss_total": 0.5598397254943848 }, { "epoch": 1.4090301896874164, "step": 5274, "train/loss_ctc": 1.1794453859329224, "train/loss_error": 0.44215866923332214, "train/loss_total": 0.589616060256958 }, { "epoch": 1.4092973550627839, "step": 5275, "train/loss_ctc": 0.7152352929115295, "train/loss_error": 0.41541099548339844, "train/loss_total": 0.47537586092948914 }, { "epoch": 1.4095645204381513, "step": 5276, "train/loss_ctc": 1.7828404903411865, "train/loss_error": 0.433724969625473, "train/loss_total": 0.7035480737686157 }, { "epoch": 1.4098316858135185, "step": 5277, "train/loss_ctc": 0.38121962547302246, "train/loss_error": 0.5590887665748596, "train/loss_total": 0.5235149264335632 }, { "epoch": 1.410098851188886, "step": 5278, "train/loss_ctc": 0.7217651605606079, "train/loss_error": 0.4306212067604065, "train/loss_total": 0.4888499975204468 }, { "epoch": 1.4103660165642533, "step": 5279, "train/loss_ctc": 2.05588960647583, "train/loss_error": 0.4675806760787964, "train/loss_total": 0.7852424383163452 }, { "epoch": 1.4106331819396205, "grad_norm": 1.4294922351837158, "learning_rate": 2.1544215869623296e-05, "loss": 0.5652, "step": 5280 }, { "epoch": 1.4106331819396205, "step": 5280, "train/loss_ctc": 0.709989607334137, "train/loss_error": 0.5239027142524719, "train/loss_total": 0.5611200928688049 }, { "epoch": 1.410900347314988, "step": 5281, "train/loss_ctc": 1.985769271850586, "train/loss_error": 0.4776209890842438, "train/loss_total": 0.7792506217956543 }, { "epoch": 1.4111675126903553, "step": 5282, "train/loss_ctc": 1.0024869441986084, "train/loss_error": 0.42218250036239624, "train/loss_total": 0.5382434129714966 }, { "epoch": 
1.4114346780657228, "step": 5283, "train/loss_ctc": 0.89432692527771, "train/loss_error": 0.4246978163719177, "train/loss_total": 0.5186236500740051 }, { "epoch": 1.41170184344109, "step": 5284, "train/loss_ctc": 0.3053853511810303, "train/loss_error": 0.4245761036872864, "train/loss_total": 0.4007379412651062 }, { "epoch": 1.4119690088164574, "step": 5285, "train/loss_ctc": 0.9445956945419312, "train/loss_error": 0.5103888511657715, "train/loss_total": 0.5972302556037903 }, { "epoch": 1.4122361741918248, "step": 5286, "train/loss_ctc": 0.76997971534729, "train/loss_error": 0.4590677320957184, "train/loss_total": 0.5212501287460327 }, { "epoch": 1.4125033395671922, "step": 5287, "train/loss_ctc": 0.5005528330802917, "train/loss_error": 0.5360084772109985, "train/loss_total": 0.5289173722267151 }, { "epoch": 1.4127705049425594, "step": 5288, "train/loss_ctc": 0.6266013979911804, "train/loss_error": 0.49664273858070374, "train/loss_total": 0.5226345062255859 }, { "epoch": 1.4130376703179268, "step": 5289, "train/loss_ctc": 0.8473448753356934, "train/loss_error": 0.48345136642456055, "train/loss_total": 0.5562300682067871 }, { "epoch": 1.4133048356932942, "grad_norm": 2.413874387741089, "learning_rate": 2.1528185947101258e-05, "loss": 0.5524, "step": 5290 }, { "epoch": 1.4133048356932942, "step": 5290, "train/loss_ctc": 1.2950594425201416, "train/loss_error": 0.4286816120147705, "train/loss_total": 0.6019572019577026 }, { "epoch": 1.4135720010686614, "step": 5291, "train/loss_ctc": 0.66539067029953, "train/loss_error": 0.46192046999931335, "train/loss_total": 0.5026144981384277 }, { "epoch": 1.4138391664440288, "step": 5292, "train/loss_ctc": 1.3792710304260254, "train/loss_error": 0.5295799970626831, "train/loss_total": 0.6995182037353516 }, { "epoch": 1.4141063318193963, "step": 5293, "train/loss_ctc": 0.736244797706604, "train/loss_error": 0.47889187932014465, "train/loss_total": 0.5303624868392944 }, { "epoch": 1.4143734971947635, "step": 5294, "train/loss_ctc": 
0.7449562549591064, "train/loss_error": 0.42616191506385803, "train/loss_total": 0.48992079496383667 }, { "epoch": 1.4146406625701309, "step": 5295, "train/loss_ctc": 0.4525192677974701, "train/loss_error": 0.45999154448509216, "train/loss_total": 0.4584971070289612 }, { "epoch": 1.4149078279454983, "step": 5296, "train/loss_ctc": 0.6612316370010376, "train/loss_error": 0.5090235471725464, "train/loss_total": 0.5394651889801025 }, { "epoch": 1.4151749933208655, "step": 5297, "train/loss_ctc": 0.5553237795829773, "train/loss_error": 0.4471924901008606, "train/loss_total": 0.4688187539577484 }, { "epoch": 1.415442158696233, "step": 5298, "train/loss_ctc": 1.1385657787322998, "train/loss_error": 0.441558301448822, "train/loss_total": 0.5809597969055176 }, { "epoch": 1.4157093240716003, "step": 5299, "train/loss_ctc": 0.6910079717636108, "train/loss_error": 0.42714986205101013, "train/loss_total": 0.47992148995399475 }, { "epoch": 1.4159764894469677, "grad_norm": 1.8802250623703003, "learning_rate": 2.1512156024579216e-05, "loss": 0.5352, "step": 5300 }, { "epoch": 1.4159764894469677, "step": 5300, "train/loss_ctc": 0.9904923439025879, "train/loss_error": 0.4085428714752197, "train/loss_total": 0.5249327421188354 }, { "epoch": 1.4162436548223352, "step": 5301, "train/loss_ctc": 0.8221689462661743, "train/loss_error": 0.4732714593410492, "train/loss_total": 0.5430509448051453 }, { "epoch": 1.4165108201977024, "step": 5302, "train/loss_ctc": 0.6869465112686157, "train/loss_error": 0.45759570598602295, "train/loss_total": 0.5034658908843994 }, { "epoch": 1.4167779855730698, "step": 5303, "train/loss_ctc": 1.2285974025726318, "train/loss_error": 0.5123178362846375, "train/loss_total": 0.6555737257003784 }, { "epoch": 1.4170451509484372, "step": 5304, "train/loss_ctc": 0.6572681665420532, "train/loss_error": 0.48671093583106995, "train/loss_total": 0.5208224058151245 }, { "epoch": 1.4173123163238044, "step": 5305, "train/loss_ctc": 0.688575804233551, "train/loss_error": 
0.4234553277492523, "train/loss_total": 0.4764794409275055 }, { "epoch": 1.4175794816991718, "step": 5306, "train/loss_ctc": 0.725901186466217, "train/loss_error": 0.5250665545463562, "train/loss_total": 0.5652334690093994 }, { "epoch": 1.4178466470745392, "step": 5307, "train/loss_ctc": 0.9884527921676636, "train/loss_error": 0.5020288228988647, "train/loss_total": 0.5993136167526245 }, { "epoch": 1.4181138124499064, "step": 5308, "train/loss_ctc": 0.5853886604309082, "train/loss_error": 0.47083598375320435, "train/loss_total": 0.4937465190887451 }, { "epoch": 1.4183809778252738, "step": 5309, "train/loss_ctc": 0.5974642038345337, "train/loss_error": 0.5324697494506836, "train/loss_total": 0.5454686284065247 }, { "epoch": 1.4186481432006413, "grad_norm": 1.7661970853805542, "learning_rate": 2.1496126102057174e-05, "loss": 0.5428, "step": 5310 }, { "epoch": 1.4186481432006413, "step": 5310, "train/loss_ctc": 1.4106547832489014, "train/loss_error": 0.5308799743652344, "train/loss_total": 0.7068349123001099 }, { "epoch": 1.4189153085760084, "step": 5311, "train/loss_ctc": 1.0235793590545654, "train/loss_error": 0.48703548312187195, "train/loss_total": 0.5943442583084106 }, { "epoch": 1.4191824739513759, "step": 5312, "train/loss_ctc": 0.45794403553009033, "train/loss_error": 0.4578215777873993, "train/loss_total": 0.45784610509872437 }, { "epoch": 1.4194496393267433, "step": 5313, "train/loss_ctc": 0.4245779514312744, "train/loss_error": 0.5970790982246399, "train/loss_total": 0.5625788569450378 }, { "epoch": 1.4197168047021105, "step": 5314, "train/loss_ctc": 1.0972408056259155, "train/loss_error": 0.4572048783302307, "train/loss_total": 0.5852120518684387 }, { "epoch": 1.419983970077478, "step": 5315, "train/loss_ctc": 0.7319273948669434, "train/loss_error": 0.48752614855766296, "train/loss_total": 0.536406397819519 }, { "epoch": 1.4202511354528453, "step": 5316, "train/loss_ctc": 0.38265669345855713, "train/loss_error": 0.42711010575294495, "train/loss_total": 
0.4182194471359253 }, { "epoch": 1.4205183008282127, "step": 5317, "train/loss_ctc": 0.6903787851333618, "train/loss_error": 0.4968254864215851, "train/loss_total": 0.5355361700057983 }, { "epoch": 1.4207854662035801, "step": 5318, "train/loss_ctc": 0.4165594279766083, "train/loss_error": 0.44288402795791626, "train/loss_total": 0.4376191198825836 }, { "epoch": 1.4210526315789473, "step": 5319, "train/loss_ctc": 1.8216307163238525, "train/loss_error": 0.4436434507369995, "train/loss_total": 0.7192409038543701 }, { "epoch": 1.4213197969543148, "grad_norm": 1.1397587060928345, "learning_rate": 2.1480096179535132e-05, "loss": 0.5554, "step": 5320 }, { "epoch": 1.4213197969543148, "step": 5320, "train/loss_ctc": 0.4727277159690857, "train/loss_error": 0.46821075677871704, "train/loss_total": 0.46911415457725525 }, { "epoch": 1.4215869623296822, "step": 5321, "train/loss_ctc": 0.5766640901565552, "train/loss_error": 0.47429129481315613, "train/loss_total": 0.49476584792137146 }, { "epoch": 1.4218541277050494, "step": 5322, "train/loss_ctc": 0.6231963634490967, "train/loss_error": 0.47831496596336365, "train/loss_total": 0.5072912573814392 }, { "epoch": 1.4221212930804168, "step": 5323, "train/loss_ctc": 0.44517263770103455, "train/loss_error": 0.5170326232910156, "train/loss_total": 0.5026606321334839 }, { "epoch": 1.4223884584557842, "step": 5324, "train/loss_ctc": 0.5305845737457275, "train/loss_error": 0.39995309710502625, "train/loss_total": 0.4260793924331665 }, { "epoch": 1.4226556238311514, "step": 5325, "train/loss_ctc": 0.9182367920875549, "train/loss_error": 0.4401366412639618, "train/loss_total": 0.5357567071914673 }, { "epoch": 1.4229227892065188, "step": 5326, "train/loss_ctc": 0.7741062641143799, "train/loss_error": 0.4431454539299011, "train/loss_total": 0.5093376040458679 }, { "epoch": 1.4231899545818862, "step": 5327, "train/loss_ctc": 0.4199434518814087, "train/loss_error": 0.4913298189640045, "train/loss_total": 0.47705256938934326 }, { "epoch": 
1.4234571199572534, "step": 5328, "train/loss_ctc": 0.39398670196533203, "train/loss_error": 0.44166505336761475, "train/loss_total": 0.4321293830871582 }, { "epoch": 1.4237242853326209, "step": 5329, "train/loss_ctc": 0.36802375316619873, "train/loss_error": 0.3850766122341156, "train/loss_total": 0.38166606426239014 }, { "epoch": 1.4239914507079883, "grad_norm": 1.1278350353240967, "learning_rate": 2.146406625701309e-05, "loss": 0.4736, "step": 5330 }, { "epoch": 1.4239914507079883, "step": 5330, "train/loss_ctc": 1.5325367450714111, "train/loss_error": 0.5056254863739014, "train/loss_total": 0.7110077142715454 }, { "epoch": 1.4242586160833555, "step": 5331, "train/loss_ctc": 1.113208293914795, "train/loss_error": 0.4698956310749054, "train/loss_total": 0.5985581874847412 }, { "epoch": 1.4245257814587229, "step": 5332, "train/loss_ctc": 0.5444629192352295, "train/loss_error": 0.5154768824577332, "train/loss_total": 0.5212740898132324 }, { "epoch": 1.4247929468340903, "step": 5333, "train/loss_ctc": 0.5709472894668579, "train/loss_error": 0.4562995433807373, "train/loss_total": 0.4792290925979614 }, { "epoch": 1.4250601122094577, "step": 5334, "train/loss_ctc": 0.9216256141662598, "train/loss_error": 0.4612240493297577, "train/loss_total": 0.5533043742179871 }, { "epoch": 1.4253272775848251, "step": 5335, "train/loss_ctc": 0.7609704732894897, "train/loss_error": 0.5191632509231567, "train/loss_total": 0.5675247311592102 }, { "epoch": 1.4255944429601923, "step": 5336, "train/loss_ctc": 0.37800532579421997, "train/loss_error": 0.5217843651771545, "train/loss_total": 0.49302858114242554 }, { "epoch": 1.4258616083355597, "step": 5337, "train/loss_ctc": 0.4171872138977051, "train/loss_error": 0.47438743710517883, "train/loss_total": 0.46294739842414856 }, { "epoch": 1.4261287737109272, "step": 5338, "train/loss_ctc": 1.212769865989685, "train/loss_error": 0.44232988357543945, "train/loss_total": 0.5964179039001465 }, { "epoch": 1.4263959390862944, "step": 5339, 
"train/loss_ctc": 0.6123156547546387, "train/loss_error": 0.527378261089325, "train/loss_total": 0.5443657636642456 }, { "epoch": 1.4266631044616618, "grad_norm": 1.7424085140228271, "learning_rate": 2.1448036334491048e-05, "loss": 0.5528, "step": 5340 }, { "epoch": 1.4266631044616618, "step": 5340, "train/loss_ctc": 0.6516035199165344, "train/loss_error": 0.4932669997215271, "train/loss_total": 0.5249342918395996 }, { "epoch": 1.4269302698370292, "step": 5341, "train/loss_ctc": 0.6318923830986023, "train/loss_error": 0.46380743384361267, "train/loss_total": 0.4974244236946106 }, { "epoch": 1.4271974352123964, "step": 5342, "train/loss_ctc": 0.3627125322818756, "train/loss_error": 0.4218360185623169, "train/loss_total": 0.410011351108551 }, { "epoch": 1.4274646005877638, "step": 5343, "train/loss_ctc": 1.0650724172592163, "train/loss_error": 0.48312312364578247, "train/loss_total": 0.5995129942893982 }, { "epoch": 1.4277317659631312, "step": 5344, "train/loss_ctc": 0.39957404136657715, "train/loss_error": 0.41283077001571655, "train/loss_total": 0.4101794362068176 }, { "epoch": 1.4279989313384984, "step": 5345, "train/loss_ctc": 0.5424574017524719, "train/loss_error": 0.46600398421287537, "train/loss_total": 0.4812946617603302 }, { "epoch": 1.4282660967138658, "step": 5346, "train/loss_ctc": 0.6404021382331848, "train/loss_error": 0.45018500089645386, "train/loss_total": 0.488228440284729 }, { "epoch": 1.4285332620892333, "step": 5347, "train/loss_ctc": 0.8810701966285706, "train/loss_error": 0.4256029427051544, "train/loss_total": 0.5166963934898376 }, { "epoch": 1.4288004274646005, "step": 5348, "train/loss_ctc": 0.39583244919776917, "train/loss_error": 0.44022589921951294, "train/loss_total": 0.43134722113609314 }, { "epoch": 1.4290675928399679, "step": 5349, "train/loss_ctc": 0.9615591764450073, "train/loss_error": 0.4611682593822479, "train/loss_total": 0.5612464547157288 }, { "epoch": 1.4293347582153353, "grad_norm": 4.348267555236816, "learning_rate": 
2.143200641196901e-05, "loss": 0.4921, "step": 5350 }, { "epoch": 1.4293347582153353, "step": 5350, "train/loss_ctc": 0.42584338784217834, "train/loss_error": 0.4980359375476837, "train/loss_total": 0.48359745740890503 }, { "epoch": 1.4296019235907027, "step": 5351, "train/loss_ctc": 0.44796621799468994, "train/loss_error": 0.45391708612442017, "train/loss_total": 0.45272690057754517 }, { "epoch": 1.4298690889660701, "step": 5352, "train/loss_ctc": 0.6887683868408203, "train/loss_error": 0.4374597370624542, "train/loss_total": 0.4877215027809143 }, { "epoch": 1.4301362543414373, "step": 5353, "train/loss_ctc": 1.0664184093475342, "train/loss_error": 0.4329276382923126, "train/loss_total": 0.5596258044242859 }, { "epoch": 1.4304034197168047, "step": 5354, "train/loss_ctc": 1.314748764038086, "train/loss_error": 0.48669207096099854, "train/loss_total": 0.6523034572601318 }, { "epoch": 1.4306705850921722, "step": 5355, "train/loss_ctc": 0.6014319658279419, "train/loss_error": 0.478310227394104, "train/loss_total": 0.5029345750808716 }, { "epoch": 1.4309377504675393, "step": 5356, "train/loss_ctc": 0.46712738275527954, "train/loss_error": 0.44919082522392273, "train/loss_total": 0.452778160572052 }, { "epoch": 1.4312049158429068, "step": 5357, "train/loss_ctc": 0.46125081181526184, "train/loss_error": 0.5372634530067444, "train/loss_total": 0.5220609307289124 }, { "epoch": 1.4314720812182742, "step": 5358, "train/loss_ctc": 0.5358332395553589, "train/loss_error": 0.45786723494529724, "train/loss_total": 0.47346043586730957 }, { "epoch": 1.4317392465936414, "step": 5359, "train/loss_ctc": 0.8955150842666626, "train/loss_error": 0.5097146034240723, "train/loss_total": 0.5868747234344482 }, { "epoch": 1.4320064119690088, "grad_norm": 2.725198745727539, "learning_rate": 2.1415976489446968e-05, "loss": 0.5174, "step": 5360 }, { "epoch": 1.4320064119690088, "step": 5360, "train/loss_ctc": 0.4899306297302246, "train/loss_error": 0.4509453773498535, "train/loss_total": 
0.4587424397468567 }, { "epoch": 1.4322735773443762, "step": 5361, "train/loss_ctc": 0.6201145648956299, "train/loss_error": 0.4729962944984436, "train/loss_total": 0.5024199485778809 }, { "epoch": 1.4325407427197434, "step": 5362, "train/loss_ctc": 1.296734094619751, "train/loss_error": 0.48353102803230286, "train/loss_total": 0.6461716294288635 }, { "epoch": 1.4328079080951108, "step": 5363, "train/loss_ctc": 0.926640510559082, "train/loss_error": 0.3941957652568817, "train/loss_total": 0.5006847381591797 }, { "epoch": 1.4330750734704782, "step": 5364, "train/loss_ctc": 0.6790627837181091, "train/loss_error": 0.3740193247795105, "train/loss_total": 0.4350280165672302 }, { "epoch": 1.4333422388458454, "step": 5365, "train/loss_ctc": 0.351535439491272, "train/loss_error": 0.5141026377677917, "train/loss_total": 0.4815891981124878 }, { "epoch": 1.4336094042212129, "step": 5366, "train/loss_ctc": 0.8063108921051025, "train/loss_error": 0.4853619635105133, "train/loss_total": 0.5495517253875732 }, { "epoch": 1.4338765695965803, "step": 5367, "train/loss_ctc": 1.0296838283538818, "train/loss_error": 0.5072479844093323, "train/loss_total": 0.6117351651191711 }, { "epoch": 1.4341437349719477, "step": 5368, "train/loss_ctc": 1.0221673250198364, "train/loss_error": 0.43592777848243713, "train/loss_total": 0.553175687789917 }, { "epoch": 1.434410900347315, "step": 5369, "train/loss_ctc": 0.44995754957199097, "train/loss_error": 0.45585137605667114, "train/loss_total": 0.45467260479927063 }, { "epoch": 1.4346780657226823, "grad_norm": 3.5381429195404053, "learning_rate": 2.1399946566924926e-05, "loss": 0.5194, "step": 5370 }, { "epoch": 1.4346780657226823, "step": 5370, "train/loss_ctc": 1.1256837844848633, "train/loss_error": 0.44606420397758484, "train/loss_total": 0.5819880962371826 }, { "epoch": 1.4349452310980497, "step": 5371, "train/loss_ctc": 0.8147503137588501, "train/loss_error": 0.43047648668289185, "train/loss_total": 0.5073312520980835 }, { "epoch": 
1.4352123964734171, "step": 5372, "train/loss_ctc": 1.180392861366272, "train/loss_error": 0.42848846316337585, "train/loss_total": 0.5788693428039551 }, { "epoch": 1.4354795618487843, "step": 5373, "train/loss_ctc": 0.6707545518875122, "train/loss_error": 0.4868667721748352, "train/loss_total": 0.5236443281173706 }, { "epoch": 1.4357467272241518, "step": 5374, "train/loss_ctc": 0.8217519521713257, "train/loss_error": 0.5178185701370239, "train/loss_total": 0.5786052942276001 }, { "epoch": 1.4360138925995192, "step": 5375, "train/loss_ctc": 0.6338901519775391, "train/loss_error": 0.42908403277397156, "train/loss_total": 0.470045268535614 }, { "epoch": 1.4362810579748864, "step": 5376, "train/loss_ctc": 0.9711778163909912, "train/loss_error": 0.4138158857822418, "train/loss_total": 0.5252882838249207 }, { "epoch": 1.4365482233502538, "step": 5377, "train/loss_ctc": 0.48331716656684875, "train/loss_error": 0.46219319105148315, "train/loss_total": 0.46641799807548523 }, { "epoch": 1.4368153887256212, "step": 5378, "train/loss_ctc": 0.7080473303794861, "train/loss_error": 0.4994881749153137, "train/loss_total": 0.5412000417709351 }, { "epoch": 1.4370825541009884, "step": 5379, "train/loss_ctc": 0.4632326066493988, "train/loss_error": 0.48379063606262207, "train/loss_total": 0.47967904806137085 }, { "epoch": 1.4373497194763558, "grad_norm": 1.616140365600586, "learning_rate": 2.1383916644402887e-05, "loss": 0.5253, "step": 5380 }, { "epoch": 1.4373497194763558, "step": 5380, "train/loss_ctc": 1.7112290859222412, "train/loss_error": 0.406487375497818, "train/loss_total": 0.6674357652664185 }, { "epoch": 1.4376168848517232, "step": 5381, "train/loss_ctc": 1.11501145362854, "train/loss_error": 0.4592885673046112, "train/loss_total": 0.5904331803321838 }, { "epoch": 1.4378840502270906, "step": 5382, "train/loss_ctc": 0.6285043358802795, "train/loss_error": 0.5093216300010681, "train/loss_total": 0.5331581830978394 }, { "epoch": 1.4381512156024578, "step": 5383, 
"train/loss_ctc": 1.1473159790039062, "train/loss_error": 0.41154518723487854, "train/loss_total": 0.558699369430542 }, { "epoch": 1.4384183809778253, "step": 5384, "train/loss_ctc": 0.6195727586746216, "train/loss_error": 0.4102683365345001, "train/loss_total": 0.4521292448043823 }, { "epoch": 1.4386855463531927, "step": 5385, "train/loss_ctc": 0.9126148223876953, "train/loss_error": 0.5162216424942017, "train/loss_total": 0.5955002903938293 }, { "epoch": 1.43895271172856, "step": 5386, "train/loss_ctc": 0.42928558588027954, "train/loss_error": 0.43082889914512634, "train/loss_total": 0.430520236492157 }, { "epoch": 1.4392198771039273, "step": 5387, "train/loss_ctc": 0.398294061422348, "train/loss_error": 0.49729612469673157, "train/loss_total": 0.4774957001209259 }, { "epoch": 1.4394870424792947, "step": 5388, "train/loss_ctc": 1.0403846502304077, "train/loss_error": 0.49672940373420715, "train/loss_total": 0.6054604649543762 }, { "epoch": 1.4397542078546621, "step": 5389, "train/loss_ctc": 1.6409318447113037, "train/loss_error": 0.497259259223938, "train/loss_total": 0.7259937524795532 }, { "epoch": 1.4400213732300293, "grad_norm": 2.4770619869232178, "learning_rate": 2.1367886721880845e-05, "loss": 0.5637, "step": 5390 }, { "epoch": 1.4400213732300293, "step": 5390, "train/loss_ctc": 0.5292998552322388, "train/loss_error": 0.42025497555732727, "train/loss_total": 0.44206395745277405 }, { "epoch": 1.4402885386053967, "step": 5391, "train/loss_ctc": 0.8837306499481201, "train/loss_error": 0.478939950466156, "train/loss_total": 0.5598981380462646 }, { "epoch": 1.4405557039807642, "step": 5392, "train/loss_ctc": 0.46714866161346436, "train/loss_error": 0.4951295256614685, "train/loss_total": 0.48953336477279663 }, { "epoch": 1.4408228693561314, "step": 5393, "train/loss_ctc": 0.4432535469532013, "train/loss_error": 0.4126102924346924, "train/loss_total": 0.4187389314174652 }, { "epoch": 1.4410900347314988, "step": 5394, "train/loss_ctc": 1.0960320234298706, 
"train/loss_error": 0.4372604787349701, "train/loss_total": 0.5690147876739502 }, { "epoch": 1.4413572001068662, "step": 5395, "train/loss_ctc": 0.5656267404556274, "train/loss_error": 0.5052698850631714, "train/loss_total": 0.5173412561416626 }, { "epoch": 1.4416243654822334, "step": 5396, "train/loss_ctc": 0.23236601054668427, "train/loss_error": 0.42937642335891724, "train/loss_total": 0.38997435569763184 }, { "epoch": 1.4418915308576008, "step": 5397, "train/loss_ctc": 0.6276510953903198, "train/loss_error": 0.441623330116272, "train/loss_total": 0.47882890701293945 }, { "epoch": 1.4421586962329682, "step": 5398, "train/loss_ctc": 0.7401968240737915, "train/loss_error": 0.43762290477752686, "train/loss_total": 0.4981377124786377 }, { "epoch": 1.4424258616083356, "step": 5399, "train/loss_ctc": 0.7276512384414673, "train/loss_error": 0.48961925506591797, "train/loss_total": 0.5372256636619568 }, { "epoch": 1.4426930269837028, "grad_norm": 4.3026204109191895, "learning_rate": 2.1351856799358803e-05, "loss": 0.4901, "step": 5400 }, { "epoch": 1.4426930269837028, "step": 5400, "train/loss_ctc": 0.8135637044906616, "train/loss_error": 0.3938402235507965, "train/loss_total": 0.4777849316596985 }, { "epoch": 1.4429601923590702, "step": 5401, "train/loss_ctc": 0.5773953199386597, "train/loss_error": 0.4746154844760895, "train/loss_total": 0.4951714873313904 }, { "epoch": 1.4432273577344377, "step": 5402, "train/loss_ctc": 0.45981118083000183, "train/loss_error": 0.4290216267108917, "train/loss_total": 0.43517956137657166 }, { "epoch": 1.443494523109805, "step": 5403, "train/loss_ctc": 0.8752828240394592, "train/loss_error": 0.43368083238601685, "train/loss_total": 0.5220012068748474 }, { "epoch": 1.4437616884851723, "step": 5404, "train/loss_ctc": 1.2609622478485107, "train/loss_error": 0.4289892017841339, "train/loss_total": 0.5953838229179382 }, { "epoch": 1.4440288538605397, "step": 5405, "train/loss_ctc": 0.30845844745635986, "train/loss_error": 0.4779067933559418, 
"train/loss_total": 0.4440171420574188 }, { "epoch": 1.4442960192359071, "step": 5406, "train/loss_ctc": 0.7178902626037598, "train/loss_error": 0.46979427337646484, "train/loss_total": 0.5194134712219238 }, { "epoch": 1.4445631846112743, "step": 5407, "train/loss_ctc": 0.5123506784439087, "train/loss_error": 0.5539098381996155, "train/loss_total": 0.545598030090332 }, { "epoch": 1.4448303499866417, "step": 5408, "train/loss_ctc": 0.6902077794075012, "train/loss_error": 0.42279404401779175, "train/loss_total": 0.47627678513526917 }, { "epoch": 1.4450975153620091, "step": 5409, "train/loss_ctc": 0.5117406845092773, "train/loss_error": 0.43504035472869873, "train/loss_total": 0.45038044452667236 }, { "epoch": 1.4453646807373763, "grad_norm": 1.6352235078811646, "learning_rate": 2.1335826876836765e-05, "loss": 0.4961, "step": 5410 }, { "epoch": 1.4453646807373763, "step": 5410, "train/loss_ctc": 1.2552752494812012, "train/loss_error": 0.44795891642570496, "train/loss_total": 0.6094222068786621 }, { "epoch": 1.4456318461127438, "step": 5411, "train/loss_ctc": 0.511027991771698, "train/loss_error": 0.45991945266723633, "train/loss_total": 0.4701411724090576 }, { "epoch": 1.4458990114881112, "step": 5412, "train/loss_ctc": 0.4969402551651001, "train/loss_error": 0.4453164339065552, "train/loss_total": 0.4556412100791931 }, { "epoch": 1.4461661768634784, "step": 5413, "train/loss_ctc": 0.7293568849563599, "train/loss_error": 0.46913251280784607, "train/loss_total": 0.5211774110794067 }, { "epoch": 1.4464333422388458, "step": 5414, "train/loss_ctc": 0.45847272872924805, "train/loss_error": 0.4984930753707886, "train/loss_total": 0.49048900604248047 }, { "epoch": 1.4467005076142132, "step": 5415, "train/loss_ctc": 0.7096061706542969, "train/loss_error": 0.48623666167259216, "train/loss_total": 0.5309105515480042 }, { "epoch": 1.4469676729895806, "step": 5416, "train/loss_ctc": 0.8355128169059753, "train/loss_error": 0.425849974155426, "train/loss_total": 0.507782518863678 
}, { "epoch": 1.447234838364948, "step": 5417, "train/loss_ctc": 0.3716062903404236, "train/loss_error": 0.42953556776046753, "train/loss_total": 0.41794973611831665 }, { "epoch": 1.4475020037403152, "step": 5418, "train/loss_ctc": 0.8747214078903198, "train/loss_error": 0.4911893606185913, "train/loss_total": 0.567895770072937 }, { "epoch": 1.4477691691156827, "step": 5419, "train/loss_ctc": 0.6064562201499939, "train/loss_error": 0.4697805345058441, "train/loss_total": 0.4971156716346741 }, { "epoch": 1.44803633449105, "grad_norm": 3.8502418994903564, "learning_rate": 2.1319796954314723e-05, "loss": 0.5069, "step": 5420 }, { "epoch": 1.44803633449105, "step": 5420, "train/loss_ctc": 0.9156332015991211, "train/loss_error": 0.46466031670570374, "train/loss_total": 0.5548549294471741 }, { "epoch": 1.4483034998664173, "step": 5421, "train/loss_ctc": 0.4620613157749176, "train/loss_error": 0.46968910098075867, "train/loss_total": 0.46816354990005493 }, { "epoch": 1.4485706652417847, "step": 5422, "train/loss_ctc": 0.9737681150436401, "train/loss_error": 0.43702614307403564, "train/loss_total": 0.5443745255470276 }, { "epoch": 1.448837830617152, "step": 5423, "train/loss_ctc": 0.4254634380340576, "train/loss_error": 0.44189542531967163, "train/loss_total": 0.4386090338230133 }, { "epoch": 1.4491049959925193, "step": 5424, "train/loss_ctc": 0.5636584758758545, "train/loss_error": 0.45044949650764465, "train/loss_total": 0.4730913043022156 }, { "epoch": 1.4493721613678867, "step": 5425, "train/loss_ctc": 0.9707409143447876, "train/loss_error": 0.4889329969882965, "train/loss_total": 0.5852946043014526 }, { "epoch": 1.4496393267432541, "step": 5426, "train/loss_ctc": 0.723075270652771, "train/loss_error": 0.42823904752731323, "train/loss_total": 0.4872063100337982 }, { "epoch": 1.4499064921186213, "step": 5427, "train/loss_ctc": 0.6790310144424438, "train/loss_error": 0.4969034492969513, "train/loss_total": 0.5333290100097656 }, { "epoch": 1.4501736574939887, "step": 
5428, "train/loss_ctc": 0.6265157461166382, "train/loss_error": 0.4533938467502594, "train/loss_total": 0.4880182445049286 }, { "epoch": 1.4504408228693562, "step": 5429, "train/loss_ctc": 0.8964227437973022, "train/loss_error": 0.5087531805038452, "train/loss_total": 0.5862870812416077 }, { "epoch": 1.4507079882447234, "grad_norm": 3.1137571334838867, "learning_rate": 2.130376703179268e-05, "loss": 0.5159, "step": 5430 }, { "epoch": 1.4507079882447234, "step": 5430, "train/loss_ctc": 0.43800169229507446, "train/loss_error": 0.4216030538082123, "train/loss_total": 0.42488279938697815 }, { "epoch": 1.4509751536200908, "step": 5431, "train/loss_ctc": 0.85550457239151, "train/loss_error": 0.4625168442726135, "train/loss_total": 0.5411143898963928 }, { "epoch": 1.4512423189954582, "step": 5432, "train/loss_ctc": 1.0428590774536133, "train/loss_error": 0.5008417367935181, "train/loss_total": 0.6092451810836792 }, { "epoch": 1.4515094843708256, "step": 5433, "train/loss_ctc": 0.6311134099960327, "train/loss_error": 0.41943472623825073, "train/loss_total": 0.4617704749107361 }, { "epoch": 1.451776649746193, "step": 5434, "train/loss_ctc": 0.7788119316101074, "train/loss_error": 0.46544376015663147, "train/loss_total": 0.5281174182891846 }, { "epoch": 1.4520438151215602, "step": 5435, "train/loss_ctc": 1.068882703781128, "train/loss_error": 0.4297601580619812, "train/loss_total": 0.5575847029685974 }, { "epoch": 1.4523109804969276, "step": 5436, "train/loss_ctc": 1.3670839071273804, "train/loss_error": 0.4113580882549286, "train/loss_total": 0.6025032997131348 }, { "epoch": 1.452578145872295, "step": 5437, "train/loss_ctc": 0.32607781887054443, "train/loss_error": 0.45438116788864136, "train/loss_total": 0.42872050404548645 }, { "epoch": 1.4528453112476623, "step": 5438, "train/loss_ctc": 0.5687707662582397, "train/loss_error": 0.43280351161956787, "train/loss_total": 0.4599969685077667 }, { "epoch": 1.4531124766230297, "step": 5439, "train/loss_ctc": 0.7982035279273987, 
"train/loss_error": 0.5220350623130798, "train/loss_total": 0.5772687792778015 }, { "epoch": 1.453379641998397, "grad_norm": 6.358109474182129, "learning_rate": 2.128773710927064e-05, "loss": 0.5191, "step": 5440 }, { "epoch": 1.453379641998397, "step": 5440, "train/loss_ctc": 0.929195761680603, "train/loss_error": 0.48332250118255615, "train/loss_total": 0.5724971890449524 }, { "epoch": 1.4536468073737643, "step": 5441, "train/loss_ctc": 0.47653019428253174, "train/loss_error": 0.46939337253570557, "train/loss_total": 0.47082075476646423 }, { "epoch": 1.4539139727491317, "step": 5442, "train/loss_ctc": 0.8874345421791077, "train/loss_error": 0.4413614869117737, "train/loss_total": 0.5305761098861694 }, { "epoch": 1.4541811381244991, "step": 5443, "train/loss_ctc": 0.6541521549224854, "train/loss_error": 0.4121301472187042, "train/loss_total": 0.46053457260131836 }, { "epoch": 1.4544483034998663, "step": 5444, "train/loss_ctc": 0.44213634729385376, "train/loss_error": 0.5128440260887146, "train/loss_total": 0.4987024962902069 }, { "epoch": 1.4547154688752337, "step": 5445, "train/loss_ctc": 1.2026945352554321, "train/loss_error": 0.4532759487628937, "train/loss_total": 0.6031596660614014 }, { "epoch": 1.4549826342506011, "step": 5446, "train/loss_ctc": 0.9080733060836792, "train/loss_error": 0.47242581844329834, "train/loss_total": 0.5595552921295166 }, { "epoch": 1.4552497996259683, "step": 5447, "train/loss_ctc": 0.3172719180583954, "train/loss_error": 0.45595771074295044, "train/loss_total": 0.42822057008743286 }, { "epoch": 1.4555169650013358, "step": 5448, "train/loss_ctc": 0.8742009401321411, "train/loss_error": 0.4186025857925415, "train/loss_total": 0.5097222924232483 }, { "epoch": 1.4557841303767032, "step": 5449, "train/loss_ctc": 0.5973267555236816, "train/loss_error": 0.4152475595474243, "train/loss_total": 0.45166340470314026 }, { "epoch": 1.4560512957520706, "grad_norm": 2.107562303543091, "learning_rate": 2.1271707186748597e-05, "loss": 0.5085, 
"step": 5450 }, { "epoch": 1.4560512957520706, "step": 5450, "train/loss_ctc": 0.8358296155929565, "train/loss_error": 0.40880075097084045, "train/loss_total": 0.4942065477371216 }, { "epoch": 1.456318461127438, "step": 5451, "train/loss_ctc": 0.8076688051223755, "train/loss_error": 0.517317533493042, "train/loss_total": 0.5753877758979797 }, { "epoch": 1.4565856265028052, "step": 5452, "train/loss_ctc": 0.7125123143196106, "train/loss_error": 0.4276479482650757, "train/loss_total": 0.4846208095550537 }, { "epoch": 1.4568527918781726, "step": 5453, "train/loss_ctc": 1.0299040079116821, "train/loss_error": 0.4836256802082062, "train/loss_total": 0.5928813219070435 }, { "epoch": 1.45711995725354, "step": 5454, "train/loss_ctc": 0.9537816047668457, "train/loss_error": 0.4384148418903351, "train/loss_total": 0.5414881706237793 }, { "epoch": 1.4573871226289072, "step": 5455, "train/loss_ctc": 0.7999740242958069, "train/loss_error": 0.47557953000068665, "train/loss_total": 0.5404584407806396 }, { "epoch": 1.4576542880042747, "step": 5456, "train/loss_ctc": 0.46945226192474365, "train/loss_error": 0.48786407709121704, "train/loss_total": 0.4841817319393158 }, { "epoch": 1.457921453379642, "step": 5457, "train/loss_ctc": 0.4979884624481201, "train/loss_error": 0.5055363178253174, "train/loss_total": 0.5040267705917358 }, { "epoch": 1.4581886187550093, "step": 5458, "train/loss_ctc": 0.9383150935173035, "train/loss_error": 0.4629143476486206, "train/loss_total": 0.5579944849014282 }, { "epoch": 1.4584557841303767, "step": 5459, "train/loss_ctc": 0.4636540710926056, "train/loss_error": 0.47945457696914673, "train/loss_total": 0.47629448771476746 }, { "epoch": 1.458722949505744, "grad_norm": 2.640505075454712, "learning_rate": 2.125567726422656e-05, "loss": 0.5252, "step": 5460 }, { "epoch": 1.458722949505744, "step": 5460, "train/loss_ctc": 0.3953899145126343, "train/loss_error": 0.47283416986465454, "train/loss_total": 0.4573453366756439 }, { "epoch": 1.4589901148811113, 
"step": 5461, "train/loss_ctc": 1.1544920206069946, "train/loss_error": 0.4591815769672394, "train/loss_total": 0.5982437133789062 }, { "epoch": 1.4592572802564787, "step": 5462, "train/loss_ctc": 2.2101097106933594, "train/loss_error": 0.4944345951080322, "train/loss_total": 0.8375695943832397 }, { "epoch": 1.4595244456318461, "step": 5463, "train/loss_ctc": 0.6189283728599548, "train/loss_error": 0.419439435005188, "train/loss_total": 0.4593372344970703 }, { "epoch": 1.4597916110072133, "step": 5464, "train/loss_ctc": 0.6379728317260742, "train/loss_error": 0.4502573609352112, "train/loss_total": 0.4878004789352417 }, { "epoch": 1.4600587763825807, "step": 5465, "train/loss_ctc": 0.4534633755683899, "train/loss_error": 0.45046180486679077, "train/loss_total": 0.4510621130466461 }, { "epoch": 1.4603259417579482, "step": 5466, "train/loss_ctc": 0.911300539970398, "train/loss_error": 0.46210524439811707, "train/loss_total": 0.5519443154335022 }, { "epoch": 1.4605931071333156, "step": 5467, "train/loss_ctc": 0.8050518035888672, "train/loss_error": 0.4802999198436737, "train/loss_total": 0.5452502965927124 }, { "epoch": 1.460860272508683, "step": 5468, "train/loss_ctc": 0.458707332611084, "train/loss_error": 0.469977468252182, "train/loss_total": 0.46772345900535583 }, { "epoch": 1.4611274378840502, "step": 5469, "train/loss_ctc": 1.4890854358673096, "train/loss_error": 0.5267447829246521, "train/loss_total": 0.7192128896713257 }, { "epoch": 1.4613946032594176, "grad_norm": 2.0493342876434326, "learning_rate": 2.1239647341704517e-05, "loss": 0.5575, "step": 5470 }, { "epoch": 1.4613946032594176, "step": 5470, "train/loss_ctc": 0.5806288123130798, "train/loss_error": 0.42475640773773193, "train/loss_total": 0.4559308886528015 }, { "epoch": 1.461661768634785, "step": 5471, "train/loss_ctc": 0.5090621709823608, "train/loss_error": 0.4772234559059143, "train/loss_total": 0.4835911989212036 }, { "epoch": 1.4619289340101522, "step": 5472, "train/loss_ctc": 
0.8939369916915894, "train/loss_error": 0.4754776060581207, "train/loss_total": 0.5591694712638855 }, { "epoch": 1.4621960993855196, "step": 5473, "train/loss_ctc": 0.6236696243286133, "train/loss_error": 0.4719691872596741, "train/loss_total": 0.5023093223571777 }, { "epoch": 1.462463264760887, "step": 5474, "train/loss_ctc": 0.5139796733856201, "train/loss_error": 0.44443342089653015, "train/loss_total": 0.45834267139434814 }, { "epoch": 1.4627304301362543, "step": 5475, "train/loss_ctc": 0.815506100654602, "train/loss_error": 0.45768043398857117, "train/loss_total": 0.5292456150054932 }, { "epoch": 1.4629975955116217, "step": 5476, "train/loss_ctc": 0.5043811202049255, "train/loss_error": 0.4807423949241638, "train/loss_total": 0.485470175743103 }, { "epoch": 1.463264760886989, "step": 5477, "train/loss_ctc": 0.9799275398254395, "train/loss_error": 0.49325644969940186, "train/loss_total": 0.5905906558036804 }, { "epoch": 1.4635319262623563, "step": 5478, "train/loss_ctc": 0.22143948078155518, "train/loss_error": 0.5167883038520813, "train/loss_total": 0.45771855115890503 }, { "epoch": 1.4637990916377237, "step": 5479, "train/loss_ctc": 1.1049575805664062, "train/loss_error": 0.4199434518814087, "train/loss_total": 0.5569462776184082 }, { "epoch": 1.4640662570130911, "grad_norm": 3.1357262134552, "learning_rate": 2.1223617419182475e-05, "loss": 0.5079, "step": 5480 }, { "epoch": 1.4640662570130911, "step": 5480, "train/loss_ctc": 0.8206772804260254, "train/loss_error": 0.44454866647720337, "train/loss_total": 0.5197744369506836 }, { "epoch": 1.4643334223884585, "step": 5481, "train/loss_ctc": 1.2399287223815918, "train/loss_error": 0.5171307325363159, "train/loss_total": 0.661690354347229 }, { "epoch": 1.4646005877638257, "step": 5482, "train/loss_ctc": 1.2950046062469482, "train/loss_error": 0.5533133149147034, "train/loss_total": 0.7016515731811523 }, { "epoch": 1.4648677531391932, "step": 5483, "train/loss_ctc": 0.6498121619224548, "train/loss_error": 
0.5124616026878357, "train/loss_total": 0.5399317145347595 }, { "epoch": 1.4651349185145606, "step": 5484, "train/loss_ctc": 0.6712576150894165, "train/loss_error": 0.5007680058479309, "train/loss_total": 0.5348659157752991 }, { "epoch": 1.465402083889928, "step": 5485, "train/loss_ctc": 0.8034319877624512, "train/loss_error": 0.42450299859046936, "train/loss_total": 0.5002888441085815 }, { "epoch": 1.4656692492652952, "step": 5486, "train/loss_ctc": 1.0369398593902588, "train/loss_error": 0.47707149386405945, "train/loss_total": 0.5890451669692993 }, { "epoch": 1.4659364146406626, "step": 5487, "train/loss_ctc": 0.8734276294708252, "train/loss_error": 0.46401962637901306, "train/loss_total": 0.5459012389183044 }, { "epoch": 1.46620358001603, "step": 5488, "train/loss_ctc": 0.9654369950294495, "train/loss_error": 0.5014836192131042, "train/loss_total": 0.5942742824554443 }, { "epoch": 1.4664707453913972, "step": 5489, "train/loss_ctc": 0.8509366512298584, "train/loss_error": 0.5228123664855957, "train/loss_total": 0.5884372591972351 }, { "epoch": 1.4667379107667646, "grad_norm": 2.603508472442627, "learning_rate": 2.1207587496660433e-05, "loss": 0.5776, "step": 5490 }, { "epoch": 1.4667379107667646, "step": 5490, "train/loss_ctc": 0.533903956413269, "train/loss_error": 0.447498619556427, "train/loss_total": 0.46477970480918884 }, { "epoch": 1.467005076142132, "step": 5491, "train/loss_ctc": 0.41334930062294006, "train/loss_error": 0.4360560178756714, "train/loss_total": 0.4315146803855896 }, { "epoch": 1.4672722415174992, "step": 5492, "train/loss_ctc": 0.6257545948028564, "train/loss_error": 0.3939532935619354, "train/loss_total": 0.44031354784965515 }, { "epoch": 1.4675394068928667, "step": 5493, "train/loss_ctc": 1.167897343635559, "train/loss_error": 0.4995839595794678, "train/loss_total": 0.633246660232544 }, { "epoch": 1.467806572268234, "step": 5494, "train/loss_ctc": 1.0683627128601074, "train/loss_error": 0.3878941237926483, "train/loss_total": 
0.523987889289856 }, { "epoch": 1.4680737376436013, "step": 5495, "train/loss_ctc": 1.1052964925765991, "train/loss_error": 0.4850025773048401, "train/loss_total": 0.6090613603591919 }, { "epoch": 1.4683409030189687, "step": 5496, "train/loss_ctc": 0.5349043607711792, "train/loss_error": 0.5075548887252808, "train/loss_total": 0.5130248069763184 }, { "epoch": 1.468608068394336, "step": 5497, "train/loss_ctc": 0.9890537858009338, "train/loss_error": 0.46017369627952576, "train/loss_total": 0.5659497380256653 }, { "epoch": 1.4688752337697035, "step": 5498, "train/loss_ctc": 0.5975779294967651, "train/loss_error": 0.47551557421684265, "train/loss_total": 0.4999280571937561 }, { "epoch": 1.4691423991450707, "step": 5499, "train/loss_ctc": 0.48431122303009033, "train/loss_error": 0.4449974596500397, "train/loss_total": 0.4528602361679077 }, { "epoch": 1.4694095645204381, "grad_norm": 6.236603736877441, "learning_rate": 2.119155757413839e-05, "loss": 0.5135, "step": 5500 }, { "epoch": 1.4694095645204381, "step": 5500, "train/loss_ctc": 0.9359833002090454, "train/loss_error": 0.4492517113685608, "train/loss_total": 0.5465980172157288 }, { "epoch": 1.4696767298958056, "step": 5501, "train/loss_ctc": 0.3248089551925659, "train/loss_error": 0.49615123867988586, "train/loss_total": 0.4618827998638153 }, { "epoch": 1.469943895271173, "step": 5502, "train/loss_ctc": 1.3028483390808105, "train/loss_error": 0.5045719742774963, "train/loss_total": 0.6642272472381592 }, { "epoch": 1.4702110606465402, "step": 5503, "train/loss_ctc": 0.5822073221206665, "train/loss_error": 0.4614131450653076, "train/loss_total": 0.4855719804763794 }, { "epoch": 1.4704782260219076, "step": 5504, "train/loss_ctc": 0.693912923336029, "train/loss_error": 0.4611395001411438, "train/loss_total": 0.5076941847801208 }, { "epoch": 1.470745391397275, "step": 5505, "train/loss_ctc": 1.2663700580596924, "train/loss_error": 0.48202988505363464, "train/loss_total": 0.6388978958129883 }, { "epoch": 
1.4710125567726422, "step": 5506, "train/loss_ctc": 0.7354477643966675, "train/loss_error": 0.4434972107410431, "train/loss_total": 0.501887321472168 }, { "epoch": 1.4712797221480096, "step": 5507, "train/loss_ctc": 0.8482105731964111, "train/loss_error": 0.47879642248153687, "train/loss_total": 0.5526793003082275 }, { "epoch": 1.471546887523377, "step": 5508, "train/loss_ctc": 0.796928346157074, "train/loss_error": 0.5441271662712097, "train/loss_total": 0.5946874022483826 }, { "epoch": 1.4718140528987442, "step": 5509, "train/loss_ctc": 1.2159138917922974, "train/loss_error": 0.42353999614715576, "train/loss_total": 0.582014799118042 }, { "epoch": 1.4720812182741116, "grad_norm": 1.457221508026123, "learning_rate": 2.117552765161635e-05, "loss": 0.5536, "step": 5510 }, { "epoch": 1.4720812182741116, "step": 5510, "train/loss_ctc": 0.9102177023887634, "train/loss_error": 0.45588353276252747, "train/loss_total": 0.5467503666877747 }, { "epoch": 1.472348383649479, "step": 5511, "train/loss_ctc": 1.9137508869171143, "train/loss_error": 0.4815015494823456, "train/loss_total": 0.7679514288902283 }, { "epoch": 1.4726155490248463, "step": 5512, "train/loss_ctc": 0.5390665531158447, "train/loss_error": 0.45898136496543884, "train/loss_total": 0.474998414516449 }, { "epoch": 1.4728827144002137, "step": 5513, "train/loss_ctc": 0.4128430187702179, "train/loss_error": 0.5522167682647705, "train/loss_total": 0.5243420004844666 }, { "epoch": 1.473149879775581, "step": 5514, "train/loss_ctc": 0.6782515048980713, "train/loss_error": 0.4970196783542633, "train/loss_total": 0.5332660675048828 }, { "epoch": 1.4734170451509485, "step": 5515, "train/loss_ctc": 0.44459861516952515, "train/loss_error": 0.4598531126976013, "train/loss_total": 0.45680221915245056 }, { "epoch": 1.4736842105263157, "step": 5516, "train/loss_ctc": 1.0560437440872192, "train/loss_error": 0.45808646082878113, "train/loss_total": 0.5776779055595398 }, { "epoch": 1.4739513759016831, "step": 5517, 
"train/loss_ctc": 1.72127366065979, "train/loss_error": 0.5692620873451233, "train/loss_total": 0.7996643781661987 }, { "epoch": 1.4742185412770505, "step": 5518, "train/loss_ctc": 0.818909764289856, "train/loss_error": 0.45159095525741577, "train/loss_total": 0.5250547528266907 }, { "epoch": 1.474485706652418, "step": 5519, "train/loss_ctc": 0.7443466782569885, "train/loss_error": 0.4640527069568634, "train/loss_total": 0.5201115012168884 }, { "epoch": 1.4747528720277852, "grad_norm": 2.642932176589966, "learning_rate": 2.115949772909431e-05, "loss": 0.5727, "step": 5520 }, { "epoch": 1.4747528720277852, "step": 5520, "train/loss_ctc": 1.3342716693878174, "train/loss_error": 0.49314284324645996, "train/loss_total": 0.6613686084747314 }, { "epoch": 1.4750200374031526, "step": 5521, "train/loss_ctc": 0.4857069253921509, "train/loss_error": 0.4108986258506775, "train/loss_total": 0.42586028575897217 }, { "epoch": 1.47528720277852, "step": 5522, "train/loss_ctc": 0.7106937766075134, "train/loss_error": 0.44274091720581055, "train/loss_total": 0.49633151292800903 }, { "epoch": 1.4755543681538872, "step": 5523, "train/loss_ctc": 0.9103212356567383, "train/loss_error": 0.5368189215660095, "train/loss_total": 0.6115193963050842 }, { "epoch": 1.4758215335292546, "step": 5524, "train/loss_ctc": 0.5319739580154419, "train/loss_error": 0.5011774301528931, "train/loss_total": 0.5073367357254028 }, { "epoch": 1.476088698904622, "step": 5525, "train/loss_ctc": 0.5866239070892334, "train/loss_error": 0.48040130734443665, "train/loss_total": 0.5016458630561829 }, { "epoch": 1.4763558642799892, "step": 5526, "train/loss_ctc": 0.7656022310256958, "train/loss_error": 0.4766453802585602, "train/loss_total": 0.5344367623329163 }, { "epoch": 1.4766230296553566, "step": 5527, "train/loss_ctc": 0.7449448704719543, "train/loss_error": 0.4253481924533844, "train/loss_total": 0.4892675280570984 }, { "epoch": 1.476890195030724, "step": 5528, "train/loss_ctc": 0.7140339612960815, 
"train/loss_error": 0.5015438795089722, "train/loss_total": 0.5440418720245361 }, { "epoch": 1.4771573604060912, "step": 5529, "train/loss_ctc": 1.0856190919876099, "train/loss_error": 0.43908989429473877, "train/loss_total": 0.568395733833313 }, { "epoch": 1.4774245257814587, "grad_norm": 1.2268167734146118, "learning_rate": 2.114346780657227e-05, "loss": 0.534, "step": 5530 }, { "epoch": 1.4774245257814587, "step": 5530, "train/loss_ctc": 0.7664775848388672, "train/loss_error": 0.5229417681694031, "train/loss_total": 0.5716489553451538 }, { "epoch": 1.477691691156826, "step": 5531, "train/loss_ctc": 0.7472148537635803, "train/loss_error": 0.5102072954177856, "train/loss_total": 0.5576088428497314 }, { "epoch": 1.4779588565321935, "step": 5532, "train/loss_ctc": 0.6821613311767578, "train/loss_error": 0.5372351408004761, "train/loss_total": 0.5662204027175903 }, { "epoch": 1.478226021907561, "step": 5533, "train/loss_ctc": 0.5356047749519348, "train/loss_error": 0.46680667996406555, "train/loss_total": 0.4805663228034973 }, { "epoch": 1.4784931872829281, "step": 5534, "train/loss_ctc": 0.8152424097061157, "train/loss_error": 0.4536649286746979, "train/loss_total": 0.5259804129600525 }, { "epoch": 1.4787603526582955, "step": 5535, "train/loss_ctc": 1.4475560188293457, "train/loss_error": 0.4794059693813324, "train/loss_total": 0.6730359792709351 }, { "epoch": 1.479027518033663, "step": 5536, "train/loss_ctc": 0.7480941414833069, "train/loss_error": 0.547738790512085, "train/loss_total": 0.5878098607063293 }, { "epoch": 1.4792946834090301, "step": 5537, "train/loss_ctc": 0.8112889528274536, "train/loss_error": 0.48181724548339844, "train/loss_total": 0.5477116107940674 }, { "epoch": 1.4795618487843976, "step": 5538, "train/loss_ctc": 1.198953628540039, "train/loss_error": 0.41590040922164917, "train/loss_total": 0.5725110769271851 }, { "epoch": 1.479829014159765, "step": 5539, "train/loss_ctc": 0.5766825079917908, "train/loss_error": 0.47268688678741455, 
"train/loss_total": 0.4934860169887543 }, { "epoch": 1.4800961795351322, "grad_norm": 1.9268743991851807, "learning_rate": 2.1127437884050227e-05, "loss": 0.5577, "step": 5540 }, { "epoch": 1.4800961795351322, "step": 5540, "train/loss_ctc": 0.6421362161636353, "train/loss_error": 0.4721354842185974, "train/loss_total": 0.5061356425285339 }, { "epoch": 1.4803633449104996, "step": 5541, "train/loss_ctc": 1.0487914085388184, "train/loss_error": 0.5212680101394653, "train/loss_total": 0.6267727017402649 }, { "epoch": 1.480630510285867, "step": 5542, "train/loss_ctc": 0.6317393183708191, "train/loss_error": 0.4388779103755951, "train/loss_total": 0.4774501919746399 }, { "epoch": 1.4808976756612342, "step": 5543, "train/loss_ctc": 0.9282828569412231, "train/loss_error": 0.4262097477912903, "train/loss_total": 0.5266243815422058 }, { "epoch": 1.4811648410366016, "step": 5544, "train/loss_ctc": 1.104176640510559, "train/loss_error": 0.49867016077041626, "train/loss_total": 0.6197714805603027 }, { "epoch": 1.481432006411969, "step": 5545, "train/loss_ctc": 0.6453990936279297, "train/loss_error": 0.43039217591285706, "train/loss_total": 0.4733935594558716 }, { "epoch": 1.4816991717873362, "step": 5546, "train/loss_ctc": 0.8376531004905701, "train/loss_error": 0.4993560314178467, "train/loss_total": 0.5670154690742493 }, { "epoch": 1.4819663371627037, "step": 5547, "train/loss_ctc": 0.3035831153392792, "train/loss_error": 0.40741679072380066, "train/loss_total": 0.38665005564689636 }, { "epoch": 1.482233502538071, "step": 5548, "train/loss_ctc": 0.8118211030960083, "train/loss_error": 0.4862988293170929, "train/loss_total": 0.551403284072876 }, { "epoch": 1.4825006679134385, "step": 5549, "train/loss_ctc": 0.6135203838348389, "train/loss_error": 0.4161509871482849, "train/loss_total": 0.45562487840652466 }, { "epoch": 1.482767833288806, "grad_norm": 3.585256814956665, "learning_rate": 2.1111407961528185e-05, "loss": 0.5191, "step": 5550 }, { "epoch": 1.482767833288806, 
"step": 5550, "train/loss_ctc": 1.897675633430481, "train/loss_error": 0.4914216697216034, "train/loss_total": 0.7726724743843079 }, { "epoch": 1.483034998664173, "step": 5551, "train/loss_ctc": 1.0037966966629028, "train/loss_error": 0.42191192507743835, "train/loss_total": 0.5382888913154602 }, { "epoch": 1.4833021640395405, "step": 5552, "train/loss_ctc": 1.0510002374649048, "train/loss_error": 0.4728534519672394, "train/loss_total": 0.5884827971458435 }, { "epoch": 1.483569329414908, "step": 5553, "train/loss_ctc": 0.8553116917610168, "train/loss_error": 0.5331462621688843, "train/loss_total": 0.5975793600082397 }, { "epoch": 1.4838364947902751, "step": 5554, "train/loss_ctc": 0.4441726803779602, "train/loss_error": 0.4278842508792877, "train/loss_total": 0.4311419725418091 }, { "epoch": 1.4841036601656425, "step": 5555, "train/loss_ctc": 0.5513759851455688, "train/loss_error": 0.5181087255477905, "train/loss_total": 0.5247621536254883 }, { "epoch": 1.48437082554101, "step": 5556, "train/loss_ctc": 0.5758519768714905, "train/loss_error": 0.48960909247398376, "train/loss_total": 0.506857693195343 }, { "epoch": 1.4846379909163772, "step": 5557, "train/loss_ctc": 0.278656005859375, "train/loss_error": 0.44369640946388245, "train/loss_total": 0.4106883406639099 }, { "epoch": 1.4849051562917446, "step": 5558, "train/loss_ctc": 1.38014817237854, "train/loss_error": 0.4807837903499603, "train/loss_total": 0.6606566905975342 }, { "epoch": 1.485172321667112, "step": 5559, "train/loss_ctc": 0.5527909994125366, "train/loss_error": 0.5310618877410889, "train/loss_total": 0.5354077219963074 }, { "epoch": 1.4854394870424792, "grad_norm": 1.7730506658554077, "learning_rate": 2.1095378039006146e-05, "loss": 0.5567, "step": 5560 }, { "epoch": 1.4854394870424792, "step": 5560, "train/loss_ctc": 0.6348516941070557, "train/loss_error": 0.47460123896598816, "train/loss_total": 0.5066513419151306 }, { "epoch": 1.4857066524178466, "step": 5561, "train/loss_ctc": 0.40474769473075867, 
"train/loss_error": 0.46418821811676025, "train/loss_total": 0.45230013132095337 }, { "epoch": 1.485973817793214, "step": 5562, "train/loss_ctc": 0.5694713592529297, "train/loss_error": 0.5330010652542114, "train/loss_total": 0.5402951240539551 }, { "epoch": 1.4862409831685812, "step": 5563, "train/loss_ctc": 0.5140627026557922, "train/loss_error": 0.4862785041332245, "train/loss_total": 0.491835355758667 }, { "epoch": 1.4865081485439486, "step": 5564, "train/loss_ctc": 1.6596043109893799, "train/loss_error": 0.467669278383255, "train/loss_total": 0.7060562968254089 }, { "epoch": 1.486775313919316, "step": 5565, "train/loss_ctc": 0.8895759582519531, "train/loss_error": 0.4422304034233093, "train/loss_total": 0.531699538230896 }, { "epoch": 1.4870424792946835, "step": 5566, "train/loss_ctc": 1.9778389930725098, "train/loss_error": 0.442074179649353, "train/loss_total": 0.7492271661758423 }, { "epoch": 1.487309644670051, "step": 5567, "train/loss_ctc": 0.6761622428894043, "train/loss_error": 0.5163634419441223, "train/loss_total": 0.5483232140541077 }, { "epoch": 1.487576810045418, "step": 5568, "train/loss_ctc": 0.7020810842514038, "train/loss_error": 0.4576941132545471, "train/loss_total": 0.5065715312957764 }, { "epoch": 1.4878439754207855, "step": 5569, "train/loss_ctc": 1.1798741817474365, "train/loss_error": 0.4485444128513336, "train/loss_total": 0.5948103666305542 }, { "epoch": 1.488111140796153, "grad_norm": 2.6004443168640137, "learning_rate": 2.1079348116484104e-05, "loss": 0.5628, "step": 5570 }, { "epoch": 1.488111140796153, "step": 5570, "train/loss_ctc": 1.2869521379470825, "train/loss_error": 0.48615947365760803, "train/loss_total": 0.6463180184364319 }, { "epoch": 1.4883783061715201, "step": 5571, "train/loss_ctc": 1.291456937789917, "train/loss_error": 0.49889349937438965, "train/loss_total": 0.657406210899353 }, { "epoch": 1.4886454715468875, "step": 5572, "train/loss_ctc": 0.6745131611824036, "train/loss_error": 0.42413610219955444, 
"train/loss_total": 0.47421151399612427 }, { "epoch": 1.488912636922255, "step": 5573, "train/loss_ctc": 0.3777053952217102, "train/loss_error": 0.5132876634597778, "train/loss_total": 0.4861712157726288 }, { "epoch": 1.4891798022976221, "step": 5574, "train/loss_ctc": 1.0264859199523926, "train/loss_error": 0.49990442395210266, "train/loss_total": 0.6052207350730896 }, { "epoch": 1.4894469676729896, "step": 5575, "train/loss_ctc": 0.37169715762138367, "train/loss_error": 0.4360818862915039, "train/loss_total": 0.4232049584388733 }, { "epoch": 1.489714133048357, "step": 5576, "train/loss_ctc": 0.6202408671379089, "train/loss_error": 0.41157400608062744, "train/loss_total": 0.4533073902130127 }, { "epoch": 1.4899812984237242, "step": 5577, "train/loss_ctc": 1.249495506286621, "train/loss_error": 0.4863450825214386, "train/loss_total": 0.638975203037262 }, { "epoch": 1.4902484637990916, "step": 5578, "train/loss_ctc": 0.8297804594039917, "train/loss_error": 0.4546912610530853, "train/loss_total": 0.5297091007232666 }, { "epoch": 1.490515629174459, "step": 5579, "train/loss_ctc": 1.1819398403167725, "train/loss_error": 0.49558085203170776, "train/loss_total": 0.6328526735305786 }, { "epoch": 1.4907827945498262, "grad_norm": 1.7638535499572754, "learning_rate": 2.1063318193962066e-05, "loss": 0.5547, "step": 5580 }, { "epoch": 1.4907827945498262, "step": 5580, "train/loss_ctc": 1.0093059539794922, "train/loss_error": 0.5554883480072021, "train/loss_total": 0.6462518572807312 }, { "epoch": 1.4910499599251936, "step": 5581, "train/loss_ctc": 0.5344458818435669, "train/loss_error": 0.48757126927375793, "train/loss_total": 0.49694621562957764 }, { "epoch": 1.491317125300561, "step": 5582, "train/loss_ctc": 1.1677825450897217, "train/loss_error": 0.5226927399635315, "train/loss_total": 0.6517107486724854 }, { "epoch": 1.4915842906759285, "step": 5583, "train/loss_ctc": 0.33831021189689636, "train/loss_error": 0.39035764336586, "train/loss_total": 0.37994813919067383 }, { 
"epoch": 1.4918514560512959, "step": 5584, "train/loss_ctc": 0.8206830024719238, "train/loss_error": 0.5281939506530762, "train/loss_total": 0.5866917967796326 }, { "epoch": 1.492118621426663, "step": 5585, "train/loss_ctc": 1.2248618602752686, "train/loss_error": 0.44011732935905457, "train/loss_total": 0.5970662832260132 }, { "epoch": 1.4923857868020305, "step": 5586, "train/loss_ctc": 0.543387770652771, "train/loss_error": 0.4360630512237549, "train/loss_total": 0.4575280249118805 }, { "epoch": 1.492652952177398, "step": 5587, "train/loss_ctc": 0.5625454187393188, "train/loss_error": 0.4573386013507843, "train/loss_total": 0.4783799648284912 }, { "epoch": 1.492920117552765, "step": 5588, "train/loss_ctc": 1.1216129064559937, "train/loss_error": 0.5247852206230164, "train/loss_total": 0.6441507339477539 }, { "epoch": 1.4931872829281325, "step": 5589, "train/loss_ctc": 0.45610296726226807, "train/loss_error": 0.44630181789398193, "train/loss_total": 0.4482620358467102 }, { "epoch": 1.4934544483035, "grad_norm": 1.7504626512527466, "learning_rate": 2.1047288271440024e-05, "loss": 0.5387, "step": 5590 }, { "epoch": 1.4934544483035, "step": 5590, "train/loss_ctc": 1.3330141305923462, "train/loss_error": 0.461649626493454, "train/loss_total": 0.6359225511550903 }, { "epoch": 1.4937216136788671, "step": 5591, "train/loss_ctc": 0.7729636430740356, "train/loss_error": 0.48637786507606506, "train/loss_total": 0.5436950325965881 }, { "epoch": 1.4939887790542346, "step": 5592, "train/loss_ctc": 1.1130915880203247, "train/loss_error": 0.4481450319290161, "train/loss_total": 0.5811343789100647 }, { "epoch": 1.494255944429602, "step": 5593, "train/loss_ctc": 0.5424082279205322, "train/loss_error": 0.4785526394844055, "train/loss_total": 0.4913237690925598 }, { "epoch": 1.4945231098049692, "step": 5594, "train/loss_ctc": 0.9333027601242065, "train/loss_error": 0.4582194983959198, "train/loss_total": 0.5532361268997192 }, { "epoch": 1.4947902751803366, "step": 5595, 
"train/loss_ctc": 0.7419062852859497, "train/loss_error": 0.4472287893295288, "train/loss_total": 0.5061643123626709 }, { "epoch": 1.495057440555704, "step": 5596, "train/loss_ctc": 0.6265877485275269, "train/loss_error": 0.4771483838558197, "train/loss_total": 0.5070362687110901 }, { "epoch": 1.4953246059310714, "step": 5597, "train/loss_ctc": 1.0003077983856201, "train/loss_error": 0.5605902671813965, "train/loss_total": 0.648533821105957 }, { "epoch": 1.4955917713064386, "step": 5598, "train/loss_ctc": 0.3855619430541992, "train/loss_error": 0.4349384009838104, "train/loss_total": 0.4250631332397461 }, { "epoch": 1.495858936681806, "step": 5599, "train/loss_ctc": 0.5156978368759155, "train/loss_error": 0.4348810017108917, "train/loss_total": 0.45104438066482544 }, { "epoch": 1.4961261020571734, "grad_norm": 1.422116756439209, "learning_rate": 2.1031258348917982e-05, "loss": 0.5343, "step": 5600 }, { "epoch": 1.4961261020571734, "step": 5600, "train/loss_ctc": 0.32866206765174866, "train/loss_error": 0.4854058027267456, "train/loss_total": 0.45405706763267517 }, { "epoch": 1.4963932674325409, "step": 5601, "train/loss_ctc": 0.9631093740463257, "train/loss_error": 0.42033666372299194, "train/loss_total": 0.5288912057876587 }, { "epoch": 1.496660432807908, "step": 5602, "train/loss_ctc": 1.0527641773223877, "train/loss_error": 0.5071070790290833, "train/loss_total": 0.6162384748458862 }, { "epoch": 1.4969275981832755, "step": 5603, "train/loss_ctc": 0.4008345603942871, "train/loss_error": 0.452784925699234, "train/loss_total": 0.44239485263824463 }, { "epoch": 1.497194763558643, "step": 5604, "train/loss_ctc": 0.48157358169555664, "train/loss_error": 0.39086633920669556, "train/loss_total": 0.40900781750679016 }, { "epoch": 1.49746192893401, "step": 5605, "train/loss_ctc": 1.315657138824463, "train/loss_error": 0.44521355628967285, "train/loss_total": 0.6193022727966309 }, { "epoch": 1.4977290943093775, "step": 5606, "train/loss_ctc": 1.1159586906433105, 
"train/loss_error": 0.47878316044807434, "train/loss_total": 0.6062182784080505 }, { "epoch": 1.497996259684745, "step": 5607, "train/loss_ctc": 0.6574593782424927, "train/loss_error": 0.4385148286819458, "train/loss_total": 0.4823037385940552 }, { "epoch": 1.4982634250601121, "step": 5608, "train/loss_ctc": 0.4349766969680786, "train/loss_error": 0.42926689982414246, "train/loss_total": 0.43040886521339417 }, { "epoch": 1.4985305904354795, "step": 5609, "train/loss_ctc": 0.6142482757568359, "train/loss_error": 0.5244221687316895, "train/loss_total": 0.5423874258995056 }, { "epoch": 1.498797755810847, "grad_norm": 2.0443193912506104, "learning_rate": 2.101522842639594e-05, "loss": 0.5131, "step": 5610 }, { "epoch": 1.498797755810847, "step": 5610, "train/loss_ctc": 1.6246448755264282, "train/loss_error": 0.4364332854747772, "train/loss_total": 0.6740756034851074 }, { "epoch": 1.4990649211862141, "step": 5611, "train/loss_ctc": 0.6340487599372864, "train/loss_error": 0.45285943150520325, "train/loss_total": 0.4890972971916199 }, { "epoch": 1.4993320865615816, "step": 5612, "train/loss_ctc": 0.6993587017059326, "train/loss_error": 0.45528700947761536, "train/loss_total": 0.5041013956069946 }, { "epoch": 1.499599251936949, "step": 5613, "train/loss_ctc": 0.9702046513557434, "train/loss_error": 0.48969608545303345, "train/loss_total": 0.5857977867126465 }, { "epoch": 1.4998664173123164, "step": 5614, "train/loss_ctc": 0.3995402455329895, "train/loss_error": 0.5156890153884888, "train/loss_total": 0.4924592673778534 }, { "epoch": 1.5001335826876838, "step": 5615, "train/loss_ctc": 1.6052610874176025, "train/loss_error": 0.4999842345714569, "train/loss_total": 0.7210396528244019 }, { "epoch": 1.500400748063051, "step": 5616, "train/loss_ctc": 0.9801728129386902, "train/loss_error": 0.5531495809555054, "train/loss_total": 0.6385542154312134 }, { "epoch": 1.5006679134384184, "step": 5617, "train/loss_ctc": 0.8966180682182312, "train/loss_error": 0.452495813369751, 
"train/loss_total": 0.541320264339447 }, { "epoch": 1.5009350788137859, "step": 5618, "train/loss_ctc": 0.7002924084663391, "train/loss_error": 0.39109066128730774, "train/loss_total": 0.4529310166835785 }, { "epoch": 1.501202244189153, "step": 5619, "train/loss_ctc": 1.170264720916748, "train/loss_error": 0.46910348534584045, "train/loss_total": 0.609335720539093 }, { "epoch": 1.5014694095645205, "grad_norm": 1.2924039363861084, "learning_rate": 2.0999198503873898e-05, "loss": 0.5709, "step": 5620 }, { "epoch": 1.5014694095645205, "step": 5620, "train/loss_ctc": 0.9238802194595337, "train/loss_error": 0.5035936236381531, "train/loss_total": 0.5876509547233582 }, { "epoch": 1.5017365749398879, "step": 5621, "train/loss_ctc": 0.7469303011894226, "train/loss_error": 0.4471312165260315, "train/loss_total": 0.5070910453796387 }, { "epoch": 1.502003740315255, "step": 5622, "train/loss_ctc": 0.4867273271083832, "train/loss_error": 0.4869428873062134, "train/loss_total": 0.48689979314804077 }, { "epoch": 1.5022709056906225, "step": 5623, "train/loss_ctc": 0.20412299036979675, "train/loss_error": 0.4292532503604889, "train/loss_total": 0.3842271864414215 }, { "epoch": 1.50253807106599, "step": 5624, "train/loss_ctc": 1.4649834632873535, "train/loss_error": 0.5195184946060181, "train/loss_total": 0.7086114883422852 }, { "epoch": 1.502805236441357, "step": 5625, "train/loss_ctc": 0.63547682762146, "train/loss_error": 0.5001234412193298, "train/loss_total": 0.5271941423416138 }, { "epoch": 1.5030724018167245, "step": 5626, "train/loss_ctc": 1.3240249156951904, "train/loss_error": 0.4765215814113617, "train/loss_total": 0.6460222601890564 }, { "epoch": 1.503339567192092, "step": 5627, "train/loss_ctc": 0.5644556283950806, "train/loss_error": 0.47062817215919495, "train/loss_total": 0.4893936812877655 }, { "epoch": 1.5036067325674591, "step": 5628, "train/loss_ctc": 1.2333505153656006, "train/loss_error": 0.43214038014411926, "train/loss_total": 0.5923824310302734 }, { "epoch": 
1.5038738979428268, "step": 5629, "train/loss_ctc": 0.9530313014984131, "train/loss_error": 0.43178263306617737, "train/loss_total": 0.5360323786735535 }, { "epoch": 1.504141063318194, "grad_norm": 1.1344596147537231, "learning_rate": 2.0983168581351856e-05, "loss": 0.5466, "step": 5630 }, { "epoch": 1.504141063318194, "step": 5630, "train/loss_ctc": 0.7136527299880981, "train/loss_error": 0.4912050664424896, "train/loss_total": 0.5356945991516113 }, { "epoch": 1.5044082286935612, "step": 5631, "train/loss_ctc": 0.943878173828125, "train/loss_error": 0.4476628601551056, "train/loss_total": 0.5469059348106384 }, { "epoch": 1.5046753940689288, "step": 5632, "train/loss_ctc": 0.806817889213562, "train/loss_error": 0.49433305859565735, "train/loss_total": 0.5568300485610962 }, { "epoch": 1.504942559444296, "step": 5633, "train/loss_ctc": 0.6331459283828735, "train/loss_error": 0.4470307230949402, "train/loss_total": 0.48425376415252686 }, { "epoch": 1.5052097248196634, "step": 5634, "train/loss_ctc": 0.490050733089447, "train/loss_error": 0.46231672167778015, "train/loss_total": 0.467863529920578 }, { "epoch": 1.5054768901950308, "step": 5635, "train/loss_ctc": 1.2905972003936768, "train/loss_error": 0.4873892366886139, "train/loss_total": 0.6480308175086975 }, { "epoch": 1.505744055570398, "step": 5636, "train/loss_ctc": 0.43275564908981323, "train/loss_error": 0.4037872552871704, "train/loss_total": 0.40958094596862793 }, { "epoch": 1.5060112209457654, "step": 5637, "train/loss_ctc": 0.9004573822021484, "train/loss_error": 0.4897226393222809, "train/loss_total": 0.5718696117401123 }, { "epoch": 1.5062783863211329, "step": 5638, "train/loss_ctc": 0.6603778004646301, "train/loss_error": 0.5456422567367554, "train/loss_total": 0.5685893893241882 }, { "epoch": 1.5065455516965, "step": 5639, "train/loss_ctc": 1.9349019527435303, "train/loss_error": 0.4619254171848297, "train/loss_total": 0.7565207481384277 }, { "epoch": 1.5068127170718675, "grad_norm": 2.071741819381714, 
"learning_rate": 2.0967138658829818e-05, "loss": 0.5546, "step": 5640 }, { "epoch": 1.5068127170718675, "step": 5640, "train/loss_ctc": 0.46499067544937134, "train/loss_error": 0.4231385290622711, "train/loss_total": 0.43150898814201355 }, { "epoch": 1.507079882447235, "step": 5641, "train/loss_ctc": 0.48620909452438354, "train/loss_error": 0.47299739718437195, "train/loss_total": 0.4756397306919098 }, { "epoch": 1.507347047822602, "step": 5642, "train/loss_ctc": 0.7395942211151123, "train/loss_error": 0.3829348385334015, "train/loss_total": 0.4542667269706726 }, { "epoch": 1.5076142131979695, "step": 5643, "train/loss_ctc": 0.4707852602005005, "train/loss_error": 0.454010546207428, "train/loss_total": 0.4573655128479004 }, { "epoch": 1.507881378573337, "step": 5644, "train/loss_ctc": 1.017653465270996, "train/loss_error": 0.46571773290634155, "train/loss_total": 0.5761048793792725 }, { "epoch": 1.5081485439487041, "step": 5645, "train/loss_ctc": 0.8066582083702087, "train/loss_error": 0.4140867590904236, "train/loss_total": 0.49260103702545166 }, { "epoch": 1.5084157093240718, "step": 5646, "train/loss_ctc": 0.35773807764053345, "train/loss_error": 0.4950718283653259, "train/loss_total": 0.4676050841808319 }, { "epoch": 1.508682874699439, "step": 5647, "train/loss_ctc": 0.3863132894039154, "train/loss_error": 0.5312708616256714, "train/loss_total": 0.5022793412208557 }, { "epoch": 1.5089500400748062, "step": 5648, "train/loss_ctc": 0.526274561882019, "train/loss_error": 0.48529431223869324, "train/loss_total": 0.4934903681278229 }, { "epoch": 1.5092172054501738, "step": 5649, "train/loss_ctc": 0.6392713785171509, "train/loss_error": 0.4944293797016144, "train/loss_total": 0.5233978033065796 }, { "epoch": 1.509484370825541, "grad_norm": 3.256676197052002, "learning_rate": 2.0951108736307776e-05, "loss": 0.4874, "step": 5650 }, { "epoch": 1.509484370825541, "step": 5650, "train/loss_ctc": 0.8897608518600464, "train/loss_error": 0.41863343119621277, 
"train/loss_total": 0.5128589272499084 }, { "epoch": 1.5097515362009084, "step": 5651, "train/loss_ctc": 0.4853319823741913, "train/loss_error": 0.39917856454849243, "train/loss_total": 0.4164092540740967 }, { "epoch": 1.5100187015762758, "step": 5652, "train/loss_ctc": 0.9623194932937622, "train/loss_error": 0.3965635895729065, "train/loss_total": 0.5097147822380066 }, { "epoch": 1.510285866951643, "step": 5653, "train/loss_ctc": 0.6172670125961304, "train/loss_error": 0.4841442108154297, "train/loss_total": 0.5107687711715698 }, { "epoch": 1.5105530323270104, "step": 5654, "train/loss_ctc": 1.7135906219482422, "train/loss_error": 0.4663706421852112, "train/loss_total": 0.7158146500587463 }, { "epoch": 1.5108201977023779, "step": 5655, "train/loss_ctc": 0.6248667240142822, "train/loss_error": 0.49515438079833984, "train/loss_total": 0.5210968255996704 }, { "epoch": 1.511087363077745, "step": 5656, "train/loss_ctc": 0.3827788829803467, "train/loss_error": 0.4655972719192505, "train/loss_total": 0.44903361797332764 }, { "epoch": 1.5113545284531125, "step": 5657, "train/loss_ctc": 0.24635373055934906, "train/loss_error": 0.4455716013908386, "train/loss_total": 0.4057280421257019 }, { "epoch": 1.5116216938284799, "step": 5658, "train/loss_ctc": 0.829034686088562, "train/loss_error": 0.47554314136505127, "train/loss_total": 0.5462414622306824 }, { "epoch": 1.511888859203847, "step": 5659, "train/loss_ctc": 0.9482371211051941, "train/loss_error": 0.4996577799320221, "train/loss_total": 0.5893736481666565 }, { "epoch": 1.5121560245792145, "grad_norm": 1.598175287246704, "learning_rate": 2.0935078813785734e-05, "loss": 0.5177, "step": 5660 }, { "epoch": 1.5121560245792145, "step": 5660, "train/loss_ctc": 0.9355138540267944, "train/loss_error": 0.38218459486961365, "train/loss_total": 0.49285048246383667 }, { "epoch": 1.512423189954582, "step": 5661, "train/loss_ctc": 0.8167393207550049, "train/loss_error": 0.5074491500854492, "train/loss_total": 0.5693072080612183 }, { 
"epoch": 1.512690355329949, "step": 5662, "train/loss_ctc": 0.7069038152694702, "train/loss_error": 0.4085135757923126, "train/loss_total": 0.46819162368774414 }, { "epoch": 1.5129575207053167, "step": 5663, "train/loss_ctc": 0.5861591100692749, "train/loss_error": 0.43653926253318787, "train/loss_total": 0.46646323800086975 }, { "epoch": 1.513224686080684, "step": 5664, "train/loss_ctc": 0.7540863752365112, "train/loss_error": 0.4473542273044586, "train/loss_total": 0.5087006688117981 }, { "epoch": 1.5134918514560511, "step": 5665, "train/loss_ctc": 0.6325290203094482, "train/loss_error": 0.4532466232776642, "train/loss_total": 0.48910313844680786 }, { "epoch": 1.5137590168314188, "step": 5666, "train/loss_ctc": 1.0312412977218628, "train/loss_error": 0.43000328540802, "train/loss_total": 0.5502508878707886 }, { "epoch": 1.514026182206786, "step": 5667, "train/loss_ctc": 1.063823938369751, "train/loss_error": 0.44118571281433105, "train/loss_total": 0.5657133460044861 }, { "epoch": 1.5142933475821534, "step": 5668, "train/loss_ctc": 0.4985393285751343, "train/loss_error": 0.4410982131958008, "train/loss_total": 0.45258644223213196 }, { "epoch": 1.5145605129575208, "step": 5669, "train/loss_ctc": 0.49752742052078247, "train/loss_error": 0.5052178502082825, "train/loss_total": 0.5036797523498535 }, { "epoch": 1.514827678332888, "grad_norm": 5.9756903648376465, "learning_rate": 2.0919048891263692e-05, "loss": 0.5067, "step": 5670 }, { "epoch": 1.514827678332888, "step": 5670, "train/loss_ctc": 0.9020562171936035, "train/loss_error": 0.4905538558959961, "train/loss_total": 0.5728543400764465 }, { "epoch": 1.5150948437082554, "step": 5671, "train/loss_ctc": 0.5954426527023315, "train/loss_error": 0.4703804850578308, "train/loss_total": 0.49539291858673096 }, { "epoch": 1.5153620090836228, "step": 5672, "train/loss_ctc": 0.65967857837677, "train/loss_error": 0.48893436789512634, "train/loss_total": 0.5230832099914551 }, { "epoch": 1.51562917445899, "step": 5673, 
"train/loss_ctc": 1.1098376512527466, "train/loss_error": 0.5133152008056641, "train/loss_total": 0.6326196789741516 }, { "epoch": 1.5158963398343575, "step": 5674, "train/loss_ctc": 1.6969094276428223, "train/loss_error": 0.42488613724708557, "train/loss_total": 0.6792908310890198 }, { "epoch": 1.5161635052097249, "step": 5675, "train/loss_ctc": 0.7057669162750244, "train/loss_error": 0.4798228144645691, "train/loss_total": 0.5250116586685181 }, { "epoch": 1.516430670585092, "step": 5676, "train/loss_ctc": 0.9747388958930969, "train/loss_error": 0.48455172777175903, "train/loss_total": 0.5825891494750977 }, { "epoch": 1.5166978359604595, "step": 5677, "train/loss_ctc": 0.3213249444961548, "train/loss_error": 0.45792123675346375, "train/loss_total": 0.43060198426246643 }, { "epoch": 1.516965001335827, "step": 5678, "train/loss_ctc": 0.9060279726982117, "train/loss_error": 0.49681931734085083, "train/loss_total": 0.5786610841751099 }, { "epoch": 1.517232166711194, "step": 5679, "train/loss_ctc": 0.7868350744247437, "train/loss_error": 0.47914960980415344, "train/loss_total": 0.5406867265701294 }, { "epoch": 1.5174993320865617, "grad_norm": 3.9344842433929443, "learning_rate": 2.090301896874165e-05, "loss": 0.5561, "step": 5680 }, { "epoch": 1.5174993320865617, "step": 5680, "train/loss_ctc": 0.6288673877716064, "train/loss_error": 0.4976198673248291, "train/loss_total": 0.5238693952560425 }, { "epoch": 1.517766497461929, "step": 5681, "train/loss_ctc": 0.8176802396774292, "train/loss_error": 0.5180543065071106, "train/loss_total": 0.5779795050621033 }, { "epoch": 1.5180336628372961, "step": 5682, "train/loss_ctc": 0.5557085871696472, "train/loss_error": 0.48729974031448364, "train/loss_total": 0.5009815096855164 }, { "epoch": 1.5183008282126638, "step": 5683, "train/loss_ctc": 0.5667664408683777, "train/loss_error": 0.5034123063087463, "train/loss_total": 0.5160831212997437 }, { "epoch": 1.518567993588031, "step": 5684, "train/loss_ctc": 1.7059154510498047, 
"train/loss_error": 0.4875441789627075, "train/loss_total": 0.7312184572219849 }, { "epoch": 1.5188351589633984, "step": 5685, "train/loss_ctc": 0.999596357345581, "train/loss_error": 0.5084287524223328, "train/loss_total": 0.6066622734069824 }, { "epoch": 1.5191023243387658, "step": 5686, "train/loss_ctc": 0.8253834247589111, "train/loss_error": 0.49330371618270874, "train/loss_total": 0.5597196817398071 }, { "epoch": 1.519369489714133, "step": 5687, "train/loss_ctc": 0.5458406209945679, "train/loss_error": 0.42862582206726074, "train/loss_total": 0.4520688056945801 }, { "epoch": 1.5196366550895004, "step": 5688, "train/loss_ctc": 1.0416808128356934, "train/loss_error": 0.5037574172019958, "train/loss_total": 0.6113420724868774 }, { "epoch": 1.5199038204648678, "step": 5689, "train/loss_ctc": 1.199770212173462, "train/loss_error": 0.45096641778945923, "train/loss_total": 0.6007272005081177 }, { "epoch": 1.520170985840235, "grad_norm": 1.4828556776046753, "learning_rate": 2.0886989046219608e-05, "loss": 0.5681, "step": 5690 }, { "epoch": 1.520170985840235, "step": 5690, "train/loss_ctc": 1.8394825458526611, "train/loss_error": 0.4334605634212494, "train/loss_total": 0.7146649956703186 }, { "epoch": 1.5204381512156024, "step": 5691, "train/loss_ctc": 0.7578226327896118, "train/loss_error": 0.49901658296585083, "train/loss_total": 0.550777792930603 }, { "epoch": 1.5207053165909699, "step": 5692, "train/loss_ctc": 1.1139254570007324, "train/loss_error": 0.568135678768158, "train/loss_total": 0.6772936582565308 }, { "epoch": 1.520972481966337, "step": 5693, "train/loss_ctc": 0.36109626293182373, "train/loss_error": 0.47821444272994995, "train/loss_total": 0.45479080080986023 }, { "epoch": 1.5212396473417045, "step": 5694, "train/loss_ctc": 0.36312225461006165, "train/loss_error": 0.4613412022590637, "train/loss_total": 0.4416974186897278 }, { "epoch": 1.5215068127170719, "step": 5695, "train/loss_ctc": 0.8720670342445374, "train/loss_error": 0.4270232617855072, 
"train/loss_total": 0.5160320401191711 }, { "epoch": 1.521773978092439, "step": 5696, "train/loss_ctc": 0.6082665920257568, "train/loss_error": 0.4535857141017914, "train/loss_total": 0.48452189564704895 }, { "epoch": 1.5220411434678067, "step": 5697, "train/loss_ctc": 1.2998065948486328, "train/loss_error": 0.45562103390693665, "train/loss_total": 0.6244581937789917 }, { "epoch": 1.522308308843174, "step": 5698, "train/loss_ctc": 0.9839509725570679, "train/loss_error": 0.46986186504364014, "train/loss_total": 0.5726796984672546 }, { "epoch": 1.5225754742185411, "step": 5699, "train/loss_ctc": 1.272564172744751, "train/loss_error": 0.5226191282272339, "train/loss_total": 0.6726081371307373 }, { "epoch": 1.5228426395939088, "grad_norm": 1.9044348001480103, "learning_rate": 2.087095912369757e-05, "loss": 0.571, "step": 5700 }, { "epoch": 1.5228426395939088, "step": 5700, "train/loss_ctc": 0.8636599779129028, "train/loss_error": 0.47306153178215027, "train/loss_total": 0.5511811971664429 }, { "epoch": 1.523109804969276, "step": 5701, "train/loss_ctc": 1.050295352935791, "train/loss_error": 0.46229812502861023, "train/loss_total": 0.5798975825309753 }, { "epoch": 1.5233769703446434, "step": 5702, "train/loss_ctc": 0.8110357522964478, "train/loss_error": 0.4294407069683075, "train/loss_total": 0.5057597160339355 }, { "epoch": 1.5236441357200108, "step": 5703, "train/loss_ctc": 1.1433180570602417, "train/loss_error": 0.4887712299823761, "train/loss_total": 0.6196805834770203 }, { "epoch": 1.523911301095378, "step": 5704, "train/loss_ctc": 1.037524700164795, "train/loss_error": 0.44070670008659363, "train/loss_total": 0.5600703358650208 }, { "epoch": 1.5241784664707454, "step": 5705, "train/loss_ctc": 0.4746930003166199, "train/loss_error": 0.4496563673019409, "train/loss_total": 0.4546636939048767 }, { "epoch": 1.5244456318461128, "step": 5706, "train/loss_ctc": 1.3940125703811646, "train/loss_error": 0.41429197788238525, "train/loss_total": 0.6102361083030701 }, { 
"epoch": 1.52471279722148, "step": 5707, "train/loss_ctc": 1.1793787479400635, "train/loss_error": 0.5191302299499512, "train/loss_total": 0.6511799097061157 }, { "epoch": 1.5249799625968474, "step": 5708, "train/loss_ctc": 0.6114165782928467, "train/loss_error": 0.41955721378326416, "train/loss_total": 0.4579290747642517 }, { "epoch": 1.5252471279722148, "step": 5709, "train/loss_ctc": 1.2194288969039917, "train/loss_error": 0.45248115062713623, "train/loss_total": 0.6058707237243652 }, { "epoch": 1.525514293347582, "grad_norm": 2.287849187850952, "learning_rate": 2.0854929201175528e-05, "loss": 0.5596, "step": 5710 }, { "epoch": 1.525514293347582, "step": 5710, "train/loss_ctc": 0.768081784248352, "train/loss_error": 0.4310978055000305, "train/loss_total": 0.49849462509155273 }, { "epoch": 1.5257814587229495, "step": 5711, "train/loss_ctc": 0.9879300594329834, "train/loss_error": 0.4570970833301544, "train/loss_total": 0.5632637143135071 }, { "epoch": 1.5260486240983169, "step": 5712, "train/loss_ctc": 0.750394344329834, "train/loss_error": 0.4757039546966553, "train/loss_total": 0.530642032623291 }, { "epoch": 1.526315789473684, "step": 5713, "train/loss_ctc": 1.0771713256835938, "train/loss_error": 0.4662628769874573, "train/loss_total": 0.5884445905685425 }, { "epoch": 1.5265829548490517, "step": 5714, "train/loss_ctc": 0.7539077997207642, "train/loss_error": 0.5177932977676392, "train/loss_total": 0.5650162100791931 }, { "epoch": 1.526850120224419, "step": 5715, "train/loss_ctc": 2.334868907928467, "train/loss_error": 0.44265201687812805, "train/loss_total": 0.8210954070091248 }, { "epoch": 1.5271172855997863, "step": 5716, "train/loss_ctc": 1.3521456718444824, "train/loss_error": 0.45229771733283997, "train/loss_total": 0.6322673559188843 }, { "epoch": 1.5273844509751537, "step": 5717, "train/loss_ctc": 1.3529503345489502, "train/loss_error": 0.38629770278930664, "train/loss_total": 0.5796282291412354 }, { "epoch": 1.527651616350521, "step": 5718, 
"train/loss_ctc": 0.5435101389884949, "train/loss_error": 0.40439683198928833, "train/loss_total": 0.4322195053100586 }, { "epoch": 1.5279187817258884, "step": 5719, "train/loss_ctc": 1.0430285930633545, "train/loss_error": 0.44035348296165466, "train/loss_total": 0.5608885288238525 }, { "epoch": 1.5281859471012558, "grad_norm": 1.911832571029663, "learning_rate": 2.0838899278653486e-05, "loss": 0.5772, "step": 5720 }, { "epoch": 1.5281859471012558, "step": 5720, "train/loss_ctc": 0.581193745136261, "train/loss_error": 0.46153801679611206, "train/loss_total": 0.48546916246414185 }, { "epoch": 1.528453112476623, "step": 5721, "train/loss_ctc": 0.612999677658081, "train/loss_error": 0.44463756680488586, "train/loss_total": 0.4783099889755249 }, { "epoch": 1.5287202778519904, "step": 5722, "train/loss_ctc": 0.5407442450523376, "train/loss_error": 0.4356396794319153, "train/loss_total": 0.45666059851646423 }, { "epoch": 1.5289874432273578, "step": 5723, "train/loss_ctc": 0.4362717568874359, "train/loss_error": 0.4639056921005249, "train/loss_total": 0.4583789110183716 }, { "epoch": 1.529254608602725, "step": 5724, "train/loss_ctc": 0.9159097671508789, "train/loss_error": 0.40223056077957153, "train/loss_total": 0.5049664378166199 }, { "epoch": 1.5295217739780924, "step": 5725, "train/loss_ctc": 0.8694669604301453, "train/loss_error": 0.48990219831466675, "train/loss_total": 0.5658151507377625 }, { "epoch": 1.5297889393534598, "step": 5726, "train/loss_ctc": 1.3693406581878662, "train/loss_error": 0.5286887288093567, "train/loss_total": 0.6968191266059875 }, { "epoch": 1.530056104728827, "step": 5727, "train/loss_ctc": 0.45925217866897583, "train/loss_error": 0.47561171650886536, "train/loss_total": 0.47233980894088745 }, { "epoch": 1.5303232701041944, "step": 5728, "train/loss_ctc": 0.47660601139068604, "train/loss_error": 0.46562638878822327, "train/loss_total": 0.4678223133087158 }, { "epoch": 1.5305904354795619, "step": 5729, "train/loss_ctc": 1.2140027284622192, 
"train/loss_error": 0.5020378232002258, "train/loss_total": 0.6444308161735535 }, { "epoch": 1.530857600854929, "grad_norm": 1.908355951309204, "learning_rate": 2.0822869356131444e-05, "loss": 0.5231, "step": 5730 }, { "epoch": 1.530857600854929, "step": 5730, "train/loss_ctc": 0.5329206585884094, "train/loss_error": 0.4886014461517334, "train/loss_total": 0.4974652826786041 }, { "epoch": 1.5311247662302967, "step": 5731, "train/loss_ctc": 1.0381954908370972, "train/loss_error": 0.4532393217086792, "train/loss_total": 0.5702306032180786 }, { "epoch": 1.531391931605664, "step": 5732, "train/loss_ctc": 1.025321125984192, "train/loss_error": 0.43442848324775696, "train/loss_total": 0.552606999874115 }, { "epoch": 1.5316590969810313, "step": 5733, "train/loss_ctc": 1.0489206314086914, "train/loss_error": 0.4523368179798126, "train/loss_total": 0.5716536045074463 }, { "epoch": 1.5319262623563987, "step": 5734, "train/loss_ctc": 0.4543550908565521, "train/loss_error": 0.45818284153938293, "train/loss_total": 0.4574173092842102 }, { "epoch": 1.532193427731766, "step": 5735, "train/loss_ctc": 0.592178225517273, "train/loss_error": 0.46087247133255005, "train/loss_total": 0.48713362216949463 }, { "epoch": 1.5324605931071333, "step": 5736, "train/loss_ctc": 0.7752919793128967, "train/loss_error": 0.4233753979206085, "train/loss_total": 0.4937587380409241 }, { "epoch": 1.5327277584825008, "step": 5737, "train/loss_ctc": 0.5133020877838135, "train/loss_error": 0.46158283948898315, "train/loss_total": 0.4719266891479492 }, { "epoch": 1.532994923857868, "step": 5738, "train/loss_ctc": 0.945920467376709, "train/loss_error": 0.40251457691192627, "train/loss_total": 0.5111957788467407 }, { "epoch": 1.5332620892332354, "step": 5739, "train/loss_ctc": 0.880782961845398, "train/loss_error": 0.4313462972640991, "train/loss_total": 0.5212336182594299 }, { "epoch": 1.5335292546086028, "grad_norm": 1.9200581312179565, "learning_rate": 2.0806839433609405e-05, "loss": 0.5135, "step": 5740 
}, { "epoch": 1.5335292546086028, "step": 5740, "train/loss_ctc": 1.321894884109497, "train/loss_error": 0.490716814994812, "train/loss_total": 0.656952440738678 }, { "epoch": 1.53379641998397, "step": 5741, "train/loss_ctc": 1.0469810962677002, "train/loss_error": 0.5983043909072876, "train/loss_total": 0.6880397200584412 }, { "epoch": 1.5340635853593374, "step": 5742, "train/loss_ctc": 0.4301151633262634, "train/loss_error": 0.4894940257072449, "train/loss_total": 0.4776182472705841 }, { "epoch": 1.5343307507347048, "step": 5743, "train/loss_ctc": 0.6580309271812439, "train/loss_error": 0.45545369386672974, "train/loss_total": 0.49596914649009705 }, { "epoch": 1.534597916110072, "step": 5744, "train/loss_ctc": 0.7003917694091797, "train/loss_error": 0.44855326414108276, "train/loss_total": 0.4989209771156311 }, { "epoch": 1.5348650814854397, "step": 5745, "train/loss_ctc": 1.0101640224456787, "train/loss_error": 0.48661506175994873, "train/loss_total": 0.5913248658180237 }, { "epoch": 1.5351322468608068, "step": 5746, "train/loss_ctc": 0.6159263849258423, "train/loss_error": 0.4203376770019531, "train/loss_total": 0.4594554305076599 }, { "epoch": 1.535399412236174, "step": 5747, "train/loss_ctc": 0.4923401176929474, "train/loss_error": 0.4654618203639984, "train/loss_total": 0.4708375036716461 }, { "epoch": 1.5356665776115417, "step": 5748, "train/loss_ctc": 0.9285190105438232, "train/loss_error": 0.5002904534339905, "train/loss_total": 0.5859361886978149 }, { "epoch": 1.5359337429869089, "step": 5749, "train/loss_ctc": 1.0126056671142578, "train/loss_error": 0.4371265172958374, "train/loss_total": 0.5522223711013794 }, { "epoch": 1.5362009083622763, "grad_norm": 1.854902744293213, "learning_rate": 2.0790809511087363e-05, "loss": 0.5477, "step": 5750 }, { "epoch": 1.5362009083622763, "step": 5750, "train/loss_ctc": 1.4659169912338257, "train/loss_error": 0.426373153924942, "train/loss_total": 0.6342819333076477 }, { "epoch": 1.5364680737376437, "step": 5751, 
"train/loss_ctc": 0.28938746452331543, "train/loss_error": 0.4586481750011444, "train/loss_total": 0.4247960150241852 }, { "epoch": 1.536735239113011, "step": 5752, "train/loss_ctc": 0.8188122510910034, "train/loss_error": 0.41431376338005066, "train/loss_total": 0.49521347880363464 }, { "epoch": 1.5370024044883783, "step": 5753, "train/loss_ctc": 0.34854939579963684, "train/loss_error": 0.43844830989837646, "train/loss_total": 0.4204685091972351 }, { "epoch": 1.5372695698637457, "step": 5754, "train/loss_ctc": 0.8138806223869324, "train/loss_error": 0.5106914639472961, "train/loss_total": 0.5713292956352234 }, { "epoch": 1.537536735239113, "step": 5755, "train/loss_ctc": 0.8263143301010132, "train/loss_error": 0.5112304091453552, "train/loss_total": 0.5742471814155579 }, { "epoch": 1.5378039006144804, "step": 5756, "train/loss_ctc": 0.5854307413101196, "train/loss_error": 0.5141328573226929, "train/loss_total": 0.5283924341201782 }, { "epoch": 1.5380710659898478, "step": 5757, "train/loss_ctc": 0.3751513957977295, "train/loss_error": 0.46419864892959595, "train/loss_total": 0.44638919830322266 }, { "epoch": 1.538338231365215, "step": 5758, "train/loss_ctc": 1.3062727451324463, "train/loss_error": 0.48404455184936523, "train/loss_total": 0.6484901905059814 }, { "epoch": 1.5386053967405824, "step": 5759, "train/loss_ctc": 1.0491943359375, "train/loss_error": 0.5260694622993469, "train/loss_total": 0.6306944489479065 }, { "epoch": 1.5388725621159498, "grad_norm": 2.3864219188690186, "learning_rate": 2.0774779588565325e-05, "loss": 0.5374, "step": 5760 }, { "epoch": 1.5388725621159498, "step": 5760, "train/loss_ctc": 0.4977174997329712, "train/loss_error": 0.5087296962738037, "train/loss_total": 0.5065272450447083 }, { "epoch": 1.539139727491317, "step": 5761, "train/loss_ctc": 0.32130196690559387, "train/loss_error": 0.4256143271923065, "train/loss_total": 0.40475186705589294 }, { "epoch": 1.5394068928666846, "step": 5762, "train/loss_ctc": 0.5992896556854248, 
"train/loss_error": 0.4668434262275696, "train/loss_total": 0.4933326840400696 }, { "epoch": 1.5396740582420518, "step": 5763, "train/loss_ctc": 1.5130724906921387, "train/loss_error": 0.5187442898750305, "train/loss_total": 0.7176099419593811 }, { "epoch": 1.539941223617419, "step": 5764, "train/loss_ctc": 0.6330745220184326, "train/loss_error": 0.4901977777481079, "train/loss_total": 0.5187731385231018 }, { "epoch": 1.5402083889927867, "step": 5765, "train/loss_ctc": 0.971373438835144, "train/loss_error": 0.444784015417099, "train/loss_total": 0.5501018762588501 }, { "epoch": 1.5404755543681539, "step": 5766, "train/loss_ctc": 0.7418466806411743, "train/loss_error": 0.46404388546943665, "train/loss_total": 0.5196044445037842 }, { "epoch": 1.5407427197435213, "step": 5767, "train/loss_ctc": 0.47166359424591064, "train/loss_error": 0.4804569482803345, "train/loss_total": 0.4786982834339142 }, { "epoch": 1.5410098851188887, "step": 5768, "train/loss_ctc": 1.0922502279281616, "train/loss_error": 0.4186505675315857, "train/loss_total": 0.5533705353736877 }, { "epoch": 1.541277050494256, "step": 5769, "train/loss_ctc": 0.5831303596496582, "train/loss_error": 0.4617275297641754, "train/loss_total": 0.4860081076622009 }, { "epoch": 1.5415442158696233, "grad_norm": 2.4334819316864014, "learning_rate": 2.0758749666043283e-05, "loss": 0.5229, "step": 5770 }, { "epoch": 1.5415442158696233, "step": 5770, "train/loss_ctc": 1.3230727910995483, "train/loss_error": 0.5103819370269775, "train/loss_total": 0.6729201078414917 }, { "epoch": 1.5418113812449907, "step": 5771, "train/loss_ctc": 0.8560793399810791, "train/loss_error": 0.4911860525608063, "train/loss_total": 0.5641646981239319 }, { "epoch": 1.542078546620358, "step": 5772, "train/loss_ctc": 1.6573799848556519, "train/loss_error": 0.5027767419815063, "train/loss_total": 0.7336974143981934 }, { "epoch": 1.5423457119957253, "step": 5773, "train/loss_ctc": 1.191655158996582, "train/loss_error": 0.48879629373550415, 
"train/loss_total": 0.6293680667877197 }, { "epoch": 1.5426128773710928, "step": 5774, "train/loss_ctc": 1.2066677808761597, "train/loss_error": 0.4637833535671234, "train/loss_total": 0.6123602390289307 }, { "epoch": 1.54288004274646, "step": 5775, "train/loss_ctc": 0.47840815782546997, "train/loss_error": 0.45359572768211365, "train/loss_total": 0.45855823159217834 }, { "epoch": 1.5431472081218274, "step": 5776, "train/loss_ctc": 0.48513829708099365, "train/loss_error": 0.4339383542537689, "train/loss_total": 0.44417834281921387 }, { "epoch": 1.5434143734971948, "step": 5777, "train/loss_ctc": 1.0780689716339111, "train/loss_error": 0.4684657156467438, "train/loss_total": 0.5903863906860352 }, { "epoch": 1.543681538872562, "step": 5778, "train/loss_ctc": 0.8765451908111572, "train/loss_error": 0.5284351706504822, "train/loss_total": 0.598057210445404 }, { "epoch": 1.5439487042479296, "step": 5779, "train/loss_ctc": 0.9006617665290833, "train/loss_error": 0.4948779344558716, "train/loss_total": 0.5760347247123718 }, { "epoch": 1.5442158696232968, "grad_norm": 2.3454997539520264, "learning_rate": 2.074271974352124e-05, "loss": 0.588, "step": 5780 }, { "epoch": 1.5442158696232968, "step": 5780, "train/loss_ctc": 0.693180501461029, "train/loss_error": 0.47394827008247375, "train/loss_total": 0.5177947282791138 }, { "epoch": 1.544483034998664, "step": 5781, "train/loss_ctc": 0.4676514267921448, "train/loss_error": 0.5681662559509277, "train/loss_total": 0.5480632781982422 }, { "epoch": 1.5447502003740317, "step": 5782, "train/loss_ctc": 1.3743165731430054, "train/loss_error": 0.4586308002471924, "train/loss_total": 0.6417679786682129 }, { "epoch": 1.5450173657493989, "step": 5783, "train/loss_ctc": 0.40042486786842346, "train/loss_error": 0.4464414119720459, "train/loss_total": 0.4372381269931793 }, { "epoch": 1.5452845311247663, "step": 5784, "train/loss_ctc": 0.7118317484855652, "train/loss_error": 0.5196370482444763, "train/loss_total": 0.558076024055481 }, { 
"epoch": 1.5455516965001337, "step": 5785, "train/loss_ctc": 0.4836122989654541, "train/loss_error": 0.4240410029888153, "train/loss_total": 0.4359552562236786 }, { "epoch": 1.5458188618755009, "step": 5786, "train/loss_ctc": 1.115835428237915, "train/loss_error": 0.4034852385520935, "train/loss_total": 0.5459553003311157 }, { "epoch": 1.5460860272508683, "step": 5787, "train/loss_ctc": 1.0944104194641113, "train/loss_error": 0.5229795575141907, "train/loss_total": 0.6372657418251038 }, { "epoch": 1.5463531926262357, "step": 5788, "train/loss_ctc": 0.8682012557983398, "train/loss_error": 0.5032146573066711, "train/loss_total": 0.5762119889259338 }, { "epoch": 1.546620358001603, "step": 5789, "train/loss_ctc": 0.8866159915924072, "train/loss_error": 0.4470272362232208, "train/loss_total": 0.534945011138916 }, { "epoch": 1.5468875233769703, "grad_norm": 1.830235481262207, "learning_rate": 2.07266898209992e-05, "loss": 0.5433, "step": 5790 }, { "epoch": 1.5468875233769703, "step": 5790, "train/loss_ctc": 0.5115035176277161, "train/loss_error": 0.5018869042396545, "train/loss_total": 0.5038102269172668 }, { "epoch": 1.5471546887523377, "step": 5791, "train/loss_ctc": 1.0168287754058838, "train/loss_error": 0.47284263372421265, "train/loss_total": 0.5816398859024048 }, { "epoch": 1.547421854127705, "step": 5792, "train/loss_ctc": 0.9844192266464233, "train/loss_error": 0.539541482925415, "train/loss_total": 0.6285170316696167 }, { "epoch": 1.5476890195030724, "step": 5793, "train/loss_ctc": 0.9384051561355591, "train/loss_error": 0.4851985275745392, "train/loss_total": 0.5758398771286011 }, { "epoch": 1.5479561848784398, "step": 5794, "train/loss_ctc": 1.5796289443969727, "train/loss_error": 0.5155817270278931, "train/loss_total": 0.728391170501709 }, { "epoch": 1.548223350253807, "step": 5795, "train/loss_ctc": 0.959728479385376, "train/loss_error": 0.41290566325187683, "train/loss_total": 0.5222702026367188 }, { "epoch": 1.5484905156291746, "step": 5796, 
"train/loss_ctc": 1.401848316192627, "train/loss_error": 0.46199169754981995, "train/loss_total": 0.6499630212783813 }, { "epoch": 1.5487576810045418, "step": 5797, "train/loss_ctc": 0.4063842296600342, "train/loss_error": 0.4163050949573517, "train/loss_total": 0.4143209457397461 }, { "epoch": 1.549024846379909, "step": 5798, "train/loss_ctc": 0.3620738983154297, "train/loss_error": 0.45312193036079407, "train/loss_total": 0.4349123239517212 }, { "epoch": 1.5492920117552766, "step": 5799, "train/loss_ctc": 0.4589183032512665, "train/loss_error": 0.4533332586288452, "train/loss_total": 0.4544502794742584 }, { "epoch": 1.5495591771306438, "grad_norm": 1.3682305812835693, "learning_rate": 2.0710659898477157e-05, "loss": 0.5494, "step": 5800 }, { "epoch": 1.5495591771306438, "step": 5800, "train/loss_ctc": 1.4340262413024902, "train/loss_error": 0.4639127552509308, "train/loss_total": 0.6579354405403137 }, { "epoch": 1.5498263425060113, "step": 5801, "train/loss_ctc": 0.7811869382858276, "train/loss_error": 0.42636704444885254, "train/loss_total": 0.49733102321624756 }, { "epoch": 1.5500935078813787, "step": 5802, "train/loss_ctc": 0.7698739171028137, "train/loss_error": 0.4793662428855896, "train/loss_total": 0.5374677777290344 }, { "epoch": 1.5503606732567459, "step": 5803, "train/loss_ctc": 1.1562378406524658, "train/loss_error": 0.44962233304977417, "train/loss_total": 0.5909454822540283 }, { "epoch": 1.5506278386321133, "step": 5804, "train/loss_ctc": 1.1487407684326172, "train/loss_error": 0.5037817358970642, "train/loss_total": 0.6327735185623169 }, { "epoch": 1.5508950040074807, "step": 5805, "train/loss_ctc": 0.267900288105011, "train/loss_error": 0.4786841869354248, "train/loss_total": 0.43652740120887756 }, { "epoch": 1.551162169382848, "step": 5806, "train/loss_ctc": 0.47729527950286865, "train/loss_error": 0.4560479521751404, "train/loss_total": 0.4602974057197571 }, { "epoch": 1.5514293347582153, "step": 5807, "train/loss_ctc": 0.4541357159614563, 
"train/loss_error": 0.5271504521369934, "train/loss_total": 0.512547492980957 }, { "epoch": 1.5516965001335827, "step": 5808, "train/loss_ctc": 1.6720932722091675, "train/loss_error": 0.4302601218223572, "train/loss_total": 0.6786267757415771 }, { "epoch": 1.55196366550895, "step": 5809, "train/loss_ctc": 1.882688283920288, "train/loss_error": 0.49806568026542664, "train/loss_total": 0.7749902009963989 }, { "epoch": 1.5522308308843173, "grad_norm": 2.8769214153289795, "learning_rate": 2.069462997595512e-05, "loss": 0.5779, "step": 5810 }, { "epoch": 1.5522308308843173, "step": 5810, "train/loss_ctc": 0.5438432097434998, "train/loss_error": 0.4162408113479614, "train/loss_total": 0.4417612850666046 }, { "epoch": 1.5524979962596848, "step": 5811, "train/loss_ctc": 0.7630120515823364, "train/loss_error": 0.4521506130695343, "train/loss_total": 0.5143229365348816 }, { "epoch": 1.552765161635052, "step": 5812, "train/loss_ctc": 0.3967246413230896, "train/loss_error": 0.4652788043022156, "train/loss_total": 0.45156797766685486 }, { "epoch": 1.5530323270104196, "step": 5813, "train/loss_ctc": 0.9061465263366699, "train/loss_error": 0.5091923475265503, "train/loss_total": 0.5885831713676453 }, { "epoch": 1.5532994923857868, "step": 5814, "train/loss_ctc": 0.675016462802887, "train/loss_error": 0.419616162776947, "train/loss_total": 0.47069624066352844 }, { "epoch": 1.553566657761154, "step": 5815, "train/loss_ctc": 0.48294079303741455, "train/loss_error": 0.5081641674041748, "train/loss_total": 0.5031194686889648 }, { "epoch": 1.5538338231365216, "step": 5816, "train/loss_ctc": 1.0162625312805176, "train/loss_error": 0.4463503360748291, "train/loss_total": 0.5603327751159668 }, { "epoch": 1.5541009885118888, "step": 5817, "train/loss_ctc": 1.0770564079284668, "train/loss_error": 0.5364518761634827, "train/loss_total": 0.6445727944374084 }, { "epoch": 1.5543681538872562, "step": 5818, "train/loss_ctc": 1.1463589668273926, "train/loss_error": 0.4702678918838501, 
"train/loss_total": 0.6054861545562744 }, { "epoch": 1.5546353192626237, "step": 5819, "train/loss_ctc": 0.4397125840187073, "train/loss_error": 0.4469265639781952, "train/loss_total": 0.4454837739467621 }, { "epoch": 1.5549024846379909, "grad_norm": 1.8846832513809204, "learning_rate": 2.0678600053433077e-05, "loss": 0.5226, "step": 5820 }, { "epoch": 1.5549024846379909, "step": 5820, "train/loss_ctc": 0.6283113360404968, "train/loss_error": 0.4792957901954651, "train/loss_total": 0.5090988874435425 }, { "epoch": 1.5551696500133583, "step": 5821, "train/loss_ctc": 0.50688636302948, "train/loss_error": 0.423795610666275, "train/loss_total": 0.44041377305984497 }, { "epoch": 1.5554368153887257, "step": 5822, "train/loss_ctc": 0.543092668056488, "train/loss_error": 0.4106139838695526, "train/loss_total": 0.43710970878601074 }, { "epoch": 1.5557039807640929, "step": 5823, "train/loss_ctc": 1.219228982925415, "train/loss_error": 0.4344458281993866, "train/loss_total": 0.5914024710655212 }, { "epoch": 1.5559711461394603, "step": 5824, "train/loss_ctc": 0.7760410308837891, "train/loss_error": 0.48897871375083923, "train/loss_total": 0.5463911890983582 }, { "epoch": 1.5562383115148277, "step": 5825, "train/loss_ctc": 0.8519132733345032, "train/loss_error": 0.4671807587146759, "train/loss_total": 0.5441272854804993 }, { "epoch": 1.556505476890195, "step": 5826, "train/loss_ctc": 0.7492741346359253, "train/loss_error": 0.429969847202301, "train/loss_total": 0.49383068084716797 }, { "epoch": 1.5567726422655623, "step": 5827, "train/loss_ctc": 0.41976282000541687, "train/loss_error": 0.4841534197330475, "train/loss_total": 0.47127532958984375 }, { "epoch": 1.5570398076409298, "step": 5828, "train/loss_ctc": 0.44729048013687134, "train/loss_error": 0.4829460680484772, "train/loss_total": 0.47581496834754944 }, { "epoch": 1.557306973016297, "step": 5829, "train/loss_ctc": 0.9263628721237183, "train/loss_error": 0.4275754988193512, "train/loss_total": 0.5273330211639404 }, { 
"epoch": 1.5575741383916646, "grad_norm": 2.3507564067840576, "learning_rate": 2.0662570130911035e-05, "loss": 0.5037, "step": 5830 }, { "epoch": 1.5575741383916646, "step": 5830, "train/loss_ctc": 0.4428980350494385, "train/loss_error": 0.4915931522846222, "train/loss_total": 0.4818541407585144 }, { "epoch": 1.5578413037670318, "step": 5831, "train/loss_ctc": 0.5757765769958496, "train/loss_error": 0.4901350438594818, "train/loss_total": 0.5072633624076843 }, { "epoch": 1.5581084691423992, "step": 5832, "train/loss_ctc": 0.7022030353546143, "train/loss_error": 0.5270810127258301, "train/loss_total": 0.5621054172515869 }, { "epoch": 1.5583756345177666, "step": 5833, "train/loss_ctc": 0.405640184879303, "train/loss_error": 0.4488135576248169, "train/loss_total": 0.44017887115478516 }, { "epoch": 1.5586427998931338, "step": 5834, "train/loss_ctc": 0.8840329051017761, "train/loss_error": 0.4827702045440674, "train/loss_total": 0.5630227327346802 }, { "epoch": 1.5589099652685012, "step": 5835, "train/loss_ctc": 1.071601152420044, "train/loss_error": 0.48481985926628113, "train/loss_total": 0.6021761298179626 }, { "epoch": 1.5591771306438686, "step": 5836, "train/loss_ctc": 0.5961927175521851, "train/loss_error": 0.4493832588195801, "train/loss_total": 0.47874516248703003 }, { "epoch": 1.5594442960192358, "step": 5837, "train/loss_ctc": 0.854690670967102, "train/loss_error": 0.541816771030426, "train/loss_total": 0.6043915748596191 }, { "epoch": 1.5597114613946033, "step": 5838, "train/loss_ctc": 0.7248173952102661, "train/loss_error": 0.45785385370254517, "train/loss_total": 0.5112465620040894 }, { "epoch": 1.5599786267699707, "step": 5839, "train/loss_ctc": 0.33973953127861023, "train/loss_error": 0.47649717330932617, "train/loss_total": 0.44914567470550537 }, { "epoch": 1.5602457921453379, "grad_norm": 1.55374014377594, "learning_rate": 2.0646540208388993e-05, "loss": 0.52, "step": 5840 }, { "epoch": 1.5602457921453379, "step": 5840, "train/loss_ctc": 
0.4700721502304077, "train/loss_error": 0.4147908687591553, "train/loss_total": 0.4258471429347992 }, { "epoch": 1.5605129575207053, "step": 5841, "train/loss_ctc": 0.6528154611587524, "train/loss_error": 0.49052175879478455, "train/loss_total": 0.5229805111885071 }, { "epoch": 1.5607801228960727, "step": 5842, "train/loss_ctc": 0.9609667062759399, "train/loss_error": 0.4768325686454773, "train/loss_total": 0.5736594200134277 }, { "epoch": 1.56104728827144, "step": 5843, "train/loss_ctc": 0.740886390209198, "train/loss_error": 0.42832693457603455, "train/loss_total": 0.49083882570266724 }, { "epoch": 1.5613144536468073, "step": 5844, "train/loss_ctc": 0.41811904311180115, "train/loss_error": 0.4374670386314392, "train/loss_total": 0.4335974454879761 }, { "epoch": 1.5615816190221747, "step": 5845, "train/loss_ctc": 0.8189380764961243, "train/loss_error": 0.497150182723999, "train/loss_total": 0.5615077614784241 }, { "epoch": 1.561848784397542, "step": 5846, "train/loss_ctc": 0.4210449457168579, "train/loss_error": 0.4324384331703186, "train/loss_total": 0.4301597476005554 }, { "epoch": 1.5621159497729096, "step": 5847, "train/loss_ctc": 0.7540156841278076, "train/loss_error": 0.5194839239120483, "train/loss_total": 0.5663902759552002 }, { "epoch": 1.5623831151482768, "step": 5848, "train/loss_ctc": 0.39334219694137573, "train/loss_error": 0.4375942349433899, "train/loss_total": 0.428743839263916 }, { "epoch": 1.5626502805236442, "step": 5849, "train/loss_ctc": 1.051781415939331, "train/loss_error": 0.4775007665157318, "train/loss_total": 0.5923569202423096 }, { "epoch": 1.5629174458990116, "grad_norm": 1.9554424285888672, "learning_rate": 2.063051028586695e-05, "loss": 0.5026, "step": 5850 }, { "epoch": 1.5629174458990116, "step": 5850, "train/loss_ctc": 0.7448375225067139, "train/loss_error": 0.4413624107837677, "train/loss_total": 0.5020574331283569 }, { "epoch": 1.5631846112743788, "step": 5851, "train/loss_ctc": 0.617917537689209, "train/loss_error": 
0.41863349080085754, "train/loss_total": 0.4584903120994568 }, { "epoch": 1.5634517766497462, "step": 5852, "train/loss_ctc": 0.40787938237190247, "train/loss_error": 0.48349034786224365, "train/loss_total": 0.46836814284324646 }, { "epoch": 1.5637189420251136, "step": 5853, "train/loss_ctc": 0.5137991309165955, "train/loss_error": 0.47094041109085083, "train/loss_total": 0.47951218485832214 }, { "epoch": 1.5639861074004808, "step": 5854, "train/loss_ctc": 0.5902777910232544, "train/loss_error": 0.43405771255493164, "train/loss_total": 0.4653017222881317 }, { "epoch": 1.5642532727758482, "step": 5855, "train/loss_ctc": 0.6479878425598145, "train/loss_error": 0.513927161693573, "train/loss_total": 0.5407392978668213 }, { "epoch": 1.5645204381512157, "step": 5856, "train/loss_ctc": 0.5232327580451965, "train/loss_error": 0.4442721903324127, "train/loss_total": 0.4600643217563629 }, { "epoch": 1.5647876035265829, "step": 5857, "train/loss_ctc": 0.6151925325393677, "train/loss_error": 0.41952913999557495, "train/loss_total": 0.458661824464798 }, { "epoch": 1.5650547689019503, "step": 5858, "train/loss_ctc": 1.7864848375320435, "train/loss_error": 0.48652881383895874, "train/loss_total": 0.7465200424194336 }, { "epoch": 1.5653219342773177, "step": 5859, "train/loss_ctc": 0.9972195625305176, "train/loss_error": 0.4934995770454407, "train/loss_total": 0.594243586063385 }, { "epoch": 1.565589099652685, "grad_norm": 2.018155813217163, "learning_rate": 2.061448036334491e-05, "loss": 0.5174, "step": 5860 }, { "epoch": 1.565589099652685, "step": 5860, "train/loss_ctc": 0.6648036241531372, "train/loss_error": 0.49613192677497864, "train/loss_total": 0.5298662781715393 }, { "epoch": 1.5658562650280525, "step": 5861, "train/loss_ctc": 1.0445291996002197, "train/loss_error": 0.4293643534183502, "train/loss_total": 0.5523973703384399 }, { "epoch": 1.5661234304034197, "step": 5862, "train/loss_ctc": 1.459172010421753, "train/loss_error": 0.45347902178764343, "train/loss_total": 
0.6546176671981812 }, { "epoch": 1.566390595778787, "step": 5863, "train/loss_ctc": 1.060116171836853, "train/loss_error": 0.45527294278144836, "train/loss_total": 0.5762416124343872 }, { "epoch": 1.5666577611541546, "step": 5864, "train/loss_ctc": 0.9024445414543152, "train/loss_error": 0.40908578038215637, "train/loss_total": 0.5077575445175171 }, { "epoch": 1.5669249265295218, "step": 5865, "train/loss_ctc": 0.48521000146865845, "train/loss_error": 0.39171090722084045, "train/loss_total": 0.41041073203086853 }, { "epoch": 1.5671920919048892, "step": 5866, "train/loss_ctc": 0.8887054920196533, "train/loss_error": 0.46142154932022095, "train/loss_total": 0.5468783378601074 }, { "epoch": 1.5674592572802566, "step": 5867, "train/loss_ctc": 1.6638123989105225, "train/loss_error": 0.4964831471443176, "train/loss_total": 0.7299489974975586 }, { "epoch": 1.5677264226556238, "step": 5868, "train/loss_ctc": 1.2136867046356201, "train/loss_error": 0.46570268273353577, "train/loss_total": 0.6152994632720947 }, { "epoch": 1.5679935880309912, "step": 5869, "train/loss_ctc": 0.5981712937355042, "train/loss_error": 0.39703723788261414, "train/loss_total": 0.4372640550136566 }, { "epoch": 1.5682607534063586, "grad_norm": 1.4962252378463745, "learning_rate": 2.059845044082287e-05, "loss": 0.5561, "step": 5870 }, { "epoch": 1.5682607534063586, "step": 5870, "train/loss_ctc": 0.8179614543914795, "train/loss_error": 0.427391916513443, "train/loss_total": 0.5055058598518372 }, { "epoch": 1.5685279187817258, "step": 5871, "train/loss_ctc": 0.27501770853996277, "train/loss_error": 0.4888235330581665, "train/loss_total": 0.4460623860359192 }, { "epoch": 1.5687950841570932, "step": 5872, "train/loss_ctc": 0.6794959306716919, "train/loss_error": 0.4056101143360138, "train/loss_total": 0.46038728952407837 }, { "epoch": 1.5690622495324607, "step": 5873, "train/loss_ctc": 0.5610116124153137, "train/loss_error": 0.4302375018596649, "train/loss_total": 0.4563923180103302 }, { "epoch": 
1.5693294149078278, "step": 5874, "train/loss_ctc": 0.8459022045135498, "train/loss_error": 0.4410346448421478, "train/loss_total": 0.5220081806182861 }, { "epoch": 1.5695965802831953, "step": 5875, "train/loss_ctc": 0.567523717880249, "train/loss_error": 0.45792844891548157, "train/loss_total": 0.4798474907875061 }, { "epoch": 1.5698637456585627, "step": 5876, "train/loss_ctc": 0.5740588307380676, "train/loss_error": 0.4381850063800812, "train/loss_total": 0.46535977721214294 }, { "epoch": 1.5701309110339299, "step": 5877, "train/loss_ctc": 0.8953396677970886, "train/loss_error": 0.4582923948764801, "train/loss_total": 0.5457018613815308 }, { "epoch": 1.5703980764092975, "step": 5878, "train/loss_ctc": 0.9379245042800903, "train/loss_error": 0.4366059899330139, "train/loss_total": 0.5368697047233582 }, { "epoch": 1.5706652417846647, "step": 5879, "train/loss_ctc": 1.084177851676941, "train/loss_error": 0.4169785976409912, "train/loss_total": 0.5504184365272522 }, { "epoch": 1.570932407160032, "grad_norm": 1.4928867816925049, "learning_rate": 2.058242051830083e-05, "loss": 0.4969, "step": 5880 }, { "epoch": 1.570932407160032, "step": 5880, "train/loss_ctc": 0.5848242044448853, "train/loss_error": 0.41429051756858826, "train/loss_total": 0.44839727878570557 }, { "epoch": 1.5711995725353995, "step": 5881, "train/loss_ctc": 0.9494029879570007, "train/loss_error": 0.43737784028053284, "train/loss_total": 0.5397828817367554 }, { "epoch": 1.5714667379107667, "step": 5882, "train/loss_ctc": 0.9281593561172485, "train/loss_error": 0.4420335590839386, "train/loss_total": 0.5392587184906006 }, { "epoch": 1.5717339032861342, "step": 5883, "train/loss_ctc": 0.41587212681770325, "train/loss_error": 0.496362566947937, "train/loss_total": 0.48026448488235474 }, { "epoch": 1.5720010686615016, "step": 5884, "train/loss_ctc": 0.5996510982513428, "train/loss_error": 0.4361638128757477, "train/loss_total": 0.46886128187179565 }, { "epoch": 1.5722682340368688, "step": 5885, 
"train/loss_ctc": 1.1171259880065918, "train/loss_error": 0.5350671410560608, "train/loss_total": 0.6514788866043091 }, { "epoch": 1.5725353994122362, "step": 5886, "train/loss_ctc": 1.0904104709625244, "train/loss_error": 0.45236626267433167, "train/loss_total": 0.5799751281738281 }, { "epoch": 1.5728025647876036, "step": 5887, "train/loss_ctc": 0.7796259522438049, "train/loss_error": 0.40493330359458923, "train/loss_total": 0.47987186908721924 }, { "epoch": 1.5730697301629708, "step": 5888, "train/loss_ctc": 1.7319341897964478, "train/loss_error": 0.4610438942909241, "train/loss_total": 0.7152220010757446 }, { "epoch": 1.5733368955383382, "step": 5889, "train/loss_ctc": 1.0111125707626343, "train/loss_error": 0.45267826318740845, "train/loss_total": 0.5643651485443115 }, { "epoch": 1.5736040609137056, "grad_norm": 1.2020477056503296, "learning_rate": 2.0566390595778787e-05, "loss": 0.5467, "step": 5890 }, { "epoch": 1.5736040609137056, "step": 5890, "train/loss_ctc": 0.3983783721923828, "train/loss_error": 0.4585324823856354, "train/loss_total": 0.4465016722679138 }, { "epoch": 1.5738712262890728, "step": 5891, "train/loss_ctc": 0.5460779070854187, "train/loss_error": 0.4868152439594269, "train/loss_total": 0.49866777658462524 }, { "epoch": 1.5741383916644403, "step": 5892, "train/loss_ctc": 1.1183983087539673, "train/loss_error": 0.4498947560787201, "train/loss_total": 0.5835955142974854 }, { "epoch": 1.5744055570398077, "step": 5893, "train/loss_ctc": 0.7365429401397705, "train/loss_error": 0.4393271207809448, "train/loss_total": 0.4987702965736389 }, { "epoch": 1.5746727224151749, "step": 5894, "train/loss_ctc": 0.29903092980384827, "train/loss_error": 0.4350459575653076, "train/loss_total": 0.4078429639339447 }, { "epoch": 1.5749398877905425, "step": 5895, "train/loss_ctc": 0.5620901584625244, "train/loss_error": 0.4291292130947113, "train/loss_total": 0.4557214081287384 }, { "epoch": 1.5752070531659097, "step": 5896, "train/loss_ctc": 0.652836799621582, 
"train/loss_error": 0.39441797137260437, "train/loss_total": 0.44610172510147095 }, { "epoch": 1.575474218541277, "step": 5897, "train/loss_ctc": 0.8152613043785095, "train/loss_error": 0.49184155464172363, "train/loss_total": 0.556525468826294 }, { "epoch": 1.5757413839166445, "step": 5898, "train/loss_ctc": 1.6750917434692383, "train/loss_error": 0.47141674160957336, "train/loss_total": 0.7121517658233643 }, { "epoch": 1.5760085492920117, "step": 5899, "train/loss_ctc": 0.5711711645126343, "train/loss_error": 0.4501984119415283, "train/loss_total": 0.47439298033714294 }, { "epoch": 1.5762757146673791, "grad_norm": 2.5773420333862305, "learning_rate": 2.0550360673256745e-05, "loss": 0.508, "step": 5900 }, { "epoch": 1.5762757146673791, "step": 5900, "train/loss_ctc": 0.8343077301979065, "train/loss_error": 0.48994266986846924, "train/loss_total": 0.5588157176971436 }, { "epoch": 1.5765428800427466, "step": 5901, "train/loss_ctc": 0.4034423232078552, "train/loss_error": 0.423746258020401, "train/loss_total": 0.4196854829788208 }, { "epoch": 1.5768100454181138, "step": 5902, "train/loss_ctc": 1.0948106050491333, "train/loss_error": 0.5072765946388245, "train/loss_total": 0.6247833967208862 }, { "epoch": 1.5770772107934812, "step": 5903, "train/loss_ctc": 0.4650520384311676, "train/loss_error": 0.3885250389575958, "train/loss_total": 0.40383046865463257 }, { "epoch": 1.5773443761688486, "step": 5904, "train/loss_ctc": 0.47208863496780396, "train/loss_error": 0.4350249767303467, "train/loss_total": 0.44243770837783813 }, { "epoch": 1.5776115415442158, "step": 5905, "train/loss_ctc": 1.153953194618225, "train/loss_error": 0.5322776436805725, "train/loss_total": 0.656612753868103 }, { "epoch": 1.5778787069195832, "step": 5906, "train/loss_ctc": 0.9980287551879883, "train/loss_error": 0.5231760740280151, "train/loss_total": 0.6181465983390808 }, { "epoch": 1.5781458722949506, "step": 5907, "train/loss_ctc": 0.8353413939476013, "train/loss_error": 0.5512488484382629, 
"train/loss_total": 0.6080673933029175 }, { "epoch": 1.5784130376703178, "step": 5908, "train/loss_ctc": 0.4215032458305359, "train/loss_error": 0.4132128953933716, "train/loss_total": 0.4148709774017334 }, { "epoch": 1.5786802030456852, "step": 5909, "train/loss_ctc": 0.9847202897071838, "train/loss_error": 0.5137943625450134, "train/loss_total": 0.6079795360565186 }, { "epoch": 1.5789473684210527, "grad_norm": 2.261688709259033, "learning_rate": 2.0534330750734703e-05, "loss": 0.5355, "step": 5910 }, { "epoch": 1.5789473684210527, "step": 5910, "train/loss_ctc": 1.7273753881454468, "train/loss_error": 0.49086156487464905, "train/loss_total": 0.7381643056869507 }, { "epoch": 1.5792145337964199, "step": 5911, "train/loss_ctc": 0.7716805934906006, "train/loss_error": 0.46647873520851135, "train/loss_total": 0.5275191068649292 }, { "epoch": 1.5794816991717875, "step": 5912, "train/loss_ctc": 0.8398208618164062, "train/loss_error": 0.46727487444877625, "train/loss_total": 0.5417841076850891 }, { "epoch": 1.5797488645471547, "step": 5913, "train/loss_ctc": 1.0368120670318604, "train/loss_error": 0.4918227791786194, "train/loss_total": 0.6008206605911255 }, { "epoch": 1.5800160299225219, "step": 5914, "train/loss_ctc": 0.8309495449066162, "train/loss_error": 0.46851128339767456, "train/loss_total": 0.5409989356994629 }, { "epoch": 1.5802831952978895, "step": 5915, "train/loss_ctc": 0.9166048765182495, "train/loss_error": 0.49970462918281555, "train/loss_total": 0.5830847024917603 }, { "epoch": 1.5805503606732567, "step": 5916, "train/loss_ctc": 1.2912895679473877, "train/loss_error": 0.5025288462638855, "train/loss_total": 0.6602810025215149 }, { "epoch": 1.5808175260486241, "step": 5917, "train/loss_ctc": 0.581480085849762, "train/loss_error": 0.514367938041687, "train/loss_total": 0.527790367603302 }, { "epoch": 1.5810846914239916, "step": 5918, "train/loss_ctc": 1.2511093616485596, "train/loss_error": 0.43941617012023926, "train/loss_total": 0.6017547845840454 }, { 
"epoch": 1.5813518567993587, "step": 5919, "train/loss_ctc": 0.45813125371932983, "train/loss_error": 0.45843639969825745, "train/loss_total": 0.45837539434432983 }, { "epoch": 1.5816190221747262, "grad_norm": 1.6213362216949463, "learning_rate": 2.0518300828212664e-05, "loss": 0.5781, "step": 5920 }, { "epoch": 1.5816190221747262, "step": 5920, "train/loss_ctc": 1.0049736499786377, "train/loss_error": 0.4820575714111328, "train/loss_total": 0.5866408348083496 }, { "epoch": 1.5818861875500936, "step": 5921, "train/loss_ctc": 1.2987768650054932, "train/loss_error": 0.43893903493881226, "train/loss_total": 0.6109066009521484 }, { "epoch": 1.5821533529254608, "step": 5922, "train/loss_ctc": 0.22023294866085052, "train/loss_error": 0.47486528754234314, "train/loss_total": 0.4239388108253479 }, { "epoch": 1.5824205183008282, "step": 5923, "train/loss_ctc": 0.5820282101631165, "train/loss_error": 0.47366270422935486, "train/loss_total": 0.49533581733703613 }, { "epoch": 1.5826876836761956, "step": 5924, "train/loss_ctc": 1.379737377166748, "train/loss_error": 0.4817304015159607, "train/loss_total": 0.6613317728042603 }, { "epoch": 1.5829548490515628, "step": 5925, "train/loss_ctc": 0.5885477066040039, "train/loss_error": 0.4378221035003662, "train/loss_total": 0.4679672420024872 }, { "epoch": 1.5832220144269302, "step": 5926, "train/loss_ctc": 0.7739861011505127, "train/loss_error": 0.43597233295440674, "train/loss_total": 0.5035750865936279 }, { "epoch": 1.5834891798022976, "step": 5927, "train/loss_ctc": 0.5884535312652588, "train/loss_error": 0.4076332151889801, "train/loss_total": 0.4437972903251648 }, { "epoch": 1.5837563451776648, "step": 5928, "train/loss_ctc": 0.39768946170806885, "train/loss_error": 0.44369447231292725, "train/loss_total": 0.4344934821128845 }, { "epoch": 1.5840235105530325, "step": 5929, "train/loss_ctc": 0.8476256728172302, "train/loss_error": 0.5035023093223572, "train/loss_total": 0.5723269581794739 }, { "epoch": 1.5842906759283997, 
"grad_norm": 4.093037128448486, "learning_rate": 2.0502270905690626e-05, "loss": 0.52, "step": 5930 }, { "epoch": 1.5842906759283997, "step": 5930, "train/loss_ctc": 0.8338731527328491, "train/loss_error": 0.5963470339775085, "train/loss_total": 0.6438522338867188 }, { "epoch": 1.5845578413037669, "step": 5931, "train/loss_ctc": 0.6036719083786011, "train/loss_error": 0.4532517194747925, "train/loss_total": 0.4833357632160187 }, { "epoch": 1.5848250066791345, "step": 5932, "train/loss_ctc": 0.9667089581489563, "train/loss_error": 0.45763763785362244, "train/loss_total": 0.5594519376754761 }, { "epoch": 1.5850921720545017, "step": 5933, "train/loss_ctc": 0.6708055734634399, "train/loss_error": 0.4858621656894684, "train/loss_total": 0.5228508710861206 }, { "epoch": 1.5853593374298691, "step": 5934, "train/loss_ctc": 0.7473471164703369, "train/loss_error": 0.4313126504421234, "train/loss_total": 0.49451953172683716 }, { "epoch": 1.5856265028052365, "step": 5935, "train/loss_ctc": 0.250593364238739, "train/loss_error": 0.4930073916912079, "train/loss_total": 0.4445246160030365 }, { "epoch": 1.5858936681806037, "step": 5936, "train/loss_ctc": 1.1830120086669922, "train/loss_error": 0.4725179374217987, "train/loss_total": 0.6146167516708374 }, { "epoch": 1.5861608335559712, "step": 5937, "train/loss_ctc": 0.524537205696106, "train/loss_error": 0.43851423263549805, "train/loss_total": 0.45571884512901306 }, { "epoch": 1.5864279989313386, "step": 5938, "train/loss_ctc": 0.6206238865852356, "train/loss_error": 0.4218832552433014, "train/loss_total": 0.4616314172744751 }, { "epoch": 1.5866951643067058, "step": 5939, "train/loss_ctc": 0.1688719540834427, "train/loss_error": 0.4514337480068207, "train/loss_total": 0.3949214220046997 }, { "epoch": 1.5869623296820732, "grad_norm": 4.467155456542969, "learning_rate": 2.0486240983168584e-05, "loss": 0.5075, "step": 5940 }, { "epoch": 1.5869623296820732, "step": 5940, "train/loss_ctc": 1.37384831905365, "train/loss_error": 
0.47048860788345337, "train/loss_total": 0.6511605978012085 }, { "epoch": 1.5872294950574406, "step": 5941, "train/loss_ctc": 1.0275052785873413, "train/loss_error": 0.4268701672554016, "train/loss_total": 0.5469971895217896 }, { "epoch": 1.5874966604328078, "step": 5942, "train/loss_ctc": 0.7511488199234009, "train/loss_error": 0.4819566309452057, "train/loss_total": 0.5357950925827026 }, { "epoch": 1.5877638258081752, "step": 5943, "train/loss_ctc": 0.43229514360427856, "train/loss_error": 0.4351305067539215, "train/loss_total": 0.43456345796585083 }, { "epoch": 1.5880309911835426, "step": 5944, "train/loss_ctc": 0.8455795645713806, "train/loss_error": 0.4560365676879883, "train/loss_total": 0.5339452028274536 }, { "epoch": 1.5882981565589098, "step": 5945, "train/loss_ctc": 1.3671367168426514, "train/loss_error": 0.4416775405406952, "train/loss_total": 0.6267693638801575 }, { "epoch": 1.5885653219342775, "step": 5946, "train/loss_ctc": 0.45504292845726013, "train/loss_error": 0.43981024622917175, "train/loss_total": 0.4428567886352539 }, { "epoch": 1.5888324873096447, "step": 5947, "train/loss_ctc": 0.4657820463180542, "train/loss_error": 0.38555586338043213, "train/loss_total": 0.4016011357307434 }, { "epoch": 1.589099652685012, "step": 5948, "train/loss_ctc": 0.7213872671127319, "train/loss_error": 0.5163252353668213, "train/loss_total": 0.5573376417160034 }, { "epoch": 1.5893668180603795, "step": 5949, "train/loss_ctc": 1.0034229755401611, "train/loss_error": 0.468176931142807, "train/loss_total": 0.5752261281013489 }, { "epoch": 1.5896339834357467, "grad_norm": 1.439244031906128, "learning_rate": 2.0470211060646542e-05, "loss": 0.5306, "step": 5950 }, { "epoch": 1.5896339834357467, "step": 5950, "train/loss_ctc": 1.0386981964111328, "train/loss_error": 0.43142226338386536, "train/loss_total": 0.5528774261474609 }, { "epoch": 1.589901148811114, "step": 5951, "train/loss_ctc": 0.2832421064376831, "train/loss_error": 0.4074249863624573, "train/loss_total": 
0.3825884163379669 }, { "epoch": 1.5901683141864815, "step": 5952, "train/loss_ctc": 1.002947211265564, "train/loss_error": 0.41449040174484253, "train/loss_total": 0.5321817398071289 }, { "epoch": 1.5904354795618487, "step": 5953, "train/loss_ctc": 0.8736175298690796, "train/loss_error": 0.4997371435165405, "train/loss_total": 0.5745131969451904 }, { "epoch": 1.5907026449372161, "step": 5954, "train/loss_ctc": 1.0989058017730713, "train/loss_error": 0.38597801327705383, "train/loss_total": 0.5285636186599731 }, { "epoch": 1.5909698103125836, "step": 5955, "train/loss_ctc": 0.8845319747924805, "train/loss_error": 0.5762394070625305, "train/loss_total": 0.6378979086875916 }, { "epoch": 1.5912369756879507, "step": 5956, "train/loss_ctc": 0.4246128499507904, "train/loss_error": 0.46852347254753113, "train/loss_total": 0.45974135398864746 }, { "epoch": 1.5915041410633182, "step": 5957, "train/loss_ctc": 0.8348128795623779, "train/loss_error": 0.3767004609107971, "train/loss_total": 0.4683229327201843 }, { "epoch": 1.5917713064386856, "step": 5958, "train/loss_ctc": 0.7091145515441895, "train/loss_error": 0.5740495324134827, "train/loss_total": 0.601062536239624 }, { "epoch": 1.5920384718140528, "step": 5959, "train/loss_ctc": 0.6369538307189941, "train/loss_error": 0.4045058488845825, "train/loss_total": 0.45099544525146484 }, { "epoch": 1.5923056371894202, "grad_norm": 2.214142084121704, "learning_rate": 2.04541811381245e-05, "loss": 0.5189, "step": 5960 }, { "epoch": 1.5923056371894202, "step": 5960, "train/loss_ctc": 0.5547970533370972, "train/loss_error": 0.43453559279441833, "train/loss_total": 0.4585878849029541 }, { "epoch": 1.5925728025647876, "step": 5961, "train/loss_ctc": 0.5796909332275391, "train/loss_error": 0.4605664312839508, "train/loss_total": 0.48439133167266846 }, { "epoch": 1.5928399679401548, "step": 5962, "train/loss_ctc": 1.247915506362915, "train/loss_error": 0.4713332951068878, "train/loss_total": 0.6266497373580933 }, { "epoch": 
1.5931071333155225, "step": 5963, "train/loss_ctc": 0.5255464315414429, "train/loss_error": 0.511440634727478, "train/loss_total": 0.514261782169342 }, { "epoch": 1.5933742986908896, "step": 5964, "train/loss_ctc": 0.472854882478714, "train/loss_error": 0.4318222999572754, "train/loss_total": 0.4400288462638855 }, { "epoch": 1.593641464066257, "step": 5965, "train/loss_ctc": 1.232595682144165, "train/loss_error": 0.42867133021354675, "train/loss_total": 0.5894562005996704 }, { "epoch": 1.5939086294416245, "step": 5966, "train/loss_ctc": 0.7922999262809753, "train/loss_error": 0.45747610926628113, "train/loss_total": 0.5244408845901489 }, { "epoch": 1.5941757948169917, "step": 5967, "train/loss_ctc": 0.51210618019104, "train/loss_error": 0.4921877980232239, "train/loss_total": 0.4961714744567871 }, { "epoch": 1.594442960192359, "step": 5968, "train/loss_ctc": 0.4610884487628937, "train/loss_error": 0.49579307436943054, "train/loss_total": 0.4888521432876587 }, { "epoch": 1.5947101255677265, "step": 5969, "train/loss_ctc": 0.9394167065620422, "train/loss_error": 0.49091845750808716, "train/loss_total": 0.580618143081665 }, { "epoch": 1.5949772909430937, "grad_norm": 1.4034264087677002, "learning_rate": 2.0438151215602458e-05, "loss": 0.5203, "step": 5970 }, { "epoch": 1.5949772909430937, "step": 5970, "train/loss_ctc": 0.5596132874488831, "train/loss_error": 0.47261279821395874, "train/loss_total": 0.49001288414001465 }, { "epoch": 1.5952444563184611, "step": 5971, "train/loss_ctc": 0.6067080497741699, "train/loss_error": 0.507052481174469, "train/loss_total": 0.5269836187362671 }, { "epoch": 1.5955116216938285, "step": 5972, "train/loss_ctc": 0.3988029658794403, "train/loss_error": 0.5421503186225891, "train/loss_total": 0.5134808421134949 }, { "epoch": 1.5957787870691957, "step": 5973, "train/loss_ctc": 1.5068109035491943, "train/loss_error": 0.45830056071281433, "train/loss_total": 0.6680026054382324 }, { "epoch": 1.5960459524445632, "step": 5974, 
"train/loss_ctc": 1.9742271900177002, "train/loss_error": 0.5314924716949463, "train/loss_total": 0.8200394511222839 }, { "epoch": 1.5963131178199306, "step": 5975, "train/loss_ctc": 0.7684749960899353, "train/loss_error": 0.45866042375564575, "train/loss_total": 0.5206233263015747 }, { "epoch": 1.5965802831952978, "step": 5976, "train/loss_ctc": 0.9537531733512878, "train/loss_error": 0.47893214225769043, "train/loss_total": 0.5738963484764099 }, { "epoch": 1.5968474485706654, "step": 5977, "train/loss_ctc": 0.6613110899925232, "train/loss_error": 0.4552910029888153, "train/loss_total": 0.49649500846862793 }, { "epoch": 1.5971146139460326, "step": 5978, "train/loss_ctc": 1.0784680843353271, "train/loss_error": 0.4550504982471466, "train/loss_total": 0.5797340273857117 }, { "epoch": 1.5973817793213998, "step": 5979, "train/loss_ctc": 0.883877694606781, "train/loss_error": 0.4694346785545349, "train/loss_total": 0.5523232817649841 }, { "epoch": 1.5976489446967674, "grad_norm": 2.390901803970337, "learning_rate": 2.0422121293080416e-05, "loss": 0.5742, "step": 5980 }, { "epoch": 1.5976489446967674, "step": 5980, "train/loss_ctc": 1.2829105854034424, "train/loss_error": 0.4803272485733032, "train/loss_total": 0.64084392786026 }, { "epoch": 1.5979161100721346, "step": 5981, "train/loss_ctc": 0.42565441131591797, "train/loss_error": 0.4801010191440582, "train/loss_total": 0.4692116975784302 }, { "epoch": 1.598183275447502, "step": 5982, "train/loss_ctc": 0.5789954662322998, "train/loss_error": 0.4666495621204376, "train/loss_total": 0.489118754863739 }, { "epoch": 1.5984504408228695, "step": 5983, "train/loss_ctc": 0.5753426551818848, "train/loss_error": 0.4842613935470581, "train/loss_total": 0.5024776458740234 }, { "epoch": 1.5987176061982367, "step": 5984, "train/loss_ctc": 0.20764005184173584, "train/loss_error": 0.37852081656455994, "train/loss_total": 0.3443446755409241 }, { "epoch": 1.598984771573604, "step": 5985, "train/loss_ctc": 0.5620561838150024, 
"train/loss_error": 0.5026957392692566, "train/loss_total": 0.5145678520202637 }, { "epoch": 1.5992519369489715, "step": 5986, "train/loss_ctc": 0.5931710600852966, "train/loss_error": 0.4693506062030792, "train/loss_total": 0.4941147267818451 }, { "epoch": 1.5995191023243387, "step": 5987, "train/loss_ctc": 0.8143784403800964, "train/loss_error": 0.5535770058631897, "train/loss_total": 0.6057373285293579 }, { "epoch": 1.599786267699706, "step": 5988, "train/loss_ctc": 0.9252487421035767, "train/loss_error": 0.42143940925598145, "train/loss_total": 0.5222012996673584 }, { "epoch": 1.6000534330750735, "step": 5989, "train/loss_ctc": 0.7621682286262512, "train/loss_error": 0.451844722032547, "train/loss_total": 0.5139094591140747 }, { "epoch": 1.6003205984504407, "grad_norm": 2.350083351135254, "learning_rate": 2.0406091370558378e-05, "loss": 0.5097, "step": 5990 }, { "epoch": 1.6003205984504407, "step": 5990, "train/loss_ctc": 0.4906279146671295, "train/loss_error": 0.4895309805870056, "train/loss_total": 0.48975035548210144 }, { "epoch": 1.6005877638258081, "step": 5991, "train/loss_ctc": 0.4380837678909302, "train/loss_error": 0.4235905408859253, "train/loss_total": 0.4264891743659973 }, { "epoch": 1.6008549292011756, "step": 5992, "train/loss_ctc": 0.5680895447731018, "train/loss_error": 0.4643605649471283, "train/loss_total": 0.48510634899139404 }, { "epoch": 1.6011220945765428, "step": 5993, "train/loss_ctc": 1.1461012363433838, "train/loss_error": 0.4379904270172119, "train/loss_total": 0.5796126127243042 }, { "epoch": 1.6013892599519104, "step": 5994, "train/loss_ctc": 0.7836073040962219, "train/loss_error": 0.46934443712234497, "train/loss_total": 0.5321969985961914 }, { "epoch": 1.6016564253272776, "step": 5995, "train/loss_ctc": 0.46873247623443604, "train/loss_error": 0.4300661087036133, "train/loss_total": 0.4377993941307068 }, { "epoch": 1.6019235907026448, "step": 5996, "train/loss_ctc": 1.265977144241333, "train/loss_error": 0.47263655066490173, 
"train/loss_total": 0.6313046813011169 }, { "epoch": 1.6021907560780124, "step": 5997, "train/loss_ctc": 1.031886100769043, "train/loss_error": 0.4831756353378296, "train/loss_total": 0.5929177403450012 }, { "epoch": 1.6024579214533796, "step": 5998, "train/loss_ctc": 0.5322213172912598, "train/loss_error": 0.4638780355453491, "train/loss_total": 0.47754669189453125 }, { "epoch": 1.602725086828747, "step": 5999, "train/loss_ctc": 0.7436435222625732, "train/loss_error": 0.48876142501831055, "train/loss_total": 0.53973788022995 }, { "epoch": 1.6029922522041145, "grad_norm": 1.6481372117996216, "learning_rate": 2.0390061448036336e-05, "loss": 0.5192, "step": 6000 }, { "epoch": 1.6029922522041145, "step": 6000, "train/loss_ctc": 0.5024752020835876, "train/loss_error": 0.4904945492744446, "train/loss_total": 0.49289068579673767 }, { "epoch": 1.6032594175794816, "step": 6001, "train/loss_ctc": 1.2284560203552246, "train/loss_error": 0.5570632815361023, "train/loss_total": 0.6913418769836426 }, { "epoch": 1.603526582954849, "step": 6002, "train/loss_ctc": 0.7584400177001953, "train/loss_error": 0.446868896484375, "train/loss_total": 0.5091831684112549 }, { "epoch": 1.6037937483302165, "step": 6003, "train/loss_ctc": 0.8530656695365906, "train/loss_error": 0.4495416283607483, "train/loss_total": 0.5302464365959167 }, { "epoch": 1.6040609137055837, "step": 6004, "train/loss_ctc": 0.5986770391464233, "train/loss_error": 0.45607390999794006, "train/loss_total": 0.48459455370903015 }, { "epoch": 1.604328079080951, "step": 6005, "train/loss_ctc": 1.658034086227417, "train/loss_error": 0.4885748624801636, "train/loss_total": 0.7224667072296143 }, { "epoch": 1.6045952444563185, "step": 6006, "train/loss_ctc": 0.7144611477851868, "train/loss_error": 0.37625986337661743, "train/loss_total": 0.44390010833740234 }, { "epoch": 1.6048624098316857, "step": 6007, "train/loss_ctc": 0.6728532314300537, "train/loss_error": 0.44994622468948364, "train/loss_total": 0.4945276379585266 }, { 
"epoch": 1.6051295752070531, "step": 6008, "train/loss_ctc": 1.4725151062011719, "train/loss_error": 0.49644792079925537, "train/loss_total": 0.6916613578796387 }, { "epoch": 1.6053967405824205, "step": 6009, "train/loss_ctc": 0.7711373567581177, "train/loss_error": 0.5326830148696899, "train/loss_total": 0.5803738832473755 }, { "epoch": 1.6056639059577877, "grad_norm": 1.9005740880966187, "learning_rate": 2.0374031525514294e-05, "loss": 0.5641, "step": 6010 }, { "epoch": 1.6056639059577877, "step": 6010, "train/loss_ctc": 0.9642424583435059, "train/loss_error": 0.5336339473724365, "train/loss_total": 0.6197556257247925 }, { "epoch": 1.6059310713331554, "step": 6011, "train/loss_ctc": 1.192055106163025, "train/loss_error": 0.5068488717079163, "train/loss_total": 0.6438901424407959 }, { "epoch": 1.6061982367085226, "step": 6012, "train/loss_ctc": 0.47960028052330017, "train/loss_error": 0.45466044545173645, "train/loss_total": 0.4596484303474426 }, { "epoch": 1.6064654020838898, "step": 6013, "train/loss_ctc": 0.538474440574646, "train/loss_error": 0.4360620379447937, "train/loss_total": 0.45654451847076416 }, { "epoch": 1.6067325674592574, "step": 6014, "train/loss_ctc": 1.5344579219818115, "train/loss_error": 0.45324891805648804, "train/loss_total": 0.6694906949996948 }, { "epoch": 1.6069997328346246, "step": 6015, "train/loss_ctc": 0.41026365756988525, "train/loss_error": 0.43604907393455505, "train/loss_total": 0.4308919906616211 }, { "epoch": 1.607266898209992, "step": 6016, "train/loss_ctc": 0.4154773950576782, "train/loss_error": 0.43948814272880554, "train/loss_total": 0.43468600511550903 }, { "epoch": 1.6075340635853594, "step": 6017, "train/loss_ctc": 0.6585385203361511, "train/loss_error": 0.4448085427284241, "train/loss_total": 0.48755455017089844 }, { "epoch": 1.6078012289607266, "step": 6018, "train/loss_ctc": 1.4862699508666992, "train/loss_error": 0.4798775613307953, "train/loss_total": 0.6811560392379761 }, { "epoch": 1.608068394336094, "step": 
6019, "train/loss_ctc": 0.642986536026001, "train/loss_error": 0.4400065243244171, "train/loss_total": 0.480602502822876 }, { "epoch": 1.6083355597114615, "grad_norm": 2.136563539505005, "learning_rate": 2.0358001602992252e-05, "loss": 0.5364, "step": 6020 }, { "epoch": 1.6083355597114615, "step": 6020, "train/loss_ctc": 0.752724289894104, "train/loss_error": 0.396774560213089, "train/loss_total": 0.4679645299911499 }, { "epoch": 1.6086027250868287, "step": 6021, "train/loss_ctc": 1.2875242233276367, "train/loss_error": 0.47223711013793945, "train/loss_total": 0.6352945566177368 }, { "epoch": 1.608869890462196, "step": 6022, "train/loss_ctc": 0.5835148096084595, "train/loss_error": 0.5010108351707458, "train/loss_total": 0.5175116062164307 }, { "epoch": 1.6091370558375635, "step": 6023, "train/loss_ctc": 0.8158818483352661, "train/loss_error": 0.5029796361923218, "train/loss_total": 0.5655601024627686 }, { "epoch": 1.6094042212129307, "step": 6024, "train/loss_ctc": 1.590501308441162, "train/loss_error": 0.4093545377254486, "train/loss_total": 0.6455838680267334 }, { "epoch": 1.6096713865882981, "step": 6025, "train/loss_ctc": 0.7664840817451477, "train/loss_error": 0.4313814640045166, "train/loss_total": 0.4984019994735718 }, { "epoch": 1.6099385519636655, "step": 6026, "train/loss_ctc": 0.2905723452568054, "train/loss_error": 0.4693238437175751, "train/loss_total": 0.43357354402542114 }, { "epoch": 1.6102057173390327, "step": 6027, "train/loss_ctc": 1.280681848526001, "train/loss_error": 0.4607584476470947, "train/loss_total": 0.6247431635856628 }, { "epoch": 1.6104728827144004, "step": 6028, "train/loss_ctc": 0.33997106552124023, "train/loss_error": 0.48285186290740967, "train/loss_total": 0.4542756974697113 }, { "epoch": 1.6107400480897676, "step": 6029, "train/loss_ctc": 0.6222082376480103, "train/loss_error": 0.5068718194961548, "train/loss_total": 0.5299391150474548 }, { "epoch": 1.6110072134651348, "grad_norm": 2.4510772228240967, "learning_rate": 
2.034197168047021e-05, "loss": 0.5373, "step": 6030 }, { "epoch": 1.6110072134651348, "step": 6030, "train/loss_ctc": 0.5401511192321777, "train/loss_error": 0.42555156350135803, "train/loss_total": 0.4484714865684509 }, { "epoch": 1.6112743788405024, "step": 6031, "train/loss_ctc": 1.2990164756774902, "train/loss_error": 0.4766843020915985, "train/loss_total": 0.641150712966919 }, { "epoch": 1.6115415442158696, "step": 6032, "train/loss_ctc": 0.6792734861373901, "train/loss_error": 0.4990472197532654, "train/loss_total": 0.5350924730300903 }, { "epoch": 1.611808709591237, "step": 6033, "train/loss_ctc": 0.5811967849731445, "train/loss_error": 0.42402520775794983, "train/loss_total": 0.4554595351219177 }, { "epoch": 1.6120758749666044, "step": 6034, "train/loss_ctc": 0.7553355693817139, "train/loss_error": 0.4516933560371399, "train/loss_total": 0.5124217867851257 }, { "epoch": 1.6123430403419716, "step": 6035, "train/loss_ctc": 0.7285250425338745, "train/loss_error": 0.5279330015182495, "train/loss_total": 0.5680514574050903 }, { "epoch": 1.612610205717339, "step": 6036, "train/loss_ctc": 1.100064754486084, "train/loss_error": 0.4869060218334198, "train/loss_total": 0.6095377802848816 }, { "epoch": 1.6128773710927065, "step": 6037, "train/loss_ctc": 0.5893990993499756, "train/loss_error": 0.43992146849632263, "train/loss_total": 0.46981698274612427 }, { "epoch": 1.6131445364680737, "step": 6038, "train/loss_ctc": 0.4510694742202759, "train/loss_error": 0.40263113379478455, "train/loss_total": 0.41231879591941833 }, { "epoch": 1.613411701843441, "step": 6039, "train/loss_ctc": 0.7363511919975281, "train/loss_error": 0.4311763644218445, "train/loss_total": 0.49221134185791016 }, { "epoch": 1.6136788672188085, "grad_norm": 4.072748184204102, "learning_rate": 2.0325941757948168e-05, "loss": 0.5145, "step": 6040 }, { "epoch": 1.6136788672188085, "step": 6040, "train/loss_ctc": 0.8215646743774414, "train/loss_error": 0.44432365894317627, "train/loss_total": 
0.5197718739509583 }, { "epoch": 1.6139460325941757, "step": 6041, "train/loss_ctc": 1.443993091583252, "train/loss_error": 0.5316972136497498, "train/loss_total": 0.7141563892364502 }, { "epoch": 1.614213197969543, "step": 6042, "train/loss_ctc": 0.33586543798446655, "train/loss_error": 0.43286600708961487, "train/loss_total": 0.4134659171104431 }, { "epoch": 1.6144803633449105, "step": 6043, "train/loss_ctc": 1.1785261631011963, "train/loss_error": 0.42930954694747925, "train/loss_total": 0.5791528820991516 }, { "epoch": 1.6147475287202777, "step": 6044, "train/loss_ctc": 0.5675633549690247, "train/loss_error": 0.4301116168498993, "train/loss_total": 0.45760196447372437 }, { "epoch": 1.6150146940956454, "step": 6045, "train/loss_ctc": 0.8161651492118835, "train/loss_error": 0.5251566767692566, "train/loss_total": 0.5833583474159241 }, { "epoch": 1.6152818594710125, "step": 6046, "train/loss_ctc": 0.5387197732925415, "train/loss_error": 0.42440882325172424, "train/loss_total": 0.4472710192203522 }, { "epoch": 1.6155490248463797, "step": 6047, "train/loss_ctc": 0.5625997185707092, "train/loss_error": 0.45617884397506714, "train/loss_total": 0.477463036775589 }, { "epoch": 1.6158161902217474, "step": 6048, "train/loss_ctc": 0.7127718925476074, "train/loss_error": 0.3867070972919464, "train/loss_total": 0.4519200921058655 }, { "epoch": 1.6160833555971146, "step": 6049, "train/loss_ctc": 1.1192972660064697, "train/loss_error": 0.5459132790565491, "train/loss_total": 0.6605900526046753 }, { "epoch": 1.616350520972482, "grad_norm": 2.2422561645507812, "learning_rate": 2.030991183542613e-05, "loss": 0.5305, "step": 6050 }, { "epoch": 1.616350520972482, "step": 6050, "train/loss_ctc": 0.38060542941093445, "train/loss_error": 0.49160149693489075, "train/loss_total": 0.4694022834300995 }, { "epoch": 1.6166176863478494, "step": 6051, "train/loss_ctc": 1.1404163837432861, "train/loss_error": 0.43223127722740173, "train/loss_total": 0.5738682746887207 }, { "epoch": 
1.6168848517232166, "step": 6052, "train/loss_ctc": 0.6891706585884094, "train/loss_error": 0.45935600996017456, "train/loss_total": 0.5053189396858215 }, { "epoch": 1.617152017098584, "step": 6053, "train/loss_ctc": 0.32801228761672974, "train/loss_error": 0.4297666847705841, "train/loss_total": 0.4094158113002777 }, { "epoch": 1.6174191824739514, "step": 6054, "train/loss_ctc": 0.28264808654785156, "train/loss_error": 0.5039035677909851, "train/loss_total": 0.45965248346328735 }, { "epoch": 1.6176863478493186, "step": 6055, "train/loss_ctc": 0.6125212907791138, "train/loss_error": 0.4664846658706665, "train/loss_total": 0.49569201469421387 }, { "epoch": 1.617953513224686, "step": 6056, "train/loss_ctc": 0.4393444359302521, "train/loss_error": 0.48454806208610535, "train/loss_total": 0.47550734877586365 }, { "epoch": 1.6182206786000535, "step": 6057, "train/loss_ctc": 0.4666830897331238, "train/loss_error": 0.436687707901001, "train/loss_total": 0.4426867961883545 }, { "epoch": 1.6184878439754207, "step": 6058, "train/loss_ctc": 0.6106119155883789, "train/loss_error": 0.41294756531715393, "train/loss_total": 0.4524804353713989 }, { "epoch": 1.618755009350788, "step": 6059, "train/loss_ctc": 0.45992520451545715, "train/loss_error": 0.4771709442138672, "train/loss_total": 0.47372180223464966 }, { "epoch": 1.6190221747261555, "grad_norm": 2.027082681655884, "learning_rate": 2.0293881912904088e-05, "loss": 0.4758, "step": 6060 }, { "epoch": 1.6190221747261555, "step": 6060, "train/loss_ctc": 1.2792885303497314, "train/loss_error": 0.4800233840942383, "train/loss_total": 0.6398764252662659 }, { "epoch": 1.6192893401015227, "step": 6061, "train/loss_ctc": 0.7423929572105408, "train/loss_error": 0.46856552362442017, "train/loss_total": 0.5233310461044312 }, { "epoch": 1.6195565054768903, "step": 6062, "train/loss_ctc": 0.8967189788818359, "train/loss_error": 0.5661494135856628, "train/loss_total": 0.6322633624076843 }, { "epoch": 1.6198236708522575, "step": 6063, 
"train/loss_ctc": 0.6286917924880981, "train/loss_error": 0.42029571533203125, "train/loss_total": 0.4619749188423157 }, { "epoch": 1.620090836227625, "step": 6064, "train/loss_ctc": 0.6894213557243347, "train/loss_error": 0.4369293451309204, "train/loss_total": 0.4874277710914612 }, { "epoch": 1.6203580016029924, "step": 6065, "train/loss_ctc": 0.7586473226547241, "train/loss_error": 0.43996885418891907, "train/loss_total": 0.5037045478820801 }, { "epoch": 1.6206251669783596, "step": 6066, "train/loss_ctc": 0.43308621644973755, "train/loss_error": 0.5052856802940369, "train/loss_total": 0.49084579944610596 }, { "epoch": 1.620892332353727, "step": 6067, "train/loss_ctc": 0.7483451962471008, "train/loss_error": 0.48777586221694946, "train/loss_total": 0.5398897528648376 }, { "epoch": 1.6211594977290944, "step": 6068, "train/loss_ctc": 0.9878952503204346, "train/loss_error": 0.49951285123825073, "train/loss_total": 0.5971893072128296 }, { "epoch": 1.6214266631044616, "step": 6069, "train/loss_ctc": 1.0018507242202759, "train/loss_error": 0.41506385803222656, "train/loss_total": 0.5324212312698364 }, { "epoch": 1.621693828479829, "grad_norm": 1.6728336811065674, "learning_rate": 2.0277851990382046e-05, "loss": 0.5409, "step": 6070 }, { "epoch": 1.621693828479829, "step": 6070, "train/loss_ctc": 0.9630059599876404, "train/loss_error": 0.44628670811653137, "train/loss_total": 0.5496305823326111 }, { "epoch": 1.6219609938551964, "step": 6071, "train/loss_ctc": 0.9778018593788147, "train/loss_error": 0.46655017137527466, "train/loss_total": 0.5688005089759827 }, { "epoch": 1.6222281592305636, "step": 6072, "train/loss_ctc": 1.2815605401992798, "train/loss_error": 0.49565190076828003, "train/loss_total": 0.6528336405754089 }, { "epoch": 1.622495324605931, "step": 6073, "train/loss_ctc": 0.44433677196502686, "train/loss_error": 0.44342711567878723, "train/loss_total": 0.4436090588569641 }, { "epoch": 1.6227624899812985, "step": 6074, "train/loss_ctc": 0.5478572249412537, 
"train/loss_error": 0.4685955047607422, "train/loss_total": 0.4844478666782379 }, { "epoch": 1.6230296553566657, "step": 6075, "train/loss_ctc": 1.0310661792755127, "train/loss_error": 0.46204397082328796, "train/loss_total": 0.5758484601974487 }, { "epoch": 1.623296820732033, "step": 6076, "train/loss_ctc": 1.024327039718628, "train/loss_error": 0.480326771736145, "train/loss_total": 0.5891268253326416 }, { "epoch": 1.6235639861074005, "step": 6077, "train/loss_ctc": 0.7427366375923157, "train/loss_error": 0.4566609859466553, "train/loss_total": 0.5138761401176453 }, { "epoch": 1.6238311514827677, "step": 6078, "train/loss_ctc": 0.4776815176010132, "train/loss_error": 0.4468275308609009, "train/loss_total": 0.4529983401298523 }, { "epoch": 1.6240983168581353, "step": 6079, "train/loss_ctc": 0.777957558631897, "train/loss_error": 0.45952922105789185, "train/loss_total": 0.5232149362564087 }, { "epoch": 1.6243654822335025, "grad_norm": 1.5730773210525513, "learning_rate": 2.0261822067860004e-05, "loss": 0.5354, "step": 6080 }, { "epoch": 1.6243654822335025, "step": 6080, "train/loss_ctc": 1.3668885231018066, "train/loss_error": 0.5189009308815002, "train/loss_total": 0.6884984970092773 }, { "epoch": 1.62463264760887, "step": 6081, "train/loss_ctc": 0.5832171440124512, "train/loss_error": 0.4596099257469177, "train/loss_total": 0.4843313694000244 }, { "epoch": 1.6248998129842374, "step": 6082, "train/loss_ctc": 0.605983555316925, "train/loss_error": 0.4182492792606354, "train/loss_total": 0.45579615235328674 }, { "epoch": 1.6251669783596046, "step": 6083, "train/loss_ctc": 0.34478601813316345, "train/loss_error": 0.5006887316703796, "train/loss_total": 0.46950820088386536 }, { "epoch": 1.625434143734972, "step": 6084, "train/loss_ctc": 1.407538890838623, "train/loss_error": 0.5068089365959167, "train/loss_total": 0.6869549751281738 }, { "epoch": 1.6257013091103394, "step": 6085, "train/loss_ctc": 0.4596099257469177, "train/loss_error": 0.4661155343055725, 
"train/loss_total": 0.4648144245147705 }, { "epoch": 1.6259684744857066, "step": 6086, "train/loss_ctc": 1.1183847188949585, "train/loss_error": 0.5251676440238953, "train/loss_total": 0.6438111066818237 }, { "epoch": 1.626235639861074, "step": 6087, "train/loss_ctc": 0.6781337857246399, "train/loss_error": 0.4240078032016754, "train/loss_total": 0.47483301162719727 }, { "epoch": 1.6265028052364414, "step": 6088, "train/loss_ctc": 0.6955827474594116, "train/loss_error": 0.4793221950531006, "train/loss_total": 0.5225743055343628 }, { "epoch": 1.6267699706118086, "step": 6089, "train/loss_ctc": 0.5883538126945496, "train/loss_error": 0.4762668013572693, "train/loss_total": 0.49868422746658325 }, { "epoch": 1.627037135987176, "grad_norm": 3.1141436100006104, "learning_rate": 2.0245792145337962e-05, "loss": 0.539, "step": 6090 }, { "epoch": 1.627037135987176, "step": 6090, "train/loss_ctc": 0.34049010276794434, "train/loss_error": 0.42416027188301086, "train/loss_total": 0.40742623805999756 }, { "epoch": 1.6273043013625434, "step": 6091, "train/loss_ctc": 0.37966012954711914, "train/loss_error": 0.43880128860473633, "train/loss_total": 0.4269730746746063 }, { "epoch": 1.6275714667379106, "step": 6092, "train/loss_ctc": 0.32049837708473206, "train/loss_error": 0.49200284481048584, "train/loss_total": 0.4577019512653351 }, { "epoch": 1.6278386321132783, "step": 6093, "train/loss_ctc": 0.41361936926841736, "train/loss_error": 0.42474454641342163, "train/loss_total": 0.4225195348262787 }, { "epoch": 1.6281057974886455, "step": 6094, "train/loss_ctc": 0.35776180028915405, "train/loss_error": 0.5369222164154053, "train/loss_total": 0.5010901093482971 }, { "epoch": 1.6283729628640127, "step": 6095, "train/loss_ctc": 0.4276254177093506, "train/loss_error": 0.4645794630050659, "train/loss_total": 0.4571886658668518 }, { "epoch": 1.6286401282393803, "step": 6096, "train/loss_ctc": 0.5577742457389832, "train/loss_error": 0.45940089225769043, "train/loss_total": 0.4790755808353424 
}, { "epoch": 1.6289072936147475, "step": 6097, "train/loss_ctc": 0.7304449081420898, "train/loss_error": 0.5293887257575989, "train/loss_total": 0.569599986076355 }, { "epoch": 1.629174458990115, "step": 6098, "train/loss_ctc": 1.638390302658081, "train/loss_error": 0.5481424927711487, "train/loss_total": 0.7661920785903931 }, { "epoch": 1.6294416243654823, "step": 6099, "train/loss_ctc": 0.8292579650878906, "train/loss_error": 0.44936761260032654, "train/loss_total": 0.5253456830978394 }, { "epoch": 1.6297087897408495, "grad_norm": 2.000704765319824, "learning_rate": 2.0229762222815927e-05, "loss": 0.5013, "step": 6100 }, { "epoch": 1.6297087897408495, "step": 6100, "train/loss_ctc": 0.8855751752853394, "train/loss_error": 0.44877737760543823, "train/loss_total": 0.5361369252204895 }, { "epoch": 1.629975955116217, "step": 6101, "train/loss_ctc": 0.8258923888206482, "train/loss_error": 0.45118677616119385, "train/loss_total": 0.5261279344558716 }, { "epoch": 1.6302431204915844, "step": 6102, "train/loss_ctc": 1.0510804653167725, "train/loss_error": 0.44822725653648376, "train/loss_total": 0.5687978863716125 }, { "epoch": 1.6305102858669516, "step": 6103, "train/loss_ctc": 0.42173999547958374, "train/loss_error": 0.4648981988430023, "train/loss_total": 0.4562665522098541 }, { "epoch": 1.630777451242319, "step": 6104, "train/loss_ctc": 1.1496541500091553, "train/loss_error": 0.49342086911201477, "train/loss_total": 0.6246675252914429 }, { "epoch": 1.6310446166176864, "step": 6105, "train/loss_ctc": 0.6327944993972778, "train/loss_error": 0.4820324778556824, "train/loss_total": 0.5121848583221436 }, { "epoch": 1.6313117819930536, "step": 6106, "train/loss_ctc": 0.6470736265182495, "train/loss_error": 0.5438253879547119, "train/loss_total": 0.5644750595092773 }, { "epoch": 1.631578947368421, "step": 6107, "train/loss_ctc": 0.7360379695892334, "train/loss_error": 0.4150542914867401, "train/loss_total": 0.47925102710723877 }, { "epoch": 1.6318461127437884, "step": 6108, 
"train/loss_ctc": 0.8498796224594116, "train/loss_error": 0.4534582793712616, "train/loss_total": 0.5327425599098206 }, { "epoch": 1.6321132781191556, "step": 6109, "train/loss_ctc": 0.8062534332275391, "train/loss_error": 0.51627516746521, "train/loss_total": 0.5742708444595337 }, { "epoch": 1.6323804434945233, "grad_norm": 2.561194896697998, "learning_rate": 2.0213732300293885e-05, "loss": 0.5375, "step": 6110 }, { "epoch": 1.6323804434945233, "step": 6110, "train/loss_ctc": 1.0232915878295898, "train/loss_error": 0.3958595097064972, "train/loss_total": 0.5213459134101868 }, { "epoch": 1.6326476088698905, "step": 6111, "train/loss_ctc": 0.6548898220062256, "train/loss_error": 0.442366361618042, "train/loss_total": 0.4848710894584656 }, { "epoch": 1.6329147742452577, "step": 6112, "train/loss_ctc": 0.6610603332519531, "train/loss_error": 0.4256388247013092, "train/loss_total": 0.472723126411438 }, { "epoch": 1.6331819396206253, "step": 6113, "train/loss_ctc": 0.4385572075843811, "train/loss_error": 0.46469029784202576, "train/loss_total": 0.4594636857509613 }, { "epoch": 1.6334491049959925, "step": 6114, "train/loss_ctc": 0.9342586994171143, "train/loss_error": 0.4933362901210785, "train/loss_total": 0.5815207958221436 }, { "epoch": 1.63371627037136, "step": 6115, "train/loss_ctc": 0.5072053074836731, "train/loss_error": 0.43668627738952637, "train/loss_total": 0.45079007744789124 }, { "epoch": 1.6339834357467273, "step": 6116, "train/loss_ctc": 0.4593643546104431, "train/loss_error": 0.5466763377189636, "train/loss_total": 0.5292139053344727 }, { "epoch": 1.6342506011220945, "step": 6117, "train/loss_ctc": 1.3961503505706787, "train/loss_error": 0.46555155515670776, "train/loss_total": 0.6516713500022888 }, { "epoch": 1.634517766497462, "step": 6118, "train/loss_ctc": 0.6170435547828674, "train/loss_error": 0.42175644636154175, "train/loss_total": 0.46081387996673584 }, { "epoch": 1.6347849318728294, "step": 6119, "train/loss_ctc": 0.6013831496238708, 
"train/loss_error": 0.4544079601764679, "train/loss_total": 0.48380300402641296 }, { "epoch": 1.6350520972481966, "grad_norm": 1.7903224229812622, "learning_rate": 2.0197702377771843e-05, "loss": 0.5096, "step": 6120 }, { "epoch": 1.6350520972481966, "step": 6120, "train/loss_ctc": 0.9293798208236694, "train/loss_error": 0.499128133058548, "train/loss_total": 0.5851784944534302 }, { "epoch": 1.635319262623564, "step": 6121, "train/loss_ctc": 0.9660838842391968, "train/loss_error": 0.4456997811794281, "train/loss_total": 0.5497766137123108 }, { "epoch": 1.6355864279989314, "step": 6122, "train/loss_ctc": 0.9759846329689026, "train/loss_error": 0.47758546471595764, "train/loss_total": 0.5772652626037598 }, { "epoch": 1.6358535933742986, "step": 6123, "train/loss_ctc": 0.5336685180664062, "train/loss_error": 0.4838845133781433, "train/loss_total": 0.4938413202762604 }, { "epoch": 1.636120758749666, "step": 6124, "train/loss_ctc": 0.7827286124229431, "train/loss_error": 0.3865768313407898, "train/loss_total": 0.4658071994781494 }, { "epoch": 1.6363879241250334, "step": 6125, "train/loss_ctc": 1.0735223293304443, "train/loss_error": 0.4459781348705292, "train/loss_total": 0.5714870095252991 }, { "epoch": 1.6366550895004006, "step": 6126, "train/loss_ctc": 0.8320246934890747, "train/loss_error": 0.4778696298599243, "train/loss_total": 0.5487006306648254 }, { "epoch": 1.6369222548757683, "step": 6127, "train/loss_ctc": 0.7841416597366333, "train/loss_error": 0.4460044801235199, "train/loss_total": 0.5136319398880005 }, { "epoch": 1.6371894202511355, "step": 6128, "train/loss_ctc": 1.1565067768096924, "train/loss_error": 0.37773841619491577, "train/loss_total": 0.5334920883178711 }, { "epoch": 1.6374565856265026, "step": 6129, "train/loss_ctc": 0.47146931290626526, "train/loss_error": 0.4871530532836914, "train/loss_total": 0.4840162992477417 }, { "epoch": 1.6377237510018703, "grad_norm": 2.055474281311035, "learning_rate": 2.01816724552498e-05, "loss": 0.5323, "step": 
6130 }, { "epoch": 1.6377237510018703, "step": 6130, "train/loss_ctc": 1.078384280204773, "train/loss_error": 0.5142573118209839, "train/loss_total": 0.6270827054977417 }, { "epoch": 1.6379909163772375, "step": 6131, "train/loss_ctc": 0.5075216293334961, "train/loss_error": 0.45689254999160767, "train/loss_total": 0.46701836585998535 }, { "epoch": 1.638258081752605, "step": 6132, "train/loss_ctc": 1.320711374282837, "train/loss_error": 0.4472356140613556, "train/loss_total": 0.6219307780265808 }, { "epoch": 1.6385252471279723, "step": 6133, "train/loss_ctc": 0.5424935817718506, "train/loss_error": 0.4182352125644684, "train/loss_total": 0.4430868923664093 }, { "epoch": 1.6387924125033395, "step": 6134, "train/loss_ctc": 1.0428061485290527, "train/loss_error": 0.5331892371177673, "train/loss_total": 0.6351126432418823 }, { "epoch": 1.639059577878707, "step": 6135, "train/loss_ctc": 1.04395592212677, "train/loss_error": 0.42621493339538574, "train/loss_total": 0.5497631430625916 }, { "epoch": 1.6393267432540743, "step": 6136, "train/loss_ctc": 0.5274849534034729, "train/loss_error": 0.4988376200199127, "train/loss_total": 0.5045670866966248 }, { "epoch": 1.6395939086294415, "step": 6137, "train/loss_ctc": 0.9337635040283203, "train/loss_error": 0.5080999135971069, "train/loss_total": 0.5932326316833496 }, { "epoch": 1.639861074004809, "step": 6138, "train/loss_ctc": 1.2812418937683105, "train/loss_error": 0.4495338499546051, "train/loss_total": 0.6158754825592041 }, { "epoch": 1.6401282393801764, "step": 6139, "train/loss_ctc": 0.752464771270752, "train/loss_error": 0.39764901995658875, "train/loss_total": 0.4686121940612793 }, { "epoch": 1.6403954047555436, "grad_norm": 2.2872612476348877, "learning_rate": 2.016564253272776e-05, "loss": 0.5526, "step": 6140 }, { "epoch": 1.6403954047555436, "step": 6140, "train/loss_ctc": 0.8265272378921509, "train/loss_error": 0.46931639313697815, "train/loss_total": 0.5407585501670837 }, { "epoch": 1.640662570130911, "step": 6141, 
"train/loss_ctc": 0.6968627572059631, "train/loss_error": 0.4470462501049042, "train/loss_total": 0.4970095753669739 }, { "epoch": 1.6409297355062784, "step": 6142, "train/loss_ctc": 0.47857728600502014, "train/loss_error": 0.4275168180465698, "train/loss_total": 0.4377289116382599 }, { "epoch": 1.6411969008816456, "step": 6143, "train/loss_ctc": 1.4740333557128906, "train/loss_error": 0.48224180936813354, "train/loss_total": 0.6806001663208008 }, { "epoch": 1.6414640662570132, "step": 6144, "train/loss_ctc": 0.41089844703674316, "train/loss_error": 0.4757311940193176, "train/loss_total": 0.4627646505832672 }, { "epoch": 1.6417312316323804, "step": 6145, "train/loss_ctc": 0.6463489532470703, "train/loss_error": 0.49551209807395935, "train/loss_total": 0.5256794691085815 }, { "epoch": 1.6419983970077476, "step": 6146, "train/loss_ctc": 1.6535859107971191, "train/loss_error": 0.4288223683834076, "train/loss_total": 0.6737750768661499 }, { "epoch": 1.6422655623831153, "step": 6147, "train/loss_ctc": 0.649359405040741, "train/loss_error": 0.47369614243507385, "train/loss_total": 0.5088288187980652 }, { "epoch": 1.6425327277584825, "step": 6148, "train/loss_ctc": 1.2664021253585815, "train/loss_error": 0.4689752757549286, "train/loss_total": 0.6284606456756592 }, { "epoch": 1.6427998931338499, "step": 6149, "train/loss_ctc": 0.39482763409614563, "train/loss_error": 0.4485432505607605, "train/loss_total": 0.4378001093864441 }, { "epoch": 1.6430670585092173, "grad_norm": 3.8035807609558105, "learning_rate": 2.0149612610205717e-05, "loss": 0.5393, "step": 6150 }, { "epoch": 1.6430670585092173, "step": 6150, "train/loss_ctc": 0.45413219928741455, "train/loss_error": 0.43370509147644043, "train/loss_total": 0.43779054284095764 }, { "epoch": 1.6433342238845845, "step": 6151, "train/loss_ctc": 0.8200814723968506, "train/loss_error": 0.47880443930625916, "train/loss_total": 0.5470598340034485 }, { "epoch": 1.643601389259952, "step": 6152, "train/loss_ctc": 1.0575989484786987, 
"train/loss_error": 0.5121285915374756, "train/loss_total": 0.6212226748466492 }, { "epoch": 1.6438685546353193, "step": 6153, "train/loss_ctc": 0.7590277791023254, "train/loss_error": 0.4439801573753357, "train/loss_total": 0.5069897174835205 }, { "epoch": 1.6441357200106865, "step": 6154, "train/loss_ctc": 0.9921849966049194, "train/loss_error": 0.4724377691745758, "train/loss_total": 0.5763872265815735 }, { "epoch": 1.644402885386054, "step": 6155, "train/loss_ctc": 0.7465953826904297, "train/loss_error": 0.4269102215766907, "train/loss_total": 0.49084725975990295 }, { "epoch": 1.6446700507614214, "step": 6156, "train/loss_ctc": 0.6836202144622803, "train/loss_error": 0.44397634267807007, "train/loss_total": 0.4919050931930542 }, { "epoch": 1.6449372161367886, "step": 6157, "train/loss_ctc": 1.5148208141326904, "train/loss_error": 0.4429451823234558, "train/loss_total": 0.6573203206062317 }, { "epoch": 1.645204381512156, "step": 6158, "train/loss_ctc": 0.6793915629386902, "train/loss_error": 0.3823058605194092, "train/loss_total": 0.4417229890823364 }, { "epoch": 1.6454715468875234, "step": 6159, "train/loss_ctc": 0.6209709048271179, "train/loss_error": 0.5140868425369263, "train/loss_total": 0.5354636907577515 }, { "epoch": 1.6457387122628906, "grad_norm": 3.134065866470337, "learning_rate": 2.013358268768368e-05, "loss": 0.5307, "step": 6160 }, { "epoch": 1.6457387122628906, "step": 6160, "train/loss_ctc": 1.742824912071228, "train/loss_error": 0.46207278966903687, "train/loss_total": 0.7182232141494751 }, { "epoch": 1.6460058776382582, "step": 6161, "train/loss_ctc": 1.1961355209350586, "train/loss_error": 0.4851915240287781, "train/loss_total": 0.6273803114891052 }, { "epoch": 1.6462730430136254, "step": 6162, "train/loss_ctc": 0.584746778011322, "train/loss_error": 0.5200886726379395, "train/loss_total": 0.5330203175544739 }, { "epoch": 1.6465402083889926, "step": 6163, "train/loss_ctc": 0.5279680490493774, "train/loss_error": 0.409903347492218, 
"train/loss_total": 0.4335162937641144 }, { "epoch": 1.6468073737643603, "step": 6164, "train/loss_ctc": 0.546939492225647, "train/loss_error": 0.4282330274581909, "train/loss_total": 0.4519743323326111 }, { "epoch": 1.6470745391397275, "step": 6165, "train/loss_ctc": 1.4277632236480713, "train/loss_error": 0.47634750604629517, "train/loss_total": 0.6666306257247925 }, { "epoch": 1.6473417045150949, "step": 6166, "train/loss_ctc": 0.5703325867652893, "train/loss_error": 0.498836487531662, "train/loss_total": 0.5131357312202454 }, { "epoch": 1.6476088698904623, "step": 6167, "train/loss_ctc": 0.5606952905654907, "train/loss_error": 0.43440449237823486, "train/loss_total": 0.45966267585754395 }, { "epoch": 1.6478760352658295, "step": 6168, "train/loss_ctc": 0.4190649092197418, "train/loss_error": 0.4504012167453766, "train/loss_total": 0.4441339671611786 }, { "epoch": 1.648143200641197, "step": 6169, "train/loss_ctc": 0.9873605370521545, "train/loss_error": 0.4194333851337433, "train/loss_total": 0.5330188274383545 }, { "epoch": 1.6484103660165643, "grad_norm": 1.2772736549377441, "learning_rate": 2.0117552765161637e-05, "loss": 0.5381, "step": 6170 }, { "epoch": 1.6484103660165643, "step": 6170, "train/loss_ctc": 0.8440560698509216, "train/loss_error": 0.3879346251487732, "train/loss_total": 0.4791589379310608 }, { "epoch": 1.6486775313919315, "step": 6171, "train/loss_ctc": 0.9424467086791992, "train/loss_error": 0.49871689081192017, "train/loss_total": 0.5874629020690918 }, { "epoch": 1.648944696767299, "step": 6172, "train/loss_ctc": 0.5221552848815918, "train/loss_error": 0.43869513273239136, "train/loss_total": 0.4553871750831604 }, { "epoch": 1.6492118621426664, "step": 6173, "train/loss_ctc": 1.0813472270965576, "train/loss_error": 0.510966420173645, "train/loss_total": 0.6250426173210144 }, { "epoch": 1.6494790275180335, "step": 6174, "train/loss_ctc": 0.46505409479141235, "train/loss_error": 0.48942601680755615, "train/loss_total": 0.48455163836479187 }, { 
"epoch": 1.649746192893401, "step": 6175, "train/loss_ctc": 0.7868707180023193, "train/loss_error": 0.47160932421684265, "train/loss_total": 0.5346616506576538 }, { "epoch": 1.6500133582687684, "step": 6176, "train/loss_ctc": 0.902486264705658, "train/loss_error": 0.428886353969574, "train/loss_total": 0.5236063599586487 }, { "epoch": 1.6502805236441356, "step": 6177, "train/loss_ctc": 0.5433724522590637, "train/loss_error": 0.4894985556602478, "train/loss_total": 0.5002733469009399 }, { "epoch": 1.6505476890195032, "step": 6178, "train/loss_ctc": 0.5418771505355835, "train/loss_error": 0.44098129868507385, "train/loss_total": 0.46116048097610474 }, { "epoch": 1.6508148543948704, "step": 6179, "train/loss_ctc": 0.8887848854064941, "train/loss_error": 0.42177072167396545, "train/loss_total": 0.5151735544204712 }, { "epoch": 1.6510820197702378, "grad_norm": 1.388123869895935, "learning_rate": 2.0101522842639595e-05, "loss": 0.5166, "step": 6180 }, { "epoch": 1.6510820197702378, "step": 6180, "train/loss_ctc": 0.49367913603782654, "train/loss_error": 0.47058314085006714, "train/loss_total": 0.475202351808548 }, { "epoch": 1.6513491851456052, "step": 6181, "train/loss_ctc": 1.0294475555419922, "train/loss_error": 0.38753485679626465, "train/loss_total": 0.5159174203872681 }, { "epoch": 1.6516163505209724, "step": 6182, "train/loss_ctc": 1.4714391231536865, "train/loss_error": 0.43072208762168884, "train/loss_total": 0.6388654708862305 }, { "epoch": 1.6518835158963399, "step": 6183, "train/loss_ctc": 0.8367962837219238, "train/loss_error": 0.40155521035194397, "train/loss_total": 0.4886034429073334 }, { "epoch": 1.6521506812717073, "step": 6184, "train/loss_ctc": 0.830829381942749, "train/loss_error": 0.4444064497947693, "train/loss_total": 0.5216910243034363 }, { "epoch": 1.6524178466470745, "step": 6185, "train/loss_ctc": 0.8561028242111206, "train/loss_error": 0.5076531767845154, "train/loss_total": 0.5773431062698364 }, { "epoch": 1.652685012022442, "step": 6186, 
"train/loss_ctc": 0.5975338220596313, "train/loss_error": 0.46197599172592163, "train/loss_total": 0.4890875816345215 }, { "epoch": 1.6529521773978093, "step": 6187, "train/loss_ctc": 0.6845202445983887, "train/loss_error": 0.3963644504547119, "train/loss_total": 0.45399558544158936 }, { "epoch": 1.6532193427731765, "step": 6188, "train/loss_ctc": 0.6393767595291138, "train/loss_error": 0.38147690892219543, "train/loss_total": 0.43305689096450806 }, { "epoch": 1.653486508148544, "step": 6189, "train/loss_ctc": 1.2324495315551758, "train/loss_error": 0.4717864990234375, "train/loss_total": 0.6239191293716431 }, { "epoch": 1.6537536735239113, "grad_norm": 1.6292712688446045, "learning_rate": 2.0085492920117553e-05, "loss": 0.5218, "step": 6190 }, { "epoch": 1.6537536735239113, "step": 6190, "train/loss_ctc": 0.527646005153656, "train/loss_error": 0.41907623410224915, "train/loss_total": 0.44079017639160156 }, { "epoch": 1.6540208388992785, "step": 6191, "train/loss_ctc": 0.48648297786712646, "train/loss_error": 0.4751776456832886, "train/loss_total": 0.47743871808052063 }, { "epoch": 1.654288004274646, "step": 6192, "train/loss_ctc": 1.1876835823059082, "train/loss_error": 0.5164358615875244, "train/loss_total": 0.6506854295730591 }, { "epoch": 1.6545551696500134, "step": 6193, "train/loss_ctc": 1.021716594696045, "train/loss_error": 0.5110579133033752, "train/loss_total": 0.613189697265625 }, { "epoch": 1.6548223350253806, "step": 6194, "train/loss_ctc": 0.8708648681640625, "train/loss_error": 0.5040827393531799, "train/loss_total": 0.5774391889572144 }, { "epoch": 1.6550895004007482, "step": 6195, "train/loss_ctc": 0.5917785167694092, "train/loss_error": 0.4364709258079529, "train/loss_total": 0.4675324559211731 }, { "epoch": 1.6553566657761154, "step": 6196, "train/loss_ctc": 1.290351390838623, "train/loss_error": 0.41515275835990906, "train/loss_total": 0.5901924967765808 }, { "epoch": 1.6556238311514828, "step": 6197, "train/loss_ctc": 0.44732940196990967, 
"train/loss_error": 0.4929836690425873, "train/loss_total": 0.4838528335094452 }, { "epoch": 1.6558909965268502, "step": 6198, "train/loss_ctc": 0.4208759069442749, "train/loss_error": 0.49088254570961, "train/loss_total": 0.476881206035614 }, { "epoch": 1.6561581619022174, "step": 6199, "train/loss_ctc": 0.5204654932022095, "train/loss_error": 0.4607877731323242, "train/loss_total": 0.4727233350276947 }, { "epoch": 1.6564253272775848, "grad_norm": 1.6429365873336792, "learning_rate": 2.006946299759551e-05, "loss": 0.5251, "step": 6200 }, { "epoch": 1.6564253272775848, "step": 6200, "train/loss_ctc": 1.0414150953292847, "train/loss_error": 0.471908837556839, "train/loss_total": 0.585810124874115 }, { "epoch": 1.6566924926529523, "step": 6201, "train/loss_ctc": 0.4349070191383362, "train/loss_error": 0.3816607594490051, "train/loss_total": 0.3923100233078003 }, { "epoch": 1.6569596580283195, "step": 6202, "train/loss_ctc": 0.6587903499603271, "train/loss_error": 0.42315593361854553, "train/loss_total": 0.4702828526496887 }, { "epoch": 1.6572268234036869, "step": 6203, "train/loss_ctc": 0.6054128408432007, "train/loss_error": 0.4202704429626465, "train/loss_total": 0.4572989344596863 }, { "epoch": 1.6574939887790543, "step": 6204, "train/loss_ctc": 0.7560945749282837, "train/loss_error": 0.3968856930732727, "train/loss_total": 0.4687274694442749 }, { "epoch": 1.6577611541544215, "step": 6205, "train/loss_ctc": 0.8325481414794922, "train/loss_error": 0.45294445753097534, "train/loss_total": 0.5288652181625366 }, { "epoch": 1.658028319529789, "step": 6206, "train/loss_ctc": 1.2131335735321045, "train/loss_error": 0.46716347336769104, "train/loss_total": 0.6163575053215027 }, { "epoch": 1.6582954849051563, "step": 6207, "train/loss_ctc": 0.4897226095199585, "train/loss_error": 0.41647884249687195, "train/loss_total": 0.4311276078224182 }, { "epoch": 1.6585626502805235, "step": 6208, "train/loss_ctc": 0.9247257709503174, "train/loss_error": 0.49157488346099854, 
"train/loss_total": 0.5782050490379333 }, { "epoch": 1.6588298156558912, "step": 6209, "train/loss_ctc": 0.4415745735168457, "train/loss_error": 0.5028232932090759, "train/loss_total": 0.49057355523109436 }, { "epoch": 1.6590969810312584, "grad_norm": 1.9938980340957642, "learning_rate": 2.005343307507347e-05, "loss": 0.502, "step": 6210 }, { "epoch": 1.6590969810312584, "step": 6210, "train/loss_ctc": 0.410258948802948, "train/loss_error": 0.4531780481338501, "train/loss_total": 0.44459423422813416 }, { "epoch": 1.6593641464066256, "step": 6211, "train/loss_ctc": 0.39306676387786865, "train/loss_error": 0.4437517523765564, "train/loss_total": 0.4336147904396057 }, { "epoch": 1.6596313117819932, "step": 6212, "train/loss_ctc": 0.8957265615463257, "train/loss_error": 0.5193086862564087, "train/loss_total": 0.594592273235321 }, { "epoch": 1.6598984771573604, "step": 6213, "train/loss_ctc": 1.0494588613510132, "train/loss_error": 0.4393727481365204, "train/loss_total": 0.5613899827003479 }, { "epoch": 1.6601656425327278, "step": 6214, "train/loss_ctc": 1.2701574563980103, "train/loss_error": 0.4853423237800598, "train/loss_total": 0.6423053741455078 }, { "epoch": 1.6604328079080952, "step": 6215, "train/loss_ctc": 1.3943431377410889, "train/loss_error": 0.4826473593711853, "train/loss_total": 0.6649865508079529 }, { "epoch": 1.6606999732834624, "step": 6216, "train/loss_ctc": 0.9391624331474304, "train/loss_error": 0.4056765139102936, "train/loss_total": 0.512373685836792 }, { "epoch": 1.6609671386588298, "step": 6217, "train/loss_ctc": 0.7932335734367371, "train/loss_error": 0.43060028553009033, "train/loss_total": 0.5031269788742065 }, { "epoch": 1.6612343040341973, "step": 6218, "train/loss_ctc": 1.276105523109436, "train/loss_error": 0.5102866291999817, "train/loss_total": 0.6634504199028015 }, { "epoch": 1.6615014694095644, "step": 6219, "train/loss_ctc": 0.5388504266738892, "train/loss_error": 0.4311378300189972, "train/loss_total": 0.4526803493499756 }, { 
"epoch": 1.6617686347849319, "grad_norm": 2.4153201580047607, "learning_rate": 2.003740315255143e-05, "loss": 0.5473, "step": 6220 }, { "epoch": 1.6617686347849319, "step": 6220, "train/loss_ctc": 0.9998565912246704, "train/loss_error": 0.4754335284233093, "train/loss_total": 0.5803181529045105 }, { "epoch": 1.6620358001602993, "step": 6221, "train/loss_ctc": 1.3290188312530518, "train/loss_error": 0.4657518267631531, "train/loss_total": 0.6384052634239197 }, { "epoch": 1.6623029655356665, "step": 6222, "train/loss_ctc": 0.6937525272369385, "train/loss_error": 0.5040186047554016, "train/loss_total": 0.5419654250144958 }, { "epoch": 1.662570130911034, "step": 6223, "train/loss_ctc": 0.7908089756965637, "train/loss_error": 0.48168814182281494, "train/loss_total": 0.5435123443603516 }, { "epoch": 1.6628372962864013, "step": 6224, "train/loss_ctc": 0.4107074737548828, "train/loss_error": 0.5034838318824768, "train/loss_total": 0.48492854833602905 }, { "epoch": 1.6631044616617685, "step": 6225, "train/loss_ctc": 1.240797996520996, "train/loss_error": 0.4938507676124573, "train/loss_total": 0.643240213394165 }, { "epoch": 1.6633716270371361, "step": 6226, "train/loss_ctc": 0.5181012749671936, "train/loss_error": 0.4737239480018616, "train/loss_total": 0.4825994372367859 }, { "epoch": 1.6636387924125033, "step": 6227, "train/loss_ctc": 0.4051295816898346, "train/loss_error": 0.4489264190196991, "train/loss_total": 0.44016706943511963 }, { "epoch": 1.6639059577878705, "step": 6228, "train/loss_ctc": 0.9284549951553345, "train/loss_error": 0.5105276703834534, "train/loss_total": 0.5941131114959717 }, { "epoch": 1.6641731231632382, "step": 6229, "train/loss_ctc": 0.8495526313781738, "train/loss_error": 0.4437720477581024, "train/loss_total": 0.5249281525611877 }, { "epoch": 1.6644402885386054, "grad_norm": 1.9622882604599, "learning_rate": 2.002137323002939e-05, "loss": 0.5474, "step": 6230 }, { "epoch": 1.6644402885386054, "step": 6230, "train/loss_ctc": 0.886972188949585, 
"train/loss_error": 0.5393778085708618, "train/loss_total": 0.6088966727256775 }, { "epoch": 1.6647074539139728, "step": 6231, "train/loss_ctc": 0.4992474913597107, "train/loss_error": 0.41694530844688416, "train/loss_total": 0.4334057569503784 }, { "epoch": 1.6649746192893402, "step": 6232, "train/loss_ctc": 1.7347761392593384, "train/loss_error": 0.49475446343421936, "train/loss_total": 0.7427588105201721 }, { "epoch": 1.6652417846647074, "step": 6233, "train/loss_ctc": 0.9647271633148193, "train/loss_error": 0.49148866534233093, "train/loss_total": 0.5861364006996155 }, { "epoch": 1.6655089500400748, "step": 6234, "train/loss_ctc": 0.5936669111251831, "train/loss_error": 0.49098944664001465, "train/loss_total": 0.5115249752998352 }, { "epoch": 1.6657761154154422, "step": 6235, "train/loss_ctc": 0.7381762862205505, "train/loss_error": 0.49957165122032166, "train/loss_total": 0.5472925901412964 }, { "epoch": 1.6660432807908094, "step": 6236, "train/loss_ctc": 0.9726402759552002, "train/loss_error": 0.4874357283115387, "train/loss_total": 0.5844766497612 }, { "epoch": 1.6663104461661769, "step": 6237, "train/loss_ctc": 0.8270728588104248, "train/loss_error": 0.448535680770874, "train/loss_total": 0.5242431163787842 }, { "epoch": 1.6665776115415443, "step": 6238, "train/loss_ctc": 1.0969510078430176, "train/loss_error": 0.4694330394268036, "train/loss_total": 0.5949366092681885 }, { "epoch": 1.6668447769169115, "step": 6239, "train/loss_ctc": 0.43483513593673706, "train/loss_error": 0.44273611903190613, "train/loss_total": 0.44115591049194336 }, { "epoch": 1.6671119422922789, "grad_norm": 1.4403445720672607, "learning_rate": 2.0005343307507347e-05, "loss": 0.5575, "step": 6240 }, { "epoch": 1.6671119422922789, "step": 6240, "train/loss_ctc": 0.44376468658447266, "train/loss_error": 0.5142338871955872, "train/loss_total": 0.5001400709152222 }, { "epoch": 1.6673791076676463, "step": 6241, "train/loss_ctc": 0.7323503494262695, "train/loss_error": 0.5209810137748718, 
"train/loss_total": 0.5632548928260803 }, { "epoch": 1.6676462730430135, "step": 6242, "train/loss_ctc": 0.4377982020378113, "train/loss_error": 0.35292142629623413, "train/loss_total": 0.369896799325943 }, { "epoch": 1.6679134384183811, "step": 6243, "train/loss_ctc": 0.9900755882263184, "train/loss_error": 0.4635067880153656, "train/loss_total": 0.568820595741272 }, { "epoch": 1.6681806037937483, "step": 6244, "train/loss_ctc": 0.9386529922485352, "train/loss_error": 0.43279483914375305, "train/loss_total": 0.5339664816856384 }, { "epoch": 1.6684477691691155, "step": 6245, "train/loss_ctc": 0.7805861234664917, "train/loss_error": 0.46554192900657654, "train/loss_total": 0.5285507440567017 }, { "epoch": 1.6687149345444832, "step": 6246, "train/loss_ctc": 1.0399549007415771, "train/loss_error": 0.4551260471343994, "train/loss_total": 0.572091817855835 }, { "epoch": 1.6689820999198504, "step": 6247, "train/loss_ctc": 1.6914594173431396, "train/loss_error": 0.459750235080719, "train/loss_total": 0.706092119216919 }, { "epoch": 1.6692492652952178, "step": 6248, "train/loss_ctc": 1.0342843532562256, "train/loss_error": 0.48784759640693665, "train/loss_total": 0.5971349477767944 }, { "epoch": 1.6695164306705852, "step": 6249, "train/loss_ctc": 0.2967837452888489, "train/loss_error": 0.4101560115814209, "train/loss_total": 0.38748157024383545 }, { "epoch": 1.6697835960459524, "grad_norm": 1.6862049102783203, "learning_rate": 1.9989313384985305e-05, "loss": 0.5327, "step": 6250 }, { "epoch": 1.6697835960459524, "step": 6250, "train/loss_ctc": 0.4132092595100403, "train/loss_error": 0.5257971286773682, "train/loss_total": 0.5032795667648315 }, { "epoch": 1.6700507614213198, "step": 6251, "train/loss_ctc": 0.881592333316803, "train/loss_error": 0.45856985449790955, "train/loss_total": 0.5431743860244751 }, { "epoch": 1.6703179267966872, "step": 6252, "train/loss_ctc": 0.5975666046142578, "train/loss_error": 0.5071127414703369, "train/loss_total": 0.52520352602005 }, { 
"epoch": 1.6705850921720544, "step": 6253, "train/loss_ctc": 0.640957236289978, "train/loss_error": 0.4391884207725525, "train/loss_total": 0.47954219579696655 }, { "epoch": 1.6708522575474218, "step": 6254, "train/loss_ctc": 0.677207887172699, "train/loss_error": 0.4634414613246918, "train/loss_total": 0.5061947703361511 }, { "epoch": 1.6711194229227893, "step": 6255, "train/loss_ctc": 1.291831135749817, "train/loss_error": 0.44573506712913513, "train/loss_total": 0.6149542927742004 }, { "epoch": 1.6713865882981565, "step": 6256, "train/loss_ctc": 0.5942795276641846, "train/loss_error": 0.4865492582321167, "train/loss_total": 0.5080953240394592 }, { "epoch": 1.6716537536735239, "step": 6257, "train/loss_ctc": 1.0013527870178223, "train/loss_error": 0.5244803428649902, "train/loss_total": 0.6198548078536987 }, { "epoch": 1.6719209190488913, "step": 6258, "train/loss_ctc": 0.3036145865917206, "train/loss_error": 0.4229363203048706, "train/loss_total": 0.39907199144363403 }, { "epoch": 1.6721880844242585, "step": 6259, "train/loss_ctc": 0.7150803804397583, "train/loss_error": 0.4947523772716522, "train/loss_total": 0.5388180017471313 }, { "epoch": 1.6724552497996261, "grad_norm": 1.4445197582244873, "learning_rate": 1.9973283462463263e-05, "loss": 0.5238, "step": 6260 }, { "epoch": 1.6724552497996261, "step": 6260, "train/loss_ctc": 0.7170612812042236, "train/loss_error": 0.4370722472667694, "train/loss_total": 0.4930700659751892 }, { "epoch": 1.6727224151749933, "step": 6261, "train/loss_ctc": 0.4991896152496338, "train/loss_error": 0.44107961654663086, "train/loss_total": 0.4527016282081604 }, { "epoch": 1.6729895805503605, "step": 6262, "train/loss_ctc": 0.4636501371860504, "train/loss_error": 0.504610538482666, "train/loss_total": 0.49641847610473633 }, { "epoch": 1.6732567459257282, "step": 6263, "train/loss_ctc": 0.5177140235900879, "train/loss_error": 0.44183483719825745, "train/loss_total": 0.4570106863975525 }, { "epoch": 1.6735239113010953, "step": 6264, 
"train/loss_ctc": 0.8124319911003113, "train/loss_error": 0.45915886759757996, "train/loss_total": 0.5298135280609131 }, { "epoch": 1.6737910766764628, "step": 6265, "train/loss_ctc": 0.8317234516143799, "train/loss_error": 0.385794997215271, "train/loss_total": 0.4749807119369507 }, { "epoch": 1.6740582420518302, "step": 6266, "train/loss_ctc": 0.5201596617698669, "train/loss_error": 0.4992498755455017, "train/loss_total": 0.5034318566322327 }, { "epoch": 1.6743254074271974, "step": 6267, "train/loss_ctc": 0.4959426820278168, "train/loss_error": 0.5002658367156982, "train/loss_total": 0.4994012117385864 }, { "epoch": 1.6745925728025648, "step": 6268, "train/loss_ctc": 0.5059871673583984, "train/loss_error": 0.5208836197853088, "train/loss_total": 0.5179043412208557 }, { "epoch": 1.6748597381779322, "step": 6269, "train/loss_ctc": 0.7852357625961304, "train/loss_error": 0.4272119998931885, "train/loss_total": 0.49881675839424133 }, { "epoch": 1.6751269035532994, "grad_norm": 2.3746895790100098, "learning_rate": 1.995725353994122e-05, "loss": 0.4924, "step": 6270 }, { "epoch": 1.6751269035532994, "step": 6270, "train/loss_ctc": 0.572502851486206, "train/loss_error": 0.400453120470047, "train/loss_total": 0.4348630905151367 }, { "epoch": 1.6753940689286668, "step": 6271, "train/loss_ctc": 0.7953367233276367, "train/loss_error": 0.4830915629863739, "train/loss_total": 0.5455406308174133 }, { "epoch": 1.6756612343040342, "step": 6272, "train/loss_ctc": 0.41938403248786926, "train/loss_error": 0.48888206481933594, "train/loss_total": 0.47498247027397156 }, { "epoch": 1.6759283996794014, "step": 6273, "train/loss_ctc": 0.8154796957969666, "train/loss_error": 0.5049918293952942, "train/loss_total": 0.5670893788337708 }, { "epoch": 1.6761955650547689, "step": 6274, "train/loss_ctc": 1.0014830827713013, "train/loss_error": 0.4816373586654663, "train/loss_total": 0.5856065154075623 }, { "epoch": 1.6764627304301363, "step": 6275, "train/loss_ctc": 0.5006552934646606, 
"train/loss_error": 0.46972203254699707, "train/loss_total": 0.47590869665145874 }, { "epoch": 1.6767298958055035, "step": 6276, "train/loss_ctc": 0.424108624458313, "train/loss_error": 0.4527171850204468, "train/loss_total": 0.44699549674987793 }, { "epoch": 1.676997061180871, "step": 6277, "train/loss_ctc": 0.8483899831771851, "train/loss_error": 0.45189642906188965, "train/loss_total": 0.5311951637268066 }, { "epoch": 1.6772642265562383, "step": 6278, "train/loss_ctc": 0.6497703790664673, "train/loss_error": 0.46789219975471497, "train/loss_total": 0.5042678713798523 }, { "epoch": 1.6775313919316055, "step": 6279, "train/loss_ctc": 1.3210158348083496, "train/loss_error": 0.5463425517082214, "train/loss_total": 0.7012771964073181 }, { "epoch": 1.6777985573069731, "grad_norm": 7.280346870422363, "learning_rate": 1.9941223617419186e-05, "loss": 0.5268, "step": 6280 }, { "epoch": 1.6777985573069731, "step": 6280, "train/loss_ctc": 0.6623435616493225, "train/loss_error": 0.45582830905914307, "train/loss_total": 0.49713134765625 }, { "epoch": 1.6780657226823403, "step": 6281, "train/loss_ctc": 1.111490249633789, "train/loss_error": 0.5511354804039001, "train/loss_total": 0.6632064580917358 }, { "epoch": 1.6783328880577078, "step": 6282, "train/loss_ctc": 0.7624887824058533, "train/loss_error": 0.44764137268066406, "train/loss_total": 0.5106108784675598 }, { "epoch": 1.6786000534330752, "step": 6283, "train/loss_ctc": 1.8545702695846558, "train/loss_error": 0.5069673657417297, "train/loss_total": 0.7764879465103149 }, { "epoch": 1.6788672188084424, "step": 6284, "train/loss_ctc": 1.726609230041504, "train/loss_error": 0.4874911308288574, "train/loss_total": 0.7353147864341736 }, { "epoch": 1.6791343841838098, "step": 6285, "train/loss_ctc": 0.5171281099319458, "train/loss_error": 0.5046774744987488, "train/loss_total": 0.5071675777435303 }, { "epoch": 1.6794015495591772, "step": 6286, "train/loss_ctc": 0.8728606104850769, "train/loss_error": 0.4679848253726959, 
"train/loss_total": 0.5489599704742432 }, { "epoch": 1.6796687149345444, "step": 6287, "train/loss_ctc": 0.731553316116333, "train/loss_error": 0.4409746527671814, "train/loss_total": 0.49909037351608276 }, { "epoch": 1.6799358803099118, "step": 6288, "train/loss_ctc": 0.6347924470901489, "train/loss_error": 0.4579417407512665, "train/loss_total": 0.49331188201904297 }, { "epoch": 1.6802030456852792, "step": 6289, "train/loss_ctc": 0.5100141763687134, "train/loss_error": 0.38159486651420593, "train/loss_total": 0.40727871656417847 }, { "epoch": 1.6804702110606464, "grad_norm": 13.554994583129883, "learning_rate": 1.9925193694897144e-05, "loss": 0.5639, "step": 6290 }, { "epoch": 1.6804702110606464, "step": 6290, "train/loss_ctc": 1.0563325881958008, "train/loss_error": 0.47239553928375244, "train/loss_total": 0.58918297290802 }, { "epoch": 1.6807373764360138, "step": 6291, "train/loss_ctc": 0.6097450256347656, "train/loss_error": 0.4549753665924072, "train/loss_total": 0.48592931032180786 }, { "epoch": 1.6810045418113813, "step": 6292, "train/loss_ctc": 0.7999070882797241, "train/loss_error": 0.49624577164649963, "train/loss_total": 0.5569780468940735 }, { "epoch": 1.6812717071867485, "step": 6293, "train/loss_ctc": 0.4441712498664856, "train/loss_error": 0.5114201307296753, "train/loss_total": 0.4979703724384308 }, { "epoch": 1.681538872562116, "step": 6294, "train/loss_ctc": 0.5437859296798706, "train/loss_error": 0.501310408115387, "train/loss_total": 0.5098055005073547 }, { "epoch": 1.6818060379374833, "step": 6295, "train/loss_ctc": 0.8417035937309265, "train/loss_error": 0.5154982805252075, "train/loss_total": 0.5807393789291382 }, { "epoch": 1.6820732033128507, "step": 6296, "train/loss_ctc": 1.0898418426513672, "train/loss_error": 0.539947509765625, "train/loss_total": 0.6499264240264893 }, { "epoch": 1.6823403686882181, "step": 6297, "train/loss_ctc": 0.659522533416748, "train/loss_error": 0.43998441100120544, "train/loss_total": 0.4838920533657074 }, { 
"epoch": 1.6826075340635853, "step": 6298, "train/loss_ctc": 1.6291167736053467, "train/loss_error": 0.5143835544586182, "train/loss_total": 0.7373301982879639 }, { "epoch": 1.6828746994389527, "step": 6299, "train/loss_ctc": 1.655642032623291, "train/loss_error": 0.43883952498435974, "train/loss_total": 0.6822000741958618 }, { "epoch": 1.6831418648143202, "grad_norm": 3.2491166591644287, "learning_rate": 1.9909163772375102e-05, "loss": 0.5774, "step": 6300 }, { "epoch": 1.6831418648143202, "step": 6300, "train/loss_ctc": 1.3253146409988403, "train/loss_error": 0.4583422839641571, "train/loss_total": 0.6317367553710938 }, { "epoch": 1.6834090301896873, "step": 6301, "train/loss_ctc": 0.5544523000717163, "train/loss_error": 0.47347933053970337, "train/loss_total": 0.489673912525177 }, { "epoch": 1.6836761955650548, "step": 6302, "train/loss_ctc": 1.7263104915618896, "train/loss_error": 0.48030492663383484, "train/loss_total": 0.7295060157775879 }, { "epoch": 1.6839433609404222, "step": 6303, "train/loss_ctc": 0.7910584807395935, "train/loss_error": 0.42986592650413513, "train/loss_total": 0.5021044611930847 }, { "epoch": 1.6842105263157894, "step": 6304, "train/loss_ctc": 0.5410000681877136, "train/loss_error": 0.4614081084728241, "train/loss_total": 0.47732651233673096 }, { "epoch": 1.6844776916911568, "step": 6305, "train/loss_ctc": 0.5970184803009033, "train/loss_error": 0.43838778138160706, "train/loss_total": 0.47011393308639526 }, { "epoch": 1.6847448570665242, "step": 6306, "train/loss_ctc": 0.8162462711334229, "train/loss_error": 0.45927342772483826, "train/loss_total": 0.5306680202484131 }, { "epoch": 1.6850120224418914, "step": 6307, "train/loss_ctc": 0.5013809204101562, "train/loss_error": 0.548895537853241, "train/loss_total": 0.5393926501274109 }, { "epoch": 1.6852791878172588, "step": 6308, "train/loss_ctc": 0.7145058512687683, "train/loss_error": 0.42108914256095886, "train/loss_total": 0.47977250814437866 }, { "epoch": 1.6855463531926262, "step": 
6309, "train/loss_ctc": 0.3299272656440735, "train/loss_error": 0.4354928135871887, "train/loss_total": 0.41437971591949463 }, { "epoch": 1.6858135185679934, "grad_norm": 1.545698881149292, "learning_rate": 1.989313384985306e-05, "loss": 0.5265, "step": 6310 }, { "epoch": 1.6858135185679934, "step": 6310, "train/loss_ctc": 0.5078820586204529, "train/loss_error": 0.484382301568985, "train/loss_total": 0.4890822768211365 }, { "epoch": 1.686080683943361, "step": 6311, "train/loss_ctc": 1.4928576946258545, "train/loss_error": 0.5020698308944702, "train/loss_total": 0.7002274394035339 }, { "epoch": 1.6863478493187283, "step": 6312, "train/loss_ctc": 0.7164351940155029, "train/loss_error": 0.40988442301750183, "train/loss_total": 0.4711945652961731 }, { "epoch": 1.6866150146940957, "step": 6313, "train/loss_ctc": 1.0467274188995361, "train/loss_error": 0.43718793988227844, "train/loss_total": 0.5590958595275879 }, { "epoch": 1.686882180069463, "step": 6314, "train/loss_ctc": 0.46224963665008545, "train/loss_error": 0.48538705706596375, "train/loss_total": 0.4807595908641815 }, { "epoch": 1.6871493454448303, "step": 6315, "train/loss_ctc": 0.6910891532897949, "train/loss_error": 0.5517683625221252, "train/loss_total": 0.5796325206756592 }, { "epoch": 1.6874165108201977, "step": 6316, "train/loss_ctc": 0.913066029548645, "train/loss_error": 0.5149657726287842, "train/loss_total": 0.5945858359336853 }, { "epoch": 1.6876836761955651, "step": 6317, "train/loss_ctc": 0.23599690198898315, "train/loss_error": 0.4593605101108551, "train/loss_total": 0.41468778252601624 }, { "epoch": 1.6879508415709323, "step": 6318, "train/loss_ctc": 1.14168119430542, "train/loss_error": 0.47850847244262695, "train/loss_total": 0.6111429929733276 }, { "epoch": 1.6882180069462998, "step": 6319, "train/loss_ctc": 0.47539544105529785, "train/loss_error": 0.36633533239364624, "train/loss_total": 0.38814735412597656 }, { "epoch": 1.6884851723216672, "grad_norm": 1.6783465147018433, "learning_rate": 
1.9877103927331018e-05, "loss": 0.5289, "step": 6320 }, { "epoch": 1.6884851723216672, "step": 6320, "train/loss_ctc": 0.4985625147819519, "train/loss_error": 0.4286728799343109, "train/loss_total": 0.44265079498291016 }, { "epoch": 1.6887523376970344, "step": 6321, "train/loss_ctc": 0.3548581600189209, "train/loss_error": 0.46510109305381775, "train/loss_total": 0.4430525302886963 }, { "epoch": 1.6890195030724018, "step": 6322, "train/loss_ctc": 0.9775292873382568, "train/loss_error": 0.4454513192176819, "train/loss_total": 0.551866888999939 }, { "epoch": 1.6892866684477692, "step": 6323, "train/loss_ctc": 1.21437406539917, "train/loss_error": 0.525640606880188, "train/loss_total": 0.6633872985839844 }, { "epoch": 1.6895538338231364, "step": 6324, "train/loss_ctc": 1.2291446924209595, "train/loss_error": 0.4795353412628174, "train/loss_total": 0.6294572353363037 }, { "epoch": 1.689820999198504, "step": 6325, "train/loss_ctc": 0.661784291267395, "train/loss_error": 0.415194571018219, "train/loss_total": 0.46451252698898315 }, { "epoch": 1.6900881645738712, "step": 6326, "train/loss_ctc": 0.8853757381439209, "train/loss_error": 0.4745931625366211, "train/loss_total": 0.556749701499939 }, { "epoch": 1.6903553299492384, "step": 6327, "train/loss_ctc": 1.4498518705368042, "train/loss_error": 0.4892440140247345, "train/loss_total": 0.6813656091690063 }, { "epoch": 1.690622495324606, "step": 6328, "train/loss_ctc": 0.6622817516326904, "train/loss_error": 0.466812402009964, "train/loss_total": 0.5059062838554382 }, { "epoch": 1.6908896606999733, "step": 6329, "train/loss_ctc": 0.8800030946731567, "train/loss_error": 0.4684107005596161, "train/loss_total": 0.5507291555404663 }, { "epoch": 1.6911568260753407, "grad_norm": 2.2658915519714355, "learning_rate": 1.9861074004808976e-05, "loss": 0.549, "step": 6330 }, { "epoch": 1.6911568260753407, "step": 6330, "train/loss_ctc": 0.8028113842010498, "train/loss_error": 0.4135476350784302, "train/loss_total": 0.4914003908634186 }, 
{ "epoch": 1.691423991450708, "step": 6331, "train/loss_ctc": 0.2922425866127014, "train/loss_error": 0.45425742864608765, "train/loss_total": 0.4218544661998749 }, { "epoch": 1.6916911568260753, "step": 6332, "train/loss_ctc": 0.7482994794845581, "train/loss_error": 0.4603242874145508, "train/loss_total": 0.5179193019866943 }, { "epoch": 1.6919583222014427, "step": 6333, "train/loss_ctc": 0.7211194634437561, "train/loss_error": 0.42338600754737854, "train/loss_total": 0.4829327166080475 }, { "epoch": 1.6922254875768101, "step": 6334, "train/loss_ctc": 0.6165605187416077, "train/loss_error": 0.4650372862815857, "train/loss_total": 0.495341956615448 }, { "epoch": 1.6924926529521773, "step": 6335, "train/loss_ctc": 1.4224225282669067, "train/loss_error": 0.42037785053253174, "train/loss_total": 0.6207867860794067 }, { "epoch": 1.6927598183275447, "step": 6336, "train/loss_ctc": 1.3880891799926758, "train/loss_error": 0.4508078396320343, "train/loss_total": 0.6382641196250916 }, { "epoch": 1.6930269837029122, "step": 6337, "train/loss_ctc": 1.1413441896438599, "train/loss_error": 0.48356738686561584, "train/loss_total": 0.6151227355003357 }, { "epoch": 1.6932941490782794, "step": 6338, "train/loss_ctc": 1.3700671195983887, "train/loss_error": 0.4718962609767914, "train/loss_total": 0.6515304446220398 }, { "epoch": 1.6935613144536468, "step": 6339, "train/loss_ctc": 0.7483517527580261, "train/loss_error": 0.4842904210090637, "train/loss_total": 0.5371026992797852 }, { "epoch": 1.6938284798290142, "grad_norm": 1.945789098739624, "learning_rate": 1.9845044082286938e-05, "loss": 0.5472, "step": 6340 }, { "epoch": 1.6938284798290142, "step": 6340, "train/loss_ctc": 0.9613708257675171, "train/loss_error": 0.4369928240776062, "train/loss_total": 0.5418684482574463 }, { "epoch": 1.6940956452043814, "step": 6341, "train/loss_ctc": 0.8084843158721924, "train/loss_error": 0.44068998098373413, "train/loss_total": 0.5142488479614258 }, { "epoch": 1.694362810579749, "step": 6342, 
"train/loss_ctc": 0.3937767446041107, "train/loss_error": 0.4493081867694855, "train/loss_total": 0.438201904296875 }, { "epoch": 1.6946299759551162, "step": 6343, "train/loss_ctc": 0.3943617343902588, "train/loss_error": 0.4536518156528473, "train/loss_total": 0.4417937994003296 }, { "epoch": 1.6948971413304834, "step": 6344, "train/loss_ctc": 0.5033655166625977, "train/loss_error": 0.45700603723526, "train/loss_total": 0.46627795696258545 }, { "epoch": 1.695164306705851, "step": 6345, "train/loss_ctc": 0.49440163373947144, "train/loss_error": 0.5364015102386475, "train/loss_total": 0.5280015468597412 }, { "epoch": 1.6954314720812182, "step": 6346, "train/loss_ctc": 0.7203752398490906, "train/loss_error": 0.4372999966144562, "train/loss_total": 0.4939150810241699 }, { "epoch": 1.6956986374565857, "step": 6347, "train/loss_ctc": 0.7190820574760437, "train/loss_error": 0.42956170439720154, "train/loss_total": 0.4874657690525055 }, { "epoch": 1.695965802831953, "step": 6348, "train/loss_ctc": 0.7083717584609985, "train/loss_error": 0.476333886384964, "train/loss_total": 0.5227414965629578 }, { "epoch": 1.6962329682073203, "step": 6349, "train/loss_ctc": 0.8484284281730652, "train/loss_error": 0.4135425090789795, "train/loss_total": 0.5005196928977966 }, { "epoch": 1.6965001335826877, "grad_norm": 6.445531368255615, "learning_rate": 1.9829014159764896e-05, "loss": 0.4935, "step": 6350 }, { "epoch": 1.6965001335826877, "step": 6350, "train/loss_ctc": 0.28319022059440613, "train/loss_error": 0.4392317235469818, "train/loss_total": 0.4080234169960022 }, { "epoch": 1.6967672989580551, "step": 6351, "train/loss_ctc": 0.9090582132339478, "train/loss_error": 0.4497237503528595, "train/loss_total": 0.5415906310081482 }, { "epoch": 1.6970344643334223, "step": 6352, "train/loss_ctc": 1.366794466972351, "train/loss_error": 0.5317018628120422, "train/loss_total": 0.698720395565033 }, { "epoch": 1.6973016297087897, "step": 6353, "train/loss_ctc": 0.7060199975967407, 
"train/loss_error": 0.45153340697288513, "train/loss_total": 0.5024307370185852 }, { "epoch": 1.6975687950841571, "step": 6354, "train/loss_ctc": 0.8656884431838989, "train/loss_error": 0.5540769696235657, "train/loss_total": 0.6163992881774902 }, { "epoch": 1.6978359604595243, "step": 6355, "train/loss_ctc": 0.43652215600013733, "train/loss_error": 0.4955368638038635, "train/loss_total": 0.4837339520454407 }, { "epoch": 1.6981031258348918, "step": 6356, "train/loss_ctc": 0.5006271600723267, "train/loss_error": 0.43585753440856934, "train/loss_total": 0.44881147146224976 }, { "epoch": 1.6983702912102592, "step": 6357, "train/loss_ctc": 0.7648947238922119, "train/loss_error": 0.4141330122947693, "train/loss_total": 0.4842853546142578 }, { "epoch": 1.6986374565856264, "step": 6358, "train/loss_ctc": 0.4880622923374176, "train/loss_error": 0.583452582359314, "train/loss_total": 0.5643745064735413 }, { "epoch": 1.698904621960994, "step": 6359, "train/loss_ctc": 0.5717172622680664, "train/loss_error": 0.4960080683231354, "train/loss_total": 0.5111498832702637 }, { "epoch": 1.6991717873363612, "grad_norm": 6.7918195724487305, "learning_rate": 1.9812984237242854e-05, "loss": 0.526, "step": 6360 }, { "epoch": 1.6991717873363612, "step": 6360, "train/loss_ctc": 0.7267928123474121, "train/loss_error": 0.4656728208065033, "train/loss_total": 0.517896831035614 }, { "epoch": 1.6994389527117284, "step": 6361, "train/loss_ctc": 0.6696938276290894, "train/loss_error": 0.49371659755706787, "train/loss_total": 0.5289120674133301 }, { "epoch": 1.699706118087096, "step": 6362, "train/loss_ctc": 1.1068907976150513, "train/loss_error": 0.4531773328781128, "train/loss_total": 0.5839200615882874 }, { "epoch": 1.6999732834624632, "step": 6363, "train/loss_ctc": 1.1858365535736084, "train/loss_error": 0.48434165120124817, "train/loss_total": 0.6246406435966492 }, { "epoch": 1.7002404488378307, "step": 6364, "train/loss_ctc": 0.9570278525352478, "train/loss_error": 0.4531632363796234, 
"train/loss_total": 0.5539361834526062 }, { "epoch": 1.700507614213198, "step": 6365, "train/loss_ctc": 0.8363232612609863, "train/loss_error": 0.4091893136501312, "train/loss_total": 0.4946160912513733 }, { "epoch": 1.7007747795885653, "step": 6366, "train/loss_ctc": 0.6643720269203186, "train/loss_error": 0.4676489233970642, "train/loss_total": 0.5069935321807861 }, { "epoch": 1.7010419449639327, "step": 6367, "train/loss_ctc": 0.5867053866386414, "train/loss_error": 0.45605283975601196, "train/loss_total": 0.4821833372116089 }, { "epoch": 1.7013091103393, "step": 6368, "train/loss_ctc": 1.0166035890579224, "train/loss_error": 0.4407913386821747, "train/loss_total": 0.5559538006782532 }, { "epoch": 1.7015762757146673, "step": 6369, "train/loss_ctc": 0.3867703080177307, "train/loss_error": 0.491784930229187, "train/loss_total": 0.47078201174736023 }, { "epoch": 1.7018434410900347, "grad_norm": 0.9367349743843079, "learning_rate": 1.9796954314720812e-05, "loss": 0.532, "step": 6370 }, { "epoch": 1.7018434410900347, "step": 6370, "train/loss_ctc": 1.526334524154663, "train/loss_error": 0.4266437590122223, "train/loss_total": 0.6465818881988525 }, { "epoch": 1.7021106064654021, "step": 6371, "train/loss_ctc": 0.8373297452926636, "train/loss_error": 0.4825778007507324, "train/loss_total": 0.5535281896591187 }, { "epoch": 1.7023777718407693, "step": 6372, "train/loss_ctc": 0.24653586745262146, "train/loss_error": 0.42663493752479553, "train/loss_total": 0.3906151354312897 }, { "epoch": 1.7026449372161367, "step": 6373, "train/loss_ctc": 1.1821767091751099, "train/loss_error": 0.45421701669692993, "train/loss_total": 0.599808931350708 }, { "epoch": 1.7029121025915042, "step": 6374, "train/loss_ctc": 0.7038556933403015, "train/loss_error": 0.4940284490585327, "train/loss_total": 0.5359938740730286 }, { "epoch": 1.7031792679668714, "step": 6375, "train/loss_ctc": 1.3245385885238647, "train/loss_error": 0.45425039529800415, "train/loss_total": 0.6283080577850342 }, { 
"epoch": 1.703446433342239, "step": 6376, "train/loss_ctc": 0.5771211385726929, "train/loss_error": 0.4559386968612671, "train/loss_total": 0.4801751971244812 }, { "epoch": 1.7037135987176062, "step": 6377, "train/loss_ctc": 0.3670393228530884, "train/loss_error": 0.44711098074913025, "train/loss_total": 0.4310966432094574 }, { "epoch": 1.7039807640929734, "step": 6378, "train/loss_ctc": 0.9751162528991699, "train/loss_error": 0.46705523133277893, "train/loss_total": 0.568667471408844 }, { "epoch": 1.704247929468341, "step": 6379, "train/loss_ctc": 0.54598069190979, "train/loss_error": 0.37584900856018066, "train/loss_total": 0.4098753333091736 }, { "epoch": 1.7045150948437082, "grad_norm": 1.4748255014419556, "learning_rate": 1.978092439219877e-05, "loss": 0.5245, "step": 6380 }, { "epoch": 1.7045150948437082, "step": 6380, "train/loss_ctc": 1.7167136669158936, "train/loss_error": 0.47904422879219055, "train/loss_total": 0.7265781164169312 }, { "epoch": 1.7047822602190756, "step": 6381, "train/loss_ctc": 0.5581309199333191, "train/loss_error": 0.41782164573669434, "train/loss_total": 0.44588351249694824 }, { "epoch": 1.705049425594443, "step": 6382, "train/loss_ctc": 0.500856876373291, "train/loss_error": 0.41383522748947144, "train/loss_total": 0.4312395751476288 }, { "epoch": 1.7053165909698103, "step": 6383, "train/loss_ctc": 1.058288812637329, "train/loss_error": 0.4766475558280945, "train/loss_total": 0.5929758548736572 }, { "epoch": 1.7055837563451777, "step": 6384, "train/loss_ctc": 0.956682026386261, "train/loss_error": 0.47435954213142395, "train/loss_total": 0.5708240270614624 }, { "epoch": 1.705850921720545, "step": 6385, "train/loss_ctc": 0.7606977224349976, "train/loss_error": 0.47400912642478943, "train/loss_total": 0.53134685754776 }, { "epoch": 1.7061180870959123, "step": 6386, "train/loss_ctc": 0.7210033535957336, "train/loss_error": 0.4487280249595642, "train/loss_total": 0.5031830668449402 }, { "epoch": 1.7063852524712797, "step": 6387, 
"train/loss_ctc": 0.3078280985355377, "train/loss_error": 0.3704836070537567, "train/loss_total": 0.3579525053501129 }, { "epoch": 1.7066524178466471, "step": 6388, "train/loss_ctc": 0.9359006881713867, "train/loss_error": 0.4301398694515228, "train/loss_total": 0.5312920212745667 }, { "epoch": 1.7069195832220143, "step": 6389, "train/loss_ctc": 0.5624653697013855, "train/loss_error": 0.5043702125549316, "train/loss_total": 0.5159892439842224 }, { "epoch": 1.7071867485973817, "grad_norm": 1.7701245546340942, "learning_rate": 1.976489446967673e-05, "loss": 0.5207, "step": 6390 }, { "epoch": 1.7071867485973817, "step": 6390, "train/loss_ctc": 1.1276600360870361, "train/loss_error": 0.4895256757736206, "train/loss_total": 0.6171525716781616 }, { "epoch": 1.7074539139727491, "step": 6391, "train/loss_ctc": 1.5905966758728027, "train/loss_error": 0.5021309852600098, "train/loss_total": 0.7198241353034973 }, { "epoch": 1.7077210793481163, "step": 6392, "train/loss_ctc": 0.2682605981826782, "train/loss_error": 0.4956395924091339, "train/loss_total": 0.4501637816429138 }, { "epoch": 1.707988244723484, "step": 6393, "train/loss_ctc": 1.6009324789047241, "train/loss_error": 0.4947930574417114, "train/loss_total": 0.716020941734314 }, { "epoch": 1.7082554100988512, "step": 6394, "train/loss_ctc": 0.4464324116706848, "train/loss_error": 0.42091020941734314, "train/loss_total": 0.42601466178894043 }, { "epoch": 1.7085225754742186, "step": 6395, "train/loss_ctc": 0.6421708464622498, "train/loss_error": 0.4657624065876007, "train/loss_total": 0.5010440945625305 }, { "epoch": 1.708789740849586, "step": 6396, "train/loss_ctc": 0.652941107749939, "train/loss_error": 0.4376363456249237, "train/loss_total": 0.48069727420806885 }, { "epoch": 1.7090569062249532, "step": 6397, "train/loss_ctc": 0.731090784072876, "train/loss_error": 0.4566503167152405, "train/loss_total": 0.5115384459495544 }, { "epoch": 1.7093240716003206, "step": 6398, "train/loss_ctc": 0.9354138374328613, 
"train/loss_error": 0.4920365512371063, "train/loss_total": 0.5807120203971863 }, { "epoch": 1.709591236975688, "step": 6399, "train/loss_ctc": 0.8041470050811768, "train/loss_error": 0.40992116928100586, "train/loss_total": 0.4887663722038269 }, { "epoch": 1.7098584023510552, "grad_norm": 4.158308982849121, "learning_rate": 1.974886454715469e-05, "loss": 0.5492, "step": 6400 }, { "epoch": 1.7098584023510552, "step": 6400, "train/loss_ctc": 0.7124217748641968, "train/loss_error": 0.43952763080596924, "train/loss_total": 0.4941064715385437 }, { "epoch": 1.7101255677264227, "step": 6401, "train/loss_ctc": 0.8911750316619873, "train/loss_error": 0.4839191436767578, "train/loss_total": 0.5653703212738037 }, { "epoch": 1.71039273310179, "step": 6402, "train/loss_ctc": 0.6972451210021973, "train/loss_error": 0.465557724237442, "train/loss_total": 0.5118951797485352 }, { "epoch": 1.7106598984771573, "step": 6403, "train/loss_ctc": 1.081472396850586, "train/loss_error": 0.5283841490745544, "train/loss_total": 0.6390017867088318 }, { "epoch": 1.7109270638525247, "step": 6404, "train/loss_ctc": 1.11074960231781, "train/loss_error": 0.45790693163871765, "train/loss_total": 0.5884754657745361 }, { "epoch": 1.711194229227892, "step": 6405, "train/loss_ctc": 0.32587578892707825, "train/loss_error": 0.43126380443573, "train/loss_total": 0.410186231136322 }, { "epoch": 1.7114613946032593, "step": 6406, "train/loss_ctc": 0.5651745796203613, "train/loss_error": 0.5448325872421265, "train/loss_total": 0.5489010214805603 }, { "epoch": 1.7117285599786267, "step": 6407, "train/loss_ctc": 0.5947192907333374, "train/loss_error": 0.42523613572120667, "train/loss_total": 0.4591327905654907 }, { "epoch": 1.7119957253539941, "step": 6408, "train/loss_ctc": 0.5406104922294617, "train/loss_error": 0.4246855080127716, "train/loss_total": 0.44787052273750305 }, { "epoch": 1.7122628907293613, "step": 6409, "train/loss_ctc": 1.170541763305664, "train/loss_error": 0.41007471084594727, 
"train/loss_total": 0.5621681213378906 }, { "epoch": 1.712530056104729, "grad_norm": 2.274930238723755, "learning_rate": 1.9732834624632648e-05, "loss": 0.5227, "step": 6410 }, { "epoch": 1.712530056104729, "step": 6410, "train/loss_ctc": 0.8193842172622681, "train/loss_error": 0.4314707815647125, "train/loss_total": 0.5090534687042236 }, { "epoch": 1.7127972214800962, "step": 6411, "train/loss_ctc": 0.6437365412712097, "train/loss_error": 0.43185269832611084, "train/loss_total": 0.47422948479652405 }, { "epoch": 1.7130643868554636, "step": 6412, "train/loss_ctc": 0.8763477206230164, "train/loss_error": 0.5115174651145935, "train/loss_total": 0.5844835042953491 }, { "epoch": 1.713331552230831, "step": 6413, "train/loss_ctc": 0.8949520587921143, "train/loss_error": 0.44426438212394714, "train/loss_total": 0.5344018936157227 }, { "epoch": 1.7135987176061982, "step": 6414, "train/loss_ctc": 1.1193228960037231, "train/loss_error": 0.5355461835861206, "train/loss_total": 0.652301549911499 }, { "epoch": 1.7138658829815656, "step": 6415, "train/loss_ctc": 0.7178125381469727, "train/loss_error": 0.3899899125099182, "train/loss_total": 0.45555442571640015 }, { "epoch": 1.714133048356933, "step": 6416, "train/loss_ctc": 0.5858711004257202, "train/loss_error": 0.43206286430358887, "train/loss_total": 0.4628245234489441 }, { "epoch": 1.7144002137323002, "step": 6417, "train/loss_ctc": 1.0015379190444946, "train/loss_error": 0.47637850046157837, "train/loss_total": 0.5814104080200195 }, { "epoch": 1.7146673791076676, "step": 6418, "train/loss_ctc": 0.7588163614273071, "train/loss_error": 0.4602302014827728, "train/loss_total": 0.5199474692344666 }, { "epoch": 1.714934544483035, "step": 6419, "train/loss_ctc": 0.4501141905784607, "train/loss_error": 0.5106776356697083, "train/loss_total": 0.4985649585723877 }, { "epoch": 1.7152017098584023, "grad_norm": 1.8532273769378662, "learning_rate": 1.9716804702110606e-05, "loss": 0.5273, "step": 6420 }, { "epoch": 1.7152017098584023, 
"step": 6420, "train/loss_ctc": 0.4491233825683594, "train/loss_error": 0.43930187821388245, "train/loss_total": 0.44126617908477783 }, { "epoch": 1.7154688752337697, "step": 6421, "train/loss_ctc": 0.293009877204895, "train/loss_error": 0.3564567565917969, "train/loss_total": 0.343767374753952 }, { "epoch": 1.715736040609137, "step": 6422, "train/loss_ctc": 0.670164942741394, "train/loss_error": 0.43623828887939453, "train/loss_total": 0.48302364349365234 }, { "epoch": 1.7160032059845043, "step": 6423, "train/loss_ctc": 0.7018942832946777, "train/loss_error": 0.4479488134384155, "train/loss_total": 0.4987379312515259 }, { "epoch": 1.716270371359872, "step": 6424, "train/loss_ctc": 0.6504123210906982, "train/loss_error": 0.4789436161518097, "train/loss_total": 0.5132373571395874 }, { "epoch": 1.7165375367352391, "step": 6425, "train/loss_ctc": 0.7033178806304932, "train/loss_error": 0.4228207468986511, "train/loss_total": 0.4789201617240906 }, { "epoch": 1.7168047021106063, "step": 6426, "train/loss_ctc": 0.8142249584197998, "train/loss_error": 0.466755747795105, "train/loss_total": 0.536249577999115 }, { "epoch": 1.717071867485974, "step": 6427, "train/loss_ctc": 0.8770468235015869, "train/loss_error": 0.4601757526397705, "train/loss_total": 0.5435499548912048 }, { "epoch": 1.7173390328613412, "step": 6428, "train/loss_ctc": 0.47852611541748047, "train/loss_error": 0.465157687664032, "train/loss_total": 0.4678313732147217 }, { "epoch": 1.7176061982367086, "step": 6429, "train/loss_ctc": 1.108410358428955, "train/loss_error": 0.46916428208351135, "train/loss_total": 0.5970134735107422 }, { "epoch": 1.717873363612076, "grad_norm": 1.8434514999389648, "learning_rate": 1.9700774779588564e-05, "loss": 0.4904, "step": 6430 }, { "epoch": 1.717873363612076, "step": 6430, "train/loss_ctc": 1.1562087535858154, "train/loss_error": 0.531578779220581, "train/loss_total": 0.65650475025177 }, { "epoch": 1.7181405289874432, "step": 6431, "train/loss_ctc": 0.5412903428077698, 
"train/loss_error": 0.4244226813316345, "train/loss_total": 0.4477962255477905 }, { "epoch": 1.7184076943628106, "step": 6432, "train/loss_ctc": 0.7300724387168884, "train/loss_error": 0.4696095883846283, "train/loss_total": 0.5217021703720093 }, { "epoch": 1.718674859738178, "step": 6433, "train/loss_ctc": 1.052189826965332, "train/loss_error": 0.4820028841495514, "train/loss_total": 0.5960403084754944 }, { "epoch": 1.7189420251135452, "step": 6434, "train/loss_ctc": 0.5895950794219971, "train/loss_error": 0.46158793568611145, "train/loss_total": 0.487189382314682 }, { "epoch": 1.7192091904889126, "step": 6435, "train/loss_ctc": 0.7200562357902527, "train/loss_error": 0.4962194859981537, "train/loss_total": 0.5409868359565735 }, { "epoch": 1.71947635586428, "step": 6436, "train/loss_ctc": 1.3203539848327637, "train/loss_error": 0.4401937425136566, "train/loss_total": 0.6162258386611938 }, { "epoch": 1.7197435212396472, "step": 6437, "train/loss_ctc": 1.0939408540725708, "train/loss_error": 0.4434491991996765, "train/loss_total": 0.5735475420951843 }, { "epoch": 1.7200106866150147, "step": 6438, "train/loss_ctc": 0.5625264644622803, "train/loss_error": 0.4471420645713806, "train/loss_total": 0.4702189564704895 }, { "epoch": 1.720277851990382, "step": 6439, "train/loss_ctc": 0.5305975675582886, "train/loss_error": 0.5001949667930603, "train/loss_total": 0.5062755346298218 }, { "epoch": 1.7205450173657493, "grad_norm": 1.6554665565490723, "learning_rate": 1.9684744857066522e-05, "loss": 0.5416, "step": 6440 }, { "epoch": 1.7205450173657493, "step": 6440, "train/loss_ctc": 0.873512864112854, "train/loss_error": 0.5021539330482483, "train/loss_total": 0.5764257311820984 }, { "epoch": 1.720812182741117, "step": 6441, "train/loss_ctc": 0.8292276263237, "train/loss_error": 0.5193049311637878, "train/loss_total": 0.5812894701957703 }, { "epoch": 1.721079348116484, "step": 6442, "train/loss_ctc": 0.334719717502594, "train/loss_error": 0.43620574474334717, 
"train/loss_total": 0.415908545255661 }, { "epoch": 1.7213465134918513, "step": 6443, "train/loss_ctc": 1.0284016132354736, "train/loss_error": 0.44680073857307434, "train/loss_total": 0.5631209015846252 }, { "epoch": 1.721613678867219, "step": 6444, "train/loss_ctc": 0.8647671937942505, "train/loss_error": 0.4787229299545288, "train/loss_total": 0.555931806564331 }, { "epoch": 1.7218808442425861, "step": 6445, "train/loss_ctc": 0.773671567440033, "train/loss_error": 0.4255281388759613, "train/loss_total": 0.49515682458877563 }, { "epoch": 1.7221480096179536, "step": 6446, "train/loss_ctc": 0.781705915927887, "train/loss_error": 0.44906744360923767, "train/loss_total": 0.5155951380729675 }, { "epoch": 1.722415174993321, "step": 6447, "train/loss_ctc": 0.2644555866718292, "train/loss_error": 0.41678720712661743, "train/loss_total": 0.38632088899612427 }, { "epoch": 1.7226823403686882, "step": 6448, "train/loss_ctc": 0.8203039169311523, "train/loss_error": 0.40389886498451233, "train/loss_total": 0.48717987537384033 }, { "epoch": 1.7229495057440556, "step": 6449, "train/loss_ctc": 0.8307955861091614, "train/loss_error": 0.4295666813850403, "train/loss_total": 0.5098124742507935 }, { "epoch": 1.723216671119423, "grad_norm": 2.039742946624756, "learning_rate": 1.9668714934544483e-05, "loss": 0.5087, "step": 6450 }, { "epoch": 1.723216671119423, "step": 6450, "train/loss_ctc": 0.9059450030326843, "train/loss_error": 0.3907927870750427, "train/loss_total": 0.49382323026657104 }, { "epoch": 1.7234838364947902, "step": 6451, "train/loss_ctc": 1.1805998086929321, "train/loss_error": 0.4460780620574951, "train/loss_total": 0.5929824113845825 }, { "epoch": 1.7237510018701576, "step": 6452, "train/loss_ctc": 1.530820369720459, "train/loss_error": 0.4750630855560303, "train/loss_total": 0.6862145662307739 }, { "epoch": 1.724018167245525, "step": 6453, "train/loss_ctc": 0.519119143486023, "train/loss_error": 0.4285491704940796, "train/loss_total": 0.44666317105293274 }, { 
"epoch": 1.7242853326208922, "step": 6454, "train/loss_ctc": 0.6953854560852051, "train/loss_error": 0.43242889642715454, "train/loss_total": 0.4850202202796936 }, { "epoch": 1.7245524979962596, "step": 6455, "train/loss_ctc": 0.6778512001037598, "train/loss_error": 0.45873963832855225, "train/loss_total": 0.5025619864463806 }, { "epoch": 1.724819663371627, "step": 6456, "train/loss_ctc": 0.7914284467697144, "train/loss_error": 0.3941320776939392, "train/loss_total": 0.4735913872718811 }, { "epoch": 1.7250868287469943, "step": 6457, "train/loss_ctc": 0.9826220273971558, "train/loss_error": 0.43453478813171387, "train/loss_total": 0.5441522598266602 }, { "epoch": 1.725353994122362, "step": 6458, "train/loss_ctc": 0.6756314039230347, "train/loss_error": 0.5490409135818481, "train/loss_total": 0.5743589997291565 }, { "epoch": 1.725621159497729, "step": 6459, "train/loss_ctc": 0.6450780034065247, "train/loss_error": 0.4957197904586792, "train/loss_total": 0.5255914330482483 }, { "epoch": 1.7258883248730963, "grad_norm": 5.180128574371338, "learning_rate": 1.9652685012022445e-05, "loss": 0.5325, "step": 6460 }, { "epoch": 1.7258883248730963, "step": 6460, "train/loss_ctc": 0.6152278184890747, "train/loss_error": 0.4464748203754425, "train/loss_total": 0.48022541403770447 }, { "epoch": 1.726155490248464, "step": 6461, "train/loss_ctc": 0.7559090852737427, "train/loss_error": 0.40404197573661804, "train/loss_total": 0.4744153916835785 }, { "epoch": 1.7264226556238311, "step": 6462, "train/loss_ctc": 0.9955195188522339, "train/loss_error": 0.5060257911682129, "train/loss_total": 0.603924572467804 }, { "epoch": 1.7266898209991985, "step": 6463, "train/loss_ctc": 0.817145824432373, "train/loss_error": 0.4682783782482147, "train/loss_total": 0.5380518436431885 }, { "epoch": 1.726956986374566, "step": 6464, "train/loss_ctc": 1.1964466571807861, "train/loss_error": 0.4592475891113281, "train/loss_total": 0.6066874265670776 }, { "epoch": 1.7272241517499332, "step": 6465, 
"train/loss_ctc": 0.7181363701820374, "train/loss_error": 0.37501388788223267, "train/loss_total": 0.4436383843421936 }, { "epoch": 1.7274913171253006, "step": 6466, "train/loss_ctc": 0.8773382306098938, "train/loss_error": 0.45535197854042053, "train/loss_total": 0.539749264717102 }, { "epoch": 1.727758482500668, "step": 6467, "train/loss_ctc": 0.6851166486740112, "train/loss_error": 0.47200068831443787, "train/loss_total": 0.5146238803863525 }, { "epoch": 1.7280256478760352, "step": 6468, "train/loss_ctc": 0.4078938961029053, "train/loss_error": 0.4735894799232483, "train/loss_total": 0.4604503810405731 }, { "epoch": 1.7282928132514026, "step": 6469, "train/loss_ctc": 1.014918565750122, "train/loss_error": 0.5234718918800354, "train/loss_total": 0.6217612624168396 }, { "epoch": 1.72855997862677, "grad_norm": 2.8798413276672363, "learning_rate": 1.9636655089500403e-05, "loss": 0.5284, "step": 6470 }, { "epoch": 1.72855997862677, "step": 6470, "train/loss_ctc": 0.7494542598724365, "train/loss_error": 0.4403855800628662, "train/loss_total": 0.5021993517875671 }, { "epoch": 1.7288271440021372, "step": 6471, "train/loss_ctc": 1.3139450550079346, "train/loss_error": 0.46009838581085205, "train/loss_total": 0.6308677196502686 }, { "epoch": 1.7290943093775046, "step": 6472, "train/loss_ctc": 1.1425106525421143, "train/loss_error": 0.4846477508544922, "train/loss_total": 0.6162203550338745 }, { "epoch": 1.729361474752872, "step": 6473, "train/loss_ctc": 0.3912706971168518, "train/loss_error": 0.44993355870246887, "train/loss_total": 0.438200980424881 }, { "epoch": 1.7296286401282392, "step": 6474, "train/loss_ctc": 0.4044252932071686, "train/loss_error": 0.4089678227901459, "train/loss_total": 0.40805932879447937 }, { "epoch": 1.7298958055036069, "step": 6475, "train/loss_ctc": 0.3086020350456238, "train/loss_error": 0.4378250241279602, "train/loss_total": 0.41198042035102844 }, { "epoch": 1.730162970878974, "step": 6476, "train/loss_ctc": 1.1444607973098755, 
"train/loss_error": 0.4651871919631958, "train/loss_total": 0.6010419130325317 }, { "epoch": 1.7304301362543413, "step": 6477, "train/loss_ctc": 1.0702216625213623, "train/loss_error": 0.5290992856025696, "train/loss_total": 0.6373237371444702 }, { "epoch": 1.730697301629709, "step": 6478, "train/loss_ctc": 0.428567111492157, "train/loss_error": 0.44641536474227905, "train/loss_total": 0.4428457021713257 }, { "epoch": 1.7309644670050761, "step": 6479, "train/loss_ctc": 0.9110591411590576, "train/loss_error": 0.5126396417617798, "train/loss_total": 0.5923235416412354 }, { "epoch": 1.7312316323804435, "grad_norm": 3.4394850730895996, "learning_rate": 1.962062516697836e-05, "loss": 0.5281, "step": 6480 }, { "epoch": 1.7312316323804435, "step": 6480, "train/loss_ctc": 0.303678035736084, "train/loss_error": 0.43002229928970337, "train/loss_total": 0.4047534465789795 }, { "epoch": 1.731498797755811, "step": 6481, "train/loss_ctc": 0.614997923374176, "train/loss_error": 0.4847332835197449, "train/loss_total": 0.510786235332489 }, { "epoch": 1.7317659631311781, "step": 6482, "train/loss_ctc": 0.3217390477657318, "train/loss_error": 0.42352616786956787, "train/loss_total": 0.4031687378883362 }, { "epoch": 1.7320331285065456, "step": 6483, "train/loss_ctc": 1.2139517068862915, "train/loss_error": 0.3849344551563263, "train/loss_total": 0.5507379174232483 }, { "epoch": 1.732300293881913, "step": 6484, "train/loss_ctc": 0.2878967821598053, "train/loss_error": 0.4249548017978668, "train/loss_total": 0.3975432217121124 }, { "epoch": 1.7325674592572802, "step": 6485, "train/loss_ctc": 0.6463884115219116, "train/loss_error": 0.46049630641937256, "train/loss_total": 0.49767476320266724 }, { "epoch": 1.7328346246326476, "step": 6486, "train/loss_ctc": 0.8846145868301392, "train/loss_error": 0.4353247582912445, "train/loss_total": 0.5251827239990234 }, { "epoch": 1.733101790008015, "step": 6487, "train/loss_ctc": 0.8296603560447693, "train/loss_error": 0.4572935402393341, 
"train/loss_total": 0.5317668914794922 }, { "epoch": 1.7333689553833822, "step": 6488, "train/loss_ctc": 0.9457229375839233, "train/loss_error": 0.40211573243141174, "train/loss_total": 0.510837197303772 }, { "epoch": 1.7336361207587496, "step": 6489, "train/loss_ctc": 1.1443395614624023, "train/loss_error": 0.5445793271064758, "train/loss_total": 0.6645313501358032 }, { "epoch": 1.733903286134117, "grad_norm": 2.59609317779541, "learning_rate": 1.960459524445632e-05, "loss": 0.4997, "step": 6490 }, { "epoch": 1.733903286134117, "step": 6490, "train/loss_ctc": 0.754325270652771, "train/loss_error": 0.48936012387275696, "train/loss_total": 0.5423531532287598 }, { "epoch": 1.7341704515094842, "step": 6491, "train/loss_ctc": 0.7137182950973511, "train/loss_error": 0.483476847410202, "train/loss_total": 0.5295251607894897 }, { "epoch": 1.7344376168848519, "step": 6492, "train/loss_ctc": 1.3743548393249512, "train/loss_error": 0.5108821392059326, "train/loss_total": 0.6835767030715942 }, { "epoch": 1.734704782260219, "step": 6493, "train/loss_ctc": 0.8326050043106079, "train/loss_error": 0.3883051872253418, "train/loss_total": 0.477165162563324 }, { "epoch": 1.7349719476355863, "step": 6494, "train/loss_ctc": 0.40812948346138, "train/loss_error": 0.46095040440559387, "train/loss_total": 0.4503862261772156 }, { "epoch": 1.735239113010954, "step": 6495, "train/loss_ctc": 0.5772461295127869, "train/loss_error": 0.482940673828125, "train/loss_total": 0.5018017888069153 }, { "epoch": 1.735506278386321, "step": 6496, "train/loss_ctc": 1.0237951278686523, "train/loss_error": 0.46483808755874634, "train/loss_total": 0.5766295194625854 }, { "epoch": 1.7357734437616885, "step": 6497, "train/loss_ctc": 1.1713814735412598, "train/loss_error": 0.445322185754776, "train/loss_total": 0.5905340909957886 }, { "epoch": 1.736040609137056, "step": 6498, "train/loss_ctc": 1.1132361888885498, "train/loss_error": 0.4553053677082062, "train/loss_total": 0.5868915319442749 }, { "epoch": 
1.7363077745124231, "step": 6499, "train/loss_ctc": 0.6127001047134399, "train/loss_error": 0.4243003726005554, "train/loss_total": 0.46198034286499023 }, { "epoch": 1.7365749398877905, "grad_norm": 17.324071884155273, "learning_rate": 1.9588565321934277e-05, "loss": 0.5401, "step": 6500 }, { "epoch": 1.7365749398877905, "step": 6500, "train/loss_ctc": 0.5074772834777832, "train/loss_error": 0.4528592824935913, "train/loss_total": 0.4637829065322876 }, { "epoch": 1.736842105263158, "step": 6501, "train/loss_ctc": 1.0415740013122559, "train/loss_error": 0.4475312829017639, "train/loss_total": 0.5663398504257202 }, { "epoch": 1.7371092706385252, "step": 6502, "train/loss_ctc": 1.2602425813674927, "train/loss_error": 0.5154016613960266, "train/loss_total": 0.6643698215484619 }, { "epoch": 1.7373764360138926, "step": 6503, "train/loss_ctc": 0.34144920110702515, "train/loss_error": 0.46880388259887695, "train/loss_total": 0.4433329701423645 }, { "epoch": 1.73764360138926, "step": 6504, "train/loss_ctc": 1.255018949508667, "train/loss_error": 0.44112884998321533, "train/loss_total": 0.6039068698883057 }, { "epoch": 1.7379107667646272, "step": 6505, "train/loss_ctc": 0.9582951068878174, "train/loss_error": 0.47430914640426636, "train/loss_total": 0.5711063146591187 }, { "epoch": 1.7381779321399946, "step": 6506, "train/loss_ctc": 0.4148823022842407, "train/loss_error": 0.46789926290512085, "train/loss_total": 0.45729586482048035 }, { "epoch": 1.738445097515362, "step": 6507, "train/loss_ctc": 0.8333191275596619, "train/loss_error": 0.4704172909259796, "train/loss_total": 0.5429976582527161 }, { "epoch": 1.7387122628907292, "step": 6508, "train/loss_ctc": 0.8822110891342163, "train/loss_error": 0.48420771956443787, "train/loss_total": 0.5638083815574646 }, { "epoch": 1.7389794282660969, "step": 6509, "train/loss_ctc": 0.9370736479759216, "train/loss_error": 0.4445267915725708, "train/loss_total": 0.543036162853241 }, { "epoch": 1.739246593641464, "grad_norm": 
2.087625026702881, "learning_rate": 1.957253539941224e-05, "loss": 0.542, "step": 6510 }, { "epoch": 1.739246593641464, "step": 6510, "train/loss_ctc": 0.8294140100479126, "train/loss_error": 0.3995816707611084, "train/loss_total": 0.48554813861846924 }, { "epoch": 1.7395137590168315, "step": 6511, "train/loss_ctc": 0.8480974435806274, "train/loss_error": 0.5031858682632446, "train/loss_total": 0.5721681714057922 }, { "epoch": 1.739780924392199, "step": 6512, "train/loss_ctc": 0.7560106515884399, "train/loss_error": 0.4699205160140991, "train/loss_total": 0.5271385312080383 }, { "epoch": 1.740048089767566, "step": 6513, "train/loss_ctc": 1.2893092632293701, "train/loss_error": 0.4929461181163788, "train/loss_total": 0.652218759059906 }, { "epoch": 1.7403152551429335, "step": 6514, "train/loss_ctc": 1.1512396335601807, "train/loss_error": 0.46824246644973755, "train/loss_total": 0.6048418879508972 }, { "epoch": 1.740582420518301, "step": 6515, "train/loss_ctc": 1.1123030185699463, "train/loss_error": 0.4307210147380829, "train/loss_total": 0.5670374035835266 }, { "epoch": 1.7408495858936681, "step": 6516, "train/loss_ctc": 0.4913215935230255, "train/loss_error": 0.50774085521698, "train/loss_total": 0.5044569969177246 }, { "epoch": 1.7411167512690355, "step": 6517, "train/loss_ctc": 0.5840162038803101, "train/loss_error": 0.4527885913848877, "train/loss_total": 0.4790341258049011 }, { "epoch": 1.741383916644403, "step": 6518, "train/loss_ctc": 0.5203635692596436, "train/loss_error": 0.4449547827243805, "train/loss_total": 0.4600365459918976 }, { "epoch": 1.7416510820197701, "step": 6519, "train/loss_ctc": 0.6093775033950806, "train/loss_error": 0.4379332661628723, "train/loss_total": 0.47222211956977844 }, { "epoch": 1.7419182473951376, "grad_norm": 3.281332492828369, "learning_rate": 1.9556505476890197e-05, "loss": 0.5325, "step": 6520 }, { "epoch": 1.7419182473951376, "step": 6520, "train/loss_ctc": 0.6686952114105225, "train/loss_error": 0.5421944856643677, 
"train/loss_total": 0.5674946308135986 }, { "epoch": 1.742185412770505, "step": 6521, "train/loss_ctc": 0.9171596169471741, "train/loss_error": 0.4638296663761139, "train/loss_total": 0.5544956922531128 }, { "epoch": 1.7424525781458722, "step": 6522, "train/loss_ctc": 0.30282506346702576, "train/loss_error": 0.4878493845462799, "train/loss_total": 0.45084452629089355 }, { "epoch": 1.7427197435212396, "step": 6523, "train/loss_ctc": 0.5898715853691101, "train/loss_error": 0.4770246744155884, "train/loss_total": 0.4995940625667572 }, { "epoch": 1.742986908896607, "step": 6524, "train/loss_ctc": 0.8059735298156738, "train/loss_error": 0.5097211599349976, "train/loss_total": 0.5689716339111328 }, { "epoch": 1.7432540742719742, "step": 6525, "train/loss_ctc": 0.8403152227401733, "train/loss_error": 0.4781298339366913, "train/loss_total": 0.5505669116973877 }, { "epoch": 1.7435212396473418, "step": 6526, "train/loss_ctc": 0.9902054667472839, "train/loss_error": 0.5062884092330933, "train/loss_total": 0.6030718088150024 }, { "epoch": 1.743788405022709, "step": 6527, "train/loss_ctc": 0.3855273425579071, "train/loss_error": 0.4619552493095398, "train/loss_total": 0.44666966795921326 }, { "epoch": 1.7440555703980765, "step": 6528, "train/loss_ctc": 0.94962477684021, "train/loss_error": 0.453835129737854, "train/loss_total": 0.5529930591583252 }, { "epoch": 1.7443227357734439, "step": 6529, "train/loss_ctc": 0.6865105628967285, "train/loss_error": 0.46152764558792114, "train/loss_total": 0.5065242648124695 }, { "epoch": 1.744589901148811, "grad_norm": 7.748332500457764, "learning_rate": 1.9540475554368155e-05, "loss": 0.5301, "step": 6530 }, { "epoch": 1.744589901148811, "step": 6530, "train/loss_ctc": 1.2743934392929077, "train/loss_error": 0.44773203134536743, "train/loss_total": 0.6130642890930176 }, { "epoch": 1.7448570665241785, "step": 6531, "train/loss_ctc": 0.70616614818573, "train/loss_error": 0.42535287141799927, "train/loss_total": 0.4815155267715454 }, { "epoch": 
1.745124231899546, "step": 6532, "train/loss_ctc": 0.3423747420310974, "train/loss_error": 0.5107154846191406, "train/loss_total": 0.4770473539829254 }, { "epoch": 1.745391397274913, "step": 6533, "train/loss_ctc": 0.6199319362640381, "train/loss_error": 0.5131523013114929, "train/loss_total": 0.534508228302002 }, { "epoch": 1.7456585626502805, "step": 6534, "train/loss_ctc": 0.5011804103851318, "train/loss_error": 0.41628995537757874, "train/loss_total": 0.43326807022094727 }, { "epoch": 1.745925728025648, "step": 6535, "train/loss_ctc": 0.4734094738960266, "train/loss_error": 0.45839130878448486, "train/loss_total": 0.46139493584632874 }, { "epoch": 1.7461928934010151, "step": 6536, "train/loss_ctc": 0.7299231290817261, "train/loss_error": 0.40039414167404175, "train/loss_total": 0.46629995107650757 }, { "epoch": 1.7464600587763826, "step": 6537, "train/loss_ctc": 0.6644172668457031, "train/loss_error": 0.4143226444721222, "train/loss_total": 0.46434158086776733 }, { "epoch": 1.74672722415175, "step": 6538, "train/loss_ctc": 1.2654070854187012, "train/loss_error": 0.45107170939445496, "train/loss_total": 0.6139388084411621 }, { "epoch": 1.7469943895271172, "step": 6539, "train/loss_ctc": 0.8617087602615356, "train/loss_error": 0.4917110800743103, "train/loss_total": 0.5657106041908264 }, { "epoch": 1.7472615549024848, "grad_norm": 2.2028324604034424, "learning_rate": 1.9524445631846113e-05, "loss": 0.5111, "step": 6540 }, { "epoch": 1.7472615549024848, "step": 6540, "train/loss_ctc": 0.8206130266189575, "train/loss_error": 0.47140857577323914, "train/loss_total": 0.5412495136260986 }, { "epoch": 1.747528720277852, "step": 6541, "train/loss_ctc": 0.6093478202819824, "train/loss_error": 0.4793115258216858, "train/loss_total": 0.5053187608718872 }, { "epoch": 1.7477958856532192, "step": 6542, "train/loss_ctc": 0.805457353591919, "train/loss_error": 0.43144306540489197, "train/loss_total": 0.5062459707260132 }, { "epoch": 1.7480630510285868, "step": 6543, 
"train/loss_ctc": 0.5937134027481079, "train/loss_error": 0.4243945777416229, "train/loss_total": 0.45825833082199097 }, { "epoch": 1.748330216403954, "step": 6544, "train/loss_ctc": 0.5079163312911987, "train/loss_error": 0.48198366165161133, "train/loss_total": 0.4871702194213867 }, { "epoch": 1.7485973817793214, "step": 6545, "train/loss_ctc": 0.8166587352752686, "train/loss_error": 0.4711062014102936, "train/loss_total": 0.5402166843414307 }, { "epoch": 1.7488645471546889, "step": 6546, "train/loss_ctc": 0.5052358508110046, "train/loss_error": 0.3745986521244049, "train/loss_total": 0.4007260799407959 }, { "epoch": 1.749131712530056, "step": 6547, "train/loss_ctc": 0.5933493375778198, "train/loss_error": 0.4024348556995392, "train/loss_total": 0.44061776995658875 }, { "epoch": 1.7493988779054235, "step": 6548, "train/loss_ctc": 1.1139816045761108, "train/loss_error": 0.48172685503959656, "train/loss_total": 0.6081777811050415 }, { "epoch": 1.749666043280791, "step": 6549, "train/loss_ctc": 0.39598679542541504, "train/loss_error": 0.3975832760334015, "train/loss_total": 0.3972640037536621 }, { "epoch": 1.749933208656158, "grad_norm": 1.9683330059051514, "learning_rate": 1.950841570932407e-05, "loss": 0.4885, "step": 6550 }, { "epoch": 1.749933208656158, "step": 6550, "train/loss_ctc": 0.3968197703361511, "train/loss_error": 0.3928624987602234, "train/loss_total": 0.3936539888381958 }, { "epoch": 1.7502003740315255, "step": 6551, "train/loss_ctc": 0.4934898018836975, "train/loss_error": 0.4615994095802307, "train/loss_total": 0.46797749400138855 }, { "epoch": 1.750467539406893, "step": 6552, "train/loss_ctc": 0.4737112522125244, "train/loss_error": 0.422776997089386, "train/loss_total": 0.43296384811401367 }, { "epoch": 1.7507347047822601, "step": 6553, "train/loss_ctc": 0.43507882952690125, "train/loss_error": 0.5029072165489197, "train/loss_total": 0.4893415570259094 }, { "epoch": 1.7510018701576275, "step": 6554, "train/loss_ctc": 0.6512909531593323, 
"train/loss_error": 0.513593852519989, "train/loss_total": 0.5411332845687866 }, { "epoch": 1.751269035532995, "step": 6555, "train/loss_ctc": 0.799286425113678, "train/loss_error": 0.4666650891304016, "train/loss_total": 0.5331893563270569 }, { "epoch": 1.7515362009083622, "step": 6556, "train/loss_ctc": 0.7195686101913452, "train/loss_error": 0.46558839082717896, "train/loss_total": 0.5163844227790833 }, { "epoch": 1.7518033662837298, "step": 6557, "train/loss_ctc": 0.5416541695594788, "train/loss_error": 0.472051203250885, "train/loss_total": 0.4859718084335327 }, { "epoch": 1.752070531659097, "step": 6558, "train/loss_ctc": 0.6246817708015442, "train/loss_error": 0.4276958703994751, "train/loss_total": 0.4670930504798889 }, { "epoch": 1.7523376970344642, "step": 6559, "train/loss_ctc": 0.46126535534858704, "train/loss_error": 0.557634174823761, "train/loss_total": 0.5383604168891907 }, { "epoch": 1.7526048624098318, "grad_norm": 2.315936326980591, "learning_rate": 1.949238578680203e-05, "loss": 0.4866, "step": 6560 }, { "epoch": 1.7526048624098318, "step": 6560, "train/loss_ctc": 1.1846880912780762, "train/loss_error": 0.4372081160545349, "train/loss_total": 0.5867041349411011 }, { "epoch": 1.752872027785199, "step": 6561, "train/loss_ctc": 1.1573982238769531, "train/loss_error": 0.45868387818336487, "train/loss_total": 0.5984267592430115 }, { "epoch": 1.7531391931605664, "step": 6562, "train/loss_ctc": 0.6032348871231079, "train/loss_error": 0.4404837191104889, "train/loss_total": 0.47303396463394165 }, { "epoch": 1.7534063585359339, "step": 6563, "train/loss_ctc": 0.6308608055114746, "train/loss_error": 0.4806779623031616, "train/loss_total": 0.5107145309448242 }, { "epoch": 1.753673523911301, "step": 6564, "train/loss_ctc": 1.1878986358642578, "train/loss_error": 0.4649074375629425, "train/loss_total": 0.6095056533813477 }, { "epoch": 1.7539406892866685, "step": 6565, "train/loss_ctc": 0.7616859078407288, "train/loss_error": 0.4966399669647217, 
"train/loss_total": 0.549649178981781 }, { "epoch": 1.7542078546620359, "step": 6566, "train/loss_ctc": 0.5944618582725525, "train/loss_error": 0.4542713761329651, "train/loss_total": 0.482309490442276 }, { "epoch": 1.754475020037403, "step": 6567, "train/loss_ctc": 0.792864203453064, "train/loss_error": 0.4705621302127838, "train/loss_total": 0.5350225567817688 }, { "epoch": 1.7547421854127705, "step": 6568, "train/loss_ctc": 0.8171442151069641, "train/loss_error": 0.478654146194458, "train/loss_total": 0.5463521480560303 }, { "epoch": 1.755009350788138, "step": 6569, "train/loss_ctc": 0.7003143429756165, "train/loss_error": 0.49506357312202454, "train/loss_total": 0.5361137390136719 }, { "epoch": 1.755276516163505, "grad_norm": 3.233759641647339, "learning_rate": 1.947635586427999e-05, "loss": 0.5428, "step": 6570 }, { "epoch": 1.755276516163505, "step": 6570, "train/loss_ctc": 1.13927161693573, "train/loss_error": 0.4830143451690674, "train/loss_total": 0.6142657995223999 }, { "epoch": 1.7555436815388725, "step": 6571, "train/loss_ctc": 0.4210033118724823, "train/loss_error": 0.49834349751472473, "train/loss_total": 0.4828754663467407 }, { "epoch": 1.75581084691424, "step": 6572, "train/loss_ctc": 0.49253717064857483, "train/loss_error": 0.42379793524742126, "train/loss_total": 0.4375457763671875 }, { "epoch": 1.7560780122896071, "step": 6573, "train/loss_ctc": 1.3130104541778564, "train/loss_error": 0.3959285616874695, "train/loss_total": 0.5793449878692627 }, { "epoch": 1.7563451776649748, "step": 6574, "train/loss_ctc": 0.5256903171539307, "train/loss_error": 0.42049071192741394, "train/loss_total": 0.44153064489364624 }, { "epoch": 1.756612343040342, "step": 6575, "train/loss_ctc": 0.6752986907958984, "train/loss_error": 0.4480026960372925, "train/loss_total": 0.4934619069099426 }, { "epoch": 1.7568795084157092, "step": 6576, "train/loss_ctc": 0.6093730330467224, "train/loss_error": 0.4189448654651642, "train/loss_total": 0.4570305049419403 }, { "epoch": 
1.7571466737910768, "step": 6577, "train/loss_ctc": 1.403442144393921, "train/loss_error": 0.5061479806900024, "train/loss_total": 0.685606837272644 }, { "epoch": 1.757413839166444, "step": 6578, "train/loss_ctc": 0.6015603542327881, "train/loss_error": 0.39542078971862793, "train/loss_total": 0.4366486966609955 }, { "epoch": 1.7576810045418114, "step": 6579, "train/loss_ctc": 0.4936176538467407, "train/loss_error": 0.4412931799888611, "train/loss_total": 0.45175808668136597 }, { "epoch": 1.7579481699171788, "grad_norm": 15.756571769714355, "learning_rate": 1.946032594175795e-05, "loss": 0.508, "step": 6580 }, { "epoch": 1.7579481699171788, "step": 6580, "train/loss_ctc": 1.207892894744873, "train/loss_error": 0.46644893288612366, "train/loss_total": 0.6147377490997314 }, { "epoch": 1.758215335292546, "step": 6581, "train/loss_ctc": 0.7382459044456482, "train/loss_error": 0.4963666498661041, "train/loss_total": 0.5447425246238708 }, { "epoch": 1.7584825006679135, "step": 6582, "train/loss_ctc": 0.4498801529407501, "train/loss_error": 0.4478967487812042, "train/loss_total": 0.44829344749450684 }, { "epoch": 1.7587496660432809, "step": 6583, "train/loss_ctc": 0.6636461019515991, "train/loss_error": 0.4779145419597626, "train/loss_total": 0.5150608420372009 }, { "epoch": 1.759016831418648, "step": 6584, "train/loss_ctc": 0.22790206968784332, "train/loss_error": 0.47331979870796204, "train/loss_total": 0.4242362678050995 }, { "epoch": 1.7592839967940155, "step": 6585, "train/loss_ctc": 0.7954222559928894, "train/loss_error": 0.5113441944122314, "train/loss_total": 0.568159818649292 }, { "epoch": 1.759551162169383, "step": 6586, "train/loss_ctc": 0.4573895335197449, "train/loss_error": 0.5040908455848694, "train/loss_total": 0.49475058913230896 }, { "epoch": 1.75981832754475, "step": 6587, "train/loss_ctc": 0.629004180431366, "train/loss_error": 0.5348383188247681, "train/loss_total": 0.5536714792251587 }, { "epoch": 1.7600854929201175, "step": 6588, "train/loss_ctc": 
0.6842284202575684, "train/loss_error": 0.5663933753967285, "train/loss_total": 0.5899603962898254 }, { "epoch": 1.760352658295485, "step": 6589, "train/loss_ctc": 0.8786418437957764, "train/loss_error": 0.4658154845237732, "train/loss_total": 0.5483807325363159 }, { "epoch": 1.7606198236708521, "grad_norm": 2.38140606880188, "learning_rate": 1.9444296019235907e-05, "loss": 0.5302, "step": 6590 }, { "epoch": 1.7606198236708521, "step": 6590, "train/loss_ctc": 0.898750901222229, "train/loss_error": 0.43167296051979065, "train/loss_total": 0.5250885486602783 }, { "epoch": 1.7608869890462198, "step": 6591, "train/loss_ctc": 1.2842963933944702, "train/loss_error": 0.49674198031425476, "train/loss_total": 0.6542528867721558 }, { "epoch": 1.761154154421587, "step": 6592, "train/loss_ctc": 0.5118257403373718, "train/loss_error": 0.49520161747932434, "train/loss_total": 0.4985264539718628 }, { "epoch": 1.7614213197969542, "step": 6593, "train/loss_ctc": 1.4714555740356445, "train/loss_error": 0.540698230266571, "train/loss_total": 0.7268496751785278 }, { "epoch": 1.7616884851723218, "step": 6594, "train/loss_ctc": 1.2484261989593506, "train/loss_error": 0.5091018676757812, "train/loss_total": 0.6569667458534241 }, { "epoch": 1.761955650547689, "step": 6595, "train/loss_ctc": 0.7319824695587158, "train/loss_error": 0.45859193801879883, "train/loss_total": 0.5132700800895691 }, { "epoch": 1.7622228159230564, "step": 6596, "train/loss_ctc": 1.172082781791687, "train/loss_error": 0.4587920308113098, "train/loss_total": 0.6014502048492432 }, { "epoch": 1.7624899812984238, "step": 6597, "train/loss_ctc": 0.5791876912117004, "train/loss_error": 0.4224211275577545, "train/loss_total": 0.45377445220947266 }, { "epoch": 1.762757146673791, "step": 6598, "train/loss_ctc": 0.328532338142395, "train/loss_error": 0.44852614402770996, "train/loss_total": 0.4245273768901825 }, { "epoch": 1.7630243120491584, "step": 6599, "train/loss_ctc": 0.5666121244430542, "train/loss_error": 
0.5134109854698181, "train/loss_total": 0.5240511894226074 }, { "epoch": 1.7632914774245259, "grad_norm": 1.5899536609649658, "learning_rate": 1.9428266096713865e-05, "loss": 0.5579, "step": 6600 }, { "epoch": 1.7632914774245259, "step": 6600, "train/loss_ctc": 1.2607536315917969, "train/loss_error": 0.45237454771995544, "train/loss_total": 0.6140503883361816 }, { "epoch": 1.763558642799893, "step": 6601, "train/loss_ctc": 0.7121890783309937, "train/loss_error": 0.5024451613426208, "train/loss_total": 0.5443939566612244 }, { "epoch": 1.7638258081752605, "step": 6602, "train/loss_ctc": 0.8698438405990601, "train/loss_error": 0.394356906414032, "train/loss_total": 0.48945432901382446 }, { "epoch": 1.7640929735506279, "step": 6603, "train/loss_ctc": 0.894527792930603, "train/loss_error": 0.4904148280620575, "train/loss_total": 0.5712374448776245 }, { "epoch": 1.764360138925995, "step": 6604, "train/loss_ctc": 0.7820484638214111, "train/loss_error": 0.4621247351169586, "train/loss_total": 0.526109516620636 }, { "epoch": 1.7646273043013625, "step": 6605, "train/loss_ctc": 0.6953677535057068, "train/loss_error": 0.42889896035194397, "train/loss_total": 0.482192724943161 }, { "epoch": 1.76489446967673, "step": 6606, "train/loss_ctc": 0.6935340762138367, "train/loss_error": 0.46384161710739136, "train/loss_total": 0.5097801089286804 }, { "epoch": 1.7651616350520971, "step": 6607, "train/loss_ctc": 0.7448965311050415, "train/loss_error": 0.44435739517211914, "train/loss_total": 0.5044652223587036 }, { "epoch": 1.7654288004274648, "step": 6608, "train/loss_ctc": 1.0530779361724854, "train/loss_error": 0.5000510811805725, "train/loss_total": 0.6106564402580261 }, { "epoch": 1.765695965802832, "step": 6609, "train/loss_ctc": 0.744866132736206, "train/loss_error": 0.5083456039428711, "train/loss_total": 0.5556497573852539 }, { "epoch": 1.7659631311781991, "grad_norm": 2.4996612071990967, "learning_rate": 1.9412236174191823e-05, "loss": 0.5408, "step": 6610 }, { "epoch": 
1.7659631311781991, "step": 6610, "train/loss_ctc": 0.6662842035293579, "train/loss_error": 0.4495968818664551, "train/loss_total": 0.49293434619903564 }, { "epoch": 1.7662302965535668, "step": 6611, "train/loss_ctc": 0.5242382884025574, "train/loss_error": 0.40103647112846375, "train/loss_total": 0.4256768524646759 }, { "epoch": 1.766497461928934, "step": 6612, "train/loss_ctc": 0.6609079837799072, "train/loss_error": 0.5079461932182312, "train/loss_total": 0.5385385751724243 }, { "epoch": 1.7667646273043014, "step": 6613, "train/loss_ctc": 0.9883180856704712, "train/loss_error": 0.4543225169181824, "train/loss_total": 0.5611216425895691 }, { "epoch": 1.7670317926796688, "step": 6614, "train/loss_ctc": 0.4031430184841156, "train/loss_error": 0.4953678846359253, "train/loss_total": 0.4769229292869568 }, { "epoch": 1.767298958055036, "step": 6615, "train/loss_ctc": 0.7092008590698242, "train/loss_error": 0.43966180086135864, "train/loss_total": 0.49356961250305176 }, { "epoch": 1.7675661234304034, "step": 6616, "train/loss_ctc": 0.5707471370697021, "train/loss_error": 0.45221614837646484, "train/loss_total": 0.4759223461151123 }, { "epoch": 1.7678332888057708, "step": 6617, "train/loss_ctc": 0.8288997411727905, "train/loss_error": 0.4591529071331024, "train/loss_total": 0.53310227394104 }, { "epoch": 1.768100454181138, "step": 6618, "train/loss_ctc": 0.7707816362380981, "train/loss_error": 0.4597894251346588, "train/loss_total": 0.5219879150390625 }, { "epoch": 1.7683676195565055, "step": 6619, "train/loss_ctc": 0.619367778301239, "train/loss_error": 0.43633657693862915, "train/loss_total": 0.4729428291320801 }, { "epoch": 1.7686347849318729, "grad_norm": 2.4866130352020264, "learning_rate": 1.939620625166978e-05, "loss": 0.4993, "step": 6620 }, { "epoch": 1.7686347849318729, "step": 6620, "train/loss_ctc": 0.7353513240814209, "train/loss_error": 0.5147174000740051, "train/loss_total": 0.5588442087173462 }, { "epoch": 1.76890195030724, "step": 6621, 
"train/loss_ctc": 0.3714216649532318, "train/loss_error": 0.505530059337616, "train/loss_total": 0.4787083864212036 }, { "epoch": 1.7691691156826075, "step": 6622, "train/loss_ctc": 0.7088191509246826, "train/loss_error": 0.5243972539901733, "train/loss_total": 0.561281681060791 }, { "epoch": 1.769436281057975, "step": 6623, "train/loss_ctc": 0.6230453848838806, "train/loss_error": 0.4240294396877289, "train/loss_total": 0.46383264660835266 }, { "epoch": 1.769703446433342, "step": 6624, "train/loss_ctc": 0.6339603662490845, "train/loss_error": 0.42488330602645874, "train/loss_total": 0.4666987359523773 }, { "epoch": 1.7699706118087097, "step": 6625, "train/loss_ctc": 1.1092960834503174, "train/loss_error": 0.47427210211753845, "train/loss_total": 0.6012768745422363 }, { "epoch": 1.770237777184077, "step": 6626, "train/loss_ctc": 0.81305992603302, "train/loss_error": 0.4822598993778229, "train/loss_total": 0.5484199523925781 }, { "epoch": 1.7705049425594444, "step": 6627, "train/loss_ctc": 1.0953456163406372, "train/loss_error": 0.4831717908382416, "train/loss_total": 0.6056065559387207 }, { "epoch": 1.7707721079348118, "step": 6628, "train/loss_ctc": 0.9059106111526489, "train/loss_error": 0.4567507803440094, "train/loss_total": 0.5465827584266663 }, { "epoch": 1.771039273310179, "step": 6629, "train/loss_ctc": 1.5256719589233398, "train/loss_error": 0.4923487901687622, "train/loss_total": 0.6990134119987488 }, { "epoch": 1.7713064386855464, "grad_norm": 2.0266306400299072, "learning_rate": 1.9380176329147742e-05, "loss": 0.553, "step": 6630 }, { "epoch": 1.7713064386855464, "step": 6630, "train/loss_ctc": 0.39331066608428955, "train/loss_error": 0.45756444334983826, "train/loss_total": 0.44471368193626404 }, { "epoch": 1.7715736040609138, "step": 6631, "train/loss_ctc": 0.7184994220733643, "train/loss_error": 0.46857964992523193, "train/loss_total": 0.5185636281967163 }, { "epoch": 1.771840769436281, "step": 6632, "train/loss_ctc": 0.23131701350212097, 
"train/loss_error": 0.4822748899459839, "train/loss_total": 0.4320833086967468 }, { "epoch": 1.7721079348116484, "step": 6633, "train/loss_ctc": 0.5822893977165222, "train/loss_error": 0.502873957157135, "train/loss_total": 0.5187570452690125 }, { "epoch": 1.7723751001870158, "step": 6634, "train/loss_ctc": 1.5298962593078613, "train/loss_error": 0.41966262459754944, "train/loss_total": 0.6417093276977539 }, { "epoch": 1.772642265562383, "step": 6635, "train/loss_ctc": 1.3824219703674316, "train/loss_error": 0.47406086325645447, "train/loss_total": 0.6557331085205078 }, { "epoch": 1.7729094309377504, "step": 6636, "train/loss_ctc": 0.9112694263458252, "train/loss_error": 0.421299546957016, "train/loss_total": 0.5192935466766357 }, { "epoch": 1.7731765963131179, "step": 6637, "train/loss_ctc": 0.9239009618759155, "train/loss_error": 0.4049697518348694, "train/loss_total": 0.5087559819221497 }, { "epoch": 1.773443761688485, "step": 6638, "train/loss_ctc": 0.9378050565719604, "train/loss_error": 0.5606948733329773, "train/loss_total": 0.6361169219017029 }, { "epoch": 1.7737109270638525, "step": 6639, "train/loss_ctc": 0.26306602358818054, "train/loss_error": 0.5016962885856628, "train/loss_total": 0.4539702236652374 }, { "epoch": 1.77397809243922, "grad_norm": 1.618384599685669, "learning_rate": 1.9364146406625704e-05, "loss": 0.533, "step": 6640 }, { "epoch": 1.77397809243922, "step": 6640, "train/loss_ctc": 0.6756768226623535, "train/loss_error": 0.47661450505256653, "train/loss_total": 0.5164269804954529 }, { "epoch": 1.774245257814587, "step": 6641, "train/loss_ctc": 1.4519777297973633, "train/loss_error": 0.475261390209198, "train/loss_total": 0.6706047058105469 }, { "epoch": 1.7745124231899547, "step": 6642, "train/loss_ctc": 0.478004515171051, "train/loss_error": 0.4855519235134125, "train/loss_total": 0.4840424358844757 }, { "epoch": 1.774779588565322, "step": 6643, "train/loss_ctc": 0.2820775508880615, "train/loss_error": 0.47559937834739685, 
"train/loss_total": 0.4368950128555298 }, { "epoch": 1.7750467539406893, "step": 6644, "train/loss_ctc": 0.7909464836120605, "train/loss_error": 0.5056402683258057, "train/loss_total": 0.5627015233039856 }, { "epoch": 1.7753139193160568, "step": 6645, "train/loss_ctc": 0.6293945908546448, "train/loss_error": 0.39839789271354675, "train/loss_total": 0.4445972442626953 }, { "epoch": 1.775581084691424, "step": 6646, "train/loss_ctc": 1.1054246425628662, "train/loss_error": 0.4584163725376129, "train/loss_total": 0.5878180265426636 }, { "epoch": 1.7758482500667914, "step": 6647, "train/loss_ctc": 1.036766767501831, "train/loss_error": 0.4597947299480438, "train/loss_total": 0.5751891136169434 }, { "epoch": 1.7761154154421588, "step": 6648, "train/loss_ctc": 0.9003992080688477, "train/loss_error": 0.5350946187973022, "train/loss_total": 0.6081555485725403 }, { "epoch": 1.776382580817526, "step": 6649, "train/loss_ctc": 1.00624680519104, "train/loss_error": 0.4109709858894348, "train/loss_total": 0.5300261974334717 }, { "epoch": 1.7766497461928934, "grad_norm": 1.7361677885055542, "learning_rate": 1.9348116484103662e-05, "loss": 0.5416, "step": 6650 }, { "epoch": 1.7766497461928934, "step": 6650, "train/loss_ctc": 0.44846639037132263, "train/loss_error": 0.38700440526008606, "train/loss_total": 0.3992968201637268 }, { "epoch": 1.7769169115682608, "step": 6651, "train/loss_ctc": 0.9704482555389404, "train/loss_error": 0.48641470074653625, "train/loss_total": 0.583221435546875 }, { "epoch": 1.777184076943628, "step": 6652, "train/loss_ctc": 1.1142486333847046, "train/loss_error": 0.509818434715271, "train/loss_total": 0.6307045221328735 }, { "epoch": 1.7774512423189954, "step": 6653, "train/loss_ctc": 0.5317303538322449, "train/loss_error": 0.4052007794380188, "train/loss_total": 0.43050670623779297 }, { "epoch": 1.7777184076943628, "step": 6654, "train/loss_ctc": 0.6434420943260193, "train/loss_error": 0.41760140657424927, "train/loss_total": 0.4627695679664612 }, { 
"epoch": 1.77798557306973, "step": 6655, "train/loss_ctc": 0.4923107326030731, "train/loss_error": 0.4984213709831238, "train/loss_total": 0.49719923734664917 }, { "epoch": 1.7782527384450977, "step": 6656, "train/loss_ctc": 0.7500253915786743, "train/loss_error": 0.4541607201099396, "train/loss_total": 0.5133336782455444 }, { "epoch": 1.7785199038204649, "step": 6657, "train/loss_ctc": 0.43184366822242737, "train/loss_error": 0.47392737865448, "train/loss_total": 0.46551063656806946 }, { "epoch": 1.778787069195832, "step": 6658, "train/loss_ctc": 0.5315148234367371, "train/loss_error": 0.4759291410446167, "train/loss_total": 0.4870463013648987 }, { "epoch": 1.7790542345711997, "step": 6659, "train/loss_ctc": 0.9730671644210815, "train/loss_error": 0.5028005838394165, "train/loss_total": 0.5968539118766785 }, { "epoch": 1.779321399946567, "grad_norm": 1.8102748394012451, "learning_rate": 1.933208656158162e-05, "loss": 0.5066, "step": 6660 }, { "epoch": 1.779321399946567, "step": 6660, "train/loss_ctc": 0.4761693477630615, "train/loss_error": 0.47171658277511597, "train/loss_total": 0.4726071357727051 }, { "epoch": 1.7795885653219343, "step": 6661, "train/loss_ctc": 0.5497022867202759, "train/loss_error": 0.43676772713661194, "train/loss_total": 0.4593546688556671 }, { "epoch": 1.7798557306973017, "step": 6662, "train/loss_ctc": 1.2963217496871948, "train/loss_error": 0.5493068695068359, "train/loss_total": 0.6987098455429077 }, { "epoch": 1.780122896072669, "step": 6663, "train/loss_ctc": 0.5594434142112732, "train/loss_error": 0.37794941663742065, "train/loss_total": 0.4142482280731201 }, { "epoch": 1.7803900614480364, "step": 6664, "train/loss_ctc": 0.5668476223945618, "train/loss_error": 0.43373432755470276, "train/loss_total": 0.4603569805622101 }, { "epoch": 1.7806572268234038, "step": 6665, "train/loss_ctc": 0.8531550168991089, "train/loss_error": 0.48420119285583496, "train/loss_total": 0.5579919815063477 }, { "epoch": 1.780924392198771, "step": 6666, 
"train/loss_ctc": 0.44971954822540283, "train/loss_error": 0.4608289897441864, "train/loss_total": 0.45860710740089417 }, { "epoch": 1.7811915575741384, "step": 6667, "train/loss_ctc": 0.7145791053771973, "train/loss_error": 0.486960232257843, "train/loss_total": 0.5324839949607849 }, { "epoch": 1.7814587229495058, "step": 6668, "train/loss_ctc": 0.6906750798225403, "train/loss_error": 0.44984331727027893, "train/loss_total": 0.49800968170166016 }, { "epoch": 1.781725888324873, "step": 6669, "train/loss_ctc": 0.7687886953353882, "train/loss_error": 0.5342205762863159, "train/loss_total": 0.5811342000961304 }, { "epoch": 1.7819930537002404, "grad_norm": 1.8359601497650146, "learning_rate": 1.9316056639059578e-05, "loss": 0.5134, "step": 6670 }, { "epoch": 1.7819930537002404, "step": 6670, "train/loss_ctc": 0.9619835615158081, "train/loss_error": 0.5012896656990051, "train/loss_total": 0.5934284329414368 }, { "epoch": 1.7822602190756078, "step": 6671, "train/loss_ctc": 1.0613322257995605, "train/loss_error": 0.47500258684158325, "train/loss_total": 0.5922685265541077 }, { "epoch": 1.782527384450975, "step": 6672, "train/loss_ctc": 0.9270861148834229, "train/loss_error": 0.6217041015625, "train/loss_total": 0.6827805042266846 }, { "epoch": 1.7827945498263427, "step": 6673, "train/loss_ctc": 0.6143835783004761, "train/loss_error": 0.46471667289733887, "train/loss_total": 0.49465006589889526 }, { "epoch": 1.7830617152017099, "step": 6674, "train/loss_ctc": 1.1776247024536133, "train/loss_error": 0.4572960436344147, "train/loss_total": 0.6013617515563965 }, { "epoch": 1.783328880577077, "step": 6675, "train/loss_ctc": 0.49870747327804565, "train/loss_error": 0.3706284165382385, "train/loss_total": 0.39624422788619995 }, { "epoch": 1.7835960459524447, "step": 6676, "train/loss_ctc": 2.324699640274048, "train/loss_error": 0.4280570447444916, "train/loss_total": 0.8073855638504028 }, { "epoch": 1.783863211327812, "step": 6677, "train/loss_ctc": 0.5581080317497253, 
"train/loss_error": 0.4228174090385437, "train/loss_total": 0.4498755633831024 }, { "epoch": 1.7841303767031793, "step": 6678, "train/loss_ctc": 0.6866922974586487, "train/loss_error": 0.42474058270454407, "train/loss_total": 0.4771309196949005 }, { "epoch": 1.7843975420785467, "step": 6679, "train/loss_ctc": 0.5740494728088379, "train/loss_error": 0.44824713468551636, "train/loss_total": 0.4734076261520386 }, { "epoch": 1.784664707453914, "grad_norm": 2.0848517417907715, "learning_rate": 1.930002671653754e-05, "loss": 0.5569, "step": 6680 }, { "epoch": 1.784664707453914, "step": 6680, "train/loss_ctc": 1.1491485834121704, "train/loss_error": 0.4529840648174286, "train/loss_total": 0.592216968536377 }, { "epoch": 1.7849318728292813, "step": 6681, "train/loss_ctc": 0.4953794479370117, "train/loss_error": 0.43643659353256226, "train/loss_total": 0.4482251703739166 }, { "epoch": 1.7851990382046488, "step": 6682, "train/loss_ctc": 1.2823755741119385, "train/loss_error": 0.5443596243858337, "train/loss_total": 0.6919628381729126 }, { "epoch": 1.785466203580016, "step": 6683, "train/loss_ctc": 0.9495301246643066, "train/loss_error": 0.4802078306674957, "train/loss_total": 0.5740723013877869 }, { "epoch": 1.7857333689553834, "step": 6684, "train/loss_ctc": 1.0221810340881348, "train/loss_error": 0.47413167357444763, "train/loss_total": 0.5837415456771851 }, { "epoch": 1.7860005343307508, "step": 6685, "train/loss_ctc": 1.6294598579406738, "train/loss_error": 0.4937007427215576, "train/loss_total": 0.7208526134490967 }, { "epoch": 1.786267699706118, "step": 6686, "train/loss_ctc": 0.8766798973083496, "train/loss_error": 0.46151405572891235, "train/loss_total": 0.5445472598075867 }, { "epoch": 1.7865348650814854, "step": 6687, "train/loss_ctc": 0.6536130309104919, "train/loss_error": 0.46486562490463257, "train/loss_total": 0.5026150941848755 }, { "epoch": 1.7868020304568528, "step": 6688, "train/loss_ctc": 0.7048241496086121, "train/loss_error": 0.48539870977401733, 
"train/loss_total": 0.5292838215827942 }, { "epoch": 1.78706919583222, "step": 6689, "train/loss_ctc": 0.40322208404541016, "train/loss_error": 0.4795799255371094, "train/loss_total": 0.46430838108062744 }, { "epoch": 1.7873363612075877, "grad_norm": 2.9585065841674805, "learning_rate": 1.9283996794015498e-05, "loss": 0.5652, "step": 6690 }, { "epoch": 1.7873363612075877, "step": 6690, "train/loss_ctc": 0.7115352153778076, "train/loss_error": 0.4193113446235657, "train/loss_total": 0.4777561128139496 }, { "epoch": 1.7876035265829548, "step": 6691, "train/loss_ctc": 0.8966896533966064, "train/loss_error": 0.46477338671684265, "train/loss_total": 0.5511566400527954 }, { "epoch": 1.787870691958322, "step": 6692, "train/loss_ctc": 0.8422086834907532, "train/loss_error": 0.46084508299827576, "train/loss_total": 0.5371178388595581 }, { "epoch": 1.7881378573336897, "step": 6693, "train/loss_ctc": 0.7614785432815552, "train/loss_error": 0.4419045150279999, "train/loss_total": 0.5058193206787109 }, { "epoch": 1.7884050227090569, "step": 6694, "train/loss_ctc": 0.6663817763328552, "train/loss_error": 0.44438159465789795, "train/loss_total": 0.4887816309928894 }, { "epoch": 1.7886721880844243, "step": 6695, "train/loss_ctc": 0.8746823072433472, "train/loss_error": 0.5094104409217834, "train/loss_total": 0.5824648141860962 }, { "epoch": 1.7889393534597917, "step": 6696, "train/loss_ctc": 0.2876299023628235, "train/loss_error": 0.501280665397644, "train/loss_total": 0.4585505425930023 }, { "epoch": 1.789206518835159, "step": 6697, "train/loss_ctc": 0.7960531711578369, "train/loss_error": 0.45728799700737, "train/loss_total": 0.5250410437583923 }, { "epoch": 1.7894736842105263, "step": 6698, "train/loss_ctc": 0.5828316807746887, "train/loss_error": 0.40399178862571716, "train/loss_total": 0.439759761095047 }, { "epoch": 1.7897408495858937, "step": 6699, "train/loss_ctc": 0.6997296810150146, "train/loss_error": 0.42334213852882385, "train/loss_total": 0.47861963510513306 }, { 
"epoch": 1.790008014961261, "grad_norm": 1.3105260133743286, "learning_rate": 1.9267966871493456e-05, "loss": 0.5045, "step": 6700 }, { "epoch": 1.790008014961261, "step": 6700, "train/loss_ctc": 1.0346378087997437, "train/loss_error": 0.4503791630268097, "train/loss_total": 0.5672309398651123 }, { "epoch": 1.7902751803366284, "step": 6701, "train/loss_ctc": 0.4177423119544983, "train/loss_error": 0.3745916485786438, "train/loss_total": 0.3832217752933502 }, { "epoch": 1.7905423457119958, "step": 6702, "train/loss_ctc": 0.6544031500816345, "train/loss_error": 0.41532596945762634, "train/loss_total": 0.46314144134521484 }, { "epoch": 1.790809511087363, "step": 6703, "train/loss_ctc": 0.6837417483329773, "train/loss_error": 0.43453288078308105, "train/loss_total": 0.48437464237213135 }, { "epoch": 1.7910766764627304, "step": 6704, "train/loss_ctc": 0.5488333702087402, "train/loss_error": 0.4352872371673584, "train/loss_total": 0.4579964876174927 }, { "epoch": 1.7913438418380978, "step": 6705, "train/loss_ctc": 0.18714375793933868, "train/loss_error": 0.41892388463020325, "train/loss_total": 0.37256789207458496 }, { "epoch": 1.791611007213465, "step": 6706, "train/loss_ctc": 0.8630270957946777, "train/loss_error": 0.4792037010192871, "train/loss_total": 0.5559684038162231 }, { "epoch": 1.7918781725888326, "step": 6707, "train/loss_ctc": 0.5015614032745361, "train/loss_error": 0.43013882637023926, "train/loss_total": 0.4444233477115631 }, { "epoch": 1.7921453379641998, "step": 6708, "train/loss_ctc": 1.1881046295166016, "train/loss_error": 0.5389283895492554, "train/loss_total": 0.6687636375427246 }, { "epoch": 1.792412503339567, "step": 6709, "train/loss_ctc": 1.3281892538070679, "train/loss_error": 0.44612473249435425, "train/loss_total": 0.6225376129150391 }, { "epoch": 1.7926796687149347, "grad_norm": 2.369614601135254, "learning_rate": 1.9251936948971414e-05, "loss": 0.502, "step": 6710 }, { "epoch": 1.7926796687149347, "step": 6710, "train/loss_ctc": 
0.7009913921356201, "train/loss_error": 0.4858173429965973, "train/loss_total": 0.5288521647453308 }, { "epoch": 1.7929468340903019, "step": 6711, "train/loss_ctc": 0.5779545903205872, "train/loss_error": 0.5981292724609375, "train/loss_total": 0.5940943360328674 }, { "epoch": 1.7932139994656693, "step": 6712, "train/loss_ctc": 0.9348673820495605, "train/loss_error": 0.46670275926589966, "train/loss_total": 0.5603356957435608 }, { "epoch": 1.7934811648410367, "step": 6713, "train/loss_ctc": 0.6314365863800049, "train/loss_error": 0.4803999662399292, "train/loss_total": 0.5106073021888733 }, { "epoch": 1.793748330216404, "step": 6714, "train/loss_ctc": 1.0255327224731445, "train/loss_error": 0.4635559320449829, "train/loss_total": 0.5759512782096863 }, { "epoch": 1.7940154955917713, "step": 6715, "train/loss_ctc": 0.28925570845603943, "train/loss_error": 0.4084819257259369, "train/loss_total": 0.38463667035102844 }, { "epoch": 1.7942826609671387, "step": 6716, "train/loss_ctc": 0.6630810499191284, "train/loss_error": 0.4999639391899109, "train/loss_total": 0.5325873494148254 }, { "epoch": 1.794549826342506, "step": 6717, "train/loss_ctc": 1.093639612197876, "train/loss_error": 0.4910420775413513, "train/loss_total": 0.6115615963935852 }, { "epoch": 1.7948169917178733, "step": 6718, "train/loss_ctc": 0.5109223127365112, "train/loss_error": 0.4518658518791199, "train/loss_total": 0.46367716789245605 }, { "epoch": 1.7950841570932408, "step": 6719, "train/loss_ctc": 0.6408749222755432, "train/loss_error": 0.49581000208854675, "train/loss_total": 0.524823009967804 }, { "epoch": 1.795351322468608, "grad_norm": 2.3494224548339844, "learning_rate": 1.9235907026449372e-05, "loss": 0.5287, "step": 6720 }, { "epoch": 1.795351322468608, "step": 6720, "train/loss_ctc": 0.9164072871208191, "train/loss_error": 0.45145511627197266, "train/loss_total": 0.5444455742835999 }, { "epoch": 1.7956184878439754, "step": 6721, "train/loss_ctc": 1.1133599281311035, "train/loss_error": 
0.5438489317893982, "train/loss_total": 0.6577511429786682 }, { "epoch": 1.7958856532193428, "step": 6722, "train/loss_ctc": 0.7302244901657104, "train/loss_error": 0.5043506026268005, "train/loss_total": 0.5495253801345825 }, { "epoch": 1.79615281859471, "step": 6723, "train/loss_ctc": 0.6178808212280273, "train/loss_error": 0.4705685079097748, "train/loss_total": 0.5000309944152832 }, { "epoch": 1.7964199839700776, "step": 6724, "train/loss_ctc": 0.7094894647598267, "train/loss_error": 0.446796715259552, "train/loss_total": 0.49933528900146484 }, { "epoch": 1.7966871493454448, "step": 6725, "train/loss_ctc": 0.6030325889587402, "train/loss_error": 0.4851837158203125, "train/loss_total": 0.5087534785270691 }, { "epoch": 1.796954314720812, "step": 6726, "train/loss_ctc": 0.6634628772735596, "train/loss_error": 0.4341573417186737, "train/loss_total": 0.4800184667110443 }, { "epoch": 1.7972214800961797, "step": 6727, "train/loss_ctc": 0.9023181796073914, "train/loss_error": 0.4845874309539795, "train/loss_total": 0.5681335926055908 }, { "epoch": 1.7974886454715469, "step": 6728, "train/loss_ctc": 0.9089799523353577, "train/loss_error": 0.4528191089630127, "train/loss_total": 0.5440512895584106 }, { "epoch": 1.7977558108469143, "step": 6729, "train/loss_ctc": 0.6170253753662109, "train/loss_error": 0.46660083532333374, "train/loss_total": 0.4966857433319092 }, { "epoch": 1.7980229762222817, "grad_norm": 3.8840584754943848, "learning_rate": 1.921987710392733e-05, "loss": 0.5349, "step": 6730 }, { "epoch": 1.7980229762222817, "step": 6730, "train/loss_ctc": 0.8761953711509705, "train/loss_error": 0.4445662796497345, "train/loss_total": 0.5308920741081238 }, { "epoch": 1.7982901415976489, "step": 6731, "train/loss_ctc": 0.8930718898773193, "train/loss_error": 0.45240044593811035, "train/loss_total": 0.5405347347259521 }, { "epoch": 1.7985573069730163, "step": 6732, "train/loss_ctc": 0.26664888858795166, "train/loss_error": 0.38938361406326294, "train/loss_total": 
0.3648366630077362 }, { "epoch": 1.7988244723483837, "step": 6733, "train/loss_ctc": 0.4129824638366699, "train/loss_error": 0.41851580142974854, "train/loss_total": 0.41740912199020386 }, { "epoch": 1.799091637723751, "step": 6734, "train/loss_ctc": 0.4999331831932068, "train/loss_error": 0.4890352487564087, "train/loss_total": 0.4912148416042328 }, { "epoch": 1.7993588030991183, "step": 6735, "train/loss_ctc": 0.47026312351226807, "train/loss_error": 0.4866989850997925, "train/loss_total": 0.48341184854507446 }, { "epoch": 1.7996259684744857, "step": 6736, "train/loss_ctc": 0.5986287593841553, "train/loss_error": 0.4919758439064026, "train/loss_total": 0.5133064389228821 }, { "epoch": 1.799893133849853, "step": 6737, "train/loss_ctc": 0.4101923108100891, "train/loss_error": 0.4853394627571106, "train/loss_total": 0.4703100323677063 }, { "epoch": 1.8001602992252204, "step": 6738, "train/loss_ctc": 1.2938648462295532, "train/loss_error": 0.5001358985900879, "train/loss_total": 0.658881664276123 }, { "epoch": 1.8004274646005878, "step": 6739, "train/loss_ctc": 0.8238961696624756, "train/loss_error": 0.46442341804504395, "train/loss_total": 0.5363179445266724 }, { "epoch": 1.800694629975955, "grad_norm": 4.0414137840271, "learning_rate": 1.920384718140529e-05, "loss": 0.5007, "step": 6740 }, { "epoch": 1.800694629975955, "step": 6740, "train/loss_ctc": 0.2956313490867615, "train/loss_error": 0.42082977294921875, "train/loss_total": 0.39579007029533386 }, { "epoch": 1.8009617953513226, "step": 6741, "train/loss_ctc": 0.5168855786323547, "train/loss_error": 0.5034971833229065, "train/loss_total": 0.5061748623847961 }, { "epoch": 1.8012289607266898, "step": 6742, "train/loss_ctc": 0.7907362580299377, "train/loss_error": 0.48941487073898315, "train/loss_total": 0.549679160118103 }, { "epoch": 1.8014961261020572, "step": 6743, "train/loss_ctc": 1.324108362197876, "train/loss_error": 0.48687744140625, "train/loss_total": 0.6543236374855042 }, { "epoch": 1.8017632914774246, 
"step": 6744, "train/loss_ctc": 1.6449599266052246, "train/loss_error": 0.47420844435691833, "train/loss_total": 0.7083587646484375 }, { "epoch": 1.8020304568527918, "step": 6745, "train/loss_ctc": 0.423138290643692, "train/loss_error": 0.4581558406352997, "train/loss_total": 0.45115232467651367 }, { "epoch": 1.8022976222281593, "step": 6746, "train/loss_ctc": 0.6251958608627319, "train/loss_error": 0.503006637096405, "train/loss_total": 0.5274444818496704 }, { "epoch": 1.8025647876035267, "step": 6747, "train/loss_ctc": 0.40716785192489624, "train/loss_error": 0.44890832901000977, "train/loss_total": 0.4405602216720581 }, { "epoch": 1.8028319529788939, "step": 6748, "train/loss_ctc": 0.6920232772827148, "train/loss_error": 0.431306928396225, "train/loss_total": 0.48345017433166504 }, { "epoch": 1.8030991183542613, "step": 6749, "train/loss_ctc": 1.1092147827148438, "train/loss_error": 0.4541233479976654, "train/loss_total": 0.585141658782959 }, { "epoch": 1.8033662837296287, "grad_norm": 2.3710601329803467, "learning_rate": 1.918781725888325e-05, "loss": 0.5302, "step": 6750 }, { "epoch": 1.8033662837296287, "step": 6750, "train/loss_ctc": 0.5708547234535217, "train/loss_error": 0.4627489447593689, "train/loss_total": 0.4843701124191284 }, { "epoch": 1.803633449104996, "step": 6751, "train/loss_ctc": 0.6894711852073669, "train/loss_error": 0.4618879556655884, "train/loss_total": 0.507404625415802 }, { "epoch": 1.8039006144803633, "step": 6752, "train/loss_ctc": 0.9854552149772644, "train/loss_error": 0.5065464377403259, "train/loss_total": 0.6023281812667847 }, { "epoch": 1.8041677798557307, "step": 6753, "train/loss_ctc": 0.6109880805015564, "train/loss_error": 0.5082823038101196, "train/loss_total": 0.5288234353065491 }, { "epoch": 1.804434945231098, "step": 6754, "train/loss_ctc": 1.0088698863983154, "train/loss_error": 0.4576893150806427, "train/loss_total": 0.5679254531860352 }, { "epoch": 1.8047021106064653, "step": 6755, "train/loss_ctc": 
0.5239127278327942, "train/loss_error": 0.5225033164024353, "train/loss_total": 0.5227851867675781 }, { "epoch": 1.8049692759818328, "step": 6756, "train/loss_ctc": 1.1810158491134644, "train/loss_error": 0.4036678373813629, "train/loss_total": 0.5591374635696411 }, { "epoch": 1.8052364413572, "step": 6757, "train/loss_ctc": 0.7222840785980225, "train/loss_error": 0.45944294333457947, "train/loss_total": 0.5120111703872681 }, { "epoch": 1.8055036067325676, "step": 6758, "train/loss_ctc": 0.7672770619392395, "train/loss_error": 0.4178241789340973, "train/loss_total": 0.4877147674560547 }, { "epoch": 1.8057707721079348, "step": 6759, "train/loss_ctc": 0.8255380392074585, "train/loss_error": 0.4827585220336914, "train/loss_total": 0.5513144731521606 }, { "epoch": 1.8060379374833022, "grad_norm": 2.0745787620544434, "learning_rate": 1.9171787336361208e-05, "loss": 0.5324, "step": 6760 }, { "epoch": 1.8060379374833022, "step": 6760, "train/loss_ctc": 1.4819202423095703, "train/loss_error": 0.4912753105163574, "train/loss_total": 0.689404308795929 }, { "epoch": 1.8063051028586696, "step": 6761, "train/loss_ctc": 0.63629549741745, "train/loss_error": 0.4781353175640106, "train/loss_total": 0.5097673535346985 }, { "epoch": 1.8065722682340368, "step": 6762, "train/loss_ctc": 1.031846523284912, "train/loss_error": 0.4548097252845764, "train/loss_total": 0.5702171325683594 }, { "epoch": 1.8068394336094042, "step": 6763, "train/loss_ctc": 0.7097128033638, "train/loss_error": 0.46339941024780273, "train/loss_total": 0.5126620531082153 }, { "epoch": 1.8071065989847717, "step": 6764, "train/loss_ctc": 0.8318079710006714, "train/loss_error": 0.420326292514801, "train/loss_total": 0.5026226043701172 }, { "epoch": 1.8073737643601389, "step": 6765, "train/loss_ctc": 0.9451646208763123, "train/loss_error": 0.3402895927429199, "train/loss_total": 0.46126461029052734 }, { "epoch": 1.8076409297355063, "step": 6766, "train/loss_ctc": 0.6598052978515625, "train/loss_error": 
0.4918368458747864, "train/loss_total": 0.5254305601119995 }, { "epoch": 1.8079080951108737, "step": 6767, "train/loss_ctc": 0.7508805394172668, "train/loss_error": 0.5137568712234497, "train/loss_total": 0.5611816048622131 }, { "epoch": 1.8081752604862409, "step": 6768, "train/loss_ctc": 0.8994000554084778, "train/loss_error": 0.4222383499145508, "train/loss_total": 0.5176706910133362 }, { "epoch": 1.8084424258616083, "step": 6769, "train/loss_ctc": 0.9529987573623657, "train/loss_error": 0.44168800115585327, "train/loss_total": 0.5439501404762268 }, { "epoch": 1.8087095912369757, "grad_norm": 4.579998016357422, "learning_rate": 1.9155757413839166e-05, "loss": 0.5394, "step": 6770 }, { "epoch": 1.8087095912369757, "step": 6770, "train/loss_ctc": 0.8902865648269653, "train/loss_error": 0.48358044028282166, "train/loss_total": 0.5649216771125793 }, { "epoch": 1.808976756612343, "step": 6771, "train/loss_ctc": 0.4430977702140808, "train/loss_error": 0.44968751072883606, "train/loss_total": 0.448369562625885 }, { "epoch": 1.8092439219877106, "step": 6772, "train/loss_ctc": 0.34177833795547485, "train/loss_error": 0.3820723593235016, "train/loss_total": 0.3740135729312897 }, { "epoch": 1.8095110873630778, "step": 6773, "train/loss_ctc": 1.530768871307373, "train/loss_error": 0.5322890281677246, "train/loss_total": 0.7319849729537964 }, { "epoch": 1.809778252738445, "step": 6774, "train/loss_ctc": 0.8988618850708008, "train/loss_error": 0.44102174043655396, "train/loss_total": 0.5325897932052612 }, { "epoch": 1.8100454181138126, "step": 6775, "train/loss_ctc": 0.5931001901626587, "train/loss_error": 0.4957610070705414, "train/loss_total": 0.5152288675308228 }, { "epoch": 1.8103125834891798, "step": 6776, "train/loss_ctc": 0.7425633668899536, "train/loss_error": 0.43775975704193115, "train/loss_total": 0.4987204670906067 }, { "epoch": 1.8105797488645472, "step": 6777, "train/loss_ctc": 0.7071281671524048, "train/loss_error": 0.5301597714424133, "train/loss_total": 
0.5655534267425537 }, { "epoch": 1.8108469142399146, "step": 6778, "train/loss_ctc": 1.3763691186904907, "train/loss_error": 0.5265738368034363, "train/loss_total": 0.6965329051017761 }, { "epoch": 1.8111140796152818, "step": 6779, "train/loss_ctc": 0.7667423486709595, "train/loss_error": 0.42290911078453064, "train/loss_total": 0.4916757643222809 }, { "epoch": 1.8113812449906492, "grad_norm": 1.7976449728012085, "learning_rate": 1.9139727491317124e-05, "loss": 0.542, "step": 6780 }, { "epoch": 1.8113812449906492, "step": 6780, "train/loss_ctc": 0.7276370525360107, "train/loss_error": 0.4541514217853546, "train/loss_total": 0.5088485479354858 }, { "epoch": 1.8116484103660166, "step": 6781, "train/loss_ctc": 0.7012637853622437, "train/loss_error": 0.4586387872695923, "train/loss_total": 0.5071637630462646 }, { "epoch": 1.8119155757413838, "step": 6782, "train/loss_ctc": 0.5539824962615967, "train/loss_error": 0.48159417510032654, "train/loss_total": 0.49607184529304504 }, { "epoch": 1.8121827411167513, "step": 6783, "train/loss_ctc": 0.6605082154273987, "train/loss_error": 0.4511250853538513, "train/loss_total": 0.49300169944763184 }, { "epoch": 1.8124499064921187, "step": 6784, "train/loss_ctc": 0.5419016480445862, "train/loss_error": 0.45548340678215027, "train/loss_total": 0.47276705503463745 }, { "epoch": 1.8127170718674859, "step": 6785, "train/loss_ctc": 1.376255989074707, "train/loss_error": 0.4825715720653534, "train/loss_total": 0.6613084673881531 }, { "epoch": 1.8129842372428533, "step": 6786, "train/loss_ctc": 1.6739585399627686, "train/loss_error": 0.4842900037765503, "train/loss_total": 0.7222237586975098 }, { "epoch": 1.8132514026182207, "step": 6787, "train/loss_ctc": 1.0281152725219727, "train/loss_error": 0.4858957827091217, "train/loss_total": 0.5943397283554077 }, { "epoch": 1.813518567993588, "step": 6788, "train/loss_ctc": 0.5957825183868408, "train/loss_error": 0.4625970125198364, "train/loss_total": 0.4892341196537018 }, { "epoch": 
1.8137857333689555, "step": 6789, "train/loss_ctc": 0.5584801435470581, "train/loss_error": 0.47411540150642395, "train/loss_total": 0.4909883737564087 }, { "epoch": 1.8140528987443227, "grad_norm": 1.684282660484314, "learning_rate": 1.9123697568795082e-05, "loss": 0.5436, "step": 6790 }, { "epoch": 1.8140528987443227, "step": 6790, "train/loss_ctc": 0.934492826461792, "train/loss_error": 0.5277001857757568, "train/loss_total": 0.6090587377548218 }, { "epoch": 1.81432006411969, "step": 6791, "train/loss_ctc": 0.8230606913566589, "train/loss_error": 0.4945005476474762, "train/loss_total": 0.5602126121520996 }, { "epoch": 1.8145872294950576, "step": 6792, "train/loss_ctc": 0.5536500215530396, "train/loss_error": 0.4500604271888733, "train/loss_total": 0.47077834606170654 }, { "epoch": 1.8148543948704248, "step": 6793, "train/loss_ctc": 0.4922824800014496, "train/loss_error": 0.45349371433258057, "train/loss_total": 0.46125146746635437 }, { "epoch": 1.8151215602457922, "step": 6794, "train/loss_ctc": 0.53746497631073, "train/loss_error": 0.4625805914402008, "train/loss_total": 0.4775574803352356 }, { "epoch": 1.8153887256211596, "step": 6795, "train/loss_ctc": 0.94352126121521, "train/loss_error": 0.5002174377441406, "train/loss_total": 0.5888782143592834 }, { "epoch": 1.8156558909965268, "step": 6796, "train/loss_ctc": 0.4965016841888428, "train/loss_error": 0.49625277519226074, "train/loss_total": 0.4963025450706482 }, { "epoch": 1.8159230563718942, "step": 6797, "train/loss_ctc": 0.5199134945869446, "train/loss_error": 0.43947869539260864, "train/loss_total": 0.4555656909942627 }, { "epoch": 1.8161902217472616, "step": 6798, "train/loss_ctc": 0.6866788864135742, "train/loss_error": 0.49911656975746155, "train/loss_total": 0.5366290807723999 }, { "epoch": 1.8164573871226288, "step": 6799, "train/loss_ctc": 0.4701113998889923, "train/loss_error": 0.4747745394706726, "train/loss_total": 0.47384190559387207 }, { "epoch": 1.8167245524979962, "grad_norm": 
1.8795527219772339, "learning_rate": 1.9107667646273043e-05, "loss": 0.513, "step": 6800 }, { "epoch": 1.8167245524979962, "step": 6800, "train/loss_ctc": 0.44712144136428833, "train/loss_error": 0.4401138424873352, "train/loss_total": 0.44151535630226135 }, { "epoch": 1.8169917178733637, "step": 6801, "train/loss_ctc": 0.9325481653213501, "train/loss_error": 0.45351263880729675, "train/loss_total": 0.5493197441101074 }, { "epoch": 1.8172588832487309, "step": 6802, "train/loss_ctc": 0.3927098512649536, "train/loss_error": 0.4749280512332916, "train/loss_total": 0.458484411239624 }, { "epoch": 1.8175260486240983, "step": 6803, "train/loss_ctc": 1.2389334440231323, "train/loss_error": 0.4748282730579376, "train/loss_total": 0.6276493072509766 }, { "epoch": 1.8177932139994657, "step": 6804, "train/loss_ctc": 0.3663894832134247, "train/loss_error": 0.4184166193008423, "train/loss_total": 0.40801119804382324 }, { "epoch": 1.818060379374833, "step": 6805, "train/loss_ctc": 0.6505786180496216, "train/loss_error": 0.4714038074016571, "train/loss_total": 0.5072388052940369 }, { "epoch": 1.8183275447502005, "step": 6806, "train/loss_ctc": 0.6815381646156311, "train/loss_error": 0.478599488735199, "train/loss_total": 0.5191872119903564 }, { "epoch": 1.8185947101255677, "step": 6807, "train/loss_ctc": 1.3099260330200195, "train/loss_error": 0.5120109915733337, "train/loss_total": 0.6715940237045288 }, { "epoch": 1.818861875500935, "step": 6808, "train/loss_ctc": 0.33757346868515015, "train/loss_error": 0.41247373819351196, "train/loss_total": 0.3974936902523041 }, { "epoch": 1.8191290408763026, "step": 6809, "train/loss_ctc": 0.3055099844932556, "train/loss_error": 0.39675360918045044, "train/loss_total": 0.3785049021244049 }, { "epoch": 1.8193962062516698, "grad_norm": 2.1834499835968018, "learning_rate": 1.9091637723751e-05, "loss": 0.4959, "step": 6810 }, { "epoch": 1.8193962062516698, "step": 6810, "train/loss_ctc": 0.7049020528793335, "train/loss_error": 
0.4818359911441803, "train/loss_total": 0.5264492034912109 }, { "epoch": 1.8196633716270372, "step": 6811, "train/loss_ctc": 0.6552839875221252, "train/loss_error": 0.5284262299537659, "train/loss_total": 0.5537977814674377 }, { "epoch": 1.8199305370024046, "step": 6812, "train/loss_ctc": 0.6579276919364929, "train/loss_error": 0.5306638479232788, "train/loss_total": 0.5561165809631348 }, { "epoch": 1.8201977023777718, "step": 6813, "train/loss_ctc": 1.0944870710372925, "train/loss_error": 0.47297775745391846, "train/loss_total": 0.5972796082496643 }, { "epoch": 1.8204648677531392, "step": 6814, "train/loss_ctc": 0.6955260038375854, "train/loss_error": 0.4348233640193939, "train/loss_total": 0.4869638979434967 }, { "epoch": 1.8207320331285066, "step": 6815, "train/loss_ctc": 1.0518193244934082, "train/loss_error": 0.45609158277511597, "train/loss_total": 0.5752371549606323 }, { "epoch": 1.8209991985038738, "step": 6816, "train/loss_ctc": 0.4209277331829071, "train/loss_error": 0.45731794834136963, "train/loss_total": 0.45003989338874817 }, { "epoch": 1.8212663638792412, "step": 6817, "train/loss_ctc": 0.3534833490848541, "train/loss_error": 0.4789999723434448, "train/loss_total": 0.4538966715335846 }, { "epoch": 1.8215335292546087, "step": 6818, "train/loss_ctc": 0.8862322568893433, "train/loss_error": 0.5294588804244995, "train/loss_total": 0.6008135676383972 }, { "epoch": 1.8218006946299758, "step": 6819, "train/loss_ctc": 0.5048921704292297, "train/loss_error": 0.44661104679107666, "train/loss_total": 0.4582672715187073 }, { "epoch": 1.8220678600053433, "grad_norm": 1.7590731382369995, "learning_rate": 1.9075607801228963e-05, "loss": 0.5259, "step": 6820 }, { "epoch": 1.8220678600053433, "step": 6820, "train/loss_ctc": 0.9136844873428345, "train/loss_error": 0.45449286699295044, "train/loss_total": 0.5463311672210693 }, { "epoch": 1.8223350253807107, "step": 6821, "train/loss_ctc": 0.883852481842041, "train/loss_error": 0.4853195548057556, "train/loss_total": 
0.5650261640548706 }, { "epoch": 1.8226021907560779, "step": 6822, "train/loss_ctc": 1.0858120918273926, "train/loss_error": 0.47899797558784485, "train/loss_total": 0.6003608107566833 }, { "epoch": 1.8228693561314455, "step": 6823, "train/loss_ctc": 0.873337984085083, "train/loss_error": 0.4843703806400299, "train/loss_total": 0.5621639490127563 }, { "epoch": 1.8231365215068127, "step": 6824, "train/loss_ctc": 0.7669702172279358, "train/loss_error": 0.412799596786499, "train/loss_total": 0.48363372683525085 }, { "epoch": 1.82340368688218, "step": 6825, "train/loss_ctc": 1.0853116512298584, "train/loss_error": 0.42098772525787354, "train/loss_total": 0.5538524985313416 }, { "epoch": 1.8236708522575475, "step": 6826, "train/loss_ctc": 0.4685449004173279, "train/loss_error": 0.43607136607170105, "train/loss_total": 0.4425660967826843 }, { "epoch": 1.8239380176329147, "step": 6827, "train/loss_ctc": 0.8959986567497253, "train/loss_error": 0.46264922618865967, "train/loss_total": 0.5493191480636597 }, { "epoch": 1.8242051830082822, "step": 6828, "train/loss_ctc": 1.0613141059875488, "train/loss_error": 0.45082932710647583, "train/loss_total": 0.5729262828826904 }, { "epoch": 1.8244723483836496, "step": 6829, "train/loss_ctc": 0.8884198665618896, "train/loss_error": 0.4495813548564911, "train/loss_total": 0.5373491048812866 }, { "epoch": 1.8247395137590168, "grad_norm": 2.165419578552246, "learning_rate": 1.905957787870692e-05, "loss": 0.5414, "step": 6830 }, { "epoch": 1.8247395137590168, "step": 6830, "train/loss_ctc": 0.33712318539619446, "train/loss_error": 0.43314510583877563, "train/loss_total": 0.4139407277107239 }, { "epoch": 1.8250066791343842, "step": 6831, "train/loss_ctc": 1.4758234024047852, "train/loss_error": 0.5072265267372131, "train/loss_total": 0.7009459137916565 }, { "epoch": 1.8252738445097516, "step": 6832, "train/loss_ctc": 0.5003056526184082, "train/loss_error": 0.4461714029312134, "train/loss_total": 0.4569982886314392 }, { "epoch": 
1.8255410098851188, "step": 6833, "train/loss_ctc": 1.0667964220046997, "train/loss_error": 0.41185262799263, "train/loss_total": 0.542841374874115 }, { "epoch": 1.8258081752604862, "step": 6834, "train/loss_ctc": 0.706567108631134, "train/loss_error": 0.5369104146957397, "train/loss_total": 0.5708417296409607 }, { "epoch": 1.8260753406358536, "step": 6835, "train/loss_ctc": 0.6918531060218811, "train/loss_error": 0.4805522859096527, "train/loss_total": 0.5228124260902405 }, { "epoch": 1.8263425060112208, "step": 6836, "train/loss_ctc": 0.3198351562023163, "train/loss_error": 0.40833038091659546, "train/loss_total": 0.3906313180923462 }, { "epoch": 1.8266096713865883, "step": 6837, "train/loss_ctc": 0.5312652587890625, "train/loss_error": 0.4938564598560333, "train/loss_total": 0.5013382434844971 }, { "epoch": 1.8268768367619557, "step": 6838, "train/loss_ctc": 0.5576462745666504, "train/loss_error": 0.5312471985816956, "train/loss_total": 0.5365270376205444 }, { "epoch": 1.8271440021373229, "step": 6839, "train/loss_ctc": 1.1181954145431519, "train/loss_error": 0.481740266084671, "train/loss_total": 0.6090313196182251 }, { "epoch": 1.8274111675126905, "grad_norm": 2.1938605308532715, "learning_rate": 1.904354795618488e-05, "loss": 0.5246, "step": 6840 }, { "epoch": 1.8274111675126905, "step": 6840, "train/loss_ctc": 0.6184290647506714, "train/loss_error": 0.43116098642349243, "train/loss_total": 0.4686146080493927 }, { "epoch": 1.8276783328880577, "step": 6841, "train/loss_ctc": 0.8599640727043152, "train/loss_error": 0.39943262934684753, "train/loss_total": 0.491538941860199 }, { "epoch": 1.827945498263425, "step": 6842, "train/loss_ctc": 0.964526891708374, "train/loss_error": 0.4873890280723572, "train/loss_total": 0.5828166007995605 }, { "epoch": 1.8282126636387925, "step": 6843, "train/loss_ctc": 0.5261502265930176, "train/loss_error": 0.4695449471473694, "train/loss_total": 0.480866014957428 }, { "epoch": 1.8284798290141597, "step": 6844, "train/loss_ctc": 
0.44136524200439453, "train/loss_error": 0.43915873765945435, "train/loss_total": 0.43960005044937134 }, { "epoch": 1.8287469943895271, "step": 6845, "train/loss_ctc": 0.489139199256897, "train/loss_error": 0.4612983763217926, "train/loss_total": 0.46686655282974243 }, { "epoch": 1.8290141597648946, "step": 6846, "train/loss_ctc": 0.38435953855514526, "train/loss_error": 0.4299872815608978, "train/loss_total": 0.42086172103881836 }, { "epoch": 1.8292813251402618, "step": 6847, "train/loss_ctc": 0.5305707454681396, "train/loss_error": 0.4246055781841278, "train/loss_total": 0.4457986056804657 }, { "epoch": 1.8295484905156292, "step": 6848, "train/loss_ctc": 0.9318000674247742, "train/loss_error": 0.5117278695106506, "train/loss_total": 0.5957423448562622 }, { "epoch": 1.8298156558909966, "step": 6849, "train/loss_ctc": 0.8033941984176636, "train/loss_error": 0.4330989122390747, "train/loss_total": 0.5071579813957214 }, { "epoch": 1.8300828212663638, "grad_norm": 1.0531224012374878, "learning_rate": 1.9027518033662837e-05, "loss": 0.49, "step": 6850 }, { "epoch": 1.8300828212663638, "step": 6850, "train/loss_ctc": 0.43092817068099976, "train/loss_error": 0.42734187841415405, "train/loss_total": 0.4280591309070587 }, { "epoch": 1.8303499866417312, "step": 6851, "train/loss_ctc": 2.0678234100341797, "train/loss_error": 0.4949583113193512, "train/loss_total": 0.8095313310623169 }, { "epoch": 1.8306171520170986, "step": 6852, "train/loss_ctc": 0.9119318127632141, "train/loss_error": 0.4819806218147278, "train/loss_total": 0.567970871925354 }, { "epoch": 1.8308843173924658, "step": 6853, "train/loss_ctc": 0.7230292558670044, "train/loss_error": 0.4799802005290985, "train/loss_total": 0.5285900235176086 }, { "epoch": 1.8311514827678332, "step": 6854, "train/loss_ctc": 1.1198484897613525, "train/loss_error": 0.4875256419181824, "train/loss_total": 0.6139901876449585 }, { "epoch": 1.8314186481432007, "step": 6855, "train/loss_ctc": 0.6818907260894775, "train/loss_error": 
0.4762550890445709, "train/loss_total": 0.5173822045326233 }, { "epoch": 1.8316858135185679, "step": 6856, "train/loss_ctc": 1.7320243120193481, "train/loss_error": 0.42469170689582825, "train/loss_total": 0.6861582398414612 }, { "epoch": 1.8319529788939355, "step": 6857, "train/loss_ctc": 0.49628564715385437, "train/loss_error": 0.46972474455833435, "train/loss_total": 0.47503694891929626 }, { "epoch": 1.8322201442693027, "step": 6858, "train/loss_ctc": 0.37277597188949585, "train/loss_error": 0.39759916067123413, "train/loss_total": 0.3926345109939575 }, { "epoch": 1.83248730964467, "step": 6859, "train/loss_ctc": 0.5505138039588928, "train/loss_error": 0.40103211998939514, "train/loss_total": 0.43092846870422363 }, { "epoch": 1.8327544750200375, "grad_norm": 2.0175387859344482, "learning_rate": 1.90114881111408e-05, "loss": 0.545, "step": 6860 }, { "epoch": 1.8327544750200375, "step": 6860, "train/loss_ctc": 0.5822404026985168, "train/loss_error": 0.3995313346385956, "train/loss_total": 0.4360731542110443 }, { "epoch": 1.8330216403954047, "step": 6861, "train/loss_ctc": 0.7044588327407837, "train/loss_error": 0.4095021188259125, "train/loss_total": 0.4684934616088867 }, { "epoch": 1.8332888057707721, "step": 6862, "train/loss_ctc": 0.5679694414138794, "train/loss_error": 0.46046292781829834, "train/loss_total": 0.48196423053741455 }, { "epoch": 1.8335559711461396, "step": 6863, "train/loss_ctc": 1.2854303121566772, "train/loss_error": 0.4463668465614319, "train/loss_total": 0.6141795516014099 }, { "epoch": 1.8338231365215067, "step": 6864, "train/loss_ctc": 0.6007161140441895, "train/loss_error": 0.4394895136356354, "train/loss_total": 0.4717348515987396 }, { "epoch": 1.8340903018968742, "step": 6865, "train/loss_ctc": 0.807583749294281, "train/loss_error": 0.483490914106369, "train/loss_total": 0.5483095049858093 }, { "epoch": 1.8343574672722416, "step": 6866, "train/loss_ctc": 0.7635319828987122, "train/loss_error": 0.4670204222202301, "train/loss_total": 
0.5263227224349976 }, { "epoch": 1.8346246326476088, "step": 6867, "train/loss_ctc": 0.8215280771255493, "train/loss_error": 0.44149768352508545, "train/loss_total": 0.5175037384033203 }, { "epoch": 1.8348917980229762, "step": 6868, "train/loss_ctc": 0.5967684984207153, "train/loss_error": 0.4966670870780945, "train/loss_total": 0.5166873931884766 }, { "epoch": 1.8351589633983436, "step": 6869, "train/loss_ctc": 0.5741268396377563, "train/loss_error": 0.45098334550857544, "train/loss_total": 0.4756120443344116 }, { "epoch": 1.8354261287737108, "grad_norm": 1.36444091796875, "learning_rate": 1.8995458188618757e-05, "loss": 0.5057, "step": 6870 }, { "epoch": 1.8354261287737108, "step": 6870, "train/loss_ctc": 0.3594145178794861, "train/loss_error": 0.4215764105319977, "train/loss_total": 0.4091440439224243 }, { "epoch": 1.8356932941490782, "step": 6871, "train/loss_ctc": 0.6212953329086304, "train/loss_error": 0.474808007478714, "train/loss_total": 0.5041055083274841 }, { "epoch": 1.8359604595244456, "step": 6872, "train/loss_ctc": 0.9112836122512817, "train/loss_error": 0.4634268581867218, "train/loss_total": 0.5529981851577759 }, { "epoch": 1.8362276248998128, "step": 6873, "train/loss_ctc": 0.7191640138626099, "train/loss_error": 0.5019021034240723, "train/loss_total": 0.5453544855117798 }, { "epoch": 1.8364947902751805, "step": 6874, "train/loss_ctc": 0.7860001921653748, "train/loss_error": 0.4911912679672241, "train/loss_total": 0.5501530170440674 }, { "epoch": 1.8367619556505477, "step": 6875, "train/loss_ctc": 0.6876465678215027, "train/loss_error": 0.4814300835132599, "train/loss_total": 0.5226733684539795 }, { "epoch": 1.837029121025915, "step": 6876, "train/loss_ctc": 1.1931902170181274, "train/loss_error": 0.4485260546207428, "train/loss_total": 0.5974588990211487 }, { "epoch": 1.8372962864012825, "step": 6877, "train/loss_ctc": 1.110661268234253, "train/loss_error": 0.4927484393119812, "train/loss_total": 0.6163309812545776 }, { "epoch": 
1.8375634517766497, "step": 6878, "train/loss_ctc": 0.8418538570404053, "train/loss_error": 0.5206976532936096, "train/loss_total": 0.5849288702011108 }, { "epoch": 1.8378306171520171, "step": 6879, "train/loss_ctc": 0.39582979679107666, "train/loss_error": 0.4054320156574249, "train/loss_total": 0.40351158380508423 }, { "epoch": 1.8380977825273845, "grad_norm": 2.8966991901397705, "learning_rate": 1.8979428266096715e-05, "loss": 0.5287, "step": 6880 }, { "epoch": 1.8380977825273845, "step": 6880, "train/loss_ctc": 0.47008776664733887, "train/loss_error": 0.578581690788269, "train/loss_total": 0.556882917881012 }, { "epoch": 1.8383649479027517, "step": 6881, "train/loss_ctc": 0.8450853824615479, "train/loss_error": 0.5168330669403076, "train/loss_total": 0.5824835300445557 }, { "epoch": 1.8386321132781192, "step": 6882, "train/loss_ctc": 0.3314306139945984, "train/loss_error": 0.4564191699028015, "train/loss_total": 0.4314214587211609 }, { "epoch": 1.8388992786534866, "step": 6883, "train/loss_ctc": 0.6127244234085083, "train/loss_error": 0.504206657409668, "train/loss_total": 0.5259102582931519 }, { "epoch": 1.8391664440288538, "step": 6884, "train/loss_ctc": 1.5664386749267578, "train/loss_error": 0.46062803268432617, "train/loss_total": 0.6817901730537415 }, { "epoch": 1.8394336094042212, "step": 6885, "train/loss_ctc": 0.5154651403427124, "train/loss_error": 0.48287874460220337, "train/loss_total": 0.48939603567123413 }, { "epoch": 1.8397007747795886, "step": 6886, "train/loss_ctc": 0.4729202091693878, "train/loss_error": 0.5204911828041077, "train/loss_total": 0.5109769701957703 }, { "epoch": 1.8399679401549558, "step": 6887, "train/loss_ctc": 0.7979881763458252, "train/loss_error": 0.399505615234375, "train/loss_total": 0.47920212149620056 }, { "epoch": 1.8402351055303234, "step": 6888, "train/loss_ctc": 0.6153454780578613, "train/loss_error": 0.44739145040512085, "train/loss_total": 0.4809822738170624 }, { "epoch": 1.8405022709056906, "step": 6889, 
"train/loss_ctc": 0.6188573837280273, "train/loss_error": 0.4871572256088257, "train/loss_total": 0.5134972333908081 }, { "epoch": 1.8407694362810578, "grad_norm": 2.563560962677002, "learning_rate": 1.8963398343574673e-05, "loss": 0.5253, "step": 6890 }, { "epoch": 1.8407694362810578, "step": 6890, "train/loss_ctc": 0.8341532349586487, "train/loss_error": 0.47699669003486633, "train/loss_total": 0.5484279990196228 }, { "epoch": 1.8410366016564255, "step": 6891, "train/loss_ctc": 0.7773447036743164, "train/loss_error": 0.4990786612033844, "train/loss_total": 0.5547318458557129 }, { "epoch": 1.8413037670317927, "step": 6892, "train/loss_ctc": 1.0668079853057861, "train/loss_error": 0.41400331258773804, "train/loss_total": 0.5445642471313477 }, { "epoch": 1.84157093240716, "step": 6893, "train/loss_ctc": 0.2101011872291565, "train/loss_error": 0.4709932208061218, "train/loss_total": 0.4188148081302643 }, { "epoch": 1.8418380977825275, "step": 6894, "train/loss_ctc": 0.5111405253410339, "train/loss_error": 0.44705960154533386, "train/loss_total": 0.45987579226493835 }, { "epoch": 1.8421052631578947, "step": 6895, "train/loss_ctc": 0.6944159269332886, "train/loss_error": 0.4212132692337036, "train/loss_total": 0.4758538007736206 }, { "epoch": 1.842372428533262, "step": 6896, "train/loss_ctc": 1.450956106185913, "train/loss_error": 0.4644920527935028, "train/loss_total": 0.6617848873138428 }, { "epoch": 1.8426395939086295, "step": 6897, "train/loss_ctc": 0.6187208294868469, "train/loss_error": 0.4257141947746277, "train/loss_total": 0.4643155336380005 }, { "epoch": 1.8429067592839967, "step": 6898, "train/loss_ctc": 1.0419747829437256, "train/loss_error": 0.41505342721939087, "train/loss_total": 0.5404376983642578 }, { "epoch": 1.8431739246593641, "step": 6899, "train/loss_ctc": 0.5394346714019775, "train/loss_error": 0.48338431119918823, "train/loss_total": 0.49459439516067505 }, { "epoch": 1.8434410900347316, "grad_norm": 1.8365941047668457, "learning_rate": 
1.894736842105263e-05, "loss": 0.5163, "step": 6900 }, { "epoch": 1.8434410900347316, "step": 6900, "train/loss_ctc": 1.0276200771331787, "train/loss_error": 0.42689910531044006, "train/loss_total": 0.5470433235168457 }, { "epoch": 1.8437082554100988, "step": 6901, "train/loss_ctc": 1.1254920959472656, "train/loss_error": 0.44077712297439575, "train/loss_total": 0.5777201056480408 }, { "epoch": 1.8439754207854662, "step": 6902, "train/loss_ctc": 0.766832709312439, "train/loss_error": 0.4206511676311493, "train/loss_total": 0.4898874759674072 }, { "epoch": 1.8442425861608336, "step": 6903, "train/loss_ctc": 0.7861411571502686, "train/loss_error": 0.43133988976478577, "train/loss_total": 0.5023001432418823 }, { "epoch": 1.8445097515362008, "step": 6904, "train/loss_ctc": 0.7495500445365906, "train/loss_error": 0.43191808462142944, "train/loss_total": 0.49544447660446167 }, { "epoch": 1.8447769169115684, "step": 6905, "train/loss_ctc": 0.7416675090789795, "train/loss_error": 0.4378305971622467, "train/loss_total": 0.49859797954559326 }, { "epoch": 1.8450440822869356, "step": 6906, "train/loss_ctc": 0.6858025789260864, "train/loss_error": 0.46295902132987976, "train/loss_total": 0.507527768611908 }, { "epoch": 1.8453112476623028, "step": 6907, "train/loss_ctc": 1.0693600177764893, "train/loss_error": 0.46994632482528687, "train/loss_total": 0.5898290872573853 }, { "epoch": 1.8455784130376705, "step": 6908, "train/loss_ctc": 0.5600466132164001, "train/loss_error": 0.420807421207428, "train/loss_total": 0.44865524768829346 }, { "epoch": 1.8458455784130376, "step": 6909, "train/loss_ctc": 1.0156699419021606, "train/loss_error": 0.4807353913784027, "train/loss_total": 0.5877223014831543 }, { "epoch": 1.846112743788405, "grad_norm": 2.036390781402588, "learning_rate": 1.893133849853059e-05, "loss": 0.5245, "step": 6910 }, { "epoch": 1.846112743788405, "step": 6910, "train/loss_ctc": 0.49251192808151245, "train/loss_error": 0.464947909116745, "train/loss_total": 
0.4704607427120209 }, { "epoch": 1.8463799091637725, "step": 6911, "train/loss_ctc": 1.0525434017181396, "train/loss_error": 0.46982306241989136, "train/loss_total": 0.586367130279541 }, { "epoch": 1.8466470745391397, "step": 6912, "train/loss_ctc": 0.8657374382019043, "train/loss_error": 0.49560546875, "train/loss_total": 0.5696318745613098 }, { "epoch": 1.846914239914507, "step": 6913, "train/loss_ctc": 1.1839319467544556, "train/loss_error": 0.4435266852378845, "train/loss_total": 0.5916077494621277 }, { "epoch": 1.8471814052898745, "step": 6914, "train/loss_ctc": 0.38313570618629456, "train/loss_error": 0.41287675499916077, "train/loss_total": 0.40692853927612305 }, { "epoch": 1.8474485706652417, "step": 6915, "train/loss_ctc": 0.6063377857208252, "train/loss_error": 0.4543566405773163, "train/loss_total": 0.4847528636455536 }, { "epoch": 1.8477157360406091, "step": 6916, "train/loss_ctc": 0.9461746215820312, "train/loss_error": 0.46125614643096924, "train/loss_total": 0.5582398772239685 }, { "epoch": 1.8479829014159765, "step": 6917, "train/loss_ctc": 0.9059287905693054, "train/loss_error": 0.6091077327728271, "train/loss_total": 0.6684719324111938 }, { "epoch": 1.8482500667913437, "step": 6918, "train/loss_ctc": 0.40202292799949646, "train/loss_error": 0.3686091899871826, "train/loss_total": 0.37529194355010986 }, { "epoch": 1.8485172321667112, "step": 6919, "train/loss_ctc": 0.7527114152908325, "train/loss_error": 0.49134182929992676, "train/loss_total": 0.5436157584190369 }, { "epoch": 1.8487843975420786, "grad_norm": 7.849874973297119, "learning_rate": 1.891530857600855e-05, "loss": 0.5255, "step": 6920 }, { "epoch": 1.8487843975420786, "step": 6920, "train/loss_ctc": 1.3149503469467163, "train/loss_error": 0.402992844581604, "train/loss_total": 0.5853843688964844 }, { "epoch": 1.8490515629174458, "step": 6921, "train/loss_ctc": 1.459925651550293, "train/loss_error": 0.5653191208839417, "train/loss_total": 0.744240403175354 }, { "epoch": 
1.8493187282928134, "step": 6922, "train/loss_ctc": 0.7642722725868225, "train/loss_error": 0.4284760057926178, "train/loss_total": 0.4956352710723877 }, { "epoch": 1.8495858936681806, "step": 6923, "train/loss_ctc": 0.5582174062728882, "train/loss_error": 0.4359540045261383, "train/loss_total": 0.46040669083595276 }, { "epoch": 1.8498530590435478, "step": 6924, "train/loss_ctc": 0.950456976890564, "train/loss_error": 0.4924646019935608, "train/loss_total": 0.5840630531311035 }, { "epoch": 1.8501202244189154, "step": 6925, "train/loss_ctc": 0.8605262041091919, "train/loss_error": 0.4511353671550751, "train/loss_total": 0.5330135226249695 }, { "epoch": 1.8503873897942826, "step": 6926, "train/loss_ctc": 0.8740924000740051, "train/loss_error": 0.4081695079803467, "train/loss_total": 0.5013540983200073 }, { "epoch": 1.85065455516965, "step": 6927, "train/loss_ctc": 0.4123622477054596, "train/loss_error": 0.5236908793449402, "train/loss_total": 0.5014251470565796 }, { "epoch": 1.8509217205450175, "step": 6928, "train/loss_ctc": 1.086680293083191, "train/loss_error": 0.45051369071006775, "train/loss_total": 0.5777469873428345 }, { "epoch": 1.8511888859203847, "step": 6929, "train/loss_ctc": 1.3741121292114258, "train/loss_error": 0.47363826632499695, "train/loss_total": 0.6537330746650696 }, { "epoch": 1.851456051295752, "grad_norm": 1.8036516904830933, "learning_rate": 1.889927865348651e-05, "loss": 0.5637, "step": 6930 }, { "epoch": 1.851456051295752, "step": 6930, "train/loss_ctc": 0.621941328048706, "train/loss_error": 0.47110679745674133, "train/loss_total": 0.5012736916542053 }, { "epoch": 1.8517232166711195, "step": 6931, "train/loss_ctc": 0.7575602531433105, "train/loss_error": 0.4852752089500427, "train/loss_total": 0.5397322177886963 }, { "epoch": 1.8519903820464867, "step": 6932, "train/loss_ctc": 0.856509804725647, "train/loss_error": 0.452310174703598, "train/loss_total": 0.5331500768661499 }, { "epoch": 1.8522575474218541, "step": 6933, "train/loss_ctc": 
0.45986998081207275, "train/loss_error": 0.38507264852523804, "train/loss_total": 0.400032103061676 }, { "epoch": 1.8525247127972215, "step": 6934, "train/loss_ctc": 0.429298996925354, "train/loss_error": 0.48994266986846924, "train/loss_total": 0.4778139591217041 }, { "epoch": 1.8527918781725887, "step": 6935, "train/loss_ctc": 1.5813698768615723, "train/loss_error": 0.4529910087585449, "train/loss_total": 0.6786668300628662 }, { "epoch": 1.8530590435479561, "step": 6936, "train/loss_ctc": 1.0187950134277344, "train/loss_error": 0.42524057626724243, "train/loss_total": 0.5439514517784119 }, { "epoch": 1.8533262089233236, "step": 6937, "train/loss_ctc": 0.7811141014099121, "train/loss_error": 0.4595860242843628, "train/loss_total": 0.5238916873931885 }, { "epoch": 1.8535933742986908, "step": 6938, "train/loss_ctc": 0.5719214677810669, "train/loss_error": 0.43625450134277344, "train/loss_total": 0.4633879065513611 }, { "epoch": 1.8538605396740584, "step": 6939, "train/loss_ctc": 0.6724160313606262, "train/loss_error": 0.47238051891326904, "train/loss_total": 0.5123876333236694 }, { "epoch": 1.8541277050494256, "grad_norm": 1.9457451105117798, "learning_rate": 1.8883248730964467e-05, "loss": 0.5174, "step": 6940 }, { "epoch": 1.8541277050494256, "step": 6940, "train/loss_ctc": 0.5606997609138489, "train/loss_error": 0.4797607362270355, "train/loss_total": 0.49594855308532715 }, { "epoch": 1.8543948704247928, "step": 6941, "train/loss_ctc": 0.8233028650283813, "train/loss_error": 0.43629005551338196, "train/loss_total": 0.5136926174163818 }, { "epoch": 1.8546620358001604, "step": 6942, "train/loss_ctc": 1.3756859302520752, "train/loss_error": 0.4938633143901825, "train/loss_total": 0.6702278852462769 }, { "epoch": 1.8549292011755276, "step": 6943, "train/loss_ctc": 0.7946372628211975, "train/loss_error": 0.43987777829170227, "train/loss_total": 0.5108296871185303 }, { "epoch": 1.855196366550895, "step": 6944, "train/loss_ctc": 0.4493754506111145, "train/loss_error": 
0.44769835472106934, "train/loss_total": 0.44803377985954285 }, { "epoch": 1.8554635319262625, "step": 6945, "train/loss_ctc": 0.8495717644691467, "train/loss_error": 0.41258159279823303, "train/loss_total": 0.4999796152114868 }, { "epoch": 1.8557306973016297, "step": 6946, "train/loss_ctc": 0.5056740641593933, "train/loss_error": 0.5076968669891357, "train/loss_total": 0.5072923302650452 }, { "epoch": 1.855997862676997, "step": 6947, "train/loss_ctc": 1.0062713623046875, "train/loss_error": 0.4155653119087219, "train/loss_total": 0.533706545829773 }, { "epoch": 1.8562650280523645, "step": 6948, "train/loss_ctc": 0.8103013634681702, "train/loss_error": 0.45672541856765747, "train/loss_total": 0.52744060754776 }, { "epoch": 1.8565321934277317, "step": 6949, "train/loss_ctc": 0.6549242734909058, "train/loss_error": 0.4474840462207794, "train/loss_total": 0.48897212743759155 }, { "epoch": 1.856799358803099, "grad_norm": 1.6905674934387207, "learning_rate": 1.8867218808442425e-05, "loss": 0.5196, "step": 6950 }, { "epoch": 1.856799358803099, "step": 6950, "train/loss_ctc": 1.347303867340088, "train/loss_error": 0.4844072461128235, "train/loss_total": 0.6569865942001343 }, { "epoch": 1.8570665241784665, "step": 6951, "train/loss_ctc": 0.4628554582595825, "train/loss_error": 0.5347992181777954, "train/loss_total": 0.5204104781150818 }, { "epoch": 1.8573336895538337, "step": 6952, "train/loss_ctc": 1.1858571767807007, "train/loss_error": 0.45388171076774597, "train/loss_total": 0.6002768278121948 }, { "epoch": 1.8576008549292011, "step": 6953, "train/loss_ctc": 0.6954449415206909, "train/loss_error": 0.43972888588905334, "train/loss_total": 0.4908721148967743 }, { "epoch": 1.8578680203045685, "step": 6954, "train/loss_ctc": 0.24229802191257477, "train/loss_error": 0.46291449666023254, "train/loss_total": 0.4187912344932556 }, { "epoch": 1.8581351856799357, "step": 6955, "train/loss_ctc": 0.8378047347068787, "train/loss_error": 0.4771095812320709, "train/loss_total": 
0.5492486357688904 }, { "epoch": 1.8584023510553034, "step": 6956, "train/loss_ctc": 1.2135940790176392, "train/loss_error": 0.4674849510192871, "train/loss_total": 0.6167067885398865 }, { "epoch": 1.8586695164306706, "step": 6957, "train/loss_ctc": 0.9777190685272217, "train/loss_error": 0.43248629570007324, "train/loss_total": 0.5415328741073608 }, { "epoch": 1.8589366818060378, "step": 6958, "train/loss_ctc": 0.7848145961761475, "train/loss_error": 0.4598483145236969, "train/loss_total": 0.5248415470123291 }, { "epoch": 1.8592038471814054, "step": 6959, "train/loss_ctc": 0.9257990121841431, "train/loss_error": 0.5153934955596924, "train/loss_total": 0.5974745750427246 }, { "epoch": 1.8594710125567726, "grad_norm": 1.4794050455093384, "learning_rate": 1.8851188885920383e-05, "loss": 0.5517, "step": 6960 }, { "epoch": 1.8594710125567726, "step": 6960, "train/loss_ctc": 0.7353523969650269, "train/loss_error": 0.47224462032318115, "train/loss_total": 0.5248661637306213 }, { "epoch": 1.85973817793214, "step": 6961, "train/loss_ctc": 0.37108078598976135, "train/loss_error": 0.4658588171005249, "train/loss_total": 0.4469032287597656 }, { "epoch": 1.8600053433075074, "step": 6962, "train/loss_ctc": 0.43508028984069824, "train/loss_error": 0.4914746880531311, "train/loss_total": 0.4801958203315735 }, { "epoch": 1.8602725086828746, "step": 6963, "train/loss_ctc": 0.8634896278381348, "train/loss_error": 0.5708053708076477, "train/loss_total": 0.6293421983718872 }, { "epoch": 1.860539674058242, "step": 6964, "train/loss_ctc": 1.0814721584320068, "train/loss_error": 0.4822908937931061, "train/loss_total": 0.602127194404602 }, { "epoch": 1.8608068394336095, "step": 6965, "train/loss_ctc": 0.5601170659065247, "train/loss_error": 0.417385071516037, "train/loss_total": 0.44593146443367004 }, { "epoch": 1.8610740048089767, "step": 6966, "train/loss_ctc": 1.038503646850586, "train/loss_error": 0.4614872932434082, "train/loss_total": 0.5768905878067017 }, { "epoch": 
1.861341170184344, "step": 6967, "train/loss_ctc": 0.9233861565589905, "train/loss_error": 0.46518591046333313, "train/loss_total": 0.5568259358406067 }, { "epoch": 1.8616083355597115, "step": 6968, "train/loss_ctc": 0.5498242378234863, "train/loss_error": 0.4684072732925415, "train/loss_total": 0.48469066619873047 }, { "epoch": 1.8618755009350787, "step": 6969, "train/loss_ctc": 1.3111202716827393, "train/loss_error": 0.4786495566368103, "train/loss_total": 0.6451436877250671 }, { "epoch": 1.8621426663104461, "grad_norm": 1.612985610961914, "learning_rate": 1.8835158963398344e-05, "loss": 0.5393, "step": 6970 }, { "epoch": 1.8621426663104461, "step": 6970, "train/loss_ctc": 1.0028789043426514, "train/loss_error": 0.5021496415138245, "train/loss_total": 0.6022955179214478 }, { "epoch": 1.8624098316858135, "step": 6971, "train/loss_ctc": 1.0940778255462646, "train/loss_error": 0.4143112003803253, "train/loss_total": 0.5502645373344421 }, { "epoch": 1.8626769970611807, "step": 6972, "train/loss_ctc": 0.4639032185077667, "train/loss_error": 0.4204113185405731, "train/loss_total": 0.42910972237586975 }, { "epoch": 1.8629441624365484, "step": 6973, "train/loss_ctc": 1.1194077730178833, "train/loss_error": 0.4983583390712738, "train/loss_total": 0.6225682497024536 }, { "epoch": 1.8632113278119156, "step": 6974, "train/loss_ctc": 0.8746259808540344, "train/loss_error": 0.5261600017547607, "train/loss_total": 0.5958532094955444 }, { "epoch": 1.863478493187283, "step": 6975, "train/loss_ctc": 0.5064829587936401, "train/loss_error": 0.4652862548828125, "train/loss_total": 0.47352561354637146 }, { "epoch": 1.8637456585626504, "step": 6976, "train/loss_ctc": 1.3463733196258545, "train/loss_error": 0.4598894715309143, "train/loss_total": 0.6371862888336182 }, { "epoch": 1.8640128239380176, "step": 6977, "train/loss_ctc": 0.5080183744430542, "train/loss_error": 0.4960175156593323, "train/loss_total": 0.4984177052974701 }, { "epoch": 1.864279989313385, "step": 6978, 
"train/loss_ctc": 0.6604419946670532, "train/loss_error": 0.47673729062080383, "train/loss_total": 0.5134782195091248 }, { "epoch": 1.8645471546887524, "step": 6979, "train/loss_ctc": 0.21226587891578674, "train/loss_error": 0.4418628513813019, "train/loss_total": 0.39594346284866333 }, { "epoch": 1.8648143200641196, "grad_norm": 2.1379475593566895, "learning_rate": 1.8819129040876302e-05, "loss": 0.5319, "step": 6980 }, { "epoch": 1.8648143200641196, "step": 6980, "train/loss_ctc": 0.6741430759429932, "train/loss_error": 0.5152148008346558, "train/loss_total": 0.5470004677772522 }, { "epoch": 1.865081485439487, "step": 6981, "train/loss_ctc": 0.6163098812103271, "train/loss_error": 0.45447009801864624, "train/loss_total": 0.48683807253837585 }, { "epoch": 1.8653486508148545, "step": 6982, "train/loss_ctc": 0.901819109916687, "train/loss_error": 0.4987114667892456, "train/loss_total": 0.5793330073356628 }, { "epoch": 1.8656158161902217, "step": 6983, "train/loss_ctc": 0.6061899662017822, "train/loss_error": 0.492178350687027, "train/loss_total": 0.514980673789978 }, { "epoch": 1.865882981565589, "step": 6984, "train/loss_ctc": 1.0698456764221191, "train/loss_error": 0.5428450107574463, "train/loss_total": 0.6482451558113098 }, { "epoch": 1.8661501469409565, "step": 6985, "train/loss_ctc": 0.3554716110229492, "train/loss_error": 0.4027057886123657, "train/loss_total": 0.3932589590549469 }, { "epoch": 1.8664173123163237, "step": 6986, "train/loss_ctc": 1.2003333568572998, "train/loss_error": 0.4841531813144684, "train/loss_total": 0.6273891925811768 }, { "epoch": 1.866684477691691, "step": 6987, "train/loss_ctc": 0.4695112109184265, "train/loss_error": 0.41027823090553284, "train/loss_total": 0.42212486267089844 }, { "epoch": 1.8669516430670585, "step": 6988, "train/loss_ctc": 0.9792794585227966, "train/loss_error": 0.45147737860679626, "train/loss_total": 0.5570378303527832 }, { "epoch": 1.8672188084424257, "step": 6989, "train/loss_ctc": 0.3754531443119049, 
"train/loss_error": 0.42693647742271423, "train/loss_total": 0.4166398048400879 }, { "epoch": 1.8674859738177934, "grad_norm": 1.912876844406128, "learning_rate": 1.8803099118354264e-05, "loss": 0.5193, "step": 6990 }, { "epoch": 1.8674859738177934, "step": 6990, "train/loss_ctc": 0.7300562262535095, "train/loss_error": 0.4895673990249634, "train/loss_total": 0.5376651883125305 }, { "epoch": 1.8677531391931605, "step": 6991, "train/loss_ctc": 0.7815907001495361, "train/loss_error": 0.5207828879356384, "train/loss_total": 0.5729444622993469 }, { "epoch": 1.868020304568528, "step": 6992, "train/loss_ctc": 0.9473644495010376, "train/loss_error": 0.549422025680542, "train/loss_total": 0.6290104985237122 }, { "epoch": 1.8682874699438954, "step": 6993, "train/loss_ctc": 0.37206798791885376, "train/loss_error": 0.4810487627983093, "train/loss_total": 0.45925262570381165 }, { "epoch": 1.8685546353192626, "step": 6994, "train/loss_ctc": 1.2004470825195312, "train/loss_error": 0.4139857292175293, "train/loss_total": 0.5712779760360718 }, { "epoch": 1.86882180069463, "step": 6995, "train/loss_ctc": 1.2623403072357178, "train/loss_error": 0.5006750822067261, "train/loss_total": 0.6530081629753113 }, { "epoch": 1.8690889660699974, "step": 6996, "train/loss_ctc": 0.6520599126815796, "train/loss_error": 0.41724541783332825, "train/loss_total": 0.46420833468437195 }, { "epoch": 1.8693561314453646, "step": 6997, "train/loss_ctc": 0.9419439435005188, "train/loss_error": 0.44319939613342285, "train/loss_total": 0.542948305606842 }, { "epoch": 1.869623296820732, "step": 6998, "train/loss_ctc": 0.42270606756210327, "train/loss_error": 0.38949263095855713, "train/loss_total": 0.3961353302001953 }, { "epoch": 1.8698904621960994, "step": 6999, "train/loss_ctc": 0.7531962990760803, "train/loss_error": 0.5610697865486145, "train/loss_total": 0.5994951128959656 }, { "epoch": 1.8701576275714666, "grad_norm": 2.170713424682617, "learning_rate": 1.8787069195832222e-05, "loss": 0.5426, "step": 
7000 }, { "epoch": 1.8701576275714666, "step": 7000, "train/loss_ctc": 1.011958122253418, "train/loss_error": 0.484476238489151, "train/loss_total": 0.5899726152420044 }, { "epoch": 1.870424792946834, "step": 7001, "train/loss_ctc": 1.3302444219589233, "train/loss_error": 0.5035251379013062, "train/loss_total": 0.6688690185546875 }, { "epoch": 1.8706919583222015, "step": 7002, "train/loss_ctc": 0.5779621601104736, "train/loss_error": 0.5107536315917969, "train/loss_total": 0.5241953730583191 }, { "epoch": 1.8709591236975687, "step": 7003, "train/loss_ctc": 1.0017341375350952, "train/loss_error": 0.48482710123062134, "train/loss_total": 0.5882085561752319 }, { "epoch": 1.8712262890729363, "step": 7004, "train/loss_ctc": 1.5679725408554077, "train/loss_error": 0.4221910536289215, "train/loss_total": 0.6513473987579346 }, { "epoch": 1.8714934544483035, "step": 7005, "train/loss_ctc": 2.028834819793701, "train/loss_error": 0.5360559821128845, "train/loss_total": 0.8346117734909058 }, { "epoch": 1.8717606198236707, "step": 7006, "train/loss_ctc": 0.6148815155029297, "train/loss_error": 0.4403500556945801, "train/loss_total": 0.4752563536167145 }, { "epoch": 1.8720277851990383, "step": 7007, "train/loss_ctc": 0.7071992754936218, "train/loss_error": 0.4595451056957245, "train/loss_total": 0.509075939655304 }, { "epoch": 1.8722949505744055, "step": 7008, "train/loss_ctc": 0.3238747715950012, "train/loss_error": 0.4065530002117157, "train/loss_total": 0.3900173604488373 }, { "epoch": 1.872562115949773, "step": 7009, "train/loss_ctc": 0.4834568500518799, "train/loss_error": 0.416550874710083, "train/loss_total": 0.4299320876598358 }, { "epoch": 1.8728292813251404, "grad_norm": 3.7452268600463867, "learning_rate": 1.877103927331018e-05, "loss": 0.5661, "step": 7010 }, { "epoch": 1.8728292813251404, "step": 7010, "train/loss_ctc": 0.4856733977794647, "train/loss_error": 0.43022963404655457, "train/loss_total": 0.4413183927536011 }, { "epoch": 1.8730964467005076, "step": 7011, 
"train/loss_ctc": 0.6126847267150879, "train/loss_error": 0.420347660779953, "train/loss_total": 0.4588150978088379 }, { "epoch": 1.873363612075875, "step": 7012, "train/loss_ctc": 1.2192938327789307, "train/loss_error": 0.4137287437915802, "train/loss_total": 0.5748417973518372 }, { "epoch": 1.8736307774512424, "step": 7013, "train/loss_ctc": 0.5299166440963745, "train/loss_error": 0.4158572852611542, "train/loss_total": 0.4386691451072693 }, { "epoch": 1.8738979428266096, "step": 7014, "train/loss_ctc": 1.0115165710449219, "train/loss_error": 0.4088989794254303, "train/loss_total": 0.5294225215911865 }, { "epoch": 1.874165108201977, "step": 7015, "train/loss_ctc": 0.9629536867141724, "train/loss_error": 0.4311867952346802, "train/loss_total": 0.5375401973724365 }, { "epoch": 1.8744322735773444, "step": 7016, "train/loss_ctc": 1.2602957487106323, "train/loss_error": 0.39379826188087463, "train/loss_total": 0.5670977830886841 }, { "epoch": 1.8746994389527116, "step": 7017, "train/loss_ctc": 0.8305743932723999, "train/loss_error": 0.4717276096343994, "train/loss_total": 0.5434969663619995 }, { "epoch": 1.874966604328079, "step": 7018, "train/loss_ctc": 0.8774864077568054, "train/loss_error": 0.48721843957901, "train/loss_total": 0.5652720332145691 }, { "epoch": 1.8752337697034465, "step": 7019, "train/loss_ctc": 3.2799482345581055, "train/loss_error": 0.5214735865592957, "train/loss_total": 1.0731685161590576 }, { "epoch": 1.8755009350788137, "grad_norm": 2.4840829372406006, "learning_rate": 1.8755009350788138e-05, "loss": 0.573, "step": 7020 }, { "epoch": 1.8755009350788137, "step": 7020, "train/loss_ctc": 0.6648116707801819, "train/loss_error": 0.4028575122356415, "train/loss_total": 0.4552483558654785 }, { "epoch": 1.8757681004541813, "step": 7021, "train/loss_ctc": 0.4712153673171997, "train/loss_error": 0.5242023468017578, "train/loss_total": 0.5136049389839172 }, { "epoch": 1.8760352658295485, "step": 7022, "train/loss_ctc": 0.5188028812408447, 
"train/loss_error": 0.4565189480781555, "train/loss_total": 0.4689757227897644 }, { "epoch": 1.8763024312049157, "step": 7023, "train/loss_ctc": 1.0103422403335571, "train/loss_error": 0.44215869903564453, "train/loss_total": 0.555795431137085 }, { "epoch": 1.8765695965802833, "step": 7024, "train/loss_ctc": 1.3111844062805176, "train/loss_error": 0.52940833568573, "train/loss_total": 0.6857635974884033 }, { "epoch": 1.8768367619556505, "step": 7025, "train/loss_ctc": 0.7931744456291199, "train/loss_error": 0.4837825298309326, "train/loss_total": 0.5456609129905701 }, { "epoch": 1.877103927331018, "step": 7026, "train/loss_ctc": 0.4424993395805359, "train/loss_error": 0.4450030028820038, "train/loss_total": 0.4445022940635681 }, { "epoch": 1.8773710927063854, "step": 7027, "train/loss_ctc": 0.3319064974784851, "train/loss_error": 0.45628443360328674, "train/loss_total": 0.4314088523387909 }, { "epoch": 1.8776382580817526, "step": 7028, "train/loss_ctc": 0.5869609117507935, "train/loss_error": 0.4025667905807495, "train/loss_total": 0.4394456148147583 }, { "epoch": 1.87790542345712, "step": 7029, "train/loss_ctc": 0.5577332377433777, "train/loss_error": 0.4328426718711853, "train/loss_total": 0.4578207731246948 }, { "epoch": 1.8781725888324874, "grad_norm": 2.4182088375091553, "learning_rate": 1.87389794282661e-05, "loss": 0.4998, "step": 7030 }, { "epoch": 1.8781725888324874, "step": 7030, "train/loss_ctc": 0.8465172052383423, "train/loss_error": 0.555767297744751, "train/loss_total": 0.6139172911643982 }, { "epoch": 1.8784397542078546, "step": 7031, "train/loss_ctc": 0.8219640254974365, "train/loss_error": 0.43995657563209534, "train/loss_total": 0.5163580775260925 }, { "epoch": 1.878706919583222, "step": 7032, "train/loss_ctc": 0.7700755596160889, "train/loss_error": 0.4757440388202667, "train/loss_total": 0.5346103310585022 }, { "epoch": 1.8789740849585894, "step": 7033, "train/loss_ctc": 0.6820366382598877, "train/loss_error": 0.4489193260669708, 
"train/loss_total": 0.49554282426834106 }, { "epoch": 1.8792412503339566, "step": 7034, "train/loss_ctc": 0.8600399494171143, "train/loss_error": 0.5232759714126587, "train/loss_total": 0.5906288027763367 }, { "epoch": 1.879508415709324, "step": 7035, "train/loss_ctc": 0.5865265130996704, "train/loss_error": 0.462543249130249, "train/loss_total": 0.48733991384506226 }, { "epoch": 1.8797755810846914, "step": 7036, "train/loss_ctc": 0.6582942008972168, "train/loss_error": 0.47066405415534973, "train/loss_total": 0.5081900954246521 }, { "epoch": 1.8800427464600586, "step": 7037, "train/loss_ctc": 0.41018062829971313, "train/loss_error": 0.49709179997444153, "train/loss_total": 0.47970959544181824 }, { "epoch": 1.8803099118354263, "step": 7038, "train/loss_ctc": 0.3928200602531433, "train/loss_error": 0.4944685101509094, "train/loss_total": 0.4741388261318207 }, { "epoch": 1.8805770772107935, "step": 7039, "train/loss_ctc": 0.44693300127983093, "train/loss_error": 0.41696232557296753, "train/loss_total": 0.4229564666748047 }, { "epoch": 1.8808442425861607, "grad_norm": 3.73503041267395, "learning_rate": 1.8722949505744058e-05, "loss": 0.5123, "step": 7040 }, { "epoch": 1.8808442425861607, "step": 7040, "train/loss_ctc": 0.9639636874198914, "train/loss_error": 0.4390276372432709, "train/loss_total": 0.5440148711204529 }, { "epoch": 1.8811114079615283, "step": 7041, "train/loss_ctc": 0.6926119327545166, "train/loss_error": 0.45485830307006836, "train/loss_total": 0.502409040927887 }, { "epoch": 1.8813785733368955, "step": 7042, "train/loss_ctc": 1.2169727087020874, "train/loss_error": 0.47391635179519653, "train/loss_total": 0.6225275993347168 }, { "epoch": 1.881645738712263, "step": 7043, "train/loss_ctc": 1.2647500038146973, "train/loss_error": 0.4339914917945862, "train/loss_total": 0.6001431941986084 }, { "epoch": 1.8819129040876303, "step": 7044, "train/loss_ctc": 0.43672144412994385, "train/loss_error": 0.4563271999359131, "train/loss_total": 0.45240604877471924 }, 
{ "epoch": 1.8821800694629975, "step": 7045, "train/loss_ctc": 0.5697957277297974, "train/loss_error": 0.4658595323562622, "train/loss_total": 0.48664677143096924 }, { "epoch": 1.882447234838365, "step": 7046, "train/loss_ctc": 0.6995164155960083, "train/loss_error": 0.458871990442276, "train/loss_total": 0.5070008635520935 }, { "epoch": 1.8827144002137324, "step": 7047, "train/loss_ctc": 0.41928982734680176, "train/loss_error": 0.41340839862823486, "train/loss_total": 0.4145846962928772 }, { "epoch": 1.8829815655890996, "step": 7048, "train/loss_ctc": 0.8719068169593811, "train/loss_error": 0.4789508283138275, "train/loss_total": 0.5575420260429382 }, { "epoch": 1.883248730964467, "step": 7049, "train/loss_ctc": 1.1074728965759277, "train/loss_error": 0.3914766013622284, "train/loss_total": 0.5346758365631104 }, { "epoch": 1.8835158963398344, "grad_norm": 3.418436050415039, "learning_rate": 1.8706919583222016e-05, "loss": 0.5222, "step": 7050 }, { "epoch": 1.8835158963398344, "step": 7050, "train/loss_ctc": 0.5869382619857788, "train/loss_error": 0.42285314202308655, "train/loss_total": 0.45567017793655396 }, { "epoch": 1.8837830617152016, "step": 7051, "train/loss_ctc": 1.162255048751831, "train/loss_error": 0.42606768012046814, "train/loss_total": 0.5733051300048828 }, { "epoch": 1.884050227090569, "step": 7052, "train/loss_ctc": 1.183090090751648, "train/loss_error": 0.41693413257598877, "train/loss_total": 0.5701653361320496 }, { "epoch": 1.8843173924659364, "step": 7053, "train/loss_ctc": 0.8142169713973999, "train/loss_error": 0.42279401421546936, "train/loss_total": 0.5010786056518555 }, { "epoch": 1.8845845578413036, "step": 7054, "train/loss_ctc": 0.5356130599975586, "train/loss_error": 0.5162050127983093, "train/loss_total": 0.5200866460800171 }, { "epoch": 1.8848517232166713, "step": 7055, "train/loss_ctc": 0.3867095410823822, "train/loss_error": 0.38058555126190186, "train/loss_total": 0.38181036710739136 }, { "epoch": 1.8851188885920385, "step": 7056, 
"train/loss_ctc": 0.7970703840255737, "train/loss_error": 0.44168686866760254, "train/loss_total": 0.5127636194229126 }, { "epoch": 1.8853860539674057, "step": 7057, "train/loss_ctc": 0.4778136610984802, "train/loss_error": 0.405900239944458, "train/loss_total": 0.42028293013572693 }, { "epoch": 1.8856532193427733, "step": 7058, "train/loss_ctc": 0.9828970432281494, "train/loss_error": 0.4816454350948334, "train/loss_total": 0.5818957686424255 }, { "epoch": 1.8859203847181405, "step": 7059, "train/loss_ctc": 0.41003066301345825, "train/loss_error": 0.4249078631401062, "train/loss_total": 0.4219324290752411 }, { "epoch": 1.886187550093508, "grad_norm": 4.765143394470215, "learning_rate": 1.8690889660699974e-05, "loss": 0.4939, "step": 7060 }, { "epoch": 1.886187550093508, "step": 7060, "train/loss_ctc": 0.4319307208061218, "train/loss_error": 0.47322437167167664, "train/loss_total": 0.4649656414985657 }, { "epoch": 1.8864547154688753, "step": 7061, "train/loss_ctc": 0.5967254638671875, "train/loss_error": 0.366649329662323, "train/loss_total": 0.4126645624637604 }, { "epoch": 1.8867218808442425, "step": 7062, "train/loss_ctc": 0.4063837230205536, "train/loss_error": 0.4995267391204834, "train/loss_total": 0.4808981418609619 }, { "epoch": 1.88698904621961, "step": 7063, "train/loss_ctc": 0.3104285001754761, "train/loss_error": 0.39513203501701355, "train/loss_total": 0.3781913220882416 }, { "epoch": 1.8872562115949774, "step": 7064, "train/loss_ctc": 0.338514506816864, "train/loss_error": 0.46956753730773926, "train/loss_total": 0.4433569312095642 }, { "epoch": 1.8875233769703446, "step": 7065, "train/loss_ctc": 0.41722816228866577, "train/loss_error": 0.42326581478118896, "train/loss_total": 0.4220582842826843 }, { "epoch": 1.887790542345712, "step": 7066, "train/loss_ctc": 0.4168550670146942, "train/loss_error": 0.4231358468532562, "train/loss_total": 0.42187970876693726 }, { "epoch": 1.8880577077210794, "step": 7067, "train/loss_ctc": 1.0648925304412842, 
"train/loss_error": 0.49198099970817566, "train/loss_total": 0.6065633296966553 }, { "epoch": 1.8883248730964466, "step": 7068, "train/loss_ctc": 1.0560624599456787, "train/loss_error": 0.4531862139701843, "train/loss_total": 0.5737614631652832 }, { "epoch": 1.888592038471814, "step": 7069, "train/loss_ctc": 0.8447210788726807, "train/loss_error": 0.4910111129283905, "train/loss_total": 0.5617530941963196 }, { "epoch": 1.8888592038471814, "grad_norm": 3.143998861312866, "learning_rate": 1.8674859738177932e-05, "loss": 0.4766, "step": 7070 }, { "epoch": 1.8888592038471814, "step": 7070, "train/loss_ctc": 0.5070978403091431, "train/loss_error": 0.40265893936157227, "train/loss_total": 0.4235467314720154 }, { "epoch": 1.8891263692225486, "step": 7071, "train/loss_ctc": 0.3130285143852234, "train/loss_error": 0.4631633162498474, "train/loss_total": 0.43313637375831604 }, { "epoch": 1.8893935345979163, "step": 7072, "train/loss_ctc": 0.6096898317337036, "train/loss_error": 0.4943409264087677, "train/loss_total": 0.5174106955528259 }, { "epoch": 1.8896606999732835, "step": 7073, "train/loss_ctc": 0.754445493221283, "train/loss_error": 0.4220508635044098, "train/loss_total": 0.4885298013687134 }, { "epoch": 1.8899278653486506, "step": 7074, "train/loss_ctc": 0.7794444561004639, "train/loss_error": 0.5010070204734802, "train/loss_total": 0.556694507598877 }, { "epoch": 1.8901950307240183, "step": 7075, "train/loss_ctc": 1.2334392070770264, "train/loss_error": 0.4724588096141815, "train/loss_total": 0.6246548891067505 }, { "epoch": 1.8904621960993855, "step": 7076, "train/loss_ctc": 0.7645275592803955, "train/loss_error": 0.43823477625846863, "train/loss_total": 0.5034933090209961 }, { "epoch": 1.890729361474753, "step": 7077, "train/loss_ctc": 0.7255479693412781, "train/loss_error": 0.45124319195747375, "train/loss_total": 0.5061041116714478 }, { "epoch": 1.8909965268501203, "step": 7078, "train/loss_ctc": 0.5491148829460144, "train/loss_error": 0.39123421907424927, 
"train/loss_total": 0.4228103756904602 }, { "epoch": 1.8912636922254875, "step": 7079, "train/loss_ctc": 0.5588786602020264, "train/loss_error": 0.3830893635749817, "train/loss_total": 0.4182472229003906 }, { "epoch": 1.891530857600855, "grad_norm": 1.7710316181182861, "learning_rate": 1.865882981565589e-05, "loss": 0.4895, "step": 7080 }, { "epoch": 1.891530857600855, "step": 7080, "train/loss_ctc": 1.1861803531646729, "train/loss_error": 0.4660733938217163, "train/loss_total": 0.6100947856903076 }, { "epoch": 1.8917980229762223, "step": 7081, "train/loss_ctc": 0.7462509870529175, "train/loss_error": 0.4949912428855896, "train/loss_total": 0.5452432036399841 }, { "epoch": 1.8920651883515895, "step": 7082, "train/loss_ctc": 0.7714029550552368, "train/loss_error": 0.49148303270339966, "train/loss_total": 0.5474669933319092 }, { "epoch": 1.892332353726957, "step": 7083, "train/loss_ctc": 0.7463726997375488, "train/loss_error": 0.4851641356945038, "train/loss_total": 0.5374058485031128 }, { "epoch": 1.8925995191023244, "step": 7084, "train/loss_ctc": 1.182779312133789, "train/loss_error": 0.4430372416973114, "train/loss_total": 0.5909856557846069 }, { "epoch": 1.8928666844776916, "step": 7085, "train/loss_ctc": 0.7393012642860413, "train/loss_error": 0.4696255326271057, "train/loss_total": 0.5235607028007507 }, { "epoch": 1.893133849853059, "step": 7086, "train/loss_ctc": 0.9324251413345337, "train/loss_error": 0.4596358835697174, "train/loss_total": 0.5541937351226807 }, { "epoch": 1.8934010152284264, "step": 7087, "train/loss_ctc": 1.0941929817199707, "train/loss_error": 0.5124555826187134, "train/loss_total": 0.6288030743598938 }, { "epoch": 1.8936681806037936, "step": 7088, "train/loss_ctc": 1.2620038986206055, "train/loss_error": 0.4274655282497406, "train/loss_total": 0.5943732261657715 }, { "epoch": 1.8939353459791612, "step": 7089, "train/loss_ctc": 0.4286125898361206, "train/loss_error": 0.5020621418952942, "train/loss_total": 0.4873722195625305 }, { "epoch": 
1.8942025113545284, "grad_norm": 2.59810209274292, "learning_rate": 1.864279989313385e-05, "loss": 0.562, "step": 7090 }, { "epoch": 1.8942025113545284, "step": 7090, "train/loss_ctc": 1.32856023311615, "train/loss_error": 0.45915135741233826, "train/loss_total": 0.6330331563949585 }, { "epoch": 1.8944696767298959, "step": 7091, "train/loss_ctc": 0.8441085815429688, "train/loss_error": 0.44519802927970886, "train/loss_total": 0.5249801874160767 }, { "epoch": 1.8947368421052633, "step": 7092, "train/loss_ctc": 0.7562706470489502, "train/loss_error": 0.46047624945640564, "train/loss_total": 0.5196351408958435 }, { "epoch": 1.8950040074806305, "step": 7093, "train/loss_ctc": 1.463631510734558, "train/loss_error": 0.4936424195766449, "train/loss_total": 0.6876402497291565 }, { "epoch": 1.8952711728559979, "step": 7094, "train/loss_ctc": 0.6097721457481384, "train/loss_error": 0.4563267230987549, "train/loss_total": 0.48701581358909607 }, { "epoch": 1.8955383382313653, "step": 7095, "train/loss_ctc": 0.5158193707466125, "train/loss_error": 0.4622781574726105, "train/loss_total": 0.4729864001274109 }, { "epoch": 1.8958055036067325, "step": 7096, "train/loss_ctc": 1.097113847732544, "train/loss_error": 0.5024526715278625, "train/loss_total": 0.6213849186897278 }, { "epoch": 1.8960726689821, "step": 7097, "train/loss_ctc": 0.7735931873321533, "train/loss_error": 0.3916221261024475, "train/loss_total": 0.4680163562297821 }, { "epoch": 1.8963398343574673, "step": 7098, "train/loss_ctc": 0.49749863147735596, "train/loss_error": 0.4344426691532135, "train/loss_total": 0.4470538794994354 }, { "epoch": 1.8966069997328345, "step": 7099, "train/loss_ctc": 0.8651007413864136, "train/loss_error": 0.5114603042602539, "train/loss_total": 0.5821883678436279 }, { "epoch": 1.896874165108202, "grad_norm": 2.1336734294891357, "learning_rate": 1.862676997061181e-05, "loss": 0.5444, "step": 7100 }, { "epoch": 1.896874165108202, "step": 7100, "train/loss_ctc": 0.7792839407920837, 
"train/loss_error": 0.40002408623695374, "train/loss_total": 0.4758760631084442 }, { "epoch": 1.8971413304835694, "step": 7101, "train/loss_ctc": 1.6209877729415894, "train/loss_error": 0.5165055990219116, "train/loss_total": 0.737402081489563 }, { "epoch": 1.8974084958589366, "step": 7102, "train/loss_ctc": 0.6385856866836548, "train/loss_error": 0.43822741508483887, "train/loss_total": 0.478299081325531 }, { "epoch": 1.897675661234304, "step": 7103, "train/loss_ctc": 1.1751326322555542, "train/loss_error": 0.5162872076034546, "train/loss_total": 0.6480562686920166 }, { "epoch": 1.8979428266096714, "step": 7104, "train/loss_ctc": 0.3543788492679596, "train/loss_error": 0.5019153952598572, "train/loss_total": 0.47240808606147766 }, { "epoch": 1.8982099919850386, "step": 7105, "train/loss_ctc": 0.7552688717842102, "train/loss_error": 0.514899730682373, "train/loss_total": 0.5629735589027405 }, { "epoch": 1.8984771573604062, "step": 7106, "train/loss_ctc": 0.5105252265930176, "train/loss_error": 0.514480710029602, "train/loss_total": 0.5136896371841431 }, { "epoch": 1.8987443227357734, "step": 7107, "train/loss_ctc": 0.8398176431655884, "train/loss_error": 0.4882356822490692, "train/loss_total": 0.558552086353302 }, { "epoch": 1.8990114881111408, "step": 7108, "train/loss_ctc": 0.3218405246734619, "train/loss_error": 0.4621223509311676, "train/loss_total": 0.4340659976005554 }, { "epoch": 1.8992786534865083, "step": 7109, "train/loss_ctc": 0.6501730680465698, "train/loss_error": 0.47723114490509033, "train/loss_total": 0.5118195414543152 }, { "epoch": 1.8995458188618755, "grad_norm": 1.6626842021942139, "learning_rate": 1.8610740048089768e-05, "loss": 0.5393, "step": 7110 }, { "epoch": 1.8995458188618755, "step": 7110, "train/loss_ctc": 1.0456920862197876, "train/loss_error": 0.46276798844337463, "train/loss_total": 0.579352855682373 }, { "epoch": 1.8998129842372429, "step": 7111, "train/loss_ctc": 0.5856520533561707, "train/loss_error": 0.39208880066871643, 
"train/loss_total": 0.4308014512062073 }, { "epoch": 1.9000801496126103, "step": 7112, "train/loss_ctc": 1.293684959411621, "train/loss_error": 0.48256567120552063, "train/loss_total": 0.6447895765304565 }, { "epoch": 1.9003473149879775, "step": 7113, "train/loss_ctc": 0.6594662070274353, "train/loss_error": 0.4601854979991913, "train/loss_total": 0.500041663646698 }, { "epoch": 1.900614480363345, "step": 7114, "train/loss_ctc": 0.5858115553855896, "train/loss_error": 0.49504154920578003, "train/loss_total": 0.5131955742835999 }, { "epoch": 1.9008816457387123, "step": 7115, "train/loss_ctc": 0.5179648995399475, "train/loss_error": 0.4503999948501587, "train/loss_total": 0.4639129936695099 }, { "epoch": 1.9011488111140795, "step": 7116, "train/loss_ctc": 0.5652971267700195, "train/loss_error": 0.5033245086669922, "train/loss_total": 0.5157190561294556 }, { "epoch": 1.901415976489447, "step": 7117, "train/loss_ctc": 0.44649413228034973, "train/loss_error": 0.4744795858860016, "train/loss_total": 0.4688825011253357 }, { "epoch": 1.9016831418648144, "step": 7118, "train/loss_ctc": 0.7567451596260071, "train/loss_error": 0.46997249126434326, "train/loss_total": 0.5273270606994629 }, { "epoch": 1.9019503072401815, "step": 7119, "train/loss_ctc": 1.2288389205932617, "train/loss_error": 0.4339231252670288, "train/loss_total": 0.5929062962532043 }, { "epoch": 1.9022174726155492, "grad_norm": 12.011702537536621, "learning_rate": 1.8594710125567726e-05, "loss": 0.5237, "step": 7120 }, { "epoch": 1.9022174726155492, "step": 7120, "train/loss_ctc": 0.5539122819900513, "train/loss_error": 0.47021088004112244, "train/loss_total": 0.48695117235183716 }, { "epoch": 1.9024846379909164, "step": 7121, "train/loss_ctc": 0.6484755277633667, "train/loss_error": 0.5124943852424622, "train/loss_total": 0.5396906137466431 }, { "epoch": 1.9027518033662836, "step": 7122, "train/loss_ctc": 0.7142050266265869, "train/loss_error": 0.4406863749027252, "train/loss_total": 0.4953901171684265 }, { 
"epoch": 1.9030189687416512, "step": 7123, "train/loss_ctc": 0.9650884866714478, "train/loss_error": 0.36777979135513306, "train/loss_total": 0.48724156618118286 }, { "epoch": 1.9032861341170184, "step": 7124, "train/loss_ctc": 2.154383897781372, "train/loss_error": 0.4740862250328064, "train/loss_total": 0.8101457357406616 }, { "epoch": 1.9035532994923858, "step": 7125, "train/loss_ctc": 0.4512530565261841, "train/loss_error": 0.47701025009155273, "train/loss_total": 0.47185882925987244 }, { "epoch": 1.9038204648677532, "step": 7126, "train/loss_ctc": 0.7459738850593567, "train/loss_error": 0.47629037499427795, "train/loss_total": 0.5302270650863647 }, { "epoch": 1.9040876302431204, "step": 7127, "train/loss_ctc": 0.9166914224624634, "train/loss_error": 0.4788084924221039, "train/loss_total": 0.5663850903511047 }, { "epoch": 1.9043547956184879, "step": 7128, "train/loss_ctc": 0.5715407729148865, "train/loss_error": 0.4610177278518677, "train/loss_total": 0.4831223487854004 }, { "epoch": 1.9046219609938553, "step": 7129, "train/loss_ctc": 0.42416563630104065, "train/loss_error": 0.4535020887851715, "train/loss_total": 0.44763481616973877 }, { "epoch": 1.9048891263692225, "grad_norm": 2.4197609424591064, "learning_rate": 1.8578680203045684e-05, "loss": 0.5319, "step": 7130 }, { "epoch": 1.9048891263692225, "step": 7130, "train/loss_ctc": 0.4327976405620575, "train/loss_error": 0.4781865179538727, "train/loss_total": 0.4691087603569031 }, { "epoch": 1.90515629174459, "step": 7131, "train/loss_ctc": 0.3797556757926941, "train/loss_error": 0.4487699270248413, "train/loss_total": 0.4349670708179474 }, { "epoch": 1.9054234571199573, "step": 7132, "train/loss_ctc": 0.5183283686637878, "train/loss_error": 0.4729146659374237, "train/loss_total": 0.48199743032455444 }, { "epoch": 1.9056906224953245, "step": 7133, "train/loss_ctc": 1.0833775997161865, "train/loss_error": 0.5224566459655762, "train/loss_total": 0.6346408128738403 }, { "epoch": 1.905957787870692, "step": 7134, 
"train/loss_ctc": 0.8971548080444336, "train/loss_error": 0.4669431149959564, "train/loss_total": 0.552985429763794 }, { "epoch": 1.9062249532460593, "step": 7135, "train/loss_ctc": 0.45823097229003906, "train/loss_error": 0.4260397255420685, "train/loss_total": 0.4324779808521271 }, { "epoch": 1.9064921186214265, "step": 7136, "train/loss_ctc": 0.7151502370834351, "train/loss_error": 0.48744261264801025, "train/loss_total": 0.5329841375350952 }, { "epoch": 1.9067592839967942, "step": 7137, "train/loss_ctc": 0.8748169541358948, "train/loss_error": 0.4814116954803467, "train/loss_total": 0.5600927472114563 }, { "epoch": 1.9070264493721614, "step": 7138, "train/loss_ctc": 1.344559907913208, "train/loss_error": 0.44864028692245483, "train/loss_total": 0.6278241872787476 }, { "epoch": 1.9072936147475286, "step": 7139, "train/loss_ctc": 1.0598649978637695, "train/loss_error": 0.48916465044021606, "train/loss_total": 0.6033047437667847 }, { "epoch": 1.9075607801228962, "grad_norm": 1.0623610019683838, "learning_rate": 1.8562650280523642e-05, "loss": 0.533, "step": 7140 }, { "epoch": 1.9075607801228962, "step": 7140, "train/loss_ctc": 0.8177936673164368, "train/loss_error": 0.5059104561805725, "train/loss_total": 0.5682871341705322 }, { "epoch": 1.9078279454982634, "step": 7141, "train/loss_ctc": 0.41180965304374695, "train/loss_error": 0.447175532579422, "train/loss_total": 0.44010236859321594 }, { "epoch": 1.9080951108736308, "step": 7142, "train/loss_ctc": 0.24101264774799347, "train/loss_error": 0.4689919352531433, "train/loss_total": 0.42339611053466797 }, { "epoch": 1.9083622762489982, "step": 7143, "train/loss_ctc": 0.5607057809829712, "train/loss_error": 0.4447580873966217, "train/loss_total": 0.4679476320743561 }, { "epoch": 1.9086294416243654, "step": 7144, "train/loss_ctc": 1.1374614238739014, "train/loss_error": 0.44398924708366394, "train/loss_total": 0.5826836824417114 }, { "epoch": 1.9088966069997328, "step": 7145, "train/loss_ctc": 0.6138846278190613, 
"train/loss_error": 0.5154069066047668, "train/loss_total": 0.5351024866104126 }, { "epoch": 1.9091637723751003, "step": 7146, "train/loss_ctc": 0.697998583316803, "train/loss_error": 0.3991454839706421, "train/loss_total": 0.4589161276817322 }, { "epoch": 1.9094309377504675, "step": 7147, "train/loss_ctc": 0.36438459157943726, "train/loss_error": 0.5616600513458252, "train/loss_total": 0.5222049355506897 }, { "epoch": 1.9096981031258349, "step": 7148, "train/loss_ctc": 0.5996346473693848, "train/loss_error": 0.489760160446167, "train/loss_total": 0.5117350816726685 }, { "epoch": 1.9099652685012023, "step": 7149, "train/loss_ctc": 1.0054504871368408, "train/loss_error": 0.49846768379211426, "train/loss_total": 0.5998642444610596 }, { "epoch": 1.9102324338765695, "grad_norm": 2.5746877193450928, "learning_rate": 1.8546620358001603e-05, "loss": 0.511, "step": 7150 }, { "epoch": 1.9102324338765695, "step": 7150, "train/loss_ctc": 0.5174267888069153, "train/loss_error": 0.5251861810684204, "train/loss_total": 0.5236343145370483 }, { "epoch": 1.910499599251937, "step": 7151, "train/loss_ctc": 0.9733759164810181, "train/loss_error": 0.5138444900512695, "train/loss_total": 0.6057507991790771 }, { "epoch": 1.9107667646273043, "step": 7152, "train/loss_ctc": 0.42816874384880066, "train/loss_error": 0.4451059401035309, "train/loss_total": 0.44171851873397827 }, { "epoch": 1.9110339300026715, "step": 7153, "train/loss_ctc": 0.9085953831672668, "train/loss_error": 0.45651617646217346, "train/loss_total": 0.54693204164505 }, { "epoch": 1.9113010953780392, "step": 7154, "train/loss_ctc": 0.6746848821640015, "train/loss_error": 0.5314412713050842, "train/loss_total": 0.5600900053977966 }, { "epoch": 1.9115682607534064, "step": 7155, "train/loss_ctc": 0.7826130390167236, "train/loss_error": 0.5190335512161255, "train/loss_total": 0.5717494487762451 }, { "epoch": 1.9118354261287736, "step": 7156, "train/loss_ctc": 1.9906615018844604, "train/loss_error": 0.4639786183834076, 
"train/loss_total": 0.7693151831626892 }, { "epoch": 1.9121025915041412, "step": 7157, "train/loss_ctc": 0.927069365978241, "train/loss_error": 0.49766507744789124, "train/loss_total": 0.5835459232330322 }, { "epoch": 1.9123697568795084, "step": 7158, "train/loss_ctc": 0.42535555362701416, "train/loss_error": 0.4605633020401001, "train/loss_total": 0.4535217583179474 }, { "epoch": 1.9126369222548758, "step": 7159, "train/loss_ctc": 1.408955693244934, "train/loss_error": 0.5196390748023987, "train/loss_total": 0.6975023746490479 }, { "epoch": 1.9129040876302432, "grad_norm": 2.6004908084869385, "learning_rate": 1.853059043547956e-05, "loss": 0.5754, "step": 7160 }, { "epoch": 1.9129040876302432, "step": 7160, "train/loss_ctc": 0.7576152682304382, "train/loss_error": 0.4929496645927429, "train/loss_total": 0.5458828210830688 }, { "epoch": 1.9131712530056104, "step": 7161, "train/loss_ctc": 0.946539580821991, "train/loss_error": 0.4700400233268738, "train/loss_total": 0.5653399229049683 }, { "epoch": 1.9134384183809778, "step": 7162, "train/loss_ctc": 0.6426920890808105, "train/loss_error": 0.4422350823879242, "train/loss_total": 0.4823265075683594 }, { "epoch": 1.9137055837563453, "step": 7163, "train/loss_ctc": 1.4329736232757568, "train/loss_error": 0.47532355785369873, "train/loss_total": 0.6668535470962524 }, { "epoch": 1.9139727491317124, "step": 7164, "train/loss_ctc": 1.5155904293060303, "train/loss_error": 0.4878837466239929, "train/loss_total": 0.6934250593185425 }, { "epoch": 1.9142399145070799, "step": 7165, "train/loss_ctc": 0.634631335735321, "train/loss_error": 0.48290711641311646, "train/loss_total": 0.5132519602775574 }, { "epoch": 1.9145070798824473, "step": 7166, "train/loss_ctc": 0.6783571243286133, "train/loss_error": 0.5285806655883789, "train/loss_total": 0.5585359334945679 }, { "epoch": 1.9147742452578145, "step": 7167, "train/loss_ctc": 0.4269808530807495, "train/loss_error": 0.40275856852531433, "train/loss_total": 0.40760302543640137 }, { 
"epoch": 1.915041410633182, "step": 7168, "train/loss_ctc": 1.0894720554351807, "train/loss_error": 0.44018855690956116, "train/loss_total": 0.5700452923774719 }, { "epoch": 1.9153085760085493, "step": 7169, "train/loss_ctc": 0.7635195255279541, "train/loss_error": 0.4157174229621887, "train/loss_total": 0.48527786135673523 }, { "epoch": 1.9155757413839165, "grad_norm": 2.9599788188934326, "learning_rate": 1.8514560512957523e-05, "loss": 0.5489, "step": 7170 }, { "epoch": 1.9155757413839165, "step": 7170, "train/loss_ctc": 0.5843344926834106, "train/loss_error": 0.49019014835357666, "train/loss_total": 0.5090190172195435 }, { "epoch": 1.9158429067592841, "step": 7171, "train/loss_ctc": 0.3575563132762909, "train/loss_error": 0.5638018250465393, "train/loss_total": 0.5225527286529541 }, { "epoch": 1.9161100721346513, "step": 7172, "train/loss_ctc": 0.742619514465332, "train/loss_error": 0.4249618649482727, "train/loss_total": 0.4884933829307556 }, { "epoch": 1.9163772375100185, "step": 7173, "train/loss_ctc": 0.882457971572876, "train/loss_error": 0.5460140705108643, "train/loss_total": 0.6133028864860535 }, { "epoch": 1.9166444028853862, "step": 7174, "train/loss_ctc": 0.6319107413291931, "train/loss_error": 0.4638780653476715, "train/loss_total": 0.49748462438583374 }, { "epoch": 1.9169115682607534, "step": 7175, "train/loss_ctc": 1.236106276512146, "train/loss_error": 0.4581744372844696, "train/loss_total": 0.6137608289718628 }, { "epoch": 1.9171787336361208, "step": 7176, "train/loss_ctc": 1.085978627204895, "train/loss_error": 0.4879798889160156, "train/loss_total": 0.6075796484947205 }, { "epoch": 1.9174458990114882, "step": 7177, "train/loss_ctc": 0.6207278966903687, "train/loss_error": 0.5013235807418823, "train/loss_total": 0.5252044796943665 }, { "epoch": 1.9177130643868554, "step": 7178, "train/loss_ctc": 1.2983417510986328, "train/loss_error": 0.5269086360931396, "train/loss_total": 0.6811952590942383 }, { "epoch": 1.9179802297622228, "step": 7179, 
"train/loss_ctc": 0.8046037554740906, "train/loss_error": 0.44814687967300415, "train/loss_total": 0.5194382667541504 }, { "epoch": 1.9182473951375902, "grad_norm": 7.9085588455200195, "learning_rate": 1.849853059043548e-05, "loss": 0.5578, "step": 7180 }, { "epoch": 1.9182473951375902, "step": 7180, "train/loss_ctc": 0.8025795221328735, "train/loss_error": 0.6052992343902588, "train/loss_total": 0.6447553038597107 }, { "epoch": 1.9185145605129574, "step": 7181, "train/loss_ctc": 0.7710569500923157, "train/loss_error": 0.4001096487045288, "train/loss_total": 0.4742991328239441 }, { "epoch": 1.9187817258883249, "step": 7182, "train/loss_ctc": 0.6964290738105774, "train/loss_error": 0.44939935207366943, "train/loss_total": 0.49880528450012207 }, { "epoch": 1.9190488912636923, "step": 7183, "train/loss_ctc": 0.6170340776443481, "train/loss_error": 0.42861315608024597, "train/loss_total": 0.46629735827445984 }, { "epoch": 1.9193160566390595, "step": 7184, "train/loss_ctc": 0.8849607706069946, "train/loss_error": 0.4437297582626343, "train/loss_total": 0.5319759845733643 }, { "epoch": 1.9195832220144269, "step": 7185, "train/loss_ctc": 0.8716760873794556, "train/loss_error": 0.41822290420532227, "train/loss_total": 0.5089135766029358 }, { "epoch": 1.9198503873897943, "step": 7186, "train/loss_ctc": 0.28244251012802124, "train/loss_error": 0.46373239159584045, "train/loss_total": 0.4274744391441345 }, { "epoch": 1.9201175527651615, "step": 7187, "train/loss_ctc": 1.1015803813934326, "train/loss_error": 0.5069613456726074, "train/loss_total": 0.6258851289749146 }, { "epoch": 1.9203847181405291, "step": 7188, "train/loss_ctc": 0.9709644317626953, "train/loss_error": 0.5648291110992432, "train/loss_total": 0.6460561752319336 }, { "epoch": 1.9206518835158963, "step": 7189, "train/loss_ctc": 0.48179733753204346, "train/loss_error": 0.44569942355155945, "train/loss_total": 0.45291900634765625 }, { "epoch": 1.9209190488912637, "grad_norm": 1.6132842302322388, "learning_rate": 
1.848250066791344e-05, "loss": 0.5277, "step": 7190 }, { "epoch": 1.9209190488912637, "step": 7190, "train/loss_ctc": 1.1791067123413086, "train/loss_error": 0.44642865657806396, "train/loss_total": 0.5929642915725708 }, { "epoch": 1.9211862142666312, "step": 7191, "train/loss_ctc": 1.2515491247177124, "train/loss_error": 0.4712168574333191, "train/loss_total": 0.6272833347320557 }, { "epoch": 1.9214533796419984, "step": 7192, "train/loss_ctc": 1.0425273180007935, "train/loss_error": 0.4646233916282654, "train/loss_total": 0.5802041888237 }, { "epoch": 1.9217205450173658, "step": 7193, "train/loss_ctc": 1.8159961700439453, "train/loss_error": 0.46149805188179016, "train/loss_total": 0.7323976755142212 }, { "epoch": 1.9219877103927332, "step": 7194, "train/loss_ctc": 0.3302993178367615, "train/loss_error": 0.4250865876674652, "train/loss_total": 0.4061291217803955 }, { "epoch": 1.9222548757681004, "step": 7195, "train/loss_ctc": 0.44481053948402405, "train/loss_error": 0.5144351124763489, "train/loss_total": 0.5005102157592773 }, { "epoch": 1.9225220411434678, "step": 7196, "train/loss_ctc": 0.553388237953186, "train/loss_error": 0.48845192790031433, "train/loss_total": 0.5014392137527466 }, { "epoch": 1.9227892065188352, "step": 7197, "train/loss_ctc": 0.7575743198394775, "train/loss_error": 0.49814873933792114, "train/loss_total": 0.5500338673591614 }, { "epoch": 1.9230563718942024, "step": 7198, "train/loss_ctc": 1.1887407302856445, "train/loss_error": 0.49484196305274963, "train/loss_total": 0.6336216926574707 }, { "epoch": 1.9233235372695698, "step": 7199, "train/loss_ctc": 1.1041699647903442, "train/loss_error": 0.4225287437438965, "train/loss_total": 0.5588570237159729 }, { "epoch": 1.9235907026449373, "grad_norm": 1.624426007270813, "learning_rate": 1.8466470745391397e-05, "loss": 0.5683, "step": 7200 }, { "epoch": 1.9235907026449373, "step": 7200, "train/loss_ctc": 0.5701992511749268, "train/loss_error": 0.48536378145217896, "train/loss_total": 
0.5023308992385864 }, { "epoch": 1.9238578680203045, "step": 7201, "train/loss_ctc": 0.578799307346344, "train/loss_error": 0.5035591125488281, "train/loss_total": 0.5186071395874023 }, { "epoch": 1.9241250333956719, "step": 7202, "train/loss_ctc": 0.8234370946884155, "train/loss_error": 0.43796008825302124, "train/loss_total": 0.5150555372238159 }, { "epoch": 1.9243921987710393, "step": 7203, "train/loss_ctc": 1.07077956199646, "train/loss_error": 0.4925728440284729, "train/loss_total": 0.6082141995429993 }, { "epoch": 1.9246593641464065, "step": 7204, "train/loss_ctc": 0.5457064509391785, "train/loss_error": 0.4728666841983795, "train/loss_total": 0.48743465542793274 }, { "epoch": 1.9249265295217741, "step": 7205, "train/loss_ctc": 1.3530333042144775, "train/loss_error": 0.48554807901382446, "train/loss_total": 0.6590451002120972 }, { "epoch": 1.9251936948971413, "step": 7206, "train/loss_ctc": 0.8576055765151978, "train/loss_error": 0.37292221188545227, "train/loss_total": 0.46985888481140137 }, { "epoch": 1.9254608602725087, "step": 7207, "train/loss_ctc": 0.6359678506851196, "train/loss_error": 0.5452402234077454, "train/loss_total": 0.5633857250213623 }, { "epoch": 1.9257280256478762, "step": 7208, "train/loss_ctc": 0.7643295526504517, "train/loss_error": 0.4330587089061737, "train/loss_total": 0.4993128776550293 }, { "epoch": 1.9259951910232433, "step": 7209, "train/loss_ctc": 0.7673923373222351, "train/loss_error": 0.504500687122345, "train/loss_total": 0.557079017162323 }, { "epoch": 1.9262623563986108, "grad_norm": 2.8042917251586914, "learning_rate": 1.845044082286936e-05, "loss": 0.538, "step": 7210 }, { "epoch": 1.9262623563986108, "step": 7210, "train/loss_ctc": 1.2751868963241577, "train/loss_error": 0.43062418699264526, "train/loss_total": 0.5995367765426636 }, { "epoch": 1.9265295217739782, "step": 7211, "train/loss_ctc": 0.6141065359115601, "train/loss_error": 0.4336944818496704, "train/loss_total": 0.4697768986225128 }, { "epoch": 
1.9267966871493454, "step": 7212, "train/loss_ctc": 0.504391074180603, "train/loss_error": 0.41619715094566345, "train/loss_total": 0.4338359236717224 }, { "epoch": 1.9270638525247128, "step": 7213, "train/loss_ctc": 0.4424336552619934, "train/loss_error": 0.44126415252685547, "train/loss_total": 0.4414980709552765 }, { "epoch": 1.9273310179000802, "step": 7214, "train/loss_ctc": 1.114383339881897, "train/loss_error": 0.5028634667396545, "train/loss_total": 0.6251674890518188 }, { "epoch": 1.9275981832754474, "step": 7215, "train/loss_ctc": 0.8856082558631897, "train/loss_error": 0.45991945266723633, "train/loss_total": 0.5450572371482849 }, { "epoch": 1.9278653486508148, "step": 7216, "train/loss_ctc": 0.542969822883606, "train/loss_error": 0.49572882056236267, "train/loss_total": 0.5051770210266113 }, { "epoch": 1.9281325140261822, "step": 7217, "train/loss_ctc": 1.6705111265182495, "train/loss_error": 0.492135614156723, "train/loss_total": 0.7278107404708862 }, { "epoch": 1.9283996794015494, "step": 7218, "train/loss_ctc": 0.5993344783782959, "train/loss_error": 0.48820430040359497, "train/loss_total": 0.5104303359985352 }, { "epoch": 1.928666844776917, "step": 7219, "train/loss_ctc": 0.9920656681060791, "train/loss_error": 0.4866993725299835, "train/loss_total": 0.5877726078033447 }, { "epoch": 1.9289340101522843, "grad_norm": 2.7776715755462646, "learning_rate": 1.8434410900347317e-05, "loss": 0.5446, "step": 7220 }, { "epoch": 1.9289340101522843, "step": 7220, "train/loss_ctc": 0.5602770447731018, "train/loss_error": 0.46735879778862, "train/loss_total": 0.48594245314598083 }, { "epoch": 1.9292011755276515, "step": 7221, "train/loss_ctc": 0.6924214363098145, "train/loss_error": 0.48186981678009033, "train/loss_total": 0.5239801406860352 }, { "epoch": 1.929468340903019, "step": 7222, "train/loss_ctc": 1.0322566032409668, "train/loss_error": 0.5289211869239807, "train/loss_total": 0.62958824634552 }, { "epoch": 1.9297355062783863, "step": 7223, 
"train/loss_ctc": 0.6151571869850159, "train/loss_error": 0.4433138072490692, "train/loss_total": 0.477682501077652 }, { "epoch": 1.9300026716537537, "step": 7224, "train/loss_ctc": 0.5282735824584961, "train/loss_error": 0.49158990383148193, "train/loss_total": 0.49892663955688477 }, { "epoch": 1.9302698370291211, "step": 7225, "train/loss_ctc": 0.9128148555755615, "train/loss_error": 0.44028040766716003, "train/loss_total": 0.5347872972488403 }, { "epoch": 1.9305370024044883, "step": 7226, "train/loss_ctc": 0.7938690185546875, "train/loss_error": 0.4829763174057007, "train/loss_total": 0.545154869556427 }, { "epoch": 1.9308041677798558, "step": 7227, "train/loss_ctc": 1.0198489427566528, "train/loss_error": 0.5452756285667419, "train/loss_total": 0.6401903033256531 }, { "epoch": 1.9310713331552232, "step": 7228, "train/loss_ctc": 0.6069476008415222, "train/loss_error": 0.5582784414291382, "train/loss_total": 0.5680122971534729 }, { "epoch": 1.9313384985305904, "step": 7229, "train/loss_ctc": 0.7463628649711609, "train/loss_error": 0.5335603356361389, "train/loss_total": 0.5761208534240723 }, { "epoch": 1.9316056639059578, "grad_norm": 1.7729588747024536, "learning_rate": 1.8418380977825275e-05, "loss": 0.548, "step": 7230 }, { "epoch": 1.9316056639059578, "step": 7230, "train/loss_ctc": 0.3982890844345093, "train/loss_error": 0.43959522247314453, "train/loss_total": 0.4313340187072754 }, { "epoch": 1.9318728292813252, "step": 7231, "train/loss_ctc": 1.2793761491775513, "train/loss_error": 0.4948265850543976, "train/loss_total": 0.6517364978790283 }, { "epoch": 1.9321399946566924, "step": 7232, "train/loss_ctc": 0.6801880598068237, "train/loss_error": 0.4567071199417114, "train/loss_total": 0.5014033317565918 }, { "epoch": 1.9324071600320598, "step": 7233, "train/loss_ctc": 1.1736092567443848, "train/loss_error": 0.4692928194999695, "train/loss_total": 0.6101561188697815 }, { "epoch": 1.9326743254074272, "step": 7234, "train/loss_ctc": 1.2457270622253418, 
"train/loss_error": 0.46898025274276733, "train/loss_total": 0.6243296265602112 }, { "epoch": 1.9329414907827944, "step": 7235, "train/loss_ctc": 0.7319210767745972, "train/loss_error": 0.42703238129615784, "train/loss_total": 0.48801010847091675 }, { "epoch": 1.933208656158162, "step": 7236, "train/loss_ctc": 0.9100964069366455, "train/loss_error": 0.516606330871582, "train/loss_total": 0.5953043699264526 }, { "epoch": 1.9334758215335293, "step": 7237, "train/loss_ctc": 0.6570815443992615, "train/loss_error": 0.44336560368537903, "train/loss_total": 0.48610877990722656 }, { "epoch": 1.9337429869088965, "step": 7238, "train/loss_ctc": 0.5707201957702637, "train/loss_error": 0.4718717932701111, "train/loss_total": 0.49164146184921265 }, { "epoch": 1.934010152284264, "step": 7239, "train/loss_ctc": 0.981895387172699, "train/loss_error": 0.4912714660167694, "train/loss_total": 0.5893962383270264 }, { "epoch": 1.9342773176596313, "grad_norm": 2.234222412109375, "learning_rate": 1.8402351055303233e-05, "loss": 0.5469, "step": 7240 }, { "epoch": 1.9342773176596313, "step": 7240, "train/loss_ctc": 1.0888173580169678, "train/loss_error": 0.4319850206375122, "train/loss_total": 0.5633515119552612 }, { "epoch": 1.9345444830349987, "step": 7241, "train/loss_ctc": 0.7677876949310303, "train/loss_error": 0.5052247643470764, "train/loss_total": 0.5577373504638672 }, { "epoch": 1.9348116484103661, "step": 7242, "train/loss_ctc": 1.0347940921783447, "train/loss_error": 0.5109751224517822, "train/loss_total": 0.6157389283180237 }, { "epoch": 1.9350788137857333, "step": 7243, "train/loss_ctc": 0.732948899269104, "train/loss_error": 0.4613451063632965, "train/loss_total": 0.5156658887863159 }, { "epoch": 1.9353459791611007, "step": 7244, "train/loss_ctc": 0.7014908194541931, "train/loss_error": 0.4160054922103882, "train/loss_total": 0.4731025695800781 }, { "epoch": 1.9356131445364682, "step": 7245, "train/loss_ctc": 0.45061445236206055, "train/loss_error": 0.4326231777667999, 
"train/loss_total": 0.4362214207649231 }, { "epoch": 1.9358803099118354, "step": 7246, "train/loss_ctc": 0.9971581101417542, "train/loss_error": 0.5060456991195679, "train/loss_total": 0.6042681932449341 }, { "epoch": 1.9361474752872028, "step": 7247, "train/loss_ctc": 0.8788666725158691, "train/loss_error": 0.473631888628006, "train/loss_total": 0.5546788573265076 }, { "epoch": 1.9364146406625702, "step": 7248, "train/loss_ctc": 0.7677059769630432, "train/loss_error": 0.47100555896759033, "train/loss_total": 0.530345618724823 }, { "epoch": 1.9366818060379374, "step": 7249, "train/loss_ctc": 1.0385150909423828, "train/loss_error": 0.45543941855430603, "train/loss_total": 0.5720545649528503 }, { "epoch": 1.9369489714133048, "grad_norm": 2.1464169025421143, "learning_rate": 1.838632113278119e-05, "loss": 0.5423, "step": 7250 }, { "epoch": 1.9369489714133048, "step": 7250, "train/loss_ctc": 0.2675802409648895, "train/loss_error": 0.47147104144096375, "train/loss_total": 0.4306929111480713 }, { "epoch": 1.9372161367886722, "step": 7251, "train/loss_ctc": 0.8570787310600281, "train/loss_error": 0.508456289768219, "train/loss_total": 0.5781807899475098 }, { "epoch": 1.9374833021640394, "step": 7252, "train/loss_ctc": 0.2902938425540924, "train/loss_error": 0.374673992395401, "train/loss_total": 0.3577979803085327 }, { "epoch": 1.937750467539407, "step": 7253, "train/loss_ctc": 0.5761269927024841, "train/loss_error": 0.5546077489852905, "train/loss_total": 0.5589116215705872 }, { "epoch": 1.9380176329147742, "step": 7254, "train/loss_ctc": 0.25166821479797363, "train/loss_error": 0.44069957733154297, "train/loss_total": 0.4028933048248291 }, { "epoch": 1.9382847982901414, "step": 7255, "train/loss_ctc": 0.5557010173797607, "train/loss_error": 0.3994429111480713, "train/loss_total": 0.4306945204734802 }, { "epoch": 1.938551963665509, "step": 7256, "train/loss_ctc": 0.5975894927978516, "train/loss_error": 0.408331036567688, "train/loss_total": 0.4461827278137207 }, { 
"epoch": 1.9388191290408763, "step": 7257, "train/loss_ctc": 0.4987468123435974, "train/loss_error": 0.44024431705474854, "train/loss_total": 0.45194482803344727 }, { "epoch": 1.9390862944162437, "step": 7258, "train/loss_ctc": 0.6618072986602783, "train/loss_error": 0.3940172493457794, "train/loss_total": 0.44757527112960815 }, { "epoch": 1.9393534597916111, "step": 7259, "train/loss_ctc": 0.686241865158081, "train/loss_error": 0.46452412009239197, "train/loss_total": 0.5088676810264587 }, { "epoch": 1.9396206251669783, "grad_norm": 1.566837191581726, "learning_rate": 1.8370291210259152e-05, "loss": 0.4614, "step": 7260 }, { "epoch": 1.9396206251669783, "step": 7260, "train/loss_ctc": 0.42138606309890747, "train/loss_error": 0.40356090664863586, "train/loss_total": 0.40712594985961914 }, { "epoch": 1.9398877905423457, "step": 7261, "train/loss_ctc": 0.5793806314468384, "train/loss_error": 0.44643935561180115, "train/loss_total": 0.47302761673927307 }, { "epoch": 1.9401549559177131, "step": 7262, "train/loss_ctc": 0.7252069115638733, "train/loss_error": 0.4820597171783447, "train/loss_total": 0.5306891798973083 }, { "epoch": 1.9404221212930803, "step": 7263, "train/loss_ctc": 1.2560842037200928, "train/loss_error": 0.426714152097702, "train/loss_total": 0.5925881862640381 }, { "epoch": 1.9406892866684478, "step": 7264, "train/loss_ctc": 1.0390797853469849, "train/loss_error": 0.4722229242324829, "train/loss_total": 0.5855942964553833 }, { "epoch": 1.9409564520438152, "step": 7265, "train/loss_ctc": 1.07976233959198, "train/loss_error": 0.5146569013595581, "train/loss_total": 0.6276779770851135 }, { "epoch": 1.9412236174191824, "step": 7266, "train/loss_ctc": 1.0005313158035278, "train/loss_error": 0.41813817620277405, "train/loss_total": 0.5346168279647827 }, { "epoch": 1.9414907827945498, "step": 7267, "train/loss_ctc": 0.8329426050186157, "train/loss_error": 0.5060239434242249, "train/loss_total": 0.571407675743103 }, { "epoch": 1.9417579481699172, "step": 7268, 
"train/loss_ctc": 0.9697518348693848, "train/loss_error": 0.44666799902915955, "train/loss_total": 0.5512847900390625 }, { "epoch": 1.9420251135452844, "step": 7269, "train/loss_ctc": 0.8939329981803894, "train/loss_error": 0.5705706477165222, "train/loss_total": 0.6352431178092957 }, { "epoch": 1.942292278920652, "grad_norm": 1.95779287815094, "learning_rate": 1.835426128773711e-05, "loss": 0.5509, "step": 7270 }, { "epoch": 1.942292278920652, "step": 7270, "train/loss_ctc": 0.6471577882766724, "train/loss_error": 0.4168926477432251, "train/loss_total": 0.46294569969177246 }, { "epoch": 1.9425594442960192, "step": 7271, "train/loss_ctc": 0.5516809225082397, "train/loss_error": 0.43226614594459534, "train/loss_total": 0.4561491012573242 }, { "epoch": 1.9428266096713864, "step": 7272, "train/loss_ctc": 0.8130946755409241, "train/loss_error": 0.49664199352264404, "train/loss_total": 0.5599325299263 }, { "epoch": 1.943093775046754, "step": 7273, "train/loss_ctc": 0.9406599402427673, "train/loss_error": 0.46546927094459534, "train/loss_total": 0.5605074167251587 }, { "epoch": 1.9433609404221213, "step": 7274, "train/loss_ctc": 0.4384283721446991, "train/loss_error": 0.4255480170249939, "train/loss_total": 0.4281240999698639 }, { "epoch": 1.9436281057974887, "step": 7275, "train/loss_ctc": 0.6849582195281982, "train/loss_error": 0.4558475613594055, "train/loss_total": 0.501669704914093 }, { "epoch": 1.943895271172856, "step": 7276, "train/loss_ctc": 1.127924919128418, "train/loss_error": 0.47400975227355957, "train/loss_total": 0.6047928333282471 }, { "epoch": 1.9441624365482233, "step": 7277, "train/loss_ctc": 0.5879080891609192, "train/loss_error": 0.430050253868103, "train/loss_total": 0.46162182092666626 }, { "epoch": 1.9444296019235907, "step": 7278, "train/loss_ctc": 0.6681497693061829, "train/loss_error": 0.518860936164856, "train/loss_total": 0.5487186908721924 }, { "epoch": 1.9446967672989581, "step": 7279, "train/loss_ctc": 0.5736120939254761, 
"train/loss_error": 0.45225274562835693, "train/loss_total": 0.47652462124824524 }, { "epoch": 1.9449639326743253, "grad_norm": 2.815074920654297, "learning_rate": 1.833823136521507e-05, "loss": 0.5061, "step": 7280 }, { "epoch": 1.9449639326743253, "step": 7280, "train/loss_ctc": 0.9769181609153748, "train/loss_error": 0.42916715145111084, "train/loss_total": 0.5387173891067505 }, { "epoch": 1.9452310980496927, "step": 7281, "train/loss_ctc": 0.6460146903991699, "train/loss_error": 0.44563835859298706, "train/loss_total": 0.4857136607170105 }, { "epoch": 1.9454982634250602, "step": 7282, "train/loss_ctc": 0.7619000673294067, "train/loss_error": 0.4812839925289154, "train/loss_total": 0.5374072194099426 }, { "epoch": 1.9457654288004274, "step": 7283, "train/loss_ctc": 0.5117214918136597, "train/loss_error": 0.43382367491722107, "train/loss_total": 0.4494032561779022 }, { "epoch": 1.9460325941757948, "step": 7284, "train/loss_ctc": 0.9180561304092407, "train/loss_error": 0.42072242498397827, "train/loss_total": 0.5201891660690308 }, { "epoch": 1.9462997595511622, "step": 7285, "train/loss_ctc": 0.9120031595230103, "train/loss_error": 0.5328482985496521, "train/loss_total": 0.6086792945861816 }, { "epoch": 1.9465669249265294, "step": 7286, "train/loss_ctc": 0.5885496139526367, "train/loss_error": 0.444165974855423, "train/loss_total": 0.47304272651672363 }, { "epoch": 1.946834090301897, "step": 7287, "train/loss_ctc": 0.3039366900920868, "train/loss_error": 0.40910348296165466, "train/loss_total": 0.38807013630867004 }, { "epoch": 1.9471012556772642, "step": 7288, "train/loss_ctc": 0.4459298253059387, "train/loss_error": 0.4731627404689789, "train/loss_total": 0.46771615743637085 }, { "epoch": 1.9473684210526314, "step": 7289, "train/loss_ctc": 0.45670241117477417, "train/loss_error": 0.5244609117507935, "train/loss_total": 0.5109091997146606 }, { "epoch": 1.947635586427999, "grad_norm": 3.6524622440338135, "learning_rate": 1.8322201442693027e-05, "loss": 0.498, 
"step": 7290 }, { "epoch": 1.947635586427999, "step": 7290, "train/loss_ctc": 0.668971061706543, "train/loss_error": 0.45355793833732605, "train/loss_total": 0.49664056301116943 }, { "epoch": 1.9479027518033663, "step": 7291, "train/loss_ctc": 0.6661545038223267, "train/loss_error": 0.39449673891067505, "train/loss_total": 0.4488282799720764 }, { "epoch": 1.9481699171787337, "step": 7292, "train/loss_ctc": 0.40536555647850037, "train/loss_error": 0.4076739549636841, "train/loss_total": 0.4072122871875763 }, { "epoch": 1.948437082554101, "step": 7293, "train/loss_ctc": 0.42218804359436035, "train/loss_error": 0.4265187084674835, "train/loss_total": 0.4256525933742523 }, { "epoch": 1.9487042479294683, "step": 7294, "train/loss_ctc": 0.4042498469352722, "train/loss_error": 0.5745166540145874, "train/loss_total": 0.5404632687568665 }, { "epoch": 1.9489714133048357, "step": 7295, "train/loss_ctc": 0.9838479161262512, "train/loss_error": 0.4621181786060333, "train/loss_total": 0.5664641261100769 }, { "epoch": 1.9492385786802031, "step": 7296, "train/loss_ctc": 0.6421383619308472, "train/loss_error": 0.42208459973335266, "train/loss_total": 0.46609532833099365 }, { "epoch": 1.9495057440555703, "step": 7297, "train/loss_ctc": 0.908928394317627, "train/loss_error": 0.5140016078948975, "train/loss_total": 0.5929870009422302 }, { "epoch": 1.9497729094309377, "step": 7298, "train/loss_ctc": 0.6866602897644043, "train/loss_error": 0.4945129156112671, "train/loss_total": 0.5329424142837524 }, { "epoch": 1.9500400748063051, "step": 7299, "train/loss_ctc": 0.4913080334663391, "train/loss_error": 0.4636181890964508, "train/loss_total": 0.4691561460494995 }, { "epoch": 1.9503072401816723, "grad_norm": 2.627354145050049, "learning_rate": 1.8306171520170985e-05, "loss": 0.4946, "step": 7300 }, { "epoch": 1.9503072401816723, "step": 7300, "train/loss_ctc": 0.4485170841217041, "train/loss_error": 0.45248186588287354, "train/loss_total": 0.4516889154911041 }, { "epoch": 
1.9505744055570398, "step": 7301, "train/loss_ctc": 0.9051002264022827, "train/loss_error": 0.4308278262615204, "train/loss_total": 0.5256823301315308 }, { "epoch": 1.9508415709324072, "step": 7302, "train/loss_ctc": 0.4549886882305145, "train/loss_error": 0.4200060963630676, "train/loss_total": 0.42700260877609253 }, { "epoch": 1.9511087363077744, "step": 7303, "train/loss_ctc": 0.988162636756897, "train/loss_error": 0.5581488013267517, "train/loss_total": 0.6441515684127808 }, { "epoch": 1.951375901683142, "step": 7304, "train/loss_ctc": 0.4370925724506378, "train/loss_error": 0.44450029730796814, "train/loss_total": 0.44301876425743103 }, { "epoch": 1.9516430670585092, "step": 7305, "train/loss_ctc": 0.7304053902626038, "train/loss_error": 0.4493235945701599, "train/loss_total": 0.5055399537086487 }, { "epoch": 1.9519102324338766, "step": 7306, "train/loss_ctc": 0.907569169998169, "train/loss_error": 0.455737441778183, "train/loss_total": 0.5461037755012512 }, { "epoch": 1.952177397809244, "step": 7307, "train/loss_ctc": 0.5976409912109375, "train/loss_error": 0.3785998821258545, "train/loss_total": 0.4224081039428711 }, { "epoch": 1.9524445631846112, "step": 7308, "train/loss_ctc": 0.7269567251205444, "train/loss_error": 0.45575442910194397, "train/loss_total": 0.5099948644638062 }, { "epoch": 1.9527117285599787, "step": 7309, "train/loss_ctc": 0.688848078250885, "train/loss_error": 0.5604695081710815, "train/loss_total": 0.5861452221870422 }, { "epoch": 1.952978893935346, "grad_norm": 2.5489652156829834, "learning_rate": 1.8290141597648943e-05, "loss": 0.5062, "step": 7310 }, { "epoch": 1.952978893935346, "step": 7310, "train/loss_ctc": 0.5144813656806946, "train/loss_error": 0.40756624937057495, "train/loss_total": 0.4289492666721344 }, { "epoch": 1.9532460593107133, "step": 7311, "train/loss_ctc": 0.6012526750564575, "train/loss_error": 0.43853527307510376, "train/loss_total": 0.4710787534713745 }, { "epoch": 1.9535132246860807, "step": 7312, 
"train/loss_ctc": 0.8610554337501526, "train/loss_error": 0.40866488218307495, "train/loss_total": 0.49914300441741943 }, { "epoch": 1.953780390061448, "step": 7313, "train/loss_ctc": 0.7060007452964783, "train/loss_error": 0.4612181484699249, "train/loss_total": 0.5101746916770935 }, { "epoch": 1.9540475554368153, "step": 7314, "train/loss_ctc": 0.5740218162536621, "train/loss_error": 0.45934003591537476, "train/loss_total": 0.48227638006210327 }, { "epoch": 1.9543147208121827, "step": 7315, "train/loss_ctc": 0.999692976474762, "train/loss_error": 0.5131327509880066, "train/loss_total": 0.6104447841644287 }, { "epoch": 1.9545818861875501, "step": 7316, "train/loss_ctc": 0.600488543510437, "train/loss_error": 0.44118231534957886, "train/loss_total": 0.4730435609817505 }, { "epoch": 1.9548490515629173, "step": 7317, "train/loss_ctc": 0.7494605779647827, "train/loss_error": 0.46761611104011536, "train/loss_total": 0.5239850282669067 }, { "epoch": 1.9551162169382847, "step": 7318, "train/loss_ctc": 1.0797624588012695, "train/loss_error": 0.48168981075286865, "train/loss_total": 0.6013043522834778 }, { "epoch": 1.9553833823136522, "step": 7319, "train/loss_ctc": 0.9169532060623169, "train/loss_error": 0.4633423089981079, "train/loss_total": 0.5540645122528076 }, { "epoch": 1.9556505476890194, "grad_norm": 1.9928098917007446, "learning_rate": 1.8274111675126904e-05, "loss": 0.5154, "step": 7320 }, { "epoch": 1.9556505476890194, "step": 7320, "train/loss_ctc": 0.5372933149337769, "train/loss_error": 0.4999637305736542, "train/loss_total": 0.5074296593666077 }, { "epoch": 1.955917713064387, "step": 7321, "train/loss_ctc": 0.9361735582351685, "train/loss_error": 0.44197559356689453, "train/loss_total": 0.5408151745796204 }, { "epoch": 1.9561848784397542, "step": 7322, "train/loss_ctc": 0.6149551868438721, "train/loss_error": 0.5032796263694763, "train/loss_total": 0.5256147384643555 }, { "epoch": 1.9564520438151216, "step": 7323, "train/loss_ctc": 0.6971532702445984, 
"train/loss_error": 0.44493576884269714, "train/loss_total": 0.4953792691230774 }, { "epoch": 1.956719209190489, "step": 7324, "train/loss_ctc": 0.8353725671768188, "train/loss_error": 0.40078118443489075, "train/loss_total": 0.4876994490623474 }, { "epoch": 1.9569863745658562, "step": 7325, "train/loss_ctc": 0.45559948682785034, "train/loss_error": 0.4152223765850067, "train/loss_total": 0.42329782247543335 }, { "epoch": 1.9572535399412236, "step": 7326, "train/loss_ctc": 0.8794701099395752, "train/loss_error": 0.4034801423549652, "train/loss_total": 0.49867814779281616 }, { "epoch": 1.957520705316591, "step": 7327, "train/loss_ctc": 0.5443901419639587, "train/loss_error": 0.46585795283317566, "train/loss_total": 0.48156440258026123 }, { "epoch": 1.9577878706919583, "step": 7328, "train/loss_ctc": 0.30775198340415955, "train/loss_error": 0.4677051901817322, "train/loss_total": 0.43571457266807556 }, { "epoch": 1.9580550360673257, "step": 7329, "train/loss_ctc": 1.1363930702209473, "train/loss_error": 0.4730997383594513, "train/loss_total": 0.6057584285736084 }, { "epoch": 1.958322201442693, "grad_norm": 1.875556468963623, "learning_rate": 1.8258081752604862e-05, "loss": 0.5002, "step": 7330 }, { "epoch": 1.958322201442693, "step": 7330, "train/loss_ctc": 1.1788829565048218, "train/loss_error": 0.4575665593147278, "train/loss_total": 0.6018298268318176 }, { "epoch": 1.9585893668180603, "step": 7331, "train/loss_ctc": 0.6993611454963684, "train/loss_error": 0.4122730791568756, "train/loss_total": 0.4696906805038452 }, { "epoch": 1.9588565321934277, "step": 7332, "train/loss_ctc": 0.503984808921814, "train/loss_error": 0.38259050250053406, "train/loss_total": 0.40686938166618347 }, { "epoch": 1.9591236975687951, "step": 7333, "train/loss_ctc": 1.471205234527588, "train/loss_error": 0.38642409443855286, "train/loss_total": 0.6033803224563599 }, { "epoch": 1.9593908629441623, "step": 7334, "train/loss_ctc": 0.6807968616485596, "train/loss_error": 0.5080122351646423, 
"train/loss_total": 0.5425691604614258 }, { "epoch": 1.95965802831953, "step": 7335, "train/loss_ctc": 0.5544759035110474, "train/loss_error": 0.4476192891597748, "train/loss_total": 0.46899062395095825 }, { "epoch": 1.9599251936948971, "step": 7336, "train/loss_ctc": 0.8562121391296387, "train/loss_error": 0.46975234150886536, "train/loss_total": 0.5470443367958069 }, { "epoch": 1.9601923590702643, "step": 7337, "train/loss_ctc": 1.0338095426559448, "train/loss_error": 0.4603573977947235, "train/loss_total": 0.5750478506088257 }, { "epoch": 1.960459524445632, "step": 7338, "train/loss_ctc": 0.7484607696533203, "train/loss_error": 0.41407835483551025, "train/loss_total": 0.4809548258781433 }, { "epoch": 1.9607266898209992, "step": 7339, "train/loss_ctc": 0.831292986869812, "train/loss_error": 0.37477099895477295, "train/loss_total": 0.46607542037963867 }, { "epoch": 1.9609938551963666, "grad_norm": 1.5880793333053589, "learning_rate": 1.824205183008282e-05, "loss": 0.5162, "step": 7340 }, { "epoch": 1.9609938551963666, "step": 7340, "train/loss_ctc": 0.726417064666748, "train/loss_error": 0.48410314321517944, "train/loss_total": 0.5325659513473511 }, { "epoch": 1.961261020571734, "step": 7341, "train/loss_ctc": 0.5255703926086426, "train/loss_error": 0.49852269887924194, "train/loss_total": 0.5039322376251221 }, { "epoch": 1.9615281859471012, "step": 7342, "train/loss_ctc": 0.3231968283653259, "train/loss_error": 0.42849403619766235, "train/loss_total": 0.4074345827102661 }, { "epoch": 1.9617953513224686, "step": 7343, "train/loss_ctc": 1.1120545864105225, "train/loss_error": 0.4180472195148468, "train/loss_total": 0.5568487048149109 }, { "epoch": 1.962062516697836, "step": 7344, "train/loss_ctc": 0.5356913805007935, "train/loss_error": 0.4359515309333801, "train/loss_total": 0.45589950680732727 }, { "epoch": 1.9623296820732032, "step": 7345, "train/loss_ctc": 1.1785035133361816, "train/loss_error": 0.48229533433914185, "train/loss_total": 0.6215369701385498 }, { 
"epoch": 1.9625968474485707, "step": 7346, "train/loss_ctc": 0.7460125088691711, "train/loss_error": 0.4386199414730072, "train/loss_total": 0.500098466873169 }, { "epoch": 1.962864012823938, "step": 7347, "train/loss_ctc": 0.9539121389389038, "train/loss_error": 0.48462459444999695, "train/loss_total": 0.5784821510314941 }, { "epoch": 1.9631311781993053, "step": 7348, "train/loss_ctc": 0.6570532917976379, "train/loss_error": 0.42346489429473877, "train/loss_total": 0.4701825678348541 }, { "epoch": 1.9633983435746727, "step": 7349, "train/loss_ctc": 0.3124731183052063, "train/loss_error": 0.42495253682136536, "train/loss_total": 0.402456670999527 }, { "epoch": 1.96366550895004, "grad_norm": 1.720970630645752, "learning_rate": 1.8226021907560782e-05, "loss": 0.5029, "step": 7350 }, { "epoch": 1.96366550895004, "step": 7350, "train/loss_ctc": 0.7812719345092773, "train/loss_error": 0.44532841444015503, "train/loss_total": 0.5125171542167664 }, { "epoch": 1.9639326743254073, "step": 7351, "train/loss_ctc": 0.4731971323490143, "train/loss_error": 0.48789599537849426, "train/loss_total": 0.4849562346935272 }, { "epoch": 1.964199839700775, "step": 7352, "train/loss_ctc": 1.1543781757354736, "train/loss_error": 0.4225316643714905, "train/loss_total": 0.5689009428024292 }, { "epoch": 1.9644670050761421, "step": 7353, "train/loss_ctc": 0.4403492212295532, "train/loss_error": 0.4468154311180115, "train/loss_total": 0.4455222189426422 }, { "epoch": 1.9647341704515093, "step": 7354, "train/loss_ctc": 0.4735463857650757, "train/loss_error": 0.3924909234046936, "train/loss_total": 0.40870201587677 }, { "epoch": 1.965001335826877, "step": 7355, "train/loss_ctc": 0.4744114875793457, "train/loss_error": 0.46476030349731445, "train/loss_total": 0.4666905701160431 }, { "epoch": 1.9652685012022442, "step": 7356, "train/loss_ctc": 0.8177571296691895, "train/loss_error": 0.49160099029541016, "train/loss_total": 0.5568322539329529 }, { "epoch": 1.9655356665776116, "step": 7357, 
"train/loss_ctc": 0.6361986994743347, "train/loss_error": 0.5000619292259216, "train/loss_total": 0.5272892713546753 }, { "epoch": 1.965802831952979, "step": 7358, "train/loss_ctc": 1.2577600479125977, "train/loss_error": 0.4724752604961395, "train/loss_total": 0.6295322179794312 }, { "epoch": 1.9660699973283462, "step": 7359, "train/loss_ctc": 1.2743912935256958, "train/loss_error": 0.45048806071281433, "train/loss_total": 0.6152687072753906 }, { "epoch": 1.9663371627037136, "grad_norm": 1.6370176076889038, "learning_rate": 1.820999198503874e-05, "loss": 0.5216, "step": 7360 }, { "epoch": 1.9663371627037136, "step": 7360, "train/loss_ctc": 0.5464348793029785, "train/loss_error": 0.4385152757167816, "train/loss_total": 0.4600992202758789 }, { "epoch": 1.966604328079081, "step": 7361, "train/loss_ctc": 0.7197577953338623, "train/loss_error": 0.5089032649993896, "train/loss_total": 0.5510741472244263 }, { "epoch": 1.9668714934544482, "step": 7362, "train/loss_ctc": 1.4624660015106201, "train/loss_error": 0.4507107138633728, "train/loss_total": 0.6530617475509644 }, { "epoch": 1.9671386588298156, "step": 7363, "train/loss_ctc": 0.41263124346733093, "train/loss_error": 0.47890204191207886, "train/loss_total": 0.4656478762626648 }, { "epoch": 1.967405824205183, "step": 7364, "train/loss_ctc": 0.6065933704376221, "train/loss_error": 0.5617315173149109, "train/loss_total": 0.57070392370224 }, { "epoch": 1.9676729895805503, "step": 7365, "train/loss_ctc": 1.0551338195800781, "train/loss_error": 0.4720107614994049, "train/loss_total": 0.5886353850364685 }, { "epoch": 1.9679401549559177, "step": 7366, "train/loss_ctc": 0.6595081090927124, "train/loss_error": 0.4595889747142792, "train/loss_total": 0.4995728135108948 }, { "epoch": 1.968207320331285, "step": 7367, "train/loss_ctc": 1.737196922302246, "train/loss_error": 0.5067638158798218, "train/loss_total": 0.7528504133224487 }, { "epoch": 1.9684744857066523, "step": 7368, "train/loss_ctc": 0.6913958191871643, 
"train/loss_error": 0.41116687655448914, "train/loss_total": 0.4672126770019531 }, { "epoch": 1.96874165108202, "step": 7369, "train/loss_ctc": 0.6422016620635986, "train/loss_error": 0.4378848969936371, "train/loss_total": 0.47874826192855835 }, { "epoch": 1.9690088164573871, "grad_norm": 2.274383544921875, "learning_rate": 1.8193962062516698e-05, "loss": 0.5488, "step": 7370 }, { "epoch": 1.9690088164573871, "step": 7370, "train/loss_ctc": 0.5174590945243835, "train/loss_error": 0.4409925043582916, "train/loss_total": 0.45628583431243896 }, { "epoch": 1.9692759818327543, "step": 7371, "train/loss_ctc": 1.6762182712554932, "train/loss_error": 0.5059393644332886, "train/loss_total": 0.7399951815605164 }, { "epoch": 1.969543147208122, "step": 7372, "train/loss_ctc": 0.44844532012939453, "train/loss_error": 0.39854297041893005, "train/loss_total": 0.40852347016334534 }, { "epoch": 1.9698103125834892, "step": 7373, "train/loss_ctc": 0.562877893447876, "train/loss_error": 0.5506152510643005, "train/loss_total": 0.5530678033828735 }, { "epoch": 1.9700774779588566, "step": 7374, "train/loss_ctc": 0.3029070496559143, "train/loss_error": 0.4549252390861511, "train/loss_total": 0.42452162504196167 }, { "epoch": 1.970344643334224, "step": 7375, "train/loss_ctc": 0.8495191335678101, "train/loss_error": 0.4881955683231354, "train/loss_total": 0.5604602694511414 }, { "epoch": 1.9706118087095912, "step": 7376, "train/loss_ctc": 0.4317455589771271, "train/loss_error": 0.5075421929359436, "train/loss_total": 0.49238288402557373 }, { "epoch": 1.9708789740849586, "step": 7377, "train/loss_ctc": 0.8041526079177856, "train/loss_error": 0.41924962401390076, "train/loss_total": 0.49623024463653564 }, { "epoch": 1.971146139460326, "step": 7378, "train/loss_ctc": 0.43018293380737305, "train/loss_error": 0.42711371183395386, "train/loss_total": 0.4277275800704956 }, { "epoch": 1.9714133048356932, "step": 7379, "train/loss_ctc": 0.8924826979637146, "train/loss_error": 0.460906058549881, 
"train/loss_total": 0.5472214221954346 }, { "epoch": 1.9716804702110606, "grad_norm": 2.3655595779418945, "learning_rate": 1.817793213999466e-05, "loss": 0.5106, "step": 7380 }, { "epoch": 1.9716804702110606, "step": 7380, "train/loss_ctc": 1.2558352947235107, "train/loss_error": 0.5399116277694702, "train/loss_total": 0.6830964088439941 }, { "epoch": 1.971947635586428, "step": 7381, "train/loss_ctc": 0.7347010374069214, "train/loss_error": 0.43020787835121155, "train/loss_total": 0.4911065101623535 }, { "epoch": 1.9722148009617952, "step": 7382, "train/loss_ctc": 0.818812370300293, "train/loss_error": 0.41303813457489014, "train/loss_total": 0.4941929876804352 }, { "epoch": 1.9724819663371627, "step": 7383, "train/loss_ctc": 0.8098565936088562, "train/loss_error": 0.42515745759010315, "train/loss_total": 0.5020973086357117 }, { "epoch": 1.97274913171253, "step": 7384, "train/loss_ctc": 0.8164392113685608, "train/loss_error": 0.4791467785835266, "train/loss_total": 0.5466052889823914 }, { "epoch": 1.9730162970878973, "step": 7385, "train/loss_ctc": 0.9089685678482056, "train/loss_error": 0.38568246364593506, "train/loss_total": 0.4903396964073181 }, { "epoch": 1.973283462463265, "step": 7386, "train/loss_ctc": 1.3724981546401978, "train/loss_error": 0.49424073100090027, "train/loss_total": 0.6698921918869019 }, { "epoch": 1.973550627838632, "step": 7387, "train/loss_ctc": 0.76678466796875, "train/loss_error": 0.5006311535835266, "train/loss_total": 0.5538618564605713 }, { "epoch": 1.9738177932139993, "step": 7388, "train/loss_ctc": 0.8285017609596252, "train/loss_error": 0.48352107405662537, "train/loss_total": 0.5525172352790833 }, { "epoch": 1.974084958589367, "step": 7389, "train/loss_ctc": 0.3874847888946533, "train/loss_error": 0.41168418526649475, "train/loss_total": 0.4068443179130554 }, { "epoch": 1.9743521239647341, "grad_norm": 1.374990463256836, "learning_rate": 1.8161902217472618e-05, "loss": 0.5391, "step": 7390 }, { "epoch": 1.9743521239647341, 
"step": 7390, "train/loss_ctc": 1.2063847780227661, "train/loss_error": 0.4651981592178345, "train/loss_total": 0.6134355068206787 }, { "epoch": 1.9746192893401016, "step": 7391, "train/loss_ctc": 0.6987848877906799, "train/loss_error": 0.4906212091445923, "train/loss_total": 0.5322539806365967 }, { "epoch": 1.974886454715469, "step": 7392, "train/loss_ctc": 0.5923253893852234, "train/loss_error": 0.4460747241973877, "train/loss_total": 0.4753248691558838 }, { "epoch": 1.9751536200908362, "step": 7393, "train/loss_ctc": 0.5818907022476196, "train/loss_error": 0.44569501280784607, "train/loss_total": 0.47293418645858765 }, { "epoch": 1.9754207854662036, "step": 7394, "train/loss_ctc": 1.0521620512008667, "train/loss_error": 0.3889751732349396, "train/loss_total": 0.5216125249862671 }, { "epoch": 1.975687950841571, "step": 7395, "train/loss_ctc": 1.4952657222747803, "train/loss_error": 0.4683676064014435, "train/loss_total": 0.6737472414970398 }, { "epoch": 1.9759551162169382, "step": 7396, "train/loss_ctc": 0.6627525687217712, "train/loss_error": 0.4620495140552521, "train/loss_total": 0.502190113067627 }, { "epoch": 1.9762222815923056, "step": 7397, "train/loss_ctc": 0.7936193943023682, "train/loss_error": 0.5122817754745483, "train/loss_total": 0.5685492753982544 }, { "epoch": 1.976489446967673, "step": 7398, "train/loss_ctc": 0.7735035419464111, "train/loss_error": 0.40977010130882263, "train/loss_total": 0.4825168251991272 }, { "epoch": 1.9767566123430402, "step": 7399, "train/loss_ctc": 0.6663718819618225, "train/loss_error": 0.4035186171531677, "train/loss_total": 0.4560892879962921 }, { "epoch": 1.9770237777184076, "grad_norm": 4.055820941925049, "learning_rate": 1.8145872294950576e-05, "loss": 0.5299, "step": 7400 }, { "epoch": 1.9770237777184076, "step": 7400, "train/loss_ctc": 0.5026873350143433, "train/loss_error": 0.43024253845214844, "train/loss_total": 0.4447315037250519 }, { "epoch": 1.977290943093775, "step": 7401, "train/loss_ctc": 
0.7371323704719543, "train/loss_error": 0.43559902906417847, "train/loss_total": 0.49590569734573364 }, { "epoch": 1.9775581084691423, "step": 7402, "train/loss_ctc": 0.5890699625015259, "train/loss_error": 0.4332364797592163, "train/loss_total": 0.4644031822681427 }, { "epoch": 1.97782527384451, "step": 7403, "train/loss_ctc": 0.5813475847244263, "train/loss_error": 0.4041951894760132, "train/loss_total": 0.43962568044662476 }, { "epoch": 1.978092439219877, "step": 7404, "train/loss_ctc": 0.8235964179039001, "train/loss_error": 0.5060489177703857, "train/loss_total": 0.5695583820343018 }, { "epoch": 1.9783596045952443, "step": 7405, "train/loss_ctc": 0.866084098815918, "train/loss_error": 0.5079419612884521, "train/loss_total": 0.5795704126358032 }, { "epoch": 1.978626769970612, "step": 7406, "train/loss_ctc": 0.7184976935386658, "train/loss_error": 0.517117977142334, "train/loss_total": 0.5573939085006714 }, { "epoch": 1.9788939353459791, "step": 7407, "train/loss_ctc": 0.4214937090873718, "train/loss_error": 0.4486564099788666, "train/loss_total": 0.44322389364242554 }, { "epoch": 1.9791611007213465, "step": 7408, "train/loss_ctc": 0.7324497699737549, "train/loss_error": 0.4890540838241577, "train/loss_total": 0.537733256816864 }, { "epoch": 1.979428266096714, "step": 7409, "train/loss_ctc": 0.6007074117660522, "train/loss_error": 0.4555521011352539, "train/loss_total": 0.48458316922187805 }, { "epoch": 1.9796954314720812, "grad_norm": 1.7451330423355103, "learning_rate": 1.8129842372428534e-05, "loss": 0.5017, "step": 7410 }, { "epoch": 1.9796954314720812, "step": 7410, "train/loss_ctc": 0.8341988325119019, "train/loss_error": 0.4362916052341461, "train/loss_total": 0.5158730745315552 }, { "epoch": 1.9799625968474486, "step": 7411, "train/loss_ctc": 1.0939924716949463, "train/loss_error": 0.5283448696136475, "train/loss_total": 0.6414744257926941 }, { "epoch": 1.980229762222816, "step": 7412, "train/loss_ctc": 1.3753604888916016, "train/loss_error": 
0.43524205684661865, "train/loss_total": 0.6232657432556152 }, { "epoch": 1.9804969275981832, "step": 7413, "train/loss_ctc": 0.4080989360809326, "train/loss_error": 0.46491044759750366, "train/loss_total": 0.4535481631755829 }, { "epoch": 1.9807640929735506, "step": 7414, "train/loss_ctc": 1.154545783996582, "train/loss_error": 0.4594355523586273, "train/loss_total": 0.5984575748443604 }, { "epoch": 1.981031258348918, "step": 7415, "train/loss_ctc": 0.922415018081665, "train/loss_error": 0.43312016129493713, "train/loss_total": 0.5309791564941406 }, { "epoch": 1.9812984237242852, "step": 7416, "train/loss_ctc": 1.1711843013763428, "train/loss_error": 0.4419529139995575, "train/loss_total": 0.5877991914749146 }, { "epoch": 1.9815655890996526, "step": 7417, "train/loss_ctc": 0.801776647567749, "train/loss_error": 0.5132972002029419, "train/loss_total": 0.5709930658340454 }, { "epoch": 1.98183275447502, "step": 7418, "train/loss_ctc": 0.293697714805603, "train/loss_error": 0.4654330015182495, "train/loss_total": 0.4310859441757202 }, { "epoch": 1.9820999198503872, "step": 7419, "train/loss_ctc": 0.7499774098396301, "train/loss_error": 0.4383491575717926, "train/loss_total": 0.500674843788147 }, { "epoch": 1.9823670852257549, "grad_norm": 2.4452552795410156, "learning_rate": 1.8113812449906492e-05, "loss": 0.5454, "step": 7420 }, { "epoch": 1.9823670852257549, "step": 7420, "train/loss_ctc": 0.9261151552200317, "train/loss_error": 0.4943946599960327, "train/loss_total": 0.5807387828826904 }, { "epoch": 1.982634250601122, "step": 7421, "train/loss_ctc": 0.5515404343605042, "train/loss_error": 0.4681842625141144, "train/loss_total": 0.4848555028438568 }, { "epoch": 1.9829014159764895, "step": 7422, "train/loss_ctc": 0.9539410471916199, "train/loss_error": 0.4783390164375305, "train/loss_total": 0.5734593868255615 }, { "epoch": 1.983168581351857, "step": 7423, "train/loss_ctc": 1.800449013710022, "train/loss_error": 0.47063979506492615, "train/loss_total": 
0.7366016507148743 }, { "epoch": 1.9834357467272241, "step": 7424, "train/loss_ctc": 0.9405884146690369, "train/loss_error": 0.4255625903606415, "train/loss_total": 0.5285677909851074 }, { "epoch": 1.9837029121025915, "step": 7425, "train/loss_ctc": 0.6936644315719604, "train/loss_error": 0.4555644094944, "train/loss_total": 0.50318443775177 }, { "epoch": 1.983970077477959, "step": 7426, "train/loss_ctc": 0.6889529824256897, "train/loss_error": 0.49568694829940796, "train/loss_total": 0.5343401432037354 }, { "epoch": 1.9842372428533261, "step": 7427, "train/loss_ctc": 0.8541650772094727, "train/loss_error": 0.48246994614601135, "train/loss_total": 0.5568089485168457 }, { "epoch": 1.9845044082286936, "step": 7428, "train/loss_ctc": 1.0632214546203613, "train/loss_error": 0.4892353415489197, "train/loss_total": 0.604032576084137 }, { "epoch": 1.984771573604061, "step": 7429, "train/loss_ctc": 0.8167860507965088, "train/loss_error": 0.5031419396400452, "train/loss_total": 0.5658707618713379 }, { "epoch": 1.9850387389794282, "grad_norm": 2.550027847290039, "learning_rate": 1.809778252738445e-05, "loss": 0.5668, "step": 7430 }, { "epoch": 1.9850387389794282, "step": 7430, "train/loss_ctc": 1.5019015073776245, "train/loss_error": 0.4891827404499054, "train/loss_total": 0.6917265057563782 }, { "epoch": 1.9853059043547956, "step": 7431, "train/loss_ctc": 0.6793525815010071, "train/loss_error": 0.4557037055492401, "train/loss_total": 0.5004334449768066 }, { "epoch": 1.985573069730163, "step": 7432, "train/loss_ctc": 0.5911048650741577, "train/loss_error": 0.46954184770584106, "train/loss_total": 0.49385446310043335 }, { "epoch": 1.9858402351055302, "step": 7433, "train/loss_ctc": 0.4709145724773407, "train/loss_error": 0.34559178352355957, "train/loss_total": 0.3706563413143158 }, { "epoch": 1.9861074004808976, "step": 7434, "train/loss_ctc": 0.9456668496131897, "train/loss_error": 0.49792248010635376, "train/loss_total": 0.5874713659286499 }, { "epoch": 1.986374565856265, 
"step": 7435, "train/loss_ctc": 1.5166507959365845, "train/loss_error": 0.4581461548805237, "train/loss_total": 0.6698470711708069 }, { "epoch": 1.9866417312316322, "step": 7436, "train/loss_ctc": 1.9578887224197388, "train/loss_error": 0.47208404541015625, "train/loss_total": 0.7692450284957886 }, { "epoch": 1.9869088966069999, "step": 7437, "train/loss_ctc": 0.3798299729824066, "train/loss_error": 0.40765559673309326, "train/loss_total": 0.40209048986434937 }, { "epoch": 1.987176061982367, "step": 7438, "train/loss_ctc": 0.8509248495101929, "train/loss_error": 0.4237521290779114, "train/loss_total": 0.5091866850852966 }, { "epoch": 1.9874432273577345, "step": 7439, "train/loss_ctc": 0.6683623790740967, "train/loss_error": 0.5201117992401123, "train/loss_total": 0.5497618913650513 }, { "epoch": 1.987710392733102, "grad_norm": 1.735605239868164, "learning_rate": 1.808175260486241e-05, "loss": 0.5544, "step": 7440 }, { "epoch": 1.987710392733102, "step": 7440, "train/loss_ctc": 0.3891054391860962, "train/loss_error": 0.5552603006362915, "train/loss_total": 0.5220293402671814 }, { "epoch": 1.987977558108469, "step": 7441, "train/loss_ctc": 0.8774805665016174, "train/loss_error": 0.4458732008934021, "train/loss_total": 0.5321946740150452 }, { "epoch": 1.9882447234838365, "step": 7442, "train/loss_ctc": 0.704649806022644, "train/loss_error": 0.4688977003097534, "train/loss_total": 0.5160481333732605 }, { "epoch": 1.988511888859204, "step": 7443, "train/loss_ctc": 0.4605393707752228, "train/loss_error": 0.44091030955314636, "train/loss_total": 0.4448361396789551 }, { "epoch": 1.9887790542345711, "step": 7444, "train/loss_ctc": 0.5341641902923584, "train/loss_error": 0.4299413561820984, "train/loss_total": 0.45078593492507935 }, { "epoch": 1.9890462196099385, "step": 7445, "train/loss_ctc": 0.5093053579330444, "train/loss_error": 0.4906805157661438, "train/loss_total": 0.49440550804138184 }, { "epoch": 1.989313384985306, "step": 7446, "train/loss_ctc": 
0.4949483275413513, "train/loss_error": 0.500732958316803, "train/loss_total": 0.49957603216171265 }, { "epoch": 1.9895805503606732, "step": 7447, "train/loss_ctc": 1.1255435943603516, "train/loss_error": 0.43826931715011597, "train/loss_total": 0.575724184513092 }, { "epoch": 1.9898477157360406, "step": 7448, "train/loss_ctc": 1.2310433387756348, "train/loss_error": 0.5152883529663086, "train/loss_total": 0.6584393978118896 }, { "epoch": 1.990114881111408, "step": 7449, "train/loss_ctc": 1.1393167972564697, "train/loss_error": 0.48443785309791565, "train/loss_total": 0.6154136657714844 }, { "epoch": 1.9903820464867752, "grad_norm": 2.5010008811950684, "learning_rate": 1.806572268234037e-05, "loss": 0.5309, "step": 7450 }, { "epoch": 1.9903820464867752, "step": 7450, "train/loss_ctc": 0.703045129776001, "train/loss_error": 0.45906326174736023, "train/loss_total": 0.5078596472740173 }, { "epoch": 1.9906492118621428, "step": 7451, "train/loss_ctc": 0.5733463168144226, "train/loss_error": 0.4137536585330963, "train/loss_total": 0.4456721842288971 }, { "epoch": 1.99091637723751, "step": 7452, "train/loss_ctc": 0.6281587481498718, "train/loss_error": 0.36908599734306335, "train/loss_total": 0.4209005534648895 }, { "epoch": 1.9911835426128772, "step": 7453, "train/loss_ctc": 1.173439621925354, "train/loss_error": 0.4612864553928375, "train/loss_total": 0.6037170886993408 }, { "epoch": 1.9914507079882449, "step": 7454, "train/loss_ctc": 0.8052888512611389, "train/loss_error": 0.46032679080963135, "train/loss_total": 0.529319167137146 }, { "epoch": 1.991717873363612, "step": 7455, "train/loss_ctc": 1.176283359527588, "train/loss_error": 0.43619173765182495, "train/loss_total": 0.5842100381851196 }, { "epoch": 1.9919850387389795, "step": 7456, "train/loss_ctc": 0.5690280795097351, "train/loss_error": 0.45865902304649353, "train/loss_total": 0.48073285818099976 }, { "epoch": 1.992252204114347, "step": 7457, "train/loss_ctc": 0.3164609372615814, "train/loss_error": 
0.4260733425617218, "train/loss_total": 0.4041508436203003 }, { "epoch": 1.992519369489714, "step": 7458, "train/loss_ctc": 0.865572452545166, "train/loss_error": 0.457843154668808, "train/loss_total": 0.5393890142440796 }, { "epoch": 1.9927865348650815, "step": 7459, "train/loss_ctc": 0.8105039596557617, "train/loss_error": 0.48238101601600647, "train/loss_total": 0.5480055809020996 }, { "epoch": 1.993053700240449, "grad_norm": 2.228071451187134, "learning_rate": 1.8049692759818328e-05, "loss": 0.5064, "step": 7460 }, { "epoch": 1.993053700240449, "step": 7460, "train/loss_ctc": 0.4244551658630371, "train/loss_error": 0.4070362150669098, "train/loss_total": 0.4105200171470642 }, { "epoch": 1.9933208656158161, "step": 7461, "train/loss_ctc": 0.8660256266593933, "train/loss_error": 0.43465709686279297, "train/loss_total": 0.520930826663971 }, { "epoch": 1.9935880309911835, "step": 7462, "train/loss_ctc": 0.5596590638160706, "train/loss_error": 0.5431366562843323, "train/loss_total": 0.5464411377906799 }, { "epoch": 1.993855196366551, "step": 7463, "train/loss_ctc": 0.8813518285751343, "train/loss_error": 0.4578631818294525, "train/loss_total": 0.5425609350204468 }, { "epoch": 1.9941223617419181, "step": 7464, "train/loss_ctc": 0.8396725058555603, "train/loss_error": 0.5037518739700317, "train/loss_total": 0.5709360241889954 }, { "epoch": 1.9943895271172856, "step": 7465, "train/loss_ctc": 0.6289376020431519, "train/loss_error": 0.4670395851135254, "train/loss_total": 0.4994192123413086 }, { "epoch": 1.994656692492653, "step": 7466, "train/loss_ctc": 1.5219926834106445, "train/loss_error": 0.4647425711154938, "train/loss_total": 0.6761926412582397 }, { "epoch": 1.9949238578680202, "step": 7467, "train/loss_ctc": 0.7334601879119873, "train/loss_error": 0.476689875125885, "train/loss_total": 0.5280439853668213 }, { "epoch": 1.9951910232433878, "step": 7468, "train/loss_ctc": 1.0149065256118774, "train/loss_error": 0.519966721534729, "train/loss_total": 
0.6189547181129456 }, { "epoch": 1.995458188618755, "step": 7469, "train/loss_ctc": 0.4074942469596863, "train/loss_error": 0.43346500396728516, "train/loss_total": 0.4282708764076233 }, { "epoch": 1.9957253539941222, "grad_norm": 2.0261220932006836, "learning_rate": 1.8033662837296286e-05, "loss": 0.5342, "step": 7470 }, { "epoch": 1.9957253539941222, "step": 7470, "train/loss_ctc": 0.4517844617366791, "train/loss_error": 0.48729798197746277, "train/loss_total": 0.4801952838897705 }, { "epoch": 1.9959925193694898, "step": 7471, "train/loss_ctc": 0.3463854193687439, "train/loss_error": 0.4617060124874115, "train/loss_total": 0.43864190578460693 }, { "epoch": 1.996259684744857, "step": 7472, "train/loss_ctc": 1.3321470022201538, "train/loss_error": 0.43452805280685425, "train/loss_total": 0.6140518188476562 }, { "epoch": 1.9965268501202245, "step": 7473, "train/loss_ctc": 0.8312912583351135, "train/loss_error": 0.423769474029541, "train/loss_total": 0.5052738189697266 }, { "epoch": 1.9967940154955919, "step": 7474, "train/loss_ctc": 0.7691642642021179, "train/loss_error": 0.47577354311943054, "train/loss_total": 0.5344517230987549 }, { "epoch": 1.997061180870959, "step": 7475, "train/loss_ctc": 0.4717530608177185, "train/loss_error": 0.46921834349632263, "train/loss_total": 0.46972528100013733 }, { "epoch": 1.9973283462463265, "step": 7476, "train/loss_ctc": 0.671133279800415, "train/loss_error": 0.4692085385322571, "train/loss_total": 0.5095934867858887 }, { "epoch": 1.997595511621694, "step": 7477, "train/loss_ctc": 0.899063229560852, "train/loss_error": 0.4897417426109314, "train/loss_total": 0.5716060400009155 }, { "epoch": 1.997862676997061, "step": 7478, "train/loss_ctc": 0.753142237663269, "train/loss_error": 0.4618726670742035, "train/loss_total": 0.5201265811920166 }, { "epoch": 1.9981298423724285, "step": 7479, "train/loss_ctc": 0.4299218952655792, "train/loss_error": 0.4552501440048218, "train/loss_total": 0.45018449425697327 }, { "epoch": 
1.998397007747796, "grad_norm": 2.4789023399353027, "learning_rate": 1.8017632914774244e-05, "loss": 0.5094, "step": 7480 }, { "epoch": 1.998397007747796, "step": 7480, "train/loss_ctc": 0.6268199682235718, "train/loss_error": 0.4431930184364319, "train/loss_total": 0.4799184203147888 }, { "epoch": 1.9986641731231631, "step": 7481, "train/loss_ctc": 0.42417287826538086, "train/loss_error": 0.3697344660758972, "train/loss_total": 0.38062214851379395 }, { "epoch": 1.9989313384985306, "step": 7482, "train/loss_ctc": 0.4802950322628021, "train/loss_error": 0.48452720046043396, "train/loss_total": 0.483680784702301 }, { "epoch": 1.999198503873898, "step": 7483, "train/loss_ctc": 0.8969626426696777, "train/loss_error": 0.5120487213134766, "train/loss_total": 0.5890315175056458 }, { "epoch": 1.9994656692492652, "step": 7484, "train/loss_ctc": 0.5957927107810974, "train/loss_error": 0.4649437367916107, "train/loss_total": 0.491113543510437 }, { "epoch": 1.9997328346246328, "step": 7485, "train/loss_ctc": 0.9430185556411743, "train/loss_error": 0.4829719066619873, "train/loss_total": 0.5749812722206116 }, { "epoch": 2.0, "eval_eval/f1_0": 0.5853440761566162, "eval_eval/f1_1": 0.8210049271583557, "eval_eval/precision_0": 0.8014683127403259, "eval_eval/precision_1": 0.7354008555412292, "eval_eval/recall_0": 0.46102410554885864, "eval_eval/recall_1": 0.929163932800293, "eval_eval/wer": 0.16613700588841387, "eval_runtime": 34.7731, "eval_samples_per_second": 13.2, "eval_steps_per_second": 13.2, "step": 7486 }, { "epoch": 2.0, "step": 7486, "train/loss_ctc": 0.8511250615119934, "train/loss_error": 0.4810538589954376, "train/loss_total": 0.5550680756568909 }, { "epoch": 2.000267165375367, "step": 7487, "train/loss_ctc": 0.826941728591919, "train/loss_error": 0.4787582457065582, "train/loss_total": 0.5483949184417725 }, { "epoch": 2.000534330750735, "step": 7488, "train/loss_ctc": 0.8258293867111206, "train/loss_error": 0.4709959924221039, "train/loss_total": 0.5419626832008362 }, 
{ "epoch": 2.000801496126102, "step": 7489, "train/loss_ctc": 0.6098414659500122, "train/loss_error": 0.4840376079082489, "train/loss_total": 0.5091983675956726 }, { "epoch": 2.0010686615014692, "grad_norm": 1.6106692552566528, "learning_rate": 1.8001602992252202e-05, "loss": 0.5154, "step": 7490 }, { "epoch": 2.0010686615014692, "step": 7490, "train/loss_ctc": 0.3901689946651459, "train/loss_error": 0.5032496452331543, "train/loss_total": 0.48063352704048157 }, { "epoch": 2.001335826876837, "step": 7491, "train/loss_ctc": 0.548038125038147, "train/loss_error": 0.4780060052871704, "train/loss_total": 0.4920124411582947 }, { "epoch": 2.001602992252204, "step": 7492, "train/loss_ctc": 0.6572932600975037, "train/loss_error": 0.48024994134902954, "train/loss_total": 0.5156586170196533 }, { "epoch": 2.0018701576275713, "step": 7493, "train/loss_ctc": 1.1573803424835205, "train/loss_error": 0.4994584023952484, "train/loss_total": 0.6310428380966187 }, { "epoch": 2.002137323002939, "step": 7494, "train/loss_ctc": 0.7265976667404175, "train/loss_error": 0.4834030270576477, "train/loss_total": 0.5320419669151306 }, { "epoch": 2.002404488378306, "step": 7495, "train/loss_ctc": 1.015177607536316, "train/loss_error": 0.4624421298503876, "train/loss_total": 0.5729892253875732 }, { "epoch": 2.0026716537536737, "step": 7496, "train/loss_ctc": 0.8922746181488037, "train/loss_error": 0.4326251149177551, "train/loss_total": 0.5245550274848938 }, { "epoch": 2.002938819129041, "step": 7497, "train/loss_ctc": 0.2618001103401184, "train/loss_error": 0.4101402461528778, "train/loss_total": 0.38047224283218384 }, { "epoch": 2.003205984504408, "step": 7498, "train/loss_ctc": 1.834489345550537, "train/loss_error": 0.412615567445755, "train/loss_total": 0.6969903707504272 }, { "epoch": 2.0034731498797758, "step": 7499, "train/loss_ctc": 0.41273486614227295, "train/loss_error": 0.45425930619239807, "train/loss_total": 0.44595444202423096 }, { "epoch": 2.003740315255143, "grad_norm": 
2.119175434112549, "learning_rate": 1.7985573069730163e-05, "loss": 0.5272, "step": 7500 }, { "epoch": 2.003740315255143, "step": 7500, "train/loss_ctc": 0.5836670398712158, "train/loss_error": 0.47855496406555176, "train/loss_total": 0.4995773732662201 }, { "epoch": 2.00400748063051, "step": 7501, "train/loss_ctc": 2.0622503757476807, "train/loss_error": 0.5157569646835327, "train/loss_total": 0.8250556588172913 }, { "epoch": 2.004274646005878, "step": 7502, "train/loss_ctc": 0.681830644607544, "train/loss_error": 0.559997022151947, "train/loss_total": 0.5843637585639954 }, { "epoch": 2.004541811381245, "step": 7503, "train/loss_ctc": 0.524761438369751, "train/loss_error": 0.45498785376548767, "train/loss_total": 0.4689425826072693 }, { "epoch": 2.004808976756612, "step": 7504, "train/loss_ctc": 0.3116613030433655, "train/loss_error": 0.4876537322998047, "train/loss_total": 0.4524552524089813 }, { "epoch": 2.00507614213198, "step": 7505, "train/loss_ctc": 1.0394585132598877, "train/loss_error": 0.43167421221733093, "train/loss_total": 0.5532311201095581 }, { "epoch": 2.005343307507347, "step": 7506, "train/loss_ctc": 0.43244296312332153, "train/loss_error": 0.4974910616874695, "train/loss_total": 0.48448145389556885 }, { "epoch": 2.005610472882714, "step": 7507, "train/loss_ctc": 0.6837560534477234, "train/loss_error": 0.48644012212753296, "train/loss_total": 0.5259033441543579 }, { "epoch": 2.005877638258082, "step": 7508, "train/loss_ctc": 0.9720332622528076, "train/loss_error": 0.3865083158016205, "train/loss_total": 0.5036133527755737 }, { "epoch": 2.006144803633449, "step": 7509, "train/loss_ctc": 0.7038779854774475, "train/loss_error": 0.483970582485199, "train/loss_total": 0.5279520750045776 }, { "epoch": 2.0064119690088162, "grad_norm": 1.9345060586929321, "learning_rate": 1.796954314720812e-05, "loss": 0.5426, "step": 7510 }, { "epoch": 2.0064119690088162, "step": 7510, "train/loss_ctc": 1.3930819034576416, "train/loss_error": 0.4955037534236908, 
"train/loss_total": 0.675019383430481 }, { "epoch": 2.006679134384184, "step": 7511, "train/loss_ctc": 0.5236043930053711, "train/loss_error": 0.4146715998649597, "train/loss_total": 0.43645817041397095 }, { "epoch": 2.006946299759551, "step": 7512, "train/loss_ctc": 0.6226226687431335, "train/loss_error": 0.49465349316596985, "train/loss_total": 0.5202473402023315 }, { "epoch": 2.0072134651349187, "step": 7513, "train/loss_ctc": 0.16638433933258057, "train/loss_error": 0.39966318011283875, "train/loss_total": 0.35300740599632263 }, { "epoch": 2.007480630510286, "step": 7514, "train/loss_ctc": 1.1126703023910522, "train/loss_error": 0.4367368817329407, "train/loss_total": 0.5719236135482788 }, { "epoch": 2.007747795885653, "step": 7515, "train/loss_ctc": 0.4581153690814972, "train/loss_error": 0.40000975131988525, "train/loss_total": 0.41163086891174316 }, { "epoch": 2.0080149612610207, "step": 7516, "train/loss_ctc": 0.575782835483551, "train/loss_error": 0.4308124780654907, "train/loss_total": 0.45980656147003174 }, { "epoch": 2.008282126636388, "step": 7517, "train/loss_ctc": 0.4994819164276123, "train/loss_error": 0.46899792551994324, "train/loss_total": 0.475094735622406 }, { "epoch": 2.008549292011755, "step": 7518, "train/loss_ctc": 0.6168725490570068, "train/loss_error": 0.4595649838447571, "train/loss_total": 0.49102652072906494 }, { "epoch": 2.008816457387123, "step": 7519, "train/loss_ctc": 0.8452218770980835, "train/loss_error": 0.41187784075737, "train/loss_total": 0.49854665994644165 }, { "epoch": 2.00908362276249, "grad_norm": 2.3712029457092285, "learning_rate": 1.795351322468608e-05, "loss": 0.4893, "step": 7520 }, { "epoch": 2.00908362276249, "step": 7520, "train/loss_ctc": 0.3897785544395447, "train/loss_error": 0.5297524929046631, "train/loss_total": 0.5017576813697815 }, { "epoch": 2.009350788137857, "step": 7521, "train/loss_ctc": 0.9635118246078491, "train/loss_error": 0.4492813050746918, "train/loss_total": 0.5521274209022522 }, { "epoch": 
2.009617953513225, "step": 7522, "train/loss_ctc": 1.0274263620376587, "train/loss_error": 0.4031141400337219, "train/loss_total": 0.5279765725135803 }, { "epoch": 2.009885118888592, "step": 7523, "train/loss_ctc": 0.44642072916030884, "train/loss_error": 0.3775251805782318, "train/loss_total": 0.3913043141365051 }, { "epoch": 2.010152284263959, "step": 7524, "train/loss_ctc": 1.1483049392700195, "train/loss_error": 0.469288170337677, "train/loss_total": 0.6050915718078613 }, { "epoch": 2.010419449639327, "step": 7525, "train/loss_ctc": 1.003197193145752, "train/loss_error": 0.4383481442928314, "train/loss_total": 0.5513179898262024 }, { "epoch": 2.010686615014694, "step": 7526, "train/loss_ctc": 0.45639777183532715, "train/loss_error": 0.4493333399295807, "train/loss_total": 0.45074623823165894 }, { "epoch": 2.0109537803900612, "step": 7527, "train/loss_ctc": 0.9133574366569519, "train/loss_error": 0.42495861649513245, "train/loss_total": 0.5226383805274963 }, { "epoch": 2.011220945765429, "step": 7528, "train/loss_ctc": 1.1529502868652344, "train/loss_error": 0.4377133250236511, "train/loss_total": 0.5807607173919678 }, { "epoch": 2.011488111140796, "step": 7529, "train/loss_ctc": 0.7055603265762329, "train/loss_error": 0.47661924362182617, "train/loss_total": 0.5224074721336365 }, { "epoch": 2.0117552765161637, "grad_norm": 2.4184892177581787, "learning_rate": 1.793748330216404e-05, "loss": 0.5206, "step": 7530 }, { "epoch": 2.0117552765161637, "step": 7530, "train/loss_ctc": 1.5950827598571777, "train/loss_error": 0.43633466958999634, "train/loss_total": 0.6680842638015747 }, { "epoch": 2.012022441891531, "step": 7531, "train/loss_ctc": 1.182579517364502, "train/loss_error": 0.45739179849624634, "train/loss_total": 0.6024293899536133 }, { "epoch": 2.012289607266898, "step": 7532, "train/loss_ctc": 0.6743921041488647, "train/loss_error": 0.4538112282752991, "train/loss_total": 0.4979274272918701 }, { "epoch": 2.0125567726422657, "step": 7533, "train/loss_ctc": 
1.1560077667236328, "train/loss_error": 0.4736175835132599, "train/loss_total": 0.6100956201553345 }, { "epoch": 2.012823938017633, "step": 7534, "train/loss_ctc": 0.717418909072876, "train/loss_error": 0.4729909598827362, "train/loss_total": 0.5218765735626221 }, { "epoch": 2.013091103393, "step": 7535, "train/loss_ctc": 0.4472717046737671, "train/loss_error": 0.519452691078186, "train/loss_total": 0.5050165057182312 }, { "epoch": 2.0133582687683678, "step": 7536, "train/loss_ctc": 0.8283920884132385, "train/loss_error": 0.5315654873847961, "train/loss_total": 0.5909308195114136 }, { "epoch": 2.013625434143735, "step": 7537, "train/loss_ctc": 0.4850185215473175, "train/loss_error": 0.43368950486183167, "train/loss_total": 0.44395530223846436 }, { "epoch": 2.013892599519102, "step": 7538, "train/loss_ctc": 0.5332786440849304, "train/loss_error": 0.47221267223358154, "train/loss_total": 0.4844259023666382 }, { "epoch": 2.01415976489447, "step": 7539, "train/loss_ctc": 1.2206449508666992, "train/loss_error": 0.45086991786956787, "train/loss_total": 0.6048249006271362 }, { "epoch": 2.014426930269837, "grad_norm": 1.4473298788070679, "learning_rate": 1.7921453379642e-05, "loss": 0.553, "step": 7540 }, { "epoch": 2.014426930269837, "step": 7540, "train/loss_ctc": 1.2054280042648315, "train/loss_error": 0.44740164279937744, "train/loss_total": 0.5990069508552551 }, { "epoch": 2.014694095645204, "step": 7541, "train/loss_ctc": 0.6800415515899658, "train/loss_error": 0.4727276563644409, "train/loss_total": 0.5141904354095459 }, { "epoch": 2.014961261020572, "step": 7542, "train/loss_ctc": 1.0249444246292114, "train/loss_error": 0.4181284010410309, "train/loss_total": 0.539491593837738 }, { "epoch": 2.015228426395939, "step": 7543, "train/loss_ctc": 1.0768649578094482, "train/loss_error": 0.4568552076816559, "train/loss_total": 0.5808571577072144 }, { "epoch": 2.015495591771306, "step": 7544, "train/loss_ctc": 0.7905933856964111, "train/loss_error": 0.4983944296836853, 
"train/loss_total": 0.5568342208862305 }, { "epoch": 2.015762757146674, "step": 7545, "train/loss_ctc": 0.947842538356781, "train/loss_error": 0.4547058939933777, "train/loss_total": 0.5533332228660583 }, { "epoch": 2.016029922522041, "step": 7546, "train/loss_ctc": 0.6783738136291504, "train/loss_error": 0.4485408365726471, "train/loss_total": 0.49450743198394775 }, { "epoch": 2.0162970878974087, "step": 7547, "train/loss_ctc": 0.4908788204193115, "train/loss_error": 0.4785076975822449, "train/loss_total": 0.4809819161891937 }, { "epoch": 2.016564253272776, "step": 7548, "train/loss_ctc": 0.42244765162467957, "train/loss_error": 0.4314107894897461, "train/loss_total": 0.42961814999580383 }, { "epoch": 2.016831418648143, "step": 7549, "train/loss_ctc": 0.5242810249328613, "train/loss_error": 0.42928972840309143, "train/loss_total": 0.4482880234718323 }, { "epoch": 2.0170985840235107, "grad_norm": 1.6502530574798584, "learning_rate": 1.790542345711996e-05, "loss": 0.5197, "step": 7550 }, { "epoch": 2.0170985840235107, "step": 7550, "train/loss_ctc": 1.055500864982605, "train/loss_error": 0.41199198365211487, "train/loss_total": 0.5406937599182129 }, { "epoch": 2.017365749398878, "step": 7551, "train/loss_ctc": 0.8622382283210754, "train/loss_error": 0.38123640418052673, "train/loss_total": 0.47743678092956543 }, { "epoch": 2.017632914774245, "step": 7552, "train/loss_ctc": 0.8053715825080872, "train/loss_error": 0.4627859890460968, "train/loss_total": 0.5313031077384949 }, { "epoch": 2.0179000801496128, "step": 7553, "train/loss_ctc": 0.7950427532196045, "train/loss_error": 0.5063517093658447, "train/loss_total": 0.5640898942947388 }, { "epoch": 2.01816724552498, "step": 7554, "train/loss_ctc": 0.9834256172180176, "train/loss_error": 0.5479859113693237, "train/loss_total": 0.6350738406181335 }, { "epoch": 2.018434410900347, "step": 7555, "train/loss_ctc": 0.6708323359489441, "train/loss_error": 0.43450742959976196, "train/loss_total": 0.48177242279052734 }, { 
"epoch": 2.018701576275715, "step": 7556, "train/loss_ctc": 0.7630815505981445, "train/loss_error": 0.3775489032268524, "train/loss_total": 0.45465540885925293 }, { "epoch": 2.018968741651082, "step": 7557, "train/loss_ctc": 0.5470232963562012, "train/loss_error": 0.4866623878479004, "train/loss_total": 0.49873456358909607 }, { "epoch": 2.019235907026449, "step": 7558, "train/loss_ctc": 0.4179149270057678, "train/loss_error": 0.46403026580810547, "train/loss_total": 0.45480722188949585 }, { "epoch": 2.019503072401817, "step": 7559, "train/loss_ctc": 0.3560107350349426, "train/loss_error": 0.4995661973953247, "train/loss_total": 0.47085511684417725 }, { "epoch": 2.019770237777184, "grad_norm": 3.454714298248291, "learning_rate": 1.788939353459792e-05, "loss": 0.5109, "step": 7560 }, { "epoch": 2.019770237777184, "step": 7560, "train/loss_ctc": 0.7774114608764648, "train/loss_error": 0.4846493601799011, "train/loss_total": 0.5432018041610718 }, { "epoch": 2.0200374031525516, "step": 7561, "train/loss_ctc": 0.36652424931526184, "train/loss_error": 0.3703271150588989, "train/loss_total": 0.36956655979156494 }, { "epoch": 2.020304568527919, "step": 7562, "train/loss_ctc": 0.942203164100647, "train/loss_error": 0.49026623368263245, "train/loss_total": 0.5806536078453064 }, { "epoch": 2.020571733903286, "step": 7563, "train/loss_ctc": 1.0969891548156738, "train/loss_error": 0.42360398173332214, "train/loss_total": 0.5582810044288635 }, { "epoch": 2.0208388992786537, "step": 7564, "train/loss_ctc": 0.3757380545139313, "train/loss_error": 0.43183064460754395, "train/loss_total": 0.4206121563911438 }, { "epoch": 2.021106064654021, "step": 7565, "train/loss_ctc": 0.7252534627914429, "train/loss_error": 0.4614957869052887, "train/loss_total": 0.5142472982406616 }, { "epoch": 2.021373230029388, "step": 7566, "train/loss_ctc": 0.7456508874893188, "train/loss_error": 0.5000250339508057, "train/loss_total": 0.5491502285003662 }, { "epoch": 2.0216403954047557, "step": 7567, 
"train/loss_ctc": 0.364933043718338, "train/loss_error": 0.5095857977867126, "train/loss_total": 0.4806552529335022 }, { "epoch": 2.021907560780123, "step": 7568, "train/loss_ctc": 1.744173288345337, "train/loss_error": 0.5140263438224792, "train/loss_total": 0.7600557804107666 }, { "epoch": 2.02217472615549, "step": 7569, "train/loss_ctc": 0.3761517405509949, "train/loss_error": 0.44250211119651794, "train/loss_total": 0.42923206090927124 }, { "epoch": 2.0224418915308577, "grad_norm": 2.8813560009002686, "learning_rate": 1.7873363612075877e-05, "loss": 0.5206, "step": 7570 }, { "epoch": 2.0224418915308577, "step": 7570, "train/loss_ctc": 0.5287413597106934, "train/loss_error": 0.4501219689846039, "train/loss_total": 0.46584585309028625 }, { "epoch": 2.022709056906225, "step": 7571, "train/loss_ctc": 1.074218511581421, "train/loss_error": 0.4262556731700897, "train/loss_total": 0.555848240852356 }, { "epoch": 2.022976222281592, "step": 7572, "train/loss_ctc": 0.7367420792579651, "train/loss_error": 0.4375273287296295, "train/loss_total": 0.49737030267715454 }, { "epoch": 2.0232433876569598, "step": 7573, "train/loss_ctc": 0.8899418115615845, "train/loss_error": 0.43115898966789246, "train/loss_total": 0.5229155421257019 }, { "epoch": 2.023510553032327, "step": 7574, "train/loss_ctc": 0.6766781806945801, "train/loss_error": 0.40259072184562683, "train/loss_total": 0.45740824937820435 }, { "epoch": 2.023777718407694, "step": 7575, "train/loss_ctc": 0.6856942772865295, "train/loss_error": 0.4425880014896393, "train/loss_total": 0.4912092685699463 }, { "epoch": 2.024044883783062, "step": 7576, "train/loss_ctc": 0.7045584321022034, "train/loss_error": 0.3625990152359009, "train/loss_total": 0.43099087476730347 }, { "epoch": 2.024312049158429, "step": 7577, "train/loss_ctc": 0.5029950141906738, "train/loss_error": 0.48944219946861267, "train/loss_total": 0.49215275049209595 }, { "epoch": 2.0245792145337966, "step": 7578, "train/loss_ctc": 0.728417694568634, 
"train/loss_error": 0.5224657654762268, "train/loss_total": 0.5636561512947083 }, { "epoch": 2.024846379909164, "step": 7579, "train/loss_ctc": 0.7047632336616516, "train/loss_error": 0.4816558361053467, "train/loss_total": 0.5262773036956787 }, { "epoch": 2.025113545284531, "grad_norm": 2.606954336166382, "learning_rate": 1.7857333689553835e-05, "loss": 0.5004, "step": 7580 }, { "epoch": 2.025113545284531, "step": 7580, "train/loss_ctc": 0.39730921387672424, "train/loss_error": 0.3551712930202484, "train/loss_total": 0.36359888315200806 }, { "epoch": 2.0253807106598987, "step": 7581, "train/loss_ctc": 0.5289298295974731, "train/loss_error": 0.47030410170555115, "train/loss_total": 0.4820292592048645 }, { "epoch": 2.025647876035266, "step": 7582, "train/loss_ctc": 0.50981605052948, "train/loss_error": 0.39355337619781494, "train/loss_total": 0.4168059229850769 }, { "epoch": 2.025915041410633, "step": 7583, "train/loss_ctc": 0.8537776470184326, "train/loss_error": 0.48302483558654785, "train/loss_total": 0.5571753978729248 }, { "epoch": 2.0261822067860007, "step": 7584, "train/loss_ctc": 1.231784701347351, "train/loss_error": 0.4102972149848938, "train/loss_total": 0.5745947360992432 }, { "epoch": 2.026449372161368, "step": 7585, "train/loss_ctc": 0.5374487042427063, "train/loss_error": 0.44385454058647156, "train/loss_total": 0.462573379278183 }, { "epoch": 2.026716537536735, "step": 7586, "train/loss_ctc": 0.892744779586792, "train/loss_error": 0.4615536034107208, "train/loss_total": 0.5477918386459351 }, { "epoch": 2.0269837029121027, "step": 7587, "train/loss_ctc": 0.6408167481422424, "train/loss_error": 0.4597180485725403, "train/loss_total": 0.4959378242492676 }, { "epoch": 2.02725086828747, "step": 7588, "train/loss_ctc": 0.6091053485870361, "train/loss_error": 0.3772011697292328, "train/loss_total": 0.4235820174217224 }, { "epoch": 2.027518033662837, "step": 7589, "train/loss_ctc": 0.2662341594696045, "train/loss_error": 0.4390650689601898, 
"train/loss_total": 0.4044988751411438 }, { "epoch": 2.0277851990382048, "grad_norm": 1.314447283744812, "learning_rate": 1.7841303767031793e-05, "loss": 0.4729, "step": 7590 }, { "epoch": 2.0277851990382048, "step": 7590, "train/loss_ctc": 0.7094500064849854, "train/loss_error": 0.4916103482246399, "train/loss_total": 0.5351783037185669 }, { "epoch": 2.028052364413572, "step": 7591, "train/loss_ctc": 0.6869688034057617, "train/loss_error": 0.3797484338283539, "train/loss_total": 0.44119250774383545 }, { "epoch": 2.028319529788939, "step": 7592, "train/loss_ctc": 1.134497880935669, "train/loss_error": 0.425449013710022, "train/loss_total": 0.5672587752342224 }, { "epoch": 2.028586695164307, "step": 7593, "train/loss_ctc": 1.4994421005249023, "train/loss_error": 0.49841225147247314, "train/loss_total": 0.6986182332038879 }, { "epoch": 2.028853860539674, "step": 7594, "train/loss_ctc": 0.7749714255332947, "train/loss_error": 0.4562171995639801, "train/loss_total": 0.5199680328369141 }, { "epoch": 2.0291210259150416, "step": 7595, "train/loss_ctc": 0.476043701171875, "train/loss_error": 0.46515434980392456, "train/loss_total": 0.46733221411705017 }, { "epoch": 2.029388191290409, "step": 7596, "train/loss_ctc": 0.7422687411308289, "train/loss_error": 0.4572395980358124, "train/loss_total": 0.5142454504966736 }, { "epoch": 2.029655356665776, "step": 7597, "train/loss_ctc": 0.7783381342887878, "train/loss_error": 0.4627303183078766, "train/loss_total": 0.5258519053459167 }, { "epoch": 2.0299225220411437, "step": 7598, "train/loss_ctc": 0.7173590660095215, "train/loss_error": 0.5158799886703491, "train/loss_total": 0.5561758279800415 }, { "epoch": 2.030189687416511, "step": 7599, "train/loss_ctc": 0.46865445375442505, "train/loss_error": 0.43761900067329407, "train/loss_total": 0.4438261091709137 }, { "epoch": 2.030456852791878, "grad_norm": 1.4446271657943726, "learning_rate": 1.782527384450975e-05, "loss": 0.527, "step": 7600 }, { "epoch": 2.030456852791878, "step": 
7600, "train/loss_ctc": 0.3713318407535553, "train/loss_error": 0.5329198837280273, "train/loss_total": 0.5006022453308105 }, { "epoch": 2.0307240181672457, "step": 7601, "train/loss_ctc": 0.7811155319213867, "train/loss_error": 0.46119678020477295, "train/loss_total": 0.5251805186271667 }, { "epoch": 2.030991183542613, "step": 7602, "train/loss_ctc": 1.1841602325439453, "train/loss_error": 0.4564913511276245, "train/loss_total": 0.6020251512527466 }, { "epoch": 2.03125834891798, "step": 7603, "train/loss_ctc": 1.1999397277832031, "train/loss_error": 0.4817578196525574, "train/loss_total": 0.6253942251205444 }, { "epoch": 2.0315255142933477, "step": 7604, "train/loss_ctc": 0.4426848292350769, "train/loss_error": 0.45168429613113403, "train/loss_total": 0.44988441467285156 }, { "epoch": 2.031792679668715, "step": 7605, "train/loss_ctc": 0.7126449346542358, "train/loss_error": 0.39259517192840576, "train/loss_total": 0.45660513639450073 }, { "epoch": 2.032059845044082, "step": 7606, "train/loss_ctc": 1.0653945207595825, "train/loss_error": 0.443767249584198, "train/loss_total": 0.5680927038192749 }, { "epoch": 2.0323270104194497, "step": 7607, "train/loss_ctc": 0.960228681564331, "train/loss_error": 0.43596896529197693, "train/loss_total": 0.5408208966255188 }, { "epoch": 2.032594175794817, "step": 7608, "train/loss_ctc": 0.7109440565109253, "train/loss_error": 0.4431610703468323, "train/loss_total": 0.4967176914215088 }, { "epoch": 2.032861341170184, "step": 7609, "train/loss_ctc": 0.4963068664073944, "train/loss_error": 0.4307660460472107, "train/loss_total": 0.44387421011924744 }, { "epoch": 2.0331285065455518, "grad_norm": 1.4140671491622925, "learning_rate": 1.7809243921987712e-05, "loss": 0.5209, "step": 7610 }, { "epoch": 2.0331285065455518, "step": 7610, "train/loss_ctc": 0.9020771980285645, "train/loss_error": 0.41048893332481384, "train/loss_total": 0.508806586265564 }, { "epoch": 2.033395671920919, "step": 7611, "train/loss_ctc": 0.6999647617340088, 
"train/loss_error": 0.48946547508239746, "train/loss_total": 0.5315653085708618 }, { "epoch": 2.0336628372962866, "step": 7612, "train/loss_ctc": 0.1962202787399292, "train/loss_error": 0.47730275988578796, "train/loss_total": 0.42108628153800964 }, { "epoch": 2.033930002671654, "step": 7613, "train/loss_ctc": 0.7048484086990356, "train/loss_error": 0.4236745536327362, "train/loss_total": 0.4799093008041382 }, { "epoch": 2.034197168047021, "step": 7614, "train/loss_ctc": 0.6260521411895752, "train/loss_error": 0.4313216805458069, "train/loss_total": 0.47026777267456055 }, { "epoch": 2.0344643334223886, "step": 7615, "train/loss_ctc": 1.2166838645935059, "train/loss_error": 0.47746941447257996, "train/loss_total": 0.625312328338623 }, { "epoch": 2.034731498797756, "step": 7616, "train/loss_ctc": 0.8979718089103699, "train/loss_error": 0.3518306612968445, "train/loss_total": 0.46105891466140747 }, { "epoch": 2.034998664173123, "step": 7617, "train/loss_ctc": 0.7629846930503845, "train/loss_error": 0.41583266854286194, "train/loss_total": 0.48526304960250854 }, { "epoch": 2.0352658295484907, "step": 7618, "train/loss_ctc": 0.8511327505111694, "train/loss_error": 0.4731220602989197, "train/loss_total": 0.5487242341041565 }, { "epoch": 2.035532994923858, "step": 7619, "train/loss_ctc": 1.2758233547210693, "train/loss_error": 0.3754173219203949, "train/loss_total": 0.5554985404014587 }, { "epoch": 2.035800160299225, "grad_norm": 2.773252248764038, "learning_rate": 1.779321399946567e-05, "loss": 0.5087, "step": 7620 }, { "epoch": 2.035800160299225, "step": 7620, "train/loss_ctc": 0.8742806315422058, "train/loss_error": 0.4604021906852722, "train/loss_total": 0.5431778430938721 }, { "epoch": 2.0360673256745927, "step": 7621, "train/loss_ctc": 0.5900846123695374, "train/loss_error": 0.4159519672393799, "train/loss_total": 0.4507785141468048 }, { "epoch": 2.03633449104996, "step": 7622, "train/loss_ctc": 0.28920406103134155, "train/loss_error": 0.4156857430934906, 
"train/loss_total": 0.39038941264152527 }, { "epoch": 2.036601656425327, "step": 7623, "train/loss_ctc": 0.3602363169193268, "train/loss_error": 0.45210036635398865, "train/loss_total": 0.43372756242752075 }, { "epoch": 2.0368688218006947, "step": 7624, "train/loss_ctc": 0.21624113619327545, "train/loss_error": 0.4828339219093323, "train/loss_total": 0.42951539158821106 }, { "epoch": 2.037135987176062, "step": 7625, "train/loss_ctc": 0.8249452114105225, "train/loss_error": 0.5333799123764038, "train/loss_total": 0.5916929841041565 }, { "epoch": 2.037403152551429, "step": 7626, "train/loss_ctc": 0.6707399487495422, "train/loss_error": 0.45645132660865784, "train/loss_total": 0.49930906295776367 }, { "epoch": 2.0376703179267968, "step": 7627, "train/loss_ctc": 1.546238660812378, "train/loss_error": 0.43277350068092346, "train/loss_total": 0.6554665565490723 }, { "epoch": 2.037937483302164, "step": 7628, "train/loss_ctc": 0.8820589780807495, "train/loss_error": 0.4821680188179016, "train/loss_total": 0.5621461868286133 }, { "epoch": 2.0382046486775316, "step": 7629, "train/loss_ctc": 0.8382936716079712, "train/loss_error": 0.4569738805294037, "train/loss_total": 0.5332378149032593 }, { "epoch": 2.038471814052899, "grad_norm": 1.5542373657226562, "learning_rate": 1.777718407694363e-05, "loss": 0.5089, "step": 7630 }, { "epoch": 2.038471814052899, "step": 7630, "train/loss_ctc": 0.46952807903289795, "train/loss_error": 0.45513859391212463, "train/loss_total": 0.4580165147781372 }, { "epoch": 2.038738979428266, "step": 7631, "train/loss_ctc": 0.48666656017303467, "train/loss_error": 0.45932257175445557, "train/loss_total": 0.4647913873195648 }, { "epoch": 2.0390061448036336, "step": 7632, "train/loss_ctc": 1.3852334022521973, "train/loss_error": 0.4553147852420807, "train/loss_total": 0.6412985324859619 }, { "epoch": 2.039273310179001, "step": 7633, "train/loss_ctc": 1.1808030605316162, "train/loss_error": 0.4813520908355713, "train/loss_total": 0.6212422847747803 }, { 
"epoch": 2.039540475554368, "step": 7634, "train/loss_ctc": 0.6133838891983032, "train/loss_error": 0.505202054977417, "train/loss_total": 0.5268384218215942 }, { "epoch": 2.0398076409297357, "step": 7635, "train/loss_ctc": 1.0176180601119995, "train/loss_error": 0.4460258185863495, "train/loss_total": 0.5603442788124084 }, { "epoch": 2.040074806305103, "step": 7636, "train/loss_ctc": 0.535987138748169, "train/loss_error": 0.5723411440849304, "train/loss_total": 0.5650703310966492 }, { "epoch": 2.04034197168047, "step": 7637, "train/loss_ctc": 1.40146803855896, "train/loss_error": 0.4621013402938843, "train/loss_total": 0.6499747037887573 }, { "epoch": 2.0406091370558377, "step": 7638, "train/loss_ctc": 1.235633373260498, "train/loss_error": 0.42335405945777893, "train/loss_total": 0.5858099460601807 }, { "epoch": 2.040876302431205, "step": 7639, "train/loss_ctc": 0.8095626831054688, "train/loss_error": 0.4294257164001465, "train/loss_total": 0.5054531097412109 }, { "epoch": 2.041143467806572, "grad_norm": 3.228978395462036, "learning_rate": 1.7761154154421587e-05, "loss": 0.5579, "step": 7640 }, { "epoch": 2.041143467806572, "step": 7640, "train/loss_ctc": 0.7538192272186279, "train/loss_error": 0.5268983244895935, "train/loss_total": 0.5722824931144714 }, { "epoch": 2.0414106331819397, "step": 7641, "train/loss_ctc": 1.0985835790634155, "train/loss_error": 0.5826982855796814, "train/loss_total": 0.6858753561973572 }, { "epoch": 2.041677798557307, "step": 7642, "train/loss_ctc": 0.7914729714393616, "train/loss_error": 0.5123860239982605, "train/loss_total": 0.5682034492492676 }, { "epoch": 2.041944963932674, "step": 7643, "train/loss_ctc": 0.872891902923584, "train/loss_error": 0.5290684700012207, "train/loss_total": 0.5978331565856934 }, { "epoch": 2.0422121293080417, "step": 7644, "train/loss_ctc": 0.4221493601799011, "train/loss_error": 0.5075304508209229, "train/loss_total": 0.4904542565345764 }, { "epoch": 2.042479294683409, "step": 7645, "train/loss_ctc": 
0.32321858406066895, "train/loss_error": 0.4033212661743164, "train/loss_total": 0.3873007297515869 }, { "epoch": 2.0427464600587766, "step": 7646, "train/loss_ctc": 0.4975864887237549, "train/loss_error": 0.47122037410736084, "train/loss_total": 0.47649359703063965 }, { "epoch": 2.0430136254341438, "step": 7647, "train/loss_ctc": 0.5014523267745972, "train/loss_error": 0.48151683807373047, "train/loss_total": 0.4855039417743683 }, { "epoch": 2.043280790809511, "step": 7648, "train/loss_ctc": 0.574695348739624, "train/loss_error": 0.49018940329551697, "train/loss_total": 0.5070906281471252 }, { "epoch": 2.0435479561848786, "step": 7649, "train/loss_ctc": 0.6036293506622314, "train/loss_error": 0.4417298436164856, "train/loss_total": 0.4741097390651703 }, { "epoch": 2.043815121560246, "grad_norm": 1.6989343166351318, "learning_rate": 1.7745124231899545e-05, "loss": 0.5245, "step": 7650 }, { "epoch": 2.043815121560246, "step": 7650, "train/loss_ctc": 0.4446997046470642, "train/loss_error": 0.48337796330451965, "train/loss_total": 0.4756423234939575 }, { "epoch": 2.044082286935613, "step": 7651, "train/loss_ctc": 0.9797606468200684, "train/loss_error": 0.4837813079357147, "train/loss_total": 0.5829771757125854 }, { "epoch": 2.0443494523109806, "step": 7652, "train/loss_ctc": 0.9013546705245972, "train/loss_error": 0.4772195816040039, "train/loss_total": 0.5620466470718384 }, { "epoch": 2.044616617686348, "step": 7653, "train/loss_ctc": 1.1422760486602783, "train/loss_error": 0.4455139935016632, "train/loss_total": 0.5848664045333862 }, { "epoch": 2.044883783061715, "step": 7654, "train/loss_ctc": 1.4323813915252686, "train/loss_error": 0.5011169910430908, "train/loss_total": 0.6873698830604553 }, { "epoch": 2.0451509484370827, "step": 7655, "train/loss_ctc": 1.0018867254257202, "train/loss_error": 0.5038357973098755, "train/loss_total": 0.6034460067749023 }, { "epoch": 2.04541811381245, "step": 7656, "train/loss_ctc": 1.090972900390625, "train/loss_error": 
0.522647500038147, "train/loss_total": 0.6363126039505005 }, { "epoch": 2.045685279187817, "step": 7657, "train/loss_ctc": 0.8218289613723755, "train/loss_error": 0.3906800448894501, "train/loss_total": 0.4769098460674286 }, { "epoch": 2.0459524445631847, "step": 7658, "train/loss_ctc": 1.3982915878295898, "train/loss_error": 0.4800165891647339, "train/loss_total": 0.663671612739563 }, { "epoch": 2.046219609938552, "step": 7659, "train/loss_ctc": 0.6804847717285156, "train/loss_error": 0.45225048065185547, "train/loss_total": 0.49789735674858093 }, { "epoch": 2.0464867753139195, "grad_norm": 1.9539037942886353, "learning_rate": 1.7729094309377503e-05, "loss": 0.5771, "step": 7660 }, { "epoch": 2.0464867753139195, "step": 7660, "train/loss_ctc": 0.33431291580200195, "train/loss_error": 0.4602351486682892, "train/loss_total": 0.43505069613456726 }, { "epoch": 2.0467539406892867, "step": 7661, "train/loss_ctc": 1.0881330966949463, "train/loss_error": 0.4885169565677643, "train/loss_total": 0.6084401607513428 }, { "epoch": 2.047021106064654, "step": 7662, "train/loss_ctc": 0.32042741775512695, "train/loss_error": 0.4638463854789734, "train/loss_total": 0.43516260385513306 }, { "epoch": 2.0472882714400216, "step": 7663, "train/loss_ctc": 0.1717892289161682, "train/loss_error": 0.43426719307899475, "train/loss_total": 0.38177159428596497 }, { "epoch": 2.0475554368153888, "step": 7664, "train/loss_ctc": 0.8464069962501526, "train/loss_error": 0.49345287680625916, "train/loss_total": 0.5640437006950378 }, { "epoch": 2.047822602190756, "step": 7665, "train/loss_ctc": 0.7551010847091675, "train/loss_error": 0.4415965974330902, "train/loss_total": 0.5042974948883057 }, { "epoch": 2.0480897675661236, "step": 7666, "train/loss_ctc": 0.7769087553024292, "train/loss_error": 0.47388625144958496, "train/loss_total": 0.5344907641410828 }, { "epoch": 2.048356932941491, "step": 7667, "train/loss_ctc": 0.6584024429321289, "train/loss_error": 0.48616716265678406, "train/loss_total": 
0.5206142663955688 }, { "epoch": 2.048624098316858, "step": 7668, "train/loss_ctc": 1.8511898517608643, "train/loss_error": 0.46957555413246155, "train/loss_total": 0.745898425579071 }, { "epoch": 2.0488912636922256, "step": 7669, "train/loss_ctc": 0.7992113828659058, "train/loss_error": 0.47402435541152954, "train/loss_total": 0.5390617847442627 }, { "epoch": 2.049158429067593, "grad_norm": 2.468137264251709, "learning_rate": 1.7713064386855464e-05, "loss": 0.5269, "step": 7670 }, { "epoch": 2.049158429067593, "step": 7670, "train/loss_ctc": 0.7574268579483032, "train/loss_error": 0.4049634337425232, "train/loss_total": 0.4754561185836792 }, { "epoch": 2.04942559444296, "step": 7671, "train/loss_ctc": 1.0151464939117432, "train/loss_error": 0.4524247944355011, "train/loss_total": 0.5649691820144653 }, { "epoch": 2.0496927598183277, "step": 7672, "train/loss_ctc": 0.7711523175239563, "train/loss_error": 0.455154150724411, "train/loss_total": 0.5183537602424622 }, { "epoch": 2.049959925193695, "step": 7673, "train/loss_ctc": 0.9756545424461365, "train/loss_error": 0.4280848503112793, "train/loss_total": 0.5375987887382507 }, { "epoch": 2.050227090569062, "step": 7674, "train/loss_ctc": 0.6410968899726868, "train/loss_error": 0.4354208707809448, "train/loss_total": 0.47655606269836426 }, { "epoch": 2.0504942559444297, "step": 7675, "train/loss_ctc": 0.7794620990753174, "train/loss_error": 0.42577019333839417, "train/loss_total": 0.4965085983276367 }, { "epoch": 2.050761421319797, "step": 7676, "train/loss_ctc": 0.41629356145858765, "train/loss_error": 0.43727418780326843, "train/loss_total": 0.4330780804157257 }, { "epoch": 2.0510285866951645, "step": 7677, "train/loss_ctc": 0.6472500562667847, "train/loss_error": 0.41238081455230713, "train/loss_total": 0.4593546390533447 }, { "epoch": 2.0512957520705317, "step": 7678, "train/loss_ctc": 1.1341642141342163, "train/loss_error": 0.47510671615600586, "train/loss_total": 0.606918215751648 }, { "epoch": 2.051562917445899, 
"step": 7679, "train/loss_ctc": 0.4196021556854248, "train/loss_error": 0.493476539850235, "train/loss_total": 0.478701651096344 }, { "epoch": 2.0518300828212666, "grad_norm": 18.876115798950195, "learning_rate": 1.7697034464333422e-05, "loss": 0.5047, "step": 7680 }, { "epoch": 2.0518300828212666, "step": 7680, "train/loss_ctc": 0.6413720846176147, "train/loss_error": 0.47421616315841675, "train/loss_total": 0.5076473355293274 }, { "epoch": 2.0520972481966337, "step": 7681, "train/loss_ctc": 1.1582543849945068, "train/loss_error": 0.48621866106987, "train/loss_total": 0.6206257939338684 }, { "epoch": 2.052364413572001, "step": 7682, "train/loss_ctc": 0.8592832684516907, "train/loss_error": 0.48729872703552246, "train/loss_total": 0.5616956353187561 }, { "epoch": 2.0526315789473686, "step": 7683, "train/loss_ctc": 0.9278655648231506, "train/loss_error": 0.4625850319862366, "train/loss_total": 0.5556411743164062 }, { "epoch": 2.052898744322736, "step": 7684, "train/loss_ctc": 0.7163084745407104, "train/loss_error": 0.4605182111263275, "train/loss_total": 0.5116763114929199 }, { "epoch": 2.053165909698103, "step": 7685, "train/loss_ctc": 1.018183946609497, "train/loss_error": 0.4293656051158905, "train/loss_total": 0.5471292734146118 }, { "epoch": 2.0534330750734706, "step": 7686, "train/loss_ctc": 1.1257182359695435, "train/loss_error": 0.5186268091201782, "train/loss_total": 0.6400451064109802 }, { "epoch": 2.053700240448838, "step": 7687, "train/loss_ctc": 1.1558220386505127, "train/loss_error": 0.40470045804977417, "train/loss_total": 0.5549247860908508 }, { "epoch": 2.053967405824205, "step": 7688, "train/loss_ctc": 1.224886178970337, "train/loss_error": 0.43661484122276306, "train/loss_total": 0.5942691564559937 }, { "epoch": 2.0542345711995726, "step": 7689, "train/loss_ctc": 0.900345504283905, "train/loss_error": 0.4266718924045563, "train/loss_total": 0.5214066505432129 }, { "epoch": 2.05450173657494, "grad_norm": 2.847898483276367, "learning_rate": 
1.768100454181138e-05, "loss": 0.5615, "step": 7690 }, { "epoch": 2.05450173657494, "step": 7690, "train/loss_ctc": 0.7518340945243835, "train/loss_error": 0.44262224435806274, "train/loss_total": 0.5044646263122559 }, { "epoch": 2.054768901950307, "step": 7691, "train/loss_ctc": 0.46166253089904785, "train/loss_error": 0.425586074590683, "train/loss_total": 0.43280136585235596 }, { "epoch": 2.0550360673256747, "step": 7692, "train/loss_ctc": 0.4505610764026642, "train/loss_error": 0.4581204652786255, "train/loss_total": 0.4566085934638977 }, { "epoch": 2.055303232701042, "step": 7693, "train/loss_ctc": 0.8204049468040466, "train/loss_error": 0.4428490698337555, "train/loss_total": 0.5183602571487427 }, { "epoch": 2.0555703980764095, "step": 7694, "train/loss_ctc": 0.7690532207489014, "train/loss_error": 0.5060436725616455, "train/loss_total": 0.5586456060409546 }, { "epoch": 2.0558375634517767, "step": 7695, "train/loss_ctc": 1.5417832136154175, "train/loss_error": 0.4755299389362335, "train/loss_total": 0.6887806057929993 }, { "epoch": 2.056104728827144, "step": 7696, "train/loss_ctc": 0.3873453736305237, "train/loss_error": 0.4484817683696747, "train/loss_total": 0.43625450134277344 }, { "epoch": 2.0563718942025115, "step": 7697, "train/loss_ctc": 1.2628250122070312, "train/loss_error": 0.433604896068573, "train/loss_total": 0.5994489192962646 }, { "epoch": 2.0566390595778787, "step": 7698, "train/loss_ctc": 0.5497581958770752, "train/loss_error": 0.5053125619888306, "train/loss_total": 0.5142017006874084 }, { "epoch": 2.056906224953246, "step": 7699, "train/loss_ctc": 0.9073436856269836, "train/loss_error": 0.445619136095047, "train/loss_total": 0.5379640460014343 }, { "epoch": 2.0571733903286136, "grad_norm": 1.7654613256454468, "learning_rate": 1.766497461928934e-05, "loss": 0.5248, "step": 7700 }, { "epoch": 2.0571733903286136, "step": 7700, "train/loss_ctc": 0.8178337812423706, "train/loss_error": 0.4698984920978546, "train/loss_total": 0.5394855737686157 
}, { "epoch": 2.0574405557039808, "step": 7701, "train/loss_ctc": 1.0933350324630737, "train/loss_error": 0.43355557322502136, "train/loss_total": 0.5655114650726318 }, { "epoch": 2.057707721079348, "step": 7702, "train/loss_ctc": 0.8999595642089844, "train/loss_error": 0.4931638538837433, "train/loss_total": 0.5745230317115784 }, { "epoch": 2.0579748864547156, "step": 7703, "train/loss_ctc": 0.7568626403808594, "train/loss_error": 0.4738966226577759, "train/loss_total": 0.5304898619651794 }, { "epoch": 2.058242051830083, "step": 7704, "train/loss_ctc": 0.5682629346847534, "train/loss_error": 0.47947558760643005, "train/loss_total": 0.4972330629825592 }, { "epoch": 2.05850921720545, "step": 7705, "train/loss_ctc": 0.9327746629714966, "train/loss_error": 0.4629996120929718, "train/loss_total": 0.5569546222686768 }, { "epoch": 2.0587763825808176, "step": 7706, "train/loss_ctc": 0.8431554436683655, "train/loss_error": 0.4193449318408966, "train/loss_total": 0.5041070580482483 }, { "epoch": 2.059043547956185, "step": 7707, "train/loss_ctc": 0.6078627109527588, "train/loss_error": 0.4806564152240753, "train/loss_total": 0.506097674369812 }, { "epoch": 2.059310713331552, "step": 7708, "train/loss_ctc": 0.4661911725997925, "train/loss_error": 0.45221295952796936, "train/loss_total": 0.4550085961818695 }, { "epoch": 2.0595778787069197, "step": 7709, "train/loss_ctc": 1.1177573204040527, "train/loss_error": 0.46236279606819153, "train/loss_total": 0.5934417247772217 }, { "epoch": 2.059845044082287, "grad_norm": 2.341214656829834, "learning_rate": 1.76489446967673e-05, "loss": 0.5323, "step": 7710 }, { "epoch": 2.059845044082287, "step": 7710, "train/loss_ctc": 0.45588263869285583, "train/loss_error": 0.4727616012096405, "train/loss_total": 0.4693858325481415 }, { "epoch": 2.0601122094576545, "step": 7711, "train/loss_ctc": 0.9178574085235596, "train/loss_error": 0.47006306052207947, "train/loss_total": 0.5596219301223755 }, { "epoch": 2.0603793748330217, "step": 7712, 
"train/loss_ctc": 0.9127124547958374, "train/loss_error": 0.42970791459083557, "train/loss_total": 0.5263088345527649 }, { "epoch": 2.060646540208389, "step": 7713, "train/loss_ctc": 0.7013406753540039, "train/loss_error": 0.4310917258262634, "train/loss_total": 0.4851415157318115 }, { "epoch": 2.0609137055837565, "step": 7714, "train/loss_ctc": 0.5858442187309265, "train/loss_error": 0.4707580506801605, "train/loss_total": 0.49377527832984924 }, { "epoch": 2.0611808709591237, "step": 7715, "train/loss_ctc": 0.732433021068573, "train/loss_error": 0.44035816192626953, "train/loss_total": 0.49877315759658813 }, { "epoch": 2.061448036334491, "step": 7716, "train/loss_ctc": 1.1358789205551147, "train/loss_error": 0.4725998640060425, "train/loss_total": 0.605255663394928 }, { "epoch": 2.0617152017098586, "step": 7717, "train/loss_ctc": 0.6986634731292725, "train/loss_error": 0.4699420928955078, "train/loss_total": 0.5156863927841187 }, { "epoch": 2.0619823670852258, "step": 7718, "train/loss_ctc": 0.23423346877098083, "train/loss_error": 0.4283456802368164, "train/loss_total": 0.3895232379436493 }, { "epoch": 2.062249532460593, "step": 7719, "train/loss_ctc": 0.6754150390625, "train/loss_error": 0.46372684836387634, "train/loss_total": 0.5060644745826721 }, { "epoch": 2.0625166978359606, "grad_norm": 1.4149596691131592, "learning_rate": 1.7632914774245258e-05, "loss": 0.505, "step": 7720 }, { "epoch": 2.0625166978359606, "step": 7720, "train/loss_ctc": 0.9916902780532837, "train/loss_error": 0.4843158721923828, "train/loss_total": 0.585790753364563 }, { "epoch": 2.062783863211328, "step": 7721, "train/loss_ctc": 0.4289448857307434, "train/loss_error": 0.3697773814201355, "train/loss_total": 0.3816108703613281 }, { "epoch": 2.063051028586695, "step": 7722, "train/loss_ctc": 0.7506929636001587, "train/loss_error": 0.5071246027946472, "train/loss_total": 0.5558382868766785 }, { "epoch": 2.0633181939620626, "step": 7723, "train/loss_ctc": 1.1418004035949707, 
"train/loss_error": 0.47147613763809204, "train/loss_total": 0.6055409908294678 }, { "epoch": 2.06358535933743, "step": 7724, "train/loss_ctc": 1.1204400062561035, "train/loss_error": 0.47202497720718384, "train/loss_total": 0.6017079949378967 }, { "epoch": 2.063852524712797, "step": 7725, "train/loss_ctc": 0.6692732572555542, "train/loss_error": 0.45768508315086365, "train/loss_total": 0.5000027418136597 }, { "epoch": 2.0641196900881646, "step": 7726, "train/loss_ctc": 1.3944125175476074, "train/loss_error": 0.523611307144165, "train/loss_total": 0.6977715492248535 }, { "epoch": 2.064386855463532, "step": 7727, "train/loss_ctc": 1.392589807510376, "train/loss_error": 0.45386236906051636, "train/loss_total": 0.6416078805923462 }, { "epoch": 2.0646540208388995, "step": 7728, "train/loss_ctc": 0.7368162870407104, "train/loss_error": 0.4170038402080536, "train/loss_total": 0.48096632957458496 }, { "epoch": 2.0649211862142667, "step": 7729, "train/loss_ctc": 0.35946640372276306, "train/loss_error": 0.4135693311691284, "train/loss_total": 0.4027487337589264 }, { "epoch": 2.065188351589634, "grad_norm": 3.3918299674987793, "learning_rate": 1.761688485172322e-05, "loss": 0.5454, "step": 7730 }, { "epoch": 2.065188351589634, "step": 7730, "train/loss_ctc": 0.7579636573791504, "train/loss_error": 0.43039730191230774, "train/loss_total": 0.4959105849266052 }, { "epoch": 2.0654555169650015, "step": 7731, "train/loss_ctc": 0.9021443128585815, "train/loss_error": 0.46854692697525024, "train/loss_total": 0.5552663803100586 }, { "epoch": 2.0657226823403687, "step": 7732, "train/loss_ctc": 0.46307235956192017, "train/loss_error": 0.3606002628803253, "train/loss_total": 0.38109469413757324 }, { "epoch": 2.065989847715736, "step": 7733, "train/loss_ctc": 0.6342571973800659, "train/loss_error": 0.45592793822288513, "train/loss_total": 0.4915938079357147 }, { "epoch": 2.0662570130911035, "step": 7734, "train/loss_ctc": 1.4923065900802612, "train/loss_error": 0.4032150208950043, 
"train/loss_total": 0.6210333108901978 }, { "epoch": 2.0665241784664707, "step": 7735, "train/loss_ctc": 1.2808277606964111, "train/loss_error": 0.4489307403564453, "train/loss_total": 0.6153101921081543 }, { "epoch": 2.066791343841838, "step": 7736, "train/loss_ctc": 0.46228691935539246, "train/loss_error": 0.40161004662513733, "train/loss_total": 0.4137454330921173 }, { "epoch": 2.0670585092172056, "step": 7737, "train/loss_ctc": 0.6883416771888733, "train/loss_error": 0.5109239816665649, "train/loss_total": 0.5464075207710266 }, { "epoch": 2.0673256745925728, "step": 7738, "train/loss_ctc": 0.55143141746521, "train/loss_error": 0.46820303797721863, "train/loss_total": 0.4848487377166748 }, { "epoch": 2.06759283996794, "step": 7739, "train/loss_ctc": 1.0226936340332031, "train/loss_error": 0.47556114196777344, "train/loss_total": 0.5849876403808594 }, { "epoch": 2.0678600053433076, "grad_norm": 1.3336198329925537, "learning_rate": 1.7600854929201178e-05, "loss": 0.519, "step": 7740 }, { "epoch": 2.0678600053433076, "step": 7740, "train/loss_ctc": 0.9952367544174194, "train/loss_error": 0.5135729908943176, "train/loss_total": 0.6099057197570801 }, { "epoch": 2.068127170718675, "step": 7741, "train/loss_ctc": 0.8307009935379028, "train/loss_error": 0.4567975401878357, "train/loss_total": 0.5315782427787781 }, { "epoch": 2.068394336094042, "step": 7742, "train/loss_ctc": 0.4728226065635681, "train/loss_error": 0.46380066871643066, "train/loss_total": 0.46560508012771606 }, { "epoch": 2.0686615014694096, "step": 7743, "train/loss_ctc": 0.8023425340652466, "train/loss_error": 0.4203304946422577, "train/loss_total": 0.4967328906059265 }, { "epoch": 2.068928666844777, "step": 7744, "train/loss_ctc": 0.884364128112793, "train/loss_error": 0.4477565884590149, "train/loss_total": 0.5350781083106995 }, { "epoch": 2.0691958322201445, "step": 7745, "train/loss_ctc": 0.7525520324707031, "train/loss_error": 0.44037625193595886, "train/loss_total": 0.5028114318847656 }, { 
"epoch": 2.0694629975955117, "step": 7746, "train/loss_ctc": 0.391740083694458, "train/loss_error": 0.40393608808517456, "train/loss_total": 0.40149688720703125 }, { "epoch": 2.069730162970879, "step": 7747, "train/loss_ctc": 0.7244817018508911, "train/loss_error": 0.4518779218196869, "train/loss_total": 0.5063986778259277 }, { "epoch": 2.0699973283462465, "step": 7748, "train/loss_ctc": 0.7033506631851196, "train/loss_error": 0.3373747766017914, "train/loss_total": 0.410569965839386 }, { "epoch": 2.0702644937216137, "step": 7749, "train/loss_ctc": 0.877295732498169, "train/loss_error": 0.48545539379119873, "train/loss_total": 0.5638234615325928 }, { "epoch": 2.070531659096981, "grad_norm": 1.3101184368133545, "learning_rate": 1.7584825006679136e-05, "loss": 0.5024, "step": 7750 }, { "epoch": 2.070531659096981, "step": 7750, "train/loss_ctc": 0.7828429937362671, "train/loss_error": 0.46375760436058044, "train/loss_total": 0.5275747179985046 }, { "epoch": 2.0707988244723485, "step": 7751, "train/loss_ctc": 0.4472050666809082, "train/loss_error": 0.47676149010658264, "train/loss_total": 0.47085022926330566 }, { "epoch": 2.0710659898477157, "step": 7752, "train/loss_ctc": 0.7609521150588989, "train/loss_error": 0.46316948533058167, "train/loss_total": 0.5227259993553162 }, { "epoch": 2.071333155223083, "step": 7753, "train/loss_ctc": 0.8221381902694702, "train/loss_error": 0.47151827812194824, "train/loss_total": 0.5416423082351685 }, { "epoch": 2.0716003205984506, "step": 7754, "train/loss_ctc": 0.8446786403656006, "train/loss_error": 0.4937179684638977, "train/loss_total": 0.5639101266860962 }, { "epoch": 2.0718674859738178, "step": 7755, "train/loss_ctc": 0.47206729650497437, "train/loss_error": 0.5352491736412048, "train/loss_total": 0.5226128101348877 }, { "epoch": 2.072134651349185, "step": 7756, "train/loss_ctc": 0.7513571977615356, "train/loss_error": 0.49082571268081665, "train/loss_total": 0.5429320335388184 }, { "epoch": 2.0724018167245526, "step": 7757, 
"train/loss_ctc": 0.6578391194343567, "train/loss_error": 0.4313397705554962, "train/loss_total": 0.47663962841033936 }, { "epoch": 2.07266898209992, "step": 7758, "train/loss_ctc": 0.7227073907852173, "train/loss_error": 0.41141921281814575, "train/loss_total": 0.473676860332489 }, { "epoch": 2.0729361474752874, "step": 7759, "train/loss_ctc": 0.5500473976135254, "train/loss_error": 0.514708936214447, "train/loss_total": 0.5217766165733337 }, { "epoch": 2.0732033128506546, "grad_norm": 5.4217329025268555, "learning_rate": 1.7568795084157094e-05, "loss": 0.5164, "step": 7760 }, { "epoch": 2.0732033128506546, "step": 7760, "train/loss_ctc": 0.7526189088821411, "train/loss_error": 0.4307928681373596, "train/loss_total": 0.4951580762863159 }, { "epoch": 2.073470478226022, "step": 7761, "train/loss_ctc": 1.6798183917999268, "train/loss_error": 0.47634440660476685, "train/loss_total": 0.7170392274856567 }, { "epoch": 2.0737376436013895, "step": 7762, "train/loss_ctc": 0.616897702217102, "train/loss_error": 0.4097592532634735, "train/loss_total": 0.4511869549751282 }, { "epoch": 2.0740048089767567, "step": 7763, "train/loss_ctc": 0.9753475785255432, "train/loss_error": 0.515919029712677, "train/loss_total": 0.6078047752380371 }, { "epoch": 2.074271974352124, "step": 7764, "train/loss_ctc": 0.5657985210418701, "train/loss_error": 0.45508989691734314, "train/loss_total": 0.4772316515445709 }, { "epoch": 2.0745391397274915, "step": 7765, "train/loss_ctc": 0.620296835899353, "train/loss_error": 0.491545706987381, "train/loss_total": 0.5172959566116333 }, { "epoch": 2.0748063051028587, "step": 7766, "train/loss_ctc": 1.1986706256866455, "train/loss_error": 0.45944535732269287, "train/loss_total": 0.6072904467582703 }, { "epoch": 2.075073470478226, "step": 7767, "train/loss_ctc": 0.5903031826019287, "train/loss_error": 0.44163012504577637, "train/loss_total": 0.4713647663593292 }, { "epoch": 2.0753406358535935, "step": 7768, "train/loss_ctc": 1.0940014123916626, 
"train/loss_error": 0.5028878450393677, "train/loss_total": 0.6211105585098267 }, { "epoch": 2.0756078012289607, "step": 7769, "train/loss_ctc": 1.1958061456680298, "train/loss_error": 0.5072411894798279, "train/loss_total": 0.6449542045593262 }, { "epoch": 2.075874966604328, "grad_norm": 10.421516418457031, "learning_rate": 1.7552765161635052e-05, "loss": 0.561, "step": 7770 }, { "epoch": 2.075874966604328, "step": 7770, "train/loss_ctc": 0.31527528166770935, "train/loss_error": 0.4466189444065094, "train/loss_total": 0.42035022377967834 }, { "epoch": 2.0761421319796955, "step": 7771, "train/loss_ctc": 1.2531688213348389, "train/loss_error": 0.5029841661453247, "train/loss_total": 0.6530210971832275 }, { "epoch": 2.0764092973550627, "step": 7772, "train/loss_ctc": 1.4348053932189941, "train/loss_error": 0.4611194133758545, "train/loss_total": 0.6558566093444824 }, { "epoch": 2.07667646273043, "step": 7773, "train/loss_ctc": 0.4131111800670624, "train/loss_error": 0.48538556694984436, "train/loss_total": 0.47093069553375244 }, { "epoch": 2.0769436281057976, "step": 7774, "train/loss_ctc": 0.7555910348892212, "train/loss_error": 0.48587748408317566, "train/loss_total": 0.5398201942443848 }, { "epoch": 2.0772107934811648, "step": 7775, "train/loss_ctc": 1.1223034858703613, "train/loss_error": 0.46881914138793945, "train/loss_total": 0.5995160341262817 }, { "epoch": 2.077477958856532, "step": 7776, "train/loss_ctc": 0.950919508934021, "train/loss_error": 0.47964274883270264, "train/loss_total": 0.5738980770111084 }, { "epoch": 2.0777451242318996, "step": 7777, "train/loss_ctc": 0.4536922574043274, "train/loss_error": 0.46234774589538574, "train/loss_total": 0.4606166481971741 }, { "epoch": 2.078012289607267, "step": 7778, "train/loss_ctc": 1.498244285583496, "train/loss_error": 0.5015881061553955, "train/loss_total": 0.7009193301200867 }, { "epoch": 2.0782794549826344, "step": 7779, "train/loss_ctc": 0.7675399780273438, "train/loss_error": 0.5217827558517456, 
"train/loss_total": 0.5709341764450073 }, { "epoch": 2.0785466203580016, "grad_norm": 2.59846830368042, "learning_rate": 1.753673523911301e-05, "loss": 0.5646, "step": 7780 }, { "epoch": 2.0785466203580016, "step": 7780, "train/loss_ctc": 0.6631746292114258, "train/loss_error": 0.4766644239425659, "train/loss_total": 0.51396644115448 }, { "epoch": 2.078813785733369, "step": 7781, "train/loss_ctc": 0.3612455725669861, "train/loss_error": 0.41054674983024597, "train/loss_total": 0.4006865322589874 }, { "epoch": 2.0790809511087365, "step": 7782, "train/loss_ctc": 0.9174269437789917, "train/loss_error": 0.49274951219558716, "train/loss_total": 0.5776849985122681 }, { "epoch": 2.0793481164841037, "step": 7783, "train/loss_ctc": 0.7350155711174011, "train/loss_error": 0.43996661901474, "train/loss_total": 0.4989764094352722 }, { "epoch": 2.079615281859471, "step": 7784, "train/loss_ctc": 0.6108945608139038, "train/loss_error": 0.4315444529056549, "train/loss_total": 0.4674144685268402 }, { "epoch": 2.0798824472348385, "step": 7785, "train/loss_ctc": 0.9669349193572998, "train/loss_error": 0.45148491859436035, "train/loss_total": 0.5545749068260193 }, { "epoch": 2.0801496126102057, "step": 7786, "train/loss_ctc": 0.8016208410263062, "train/loss_error": 0.5477456450462341, "train/loss_total": 0.5985206961631775 }, { "epoch": 2.080416777985573, "step": 7787, "train/loss_ctc": 0.4330041706562042, "train/loss_error": 0.433706670999527, "train/loss_total": 0.4335661828517914 }, { "epoch": 2.0806839433609405, "step": 7788, "train/loss_ctc": 0.8593560457229614, "train/loss_error": 0.4839470088481903, "train/loss_total": 0.5590288639068604 }, { "epoch": 2.0809511087363077, "step": 7789, "train/loss_ctc": 0.4467380940914154, "train/loss_error": 0.45330673456192017, "train/loss_total": 0.45199301838874817 }, { "epoch": 2.081218274111675, "grad_norm": 2.3205173015594482, "learning_rate": 1.752070531659097e-05, "loss": 0.5056, "step": 7790 }, { "epoch": 2.081218274111675, "step": 
7790, "train/loss_ctc": 0.7411700487136841, "train/loss_error": 0.4814678132534027, "train/loss_total": 0.5334082841873169 }, { "epoch": 2.0814854394870426, "step": 7791, "train/loss_ctc": 0.4366224706172943, "train/loss_error": 0.4597213566303253, "train/loss_total": 0.4551015794277191 }, { "epoch": 2.0817526048624098, "step": 7792, "train/loss_ctc": 0.41025274991989136, "train/loss_error": 0.41957587003707886, "train/loss_total": 0.4177112579345703 }, { "epoch": 2.0820197702377774, "step": 7793, "train/loss_ctc": 0.9360871315002441, "train/loss_error": 0.5388824939727783, "train/loss_total": 0.6183234453201294 }, { "epoch": 2.0822869356131446, "step": 7794, "train/loss_ctc": 1.0140540599822998, "train/loss_error": 0.47804614901542664, "train/loss_total": 0.5852477550506592 }, { "epoch": 2.082554100988512, "step": 7795, "train/loss_ctc": 0.2874108552932739, "train/loss_error": 0.514883816242218, "train/loss_total": 0.4693892300128937 }, { "epoch": 2.0828212663638794, "step": 7796, "train/loss_ctc": 0.624118983745575, "train/loss_error": 0.42702072858810425, "train/loss_total": 0.4664404094219208 }, { "epoch": 2.0830884317392466, "step": 7797, "train/loss_ctc": 0.4143804907798767, "train/loss_error": 0.4338480234146118, "train/loss_total": 0.42995452880859375 }, { "epoch": 2.083355597114614, "step": 7798, "train/loss_ctc": 0.5212183594703674, "train/loss_error": 0.4274890422821045, "train/loss_total": 0.44623491168022156 }, { "epoch": 2.0836227624899815, "step": 7799, "train/loss_ctc": 1.0718026161193848, "train/loss_error": 0.4937540590763092, "train/loss_total": 0.6093637943267822 }, { "epoch": 2.0838899278653487, "grad_norm": 3.275207042694092, "learning_rate": 1.750467539406893e-05, "loss": 0.5031, "step": 7800 }, { "epoch": 2.0838899278653487, "step": 7800, "train/loss_ctc": 0.4528385400772095, "train/loss_error": 0.4565785825252533, "train/loss_total": 0.45583057403564453 }, { "epoch": 2.084157093240716, "step": 7801, "train/loss_ctc": 0.9861735105514526, 
"train/loss_error": 0.41677188873291016, "train/loss_total": 0.5306522250175476 }, { "epoch": 2.0844242586160835, "step": 7802, "train/loss_ctc": 1.1337419748306274, "train/loss_error": 0.4558970630168915, "train/loss_total": 0.5914660692214966 }, { "epoch": 2.0846914239914507, "step": 7803, "train/loss_ctc": 2.2292637825012207, "train/loss_error": 0.44777336716651917, "train/loss_total": 0.8040714263916016 }, { "epoch": 2.084958589366818, "step": 7804, "train/loss_ctc": 1.0018852949142456, "train/loss_error": 0.5199794769287109, "train/loss_total": 0.6163606643676758 }, { "epoch": 2.0852257547421855, "step": 7805, "train/loss_ctc": 1.1899741888046265, "train/loss_error": 0.47249144315719604, "train/loss_total": 0.61598801612854 }, { "epoch": 2.0854929201175527, "step": 7806, "train/loss_ctc": 1.0633968114852905, "train/loss_error": 0.48961350321769714, "train/loss_total": 0.6043701767921448 }, { "epoch": 2.08576008549292, "step": 7807, "train/loss_ctc": 0.6331191658973694, "train/loss_error": 0.39850038290023804, "train/loss_total": 0.4454241394996643 }, { "epoch": 2.0860272508682876, "step": 7808, "train/loss_ctc": 0.6980565786361694, "train/loss_error": 0.39345234632492065, "train/loss_total": 0.45437318086624146 }, { "epoch": 2.0862944162436547, "step": 7809, "train/loss_ctc": 0.7635462284088135, "train/loss_error": 0.465631902217865, "train/loss_total": 0.5252147912979126 }, { "epoch": 2.0865615816190224, "grad_norm": 1.9392448663711548, "learning_rate": 1.7488645471546888e-05, "loss": 0.5644, "step": 7810 }, { "epoch": 2.0865615816190224, "step": 7810, "train/loss_ctc": 0.8373023271560669, "train/loss_error": 0.4464559257030487, "train/loss_total": 0.5246251821517944 }, { "epoch": 2.0868287469943896, "step": 7811, "train/loss_ctc": 0.4330207109451294, "train/loss_error": 0.4284061789512634, "train/loss_total": 0.4293290972709656 }, { "epoch": 2.087095912369757, "step": 7812, "train/loss_ctc": 0.6064413785934448, "train/loss_error": 0.46486449241638184, 
"train/loss_total": 0.4931798577308655 }, { "epoch": 2.0873630777451244, "step": 7813, "train/loss_ctc": 1.0124483108520508, "train/loss_error": 0.51490318775177, "train/loss_total": 0.6144121885299683 }, { "epoch": 2.0876302431204916, "step": 7814, "train/loss_ctc": 0.7890220284461975, "train/loss_error": 0.43112555146217346, "train/loss_total": 0.5027048587799072 }, { "epoch": 2.087897408495859, "step": 7815, "train/loss_ctc": 0.766270101070404, "train/loss_error": 0.489670068025589, "train/loss_total": 0.544990062713623 }, { "epoch": 2.0881645738712264, "step": 7816, "train/loss_ctc": 0.8262145519256592, "train/loss_error": 0.4358685314655304, "train/loss_total": 0.5139377117156982 }, { "epoch": 2.0884317392465936, "step": 7817, "train/loss_ctc": 1.428694248199463, "train/loss_error": 0.4828100800514221, "train/loss_total": 0.6719869375228882 }, { "epoch": 2.088698904621961, "step": 7818, "train/loss_ctc": 1.1389762163162231, "train/loss_error": 0.49260789155960083, "train/loss_total": 0.6218816041946411 }, { "epoch": 2.0889660699973285, "step": 7819, "train/loss_ctc": 0.8387287855148315, "train/loss_error": 0.4644220471382141, "train/loss_total": 0.5392833948135376 }, { "epoch": 2.0892332353726957, "grad_norm": 2.0663115978240967, "learning_rate": 1.7472615549024846e-05, "loss": 0.5456, "step": 7820 }, { "epoch": 2.0892332353726957, "step": 7820, "train/loss_ctc": 0.5542889833450317, "train/loss_error": 0.4572025537490845, "train/loss_total": 0.4766198396682739 }, { "epoch": 2.089500400748063, "step": 7821, "train/loss_ctc": 0.6495280265808105, "train/loss_error": 0.4586263597011566, "train/loss_total": 0.49680671095848083 }, { "epoch": 2.0897675661234305, "step": 7822, "train/loss_ctc": 0.8597867488861084, "train/loss_error": 0.4842597544193268, "train/loss_total": 0.5593651533126831 }, { "epoch": 2.0900347314987977, "step": 7823, "train/loss_ctc": 0.7107024192810059, "train/loss_error": 0.429855078458786, "train/loss_total": 0.48602455854415894 }, { "epoch": 
2.090301896874165, "step": 7824, "train/loss_ctc": 1.0503435134887695, "train/loss_error": 0.4551272988319397, "train/loss_total": 0.5741705894470215 }, { "epoch": 2.0905690622495325, "step": 7825, "train/loss_ctc": 0.7005356550216675, "train/loss_error": 0.44356316328048706, "train/loss_total": 0.49495768547058105 }, { "epoch": 2.0908362276248997, "step": 7826, "train/loss_ctc": 0.3278016149997711, "train/loss_error": 0.4619668126106262, "train/loss_total": 0.43513375520706177 }, { "epoch": 2.0911033930002674, "step": 7827, "train/loss_ctc": 0.44657909870147705, "train/loss_error": 0.39095738530158997, "train/loss_total": 0.40208175778388977 }, { "epoch": 2.0913705583756346, "step": 7828, "train/loss_ctc": 0.2779444456100464, "train/loss_error": 0.4326947331428528, "train/loss_total": 0.40174469351768494 }, { "epoch": 2.0916377237510018, "step": 7829, "train/loss_ctc": 0.4244082570075989, "train/loss_error": 0.39436501264572144, "train/loss_total": 0.4003736674785614 }, { "epoch": 2.0919048891263694, "grad_norm": 3.417827606201172, "learning_rate": 1.7456585626502804e-05, "loss": 0.4727, "step": 7830 }, { "epoch": 2.0919048891263694, "step": 7830, "train/loss_ctc": 0.3806458115577698, "train/loss_error": 0.40688884258270264, "train/loss_total": 0.40164023637771606 }, { "epoch": 2.0921720545017366, "step": 7831, "train/loss_ctc": 0.6845046281814575, "train/loss_error": 0.4106319844722748, "train/loss_total": 0.46540653705596924 }, { "epoch": 2.092439219877104, "step": 7832, "train/loss_ctc": 0.4064362943172455, "train/loss_error": 0.4416716396808624, "train/loss_total": 0.434624582529068 }, { "epoch": 2.0927063852524714, "step": 7833, "train/loss_ctc": 0.362068772315979, "train/loss_error": 0.5649132132530212, "train/loss_total": 0.5243443250656128 }, { "epoch": 2.0929735506278386, "step": 7834, "train/loss_ctc": 0.5187718868255615, "train/loss_error": 0.39461809396743774, "train/loss_total": 0.4194488525390625 }, { "epoch": 2.093240716003206, "step": 7835, 
"train/loss_ctc": 1.282064437866211, "train/loss_error": 0.5187811851501465, "train/loss_total": 0.6714378595352173 }, { "epoch": 2.0935078813785735, "step": 7836, "train/loss_ctc": 1.2050926685333252, "train/loss_error": 0.4253072738647461, "train/loss_total": 0.5812643766403198 }, { "epoch": 2.0937750467539407, "step": 7837, "train/loss_ctc": 0.4840417206287384, "train/loss_error": 0.5643969178199768, "train/loss_total": 0.5483258962631226 }, { "epoch": 2.094042212129308, "step": 7838, "train/loss_ctc": 1.091644048690796, "train/loss_error": 0.4760974943637848, "train/loss_total": 0.599206805229187 }, { "epoch": 2.0943093775046755, "step": 7839, "train/loss_ctc": 0.5022313594818115, "train/loss_error": 0.48867931962013245, "train/loss_total": 0.49138975143432617 }, { "epoch": 2.0945765428800427, "grad_norm": 4.232266902923584, "learning_rate": 1.7440555703980765e-05, "loss": 0.5137, "step": 7840 }, { "epoch": 2.0945765428800427, "step": 7840, "train/loss_ctc": 1.0670627355575562, "train/loss_error": 0.4760979115962982, "train/loss_total": 0.5942908525466919 }, { "epoch": 2.09484370825541, "step": 7841, "train/loss_ctc": 0.721455991268158, "train/loss_error": 0.4211895763874054, "train/loss_total": 0.4812428951263428 }, { "epoch": 2.0951108736307775, "step": 7842, "train/loss_ctc": 1.356126308441162, "train/loss_error": 0.4906948506832123, "train/loss_total": 0.6637811660766602 }, { "epoch": 2.0953780390061447, "step": 7843, "train/loss_ctc": 0.5846373438835144, "train/loss_error": 0.4507932662963867, "train/loss_total": 0.4775620996952057 }, { "epoch": 2.0956452043815124, "step": 7844, "train/loss_ctc": 1.128350853919983, "train/loss_error": 0.4742564857006073, "train/loss_total": 0.6050753593444824 }, { "epoch": 2.0959123697568796, "step": 7845, "train/loss_ctc": 1.2593573331832886, "train/loss_error": 0.5663706064224243, "train/loss_total": 0.7049679756164551 }, { "epoch": 2.0961795351322468, "step": 7846, "train/loss_ctc": 0.9554042220115662, 
"train/loss_error": 0.5033424496650696, "train/loss_total": 0.5937548279762268 }, { "epoch": 2.0964467005076144, "step": 7847, "train/loss_ctc": 0.6933314800262451, "train/loss_error": 0.4640045166015625, "train/loss_total": 0.5098699331283569 }, { "epoch": 2.0967138658829816, "step": 7848, "train/loss_ctc": 0.4390476942062378, "train/loss_error": 0.42015957832336426, "train/loss_total": 0.42393720149993896 }, { "epoch": 2.096981031258349, "step": 7849, "train/loss_ctc": 0.7416086792945862, "train/loss_error": 0.4701300859451294, "train/loss_total": 0.5244258046150208 }, { "epoch": 2.0972481966337164, "grad_norm": 1.1834394931793213, "learning_rate": 1.7424525781458723e-05, "loss": 0.5579, "step": 7850 }, { "epoch": 2.0972481966337164, "step": 7850, "train/loss_ctc": 0.5937870740890503, "train/loss_error": 0.4590269923210144, "train/loss_total": 0.48597902059555054 }, { "epoch": 2.0975153620090836, "step": 7851, "train/loss_ctc": 0.49084341526031494, "train/loss_error": 0.47274768352508545, "train/loss_total": 0.4763668179512024 }, { "epoch": 2.097782527384451, "step": 7852, "train/loss_ctc": 0.5823696851730347, "train/loss_error": 0.4362216889858246, "train/loss_total": 0.46545130014419556 }, { "epoch": 2.0980496927598185, "step": 7853, "train/loss_ctc": 0.5127621293067932, "train/loss_error": 0.548372745513916, "train/loss_total": 0.5412506461143494 }, { "epoch": 2.0983168581351856, "step": 7854, "train/loss_ctc": 0.5032193660736084, "train/loss_error": 0.4771433472633362, "train/loss_total": 0.48235854506492615 }, { "epoch": 2.098584023510553, "step": 7855, "train/loss_ctc": 0.7390735149383545, "train/loss_error": 0.42880937457084656, "train/loss_total": 0.4908621907234192 }, { "epoch": 2.0988511888859205, "step": 7856, "train/loss_ctc": 0.7716688513755798, "train/loss_error": 0.45946887135505676, "train/loss_total": 0.5219088792800903 }, { "epoch": 2.0991183542612877, "step": 7857, "train/loss_ctc": 0.44114553928375244, "train/loss_error": 0.4602982699871063, 
"train/loss_total": 0.45646774768829346 }, { "epoch": 2.0993855196366553, "step": 7858, "train/loss_ctc": 0.3171338140964508, "train/loss_error": 0.4345020651817322, "train/loss_total": 0.4110284149646759 }, { "epoch": 2.0996526850120225, "step": 7859, "train/loss_ctc": 0.47855010628700256, "train/loss_error": 0.5102782249450684, "train/loss_total": 0.5039325952529907 }, { "epoch": 2.0999198503873897, "grad_norm": 10.531708717346191, "learning_rate": 1.740849585893668e-05, "loss": 0.4836, "step": 7860 }, { "epoch": 2.0999198503873897, "step": 7860, "train/loss_ctc": 0.3484024107456207, "train/loss_error": 0.45464834570884705, "train/loss_total": 0.43339917063713074 }, { "epoch": 2.1001870157627573, "step": 7861, "train/loss_ctc": 0.5758811235427856, "train/loss_error": 0.45723122358322144, "train/loss_total": 0.4809612035751343 }, { "epoch": 2.1004541811381245, "step": 7862, "train/loss_ctc": 1.17843496799469, "train/loss_error": 0.4986859858036041, "train/loss_total": 0.6346358060836792 }, { "epoch": 2.1007213465134917, "step": 7863, "train/loss_ctc": 0.9163414239883423, "train/loss_error": 0.4576519727706909, "train/loss_total": 0.5493898987770081 }, { "epoch": 2.1009885118888594, "step": 7864, "train/loss_ctc": 0.5639105439186096, "train/loss_error": 0.46771329641342163, "train/loss_total": 0.4869527518749237 }, { "epoch": 2.1012556772642266, "step": 7865, "train/loss_ctc": 0.36444157361984253, "train/loss_error": 0.4092748463153839, "train/loss_total": 0.40030819177627563 }, { "epoch": 2.1015228426395938, "step": 7866, "train/loss_ctc": 0.7569151520729065, "train/loss_error": 0.48131388425827026, "train/loss_total": 0.5364341139793396 }, { "epoch": 2.1017900080149614, "step": 7867, "train/loss_ctc": 0.8239836692810059, "train/loss_error": 0.3869698941707611, "train/loss_total": 0.47437265515327454 }, { "epoch": 2.1020571733903286, "step": 7868, "train/loss_ctc": 0.5794710516929626, "train/loss_error": 0.47233909368515015, "train/loss_total": 0.4937654733657837 
}, { "epoch": 2.102324338765696, "step": 7869, "train/loss_ctc": 0.9188975691795349, "train/loss_error": 0.5473804473876953, "train/loss_total": 0.6216838955879211 }, { "epoch": 2.1025915041410634, "grad_norm": 1.6832997798919678, "learning_rate": 1.739246593641464e-05, "loss": 0.5112, "step": 7870 }, { "epoch": 2.1025915041410634, "step": 7870, "train/loss_ctc": 0.8543190956115723, "train/loss_error": 0.4661441147327423, "train/loss_total": 0.5437791347503662 }, { "epoch": 2.1028586695164306, "step": 7871, "train/loss_ctc": 0.9017041921615601, "train/loss_error": 0.50130695104599, "train/loss_total": 0.581386387348175 }, { "epoch": 2.103125834891798, "step": 7872, "train/loss_ctc": 0.4959005117416382, "train/loss_error": 0.4955638349056244, "train/loss_total": 0.4956311881542206 }, { "epoch": 2.1033930002671655, "step": 7873, "train/loss_ctc": 0.48412322998046875, "train/loss_error": 0.4614323377609253, "train/loss_total": 0.465970516204834 }, { "epoch": 2.1036601656425327, "step": 7874, "train/loss_ctc": 0.7906008362770081, "train/loss_error": 0.45713427662849426, "train/loss_total": 0.5238276124000549 }, { "epoch": 2.1039273310179, "step": 7875, "train/loss_ctc": 0.5596067309379578, "train/loss_error": 0.4751710593700409, "train/loss_total": 0.4920581877231598 }, { "epoch": 2.1041944963932675, "step": 7876, "train/loss_ctc": 0.7645986080169678, "train/loss_error": 0.4538705348968506, "train/loss_total": 0.5160161852836609 }, { "epoch": 2.1044616617686347, "step": 7877, "train/loss_ctc": 0.6742846369743347, "train/loss_error": 0.4299246370792389, "train/loss_total": 0.47879666090011597 }, { "epoch": 2.1047288271440023, "step": 7878, "train/loss_ctc": 0.5638681650161743, "train/loss_error": 0.46200770139694214, "train/loss_total": 0.4823797941207886 }, { "epoch": 2.1049959925193695, "step": 7879, "train/loss_ctc": 0.9597058892250061, "train/loss_error": 0.4623934328556061, "train/loss_total": 0.5618559122085571 }, { "epoch": 2.1052631578947367, "grad_norm": 
2.1941137313842773, "learning_rate": 1.7376436013892598e-05, "loss": 0.5142, "step": 7880 }, { "epoch": 2.1052631578947367, "step": 7880, "train/loss_ctc": 1.2474682331085205, "train/loss_error": 0.48923367261886597, "train/loss_total": 0.6408805847167969 }, { "epoch": 2.1055303232701044, "step": 7881, "train/loss_ctc": 0.9212722182273865, "train/loss_error": 0.4463938772678375, "train/loss_total": 0.5413695573806763 }, { "epoch": 2.1057974886454716, "step": 7882, "train/loss_ctc": 0.6973792314529419, "train/loss_error": 0.493991881608963, "train/loss_total": 0.5346693992614746 }, { "epoch": 2.1060646540208388, "step": 7883, "train/loss_ctc": 0.4435122013092041, "train/loss_error": 0.4443376064300537, "train/loss_total": 0.44417253136634827 }, { "epoch": 2.1063318193962064, "step": 7884, "train/loss_ctc": 1.2792811393737793, "train/loss_error": 0.48881155252456665, "train/loss_total": 0.6469054818153381 }, { "epoch": 2.1065989847715736, "step": 7885, "train/loss_ctc": 0.8839437961578369, "train/loss_error": 0.42050907015800476, "train/loss_total": 0.5131960511207581 }, { "epoch": 2.106866150146941, "step": 7886, "train/loss_ctc": 1.0940325260162354, "train/loss_error": 0.49645674228668213, "train/loss_total": 0.6159719228744507 }, { "epoch": 2.1071333155223084, "step": 7887, "train/loss_ctc": 0.6712153553962708, "train/loss_error": 0.43950963020324707, "train/loss_total": 0.4858507812023163 }, { "epoch": 2.1074004808976756, "step": 7888, "train/loss_ctc": 1.5324013233184814, "train/loss_error": 0.42397499084472656, "train/loss_total": 0.6456602811813354 }, { "epoch": 2.107667646273043, "step": 7889, "train/loss_ctc": 0.7031779885292053, "train/loss_error": 0.46964579820632935, "train/loss_total": 0.5163522362709045 }, { "epoch": 2.1079348116484105, "grad_norm": 1.8448513746261597, "learning_rate": 1.736040609137056e-05, "loss": 0.5585, "step": 7890 }, { "epoch": 2.1079348116484105, "step": 7890, "train/loss_ctc": 0.5673099756240845, "train/loss_error": 
0.4670478403568268, "train/loss_total": 0.4871002733707428 }, { "epoch": 2.1082019770237777, "step": 7891, "train/loss_ctc": 0.9033156037330627, "train/loss_error": 0.477770060300827, "train/loss_total": 0.562879204750061 }, { "epoch": 2.1084691423991453, "step": 7892, "train/loss_ctc": 0.5819301009178162, "train/loss_error": 0.5130065679550171, "train/loss_total": 0.5267912745475769 }, { "epoch": 2.1087363077745125, "step": 7893, "train/loss_ctc": 0.8308601379394531, "train/loss_error": 0.4654923677444458, "train/loss_total": 0.5385659337043762 }, { "epoch": 2.1090034731498797, "step": 7894, "train/loss_ctc": 0.6939026713371277, "train/loss_error": 0.4749169945716858, "train/loss_total": 0.5187141299247742 }, { "epoch": 2.1092706385252473, "step": 7895, "train/loss_ctc": 0.5582834482192993, "train/loss_error": 0.44870397448539734, "train/loss_total": 0.47061988711357117 }, { "epoch": 2.1095378039006145, "step": 7896, "train/loss_ctc": 0.822742223739624, "train/loss_error": 0.39823785424232483, "train/loss_total": 0.4831387400627136 }, { "epoch": 2.1098049692759817, "step": 7897, "train/loss_ctc": 0.27988290786743164, "train/loss_error": 0.4001768231391907, "train/loss_total": 0.3761180341243744 }, { "epoch": 2.1100721346513494, "step": 7898, "train/loss_ctc": 0.8938060998916626, "train/loss_error": 0.471166729927063, "train/loss_total": 0.5556946396827698 }, { "epoch": 2.1103393000267165, "step": 7899, "train/loss_ctc": 0.9236247539520264, "train/loss_error": 0.4480711817741394, "train/loss_total": 0.5431818962097168 }, { "epoch": 2.1106064654020837, "grad_norm": 3.029996871948242, "learning_rate": 1.734437616884852e-05, "loss": 0.5063, "step": 7900 }, { "epoch": 2.1106064654020837, "step": 7900, "train/loss_ctc": 0.9814890027046204, "train/loss_error": 0.4181191623210907, "train/loss_total": 0.5307931303977966 }, { "epoch": 2.1108736307774514, "step": 7901, "train/loss_ctc": 0.46621173620224, "train/loss_error": 0.4039992690086365, "train/loss_total": 
0.41644176840782166 }, { "epoch": 2.1111407961528186, "step": 7902, "train/loss_ctc": 1.3633663654327393, "train/loss_error": 0.44130656123161316, "train/loss_total": 0.6257185339927673 }, { "epoch": 2.1114079615281858, "step": 7903, "train/loss_ctc": 0.29196810722351074, "train/loss_error": 0.41978543996810913, "train/loss_total": 0.3942219913005829 }, { "epoch": 2.1116751269035534, "step": 7904, "train/loss_ctc": 1.0551425218582153, "train/loss_error": 0.4463760256767273, "train/loss_total": 0.568129301071167 }, { "epoch": 2.1119422922789206, "step": 7905, "train/loss_ctc": 0.7683908939361572, "train/loss_error": 0.45224204659461975, "train/loss_total": 0.5154718160629272 }, { "epoch": 2.112209457654288, "step": 7906, "train/loss_ctc": 0.5348166227340698, "train/loss_error": 0.4349668622016907, "train/loss_total": 0.45493683218955994 }, { "epoch": 2.1124766230296554, "step": 7907, "train/loss_ctc": 0.4751664400100708, "train/loss_error": 0.4730502963066101, "train/loss_total": 0.47347351908683777 }, { "epoch": 2.1127437884050226, "step": 7908, "train/loss_ctc": 0.527145504951477, "train/loss_error": 0.4201732873916626, "train/loss_total": 0.4415677487850189 }, { "epoch": 2.11301095378039, "step": 7909, "train/loss_ctc": 0.9519764184951782, "train/loss_error": 0.4862346053123474, "train/loss_total": 0.5793829560279846 }, { "epoch": 2.1132781191557575, "grad_norm": 2.3603243827819824, "learning_rate": 1.732834624632648e-05, "loss": 0.5, "step": 7910 }, { "epoch": 2.1132781191557575, "step": 7910, "train/loss_ctc": 1.2411335706710815, "train/loss_error": 0.5216490030288696, "train/loss_total": 0.6655459403991699 }, { "epoch": 2.1135452845311247, "step": 7911, "train/loss_ctc": 0.6670019626617432, "train/loss_error": 0.47708266973495483, "train/loss_total": 0.5150665640830994 }, { "epoch": 2.1138124499064923, "step": 7912, "train/loss_ctc": 0.5594745874404907, "train/loss_error": 0.46949395537376404, "train/loss_total": 0.48749011754989624 }, { "epoch": 
2.1140796152818595, "step": 7913, "train/loss_ctc": 0.39180371165275574, "train/loss_error": 0.4338507056236267, "train/loss_total": 0.42544129490852356 }, { "epoch": 2.1143467806572267, "step": 7914, "train/loss_ctc": 0.47163742780685425, "train/loss_error": 0.4629896879196167, "train/loss_total": 0.4647192358970642 }, { "epoch": 2.1146139460325943, "step": 7915, "train/loss_ctc": 0.4110961854457855, "train/loss_error": 0.42413702607154846, "train/loss_total": 0.4215288758277893 }, { "epoch": 2.1148811114079615, "step": 7916, "train/loss_ctc": 0.4476451873779297, "train/loss_error": 0.44369277358055115, "train/loss_total": 0.4444832503795624 }, { "epoch": 2.1151482767833287, "step": 7917, "train/loss_ctc": 0.6327456831932068, "train/loss_error": 0.47099900245666504, "train/loss_total": 0.5033483505249023 }, { "epoch": 2.1154154421586964, "step": 7918, "train/loss_ctc": 0.7888215184211731, "train/loss_error": 0.4655008018016815, "train/loss_total": 0.5301649570465088 }, { "epoch": 2.1156826075340636, "step": 7919, "train/loss_ctc": 0.9147534966468811, "train/loss_error": 0.4723600149154663, "train/loss_total": 0.5608386993408203 }, { "epoch": 2.1159497729094308, "grad_norm": 2.9362778663635254, "learning_rate": 1.7312316323804437e-05, "loss": 0.5019, "step": 7920 }, { "epoch": 2.1159497729094308, "step": 7920, "train/loss_ctc": 1.4786921739578247, "train/loss_error": 0.43166619539260864, "train/loss_total": 0.6410713791847229 }, { "epoch": 2.1162169382847984, "step": 7921, "train/loss_ctc": 0.8647371530532837, "train/loss_error": 0.3617331087589264, "train/loss_total": 0.46233391761779785 }, { "epoch": 2.1164841036601656, "step": 7922, "train/loss_ctc": 0.6706700921058655, "train/loss_error": 0.4863261878490448, "train/loss_total": 0.5231949687004089 }, { "epoch": 2.116751269035533, "step": 7923, "train/loss_ctc": 0.7290524244308472, "train/loss_error": 0.5090819001197815, "train/loss_total": 0.5530760288238525 }, { "epoch": 2.1170184344109004, "step": 7924, 
"train/loss_ctc": 0.8919397592544556, "train/loss_error": 0.4908676743507385, "train/loss_total": 0.5710821151733398 }, { "epoch": 2.1172855997862676, "step": 7925, "train/loss_ctc": 0.5008749961853027, "train/loss_error": 0.458809494972229, "train/loss_total": 0.4672226011753082 }, { "epoch": 2.1175527651616353, "step": 7926, "train/loss_ctc": 0.69446861743927, "train/loss_error": 0.4440161883831024, "train/loss_total": 0.4941066801548004 }, { "epoch": 2.1178199305370025, "step": 7927, "train/loss_ctc": 0.6172131299972534, "train/loss_error": 0.4346291422843933, "train/loss_total": 0.4711459279060364 }, { "epoch": 2.1180870959123697, "step": 7928, "train/loss_ctc": 0.9563568830490112, "train/loss_error": 0.4466206729412079, "train/loss_total": 0.5485679507255554 }, { "epoch": 2.1183542612877373, "step": 7929, "train/loss_ctc": 0.7856224775314331, "train/loss_error": 0.5261833071708679, "train/loss_total": 0.5780711770057678 }, { "epoch": 2.1186214266631045, "grad_norm": 2.19888973236084, "learning_rate": 1.7296286401282395e-05, "loss": 0.531, "step": 7930 }, { "epoch": 2.1186214266631045, "step": 7930, "train/loss_ctc": 1.0385832786560059, "train/loss_error": 0.5063379406929016, "train/loss_total": 0.6127870082855225 }, { "epoch": 2.1188885920384717, "step": 7931, "train/loss_ctc": 0.4988096058368683, "train/loss_error": 0.52187180519104, "train/loss_total": 0.5172593593597412 }, { "epoch": 2.1191557574138393, "step": 7932, "train/loss_ctc": 1.1393303871154785, "train/loss_error": 0.4266088306903839, "train/loss_total": 0.5691531896591187 }, { "epoch": 2.1194229227892065, "step": 7933, "train/loss_ctc": 1.5493628978729248, "train/loss_error": 0.4710512161254883, "train/loss_total": 0.6867135763168335 }, { "epoch": 2.1196900881645737, "step": 7934, "train/loss_ctc": 0.581631064414978, "train/loss_error": 0.46828603744506836, "train/loss_total": 0.49095505475997925 }, { "epoch": 2.1199572535399414, "step": 7935, "train/loss_ctc": 0.48661375045776367, 
"train/loss_error": 0.4471595585346222, "train/loss_total": 0.45505040884017944 }, { "epoch": 2.1202244189153086, "step": 7936, "train/loss_ctc": 0.3703872263431549, "train/loss_error": 0.43269017338752747, "train/loss_total": 0.42022961378097534 }, { "epoch": 2.1204915842906757, "step": 7937, "train/loss_ctc": 0.708172619342804, "train/loss_error": 0.501468300819397, "train/loss_total": 0.5428091287612915 }, { "epoch": 2.1207587496660434, "step": 7938, "train/loss_ctc": 0.4916653037071228, "train/loss_error": 0.4954974353313446, "train/loss_total": 0.49473100900650024 }, { "epoch": 2.1210259150414106, "step": 7939, "train/loss_ctc": 0.9249944686889648, "train/loss_error": 0.4663926064968109, "train/loss_total": 0.5581129789352417 }, { "epoch": 2.1212930804167778, "grad_norm": 1.2875359058380127, "learning_rate": 1.7280256478760353e-05, "loss": 0.5348, "step": 7940 }, { "epoch": 2.1212930804167778, "step": 7940, "train/loss_ctc": 0.3407576382160187, "train/loss_error": 0.4428182542324066, "train/loss_total": 0.4224061369895935 }, { "epoch": 2.1215602457921454, "step": 7941, "train/loss_ctc": 0.42049703001976013, "train/loss_error": 0.4923921823501587, "train/loss_total": 0.47801315784454346 }, { "epoch": 2.1218274111675126, "step": 7942, "train/loss_ctc": 1.1520521640777588, "train/loss_error": 0.5106039643287659, "train/loss_total": 0.6388936042785645 }, { "epoch": 2.1220945765428803, "step": 7943, "train/loss_ctc": 0.9757982492446899, "train/loss_error": 0.4461780786514282, "train/loss_total": 0.5521021485328674 }, { "epoch": 2.1223617419182474, "step": 7944, "train/loss_ctc": 0.801504909992218, "train/loss_error": 0.3835464119911194, "train/loss_total": 0.4671381115913391 }, { "epoch": 2.1226289072936146, "step": 7945, "train/loss_ctc": 0.7727588415145874, "train/loss_error": 0.4685823321342468, "train/loss_total": 0.5294176340103149 }, { "epoch": 2.1228960726689823, "step": 7946, "train/loss_ctc": 0.9891800284385681, "train/loss_error": 0.45598870515823364, 
"train/loss_total": 0.5626269578933716 }, { "epoch": 2.1231632380443495, "step": 7947, "train/loss_ctc": 0.5598739981651306, "train/loss_error": 0.49048125743865967, "train/loss_total": 0.504359781742096 }, { "epoch": 2.1234304034197167, "step": 7948, "train/loss_ctc": 0.4362747371196747, "train/loss_error": 0.4685267210006714, "train/loss_total": 0.462076336145401 }, { "epoch": 2.1236975687950843, "step": 7949, "train/loss_ctc": 0.7819530963897705, "train/loss_error": 0.4905219078063965, "train/loss_total": 0.5488081574440002 }, { "epoch": 2.1239647341704515, "grad_norm": 1.6334741115570068, "learning_rate": 1.726422655623831e-05, "loss": 0.5166, "step": 7950 }, { "epoch": 2.1239647341704515, "step": 7950, "train/loss_ctc": 1.3261606693267822, "train/loss_error": 0.5048944354057312, "train/loss_total": 0.6691477298736572 }, { "epoch": 2.1242318995458187, "step": 7951, "train/loss_ctc": 1.009443759918213, "train/loss_error": 0.43878498673439026, "train/loss_total": 0.5529167652130127 }, { "epoch": 2.1244990649211863, "step": 7952, "train/loss_ctc": 0.7212607264518738, "train/loss_error": 0.3686748743057251, "train/loss_total": 0.4391920566558838 }, { "epoch": 2.1247662302965535, "step": 7953, "train/loss_ctc": 1.6816641092300415, "train/loss_error": 0.502492368221283, "train/loss_total": 0.7383267283439636 }, { "epoch": 2.1250333956719207, "step": 7954, "train/loss_ctc": 0.9297584891319275, "train/loss_error": 0.4464658498764038, "train/loss_total": 0.5431243777275085 }, { "epoch": 2.1253005610472884, "step": 7955, "train/loss_ctc": 0.45512083172798157, "train/loss_error": 0.4685630798339844, "train/loss_total": 0.46587464213371277 }, { "epoch": 2.1255677264226556, "step": 7956, "train/loss_ctc": 0.5259881615638733, "train/loss_error": 0.44907230138778687, "train/loss_total": 0.4644554853439331 }, { "epoch": 2.125834891798023, "step": 7957, "train/loss_ctc": 0.8745598196983337, "train/loss_error": 0.42044904828071594, "train/loss_total": 0.5112711787223816 }, { 
"epoch": 2.1261020571733904, "step": 7958, "train/loss_ctc": 0.626620352268219, "train/loss_error": 0.400803804397583, "train/loss_total": 0.4459671080112457 }, { "epoch": 2.1263692225487576, "step": 7959, "train/loss_ctc": 0.7235785722732544, "train/loss_error": 0.4380183815956116, "train/loss_total": 0.49513041973114014 }, { "epoch": 2.1266363879241252, "grad_norm": 1.6423428058624268, "learning_rate": 1.7248196633716273e-05, "loss": 0.5325, "step": 7960 }, { "epoch": 2.1266363879241252, "step": 7960, "train/loss_ctc": 1.3504079580307007, "train/loss_error": 0.4790900945663452, "train/loss_total": 0.6533536911010742 }, { "epoch": 2.1269035532994924, "step": 7961, "train/loss_ctc": 0.4949468970298767, "train/loss_error": 0.5420317053794861, "train/loss_total": 0.5326147675514221 }, { "epoch": 2.1271707186748596, "step": 7962, "train/loss_ctc": 0.4526210129261017, "train/loss_error": 0.4800921678543091, "train/loss_total": 0.4745979309082031 }, { "epoch": 2.1274378840502273, "step": 7963, "train/loss_ctc": 0.7469412088394165, "train/loss_error": 0.4798101782798767, "train/loss_total": 0.5332363843917847 }, { "epoch": 2.1277050494255945, "step": 7964, "train/loss_ctc": 0.9755847454071045, "train/loss_error": 0.4487534463405609, "train/loss_total": 0.5541197061538696 }, { "epoch": 2.1279722148009617, "step": 7965, "train/loss_ctc": 0.5524066686630249, "train/loss_error": 0.37817952036857605, "train/loss_total": 0.4130249619483948 }, { "epoch": 2.1282393801763293, "step": 7966, "train/loss_ctc": 0.5153021812438965, "train/loss_error": 0.46883323788642883, "train/loss_total": 0.47812706232070923 }, { "epoch": 2.1285065455516965, "step": 7967, "train/loss_ctc": 0.5993196964263916, "train/loss_error": 0.5470273494720459, "train/loss_total": 0.557485818862915 }, { "epoch": 2.1287737109270637, "step": 7968, "train/loss_ctc": 0.4723730981349945, "train/loss_error": 0.4291093349456787, "train/loss_total": 0.4377620816230774 }, { "epoch": 2.1290408763024313, "step": 7969, 
"train/loss_ctc": 0.31715819239616394, "train/loss_error": 0.42788729071617126, "train/loss_total": 0.40574148297309875 }, { "epoch": 2.1293080416777985, "grad_norm": 2.004554033279419, "learning_rate": 1.723216671119423e-05, "loss": 0.504, "step": 7970 }, { "epoch": 2.1293080416777985, "step": 7970, "train/loss_ctc": 0.7319104671478271, "train/loss_error": 0.4748132824897766, "train/loss_total": 0.5262327194213867 }, { "epoch": 2.1295752070531657, "step": 7971, "train/loss_ctc": 0.433942049741745, "train/loss_error": 0.4533478021621704, "train/loss_total": 0.44946667551994324 }, { "epoch": 2.1298423724285334, "step": 7972, "train/loss_ctc": 0.4377787709236145, "train/loss_error": 0.4062492847442627, "train/loss_total": 0.41255518794059753 }, { "epoch": 2.1301095378039006, "step": 7973, "train/loss_ctc": 0.7310957908630371, "train/loss_error": 0.47382649779319763, "train/loss_total": 0.5252803564071655 }, { "epoch": 2.1303767031792677, "step": 7974, "train/loss_ctc": 0.7694132924079895, "train/loss_error": 0.5318280458450317, "train/loss_total": 0.5793451070785522 }, { "epoch": 2.1306438685546354, "step": 7975, "train/loss_ctc": 0.9223206639289856, "train/loss_error": 0.49835917353630066, "train/loss_total": 0.5831514596939087 }, { "epoch": 2.1309110339300026, "step": 7976, "train/loss_ctc": 0.9306402206420898, "train/loss_error": 0.46649646759033203, "train/loss_total": 0.5593252182006836 }, { "epoch": 2.1311781993053702, "step": 7977, "train/loss_ctc": 0.5259146690368652, "train/loss_error": 0.46123480796813965, "train/loss_total": 0.4741708040237427 }, { "epoch": 2.1314453646807374, "step": 7978, "train/loss_ctc": 1.2664719820022583, "train/loss_error": 0.4186042249202728, "train/loss_total": 0.5881778001785278 }, { "epoch": 2.1317125300561046, "step": 7979, "train/loss_ctc": 1.1209156513214111, "train/loss_error": 0.5021395683288574, "train/loss_total": 0.6258947849273682 }, { "epoch": 2.1319796954314723, "grad_norm": 4.525613307952881, "learning_rate": 
1.721613678867219e-05, "loss": 0.5324, "step": 7980 }, { "epoch": 2.1319796954314723, "step": 7980, "train/loss_ctc": 0.8370013236999512, "train/loss_error": 0.4441675841808319, "train/loss_total": 0.5227343440055847 }, { "epoch": 2.1322468608068395, "step": 7981, "train/loss_ctc": 0.7807319164276123, "train/loss_error": 0.4494084417819977, "train/loss_total": 0.5156731605529785 }, { "epoch": 2.1325140261822066, "step": 7982, "train/loss_ctc": 0.5898259282112122, "train/loss_error": 0.45113903284072876, "train/loss_total": 0.47887641191482544 }, { "epoch": 2.1327811915575743, "step": 7983, "train/loss_ctc": 0.361916184425354, "train/loss_error": 0.4364044666290283, "train/loss_total": 0.4215068221092224 }, { "epoch": 2.1330483569329415, "step": 7984, "train/loss_ctc": 0.9623187184333801, "train/loss_error": 0.45422524213790894, "train/loss_total": 0.5558439493179321 }, { "epoch": 2.1333155223083087, "step": 7985, "train/loss_ctc": 1.1513952016830444, "train/loss_error": 0.47796377539634705, "train/loss_total": 0.6126500964164734 }, { "epoch": 2.1335826876836763, "step": 7986, "train/loss_ctc": 1.167190432548523, "train/loss_error": 0.45793911814689636, "train/loss_total": 0.5997893810272217 }, { "epoch": 2.1338498530590435, "step": 7987, "train/loss_ctc": 1.1811556816101074, "train/loss_error": 0.40186557173728943, "train/loss_total": 0.5577235817909241 }, { "epoch": 2.1341170184344107, "step": 7988, "train/loss_ctc": 0.49917343258857727, "train/loss_error": 0.43657630681991577, "train/loss_total": 0.4490957260131836 }, { "epoch": 2.1343841838097783, "step": 7989, "train/loss_ctc": 0.6028051376342773, "train/loss_error": 0.3964338004589081, "train/loss_total": 0.4377080798149109 }, { "epoch": 2.1346513491851455, "grad_norm": 2.1310276985168457, "learning_rate": 1.7200106866150147e-05, "loss": 0.5152, "step": 7990 }, { "epoch": 2.1346513491851455, "step": 7990, "train/loss_ctc": 0.8674027919769287, "train/loss_error": 0.4720005691051483, "train/loss_total": 
0.5510810017585754 }, { "epoch": 2.134918514560513, "step": 7991, "train/loss_ctc": 0.5212236046791077, "train/loss_error": 0.44069021940231323, "train/loss_total": 0.45679688453674316 }, { "epoch": 2.1351856799358804, "step": 7992, "train/loss_ctc": 0.7609196901321411, "train/loss_error": 0.4277876615524292, "train/loss_total": 0.4944140911102295 }, { "epoch": 2.1354528453112476, "step": 7993, "train/loss_ctc": 0.3087635636329651, "train/loss_error": 0.42604634165763855, "train/loss_total": 0.4025897979736328 }, { "epoch": 2.135720010686615, "step": 7994, "train/loss_ctc": 0.6766257882118225, "train/loss_error": 0.4386019706726074, "train/loss_total": 0.4862067401409149 }, { "epoch": 2.1359871760619824, "step": 7995, "train/loss_ctc": 0.3013348877429962, "train/loss_error": 0.4271102845668793, "train/loss_total": 0.4019552171230316 }, { "epoch": 2.1362543414373496, "step": 7996, "train/loss_ctc": 0.9880262613296509, "train/loss_error": 0.5022087097167969, "train/loss_total": 0.5993722677230835 }, { "epoch": 2.1365215068127172, "step": 7997, "train/loss_ctc": 0.7644856572151184, "train/loss_error": 0.4412063956260681, "train/loss_total": 0.5058622360229492 }, { "epoch": 2.1367886721880844, "step": 7998, "train/loss_ctc": 0.7555376291275024, "train/loss_error": 0.41334623098373413, "train/loss_total": 0.48178452253341675 }, { "epoch": 2.1370558375634516, "step": 7999, "train/loss_ctc": 0.19365528225898743, "train/loss_error": 0.43377140164375305, "train/loss_total": 0.3857482075691223 }, { "epoch": 2.1373230029388193, "grad_norm": 2.5053482055664062, "learning_rate": 1.7184076943628105e-05, "loss": 0.4766, "step": 8000 }, { "epoch": 2.1373230029388193, "step": 8000, "train/loss_ctc": 0.6840887069702148, "train/loss_error": 0.442114919424057, "train/loss_total": 0.49050968885421753 }, { "epoch": 2.1375901683141865, "step": 8001, "train/loss_ctc": 1.1196551322937012, "train/loss_error": 0.4109474718570709, "train/loss_total": 0.5526890158653259 }, { "epoch": 
2.1378573336895537, "step": 8002, "train/loss_ctc": 0.9115353226661682, "train/loss_error": 0.47085580229759216, "train/loss_total": 0.5589916706085205 }, { "epoch": 2.1381244990649213, "step": 8003, "train/loss_ctc": 0.28943705558776855, "train/loss_error": 0.44865527749061584, "train/loss_total": 0.41681164503097534 }, { "epoch": 2.1383916644402885, "step": 8004, "train/loss_ctc": 1.0485498905181885, "train/loss_error": 0.4557539224624634, "train/loss_total": 0.5743131041526794 }, { "epoch": 2.1386588298156557, "step": 8005, "train/loss_ctc": 1.1349444389343262, "train/loss_error": 0.4791569709777832, "train/loss_total": 0.6103144884109497 }, { "epoch": 2.1389259951910233, "step": 8006, "train/loss_ctc": 0.6090051531791687, "train/loss_error": 0.4749794900417328, "train/loss_total": 0.50178462266922 }, { "epoch": 2.1391931605663905, "step": 8007, "train/loss_ctc": 0.7985213398933411, "train/loss_error": 0.44434767961502075, "train/loss_total": 0.515182375907898 }, { "epoch": 2.1394603259417577, "step": 8008, "train/loss_ctc": 1.5512146949768066, "train/loss_error": 0.513375997543335, "train/loss_total": 0.7209437489509583 }, { "epoch": 2.1397274913171254, "step": 8009, "train/loss_ctc": 0.9244266748428345, "train/loss_error": 0.48281005024909973, "train/loss_total": 0.5711333751678467 }, { "epoch": 2.1399946566924926, "grad_norm": 2.0681889057159424, "learning_rate": 1.7168047021106063e-05, "loss": 0.5513, "step": 8010 }, { "epoch": 2.1399946566924926, "step": 8010, "train/loss_ctc": 0.5486630797386169, "train/loss_error": 0.4457487165927887, "train/loss_total": 0.4663316011428833 }, { "epoch": 2.14026182206786, "step": 8011, "train/loss_ctc": 0.655940592288971, "train/loss_error": 0.4764397144317627, "train/loss_total": 0.5123398900032043 }, { "epoch": 2.1405289874432274, "step": 8012, "train/loss_ctc": 0.7252610921859741, "train/loss_error": 0.477640300989151, "train/loss_total": 0.5271644592285156 }, { "epoch": 2.1407961528185946, "step": 8013, 
"train/loss_ctc": 0.7599588632583618, "train/loss_error": 0.39768052101135254, "train/loss_total": 0.4701361656188965 }, { "epoch": 2.1410633181939622, "step": 8014, "train/loss_ctc": 0.9488728046417236, "train/loss_error": 0.420282781124115, "train/loss_total": 0.5260007977485657 }, { "epoch": 2.1413304835693294, "step": 8015, "train/loss_ctc": 0.6476002931594849, "train/loss_error": 0.46756282448768616, "train/loss_total": 0.5035703182220459 }, { "epoch": 2.1415976489446966, "step": 8016, "train/loss_ctc": 0.7851192951202393, "train/loss_error": 0.4346746802330017, "train/loss_total": 0.5047636032104492 }, { "epoch": 2.1418648143200643, "step": 8017, "train/loss_ctc": 0.7006476521492004, "train/loss_error": 0.46040013432502747, "train/loss_total": 0.5084496736526489 }, { "epoch": 2.1421319796954315, "step": 8018, "train/loss_ctc": 0.4976615309715271, "train/loss_error": 0.44659191370010376, "train/loss_total": 0.45680585503578186 }, { "epoch": 2.1423991450707986, "step": 8019, "train/loss_ctc": 0.35019251704216003, "train/loss_error": 0.4199923574924469, "train/loss_total": 0.40603238344192505 }, { "epoch": 2.1426663104461663, "grad_norm": 1.4826050996780396, "learning_rate": 1.7152017098584024e-05, "loss": 0.4882, "step": 8020 }, { "epoch": 2.1426663104461663, "step": 8020, "train/loss_ctc": 0.7242522239685059, "train/loss_error": 0.42271265387535095, "train/loss_total": 0.4830206036567688 }, { "epoch": 2.1429334758215335, "step": 8021, "train/loss_ctc": 0.7927967309951782, "train/loss_error": 0.42756256461143494, "train/loss_total": 0.5006093978881836 }, { "epoch": 2.1432006411969007, "step": 8022, "train/loss_ctc": 1.1428555250167847, "train/loss_error": 0.4515658915042877, "train/loss_total": 0.589823842048645 }, { "epoch": 2.1434678065722683, "step": 8023, "train/loss_ctc": 0.8882750272750854, "train/loss_error": 0.4239991307258606, "train/loss_total": 0.5168542861938477 }, { "epoch": 2.1437349719476355, "step": 8024, "train/loss_ctc": 0.7044739723205566, 
"train/loss_error": 0.46887272596359253, "train/loss_total": 0.5159929990768433 }, { "epoch": 2.144002137323003, "step": 8025, "train/loss_ctc": 0.5530004501342773, "train/loss_error": 0.463226854801178, "train/loss_total": 0.4811815619468689 }, { "epoch": 2.1442693026983703, "step": 8026, "train/loss_ctc": 0.624862551689148, "train/loss_error": 0.44442686438560486, "train/loss_total": 0.4805140197277069 }, { "epoch": 2.1445364680737375, "step": 8027, "train/loss_ctc": 1.1697252988815308, "train/loss_error": 0.44938525557518005, "train/loss_total": 0.5934532880783081 }, { "epoch": 2.144803633449105, "step": 8028, "train/loss_ctc": 1.0626158714294434, "train/loss_error": 0.42471086978912354, "train/loss_total": 0.5522918701171875 }, { "epoch": 2.1450707988244724, "step": 8029, "train/loss_ctc": 0.4862019121646881, "train/loss_error": 0.4132803678512573, "train/loss_total": 0.4278647005558014 }, { "epoch": 2.1453379641998396, "grad_norm": 1.8792718648910522, "learning_rate": 1.7135987176061982e-05, "loss": 0.5142, "step": 8030 }, { "epoch": 2.1453379641998396, "step": 8030, "train/loss_ctc": 0.8961899876594543, "train/loss_error": 0.4411044716835022, "train/loss_total": 0.5321215987205505 }, { "epoch": 2.145605129575207, "step": 8031, "train/loss_ctc": 0.7322050333023071, "train/loss_error": 0.4623315632343292, "train/loss_total": 0.5163062810897827 }, { "epoch": 2.1458722949505744, "step": 8032, "train/loss_ctc": 1.0075751543045044, "train/loss_error": 0.5727730989456177, "train/loss_total": 0.6597335338592529 }, { "epoch": 2.1461394603259416, "step": 8033, "train/loss_ctc": 0.7056262493133545, "train/loss_error": 0.4664158821105957, "train/loss_total": 0.5142579674720764 }, { "epoch": 2.1464066257013092, "step": 8034, "train/loss_ctc": 1.3864033222198486, "train/loss_error": 0.47896888852119446, "train/loss_total": 0.6604557633399963 }, { "epoch": 2.1466737910766764, "step": 8035, "train/loss_ctc": 0.6435701251029968, "train/loss_error": 0.48753994703292847, 
"train/loss_total": 0.518746018409729 }, { "epoch": 2.1469409564520436, "step": 8036, "train/loss_ctc": 1.3043159246444702, "train/loss_error": 0.5380710959434509, "train/loss_total": 0.6913200616836548 }, { "epoch": 2.1472081218274113, "step": 8037, "train/loss_ctc": 0.796202540397644, "train/loss_error": 0.4331287443637848, "train/loss_total": 0.5057435035705566 }, { "epoch": 2.1474752872027785, "step": 8038, "train/loss_ctc": 0.41166824102401733, "train/loss_error": 0.43592125177383423, "train/loss_total": 0.4310706555843353 }, { "epoch": 2.1477424525781457, "step": 8039, "train/loss_ctc": 0.4782310128211975, "train/loss_error": 0.4275713860988617, "train/loss_total": 0.43770331144332886 }, { "epoch": 2.1480096179535133, "grad_norm": 1.3530489206314087, "learning_rate": 1.711995725353994e-05, "loss": 0.5467, "step": 8040 }, { "epoch": 2.1480096179535133, "step": 8040, "train/loss_ctc": 0.32779037952423096, "train/loss_error": 0.3762631416320801, "train/loss_total": 0.36656859517097473 }, { "epoch": 2.1482767833288805, "step": 8041, "train/loss_ctc": 0.9684208631515503, "train/loss_error": 0.441453218460083, "train/loss_total": 0.5468467473983765 }, { "epoch": 2.1485439487042477, "step": 8042, "train/loss_ctc": 0.5326645374298096, "train/loss_error": 0.5009685158729553, "train/loss_total": 0.5073077082633972 }, { "epoch": 2.1488111140796153, "step": 8043, "train/loss_ctc": 1.6845002174377441, "train/loss_error": 0.46904802322387695, "train/loss_total": 0.7121384739875793 }, { "epoch": 2.1490782794549825, "step": 8044, "train/loss_ctc": 0.5867748260498047, "train/loss_error": 0.4048413932323456, "train/loss_total": 0.44122809171676636 }, { "epoch": 2.14934544483035, "step": 8045, "train/loss_ctc": 0.5964678525924683, "train/loss_error": 0.49731263518333435, "train/loss_total": 0.517143726348877 }, { "epoch": 2.1496126102057174, "step": 8046, "train/loss_ctc": 0.4456365406513214, "train/loss_error": 0.47497206926345825, "train/loss_total": 0.46910497546195984 }, { 
"epoch": 2.1498797755810846, "step": 8047, "train/loss_ctc": 1.2271314859390259, "train/loss_error": 0.461556613445282, "train/loss_total": 0.6146715879440308 }, { "epoch": 2.150146940956452, "step": 8048, "train/loss_ctc": 0.9971909523010254, "train/loss_error": 0.45818421244621277, "train/loss_total": 0.5659855604171753 }, { "epoch": 2.1504141063318194, "step": 8049, "train/loss_ctc": 0.676496684551239, "train/loss_error": 0.5432796478271484, "train/loss_total": 0.5699230432510376 }, { "epoch": 2.1506812717071866, "grad_norm": 2.2617039680480957, "learning_rate": 1.71039273310179e-05, "loss": 0.5311, "step": 8050 }, { "epoch": 2.1506812717071866, "step": 8050, "train/loss_ctc": 1.0498697757720947, "train/loss_error": 0.4642184376716614, "train/loss_total": 0.581348717212677 }, { "epoch": 2.1509484370825542, "step": 8051, "train/loss_ctc": 0.6441759467124939, "train/loss_error": 0.4609188735485077, "train/loss_total": 0.497570276260376 }, { "epoch": 2.1512156024579214, "step": 8052, "train/loss_ctc": 0.7891416549682617, "train/loss_error": 0.525217592716217, "train/loss_total": 0.5780024528503418 }, { "epoch": 2.1514827678332886, "step": 8053, "train/loss_ctc": 0.5250541567802429, "train/loss_error": 0.447279155254364, "train/loss_total": 0.4628341794013977 }, { "epoch": 2.1517499332086563, "step": 8054, "train/loss_ctc": 0.6637412309646606, "train/loss_error": 0.4527657926082611, "train/loss_total": 0.49496087431907654 }, { "epoch": 2.1520170985840235, "step": 8055, "train/loss_ctc": 0.6398813724517822, "train/loss_error": 0.4799242615699768, "train/loss_total": 0.5119156837463379 }, { "epoch": 2.152284263959391, "step": 8056, "train/loss_ctc": 0.4421501159667969, "train/loss_error": 0.48930230736732483, "train/loss_total": 0.47987186908721924 }, { "epoch": 2.1525514293347583, "step": 8057, "train/loss_ctc": 0.6236079335212708, "train/loss_error": 0.44411176443099976, "train/loss_total": 0.4800110161304474 }, { "epoch": 2.1528185947101255, "step": 8058, 
"train/loss_ctc": 1.6320130825042725, "train/loss_error": 0.44034671783447266, "train/loss_total": 0.6786800026893616 }, { "epoch": 2.153085760085493, "step": 8059, "train/loss_ctc": 1.0264147520065308, "train/loss_error": 0.4522244930267334, "train/loss_total": 0.5670625567436218 }, { "epoch": 2.1533529254608603, "grad_norm": 1.5467115640640259, "learning_rate": 1.7087897408495857e-05, "loss": 0.5332, "step": 8060 }, { "epoch": 2.1533529254608603, "step": 8060, "train/loss_ctc": 0.8811287879943848, "train/loss_error": 0.3765421211719513, "train/loss_total": 0.47745949029922485 }, { "epoch": 2.1536200908362275, "step": 8061, "train/loss_ctc": 0.6419377326965332, "train/loss_error": 0.45598867535591125, "train/loss_total": 0.49317848682403564 }, { "epoch": 2.153887256211595, "step": 8062, "train/loss_ctc": 0.7409306764602661, "train/loss_error": 0.5725240111351013, "train/loss_total": 0.6062053442001343 }, { "epoch": 2.1541544215869624, "step": 8063, "train/loss_ctc": 0.4640774428844452, "train/loss_error": 0.5120957493782043, "train/loss_total": 0.5024920701980591 }, { "epoch": 2.1544215869623295, "step": 8064, "train/loss_ctc": 1.3264374732971191, "train/loss_error": 0.5368566513061523, "train/loss_total": 0.6947728395462036 }, { "epoch": 2.154688752337697, "step": 8065, "train/loss_ctc": 1.1801962852478027, "train/loss_error": 0.4647756814956665, "train/loss_total": 0.6078597903251648 }, { "epoch": 2.1549559177130644, "step": 8066, "train/loss_ctc": 0.6306834816932678, "train/loss_error": 0.5044543147087097, "train/loss_total": 0.5297001600265503 }, { "epoch": 2.1552230830884316, "step": 8067, "train/loss_ctc": 0.6228909492492676, "train/loss_error": 0.4754534065723419, "train/loss_total": 0.504940927028656 }, { "epoch": 2.155490248463799, "step": 8068, "train/loss_ctc": 0.8384645581245422, "train/loss_error": 0.4480508267879486, "train/loss_total": 0.5261335968971252 }, { "epoch": 2.1557574138391664, "step": 8069, "train/loss_ctc": 0.6515408158302307, 
"train/loss_error": 0.5419945120811462, "train/loss_total": 0.56390380859375 }, { "epoch": 2.1560245792145336, "grad_norm": 3.4190919399261475, "learning_rate": 1.7071867485973818e-05, "loss": 0.5507, "step": 8070 }, { "epoch": 2.1560245792145336, "step": 8070, "train/loss_ctc": 0.27827268838882446, "train/loss_error": 0.46336328983306885, "train/loss_total": 0.42634516954421997 }, { "epoch": 2.1562917445899012, "step": 8071, "train/loss_ctc": 0.99064701795578, "train/loss_error": 0.49556785821914673, "train/loss_total": 0.5945836901664734 }, { "epoch": 2.1565589099652684, "step": 8072, "train/loss_ctc": 0.47807610034942627, "train/loss_error": 0.4822482168674469, "train/loss_total": 0.4814137816429138 }, { "epoch": 2.1568260753406356, "step": 8073, "train/loss_ctc": 1.0860596895217896, "train/loss_error": 0.5215028524398804, "train/loss_total": 0.6344142556190491 }, { "epoch": 2.1570932407160033, "step": 8074, "train/loss_ctc": 0.5629191994667053, "train/loss_error": 0.4717269241809845, "train/loss_total": 0.48996537923812866 }, { "epoch": 2.1573604060913705, "step": 8075, "train/loss_ctc": 0.5578694343566895, "train/loss_error": 0.4818708002567291, "train/loss_total": 0.4970705509185791 }, { "epoch": 2.157627571466738, "step": 8076, "train/loss_ctc": 1.0871597528457642, "train/loss_error": 0.43059438467025757, "train/loss_total": 0.5619074702262878 }, { "epoch": 2.1578947368421053, "step": 8077, "train/loss_ctc": 0.6175870299339294, "train/loss_error": 0.4578850269317627, "train/loss_total": 0.48982542753219604 }, { "epoch": 2.1581619022174725, "step": 8078, "train/loss_ctc": 0.44530919194221497, "train/loss_error": 0.47570669651031494, "train/loss_total": 0.4696272015571594 }, { "epoch": 2.15842906759284, "step": 8079, "train/loss_ctc": 0.4281840920448303, "train/loss_error": 0.5027043223381042, "train/loss_total": 0.4878003001213074 }, { "epoch": 2.1586962329682073, "grad_norm": 1.4981508255004883, "learning_rate": 1.705583756345178e-05, "loss": 0.5133, "step": 
8080 }, { "epoch": 2.1586962329682073, "step": 8080, "train/loss_ctc": 0.3898126184940338, "train/loss_error": 0.44323429465293884, "train/loss_total": 0.43254995346069336 }, { "epoch": 2.1589633983435745, "step": 8081, "train/loss_ctc": 0.5454903841018677, "train/loss_error": 0.40006983280181885, "train/loss_total": 0.4291539490222931 }, { "epoch": 2.159230563718942, "step": 8082, "train/loss_ctc": 1.0038203001022339, "train/loss_error": 0.4939589202404022, "train/loss_total": 0.5959311723709106 }, { "epoch": 2.1594977290943094, "step": 8083, "train/loss_ctc": 0.5914192795753479, "train/loss_error": 0.4235956072807312, "train/loss_total": 0.4571603536605835 }, { "epoch": 2.1597648944696766, "step": 8084, "train/loss_ctc": 0.9235661029815674, "train/loss_error": 0.5036453604698181, "train/loss_total": 0.587629497051239 }, { "epoch": 2.160032059845044, "step": 8085, "train/loss_ctc": 0.34729912877082825, "train/loss_error": 0.4434325098991394, "train/loss_total": 0.42420583963394165 }, { "epoch": 2.1602992252204114, "step": 8086, "train/loss_ctc": 0.6119515895843506, "train/loss_error": 0.4523906111717224, "train/loss_total": 0.484302818775177 }, { "epoch": 2.1605663905957786, "step": 8087, "train/loss_ctc": 0.5490610003471375, "train/loss_error": 0.5272196531295776, "train/loss_total": 0.5315879583358765 }, { "epoch": 2.1608335559711462, "step": 8088, "train/loss_ctc": 0.4606582224369049, "train/loss_error": 0.4438396990299225, "train/loss_total": 0.4472033977508545 }, { "epoch": 2.1611007213465134, "step": 8089, "train/loss_ctc": 0.6899975538253784, "train/loss_error": 0.45086756348609924, "train/loss_total": 0.498693585395813 }, { "epoch": 2.161367886721881, "grad_norm": 6.377617835998535, "learning_rate": 1.7039807640929738e-05, "loss": 0.4888, "step": 8090 }, { "epoch": 2.161367886721881, "step": 8090, "train/loss_ctc": 0.5119922161102295, "train/loss_error": 0.5231999158859253, "train/loss_total": 0.5209583640098572 }, { "epoch": 2.1616350520972483, "step": 
8091, "train/loss_ctc": 0.8167094588279724, "train/loss_error": 0.47473037242889404, "train/loss_total": 0.5431262254714966 }, { "epoch": 2.1619022174726155, "step": 8092, "train/loss_ctc": 0.5753387808799744, "train/loss_error": 0.48067671060562134, "train/loss_total": 0.499609112739563 }, { "epoch": 2.162169382847983, "step": 8093, "train/loss_ctc": 0.6269093751907349, "train/loss_error": 0.4199677109718323, "train/loss_total": 0.4613560438156128 }, { "epoch": 2.1624365482233503, "step": 8094, "train/loss_ctc": 0.2404632866382599, "train/loss_error": 0.4716545045375824, "train/loss_total": 0.4254162609577179 }, { "epoch": 2.1627037135987175, "step": 8095, "train/loss_ctc": 0.7018563747406006, "train/loss_error": 0.37823280692100525, "train/loss_total": 0.4429575204849243 }, { "epoch": 2.162970878974085, "step": 8096, "train/loss_ctc": 0.8402804732322693, "train/loss_error": 0.3953576982021332, "train/loss_total": 0.4843422770500183 }, { "epoch": 2.1632380443494523, "step": 8097, "train/loss_ctc": 0.8589035272598267, "train/loss_error": 0.387782484292984, "train/loss_total": 0.482006698846817 }, { "epoch": 2.1635052097248195, "step": 8098, "train/loss_ctc": 0.5290058851242065, "train/loss_error": 0.4514097273349762, "train/loss_total": 0.46692895889282227 }, { "epoch": 2.163772375100187, "step": 8099, "train/loss_ctc": 0.8970987796783447, "train/loss_error": 0.42158043384552, "train/loss_total": 0.5166841149330139 }, { "epoch": 2.1640395404755544, "grad_norm": 3.2143056392669678, "learning_rate": 1.7023777718407696e-05, "loss": 0.4843, "step": 8100 }, { "epoch": 2.1640395404755544, "step": 8100, "train/loss_ctc": 0.8800414800643921, "train/loss_error": 0.3932359218597412, "train/loss_total": 0.49059706926345825 }, { "epoch": 2.1643067058509216, "step": 8101, "train/loss_ctc": 1.502576470375061, "train/loss_error": 0.46609506011009216, "train/loss_total": 0.6733913421630859 }, { "epoch": 2.164573871226289, "step": 8102, "train/loss_ctc": 0.8062926530838013, 
"train/loss_error": 0.3843078017234802, "train/loss_total": 0.4687047600746155 }, { "epoch": 2.1648410366016564, "step": 8103, "train/loss_ctc": 0.8354945778846741, "train/loss_error": 0.40217602252960205, "train/loss_total": 0.4888397455215454 }, { "epoch": 2.1651082019770236, "step": 8104, "train/loss_ctc": 0.948762059211731, "train/loss_error": 0.4099099338054657, "train/loss_total": 0.5176803469657898 }, { "epoch": 2.1653753673523912, "step": 8105, "train/loss_ctc": 0.5930551886558533, "train/loss_error": 0.5211752653121948, "train/loss_total": 0.5355512499809265 }, { "epoch": 2.1656425327277584, "step": 8106, "train/loss_ctc": 0.7957843542098999, "train/loss_error": 0.5331624746322632, "train/loss_total": 0.5856868624687195 }, { "epoch": 2.1659096981031256, "step": 8107, "train/loss_ctc": 1.2738926410675049, "train/loss_error": 0.5356132388114929, "train/loss_total": 0.6832691431045532 }, { "epoch": 2.1661768634784933, "step": 8108, "train/loss_ctc": 0.3794819116592407, "train/loss_error": 0.42653679847717285, "train/loss_total": 0.4171258211135864 }, { "epoch": 2.1664440288538604, "step": 8109, "train/loss_ctc": 0.8292999267578125, "train/loss_error": 0.43980783224105835, "train/loss_total": 0.5177062749862671 }, { "epoch": 2.166711194229228, "grad_norm": 1.7367764711380005, "learning_rate": 1.7007747795885654e-05, "loss": 0.5379, "step": 8110 }, { "epoch": 2.166711194229228, "step": 8110, "train/loss_ctc": 0.6574289798736572, "train/loss_error": 0.4119723439216614, "train/loss_total": 0.4610636830329895 }, { "epoch": 2.1669783596045953, "step": 8111, "train/loss_ctc": 0.44743090867996216, "train/loss_error": 0.5021166205406189, "train/loss_total": 0.491179496049881 }, { "epoch": 2.1672455249799625, "step": 8112, "train/loss_ctc": 1.4571014642715454, "train/loss_error": 0.5136032700538635, "train/loss_total": 0.7023029327392578 }, { "epoch": 2.16751269035533, "step": 8113, "train/loss_ctc": 0.5910425782203674, "train/loss_error": 0.4416995346546173, 
"train/loss_total": 0.47156816720962524 }, { "epoch": 2.1677798557306973, "step": 8114, "train/loss_ctc": 0.2930345833301544, "train/loss_error": 0.4488583505153656, "train/loss_total": 0.4176936149597168 }, { "epoch": 2.1680470211060645, "step": 8115, "train/loss_ctc": 0.7159236073493958, "train/loss_error": 0.3926301896572113, "train/loss_total": 0.4572888910770416 }, { "epoch": 2.168314186481432, "step": 8116, "train/loss_ctc": 0.7922492027282715, "train/loss_error": 0.4908232092857361, "train/loss_total": 0.5511084198951721 }, { "epoch": 2.1685813518567993, "step": 8117, "train/loss_ctc": 1.002082347869873, "train/loss_error": 0.44193392992019653, "train/loss_total": 0.5539636611938477 }, { "epoch": 2.1688485172321665, "step": 8118, "train/loss_ctc": 0.667750358581543, "train/loss_error": 0.3496975302696228, "train/loss_total": 0.41330811381340027 }, { "epoch": 2.169115682607534, "step": 8119, "train/loss_ctc": 0.8382221460342407, "train/loss_error": 0.46232905983924866, "train/loss_total": 0.5375076532363892 }, { "epoch": 2.1693828479829014, "grad_norm": 1.292252779006958, "learning_rate": 1.6991717873363612e-05, "loss": 0.5057, "step": 8120 }, { "epoch": 2.1693828479829014, "step": 8120, "train/loss_ctc": 0.9683529138565063, "train/loss_error": 0.4432148039340973, "train/loss_total": 0.548242449760437 }, { "epoch": 2.1696500133582686, "step": 8121, "train/loss_ctc": 0.5623973608016968, "train/loss_error": 0.36509591341018677, "train/loss_total": 0.4045562148094177 }, { "epoch": 2.169917178733636, "step": 8122, "train/loss_ctc": 0.41027548909187317, "train/loss_error": 0.4025150239467621, "train/loss_total": 0.40406712889671326 }, { "epoch": 2.1701843441090034, "step": 8123, "train/loss_ctc": 0.3451773524284363, "train/loss_error": 0.4100525379180908, "train/loss_total": 0.3970775008201599 }, { "epoch": 2.170451509484371, "step": 8124, "train/loss_ctc": 0.35248392820358276, "train/loss_error": 0.4110122323036194, "train/loss_total": 0.39930659532546997 }, { 
"epoch": 2.1707186748597382, "step": 8125, "train/loss_ctc": 1.1180038452148438, "train/loss_error": 0.4347248375415802, "train/loss_total": 0.571380615234375 }, { "epoch": 2.1709858402351054, "step": 8126, "train/loss_ctc": 0.7242704629898071, "train/loss_error": 0.4829709529876709, "train/loss_total": 0.5312308669090271 }, { "epoch": 2.171253005610473, "step": 8127, "train/loss_ctc": 0.45522063970565796, "train/loss_error": 0.4092860221862793, "train/loss_total": 0.41847294569015503 }, { "epoch": 2.1715201709858403, "step": 8128, "train/loss_ctc": 0.3834646940231323, "train/loss_error": 0.44190993905067444, "train/loss_total": 0.43022090196609497 }, { "epoch": 2.1717873363612075, "step": 8129, "train/loss_ctc": 1.13652503490448, "train/loss_error": 0.4735743999481201, "train/loss_total": 0.6061645150184631 }, { "epoch": 2.172054501736575, "grad_norm": 2.434201717376709, "learning_rate": 1.6975687950841573e-05, "loss": 0.4711, "step": 8130 }, { "epoch": 2.172054501736575, "step": 8130, "train/loss_ctc": 0.4745772182941437, "train/loss_error": 0.47126999497413635, "train/loss_total": 0.47193145751953125 }, { "epoch": 2.1723216671119423, "step": 8131, "train/loss_ctc": 1.4097325801849365, "train/loss_error": 0.4471505582332611, "train/loss_total": 0.6396669745445251 }, { "epoch": 2.1725888324873095, "step": 8132, "train/loss_ctc": 0.4376107454299927, "train/loss_error": 0.4190981686115265, "train/loss_total": 0.4228006899356842 }, { "epoch": 2.172855997862677, "step": 8133, "train/loss_ctc": 0.5895246267318726, "train/loss_error": 0.49724021553993225, "train/loss_total": 0.5156971216201782 }, { "epoch": 2.1731231632380443, "step": 8134, "train/loss_ctc": 1.0075492858886719, "train/loss_error": 0.4488052725791931, "train/loss_total": 0.5605540871620178 }, { "epoch": 2.1733903286134115, "step": 8135, "train/loss_ctc": 0.7377734184265137, "train/loss_error": 0.4662088453769684, "train/loss_total": 0.5205217599868774 }, { "epoch": 2.173657493988779, "step": 8136, 
"train/loss_ctc": 1.2534252405166626, "train/loss_error": 0.47826939821243286, "train/loss_total": 0.6333006024360657 }, { "epoch": 2.1739246593641464, "step": 8137, "train/loss_ctc": 0.6124264001846313, "train/loss_error": 0.4471895396709442, "train/loss_total": 0.4802369177341461 }, { "epoch": 2.1741918247395136, "step": 8138, "train/loss_ctc": 0.6154895424842834, "train/loss_error": 0.46808889508247375, "train/loss_total": 0.4975690245628357 }, { "epoch": 2.174458990114881, "step": 8139, "train/loss_ctc": 1.0529284477233887, "train/loss_error": 0.4762345254421234, "train/loss_total": 0.5915732979774475 }, { "epoch": 2.1747261554902484, "grad_norm": 2.2414064407348633, "learning_rate": 1.695965802831953e-05, "loss": 0.5334, "step": 8140 }, { "epoch": 2.1747261554902484, "step": 8140, "train/loss_ctc": 0.7113454341888428, "train/loss_error": 0.49288156628608704, "train/loss_total": 0.5365743637084961 }, { "epoch": 2.1749933208656156, "step": 8141, "train/loss_ctc": 1.1572778224945068, "train/loss_error": 0.4838702380657196, "train/loss_total": 0.6185517311096191 }, { "epoch": 2.1752604862409832, "step": 8142, "train/loss_ctc": 0.28516626358032227, "train/loss_error": 0.4555533230304718, "train/loss_total": 0.42147591710090637 }, { "epoch": 2.1755276516163504, "step": 8143, "train/loss_ctc": 1.0390756130218506, "train/loss_error": 0.4921562671661377, "train/loss_total": 0.6015401482582092 }, { "epoch": 2.175794816991718, "step": 8144, "train/loss_ctc": 0.6515640020370483, "train/loss_error": 0.47911176085472107, "train/loss_total": 0.5136022567749023 }, { "epoch": 2.1760619823670853, "step": 8145, "train/loss_ctc": 0.5699135065078735, "train/loss_error": 0.4781551957130432, "train/loss_total": 0.49650686979293823 }, { "epoch": 2.1763291477424525, "step": 8146, "train/loss_ctc": 0.738076388835907, "train/loss_error": 0.4791189134120941, "train/loss_total": 0.5309104323387146 }, { "epoch": 2.17659631311782, "step": 8147, "train/loss_ctc": 0.951865017414093, 
"train/loss_error": 0.44240620732307434, "train/loss_total": 0.5442979335784912 }, { "epoch": 2.1768634784931873, "step": 8148, "train/loss_ctc": 1.0285382270812988, "train/loss_error": 0.45507755875587463, "train/loss_total": 0.5697696805000305 }, { "epoch": 2.1771306438685545, "step": 8149, "train/loss_ctc": 0.8184165358543396, "train/loss_error": 0.4183582663536072, "train/loss_total": 0.4983699321746826 }, { "epoch": 2.177397809243922, "grad_norm": 17.473352432250977, "learning_rate": 1.694362810579749e-05, "loss": 0.5332, "step": 8150 }, { "epoch": 2.177397809243922, "step": 8150, "train/loss_ctc": 0.6366961002349854, "train/loss_error": 0.47222018241882324, "train/loss_total": 0.5051153898239136 }, { "epoch": 2.1776649746192893, "step": 8151, "train/loss_ctc": 1.164032220840454, "train/loss_error": 0.5087665915489197, "train/loss_total": 0.6398197412490845 }, { "epoch": 2.1779321399946565, "step": 8152, "train/loss_ctc": 1.1705949306488037, "train/loss_error": 0.48642417788505554, "train/loss_total": 0.6232583522796631 }, { "epoch": 2.178199305370024, "step": 8153, "train/loss_ctc": 0.4739856719970703, "train/loss_error": 0.5219176411628723, "train/loss_total": 0.5123312473297119 }, { "epoch": 2.1784664707453913, "step": 8154, "train/loss_ctc": 1.0761555433273315, "train/loss_error": 0.4970552623271942, "train/loss_total": 0.6128753423690796 }, { "epoch": 2.1787336361207585, "step": 8155, "train/loss_ctc": 1.1626616716384888, "train/loss_error": 0.4582642614841461, "train/loss_total": 0.5991437435150146 }, { "epoch": 2.179000801496126, "step": 8156, "train/loss_ctc": 0.5798794031143188, "train/loss_error": 0.41718992590904236, "train/loss_total": 0.4497278332710266 }, { "epoch": 2.1792679668714934, "step": 8157, "train/loss_ctc": 0.824895977973938, "train/loss_error": 0.4687371850013733, "train/loss_total": 0.5399689674377441 }, { "epoch": 2.179535132246861, "step": 8158, "train/loss_ctc": 0.38014844059944153, "train/loss_error": 0.3773336708545685, 
"train/loss_total": 0.37789663672447205 }, { "epoch": 2.179802297622228, "step": 8159, "train/loss_ctc": 0.6940862536430359, "train/loss_error": 0.45434853434562683, "train/loss_total": 0.5022960901260376 }, { "epoch": 2.1800694629975954, "grad_norm": 1.6622158288955688, "learning_rate": 1.6927598183275448e-05, "loss": 0.5362, "step": 8160 }, { "epoch": 2.1800694629975954, "step": 8160, "train/loss_ctc": 0.8739787340164185, "train/loss_error": 0.5369446873664856, "train/loss_total": 0.6043515205383301 }, { "epoch": 2.180336628372963, "step": 8161, "train/loss_ctc": 0.5696239471435547, "train/loss_error": 0.41561904549598694, "train/loss_total": 0.4464200437068939 }, { "epoch": 2.1806037937483302, "step": 8162, "train/loss_ctc": 0.4806642532348633, "train/loss_error": 0.41801226139068604, "train/loss_total": 0.43054264783859253 }, { "epoch": 2.1808709591236974, "step": 8163, "train/loss_ctc": 0.48314163088798523, "train/loss_error": 0.40180131793022156, "train/loss_total": 0.41806939244270325 }, { "epoch": 2.181138124499065, "step": 8164, "train/loss_ctc": 0.4394846260547638, "train/loss_error": 0.3760770559310913, "train/loss_total": 0.3887585997581482 }, { "epoch": 2.1814052898744323, "step": 8165, "train/loss_ctc": 1.3942523002624512, "train/loss_error": 0.4742353856563568, "train/loss_total": 0.6582387685775757 }, { "epoch": 2.1816724552497995, "step": 8166, "train/loss_ctc": 0.6809026598930359, "train/loss_error": 0.4690961539745331, "train/loss_total": 0.5114574432373047 }, { "epoch": 2.181939620625167, "step": 8167, "train/loss_ctc": 1.1286556720733643, "train/loss_error": 0.511081337928772, "train/loss_total": 0.6345962285995483 }, { "epoch": 2.1822067860005343, "step": 8168, "train/loss_ctc": 0.9144593477249146, "train/loss_error": 0.44610655307769775, "train/loss_total": 0.5397771596908569 }, { "epoch": 2.1824739513759015, "step": 8169, "train/loss_ctc": 0.9756731986999512, "train/loss_error": 0.4252631664276123, "train/loss_total": 0.535345196723938 }, { 
"epoch": 2.182741116751269, "grad_norm": 1.6518886089324951, "learning_rate": 1.6911568260753406e-05, "loss": 0.5168, "step": 8170 }, { "epoch": 2.182741116751269, "step": 8170, "train/loss_ctc": 0.8852097392082214, "train/loss_error": 0.40523239970207214, "train/loss_total": 0.501227855682373 }, { "epoch": 2.1830082821266363, "step": 8171, "train/loss_ctc": 0.8961502313613892, "train/loss_error": 0.4119490683078766, "train/loss_total": 0.5087893009185791 }, { "epoch": 2.1832754475020035, "step": 8172, "train/loss_ctc": 0.5551782846450806, "train/loss_error": 0.4529879093170166, "train/loss_total": 0.4734259843826294 }, { "epoch": 2.183542612877371, "step": 8173, "train/loss_ctc": 0.6070788502693176, "train/loss_error": 0.42418742179870605, "train/loss_total": 0.4607657194137573 }, { "epoch": 2.1838097782527384, "step": 8174, "train/loss_ctc": 1.3307057619094849, "train/loss_error": 0.40978801250457764, "train/loss_total": 0.5939715504646301 }, { "epoch": 2.184076943628106, "step": 8175, "train/loss_ctc": 1.0562961101531982, "train/loss_error": 0.40796324610710144, "train/loss_total": 0.5376298427581787 }, { "epoch": 2.184344109003473, "step": 8176, "train/loss_ctc": 0.916242241859436, "train/loss_error": 0.41068848967552185, "train/loss_total": 0.5117992162704468 }, { "epoch": 2.1846112743788404, "step": 8177, "train/loss_ctc": 0.8148480653762817, "train/loss_error": 0.49225956201553345, "train/loss_total": 0.5567772388458252 }, { "epoch": 2.184878439754208, "step": 8178, "train/loss_ctc": 0.5680685043334961, "train/loss_error": 0.43429750204086304, "train/loss_total": 0.46105170249938965 }, { "epoch": 2.1851456051295752, "step": 8179, "train/loss_ctc": 0.7914195656776428, "train/loss_error": 0.5128934979438782, "train/loss_total": 0.568598747253418 }, { "epoch": 2.1854127705049424, "grad_norm": 10.390247344970703, "learning_rate": 1.6895538338231364e-05, "loss": 0.5174, "step": 8180 }, { "epoch": 2.1854127705049424, "step": 8180, "train/loss_ctc": 
1.2716089487075806, "train/loss_error": 0.4626934230327606, "train/loss_total": 0.6244765520095825 }, { "epoch": 2.18567993588031, "step": 8181, "train/loss_ctc": 1.0490784645080566, "train/loss_error": 0.48060816526412964, "train/loss_total": 0.594302237033844 }, { "epoch": 2.1859471012556773, "step": 8182, "train/loss_ctc": 1.259798288345337, "train/loss_error": 0.49805498123168945, "train/loss_total": 0.650403618812561 }, { "epoch": 2.1862142666310445, "step": 8183, "train/loss_ctc": 0.7481521368026733, "train/loss_error": 0.4600044786930084, "train/loss_total": 0.5176340341567993 }, { "epoch": 2.186481432006412, "step": 8184, "train/loss_ctc": 0.5901978015899658, "train/loss_error": 0.45328065752983093, "train/loss_total": 0.48066407442092896 }, { "epoch": 2.1867485973817793, "step": 8185, "train/loss_ctc": 0.5755012035369873, "train/loss_error": 0.456143856048584, "train/loss_total": 0.4800153374671936 }, { "epoch": 2.1870157627571465, "step": 8186, "train/loss_ctc": 0.8097131252288818, "train/loss_error": 0.43586790561676025, "train/loss_total": 0.5106369256973267 }, { "epoch": 2.187282928132514, "step": 8187, "train/loss_ctc": 0.5493568181991577, "train/loss_error": 0.454760879278183, "train/loss_total": 0.4736800789833069 }, { "epoch": 2.1875500935078813, "step": 8188, "train/loss_ctc": 0.5329216122627258, "train/loss_error": 0.4371322691440582, "train/loss_total": 0.4562901258468628 }, { "epoch": 2.187817258883249, "step": 8189, "train/loss_ctc": 0.2186530977487564, "train/loss_error": 0.41566720604896545, "train/loss_total": 0.37626439332962036 }, { "epoch": 2.188084424258616, "grad_norm": 1.7925106287002563, "learning_rate": 1.6879508415709325e-05, "loss": 0.5164, "step": 8190 }, { "epoch": 2.188084424258616, "step": 8190, "train/loss_ctc": 0.6534693241119385, "train/loss_error": 0.46195188164711, "train/loss_total": 0.5002554059028625 }, { "epoch": 2.1883515896339834, "step": 8191, "train/loss_ctc": 0.4912877678871155, "train/loss_error": 
0.4702562987804413, "train/loss_total": 0.474462628364563 }, { "epoch": 2.188618755009351, "step": 8192, "train/loss_ctc": 0.591223955154419, "train/loss_error": 0.38882413506507874, "train/loss_total": 0.4293041229248047 }, { "epoch": 2.188885920384718, "step": 8193, "train/loss_ctc": 0.5036361217498779, "train/loss_error": 0.5128982663154602, "train/loss_total": 0.5110458135604858 }, { "epoch": 2.1891530857600854, "step": 8194, "train/loss_ctc": 0.3525625467300415, "train/loss_error": 0.49349942803382874, "train/loss_total": 0.46531206369400024 }, { "epoch": 2.189420251135453, "step": 8195, "train/loss_ctc": 0.2769213616847992, "train/loss_error": 0.4018080234527588, "train/loss_total": 0.37683069705963135 }, { "epoch": 2.18968741651082, "step": 8196, "train/loss_ctc": 0.9675359725952148, "train/loss_error": 0.46000856161117554, "train/loss_total": 0.5615140199661255 }, { "epoch": 2.1899545818861874, "step": 8197, "train/loss_ctc": 0.8756815195083618, "train/loss_error": 0.4662925899028778, "train/loss_total": 0.5481703877449036 }, { "epoch": 2.190221747261555, "step": 8198, "train/loss_ctc": 1.1476004123687744, "train/loss_error": 0.4935082495212555, "train/loss_total": 0.6243267059326172 }, { "epoch": 2.1904889126369222, "step": 8199, "train/loss_ctc": 0.6095049381256104, "train/loss_error": 0.4607071578502655, "train/loss_total": 0.4904667139053345 }, { "epoch": 2.1907560780122894, "grad_norm": 3.3526649475097656, "learning_rate": 1.6863478493187283e-05, "loss": 0.4982, "step": 8200 }, { "epoch": 2.1907560780122894, "step": 8200, "train/loss_ctc": 0.523903489112854, "train/loss_error": 0.5147544145584106, "train/loss_total": 0.5165842175483704 }, { "epoch": 2.191023243387657, "step": 8201, "train/loss_ctc": 1.0091190338134766, "train/loss_error": 0.4228183329105377, "train/loss_total": 0.5400784611701965 }, { "epoch": 2.1912904087630243, "step": 8202, "train/loss_ctc": 0.5784949064254761, "train/loss_error": 0.49648818373680115, "train/loss_total": 
0.5128895044326782 }, { "epoch": 2.1915575741383915, "step": 8203, "train/loss_ctc": 0.32098063826560974, "train/loss_error": 0.4190400242805481, "train/loss_total": 0.3994281589984894 }, { "epoch": 2.191824739513759, "step": 8204, "train/loss_ctc": 0.7476128339767456, "train/loss_error": 0.48177894949913025, "train/loss_total": 0.5349457263946533 }, { "epoch": 2.1920919048891263, "step": 8205, "train/loss_ctc": 0.41669219732284546, "train/loss_error": 0.5107676982879639, "train/loss_total": 0.4919525980949402 }, { "epoch": 2.1923590702644935, "step": 8206, "train/loss_ctc": 0.2773403525352478, "train/loss_error": 0.5119103193283081, "train/loss_total": 0.464996337890625 }, { "epoch": 2.192626235639861, "step": 8207, "train/loss_ctc": 1.3378195762634277, "train/loss_error": 0.5195664763450623, "train/loss_total": 0.6832171082496643 }, { "epoch": 2.1928934010152283, "step": 8208, "train/loss_ctc": 0.5933042764663696, "train/loss_error": 0.4765687584877014, "train/loss_total": 0.49991586804389954 }, { "epoch": 2.193160566390596, "step": 8209, "train/loss_ctc": 0.5358736515045166, "train/loss_error": 0.4337772727012634, "train/loss_total": 0.4541965425014496 }, { "epoch": 2.193427731765963, "grad_norm": 1.743367314338684, "learning_rate": 1.684744857066524e-05, "loss": 0.5098, "step": 8210 }, { "epoch": 2.193427731765963, "step": 8210, "train/loss_ctc": 0.44565942883491516, "train/loss_error": 0.5413278937339783, "train/loss_total": 0.5221942067146301 }, { "epoch": 2.1936948971413304, "step": 8211, "train/loss_ctc": 0.6925580501556396, "train/loss_error": 0.4399474561214447, "train/loss_total": 0.4904695749282837 }, { "epoch": 2.193962062516698, "step": 8212, "train/loss_ctc": 0.8030165433883667, "train/loss_error": 0.5623756051063538, "train/loss_total": 0.6105037927627563 }, { "epoch": 2.194229227892065, "step": 8213, "train/loss_ctc": 1.3424683809280396, "train/loss_error": 0.4265862703323364, "train/loss_total": 0.6097626686096191 }, { "epoch": 2.1944963932674324, 
"step": 8214, "train/loss_ctc": 0.655493438243866, "train/loss_error": 0.46492382884025574, "train/loss_total": 0.5030377507209778 }, { "epoch": 2.1947635586428, "step": 8215, "train/loss_ctc": 0.5419692993164062, "train/loss_error": 0.4159705638885498, "train/loss_total": 0.441170334815979 }, { "epoch": 2.1950307240181672, "step": 8216, "train/loss_ctc": 0.4088585078716278, "train/loss_error": 0.408942312002182, "train/loss_total": 0.4089255630970001 }, { "epoch": 2.1952978893935344, "step": 8217, "train/loss_ctc": 0.2742714285850525, "train/loss_error": 0.45247316360473633, "train/loss_total": 0.4168328046798706 }, { "epoch": 2.195565054768902, "step": 8218, "train/loss_ctc": 0.9545575976371765, "train/loss_error": 0.446935772895813, "train/loss_total": 0.5484601259231567 }, { "epoch": 2.1958322201442693, "step": 8219, "train/loss_ctc": 0.45198243856430054, "train/loss_error": 0.4614047110080719, "train/loss_total": 0.45952028036117554 }, { "epoch": 2.1960993855196365, "grad_norm": 1.7294886112213135, "learning_rate": 1.68314186481432e-05, "loss": 0.5011, "step": 8220 }, { "epoch": 2.1960993855196365, "step": 8220, "train/loss_ctc": 0.6633368134498596, "train/loss_error": 0.471783310174942, "train/loss_total": 0.5100940465927124 }, { "epoch": 2.196366550895004, "step": 8221, "train/loss_ctc": 0.7055971622467041, "train/loss_error": 0.41427522897720337, "train/loss_total": 0.47253960371017456 }, { "epoch": 2.1966337162703713, "step": 8222, "train/loss_ctc": 0.6228970289230347, "train/loss_error": 0.4186534285545349, "train/loss_total": 0.4595021605491638 }, { "epoch": 2.196900881645739, "step": 8223, "train/loss_ctc": 0.44071483612060547, "train/loss_error": 0.4808979332447052, "train/loss_total": 0.47286131978034973 }, { "epoch": 2.197168047021106, "step": 8224, "train/loss_ctc": 0.2589919865131378, "train/loss_error": 0.42253077030181885, "train/loss_total": 0.3898230195045471 }, { "epoch": 2.1974352123964733, "step": 8225, "train/loss_ctc": 1.0719780921936035, 
"train/loss_error": 0.47699862718582153, "train/loss_total": 0.5959945321083069 }, { "epoch": 2.197702377771841, "step": 8226, "train/loss_ctc": 0.6300159692764282, "train/loss_error": 0.5381603240966797, "train/loss_total": 0.5565314292907715 }, { "epoch": 2.197969543147208, "step": 8227, "train/loss_ctc": 0.8152023553848267, "train/loss_error": 0.45186328887939453, "train/loss_total": 0.5245311260223389 }, { "epoch": 2.1982367085225754, "step": 8228, "train/loss_ctc": 0.7218331098556519, "train/loss_error": 0.4224066734313965, "train/loss_total": 0.48229196667671204 }, { "epoch": 2.198503873897943, "step": 8229, "train/loss_ctc": 0.9848959445953369, "train/loss_error": 0.4733555316925049, "train/loss_total": 0.5756636261940002 }, { "epoch": 2.19877103927331, "grad_norm": 1.2759106159210205, "learning_rate": 1.6815388725621158e-05, "loss": 0.504, "step": 8230 }, { "epoch": 2.19877103927331, "step": 8230, "train/loss_ctc": 0.7012431621551514, "train/loss_error": 0.439481258392334, "train/loss_total": 0.4918336272239685 }, { "epoch": 2.1990382046486774, "step": 8231, "train/loss_ctc": 0.8515946865081787, "train/loss_error": 0.46347576379776, "train/loss_total": 0.5410995483398438 }, { "epoch": 2.199305370024045, "step": 8232, "train/loss_ctc": 0.7586479187011719, "train/loss_error": 0.42287373542785645, "train/loss_total": 0.49002858996391296 }, { "epoch": 2.199572535399412, "step": 8233, "train/loss_ctc": 0.9431595802307129, "train/loss_error": 0.41366901993751526, "train/loss_total": 0.5195671319961548 }, { "epoch": 2.1998397007747794, "step": 8234, "train/loss_ctc": 0.6624854803085327, "train/loss_error": 0.547768771648407, "train/loss_total": 0.5707120895385742 }, { "epoch": 2.200106866150147, "step": 8235, "train/loss_ctc": 0.4689337909221649, "train/loss_error": 0.4809894263744354, "train/loss_total": 0.4785783290863037 }, { "epoch": 2.2003740315255143, "step": 8236, "train/loss_ctc": 1.2186565399169922, "train/loss_error": 0.4691508412361145, 
"train/loss_total": 0.619051992893219 }, { "epoch": 2.2006411969008814, "step": 8237, "train/loss_ctc": 0.686385989189148, "train/loss_error": 0.45504969358444214, "train/loss_total": 0.5013169646263123 }, { "epoch": 2.200908362276249, "step": 8238, "train/loss_ctc": 1.1468632221221924, "train/loss_error": 0.5046765208244324, "train/loss_total": 0.6331138610839844 }, { "epoch": 2.2011755276516163, "step": 8239, "train/loss_ctc": 0.6900321841239929, "train/loss_error": 0.44363725185394287, "train/loss_total": 0.4929162263870239 }, { "epoch": 2.2014426930269835, "grad_norm": 1.7175871133804321, "learning_rate": 1.6799358803099116e-05, "loss": 0.5338, "step": 8240 }, { "epoch": 2.2014426930269835, "step": 8240, "train/loss_ctc": 0.5698298215866089, "train/loss_error": 0.4757688343524933, "train/loss_total": 0.49458104372024536 }, { "epoch": 2.201709858402351, "step": 8241, "train/loss_ctc": 0.4275890290737152, "train/loss_error": 0.4506118893623352, "train/loss_total": 0.44600731134414673 }, { "epoch": 2.2019770237777183, "step": 8242, "train/loss_ctc": 0.28221866488456726, "train/loss_error": 0.41828665137290955, "train/loss_total": 0.3910730481147766 }, { "epoch": 2.202244189153086, "step": 8243, "train/loss_ctc": 1.0168769359588623, "train/loss_error": 0.5188132524490356, "train/loss_total": 0.6184259653091431 }, { "epoch": 2.202511354528453, "step": 8244, "train/loss_ctc": 0.651157021522522, "train/loss_error": 0.45449236035346985, "train/loss_total": 0.4938253164291382 }, { "epoch": 2.2027785199038203, "step": 8245, "train/loss_ctc": 1.077252984046936, "train/loss_error": 0.5269735455513, "train/loss_total": 0.6370294690132141 }, { "epoch": 2.203045685279188, "step": 8246, "train/loss_ctc": 1.320446252822876, "train/loss_error": 0.4299980103969574, "train/loss_total": 0.6080876588821411 }, { "epoch": 2.203312850654555, "step": 8247, "train/loss_ctc": 0.43259358406066895, "train/loss_error": 0.5344601273536682, "train/loss_total": 0.5140868425369263 }, { "epoch": 
2.2035800160299224, "step": 8248, "train/loss_ctc": 0.5316038131713867, "train/loss_error": 0.46721550822257996, "train/loss_total": 0.48009318113327026 }, { "epoch": 2.20384718140529, "step": 8249, "train/loss_ctc": 0.2942945957183838, "train/loss_error": 0.4924555718898773, "train/loss_total": 0.4528234004974365 }, { "epoch": 2.204114346780657, "grad_norm": 1.179335355758667, "learning_rate": 1.678332888057708e-05, "loss": 0.5136, "step": 8250 }, { "epoch": 2.204114346780657, "step": 8250, "train/loss_ctc": 1.3825106620788574, "train/loss_error": 0.532400906085968, "train/loss_total": 0.7024228572845459 }, { "epoch": 2.2043815121560244, "step": 8251, "train/loss_ctc": 1.2182832956314087, "train/loss_error": 0.4509778320789337, "train/loss_total": 0.6044389009475708 }, { "epoch": 2.204648677531392, "step": 8252, "train/loss_ctc": 0.36255699396133423, "train/loss_error": 0.5001164674758911, "train/loss_total": 0.47260457277297974 }, { "epoch": 2.2049158429067592, "step": 8253, "train/loss_ctc": 0.7444702386856079, "train/loss_error": 0.47532105445861816, "train/loss_total": 0.5291509032249451 }, { "epoch": 2.2051830082821264, "step": 8254, "train/loss_ctc": 0.801779568195343, "train/loss_error": 0.4088882505893707, "train/loss_total": 0.4874665141105652 }, { "epoch": 2.205450173657494, "step": 8255, "train/loss_ctc": 0.6450139284133911, "train/loss_error": 0.4764670431613922, "train/loss_total": 0.510176420211792 }, { "epoch": 2.2057173390328613, "step": 8256, "train/loss_ctc": 0.7424972057342529, "train/loss_error": 0.4887906014919281, "train/loss_total": 0.539531946182251 }, { "epoch": 2.205984504408229, "step": 8257, "train/loss_ctc": 1.3141365051269531, "train/loss_error": 0.4522169232368469, "train/loss_total": 0.624600887298584 }, { "epoch": 2.206251669783596, "step": 8258, "train/loss_ctc": 1.3852134943008423, "train/loss_error": 0.4826582372188568, "train/loss_total": 0.6631693243980408 }, { "epoch": 2.2065188351589633, "step": 8259, "train/loss_ctc": 
0.8051096200942993, "train/loss_error": 0.4589494466781616, "train/loss_total": 0.5281814932823181 }, { "epoch": 2.206786000534331, "grad_norm": 1.4250174760818481, "learning_rate": 1.676729895805504e-05, "loss": 0.5662, "step": 8260 }, { "epoch": 2.206786000534331, "step": 8260, "train/loss_ctc": 1.0782365798950195, "train/loss_error": 0.42935845255851746, "train/loss_total": 0.5591340661048889 }, { "epoch": 2.207053165909698, "step": 8261, "train/loss_ctc": 1.2885067462921143, "train/loss_error": 0.4796220362186432, "train/loss_total": 0.6413990259170532 }, { "epoch": 2.2073203312850653, "step": 8262, "train/loss_ctc": 0.6647567749023438, "train/loss_error": 0.46305811405181885, "train/loss_total": 0.5033978819847107 }, { "epoch": 2.207587496660433, "step": 8263, "train/loss_ctc": 0.3835899829864502, "train/loss_error": 0.44764187932014465, "train/loss_total": 0.43483150005340576 }, { "epoch": 2.2078546620358, "step": 8264, "train/loss_ctc": 1.5219790935516357, "train/loss_error": 0.481596440076828, "train/loss_total": 0.6896729469299316 }, { "epoch": 2.2081218274111674, "step": 8265, "train/loss_ctc": 0.4699043929576874, "train/loss_error": 0.3845407962799072, "train/loss_total": 0.4016135334968567 }, { "epoch": 2.208388992786535, "step": 8266, "train/loss_ctc": 0.7874394059181213, "train/loss_error": 0.44472500681877136, "train/loss_total": 0.5132678747177124 }, { "epoch": 2.208656158161902, "step": 8267, "train/loss_ctc": 1.1466448307037354, "train/loss_error": 0.40437424182891846, "train/loss_total": 0.5528283715248108 }, { "epoch": 2.2089233235372694, "step": 8268, "train/loss_ctc": 0.33423542976379395, "train/loss_error": 0.4157122075557709, "train/loss_total": 0.39941686391830444 }, { "epoch": 2.209190488912637, "step": 8269, "train/loss_ctc": 1.1750514507293701, "train/loss_error": 0.4833798408508301, "train/loss_total": 0.621714174747467 }, { "epoch": 2.2094576542880042, "grad_norm": 2.135162353515625, "learning_rate": 1.6751269035532997e-05, "loss": 
0.5317, "step": 8270 }, { "epoch": 2.2094576542880042, "step": 8270, "train/loss_ctc": 0.9392805099487305, "train/loss_error": 0.5496990084648132, "train/loss_total": 0.6276153326034546 }, { "epoch": 2.2097248196633714, "step": 8271, "train/loss_ctc": 0.8621571063995361, "train/loss_error": 0.4957242012023926, "train/loss_total": 0.5690107941627502 }, { "epoch": 2.209991985038739, "step": 8272, "train/loss_ctc": 0.8197975158691406, "train/loss_error": 0.3977696895599365, "train/loss_total": 0.4821752607822418 }, { "epoch": 2.2102591504141063, "step": 8273, "train/loss_ctc": 1.1565324068069458, "train/loss_error": 0.4866742193698883, "train/loss_total": 0.6206458806991577 }, { "epoch": 2.2105263157894735, "step": 8274, "train/loss_ctc": 1.0407819747924805, "train/loss_error": 0.44443175196647644, "train/loss_total": 0.5637018084526062 }, { "epoch": 2.210793481164841, "step": 8275, "train/loss_ctc": 0.6552301645278931, "train/loss_error": 0.48881271481513977, "train/loss_total": 0.5220962166786194 }, { "epoch": 2.2110606465402083, "step": 8276, "train/loss_ctc": 1.021631121635437, "train/loss_error": 0.4855460524559021, "train/loss_total": 0.5927630662918091 }, { "epoch": 2.211327811915576, "step": 8277, "train/loss_ctc": 0.7482474446296692, "train/loss_error": 0.44786664843559265, "train/loss_total": 0.507942795753479 }, { "epoch": 2.211594977290943, "step": 8278, "train/loss_ctc": 0.39988282322883606, "train/loss_error": 0.44142571091651917, "train/loss_total": 0.4331171214580536 }, { "epoch": 2.2118621426663103, "step": 8279, "train/loss_ctc": 0.72233647108078, "train/loss_error": 0.45785439014434814, "train/loss_total": 0.5107507705688477 }, { "epoch": 2.212129308041678, "grad_norm": 2.17480206489563, "learning_rate": 1.6735239113010955e-05, "loss": 0.543, "step": 8280 }, { "epoch": 2.212129308041678, "step": 8280, "train/loss_ctc": 0.904596745967865, "train/loss_error": 0.4496873915195465, "train/loss_total": 0.5406692624092102 }, { "epoch": 2.212396473417045, 
"step": 8281, "train/loss_ctc": 0.9520018696784973, "train/loss_error": 0.4199881851673126, "train/loss_total": 0.5263909101486206 }, { "epoch": 2.2126636387924123, "step": 8282, "train/loss_ctc": 0.6931089162826538, "train/loss_error": 0.46558433771133423, "train/loss_total": 0.5110892653465271 }, { "epoch": 2.21293080416778, "step": 8283, "train/loss_ctc": 1.5528533458709717, "train/loss_error": 0.5032387375831604, "train/loss_total": 0.7131617069244385 }, { "epoch": 2.213197969543147, "step": 8284, "train/loss_ctc": 1.0670623779296875, "train/loss_error": 0.44103530049324036, "train/loss_total": 0.5662407279014587 }, { "epoch": 2.2134651349185144, "step": 8285, "train/loss_ctc": 0.7660626173019409, "train/loss_error": 0.43816083669662476, "train/loss_total": 0.5037412047386169 }, { "epoch": 2.213732300293882, "step": 8286, "train/loss_ctc": 0.370575487613678, "train/loss_error": 0.38094502687454224, "train/loss_total": 0.3788711130619049 }, { "epoch": 2.213999465669249, "step": 8287, "train/loss_ctc": 0.5599892139434814, "train/loss_error": 0.42802631855010986, "train/loss_total": 0.4544188976287842 }, { "epoch": 2.214266631044617, "step": 8288, "train/loss_ctc": 0.9421489834785461, "train/loss_error": 0.487245112657547, "train/loss_total": 0.5782259106636047 }, { "epoch": 2.214533796419984, "step": 8289, "train/loss_ctc": 0.566670298576355, "train/loss_error": 0.5171443223953247, "train/loss_total": 0.5270495414733887 }, { "epoch": 2.2148009617953512, "grad_norm": 1.9809156656265259, "learning_rate": 1.6719209190488913e-05, "loss": 0.53, "step": 8290 }, { "epoch": 2.2148009617953512, "step": 8290, "train/loss_ctc": 1.1415350437164307, "train/loss_error": 0.4525439143180847, "train/loss_total": 0.5903421640396118 }, { "epoch": 2.215068127170719, "step": 8291, "train/loss_ctc": 0.9913381338119507, "train/loss_error": 0.4742475748062134, "train/loss_total": 0.5776656866073608 }, { "epoch": 2.215335292546086, "step": 8292, "train/loss_ctc": 1.1451714038848877, 
"train/loss_error": 0.44401806592941284, "train/loss_total": 0.5842487215995789 }, { "epoch": 2.2156024579214533, "step": 8293, "train/loss_ctc": 0.8512595295906067, "train/loss_error": 0.4388144314289093, "train/loss_total": 0.5213034152984619 }, { "epoch": 2.215869623296821, "step": 8294, "train/loss_ctc": 1.1019879579544067, "train/loss_error": 0.4149313271045685, "train/loss_total": 0.5523426532745361 }, { "epoch": 2.216136788672188, "step": 8295, "train/loss_ctc": 0.49751588702201843, "train/loss_error": 0.45189738273620605, "train/loss_total": 0.4610210955142975 }, { "epoch": 2.2164039540475553, "step": 8296, "train/loss_ctc": 0.7819799184799194, "train/loss_error": 0.42839086055755615, "train/loss_total": 0.4991086721420288 }, { "epoch": 2.216671119422923, "step": 8297, "train/loss_ctc": 0.45155584812164307, "train/loss_error": 0.4039345681667328, "train/loss_total": 0.41345882415771484 }, { "epoch": 2.21693828479829, "step": 8298, "train/loss_ctc": 0.4781925082206726, "train/loss_error": 0.46275439858436584, "train/loss_total": 0.46584203839302063 }, { "epoch": 2.2172054501736573, "step": 8299, "train/loss_ctc": 0.3705707788467407, "train/loss_error": 0.36698952317237854, "train/loss_total": 0.367705762386322 }, { "epoch": 2.217472615549025, "grad_norm": 1.5271183252334595, "learning_rate": 1.670317926796687e-05, "loss": 0.5033, "step": 8300 }, { "epoch": 2.217472615549025, "step": 8300, "train/loss_ctc": 0.8112995624542236, "train/loss_error": 0.4228673279285431, "train/loss_total": 0.5005537867546082 }, { "epoch": 2.217739780924392, "step": 8301, "train/loss_ctc": 1.0035226345062256, "train/loss_error": 0.4929710328578949, "train/loss_total": 0.5950813889503479 }, { "epoch": 2.2180069462997594, "step": 8302, "train/loss_ctc": 0.8346572518348694, "train/loss_error": 0.46184101700782776, "train/loss_total": 0.5364042520523071 }, { "epoch": 2.218274111675127, "step": 8303, "train/loss_ctc": 0.5791745185852051, "train/loss_error": 0.4542154371738434, 
"train/loss_total": 0.47920727729797363 }, { "epoch": 2.218541277050494, "step": 8304, "train/loss_ctc": 0.7392692565917969, "train/loss_error": 0.42509281635284424, "train/loss_total": 0.4879281222820282 }, { "epoch": 2.2188084424258614, "step": 8305, "train/loss_ctc": 0.777862548828125, "train/loss_error": 0.4425162076950073, "train/loss_total": 0.5095854997634888 }, { "epoch": 2.219075607801229, "step": 8306, "train/loss_ctc": 0.7843616604804993, "train/loss_error": 0.4615044891834259, "train/loss_total": 0.5260759592056274 }, { "epoch": 2.2193427731765962, "step": 8307, "train/loss_ctc": 0.4496995806694031, "train/loss_error": 0.4166875183582306, "train/loss_total": 0.423289954662323 }, { "epoch": 2.219609938551964, "step": 8308, "train/loss_ctc": 1.0800116062164307, "train/loss_error": 0.4051229655742645, "train/loss_total": 0.5401006937026978 }, { "epoch": 2.219877103927331, "step": 8309, "train/loss_ctc": 0.5983036756515503, "train/loss_error": 0.464239239692688, "train/loss_total": 0.49105212092399597 }, { "epoch": 2.2201442693026983, "grad_norm": 2.5739314556121826, "learning_rate": 1.6687149345444833e-05, "loss": 0.5089, "step": 8310 }, { "epoch": 2.2201442693026983, "step": 8310, "train/loss_ctc": 0.35259392857551575, "train/loss_error": 0.42010605335235596, "train/loss_total": 0.4066036343574524 }, { "epoch": 2.220411434678066, "step": 8311, "train/loss_ctc": 0.24537013471126556, "train/loss_error": 0.37897875905036926, "train/loss_total": 0.35225704312324524 }, { "epoch": 2.220678600053433, "step": 8312, "train/loss_ctc": 1.8193745613098145, "train/loss_error": 0.4819847643375397, "train/loss_total": 0.7494627237319946 }, { "epoch": 2.2209457654288003, "step": 8313, "train/loss_ctc": 0.5665059089660645, "train/loss_error": 0.4167022705078125, "train/loss_total": 0.4466630220413208 }, { "epoch": 2.221212930804168, "step": 8314, "train/loss_ctc": 0.5784716606140137, "train/loss_error": 0.4883080720901489, "train/loss_total": 0.5063408017158508 }, { 
"epoch": 2.221480096179535, "step": 8315, "train/loss_ctc": 0.9125955104827881, "train/loss_error": 0.4691691994667053, "train/loss_total": 0.5578544735908508 }, { "epoch": 2.2217472615549023, "step": 8316, "train/loss_ctc": 0.5563068985939026, "train/loss_error": 0.4472607970237732, "train/loss_total": 0.4690700173377991 }, { "epoch": 2.22201442693027, "step": 8317, "train/loss_ctc": 1.4738422632217407, "train/loss_error": 0.42317983508110046, "train/loss_total": 0.6333123445510864 }, { "epoch": 2.222281592305637, "step": 8318, "train/loss_ctc": 0.23224472999572754, "train/loss_error": 0.3582276701927185, "train/loss_total": 0.3330310881137848 }, { "epoch": 2.2225487576810043, "step": 8319, "train/loss_ctc": 1.4352272748947144, "train/loss_error": 0.48170968890190125, "train/loss_total": 0.6724132299423218 }, { "epoch": 2.222815923056372, "grad_norm": 1.7781331539154053, "learning_rate": 1.667111942292279e-05, "loss": 0.5127, "step": 8320 }, { "epoch": 2.222815923056372, "step": 8320, "train/loss_ctc": 0.5700250864028931, "train/loss_error": 0.48463577032089233, "train/loss_total": 0.5017136335372925 }, { "epoch": 2.223083088431739, "step": 8321, "train/loss_ctc": 0.8214495778083801, "train/loss_error": 0.5123259425163269, "train/loss_total": 0.5741506814956665 }, { "epoch": 2.223350253807107, "step": 8322, "train/loss_ctc": 0.8233094215393066, "train/loss_error": 0.44560858607292175, "train/loss_total": 0.5211488008499146 }, { "epoch": 2.223617419182474, "step": 8323, "train/loss_ctc": 1.005895733833313, "train/loss_error": 0.42144396901130676, "train/loss_total": 0.5383343696594238 }, { "epoch": 2.223884584557841, "step": 8324, "train/loss_ctc": 1.1714050769805908, "train/loss_error": 0.3862060308456421, "train/loss_total": 0.5432458519935608 }, { "epoch": 2.224151749933209, "step": 8325, "train/loss_ctc": 1.1937780380249023, "train/loss_error": 0.4515102207660675, "train/loss_total": 0.5999637842178345 }, { "epoch": 2.224418915308576, "step": 8326, 
"train/loss_ctc": 1.1769111156463623, "train/loss_error": 0.47908908128738403, "train/loss_total": 0.6186535358428955 }, { "epoch": 2.2246860806839432, "step": 8327, "train/loss_ctc": 0.48972147703170776, "train/loss_error": 0.38367852568626404, "train/loss_total": 0.4048871099948883 }, { "epoch": 2.224953246059311, "step": 8328, "train/loss_ctc": 0.65113765001297, "train/loss_error": 0.4664689898490906, "train/loss_total": 0.5034027099609375 }, { "epoch": 2.225220411434678, "step": 8329, "train/loss_ctc": 0.5444777011871338, "train/loss_error": 0.48893430829048157, "train/loss_total": 0.5000430345535278 }, { "epoch": 2.2254875768100453, "grad_norm": 3.0354931354522705, "learning_rate": 1.665508950040075e-05, "loss": 0.5306, "step": 8330 }, { "epoch": 2.2254875768100453, "step": 8330, "train/loss_ctc": 0.5777945518493652, "train/loss_error": 0.48009905219078064, "train/loss_total": 0.499638170003891 }, { "epoch": 2.225754742185413, "step": 8331, "train/loss_ctc": 0.9460288286209106, "train/loss_error": 0.4970989525318146, "train/loss_total": 0.5868849754333496 }, { "epoch": 2.22602190756078, "step": 8332, "train/loss_ctc": 0.8169068098068237, "train/loss_error": 0.3767600357532501, "train/loss_total": 0.46478939056396484 }, { "epoch": 2.2262890729361473, "step": 8333, "train/loss_ctc": 0.5264999866485596, "train/loss_error": 0.51503586769104, "train/loss_total": 0.517328679561615 }, { "epoch": 2.226556238311515, "step": 8334, "train/loss_ctc": 1.3386671543121338, "train/loss_error": 0.4699123501777649, "train/loss_total": 0.6436632871627808 }, { "epoch": 2.226823403686882, "step": 8335, "train/loss_ctc": 0.7978314161300659, "train/loss_error": 0.46031081676483154, "train/loss_total": 0.5278149843215942 }, { "epoch": 2.2270905690622493, "step": 8336, "train/loss_ctc": 0.6166195869445801, "train/loss_error": 0.43385636806488037, "train/loss_total": 0.47040900588035583 }, { "epoch": 2.227357734437617, "step": 8337, "train/loss_ctc": 0.8774468898773193, 
"train/loss_error": 0.44792070984840393, "train/loss_total": 0.5338259339332581 }, { "epoch": 2.227624899812984, "step": 8338, "train/loss_ctc": 1.3056650161743164, "train/loss_error": 0.4568335711956024, "train/loss_total": 0.626599907875061 }, { "epoch": 2.2278920651883514, "step": 8339, "train/loss_ctc": 0.4860343337059021, "train/loss_error": 0.43129265308380127, "train/loss_total": 0.44224098324775696 }, { "epoch": 2.228159230563719, "grad_norm": 1.4918327331542969, "learning_rate": 1.6639059577878707e-05, "loss": 0.5313, "step": 8340 }, { "epoch": 2.228159230563719, "step": 8340, "train/loss_ctc": 1.1316713094711304, "train/loss_error": 0.4521346688270569, "train/loss_total": 0.5880420207977295 }, { "epoch": 2.228426395939086, "step": 8341, "train/loss_ctc": 0.8887335062026978, "train/loss_error": 0.5426704287528992, "train/loss_total": 0.6118830442428589 }, { "epoch": 2.228693561314454, "step": 8342, "train/loss_ctc": 0.5749046802520752, "train/loss_error": 0.389600932598114, "train/loss_total": 0.4266617000102997 }, { "epoch": 2.228960726689821, "step": 8343, "train/loss_ctc": 0.9197492599487305, "train/loss_error": 0.4518718421459198, "train/loss_total": 0.5454473495483398 }, { "epoch": 2.2292278920651882, "step": 8344, "train/loss_ctc": 0.7498700618743896, "train/loss_error": 0.48454251885414124, "train/loss_total": 0.5376080274581909 }, { "epoch": 2.229495057440556, "step": 8345, "train/loss_ctc": 0.4917318820953369, "train/loss_error": 0.3177899122238159, "train/loss_total": 0.3525783121585846 }, { "epoch": 2.229762222815923, "step": 8346, "train/loss_ctc": 0.45261290669441223, "train/loss_error": 0.45510560274124146, "train/loss_total": 0.4546070694923401 }, { "epoch": 2.2300293881912903, "step": 8347, "train/loss_ctc": 0.8340214490890503, "train/loss_error": 0.4433131515979767, "train/loss_total": 0.5214548110961914 }, { "epoch": 2.230296553566658, "step": 8348, "train/loss_ctc": 0.4884030520915985, "train/loss_error": 0.4339251220226288, 
"train/loss_total": 0.44482070207595825 }, { "epoch": 2.230563718942025, "step": 8349, "train/loss_ctc": 0.5932285785675049, "train/loss_error": 0.46095719933509827, "train/loss_total": 0.4874114990234375 }, { "epoch": 2.2308308843173923, "grad_norm": 3.5776760578155518, "learning_rate": 1.6623029655356665e-05, "loss": 0.4971, "step": 8350 }, { "epoch": 2.2308308843173923, "step": 8350, "train/loss_ctc": 0.9762635827064514, "train/loss_error": 0.41585758328437805, "train/loss_total": 0.5279387831687927 }, { "epoch": 2.23109804969276, "step": 8351, "train/loss_ctc": 1.029283046722412, "train/loss_error": 0.4715041518211365, "train/loss_total": 0.5830599069595337 }, { "epoch": 2.231365215068127, "step": 8352, "train/loss_ctc": 0.621565580368042, "train/loss_error": 0.3643580675125122, "train/loss_total": 0.4157995879650116 }, { "epoch": 2.2316323804434943, "step": 8353, "train/loss_ctc": 1.13992440700531, "train/loss_error": 0.4635537564754486, "train/loss_total": 0.5988278985023499 }, { "epoch": 2.231899545818862, "step": 8354, "train/loss_ctc": 0.5485784411430359, "train/loss_error": 0.4993593096733093, "train/loss_total": 0.5092031359672546 }, { "epoch": 2.232166711194229, "step": 8355, "train/loss_ctc": 0.5647732019424438, "train/loss_error": 0.4248403310775757, "train/loss_total": 0.45282691717147827 }, { "epoch": 2.232433876569597, "step": 8356, "train/loss_ctc": 0.4429500699043274, "train/loss_error": 0.43239936232566833, "train/loss_total": 0.4345095157623291 }, { "epoch": 2.232701041944964, "step": 8357, "train/loss_ctc": 0.7408325672149658, "train/loss_error": 0.38918668031692505, "train/loss_total": 0.45951586961746216 }, { "epoch": 2.232968207320331, "step": 8358, "train/loss_ctc": 0.8668269515037537, "train/loss_error": 0.3902645707130432, "train/loss_total": 0.4855770468711853 }, { "epoch": 2.233235372695699, "step": 8359, "train/loss_ctc": 1.5433145761489868, "train/loss_error": 0.4565976858139038, "train/loss_total": 0.6739410758018494 }, { "epoch": 
2.233502538071066, "grad_norm": 14.34791374206543, "learning_rate": 1.6606999732834623e-05, "loss": 0.5141, "step": 8360 }, { "epoch": 2.233502538071066, "step": 8360, "train/loss_ctc": 0.6617146730422974, "train/loss_error": 0.47796133160591125, "train/loss_total": 0.5147119760513306 }, { "epoch": 2.233769703446433, "step": 8361, "train/loss_ctc": 0.6945030689239502, "train/loss_error": 0.548069179058075, "train/loss_total": 0.5773559808731079 }, { "epoch": 2.234036868821801, "step": 8362, "train/loss_ctc": 0.49319130182266235, "train/loss_error": 0.4402272403240204, "train/loss_total": 0.45082005858421326 }, { "epoch": 2.234304034197168, "step": 8363, "train/loss_ctc": 1.4145259857177734, "train/loss_error": 0.459274023771286, "train/loss_total": 0.6503244042396545 }, { "epoch": 2.2345711995725352, "step": 8364, "train/loss_ctc": 0.5228114724159241, "train/loss_error": 0.49459969997406006, "train/loss_total": 0.5002420544624329 }, { "epoch": 2.234838364947903, "step": 8365, "train/loss_ctc": 0.6836715936660767, "train/loss_error": 0.43503421545028687, "train/loss_total": 0.48476171493530273 }, { "epoch": 2.23510553032327, "step": 8366, "train/loss_ctc": 1.2809512615203857, "train/loss_error": 0.42531639337539673, "train/loss_total": 0.5964434146881104 }, { "epoch": 2.2353726956986373, "step": 8367, "train/loss_ctc": 0.47624099254608154, "train/loss_error": 0.40628883242607117, "train/loss_total": 0.42027926445007324 }, { "epoch": 2.235639861074005, "step": 8368, "train/loss_ctc": 0.6629866361618042, "train/loss_error": 0.5215286016464233, "train/loss_total": 0.5498201847076416 }, { "epoch": 2.235907026449372, "step": 8369, "train/loss_ctc": 0.5209157466888428, "train/loss_error": 0.4409720003604889, "train/loss_total": 0.4569607377052307 }, { "epoch": 2.2361741918247393, "grad_norm": 1.235548496246338, "learning_rate": 1.6590969810312584e-05, "loss": 0.5202, "step": 8370 }, { "epoch": 2.2361741918247393, "step": 8370, "train/loss_ctc": 0.8781849145889282, 
"train/loss_error": 0.3504486083984375, "train/loss_total": 0.4559958577156067 }, { "epoch": 2.236441357200107, "step": 8371, "train/loss_ctc": 0.6793251037597656, "train/loss_error": 0.43945953249931335, "train/loss_total": 0.48743265867233276 }, { "epoch": 2.236708522575474, "step": 8372, "train/loss_ctc": 0.6104971170425415, "train/loss_error": 0.43347883224487305, "train/loss_total": 0.4688825011253357 }, { "epoch": 2.2369756879508413, "step": 8373, "train/loss_ctc": 0.8370609283447266, "train/loss_error": 0.4444142282009125, "train/loss_total": 0.5229436159133911 }, { "epoch": 2.237242853326209, "step": 8374, "train/loss_ctc": 0.8671510219573975, "train/loss_error": 0.5129185914993286, "train/loss_total": 0.5837650895118713 }, { "epoch": 2.237510018701576, "step": 8375, "train/loss_ctc": 0.7685827016830444, "train/loss_error": 0.5084220170974731, "train/loss_total": 0.5604541897773743 }, { "epoch": 2.237777184076944, "step": 8376, "train/loss_ctc": 0.69707852602005, "train/loss_error": 0.4517807066440582, "train/loss_total": 0.5008403062820435 }, { "epoch": 2.238044349452311, "step": 8377, "train/loss_ctc": 0.5185003280639648, "train/loss_error": 0.4602062404155731, "train/loss_total": 0.47186505794525146 }, { "epoch": 2.238311514827678, "step": 8378, "train/loss_ctc": 0.7022026777267456, "train/loss_error": 0.5022721886634827, "train/loss_total": 0.5422583222389221 }, { "epoch": 2.238578680203046, "step": 8379, "train/loss_ctc": 0.7724806070327759, "train/loss_error": 0.5180291533470154, "train/loss_total": 0.5689194202423096 }, { "epoch": 2.238845845578413, "grad_norm": 4.64066743850708, "learning_rate": 1.6574939887790542e-05, "loss": 0.5163, "step": 8380 }, { "epoch": 2.238845845578413, "step": 8380, "train/loss_ctc": 1.0831233263015747, "train/loss_error": 0.486794114112854, "train/loss_total": 0.6060599684715271 }, { "epoch": 2.2391130109537802, "step": 8381, "train/loss_ctc": 0.6542561054229736, "train/loss_error": 0.5335565805435181, 
"train/loss_total": 0.557696521282196 }, { "epoch": 2.239380176329148, "step": 8382, "train/loss_ctc": 0.815651535987854, "train/loss_error": 0.4205462634563446, "train/loss_total": 0.49956732988357544 }, { "epoch": 2.239647341704515, "step": 8383, "train/loss_ctc": 0.43496963381767273, "train/loss_error": 0.4055520296096802, "train/loss_total": 0.4114355742931366 }, { "epoch": 2.2399145070798823, "step": 8384, "train/loss_ctc": 0.9936148524284363, "train/loss_error": 0.4373498857021332, "train/loss_total": 0.5486028790473938 }, { "epoch": 2.24018167245525, "step": 8385, "train/loss_ctc": 1.5847499370574951, "train/loss_error": 0.4604712426662445, "train/loss_total": 0.6853269934654236 }, { "epoch": 2.240448837830617, "step": 8386, "train/loss_ctc": 1.1593869924545288, "train/loss_error": 0.42961910367012024, "train/loss_total": 0.575572669506073 }, { "epoch": 2.2407160032059843, "step": 8387, "train/loss_ctc": 1.636871099472046, "train/loss_error": 0.4365582764148712, "train/loss_total": 0.6766208410263062 }, { "epoch": 2.240983168581352, "step": 8388, "train/loss_ctc": 0.9934256672859192, "train/loss_error": 0.4752453565597534, "train/loss_total": 0.5788814425468445 }, { "epoch": 2.241250333956719, "step": 8389, "train/loss_ctc": 0.5053634643554688, "train/loss_error": 0.45236632227897644, "train/loss_total": 0.4629657566547394 }, { "epoch": 2.2415174993320868, "grad_norm": 6.33084774017334, "learning_rate": 1.65589099652685e-05, "loss": 0.5603, "step": 8390 }, { "epoch": 2.2415174993320868, "step": 8390, "train/loss_ctc": 0.864203691482544, "train/loss_error": 0.41585657000541687, "train/loss_total": 0.5055260062217712 }, { "epoch": 2.241784664707454, "step": 8391, "train/loss_ctc": 0.38226115703582764, "train/loss_error": 0.4483935236930847, "train/loss_total": 0.4351670444011688 }, { "epoch": 2.242051830082821, "step": 8392, "train/loss_ctc": 0.41326263546943665, "train/loss_error": 0.4709300100803375, "train/loss_total": 0.4593965411186218 }, { "epoch": 
2.242318995458189, "step": 8393, "train/loss_ctc": 0.737395167350769, "train/loss_error": 0.46907979249954224, "train/loss_total": 0.5227428674697876 }, { "epoch": 2.242586160833556, "step": 8394, "train/loss_ctc": 0.36038917303085327, "train/loss_error": 0.4809785783290863, "train/loss_total": 0.4568607211112976 }, { "epoch": 2.242853326208923, "step": 8395, "train/loss_ctc": 0.8507223129272461, "train/loss_error": 0.5257046222686768, "train/loss_total": 0.5907081365585327 }, { "epoch": 2.243120491584291, "step": 8396, "train/loss_ctc": 0.4335145354270935, "train/loss_error": 0.4334082305431366, "train/loss_total": 0.4334295094013214 }, { "epoch": 2.243387656959658, "step": 8397, "train/loss_ctc": 0.4569873809814453, "train/loss_error": 0.43669357895851135, "train/loss_total": 0.4407523274421692 }, { "epoch": 2.2436548223350252, "step": 8398, "train/loss_ctc": 1.2548081874847412, "train/loss_error": 0.47648870944976807, "train/loss_total": 0.6321526169776917 }, { "epoch": 2.243921987710393, "step": 8399, "train/loss_ctc": 0.7235155701637268, "train/loss_error": 0.407124400138855, "train/loss_total": 0.47040265798568726 }, { "epoch": 2.24418915308576, "grad_norm": 4.452159881591797, "learning_rate": 1.654288004274646e-05, "loss": 0.4947, "step": 8400 }, { "epoch": 2.24418915308576, "step": 8400, "train/loss_ctc": 0.3939230442047119, "train/loss_error": 0.37914571166038513, "train/loss_total": 0.3821011781692505 }, { "epoch": 2.2444563184611273, "step": 8401, "train/loss_ctc": 0.8946431279182434, "train/loss_error": 0.4741169810295105, "train/loss_total": 0.558222234249115 }, { "epoch": 2.244723483836495, "step": 8402, "train/loss_ctc": 0.583921492099762, "train/loss_error": 0.429832398891449, "train/loss_total": 0.460650235414505 }, { "epoch": 2.244990649211862, "step": 8403, "train/loss_ctc": 0.3354935944080353, "train/loss_error": 0.42103537917137146, "train/loss_total": 0.4039270281791687 }, { "epoch": 2.2452578145872293, "step": 8404, "train/loss_ctc": 
1.1597929000854492, "train/loss_error": 0.4682447612285614, "train/loss_total": 0.606554388999939 }, { "epoch": 2.245524979962597, "step": 8405, "train/loss_ctc": 0.583651602268219, "train/loss_error": 0.4404340982437134, "train/loss_total": 0.46907761693000793 }, { "epoch": 2.245792145337964, "step": 8406, "train/loss_ctc": 0.4092331528663635, "train/loss_error": 0.48027196526527405, "train/loss_total": 0.4660642147064209 }, { "epoch": 2.2460593107133318, "step": 8407, "train/loss_ctc": 0.40533891320228577, "train/loss_error": 0.5089304447174072, "train/loss_total": 0.4882121682167053 }, { "epoch": 2.246326476088699, "step": 8408, "train/loss_ctc": 0.44738274812698364, "train/loss_error": 0.48417049646377563, "train/loss_total": 0.4768129587173462 }, { "epoch": 2.246593641464066, "step": 8409, "train/loss_ctc": 1.1042520999908447, "train/loss_error": 0.4827274680137634, "train/loss_total": 0.6070324182510376 }, { "epoch": 2.246860806839434, "grad_norm": 1.2987122535705566, "learning_rate": 1.6528453112476625e-05, "loss": 0.4919, "step": 8410 }, { "epoch": 2.246860806839434, "step": 8410, "train/loss_ctc": 0.37663590908050537, "train/loss_error": 0.3753677308559418, "train/loss_total": 0.37562137842178345 }, { "epoch": 2.247127972214801, "step": 8411, "train/loss_ctc": 0.5370370149612427, "train/loss_error": 0.477485328912735, "train/loss_total": 0.4893956780433655 }, { "epoch": 2.247395137590168, "step": 8412, "train/loss_ctc": 0.8045904636383057, "train/loss_error": 0.44898954033851624, "train/loss_total": 0.5201097130775452 }, { "epoch": 2.247662302965536, "step": 8413, "train/loss_ctc": 1.2120401859283447, "train/loss_error": 0.4790083169937134, "train/loss_total": 0.6256147027015686 }, { "epoch": 2.247929468340903, "step": 8414, "train/loss_ctc": 0.7507451772689819, "train/loss_error": 0.44265711307525635, "train/loss_total": 0.5042747259140015 }, { "epoch": 2.24819663371627, "step": 8415, "train/loss_ctc": 0.5299021601676941, "train/loss_error": 
0.49918508529663086, "train/loss_total": 0.5053285360336304 }, { "epoch": 2.248463799091638, "step": 8416, "train/loss_ctc": 0.9193035364151001, "train/loss_error": 0.3939480185508728, "train/loss_total": 0.49901914596557617 }, { "epoch": 2.248730964467005, "step": 8417, "train/loss_ctc": 0.4048340320587158, "train/loss_error": 0.4628749489784241, "train/loss_total": 0.4512667655944824 }, { "epoch": 2.2489981298423722, "step": 8418, "train/loss_ctc": 0.43496379256248474, "train/loss_error": 0.47505345940589905, "train/loss_total": 0.46703553199768066 }, { "epoch": 2.24926529521774, "step": 8419, "train/loss_ctc": 0.7524102926254272, "train/loss_error": 0.41362231969833374, "train/loss_total": 0.4813799262046814 }, { "epoch": 2.249532460593107, "grad_norm": 2.374875783920288, "learning_rate": 1.6512423189954583e-05, "loss": 0.4919, "step": 8420 }, { "epoch": 2.249532460593107, "step": 8420, "train/loss_ctc": 0.46639326214790344, "train/loss_error": 0.4179130792617798, "train/loss_total": 0.4276091158390045 }, { "epoch": 2.2497996259684747, "step": 8421, "train/loss_ctc": 0.766380786895752, "train/loss_error": 0.39298009872436523, "train/loss_total": 0.46766024827957153 }, { "epoch": 2.250066791343842, "step": 8422, "train/loss_ctc": 0.6195485591888428, "train/loss_error": 0.4906187653541565, "train/loss_total": 0.5164047479629517 }, { "epoch": 2.250333956719209, "step": 8423, "train/loss_ctc": 1.1980502605438232, "train/loss_error": 0.45621880888938904, "train/loss_total": 0.6045851111412048 }, { "epoch": 2.2506011220945767, "step": 8424, "train/loss_ctc": 0.48050910234451294, "train/loss_error": 0.4167986810207367, "train/loss_total": 0.4295407831668854 }, { "epoch": 2.250868287469944, "step": 8425, "train/loss_ctc": 0.3657301664352417, "train/loss_error": 0.3959363102912903, "train/loss_total": 0.38989511132240295 }, { "epoch": 2.251135452845311, "step": 8426, "train/loss_ctc": 1.1553746461868286, "train/loss_error": 0.49839287996292114, "train/loss_total": 
0.6297892332077026 }, { "epoch": 2.2514026182206788, "step": 8427, "train/loss_ctc": 0.5079574584960938, "train/loss_error": 0.43382254242897034, "train/loss_total": 0.448649525642395 }, { "epoch": 2.251669783596046, "step": 8428, "train/loss_ctc": 0.7474393844604492, "train/loss_error": 0.4256398379802704, "train/loss_total": 0.48999977111816406 }, { "epoch": 2.251936948971413, "step": 8429, "train/loss_ctc": 0.8078539967536926, "train/loss_error": 0.43128111958503723, "train/loss_total": 0.5065957307815552 }, { "epoch": 2.252204114346781, "grad_norm": 2.774585008621216, "learning_rate": 1.649639326743254e-05, "loss": 0.4911, "step": 8430 }, { "epoch": 2.252204114346781, "step": 8430, "train/loss_ctc": 0.48323413729667664, "train/loss_error": 0.43659576773643494, "train/loss_total": 0.44592344760894775 }, { "epoch": 2.252471279722148, "step": 8431, "train/loss_ctc": 0.6930557489395142, "train/loss_error": 0.4691120684146881, "train/loss_total": 0.5139008164405823 }, { "epoch": 2.252738445097515, "step": 8432, "train/loss_ctc": 1.8195436000823975, "train/loss_error": 0.4344080090522766, "train/loss_total": 0.7114351391792297 }, { "epoch": 2.253005610472883, "step": 8433, "train/loss_ctc": 0.5002431869506836, "train/loss_error": 0.47411438822746277, "train/loss_total": 0.479340136051178 }, { "epoch": 2.25327277584825, "step": 8434, "train/loss_ctc": 0.3926022946834564, "train/loss_error": 0.44203105568885803, "train/loss_total": 0.43214529752731323 }, { "epoch": 2.2535399412236172, "step": 8435, "train/loss_ctc": 0.9558243751525879, "train/loss_error": 0.4236036539077759, "train/loss_total": 0.5300477743148804 }, { "epoch": 2.253807106598985, "step": 8436, "train/loss_ctc": 1.1197869777679443, "train/loss_error": 0.5537399649620056, "train/loss_total": 0.6669493913650513 }, { "epoch": 2.254074271974352, "step": 8437, "train/loss_ctc": 0.744006335735321, "train/loss_error": 0.4726271629333496, "train/loss_total": 0.5269030332565308 }, { "epoch": 2.2543414373497193, 
"step": 8438, "train/loss_ctc": 1.1333928108215332, "train/loss_error": 0.4485042989253998, "train/loss_total": 0.5854820013046265 }, { "epoch": 2.254608602725087, "step": 8439, "train/loss_ctc": 1.5394786596298218, "train/loss_error": 0.41446107625961304, "train/loss_total": 0.6394646167755127 }, { "epoch": 2.254875768100454, "grad_norm": 1.4886983633041382, "learning_rate": 1.64803633449105e-05, "loss": 0.5532, "step": 8440 }, { "epoch": 2.254875768100454, "step": 8440, "train/loss_ctc": 1.1191741228103638, "train/loss_error": 0.5246689915657043, "train/loss_total": 0.6435700058937073 }, { "epoch": 2.2551429334758213, "step": 8441, "train/loss_ctc": 0.6117287874221802, "train/loss_error": 0.4850133955478668, "train/loss_total": 0.5103564858436584 }, { "epoch": 2.255410098851189, "step": 8442, "train/loss_ctc": 0.722946047782898, "train/loss_error": 0.42978009581565857, "train/loss_total": 0.4884133040904999 }, { "epoch": 2.255677264226556, "step": 8443, "train/loss_ctc": 0.5423279404640198, "train/loss_error": 0.5132166743278503, "train/loss_total": 0.5190389156341553 }, { "epoch": 2.2559444296019238, "step": 8444, "train/loss_ctc": 0.4568302631378174, "train/loss_error": 0.4516010880470276, "train/loss_total": 0.452646940946579 }, { "epoch": 2.256211594977291, "step": 8445, "train/loss_ctc": 0.505418062210083, "train/loss_error": 0.3818899095058441, "train/loss_total": 0.40659552812576294 }, { "epoch": 2.256478760352658, "step": 8446, "train/loss_ctc": 0.5054609775543213, "train/loss_error": 0.4739684462547302, "train/loss_total": 0.4802669584751129 }, { "epoch": 2.256745925728026, "step": 8447, "train/loss_ctc": 1.0120149850845337, "train/loss_error": 0.4494536221027374, "train/loss_total": 0.5619658827781677 }, { "epoch": 2.257013091103393, "step": 8448, "train/loss_ctc": 1.0983806848526, "train/loss_error": 0.43761152029037476, "train/loss_total": 0.5697653293609619 }, { "epoch": 2.25728025647876, "step": 8449, "train/loss_ctc": 0.8500741720199585, 
"train/loss_error": 0.4317166209220886, "train/loss_total": 0.5153881311416626 }, { "epoch": 2.257547421854128, "grad_norm": 3.2656519412994385, "learning_rate": 1.6464333422388457e-05, "loss": 0.5148, "step": 8450 }, { "epoch": 2.257547421854128, "step": 8450, "train/loss_ctc": 0.6593039035797119, "train/loss_error": 0.37491926550865173, "train/loss_total": 0.43179619312286377 }, { "epoch": 2.257814587229495, "step": 8451, "train/loss_ctc": 0.8124159574508667, "train/loss_error": 0.5519930720329285, "train/loss_total": 0.6040776371955872 }, { "epoch": 2.2580817526048627, "step": 8452, "train/loss_ctc": 0.43887439370155334, "train/loss_error": 0.5414748191833496, "train/loss_total": 0.5209547281265259 }, { "epoch": 2.25834891798023, "step": 8453, "train/loss_ctc": 0.9163817167282104, "train/loss_error": 0.45522668957710266, "train/loss_total": 0.5474576950073242 }, { "epoch": 2.258616083355597, "step": 8454, "train/loss_ctc": 0.8211153745651245, "train/loss_error": 0.4550386965274811, "train/loss_total": 0.5282540321350098 }, { "epoch": 2.2588832487309647, "step": 8455, "train/loss_ctc": 0.7425352334976196, "train/loss_error": 0.4838162958621979, "train/loss_total": 0.5355600714683533 }, { "epoch": 2.259150414106332, "step": 8456, "train/loss_ctc": 1.0176777839660645, "train/loss_error": 0.461429625749588, "train/loss_total": 0.5726792812347412 }, { "epoch": 2.259417579481699, "step": 8457, "train/loss_ctc": 0.3966776132583618, "train/loss_error": 0.3972729742527008, "train/loss_total": 0.39715391397476196 }, { "epoch": 2.2596847448570667, "step": 8458, "train/loss_ctc": 0.8742656707763672, "train/loss_error": 0.4739167094230652, "train/loss_total": 0.5539864897727966 }, { "epoch": 2.259951910232434, "step": 8459, "train/loss_ctc": 0.8861926794052124, "train/loss_error": 0.5163325667381287, "train/loss_total": 0.5903046131134033 }, { "epoch": 2.260219075607801, "grad_norm": 4.500612258911133, "learning_rate": 1.6448303499866415e-05, "loss": 0.5282, "step": 8460 }, 
{ "epoch": 2.260219075607801, "step": 8460, "train/loss_ctc": 1.175419807434082, "train/loss_error": 0.47377416491508484, "train/loss_total": 0.6141033172607422 }, { "epoch": 2.2604862409831687, "step": 8461, "train/loss_ctc": 0.5041308999061584, "train/loss_error": 0.4436386525630951, "train/loss_total": 0.4557371139526367 }, { "epoch": 2.260753406358536, "step": 8462, "train/loss_ctc": 1.149085283279419, "train/loss_error": 0.4930044710636139, "train/loss_total": 0.624220609664917 }, { "epoch": 2.261020571733903, "step": 8463, "train/loss_ctc": 0.4864988327026367, "train/loss_error": 0.48143088817596436, "train/loss_total": 0.4824444651603699 }, { "epoch": 2.261287737109271, "step": 8464, "train/loss_ctc": 0.7747918367385864, "train/loss_error": 0.492792546749115, "train/loss_total": 0.5491924285888672 }, { "epoch": 2.261554902484638, "step": 8465, "train/loss_ctc": 0.5977559089660645, "train/loss_error": 0.5287593603134155, "train/loss_total": 0.5425586700439453 }, { "epoch": 2.261822067860005, "step": 8466, "train/loss_ctc": 0.7990995645523071, "train/loss_error": 0.4749818742275238, "train/loss_total": 0.5398054122924805 }, { "epoch": 2.262089233235373, "step": 8467, "train/loss_ctc": 0.7290723919868469, "train/loss_error": 0.4829859435558319, "train/loss_total": 0.532203197479248 }, { "epoch": 2.26235639861074, "step": 8468, "train/loss_ctc": 0.9376716613769531, "train/loss_error": 0.3876717686653137, "train/loss_total": 0.4976717531681061 }, { "epoch": 2.262623563986107, "step": 8469, "train/loss_ctc": 0.47524964809417725, "train/loss_error": 0.43046876788139343, "train/loss_total": 0.43942493200302124 }, { "epoch": 2.262890729361475, "grad_norm": 3.268307685852051, "learning_rate": 1.6432273577344377e-05, "loss": 0.5277, "step": 8470 }, { "epoch": 2.262890729361475, "step": 8470, "train/loss_ctc": 0.4854907691478729, "train/loss_error": 0.4283384680747986, "train/loss_total": 0.43976891040802 }, { "epoch": 2.263157894736842, "step": 8471, "train/loss_ctc": 
0.8805203437805176, "train/loss_error": 0.45360836386680603, "train/loss_total": 0.5389907360076904 }, { "epoch": 2.2634250601122092, "step": 8472, "train/loss_ctc": 0.6463012099266052, "train/loss_error": 0.49799844622612, "train/loss_total": 0.527658998966217 }, { "epoch": 2.263692225487577, "step": 8473, "train/loss_ctc": 0.5786702036857605, "train/loss_error": 0.4625683128833771, "train/loss_total": 0.4857887029647827 }, { "epoch": 2.263959390862944, "step": 8474, "train/loss_ctc": 1.0433874130249023, "train/loss_error": 0.5347152352333069, "train/loss_total": 0.6364496946334839 }, { "epoch": 2.2642265562383117, "step": 8475, "train/loss_ctc": 0.389915406703949, "train/loss_error": 0.4007161855697632, "train/loss_total": 0.39855602383613586 }, { "epoch": 2.264493721613679, "step": 8476, "train/loss_ctc": 0.7229539752006531, "train/loss_error": 0.4487263858318329, "train/loss_total": 0.5035718679428101 }, { "epoch": 2.264760886989046, "step": 8477, "train/loss_ctc": 0.7905473709106445, "train/loss_error": 0.4902433753013611, "train/loss_total": 0.5503041744232178 }, { "epoch": 2.2650280523644137, "step": 8478, "train/loss_ctc": 0.9608887434005737, "train/loss_error": 0.4109638035297394, "train/loss_total": 0.5209488272666931 }, { "epoch": 2.265295217739781, "step": 8479, "train/loss_ctc": 0.41316038370132446, "train/loss_error": 0.4417247772216797, "train/loss_total": 0.4360119104385376 }, { "epoch": 2.265562383115148, "grad_norm": 3.9888386726379395, "learning_rate": 1.6416243654822335e-05, "loss": 0.5038, "step": 8480 }, { "epoch": 2.265562383115148, "step": 8480, "train/loss_ctc": 0.6502349376678467, "train/loss_error": 0.48547786474227905, "train/loss_total": 0.5184292793273926 }, { "epoch": 2.2658295484905158, "step": 8481, "train/loss_ctc": 0.33355557918548584, "train/loss_error": 0.44115784764289856, "train/loss_total": 0.41963741183280945 }, { "epoch": 2.266096713865883, "step": 8482, "train/loss_ctc": 0.6589443683624268, "train/loss_error": 
0.4943356215953827, "train/loss_total": 0.5272573828697205 }, { "epoch": 2.26636387924125, "step": 8483, "train/loss_ctc": 1.316750407218933, "train/loss_error": 0.432281494140625, "train/loss_total": 0.6091753244400024 }, { "epoch": 2.266631044616618, "step": 8484, "train/loss_ctc": 1.4247887134552002, "train/loss_error": 0.43346497416496277, "train/loss_total": 0.6317297220230103 }, { "epoch": 2.266898209991985, "step": 8485, "train/loss_ctc": 1.0491299629211426, "train/loss_error": 0.39141854643821716, "train/loss_total": 0.5229608416557312 }, { "epoch": 2.2671653753673526, "step": 8486, "train/loss_ctc": 0.5788503885269165, "train/loss_error": 0.42310482263565063, "train/loss_total": 0.4542539417743683 }, { "epoch": 2.26743254074272, "step": 8487, "train/loss_ctc": 0.7964769005775452, "train/loss_error": 0.4388417899608612, "train/loss_total": 0.510368824005127 }, { "epoch": 2.267699706118087, "step": 8488, "train/loss_ctc": 1.2408480644226074, "train/loss_error": 0.47646185755729675, "train/loss_total": 0.6293390989303589 }, { "epoch": 2.2679668714934547, "step": 8489, "train/loss_ctc": 0.978056013584137, "train/loss_error": 0.43691930174827576, "train/loss_total": 0.545146644115448 }, { "epoch": 2.268234036868822, "grad_norm": 1.9376485347747803, "learning_rate": 1.6400213732300293e-05, "loss": 0.5368, "step": 8490 }, { "epoch": 2.268234036868822, "step": 8490, "train/loss_ctc": 0.6030120849609375, "train/loss_error": 0.4133217930793762, "train/loss_total": 0.45125988125801086 }, { "epoch": 2.268501202244189, "step": 8491, "train/loss_ctc": 0.536033570766449, "train/loss_error": 0.48458728194236755, "train/loss_total": 0.49487656354904175 }, { "epoch": 2.2687683676195567, "step": 8492, "train/loss_ctc": 0.5728641748428345, "train/loss_error": 0.4947628676891327, "train/loss_total": 0.510383129119873 }, { "epoch": 2.269035532994924, "step": 8493, "train/loss_ctc": 1.0954288244247437, "train/loss_error": 0.4474944472312927, "train/loss_total": 
0.5770813226699829 }, { "epoch": 2.269302698370291, "step": 8494, "train/loss_ctc": 1.0858397483825684, "train/loss_error": 0.42935627698898315, "train/loss_total": 0.5606529712677002 }, { "epoch": 2.2695698637456587, "step": 8495, "train/loss_ctc": 0.6979640126228333, "train/loss_error": 0.43442949652671814, "train/loss_total": 0.4871364235877991 }, { "epoch": 2.269837029121026, "step": 8496, "train/loss_ctc": 0.35371914505958557, "train/loss_error": 0.47989749908447266, "train/loss_total": 0.45466184616088867 }, { "epoch": 2.270104194496393, "step": 8497, "train/loss_ctc": 0.918816864490509, "train/loss_error": 0.5322803854942322, "train/loss_total": 0.6095876693725586 }, { "epoch": 2.2703713598717608, "step": 8498, "train/loss_ctc": 0.6168830394744873, "train/loss_error": 0.4450828731060028, "train/loss_total": 0.47944292426109314 }, { "epoch": 2.270638525247128, "step": 8499, "train/loss_ctc": 0.3731912076473236, "train/loss_error": 0.43278321623802185, "train/loss_total": 0.4208648204803467 }, { "epoch": 2.270905690622495, "grad_norm": 3.2467455863952637, "learning_rate": 1.638418380977825e-05, "loss": 0.5046, "step": 8500 }, { "epoch": 2.270905690622495, "step": 8500, "train/loss_ctc": 0.7733504772186279, "train/loss_error": 0.47896361351013184, "train/loss_total": 0.5378410220146179 }, { "epoch": 2.271172855997863, "step": 8501, "train/loss_ctc": 1.1794519424438477, "train/loss_error": 0.5399134755134583, "train/loss_total": 0.6678211688995361 }, { "epoch": 2.27144002137323, "step": 8502, "train/loss_ctc": 1.0995795726776123, "train/loss_error": 0.4607226252555847, "train/loss_total": 0.5884940028190613 }, { "epoch": 2.271707186748597, "step": 8503, "train/loss_ctc": 0.53490149974823, "train/loss_error": 0.45254215598106384, "train/loss_total": 0.4690140187740326 }, { "epoch": 2.271974352123965, "step": 8504, "train/loss_ctc": 0.3790196180343628, "train/loss_error": 0.4810834527015686, "train/loss_total": 0.46067070960998535 }, { "epoch": 2.272241517499332, 
"step": 8505, "train/loss_ctc": 0.5364899039268494, "train/loss_error": 0.5014763474464417, "train/loss_total": 0.5084790587425232 }, { "epoch": 2.272508682874699, "step": 8506, "train/loss_ctc": 0.7792260646820068, "train/loss_error": 0.4377862513065338, "train/loss_total": 0.5060741901397705 }, { "epoch": 2.272775848250067, "step": 8507, "train/loss_ctc": 0.6182120442390442, "train/loss_error": 0.4158099591732025, "train/loss_total": 0.4562903940677643 }, { "epoch": 2.273043013625434, "step": 8508, "train/loss_ctc": 1.1195118427276611, "train/loss_error": 0.5038385987281799, "train/loss_total": 0.6269732713699341 }, { "epoch": 2.2733101790008017, "step": 8509, "train/loss_ctc": 0.35639244318008423, "train/loss_error": 0.501303493976593, "train/loss_total": 0.4723212718963623 }, { "epoch": 2.273577344376169, "grad_norm": 1.976806640625, "learning_rate": 1.636815388725621e-05, "loss": 0.5294, "step": 8510 }, { "epoch": 2.273577344376169, "step": 8510, "train/loss_ctc": 1.2243752479553223, "train/loss_error": 0.4489983320236206, "train/loss_total": 0.604073703289032 }, { "epoch": 2.273844509751536, "step": 8511, "train/loss_ctc": 0.9813412427902222, "train/loss_error": 0.5243426561355591, "train/loss_total": 0.6157423853874207 }, { "epoch": 2.2741116751269037, "step": 8512, "train/loss_ctc": 0.6916135549545288, "train/loss_error": 0.5061765313148499, "train/loss_total": 0.5432639122009277 }, { "epoch": 2.274378840502271, "step": 8513, "train/loss_ctc": 0.8146034479141235, "train/loss_error": 0.4180728495121002, "train/loss_total": 0.49737900495529175 }, { "epoch": 2.274646005877638, "step": 8514, "train/loss_ctc": 0.4139031767845154, "train/loss_error": 0.5281920433044434, "train/loss_total": 0.5053342580795288 }, { "epoch": 2.2749131712530057, "step": 8515, "train/loss_ctc": 0.3168294131755829, "train/loss_error": 0.419829785823822, "train/loss_total": 0.3992297053337097 }, { "epoch": 2.275180336628373, "step": 8516, "train/loss_ctc": 0.5522156953811646, 
"train/loss_error": 0.4617577791213989, "train/loss_total": 0.47984936833381653 }, { "epoch": 2.27544750200374, "step": 8517, "train/loss_ctc": 1.2815245389938354, "train/loss_error": 0.492818683385849, "train/loss_total": 0.6505599021911621 }, { "epoch": 2.2757146673791078, "step": 8518, "train/loss_ctc": 0.8240801692008972, "train/loss_error": 0.40025052428245544, "train/loss_total": 0.48501646518707275 }, { "epoch": 2.275981832754475, "step": 8519, "train/loss_ctc": 0.6223849058151245, "train/loss_error": 0.42828965187072754, "train/loss_total": 0.46710872650146484 }, { "epoch": 2.2762489981298426, "grad_norm": 3.2118234634399414, "learning_rate": 1.6352123964734174e-05, "loss": 0.5248, "step": 8520 }, { "epoch": 2.2762489981298426, "step": 8520, "train/loss_ctc": 0.8330034017562866, "train/loss_error": 0.5164893865585327, "train/loss_total": 0.5797922015190125 }, { "epoch": 2.27651616350521, "step": 8521, "train/loss_ctc": 0.8123413920402527, "train/loss_error": 0.41913771629333496, "train/loss_total": 0.4977784752845764 }, { "epoch": 2.276783328880577, "step": 8522, "train/loss_ctc": 0.5972445607185364, "train/loss_error": 0.4775001108646393, "train/loss_total": 0.5014489889144897 }, { "epoch": 2.2770504942559446, "step": 8523, "train/loss_ctc": 0.42572060227394104, "train/loss_error": 0.4203324615955353, "train/loss_total": 0.42141011357307434 }, { "epoch": 2.277317659631312, "step": 8524, "train/loss_ctc": 0.7395285367965698, "train/loss_error": 0.41391974687576294, "train/loss_total": 0.47904151678085327 }, { "epoch": 2.277584825006679, "step": 8525, "train/loss_ctc": 0.6400002837181091, "train/loss_error": 0.4046977758407593, "train/loss_total": 0.4517582654953003 }, { "epoch": 2.2778519903820467, "step": 8526, "train/loss_ctc": 1.0067620277404785, "train/loss_error": 0.4551640748977661, "train/loss_total": 0.5654836893081665 }, { "epoch": 2.278119155757414, "step": 8527, "train/loss_ctc": 0.9091655015945435, "train/loss_error": 0.46887892484664917, 
"train/loss_total": 0.5569362640380859 }, { "epoch": 2.278386321132781, "step": 8528, "train/loss_ctc": 0.48677435517311096, "train/loss_error": 0.3773006498813629, "train/loss_total": 0.3991953730583191 }, { "epoch": 2.2786534865081487, "step": 8529, "train/loss_ctc": 0.8804882764816284, "train/loss_error": 0.48952144384384155, "train/loss_total": 0.5677148103713989 }, { "epoch": 2.278920651883516, "grad_norm": 3.170779228210449, "learning_rate": 1.6336094042212132e-05, "loss": 0.5021, "step": 8530 }, { "epoch": 2.278920651883516, "step": 8530, "train/loss_ctc": 0.899217963218689, "train/loss_error": 0.4132676422595978, "train/loss_total": 0.5104576945304871 }, { "epoch": 2.279187817258883, "step": 8531, "train/loss_ctc": 0.619545578956604, "train/loss_error": 0.44692808389663696, "train/loss_total": 0.4814516007900238 }, { "epoch": 2.2794549826342507, "step": 8532, "train/loss_ctc": 0.31944072246551514, "train/loss_error": 0.416718989610672, "train/loss_total": 0.3972633481025696 }, { "epoch": 2.279722148009618, "step": 8533, "train/loss_ctc": 0.2173944115638733, "train/loss_error": 0.43543165922164917, "train/loss_total": 0.39182421565055847 }, { "epoch": 2.279989313384985, "step": 8534, "train/loss_ctc": 1.06125807762146, "train/loss_error": 0.4613781273365021, "train/loss_total": 0.5813541412353516 }, { "epoch": 2.2802564787603528, "step": 8535, "train/loss_ctc": 0.4892520308494568, "train/loss_error": 0.5037778615951538, "train/loss_total": 0.5008726716041565 }, { "epoch": 2.28052364413572, "step": 8536, "train/loss_ctc": 0.5371137261390686, "train/loss_error": 0.4596145749092102, "train/loss_total": 0.4751144051551819 }, { "epoch": 2.280790809511087, "step": 8537, "train/loss_ctc": 0.7290005683898926, "train/loss_error": 0.42383792996406555, "train/loss_total": 0.48487046360969543 }, { "epoch": 2.281057974886455, "step": 8538, "train/loss_ctc": 1.5265419483184814, "train/loss_error": 0.501218855381012, "train/loss_total": 0.706283450126648 }, { "epoch": 
2.281325140261822, "step": 8539, "train/loss_ctc": 0.6086102724075317, "train/loss_error": 0.4649195075035095, "train/loss_total": 0.493657648563385 }, { "epoch": 2.281592305637189, "grad_norm": 2.0571882724761963, "learning_rate": 1.632006411969009e-05, "loss": 0.5023, "step": 8540 }, { "epoch": 2.281592305637189, "step": 8540, "train/loss_ctc": 0.46877461671829224, "train/loss_error": 0.49481484293937683, "train/loss_total": 0.4896067976951599 }, { "epoch": 2.281859471012557, "step": 8541, "train/loss_ctc": 0.9414839744567871, "train/loss_error": 0.45381852984428406, "train/loss_total": 0.5513516664505005 }, { "epoch": 2.282126636387924, "step": 8542, "train/loss_ctc": 1.0373661518096924, "train/loss_error": 0.4965516924858093, "train/loss_total": 0.604714572429657 }, { "epoch": 2.2823938017632917, "step": 8543, "train/loss_ctc": 0.5805529356002808, "train/loss_error": 0.4784485995769501, "train/loss_total": 0.49886947870254517 }, { "epoch": 2.282660967138659, "step": 8544, "train/loss_ctc": 0.8082685470581055, "train/loss_error": 0.46502485871315, "train/loss_total": 0.5336735844612122 }, { "epoch": 2.282928132514026, "step": 8545, "train/loss_ctc": 0.8384010791778564, "train/loss_error": 0.42597153782844543, "train/loss_total": 0.5084574818611145 }, { "epoch": 2.2831952978893937, "step": 8546, "train/loss_ctc": 0.5922383069992065, "train/loss_error": 0.42652949690818787, "train/loss_total": 0.4596712589263916 }, { "epoch": 2.283462463264761, "step": 8547, "train/loss_ctc": 0.913546085357666, "train/loss_error": 0.4425811767578125, "train/loss_total": 0.5367741584777832 }, { "epoch": 2.283729628640128, "step": 8548, "train/loss_ctc": 1.1333482265472412, "train/loss_error": 0.4590032994747162, "train/loss_total": 0.5938723087310791 }, { "epoch": 2.2839967940154957, "step": 8549, "train/loss_ctc": 0.6971143484115601, "train/loss_error": 0.45059484243392944, "train/loss_total": 0.4998987317085266 }, { "epoch": 2.284263959390863, "grad_norm": 3.5418472290039062, 
"learning_rate": 1.630403419716805e-05, "loss": 0.5277, "step": 8550 }, { "epoch": 2.284263959390863, "step": 8550, "train/loss_ctc": 0.9499183893203735, "train/loss_error": 0.45792579650878906, "train/loss_total": 0.556324303150177 }, { "epoch": 2.28453112476623, "step": 8551, "train/loss_ctc": 0.4222516417503357, "train/loss_error": 0.3972719609737396, "train/loss_total": 0.4022679030895233 }, { "epoch": 2.2847982901415977, "step": 8552, "train/loss_ctc": 0.5811341404914856, "train/loss_error": 0.42047566175460815, "train/loss_total": 0.4526073634624481 }, { "epoch": 2.285065455516965, "step": 8553, "train/loss_ctc": 0.5968294143676758, "train/loss_error": 0.4449058771133423, "train/loss_total": 0.47529059648513794 }, { "epoch": 2.2853326208923326, "step": 8554, "train/loss_ctc": 0.5786443948745728, "train/loss_error": 0.5353142619132996, "train/loss_total": 0.5439803004264832 }, { "epoch": 2.2855997862676998, "step": 8555, "train/loss_ctc": 1.5085980892181396, "train/loss_error": 0.39735251665115356, "train/loss_total": 0.6196016669273376 }, { "epoch": 2.285866951643067, "step": 8556, "train/loss_ctc": 0.6167030930519104, "train/loss_error": 0.4653688371181488, "train/loss_total": 0.4956356883049011 }, { "epoch": 2.2861341170184346, "step": 8557, "train/loss_ctc": 0.825208306312561, "train/loss_error": 0.5034621357917786, "train/loss_total": 0.5678113698959351 }, { "epoch": 2.286401282393802, "step": 8558, "train/loss_ctc": 0.45386219024658203, "train/loss_error": 0.400149941444397, "train/loss_total": 0.41089239716529846 }, { "epoch": 2.286668447769169, "step": 8559, "train/loss_ctc": 0.33687150478363037, "train/loss_error": 0.46946439146995544, "train/loss_total": 0.44294583797454834 }, { "epoch": 2.2869356131445366, "grad_norm": 1.6999181509017944, "learning_rate": 1.6288004274646006e-05, "loss": 0.4967, "step": 8560 }, { "epoch": 2.2869356131445366, "step": 8560, "train/loss_ctc": 0.7947405576705933, "train/loss_error": 0.4802631735801697, 
"train/loss_total": 0.5431586503982544 }, { "epoch": 2.287202778519904, "step": 8561, "train/loss_ctc": 0.8918238878250122, "train/loss_error": 0.45283615589141846, "train/loss_total": 0.5406336784362793 }, { "epoch": 2.287469943895271, "step": 8562, "train/loss_ctc": 0.7237066030502319, "train/loss_error": 0.5011737942695618, "train/loss_total": 0.5456804037094116 }, { "epoch": 2.2877371092706387, "step": 8563, "train/loss_ctc": 0.5327780246734619, "train/loss_error": 0.4338875710964203, "train/loss_total": 0.45366567373275757 }, { "epoch": 2.288004274646006, "step": 8564, "train/loss_ctc": 0.5826402306556702, "train/loss_error": 0.4794548749923706, "train/loss_total": 0.5000919699668884 }, { "epoch": 2.288271440021373, "step": 8565, "train/loss_ctc": 0.6324026584625244, "train/loss_error": 0.4875396490097046, "train/loss_total": 0.5165122747421265 }, { "epoch": 2.2885386053967407, "step": 8566, "train/loss_ctc": 0.8863272070884705, "train/loss_error": 0.47835391759872437, "train/loss_total": 0.5599485635757446 }, { "epoch": 2.288805770772108, "step": 8567, "train/loss_ctc": 1.1000419855117798, "train/loss_error": 0.48377805948257446, "train/loss_total": 0.6070308685302734 }, { "epoch": 2.289072936147475, "step": 8568, "train/loss_ctc": 0.960655927658081, "train/loss_error": 0.40557822585105896, "train/loss_total": 0.5165938138961792 }, { "epoch": 2.2893401015228427, "step": 8569, "train/loss_ctc": 0.8726375699043274, "train/loss_error": 0.4368011951446533, "train/loss_total": 0.5239684581756592 }, { "epoch": 2.28960726689821, "grad_norm": 1.5574278831481934, "learning_rate": 1.6271974352123964e-05, "loss": 0.5307, "step": 8570 }, { "epoch": 2.28960726689821, "step": 8570, "train/loss_ctc": 1.0149610042572021, "train/loss_error": 0.4927434027194977, "train/loss_total": 0.5971869230270386 }, { "epoch": 2.289874432273577, "step": 8571, "train/loss_ctc": 1.0447368621826172, "train/loss_error": 0.4090639054775238, "train/loss_total": 0.5361984968185425 }, { "epoch": 
2.2901415976489448, "step": 8572, "train/loss_ctc": 0.8588261604309082, "train/loss_error": 0.4035545587539673, "train/loss_total": 0.49460887908935547 }, { "epoch": 2.290408763024312, "step": 8573, "train/loss_ctc": 0.6527800559997559, "train/loss_error": 0.4386625289916992, "train/loss_total": 0.481486052274704 }, { "epoch": 2.2906759283996796, "step": 8574, "train/loss_ctc": 0.7862299680709839, "train/loss_error": 0.4758477210998535, "train/loss_total": 0.5379241704940796 }, { "epoch": 2.290943093775047, "step": 8575, "train/loss_ctc": 0.5599069595336914, "train/loss_error": 0.4163975417613983, "train/loss_total": 0.44509944319725037 }, { "epoch": 2.291210259150414, "step": 8576, "train/loss_ctc": 0.2831977903842926, "train/loss_error": 0.43477457761764526, "train/loss_total": 0.4044592082500458 }, { "epoch": 2.2914774245257816, "step": 8577, "train/loss_ctc": 0.8067416548728943, "train/loss_error": 0.4525447487831116, "train/loss_total": 0.523384153842926 }, { "epoch": 2.291744589901149, "step": 8578, "train/loss_ctc": 1.4762024879455566, "train/loss_error": 0.4576844274997711, "train/loss_total": 0.6613880395889282 }, { "epoch": 2.292011755276516, "step": 8579, "train/loss_ctc": 0.7632551193237305, "train/loss_error": 0.5054134726524353, "train/loss_total": 0.5569818019866943 }, { "epoch": 2.2922789206518837, "grad_norm": 2.644827365875244, "learning_rate": 1.6255944429601926e-05, "loss": 0.5239, "step": 8580 }, { "epoch": 2.2922789206518837, "step": 8580, "train/loss_ctc": 0.7139209508895874, "train/loss_error": 0.46325528621673584, "train/loss_total": 0.513388454914093 }, { "epoch": 2.292546086027251, "step": 8581, "train/loss_ctc": 0.5080625414848328, "train/loss_error": 0.4421009421348572, "train/loss_total": 0.4552932679653168 }, { "epoch": 2.292813251402618, "step": 8582, "train/loss_ctc": 0.9180382490158081, "train/loss_error": 0.5398347973823547, "train/loss_total": 0.6154754757881165 }, { "epoch": 2.2930804167779857, "step": 8583, "train/loss_ctc": 
0.4874947667121887, "train/loss_error": 0.3988892436027527, "train/loss_total": 0.41661036014556885 }, { "epoch": 2.293347582153353, "step": 8584, "train/loss_ctc": 0.6808546185493469, "train/loss_error": 0.5534648895263672, "train/loss_total": 0.5789428353309631 }, { "epoch": 2.2936147475287205, "step": 8585, "train/loss_ctc": 1.5874109268188477, "train/loss_error": 0.4637916088104248, "train/loss_total": 0.6885154843330383 }, { "epoch": 2.2938819129040877, "step": 8586, "train/loss_ctc": 0.5840710401535034, "train/loss_error": 0.4480264186859131, "train/loss_total": 0.47523534297943115 }, { "epoch": 2.294149078279455, "step": 8587, "train/loss_ctc": 1.4724719524383545, "train/loss_error": 0.4799295663833618, "train/loss_total": 0.6784380674362183 }, { "epoch": 2.2944162436548226, "step": 8588, "train/loss_ctc": 0.868850588798523, "train/loss_error": 0.4661821722984314, "train/loss_total": 0.5467158555984497 }, { "epoch": 2.2946834090301897, "step": 8589, "train/loss_ctc": 1.0411105155944824, "train/loss_error": 0.45271381735801697, "train/loss_total": 0.5703931450843811 }, { "epoch": 2.294950574405557, "grad_norm": 3.510812520980835, "learning_rate": 1.6239914507079884e-05, "loss": 0.5539, "step": 8590 }, { "epoch": 2.294950574405557, "step": 8590, "train/loss_ctc": 1.1896706819534302, "train/loss_error": 0.45221424102783203, "train/loss_total": 0.5997055768966675 }, { "epoch": 2.2952177397809246, "step": 8591, "train/loss_ctc": 0.5583781003952026, "train/loss_error": 0.4395330250263214, "train/loss_total": 0.46330204606056213 }, { "epoch": 2.2954849051562918, "step": 8592, "train/loss_ctc": 0.5249467492103577, "train/loss_error": 0.4826951026916504, "train/loss_total": 0.49114543199539185 }, { "epoch": 2.295752070531659, "step": 8593, "train/loss_ctc": 1.1492547988891602, "train/loss_error": 0.4684772491455078, "train/loss_total": 0.6046327948570251 }, { "epoch": 2.2960192359070266, "step": 8594, "train/loss_ctc": 0.432933509349823, "train/loss_error": 
0.39453810453414917, "train/loss_total": 0.40221720933914185 }, { "epoch": 2.296286401282394, "step": 8595, "train/loss_ctc": 0.5307267308235168, "train/loss_error": 0.4956183433532715, "train/loss_total": 0.5026400089263916 }, { "epoch": 2.296553566657761, "step": 8596, "train/loss_ctc": 0.7724556922912598, "train/loss_error": 0.46984949707984924, "train/loss_total": 0.5303707718849182 }, { "epoch": 2.2968207320331286, "step": 8597, "train/loss_ctc": 0.7107998132705688, "train/loss_error": 0.4566934108734131, "train/loss_total": 0.5075147151947021 }, { "epoch": 2.297087897408496, "step": 8598, "train/loss_ctc": 0.3315836191177368, "train/loss_error": 0.38065388798713684, "train/loss_total": 0.37083983421325684 }, { "epoch": 2.297355062783863, "step": 8599, "train/loss_ctc": 0.7110035419464111, "train/loss_error": 0.4821873605251312, "train/loss_total": 0.527950644493103 }, { "epoch": 2.2976222281592307, "grad_norm": 2.6870481967926025, "learning_rate": 1.6223884584557842e-05, "loss": 0.5, "step": 8600 }, { "epoch": 2.2976222281592307, "step": 8600, "train/loss_ctc": 1.3356032371520996, "train/loss_error": 0.42803728580474854, "train/loss_total": 0.6095504760742188 }, { "epoch": 2.297889393534598, "step": 8601, "train/loss_ctc": 0.7998729944229126, "train/loss_error": 0.43372786045074463, "train/loss_total": 0.506956934928894 }, { "epoch": 2.298156558909965, "step": 8602, "train/loss_ctc": 0.37813544273376465, "train/loss_error": 0.4865170121192932, "train/loss_total": 0.46484071016311646 }, { "epoch": 2.2984237242853327, "step": 8603, "train/loss_ctc": 0.9599453210830688, "train/loss_error": 0.42610055208206177, "train/loss_total": 0.5328695178031921 }, { "epoch": 2.2986908896607, "step": 8604, "train/loss_ctc": 0.6172367334365845, "train/loss_error": 0.4450191855430603, "train/loss_total": 0.47946271300315857 }, { "epoch": 2.298958055036067, "step": 8605, "train/loss_ctc": 0.7770324349403381, "train/loss_error": 0.4715976119041443, "train/loss_total": 
0.5326845645904541 }, { "epoch": 2.2992252204114347, "step": 8606, "train/loss_ctc": 0.5693694353103638, "train/loss_error": 0.4947749376296997, "train/loss_total": 0.5096938610076904 }, { "epoch": 2.299492385786802, "step": 8607, "train/loss_ctc": 0.7756230235099792, "train/loss_error": 0.5018465518951416, "train/loss_total": 0.556601881980896 }, { "epoch": 2.2997595511621696, "step": 8608, "train/loss_ctc": 0.32799121737480164, "train/loss_error": 0.43220198154449463, "train/loss_total": 0.41135984659194946 }, { "epoch": 2.3000267165375368, "step": 8609, "train/loss_ctc": 0.26712340116500854, "train/loss_error": 0.38340675830841064, "train/loss_total": 0.3601500988006592 }, { "epoch": 2.300293881912904, "grad_norm": 4.919119834899902, "learning_rate": 1.62078546620358e-05, "loss": 0.4964, "step": 8610 }, { "epoch": 2.300293881912904, "step": 8610, "train/loss_ctc": 1.0457756519317627, "train/loss_error": 0.48021388053894043, "train/loss_total": 0.593326210975647 }, { "epoch": 2.3005610472882716, "step": 8611, "train/loss_ctc": 0.5171359777450562, "train/loss_error": 0.4721905589103699, "train/loss_total": 0.4811796545982361 }, { "epoch": 2.300828212663639, "step": 8612, "train/loss_ctc": 0.8007794618606567, "train/loss_error": 0.43999460339546204, "train/loss_total": 0.5121515989303589 }, { "epoch": 2.301095378039006, "step": 8613, "train/loss_ctc": 0.7226524353027344, "train/loss_error": 0.4747353494167328, "train/loss_total": 0.5243187546730042 }, { "epoch": 2.3013625434143736, "step": 8614, "train/loss_ctc": 0.7364451885223389, "train/loss_error": 0.4594344198703766, "train/loss_total": 0.5148365497589111 }, { "epoch": 2.301629708789741, "step": 8615, "train/loss_ctc": 0.4716033637523651, "train/loss_error": 0.42369142174720764, "train/loss_total": 0.4332738220691681 }, { "epoch": 2.301896874165108, "step": 8616, "train/loss_ctc": 0.3829680383205414, "train/loss_error": 0.4224366843700409, "train/loss_total": 0.4145429730415344 }, { "epoch": 
2.3021640395404757, "step": 8617, "train/loss_ctc": 1.029131531715393, "train/loss_error": 0.4453749358654022, "train/loss_total": 0.5621262788772583 }, { "epoch": 2.302431204915843, "step": 8618, "train/loss_ctc": 0.7240005731582642, "train/loss_error": 0.4534403085708618, "train/loss_total": 0.5075523853302002 }, { "epoch": 2.3026983702912105, "step": 8619, "train/loss_ctc": 0.8742213249206543, "train/loss_error": 0.42503342032432556, "train/loss_total": 0.5148710012435913 }, { "epoch": 2.3029655356665777, "grad_norm": 2.320101499557495, "learning_rate": 1.6191824739513758e-05, "loss": 0.5058, "step": 8620 }, { "epoch": 2.3029655356665777, "step": 8620, "train/loss_ctc": 0.9590544104576111, "train/loss_error": 0.48447510600090027, "train/loss_total": 0.5793910026550293 }, { "epoch": 2.303232701041945, "step": 8621, "train/loss_ctc": 0.7433066964149475, "train/loss_error": 0.4483993649482727, "train/loss_total": 0.5073808431625366 }, { "epoch": 2.3034998664173125, "step": 8622, "train/loss_ctc": 0.7606130838394165, "train/loss_error": 0.46151378750801086, "train/loss_total": 0.5213336944580078 }, { "epoch": 2.3037670317926797, "step": 8623, "train/loss_ctc": 1.0812615156173706, "train/loss_error": 0.4517442286014557, "train/loss_total": 0.5776476860046387 }, { "epoch": 2.304034197168047, "step": 8624, "train/loss_ctc": 0.660335898399353, "train/loss_error": 0.4418436884880066, "train/loss_total": 0.4855421185493469 }, { "epoch": 2.3043013625434146, "step": 8625, "train/loss_ctc": 0.5680113434791565, "train/loss_error": 0.6232389211654663, "train/loss_total": 0.6121934056282043 }, { "epoch": 2.3045685279187818, "step": 8626, "train/loss_ctc": 0.5671731233596802, "train/loss_error": 0.43327054381370544, "train/loss_total": 0.4600510597229004 }, { "epoch": 2.304835693294149, "step": 8627, "train/loss_ctc": 1.4271409511566162, "train/loss_error": 0.4687294065952301, "train/loss_total": 0.6604117155075073 }, { "epoch": 2.3051028586695166, "step": 8628, 
"train/loss_ctc": 1.2356932163238525, "train/loss_error": 0.5050294995307922, "train/loss_total": 0.6511622667312622 }, { "epoch": 2.305370024044884, "step": 8629, "train/loss_ctc": 0.7733941078186035, "train/loss_error": 0.46764978766441345, "train/loss_total": 0.5287986993789673 }, { "epoch": 2.305637189420251, "grad_norm": 2.1975674629211426, "learning_rate": 1.6175794816991716e-05, "loss": 0.5584, "step": 8630 }, { "epoch": 2.305637189420251, "step": 8630, "train/loss_ctc": 0.5434434413909912, "train/loss_error": 0.40777379274368286, "train/loss_total": 0.4349077343940735 }, { "epoch": 2.3059043547956186, "step": 8631, "train/loss_ctc": 0.429205983877182, "train/loss_error": 0.4020217955112457, "train/loss_total": 0.40745866298675537 }, { "epoch": 2.306171520170986, "step": 8632, "train/loss_ctc": 0.6348996758460999, "train/loss_error": 0.505128800868988, "train/loss_total": 0.5310829877853394 }, { "epoch": 2.306438685546353, "step": 8633, "train/loss_ctc": 0.9885269999504089, "train/loss_error": 0.488938570022583, "train/loss_total": 0.5888562798500061 }, { "epoch": 2.3067058509217206, "step": 8634, "train/loss_ctc": 0.6896458268165588, "train/loss_error": 0.4032321870326996, "train/loss_total": 0.46051493287086487 }, { "epoch": 2.306973016297088, "step": 8635, "train/loss_ctc": 0.9168154001235962, "train/loss_error": 0.48823627829551697, "train/loss_total": 0.5739520788192749 }, { "epoch": 2.307240181672455, "step": 8636, "train/loss_ctc": 0.47162359952926636, "train/loss_error": 0.3832041621208191, "train/loss_total": 0.4008880853652954 }, { "epoch": 2.3075073470478227, "step": 8637, "train/loss_ctc": 0.6190381646156311, "train/loss_error": 0.40589356422424316, "train/loss_total": 0.44852250814437866 }, { "epoch": 2.30777451242319, "step": 8638, "train/loss_ctc": 0.38668569922447205, "train/loss_error": 0.48432737588882446, "train/loss_total": 0.46479904651641846 }, { "epoch": 2.308041677798557, "step": 8639, "train/loss_ctc": 0.7708114981651306, 
"train/loss_error": 0.4296760559082031, "train/loss_total": 0.49790316820144653 }, { "epoch": 2.3083088431739247, "grad_norm": 2.3141536712646484, "learning_rate": 1.6159764894469678e-05, "loss": 0.4809, "step": 8640 }, { "epoch": 2.3083088431739247, "step": 8640, "train/loss_ctc": 0.5190814137458801, "train/loss_error": 0.44912371039390564, "train/loss_total": 0.46311527490615845 }, { "epoch": 2.308576008549292, "step": 8641, "train/loss_ctc": 1.0158085823059082, "train/loss_error": 0.3681267499923706, "train/loss_total": 0.49766311049461365 }, { "epoch": 2.3088431739246595, "step": 8642, "train/loss_ctc": 0.9678400158882141, "train/loss_error": 0.5489002466201782, "train/loss_total": 0.6326882243156433 }, { "epoch": 2.3091103393000267, "step": 8643, "train/loss_ctc": 1.1911776065826416, "train/loss_error": 0.4747895300388336, "train/loss_total": 0.6180671453475952 }, { "epoch": 2.309377504675394, "step": 8644, "train/loss_ctc": 0.7499830722808838, "train/loss_error": 0.49221375584602356, "train/loss_total": 0.5437676310539246 }, { "epoch": 2.3096446700507616, "step": 8645, "train/loss_ctc": 0.5443293452262878, "train/loss_error": 0.41936272382736206, "train/loss_total": 0.4443560838699341 }, { "epoch": 2.3099118354261288, "step": 8646, "train/loss_ctc": 1.1083984375, "train/loss_error": 0.5151187181472778, "train/loss_total": 0.6337746381759644 }, { "epoch": 2.310179000801496, "step": 8647, "train/loss_ctc": 0.9024804830551147, "train/loss_error": 0.5842140913009644, "train/loss_total": 0.6478673815727234 }, { "epoch": 2.3104461661768636, "step": 8648, "train/loss_ctc": 0.6766957640647888, "train/loss_error": 0.42086660861968994, "train/loss_total": 0.47203242778778076 }, { "epoch": 2.310713331552231, "step": 8649, "train/loss_ctc": 0.6325449347496033, "train/loss_error": 0.4562220871448517, "train/loss_total": 0.49148666858673096 }, { "epoch": 2.310980496927598, "grad_norm": 1.3641021251678467, "learning_rate": 1.6143734971947636e-05, "loss": 0.5445, "step": 
8650 }, { "epoch": 2.310980496927598, "step": 8650, "train/loss_ctc": 0.700663685798645, "train/loss_error": 0.4068511128425598, "train/loss_total": 0.4656136631965637 }, { "epoch": 2.3112476623029656, "step": 8651, "train/loss_ctc": 0.4596497118473053, "train/loss_error": 0.444261372089386, "train/loss_total": 0.4473390579223633 }, { "epoch": 2.311514827678333, "step": 8652, "train/loss_ctc": 0.5233660936355591, "train/loss_error": 0.49221083521842957, "train/loss_total": 0.4984418749809265 }, { "epoch": 2.3117819930537005, "step": 8653, "train/loss_ctc": 1.0041098594665527, "train/loss_error": 0.47033560276031494, "train/loss_total": 0.5770904421806335 }, { "epoch": 2.3120491584290677, "step": 8654, "train/loss_ctc": 1.1631112098693848, "train/loss_error": 0.46664100885391235, "train/loss_total": 0.6059350371360779 }, { "epoch": 2.312316323804435, "step": 8655, "train/loss_ctc": 0.34585267305374146, "train/loss_error": 0.4311105012893677, "train/loss_total": 0.41405895352363586 }, { "epoch": 2.3125834891798025, "step": 8656, "train/loss_ctc": 0.35937902331352234, "train/loss_error": 0.4529467523097992, "train/loss_total": 0.4342332184314728 }, { "epoch": 2.3128506545551697, "step": 8657, "train/loss_ctc": 0.6892235279083252, "train/loss_error": 0.47039875388145447, "train/loss_total": 0.5141637325286865 }, { "epoch": 2.313117819930537, "step": 8658, "train/loss_ctc": 1.0915570259094238, "train/loss_error": 0.5455701947212219, "train/loss_total": 0.6547675728797913 }, { "epoch": 2.3133849853059045, "step": 8659, "train/loss_ctc": 0.43698981404304504, "train/loss_error": 0.5218369364738464, "train/loss_total": 0.5048675537109375 }, { "epoch": 2.3136521506812717, "grad_norm": 2.270331621170044, "learning_rate": 1.6127705049425594e-05, "loss": 0.5117, "step": 8660 }, { "epoch": 2.3136521506812717, "step": 8660, "train/loss_ctc": 0.9858186841011047, "train/loss_error": 0.522635281085968, "train/loss_total": 0.6152719855308533 }, { "epoch": 2.313919316056639, "step": 
8661, "train/loss_ctc": 0.2867797613143921, "train/loss_error": 0.3979250490665436, "train/loss_total": 0.37569597363471985 }, { "epoch": 2.3141864814320066, "step": 8662, "train/loss_ctc": 0.5597950220108032, "train/loss_error": 0.44154879450798035, "train/loss_total": 0.4651980400085449 }, { "epoch": 2.3144536468073738, "step": 8663, "train/loss_ctc": 0.40743130445480347, "train/loss_error": 0.39196711778640747, "train/loss_total": 0.3950599431991577 }, { "epoch": 2.314720812182741, "step": 8664, "train/loss_ctc": 1.455183744430542, "train/loss_error": 0.5565338134765625, "train/loss_total": 0.7362638115882874 }, { "epoch": 2.3149879775581086, "step": 8665, "train/loss_ctc": 0.5119123458862305, "train/loss_error": 0.4298602342605591, "train/loss_total": 0.4462706744670868 }, { "epoch": 2.315255142933476, "step": 8666, "train/loss_ctc": 0.5169885754585266, "train/loss_error": 0.43372902274131775, "train/loss_total": 0.45038095116615295 }, { "epoch": 2.315522308308843, "step": 8667, "train/loss_ctc": 0.6518224477767944, "train/loss_error": 0.4582565426826477, "train/loss_total": 0.4969697594642639 }, { "epoch": 2.3157894736842106, "step": 8668, "train/loss_ctc": 0.6785407662391663, "train/loss_error": 0.432998389005661, "train/loss_total": 0.48210686445236206 }, { "epoch": 2.316056639059578, "step": 8669, "train/loss_ctc": 0.5350910425186157, "train/loss_error": 0.48811212182044983, "train/loss_total": 0.49750789999961853 }, { "epoch": 2.316323804434945, "grad_norm": 3.2057313919067383, "learning_rate": 1.6111675126903552e-05, "loss": 0.4961, "step": 8670 }, { "epoch": 2.316323804434945, "step": 8670, "train/loss_ctc": 0.5815839171409607, "train/loss_error": 0.4790363013744354, "train/loss_total": 0.4995458424091339 }, { "epoch": 2.3165909698103127, "step": 8671, "train/loss_ctc": 0.4532064199447632, "train/loss_error": 0.39105066657066345, "train/loss_total": 0.4034818410873413 }, { "epoch": 2.31685813518568, "step": 8672, "train/loss_ctc": 0.9481800198554993, 
"train/loss_error": 0.5868934392929077, "train/loss_total": 0.6591507792472839 }, { "epoch": 2.317125300561047, "step": 8673, "train/loss_ctc": 1.2062163352966309, "train/loss_error": 0.47631174325942993, "train/loss_total": 0.6222926378250122 }, { "epoch": 2.3173924659364147, "step": 8674, "train/loss_ctc": 0.5460904240608215, "train/loss_error": 0.4494188725948334, "train/loss_total": 0.4687531888484955 }, { "epoch": 2.317659631311782, "step": 8675, "train/loss_ctc": 1.352691411972046, "train/loss_error": 0.48436659574508667, "train/loss_total": 0.6580315828323364 }, { "epoch": 2.3179267966871495, "step": 8676, "train/loss_ctc": 0.7766051292419434, "train/loss_error": 0.4829202890396118, "train/loss_total": 0.5416572690010071 }, { "epoch": 2.3181939620625167, "step": 8677, "train/loss_ctc": 1.264447569847107, "train/loss_error": 0.4153914749622345, "train/loss_total": 0.585202693939209 }, { "epoch": 2.318461127437884, "step": 8678, "train/loss_ctc": 0.5711400508880615, "train/loss_error": 0.45577576756477356, "train/loss_total": 0.4788486361503601 }, { "epoch": 2.3187282928132515, "step": 8679, "train/loss_ctc": 0.48858481645584106, "train/loss_error": 0.4942987859249115, "train/loss_total": 0.49315598607063293 }, { "epoch": 2.3189954581886187, "grad_norm": 1.5033695697784424, "learning_rate": 1.609564520438151e-05, "loss": 0.541, "step": 8680 }, { "epoch": 2.3189954581886187, "step": 8680, "train/loss_ctc": 1.4792561531066895, "train/loss_error": 0.4996429979801178, "train/loss_total": 0.6955656409263611 }, { "epoch": 2.319262623563986, "step": 8681, "train/loss_ctc": 0.32359591126441956, "train/loss_error": 0.4272841811180115, "train/loss_total": 0.40654653310775757 }, { "epoch": 2.3195297889393536, "step": 8682, "train/loss_ctc": 0.8132648468017578, "train/loss_error": 0.5061914324760437, "train/loss_total": 0.5676060914993286 }, { "epoch": 2.3197969543147208, "step": 8683, "train/loss_ctc": 0.798132598400116, "train/loss_error": 0.40188637375831604, 
"train/loss_total": 0.48113560676574707 }, { "epoch": 2.3200641196900884, "step": 8684, "train/loss_ctc": 0.5870211720466614, "train/loss_error": 0.5116381645202637, "train/loss_total": 0.5267148017883301 }, { "epoch": 2.3203312850654556, "step": 8685, "train/loss_ctc": 0.7688578367233276, "train/loss_error": 0.4195229709148407, "train/loss_total": 0.48938995599746704 }, { "epoch": 2.320598450440823, "step": 8686, "train/loss_ctc": 0.7743287682533264, "train/loss_error": 0.4856514036655426, "train/loss_total": 0.5433868765830994 }, { "epoch": 2.3208656158161904, "step": 8687, "train/loss_ctc": 1.279536485671997, "train/loss_error": 0.47489529848098755, "train/loss_total": 0.6358235478401184 }, { "epoch": 2.3211327811915576, "step": 8688, "train/loss_ctc": 0.28163278102874756, "train/loss_error": 0.392838716506958, "train/loss_total": 0.3705975413322449 }, { "epoch": 2.321399946566925, "step": 8689, "train/loss_ctc": 0.8281570672988892, "train/loss_error": 0.4385906755924225, "train/loss_total": 0.5165039300918579 }, { "epoch": 2.3216671119422925, "grad_norm": 1.397823691368103, "learning_rate": 1.6079615281859468e-05, "loss": 0.5233, "step": 8690 }, { "epoch": 2.3216671119422925, "step": 8690, "train/loss_ctc": 0.5047317743301392, "train/loss_error": 0.4381944239139557, "train/loss_total": 0.45150190591812134 }, { "epoch": 2.3219342773176597, "step": 8691, "train/loss_ctc": 0.5822892189025879, "train/loss_error": 0.4501715302467346, "train/loss_total": 0.47659507393836975 }, { "epoch": 2.322201442693027, "step": 8692, "train/loss_ctc": 0.2935219407081604, "train/loss_error": 0.47678324580192566, "train/loss_total": 0.44013097882270813 }, { "epoch": 2.3224686080683945, "step": 8693, "train/loss_ctc": 0.8798054456710815, "train/loss_error": 0.4977300763130188, "train/loss_total": 0.5741451382637024 }, { "epoch": 2.3227357734437617, "step": 8694, "train/loss_ctc": 0.875760018825531, "train/loss_error": 0.45276376605033875, "train/loss_total": 0.5373630523681641 }, { 
"epoch": 2.323002938819129, "step": 8695, "train/loss_ctc": 0.7746918201446533, "train/loss_error": 0.4245619773864746, "train/loss_total": 0.4945879578590393 }, { "epoch": 2.3232701041944965, "step": 8696, "train/loss_ctc": 0.8696612119674683, "train/loss_error": 0.48074644804000854, "train/loss_total": 0.5585293769836426 }, { "epoch": 2.3235372695698637, "step": 8697, "train/loss_ctc": 0.4761536121368408, "train/loss_error": 0.42390644550323486, "train/loss_total": 0.43435588479042053 }, { "epoch": 2.323804434945231, "step": 8698, "train/loss_ctc": 0.4880727529525757, "train/loss_error": 0.4792037308216095, "train/loss_total": 0.48097753524780273 }, { "epoch": 2.3240716003205986, "step": 8699, "train/loss_ctc": 1.383704423904419, "train/loss_error": 0.4757886826992035, "train/loss_total": 0.6573718190193176 }, { "epoch": 2.3243387656959658, "grad_norm": 3.8800418376922607, "learning_rate": 1.6063585359337433e-05, "loss": 0.5106, "step": 8700 }, { "epoch": 2.3243387656959658, "step": 8700, "train/loss_ctc": 0.8011082410812378, "train/loss_error": 0.4749055802822113, "train/loss_total": 0.5401461124420166 }, { "epoch": 2.324605931071333, "step": 8701, "train/loss_ctc": 0.38983631134033203, "train/loss_error": 0.4295014441013336, "train/loss_total": 0.4215684235095978 }, { "epoch": 2.3248730964467006, "step": 8702, "train/loss_ctc": 0.8554826378822327, "train/loss_error": 0.4607114791870117, "train/loss_total": 0.539665699005127 }, { "epoch": 2.325140261822068, "step": 8703, "train/loss_ctc": 0.6020280122756958, "train/loss_error": 0.46699172258377075, "train/loss_total": 0.49399900436401367 }, { "epoch": 2.325407427197435, "step": 8704, "train/loss_ctc": 0.6907935738563538, "train/loss_error": 0.4132136404514313, "train/loss_total": 0.4687296152114868 }, { "epoch": 2.3256745925728026, "step": 8705, "train/loss_ctc": 0.43595993518829346, "train/loss_error": 0.44617539644241333, "train/loss_total": 0.44413232803344727 }, { "epoch": 2.32594175794817, "step": 8706, 
"train/loss_ctc": 0.6583563089370728, "train/loss_error": 0.4602450430393219, "train/loss_total": 0.49986732006073 }, { "epoch": 2.3262089233235375, "step": 8707, "train/loss_ctc": 0.8077929019927979, "train/loss_error": 0.4759202301502228, "train/loss_total": 0.5422948002815247 }, { "epoch": 2.3264760886989047, "step": 8708, "train/loss_ctc": 0.473062664270401, "train/loss_error": 0.41812852025032043, "train/loss_total": 0.429115355014801 }, { "epoch": 2.326743254074272, "step": 8709, "train/loss_ctc": 0.6438530683517456, "train/loss_error": 0.4395902752876282, "train/loss_total": 0.4804428517818451 }, { "epoch": 2.3270104194496395, "grad_norm": 1.4757194519042969, "learning_rate": 1.604755543681539e-05, "loss": 0.486, "step": 8710 }, { "epoch": 2.3270104194496395, "step": 8710, "train/loss_ctc": 0.8420679569244385, "train/loss_error": 0.4785903990268707, "train/loss_total": 0.5512859225273132 }, { "epoch": 2.3272775848250067, "step": 8711, "train/loss_ctc": 0.7316020727157593, "train/loss_error": 0.515038013458252, "train/loss_total": 0.5583508610725403 }, { "epoch": 2.327544750200374, "step": 8712, "train/loss_ctc": 0.7089730501174927, "train/loss_error": 0.46712514758110046, "train/loss_total": 0.515494704246521 }, { "epoch": 2.3278119155757415, "step": 8713, "train/loss_ctc": 0.744156539440155, "train/loss_error": 0.39809614419937134, "train/loss_total": 0.4673082232475281 }, { "epoch": 2.3280790809511087, "step": 8714, "train/loss_ctc": 0.5357459783554077, "train/loss_error": 0.40065741539001465, "train/loss_total": 0.42767512798309326 }, { "epoch": 2.328346246326476, "step": 8715, "train/loss_ctc": 0.5848416090011597, "train/loss_error": 0.39420995116233826, "train/loss_total": 0.43233630061149597 }, { "epoch": 2.3286134117018435, "step": 8716, "train/loss_ctc": 0.8050732612609863, "train/loss_error": 0.44034093618392944, "train/loss_total": 0.5132874250411987 }, { "epoch": 2.3288805770772107, "step": 8717, "train/loss_ctc": 0.43499255180358887, 
"train/loss_error": 0.43750134110450745, "train/loss_total": 0.4369995892047882 }, { "epoch": 2.3291477424525784, "step": 8718, "train/loss_ctc": 0.3870985507965088, "train/loss_error": 0.40512099862098694, "train/loss_total": 0.40151649713516235 }, { "epoch": 2.3294149078279456, "step": 8719, "train/loss_ctc": 1.0048408508300781, "train/loss_error": 0.43172529339790344, "train/loss_total": 0.5463484525680542 }, { "epoch": 2.3296820732033128, "grad_norm": 2.067565679550171, "learning_rate": 1.603152551429335e-05, "loss": 0.4851, "step": 8720 }, { "epoch": 2.3296820732033128, "step": 8720, "train/loss_ctc": 0.8628809452056885, "train/loss_error": 0.42038923501968384, "train/loss_total": 0.5088875889778137 }, { "epoch": 2.3299492385786804, "step": 8721, "train/loss_ctc": 0.6527805328369141, "train/loss_error": 0.5266515016555786, "train/loss_total": 0.5518773198127747 }, { "epoch": 2.3302164039540476, "step": 8722, "train/loss_ctc": 0.49065616726875305, "train/loss_error": 0.40955498814582825, "train/loss_total": 0.4257752299308777 }, { "epoch": 2.330483569329415, "step": 8723, "train/loss_ctc": 1.1262519359588623, "train/loss_error": 0.43759992718696594, "train/loss_total": 0.575330376625061 }, { "epoch": 2.3307507347047824, "step": 8724, "train/loss_ctc": 1.2424601316452026, "train/loss_error": 0.4740966856479645, "train/loss_total": 0.6277693510055542 }, { "epoch": 2.3310179000801496, "step": 8725, "train/loss_ctc": 0.682218611240387, "train/loss_error": 0.4467563033103943, "train/loss_total": 0.4938487410545349 }, { "epoch": 2.331285065455517, "step": 8726, "train/loss_ctc": 0.43133801221847534, "train/loss_error": 0.38852229714393616, "train/loss_total": 0.39708542823791504 }, { "epoch": 2.3315522308308845, "step": 8727, "train/loss_ctc": 0.6074482202529907, "train/loss_error": 0.548427164554596, "train/loss_total": 0.5602313876152039 }, { "epoch": 2.3318193962062517, "step": 8728, "train/loss_ctc": 0.5651463270187378, "train/loss_error": 0.4200795888900757, 
"train/loss_total": 0.44909295439720154 }, { "epoch": 2.332086561581619, "step": 8729, "train/loss_ctc": 1.066374659538269, "train/loss_error": 0.45124226808547974, "train/loss_total": 0.5742687582969666 }, { "epoch": 2.3323537269569865, "grad_norm": 3.139583110809326, "learning_rate": 1.6015495591771307e-05, "loss": 0.5164, "step": 8730 }, { "epoch": 2.3323537269569865, "step": 8730, "train/loss_ctc": 0.8593875169754028, "train/loss_error": 0.4594956338405609, "train/loss_total": 0.5394740104675293 }, { "epoch": 2.3326208923323537, "step": 8731, "train/loss_ctc": 0.6901180148124695, "train/loss_error": 0.5059133172035217, "train/loss_total": 0.5427542328834534 }, { "epoch": 2.332888057707721, "step": 8732, "train/loss_ctc": 0.47426170110702515, "train/loss_error": 0.4541102349758148, "train/loss_total": 0.4581405520439148 }, { "epoch": 2.3331552230830885, "step": 8733, "train/loss_ctc": 1.2880260944366455, "train/loss_error": 0.47235721349716187, "train/loss_total": 0.6354910135269165 }, { "epoch": 2.3334223884584557, "step": 8734, "train/loss_ctc": 0.8672440052032471, "train/loss_error": 0.45318639278411865, "train/loss_total": 0.5359979271888733 }, { "epoch": 2.333689553833823, "step": 8735, "train/loss_ctc": 0.4453431963920593, "train/loss_error": 0.5064680576324463, "train/loss_total": 0.4942431151866913 }, { "epoch": 2.3339567192091906, "step": 8736, "train/loss_ctc": 0.3773406147956848, "train/loss_error": 0.38549989461898804, "train/loss_total": 0.3838680386543274 }, { "epoch": 2.3342238845845578, "step": 8737, "train/loss_ctc": 1.0944156646728516, "train/loss_error": 0.49181222915649414, "train/loss_total": 0.6123329401016235 }, { "epoch": 2.334491049959925, "step": 8738, "train/loss_ctc": 1.0639269351959229, "train/loss_error": 0.48215973377227783, "train/loss_total": 0.5985131859779358 }, { "epoch": 2.3347582153352926, "step": 8739, "train/loss_ctc": 1.220472812652588, "train/loss_error": 0.43961799144744873, "train/loss_total": 0.5957889556884766 }, { 
"epoch": 2.33502538071066, "grad_norm": 2.903020143508911, "learning_rate": 1.5999465669249265e-05, "loss": 0.5397, "step": 8740 }, { "epoch": 2.33502538071066, "step": 8740, "train/loss_ctc": 1.0865211486816406, "train/loss_error": 0.4581216871738434, "train/loss_total": 0.5838016271591187 }, { "epoch": 2.3352925460860274, "step": 8741, "train/loss_ctc": 0.4136391282081604, "train/loss_error": 0.4153100550174713, "train/loss_total": 0.4149758815765381 }, { "epoch": 2.3355597114613946, "step": 8742, "train/loss_ctc": 0.5025448799133301, "train/loss_error": 0.4451886713504791, "train/loss_total": 0.4566599428653717 }, { "epoch": 2.335826876836762, "step": 8743, "train/loss_ctc": 0.8578418493270874, "train/loss_error": 0.4715897738933563, "train/loss_total": 0.5488402247428894 }, { "epoch": 2.3360940422121295, "step": 8744, "train/loss_ctc": 0.5031267404556274, "train/loss_error": 0.4115089774131775, "train/loss_total": 0.4298325181007385 }, { "epoch": 2.3363612075874967, "step": 8745, "train/loss_ctc": 0.4682445526123047, "train/loss_error": 0.47090286016464233, "train/loss_total": 0.47037121653556824 }, { "epoch": 2.336628372962864, "step": 8746, "train/loss_ctc": 0.9974239468574524, "train/loss_error": 0.43023553490638733, "train/loss_total": 0.5436732172966003 }, { "epoch": 2.3368955383382315, "step": 8747, "train/loss_ctc": 0.5062693357467651, "train/loss_error": 0.5585840940475464, "train/loss_total": 0.5481211543083191 }, { "epoch": 2.3371627037135987, "step": 8748, "train/loss_ctc": 0.6183816194534302, "train/loss_error": 0.46224385499954224, "train/loss_total": 0.4934714138507843 }, { "epoch": 2.337429869088966, "step": 8749, "train/loss_ctc": 0.7807842493057251, "train/loss_error": 0.48783671855926514, "train/loss_total": 0.5464262366294861 }, { "epoch": 2.3376970344643335, "grad_norm": 2.4094700813293457, "learning_rate": 1.5983435746727224e-05, "loss": 0.5036, "step": 8750 }, { "epoch": 2.3376970344643335, "step": 8750, "train/loss_ctc": 
0.8196901082992554, "train/loss_error": 0.4657503068447113, "train/loss_total": 0.536538302898407 }, { "epoch": 2.3379641998397007, "step": 8751, "train/loss_ctc": 0.42092370986938477, "train/loss_error": 0.4747331738471985, "train/loss_total": 0.4639712870121002 }, { "epoch": 2.3382313652150684, "step": 8752, "train/loss_ctc": 0.8608447313308716, "train/loss_error": 0.4476929306983948, "train/loss_total": 0.530323326587677 }, { "epoch": 2.3384985305904356, "step": 8753, "train/loss_ctc": 1.0909574031829834, "train/loss_error": 0.4699011445045471, "train/loss_total": 0.5941123962402344 }, { "epoch": 2.3387656959658027, "step": 8754, "train/loss_ctc": 1.7853599786758423, "train/loss_error": 0.46495383977890015, "train/loss_total": 0.7290350794792175 }, { "epoch": 2.3390328613411704, "step": 8755, "train/loss_ctc": 0.2200412154197693, "train/loss_error": 0.4383378326892853, "train/loss_total": 0.39467853307724 }, { "epoch": 2.3393000267165376, "step": 8756, "train/loss_ctc": 0.43488961458206177, "train/loss_error": 0.43581998348236084, "train/loss_total": 0.43563392758369446 }, { "epoch": 2.339567192091905, "step": 8757, "train/loss_ctc": 0.4688606262207031, "train/loss_error": 0.41025641560554504, "train/loss_total": 0.42197728157043457 }, { "epoch": 2.3398343574672724, "step": 8758, "train/loss_ctc": 0.5379148125648499, "train/loss_error": 0.47441115975379944, "train/loss_total": 0.487111896276474 }, { "epoch": 2.3401015228426396, "step": 8759, "train/loss_ctc": 0.8232860565185547, "train/loss_error": 0.4884921908378601, "train/loss_total": 0.555450975894928 }, { "epoch": 2.340368688218007, "grad_norm": 1.8585857152938843, "learning_rate": 1.5967405824205185e-05, "loss": 0.5149, "step": 8760 }, { "epoch": 2.340368688218007, "step": 8760, "train/loss_ctc": 1.459864616394043, "train/loss_error": 0.4928337633609772, "train/loss_total": 0.6862399578094482 }, { "epoch": 2.3406358535933744, "step": 8761, "train/loss_ctc": 0.8337335586547852, "train/loss_error": 
0.4652479290962219, "train/loss_total": 0.5389450788497925 }, { "epoch": 2.3409030189687416, "step": 8762, "train/loss_ctc": 0.4905262589454651, "train/loss_error": 0.38704967498779297, "train/loss_total": 0.40774500370025635 }, { "epoch": 2.341170184344109, "step": 8763, "train/loss_ctc": 0.6709740161895752, "train/loss_error": 0.36560678482055664, "train/loss_total": 0.4266802668571472 }, { "epoch": 2.3414373497194765, "step": 8764, "train/loss_ctc": 0.19636520743370056, "train/loss_error": 0.5112062692642212, "train/loss_total": 0.4482380747795105 }, { "epoch": 2.3417045150948437, "step": 8765, "train/loss_ctc": 0.48415607213974, "train/loss_error": 0.5175276398658752, "train/loss_total": 0.5108533501625061 }, { "epoch": 2.341971680470211, "step": 8766, "train/loss_ctc": 1.112693428993225, "train/loss_error": 0.37854689359664917, "train/loss_total": 0.5253762006759644 }, { "epoch": 2.3422388458455785, "step": 8767, "train/loss_ctc": 1.8381562232971191, "train/loss_error": 0.4403170049190521, "train/loss_total": 0.7198848724365234 }, { "epoch": 2.3425060112209457, "step": 8768, "train/loss_ctc": 0.3942146301269531, "train/loss_error": 0.47883984446525574, "train/loss_total": 0.4619148075580597 }, { "epoch": 2.342773176596313, "step": 8769, "train/loss_ctc": 1.1971278190612793, "train/loss_error": 0.43344125151634216, "train/loss_total": 0.5861785411834717 }, { "epoch": 2.3430403419716805, "grad_norm": 1.9337527751922607, "learning_rate": 1.5951375901683143e-05, "loss": 0.5312, "step": 8770 }, { "epoch": 2.3430403419716805, "step": 8770, "train/loss_ctc": 1.2779548168182373, "train/loss_error": 0.46158817410469055, "train/loss_total": 0.624861478805542 }, { "epoch": 2.3433075073470477, "step": 8771, "train/loss_ctc": 0.7632877826690674, "train/loss_error": 0.4791838228702545, "train/loss_total": 0.5360046029090881 }, { "epoch": 2.343574672722415, "step": 8772, "train/loss_ctc": 1.1202008724212646, "train/loss_error": 0.4884524345397949, "train/loss_total": 
0.6148021221160889 }, { "epoch": 2.3438418380977826, "step": 8773, "train/loss_ctc": 0.6127744913101196, "train/loss_error": 0.5108977556228638, "train/loss_total": 0.5312731266021729 }, { "epoch": 2.3441090034731498, "step": 8774, "train/loss_ctc": 0.4454187750816345, "train/loss_error": 0.5691218972206116, "train/loss_total": 0.5443812608718872 }, { "epoch": 2.3443761688485174, "step": 8775, "train/loss_ctc": 0.8314733505249023, "train/loss_error": 0.4184500575065613, "train/loss_total": 0.5010547041893005 }, { "epoch": 2.3446433342238846, "step": 8776, "train/loss_ctc": 0.7909584641456604, "train/loss_error": 0.4493071138858795, "train/loss_total": 0.5176373720169067 }, { "epoch": 2.344910499599252, "step": 8777, "train/loss_ctc": 0.4324510395526886, "train/loss_error": 0.3966693580150604, "train/loss_total": 0.40382570028305054 }, { "epoch": 2.3451776649746194, "step": 8778, "train/loss_ctc": 0.40765106678009033, "train/loss_error": 0.453876256942749, "train/loss_total": 0.4446312189102173 }, { "epoch": 2.3454448303499866, "step": 8779, "train/loss_ctc": 1.2715840339660645, "train/loss_error": 0.48523789644241333, "train/loss_total": 0.6425071358680725 }, { "epoch": 2.345711995725354, "grad_norm": 5.401598930358887, "learning_rate": 1.59353459791611e-05, "loss": 0.5361, "step": 8780 }, { "epoch": 2.345711995725354, "step": 8780, "train/loss_ctc": 2.281546115875244, "train/loss_error": 0.4740528166294098, "train/loss_total": 0.8355515003204346 }, { "epoch": 2.3459791611007215, "step": 8781, "train/loss_ctc": 0.7131984233856201, "train/loss_error": 0.43176451325416565, "train/loss_total": 0.48805129528045654 }, { "epoch": 2.3462463264760887, "step": 8782, "train/loss_ctc": 0.39128613471984863, "train/loss_error": 0.4684921205043793, "train/loss_total": 0.4530509412288666 }, { "epoch": 2.346513491851456, "step": 8783, "train/loss_ctc": 0.5832685828208923, "train/loss_error": 0.45893827080726624, "train/loss_total": 0.4838043451309204 }, { "epoch": 
2.3467806572268235, "step": 8784, "train/loss_ctc": 0.4779517352581024, "train/loss_error": 0.44583284854888916, "train/loss_total": 0.4522566497325897 }, { "epoch": 2.3470478226021907, "step": 8785, "train/loss_ctc": 0.37761515378952026, "train/loss_error": 0.43739911913871765, "train/loss_total": 0.42544233798980713 }, { "epoch": 2.3473149879775583, "step": 8786, "train/loss_ctc": 0.4543399214744568, "train/loss_error": 0.47446441650390625, "train/loss_total": 0.47043952345848083 }, { "epoch": 2.3475821533529255, "step": 8787, "train/loss_ctc": 1.0485697984695435, "train/loss_error": 0.4508286118507385, "train/loss_total": 0.5703768730163574 }, { "epoch": 2.3478493187282927, "step": 8788, "train/loss_ctc": 0.7123659253120422, "train/loss_error": 0.5161269307136536, "train/loss_total": 0.5553747415542603 }, { "epoch": 2.3481164841036604, "step": 8789, "train/loss_ctc": 1.0392582416534424, "train/loss_error": 0.5239738821983337, "train/loss_total": 0.6270307302474976 }, { "epoch": 2.3483836494790276, "grad_norm": 1.9710644483566284, "learning_rate": 1.591931605663906e-05, "loss": 0.5361, "step": 8790 }, { "epoch": 2.3483836494790276, "step": 8790, "train/loss_ctc": 0.8520160913467407, "train/loss_error": 0.45080631971359253, "train/loss_total": 0.5310482978820801 }, { "epoch": 2.3486508148543948, "step": 8791, "train/loss_ctc": 1.0560152530670166, "train/loss_error": 0.5191556811332703, "train/loss_total": 0.6265276074409485 }, { "epoch": 2.3489179802297624, "step": 8792, "train/loss_ctc": 0.7362926602363586, "train/loss_error": 0.49166202545166016, "train/loss_total": 0.5405881404876709 }, { "epoch": 2.3491851456051296, "step": 8793, "train/loss_ctc": 0.5942826271057129, "train/loss_error": 0.4002968370914459, "train/loss_total": 0.43909400701522827 }, { "epoch": 2.349452310980497, "step": 8794, "train/loss_ctc": 0.46599769592285156, "train/loss_error": 0.40572407841682434, "train/loss_total": 0.4177788197994232 }, { "epoch": 2.3497194763558644, "step": 8795, 
"train/loss_ctc": 1.364945411682129, "train/loss_error": 0.48248517513275146, "train/loss_total": 0.6589772701263428 }, { "epoch": 2.3499866417312316, "step": 8796, "train/loss_ctc": 1.112518310546875, "train/loss_error": 0.5074781775474548, "train/loss_total": 0.6284862160682678 }, { "epoch": 2.350253807106599, "step": 8797, "train/loss_ctc": 0.4654443562030792, "train/loss_error": 0.48882946372032166, "train/loss_total": 0.4841524362564087 }, { "epoch": 2.3505209724819665, "step": 8798, "train/loss_ctc": 0.49449867010116577, "train/loss_error": 0.4722343683242798, "train/loss_total": 0.4766872525215149 }, { "epoch": 2.3507881378573336, "step": 8799, "train/loss_ctc": 0.7738299369812012, "train/loss_error": 0.4063686430454254, "train/loss_total": 0.47986090183258057 }, { "epoch": 2.351055303232701, "grad_norm": 4.997534275054932, "learning_rate": 1.5903286134117017e-05, "loss": 0.5283, "step": 8800 }, { "epoch": 2.351055303232701, "step": 8800, "train/loss_ctc": 0.5471168756484985, "train/loss_error": 0.4757717251777649, "train/loss_total": 0.49004074931144714 }, { "epoch": 2.3513224686080685, "step": 8801, "train/loss_ctc": 0.9227390289306641, "train/loss_error": 0.4360211491584778, "train/loss_total": 0.5333647727966309 }, { "epoch": 2.3515896339834357, "step": 8802, "train/loss_ctc": 0.8165837526321411, "train/loss_error": 0.4224932789802551, "train/loss_total": 0.5013114213943481 }, { "epoch": 2.351856799358803, "step": 8803, "train/loss_ctc": 1.3049489259719849, "train/loss_error": 0.38044610619544983, "train/loss_total": 0.5653467178344727 }, { "epoch": 2.3521239647341705, "step": 8804, "train/loss_ctc": 0.8384706974029541, "train/loss_error": 0.4384300112724304, "train/loss_total": 0.5184381604194641 }, { "epoch": 2.3523911301095377, "step": 8805, "train/loss_ctc": 0.6729918718338013, "train/loss_error": 0.45380160212516785, "train/loss_total": 0.49763965606689453 }, { "epoch": 2.3526582954849053, "step": 8806, "train/loss_ctc": 0.8736358880996704, 
"train/loss_error": 0.45232394337654114, "train/loss_total": 0.536586344242096 }, { "epoch": 2.3529254608602725, "step": 8807, "train/loss_ctc": 0.3902406692504883, "train/loss_error": 0.46631187200546265, "train/loss_total": 0.45109763741493225 }, { "epoch": 2.3531926262356397, "step": 8808, "train/loss_ctc": 1.0378038883209229, "train/loss_error": 0.5091773867607117, "train/loss_total": 0.6149027347564697 }, { "epoch": 2.3534597916110074, "step": 8809, "train/loss_ctc": 0.9086430072784424, "train/loss_error": 0.4208439886569977, "train/loss_total": 0.5184037685394287 }, { "epoch": 2.3537269569863746, "grad_norm": 2.019519567489624, "learning_rate": 1.588725621159498e-05, "loss": 0.5227, "step": 8810 }, { "epoch": 2.3537269569863746, "step": 8810, "train/loss_ctc": 0.5284656882286072, "train/loss_error": 0.48899129033088684, "train/loss_total": 0.49688616394996643 }, { "epoch": 2.3539941223617418, "step": 8811, "train/loss_ctc": 0.7706245183944702, "train/loss_error": 0.42258214950561523, "train/loss_total": 0.4921905994415283 }, { "epoch": 2.3542612877371094, "step": 8812, "train/loss_ctc": 0.728729248046875, "train/loss_error": 0.48359760642051697, "train/loss_total": 0.5326239466667175 }, { "epoch": 2.3545284531124766, "step": 8813, "train/loss_ctc": 1.425164818763733, "train/loss_error": 0.4887656271457672, "train/loss_total": 0.6760454773902893 }, { "epoch": 2.354795618487844, "step": 8814, "train/loss_ctc": 0.6988691091537476, "train/loss_error": 0.4916897714138031, "train/loss_total": 0.533125638961792 }, { "epoch": 2.3550627838632114, "step": 8815, "train/loss_ctc": 0.6582852005958557, "train/loss_error": 0.4618649184703827, "train/loss_total": 0.5011489987373352 }, { "epoch": 2.3553299492385786, "step": 8816, "train/loss_ctc": 0.9244657754898071, "train/loss_error": 0.42067795991897583, "train/loss_total": 0.5214354991912842 }, { "epoch": 2.3555971146139463, "step": 8817, "train/loss_ctc": 1.3862855434417725, "train/loss_error": 0.4360275864601135, 
"train/loss_total": 0.6260792016983032 }, { "epoch": 2.3558642799893135, "step": 8818, "train/loss_ctc": 1.366172194480896, "train/loss_error": 0.4614880681037903, "train/loss_total": 0.6424249410629272 }, { "epoch": 2.3561314453646807, "step": 8819, "train/loss_ctc": 0.3390318751335144, "train/loss_error": 0.4054180979728699, "train/loss_total": 0.39214086532592773 }, { "epoch": 2.3563986107400483, "grad_norm": 2.564068078994751, "learning_rate": 1.5871226289072937e-05, "loss": 0.5414, "step": 8820 }, { "epoch": 2.3563986107400483, "step": 8820, "train/loss_ctc": 0.5215107202529907, "train/loss_error": 0.4041104018688202, "train/loss_total": 0.4275904595851898 }, { "epoch": 2.3566657761154155, "step": 8821, "train/loss_ctc": 0.557799220085144, "train/loss_error": 0.4428337812423706, "train/loss_total": 0.4658268690109253 }, { "epoch": 2.3569329414907827, "step": 8822, "train/loss_ctc": 0.3887425661087036, "train/loss_error": 0.4478471279144287, "train/loss_total": 0.4360262155532837 }, { "epoch": 2.3572001068661503, "step": 8823, "train/loss_ctc": 0.7471532821655273, "train/loss_error": 0.5156019330024719, "train/loss_total": 0.5619121789932251 }, { "epoch": 2.3574672722415175, "step": 8824, "train/loss_ctc": 0.754063606262207, "train/loss_error": 0.4723662734031677, "train/loss_total": 0.5287057757377625 }, { "epoch": 2.3577344376168847, "step": 8825, "train/loss_ctc": 0.5645653009414673, "train/loss_error": 0.4686717689037323, "train/loss_total": 0.48785048723220825 }, { "epoch": 2.3580016029922524, "step": 8826, "train/loss_ctc": 0.6082720160484314, "train/loss_error": 0.43096205592155457, "train/loss_total": 0.46642404794692993 }, { "epoch": 2.3582687683676196, "step": 8827, "train/loss_ctc": 0.5297467708587646, "train/loss_error": 0.4384132921695709, "train/loss_total": 0.4566799998283386 }, { "epoch": 2.3585359337429868, "step": 8828, "train/loss_ctc": 0.5115883946418762, "train/loss_error": 0.4655492901802063, "train/loss_total": 0.4747571349143982 }, { 
"epoch": 2.3588030991183544, "step": 8829, "train/loss_ctc": 0.32014337182044983, "train/loss_error": 0.4668388068675995, "train/loss_total": 0.4374997317790985 }, { "epoch": 2.3590702644937216, "grad_norm": 1.64303719997406, "learning_rate": 1.5855196366550895e-05, "loss": 0.4743, "step": 8830 }, { "epoch": 2.3590702644937216, "step": 8830, "train/loss_ctc": 0.7259583473205566, "train/loss_error": 0.44569218158721924, "train/loss_total": 0.5017454624176025 }, { "epoch": 2.359337429869089, "step": 8831, "train/loss_ctc": 0.6921954154968262, "train/loss_error": 0.43491891026496887, "train/loss_total": 0.48637422919273376 }, { "epoch": 2.3596045952444564, "step": 8832, "train/loss_ctc": 0.8332809805870056, "train/loss_error": 0.46764078736305237, "train/loss_total": 0.5407688617706299 }, { "epoch": 2.3598717606198236, "step": 8833, "train/loss_ctc": 0.8283447027206421, "train/loss_error": 0.45562344789505005, "train/loss_total": 0.5301676988601685 }, { "epoch": 2.360138925995191, "step": 8834, "train/loss_ctc": 0.6847772598266602, "train/loss_error": 0.4733635485172272, "train/loss_total": 0.5156462788581848 }, { "epoch": 2.3604060913705585, "step": 8835, "train/loss_ctc": 0.22043108940124512, "train/loss_error": 0.4015915095806122, "train/loss_total": 0.36535942554473877 }, { "epoch": 2.3606732567459257, "step": 8836, "train/loss_ctc": 0.9498533606529236, "train/loss_error": 0.4534519910812378, "train/loss_total": 0.5527322292327881 }, { "epoch": 2.360940422121293, "step": 8837, "train/loss_ctc": 0.18761786818504333, "train/loss_error": 0.4271713197231293, "train/loss_total": 0.3792606294155121 }, { "epoch": 2.3612075874966605, "step": 8838, "train/loss_ctc": 0.6101033687591553, "train/loss_error": 0.4330945611000061, "train/loss_total": 0.46849632263183594 }, { "epoch": 2.3614747528720277, "step": 8839, "train/loss_ctc": 1.2737350463867188, "train/loss_error": 0.4581987261772156, "train/loss_total": 0.6213060021400452 }, { "epoch": 2.3617419182473953, "grad_norm": 
2.6615281105041504, "learning_rate": 1.5839166444028853e-05, "loss": 0.4962, "step": 8840 }, { "epoch": 2.3617419182473953, "step": 8840, "train/loss_ctc": 0.6039729118347168, "train/loss_error": 0.5055326223373413, "train/loss_total": 0.5252206921577454 }, { "epoch": 2.3620090836227625, "step": 8841, "train/loss_ctc": 0.6390333771705627, "train/loss_error": 0.451881468296051, "train/loss_total": 0.4893118739128113 }, { "epoch": 2.3622762489981297, "step": 8842, "train/loss_ctc": 0.8168716430664062, "train/loss_error": 0.4192653298377991, "train/loss_total": 0.498786598443985 }, { "epoch": 2.3625434143734974, "step": 8843, "train/loss_ctc": 1.3440351486206055, "train/loss_error": 0.47417497634887695, "train/loss_total": 0.6481469869613647 }, { "epoch": 2.3628105797488645, "step": 8844, "train/loss_ctc": 0.7932451963424683, "train/loss_error": 0.5295698642730713, "train/loss_total": 0.5823049545288086 }, { "epoch": 2.3630777451242317, "step": 8845, "train/loss_ctc": 1.1419460773468018, "train/loss_error": 0.4066844582557678, "train/loss_total": 0.5537368059158325 }, { "epoch": 2.3633449104995994, "step": 8846, "train/loss_ctc": 0.9664613008499146, "train/loss_error": 0.4294874370098114, "train/loss_total": 0.536882221698761 }, { "epoch": 2.3636120758749666, "step": 8847, "train/loss_ctc": 0.8868013024330139, "train/loss_error": 0.4069159924983978, "train/loss_total": 0.5028930902481079 }, { "epoch": 2.3638792412503338, "step": 8848, "train/loss_ctc": 0.9151843190193176, "train/loss_error": 0.43247345089912415, "train/loss_total": 0.5290156602859497 }, { "epoch": 2.3641464066257014, "step": 8849, "train/loss_ctc": 1.1270101070404053, "train/loss_error": 0.46493062376976013, "train/loss_total": 0.5973465442657471 }, { "epoch": 2.3644135720010686, "grad_norm": 2.4678220748901367, "learning_rate": 1.582313652150681e-05, "loss": 0.5464, "step": 8850 }, { "epoch": 2.3644135720010686, "step": 8850, "train/loss_ctc": 0.6888419985771179, "train/loss_error": 
0.3898754417896271, "train/loss_total": 0.4496687650680542 }, { "epoch": 2.3646807373764362, "step": 8851, "train/loss_ctc": 0.7365692257881165, "train/loss_error": 0.46275487542152405, "train/loss_total": 0.5175177454948425 }, { "epoch": 2.3649479027518034, "step": 8852, "train/loss_ctc": 0.7167302370071411, "train/loss_error": 0.4286823868751526, "train/loss_total": 0.48629194498062134 }, { "epoch": 2.3652150681271706, "step": 8853, "train/loss_ctc": 0.6617781519889832, "train/loss_error": 0.4814257323741913, "train/loss_total": 0.5174962282180786 }, { "epoch": 2.3654822335025383, "step": 8854, "train/loss_ctc": 0.7242820262908936, "train/loss_error": 0.5195822715759277, "train/loss_total": 0.5605222582817078 }, { "epoch": 2.3657493988779055, "step": 8855, "train/loss_ctc": 0.7360303997993469, "train/loss_error": 0.62703937292099, "train/loss_total": 0.6488375663757324 }, { "epoch": 2.3660165642532727, "step": 8856, "train/loss_ctc": 0.7049439549446106, "train/loss_error": 0.4370670020580292, "train/loss_total": 0.49064239859580994 }, { "epoch": 2.3662837296286403, "step": 8857, "train/loss_ctc": 0.5185424089431763, "train/loss_error": 0.45098018646240234, "train/loss_total": 0.4644926190376282 }, { "epoch": 2.3665508950040075, "step": 8858, "train/loss_ctc": 0.8698095083236694, "train/loss_error": 0.3890523314476013, "train/loss_total": 0.4852037727832794 }, { "epoch": 2.3668180603793747, "step": 8859, "train/loss_ctc": 0.4988907277584076, "train/loss_error": 0.4302896559238434, "train/loss_total": 0.4440098702907562 }, { "epoch": 2.3670852257547423, "grad_norm": 1.6754789352416992, "learning_rate": 1.580710659898477e-05, "loss": 0.5065, "step": 8860 }, { "epoch": 2.3670852257547423, "step": 8860, "train/loss_ctc": 0.7232959866523743, "train/loss_error": 0.48301365971565247, "train/loss_total": 0.5310701131820679 }, { "epoch": 2.3673523911301095, "step": 8861, "train/loss_ctc": 1.0872912406921387, "train/loss_error": 0.45828548073768616, "train/loss_total": 
0.5840866565704346 }, { "epoch": 2.3676195565054767, "step": 8862, "train/loss_ctc": 0.31161731481552124, "train/loss_error": 0.5631113052368164, "train/loss_total": 0.5128124952316284 }, { "epoch": 2.3678867218808444, "step": 8863, "train/loss_ctc": 1.3133727312088013, "train/loss_error": 0.382305383682251, "train/loss_total": 0.568518877029419 }, { "epoch": 2.3681538872562116, "step": 8864, "train/loss_ctc": 0.5384926795959473, "train/loss_error": 0.45519116520881653, "train/loss_total": 0.4718514680862427 }, { "epoch": 2.3684210526315788, "step": 8865, "train/loss_ctc": 0.7464965581893921, "train/loss_error": 0.46727871894836426, "train/loss_total": 0.5231223106384277 }, { "epoch": 2.3686882180069464, "step": 8866, "train/loss_ctc": 0.5398451089859009, "train/loss_error": 0.4840759336948395, "train/loss_total": 0.4952297806739807 }, { "epoch": 2.3689553833823136, "step": 8867, "train/loss_ctc": 0.7192164659500122, "train/loss_error": 0.44298672676086426, "train/loss_total": 0.4982326924800873 }, { "epoch": 2.369222548757681, "step": 8868, "train/loss_ctc": 0.4958128035068512, "train/loss_error": 0.4026936888694763, "train/loss_total": 0.42131751775741577 }, { "epoch": 2.3694897141330484, "step": 8869, "train/loss_ctc": 0.8698369860649109, "train/loss_error": 0.39464113116264343, "train/loss_total": 0.48968029022216797 }, { "epoch": 2.3697568795084156, "grad_norm": 1.1397262811660767, "learning_rate": 1.579107667646273e-05, "loss": 0.5096, "step": 8870 }, { "epoch": 2.3697568795084156, "step": 8870, "train/loss_ctc": 0.6614151000976562, "train/loss_error": 0.43727245926856995, "train/loss_total": 0.4821009635925293 }, { "epoch": 2.370024044883783, "step": 8871, "train/loss_ctc": 0.640194296836853, "train/loss_error": 0.3921649754047394, "train/loss_total": 0.44177085161209106 }, { "epoch": 2.3702912102591505, "step": 8872, "train/loss_ctc": 0.5322418808937073, "train/loss_error": 0.4030910134315491, "train/loss_total": 0.4289211928844452 }, { "epoch": 
2.3705583756345177, "step": 8873, "train/loss_ctc": 0.7614595293998718, "train/loss_error": 0.5008113384246826, "train/loss_total": 0.5529409646987915 }, { "epoch": 2.3708255410098853, "step": 8874, "train/loss_ctc": 0.36009353399276733, "train/loss_error": 0.38238775730133057, "train/loss_total": 0.3779289126396179 }, { "epoch": 2.3710927063852525, "step": 8875, "train/loss_ctc": 0.6236898899078369, "train/loss_error": 0.4915669560432434, "train/loss_total": 0.5179915428161621 }, { "epoch": 2.3713598717606197, "step": 8876, "train/loss_ctc": 0.8996330499649048, "train/loss_error": 0.4294399917125702, "train/loss_total": 0.523478627204895 }, { "epoch": 2.3716270371359873, "step": 8877, "train/loss_ctc": 0.8335157632827759, "train/loss_error": 0.4987808167934418, "train/loss_total": 0.5657278299331665 }, { "epoch": 2.3718942025113545, "step": 8878, "train/loss_ctc": 0.8894385695457458, "train/loss_error": 0.5311394333839417, "train/loss_total": 0.6027992963790894 }, { "epoch": 2.3721613678867217, "step": 8879, "train/loss_ctc": 1.1911466121673584, "train/loss_error": 0.4037110507488251, "train/loss_total": 0.5611981749534607 }, { "epoch": 2.3724285332620894, "grad_norm": 2.106099843978882, "learning_rate": 1.5775046753940692e-05, "loss": 0.5055, "step": 8880 }, { "epoch": 2.3724285332620894, "step": 8880, "train/loss_ctc": 0.6040993928909302, "train/loss_error": 0.5128283500671387, "train/loss_total": 0.5310825705528259 }, { "epoch": 2.3726956986374566, "step": 8881, "train/loss_ctc": 0.7586444020271301, "train/loss_error": 0.40064293146133423, "train/loss_total": 0.4722432494163513 }, { "epoch": 2.3729628640128237, "step": 8882, "train/loss_ctc": 0.989983081817627, "train/loss_error": 0.44855690002441406, "train/loss_total": 0.5568421483039856 }, { "epoch": 2.3732300293881914, "step": 8883, "train/loss_ctc": 0.9692316055297852, "train/loss_error": 0.44909682869911194, "train/loss_total": 0.5531237721443176 }, { "epoch": 2.3734971947635586, "step": 8884, 
"train/loss_ctc": 1.0325164794921875, "train/loss_error": 0.4767788052558899, "train/loss_total": 0.5879263877868652 }, { "epoch": 2.373764360138926, "step": 8885, "train/loss_ctc": 1.5373523235321045, "train/loss_error": 0.43962302803993225, "train/loss_total": 0.6591688990592957 }, { "epoch": 2.3740315255142934, "step": 8886, "train/loss_ctc": 0.49498143792152405, "train/loss_error": 0.4900345206260681, "train/loss_total": 0.4910238981246948 }, { "epoch": 2.3742986908896606, "step": 8887, "train/loss_ctc": 1.1616883277893066, "train/loss_error": 0.45535194873809814, "train/loss_total": 0.5966192483901978 }, { "epoch": 2.3745658562650283, "step": 8888, "train/loss_ctc": 1.2716577053070068, "train/loss_error": 0.4465278387069702, "train/loss_total": 0.6115538477897644 }, { "epoch": 2.3748330216403954, "step": 8889, "train/loss_ctc": 0.44913285970687866, "train/loss_error": 0.5187870264053345, "train/loss_total": 0.5048561692237854 }, { "epoch": 2.3751001870157626, "grad_norm": 2.615082025527954, "learning_rate": 1.575901683141865e-05, "loss": 0.5564, "step": 8890 }, { "epoch": 2.3751001870157626, "step": 8890, "train/loss_ctc": 1.1254866123199463, "train/loss_error": 0.5006477236747742, "train/loss_total": 0.6256154775619507 }, { "epoch": 2.3753673523911303, "step": 8891, "train/loss_ctc": 0.4022454619407654, "train/loss_error": 0.4900372326374054, "train/loss_total": 0.4724788963794708 }, { "epoch": 2.3756345177664975, "step": 8892, "train/loss_ctc": 0.8082234859466553, "train/loss_error": 0.4527667164802551, "train/loss_total": 0.5238580703735352 }, { "epoch": 2.3759016831418647, "step": 8893, "train/loss_ctc": 0.4972045421600342, "train/loss_error": 0.4133400619029999, "train/loss_total": 0.43011295795440674 }, { "epoch": 2.3761688485172323, "step": 8894, "train/loss_ctc": 0.4717802405357361, "train/loss_error": 0.5040098428726196, "train/loss_total": 0.4975639283657074 }, { "epoch": 2.3764360138925995, "step": 8895, "train/loss_ctc": 0.5106251835823059, 
"train/loss_error": 0.4808218777179718, "train/loss_total": 0.4867825508117676 }, { "epoch": 2.3767031792679667, "step": 8896, "train/loss_ctc": 1.125838041305542, "train/loss_error": 0.464543879032135, "train/loss_total": 0.5968027114868164 }, { "epoch": 2.3769703446433343, "step": 8897, "train/loss_ctc": 0.6769442558288574, "train/loss_error": 0.4632425308227539, "train/loss_total": 0.5059828758239746 }, { "epoch": 2.3772375100187015, "step": 8898, "train/loss_ctc": 0.4504358768463135, "train/loss_error": 0.4240262508392334, "train/loss_total": 0.4293081760406494 }, { "epoch": 2.3775046753940687, "step": 8899, "train/loss_ctc": 0.7168631553649902, "train/loss_error": 0.41706839203834534, "train/loss_total": 0.47702735662460327 }, { "epoch": 2.3777718407694364, "grad_norm": 10.587728500366211, "learning_rate": 1.574298690889661e-05, "loss": 0.5046, "step": 8900 }, { "epoch": 2.3777718407694364, "step": 8900, "train/loss_ctc": 0.9247279167175293, "train/loss_error": 0.4395182132720947, "train/loss_total": 0.5365601778030396 }, { "epoch": 2.3780390061448036, "step": 8901, "train/loss_ctc": 0.2650150656700134, "train/loss_error": 0.44628652930259705, "train/loss_total": 0.4100322425365448 }, { "epoch": 2.3783061715201708, "step": 8902, "train/loss_ctc": 0.675103485584259, "train/loss_error": 0.46088218688964844, "train/loss_total": 0.5037264823913574 }, { "epoch": 2.3785733368955384, "step": 8903, "train/loss_ctc": 1.2435033321380615, "train/loss_error": 0.471117228269577, "train/loss_total": 0.625594437122345 }, { "epoch": 2.3788405022709056, "step": 8904, "train/loss_ctc": 0.8277779817581177, "train/loss_error": 0.45802587270736694, "train/loss_total": 0.5319763422012329 }, { "epoch": 2.379107667646273, "step": 8905, "train/loss_ctc": 1.5029330253601074, "train/loss_error": 0.4288010597229004, "train/loss_total": 0.6436274647712708 }, { "epoch": 2.3793748330216404, "step": 8906, "train/loss_ctc": 0.43996375799179077, "train/loss_error": 0.46036988496780396, 
"train/loss_total": 0.4562886655330658 }, { "epoch": 2.3796419983970076, "step": 8907, "train/loss_ctc": 1.2464466094970703, "train/loss_error": 0.48780104517936707, "train/loss_total": 0.6395301818847656 }, { "epoch": 2.3799091637723753, "step": 8908, "train/loss_ctc": 0.726452112197876, "train/loss_error": 0.4529620409011841, "train/loss_total": 0.5076600313186646 }, { "epoch": 2.3801763291477425, "step": 8909, "train/loss_ctc": 0.6877027750015259, "train/loss_error": 0.49987828731536865, "train/loss_total": 0.537443220615387 }, { "epoch": 2.3804434945231097, "grad_norm": 1.5563457012176514, "learning_rate": 1.5726956986374566e-05, "loss": 0.5392, "step": 8910 }, { "epoch": 2.3804434945231097, "step": 8910, "train/loss_ctc": 0.5692065358161926, "train/loss_error": 0.46641236543655396, "train/loss_total": 0.4869711995124817 }, { "epoch": 2.3807106598984773, "step": 8911, "train/loss_ctc": 0.7325730323791504, "train/loss_error": 0.42356398701667786, "train/loss_total": 0.4853658080101013 }, { "epoch": 2.3809778252738445, "step": 8912, "train/loss_ctc": 0.9053059816360474, "train/loss_error": 0.4740298092365265, "train/loss_total": 0.5602850317955017 }, { "epoch": 2.3812449906492117, "step": 8913, "train/loss_ctc": 1.0218976736068726, "train/loss_error": 0.4172349274158478, "train/loss_total": 0.5381674766540527 }, { "epoch": 2.3815121560245793, "step": 8914, "train/loss_ctc": 0.642369270324707, "train/loss_error": 0.49862733483314514, "train/loss_total": 0.5273757576942444 }, { "epoch": 2.3817793213999465, "step": 8915, "train/loss_ctc": 0.8260654211044312, "train/loss_error": 0.4745592474937439, "train/loss_total": 0.5448604822158813 }, { "epoch": 2.382046486775314, "step": 8916, "train/loss_ctc": 0.3726976811885834, "train/loss_error": 0.4683411121368408, "train/loss_total": 0.4492124319076538 }, { "epoch": 2.3823136521506814, "step": 8917, "train/loss_ctc": 0.4664309620857239, "train/loss_error": 0.3869822323322296, "train/loss_total": 0.4028719663619995 }, { 
"epoch": 2.3825808175260486, "step": 8918, "train/loss_ctc": 0.3694847822189331, "train/loss_error": 0.43874406814575195, "train/loss_total": 0.42489224672317505 }, { "epoch": 2.382847982901416, "step": 8919, "train/loss_ctc": 0.9333551526069641, "train/loss_error": 0.5022521018981934, "train/loss_total": 0.5884727239608765 }, { "epoch": 2.3831151482767834, "grad_norm": 1.6396563053131104, "learning_rate": 1.5710927063852524e-05, "loss": 0.5008, "step": 8920 }, { "epoch": 2.3831151482767834, "step": 8920, "train/loss_ctc": 0.49186965823173523, "train/loss_error": 0.518822431564331, "train/loss_total": 0.5134319067001343 }, { "epoch": 2.3833823136521506, "step": 8921, "train/loss_ctc": 0.6767641305923462, "train/loss_error": 0.38903874158859253, "train/loss_total": 0.4465838074684143 }, { "epoch": 2.3836494790275182, "step": 8922, "train/loss_ctc": 1.2317147254943848, "train/loss_error": 0.5285800695419312, "train/loss_total": 0.669206976890564 }, { "epoch": 2.3839166444028854, "step": 8923, "train/loss_ctc": 0.5877769589424133, "train/loss_error": 0.4499112069606781, "train/loss_total": 0.4774843454360962 }, { "epoch": 2.3841838097782526, "step": 8924, "train/loss_ctc": 0.39648082852363586, "train/loss_error": 0.4557700753211975, "train/loss_total": 0.44391223788261414 }, { "epoch": 2.3844509751536203, "step": 8925, "train/loss_ctc": 0.5116263031959534, "train/loss_error": 0.474946528673172, "train/loss_total": 0.48228248953819275 }, { "epoch": 2.3847181405289875, "step": 8926, "train/loss_ctc": 1.2458655834197998, "train/loss_error": 0.480821818113327, "train/loss_total": 0.6338306069374084 }, { "epoch": 2.3849853059043546, "step": 8927, "train/loss_ctc": 0.42864465713500977, "train/loss_error": 0.4486214518547058, "train/loss_total": 0.444626122713089 }, { "epoch": 2.3852524712797223, "step": 8928, "train/loss_ctc": 0.675875186920166, "train/loss_error": 0.4644271731376648, "train/loss_total": 0.506716787815094 }, { "epoch": 2.3855196366550895, "step": 8929, 
"train/loss_ctc": 0.28275614976882935, "train/loss_error": 0.45687177777290344, "train/loss_total": 0.4220486581325531 }, { "epoch": 2.3857868020304567, "grad_norm": 2.6408588886260986, "learning_rate": 1.5694897141330486e-05, "loss": 0.504, "step": 8930 }, { "epoch": 2.3857868020304567, "step": 8930, "train/loss_ctc": 0.6674518585205078, "train/loss_error": 0.4862903654575348, "train/loss_total": 0.5225226879119873 }, { "epoch": 2.3860539674058243, "step": 8931, "train/loss_ctc": 0.3775521516799927, "train/loss_error": 0.45311692357063293, "train/loss_total": 0.4380039870738983 }, { "epoch": 2.3863211327811915, "step": 8932, "train/loss_ctc": 0.4554082751274109, "train/loss_error": 0.47262096405029297, "train/loss_total": 0.4691784381866455 }, { "epoch": 2.3865882981565587, "step": 8933, "train/loss_ctc": 0.5815126299858093, "train/loss_error": 0.4825757145881653, "train/loss_total": 0.5023630857467651 }, { "epoch": 2.3868554635319263, "step": 8934, "train/loss_ctc": 0.5795246362686157, "train/loss_error": 0.43530213832855225, "train/loss_total": 0.4641466438770294 }, { "epoch": 2.3871226289072935, "step": 8935, "train/loss_ctc": 0.8665194511413574, "train/loss_error": 0.4769769310951233, "train/loss_total": 0.5548854470252991 }, { "epoch": 2.3873897942826607, "step": 8936, "train/loss_ctc": 0.8155874609947205, "train/loss_error": 0.47341522574424744, "train/loss_total": 0.541849672794342 }, { "epoch": 2.3876569596580284, "step": 8937, "train/loss_ctc": 2.1243553161621094, "train/loss_error": 0.5084067583084106, "train/loss_total": 0.8315964937210083 }, { "epoch": 2.3879241250333956, "step": 8938, "train/loss_ctc": 0.6298738121986389, "train/loss_error": 0.43397533893585205, "train/loss_total": 0.47315502166748047 }, { "epoch": 2.388191290408763, "step": 8939, "train/loss_ctc": 0.467732697725296, "train/loss_error": 0.4478599727153778, "train/loss_total": 0.4518345296382904 }, { "epoch": 2.3884584557841304, "grad_norm": 2.769680976867676, "learning_rate": 
1.5678867218808444e-05, "loss": 0.525, "step": 8940 }, { "epoch": 2.3884584557841304, "step": 8940, "train/loss_ctc": 0.47948190569877625, "train/loss_error": 0.4978697896003723, "train/loss_total": 0.4941922426223755 }, { "epoch": 2.3887256211594976, "step": 8941, "train/loss_ctc": 0.5166174173355103, "train/loss_error": 0.5499586462974548, "train/loss_total": 0.543290376663208 }, { "epoch": 2.3889927865348652, "step": 8942, "train/loss_ctc": 0.6316666007041931, "train/loss_error": 0.479056179523468, "train/loss_total": 0.509578287601471 }, { "epoch": 2.3892599519102324, "step": 8943, "train/loss_ctc": 0.7109532356262207, "train/loss_error": 0.4232170879840851, "train/loss_total": 0.48076432943344116 }, { "epoch": 2.3895271172855996, "step": 8944, "train/loss_ctc": 0.9407321214675903, "train/loss_error": 0.4561706483364105, "train/loss_total": 0.5530829429626465 }, { "epoch": 2.3897942826609673, "step": 8945, "train/loss_ctc": 0.493095725774765, "train/loss_error": 0.4945407509803772, "train/loss_total": 0.49425172805786133 }, { "epoch": 2.3900614480363345, "step": 8946, "train/loss_ctc": 0.3735034465789795, "train/loss_error": 0.38692161440849304, "train/loss_total": 0.38423797488212585 }, { "epoch": 2.3903286134117017, "step": 8947, "train/loss_ctc": 0.7146968841552734, "train/loss_error": 0.45014557242393494, "train/loss_total": 0.5030558109283447 }, { "epoch": 2.3905957787870693, "step": 8948, "train/loss_ctc": 0.4001065194606781, "train/loss_error": 0.3779950737953186, "train/loss_total": 0.38241738080978394 }, { "epoch": 2.3908629441624365, "step": 8949, "train/loss_ctc": 0.766752302646637, "train/loss_error": 0.47355708479881287, "train/loss_total": 0.5321961045265198 }, { "epoch": 2.391130109537804, "grad_norm": 1.346714973449707, "learning_rate": 1.5662837296286402e-05, "loss": 0.4877, "step": 8950 }, { "epoch": 2.391130109537804, "step": 8950, "train/loss_ctc": 0.6591615080833435, "train/loss_error": 0.4955956041812897, "train/loss_total": 
0.5283087491989136 }, { "epoch": 2.3913972749131713, "step": 8951, "train/loss_ctc": 0.9644699096679688, "train/loss_error": 0.4726216793060303, "train/loss_total": 0.5709913372993469 }, { "epoch": 2.3916644402885385, "step": 8952, "train/loss_ctc": 0.8349693417549133, "train/loss_error": 0.4284825921058655, "train/loss_total": 0.5097799301147461 }, { "epoch": 2.391931605663906, "step": 8953, "train/loss_ctc": 0.833388090133667, "train/loss_error": 0.44404417276382446, "train/loss_total": 0.5219129323959351 }, { "epoch": 2.3921987710392734, "step": 8954, "train/loss_ctc": 0.5131297707557678, "train/loss_error": 0.5012768507003784, "train/loss_total": 0.5036474466323853 }, { "epoch": 2.3924659364146406, "step": 8955, "train/loss_ctc": 0.5655453205108643, "train/loss_error": 0.525811493396759, "train/loss_total": 0.533758282661438 }, { "epoch": 2.392733101790008, "step": 8956, "train/loss_ctc": 0.5698267221450806, "train/loss_error": 0.4778323471546173, "train/loss_total": 0.4962312579154968 }, { "epoch": 2.3930002671653754, "step": 8957, "train/loss_ctc": 0.4207484722137451, "train/loss_error": 0.4227275550365448, "train/loss_total": 0.4223317503929138 }, { "epoch": 2.3932674325407426, "step": 8958, "train/loss_ctc": 0.4645845293998718, "train/loss_error": 0.46456122398376465, "train/loss_total": 0.4645659029483795 }, { "epoch": 2.3935345979161102, "step": 8959, "train/loss_ctc": 0.6900432109832764, "train/loss_error": 0.43292751908302307, "train/loss_total": 0.48435068130493164 }, { "epoch": 2.3938017632914774, "grad_norm": 3.531236410140991, "learning_rate": 1.564680737376436e-05, "loss": 0.5036, "step": 8960 }, { "epoch": 2.3938017632914774, "step": 8960, "train/loss_ctc": 0.732940673828125, "train/loss_error": 0.502746045589447, "train/loss_total": 0.5487849712371826 }, { "epoch": 2.3940689286668446, "step": 8961, "train/loss_ctc": 1.1710340976715088, "train/loss_error": 0.38706114888191223, "train/loss_total": 0.5438557863235474 }, { "epoch": 
2.3943360940422123, "step": 8962, "train/loss_ctc": 1.1768399477005005, "train/loss_error": 0.4552464187145233, "train/loss_total": 0.5995651483535767 }, { "epoch": 2.3946032594175795, "step": 8963, "train/loss_ctc": 0.5290789008140564, "train/loss_error": 0.44867050647735596, "train/loss_total": 0.464752197265625 }, { "epoch": 2.3948704247929467, "step": 8964, "train/loss_ctc": 0.3271304666996002, "train/loss_error": 0.48515504598617554, "train/loss_total": 0.45355015993118286 }, { "epoch": 2.3951375901683143, "step": 8965, "train/loss_ctc": 0.7812142968177795, "train/loss_error": 0.47446703910827637, "train/loss_total": 0.535816490650177 }, { "epoch": 2.3954047555436815, "step": 8966, "train/loss_ctc": 1.380397081375122, "train/loss_error": 0.5005527138710022, "train/loss_total": 0.6765215992927551 }, { "epoch": 2.3956719209190487, "step": 8967, "train/loss_ctc": 0.47231853008270264, "train/loss_error": 0.4715825915336609, "train/loss_total": 0.4717297852039337 }, { "epoch": 2.3959390862944163, "step": 8968, "train/loss_ctc": 0.7592573761940002, "train/loss_error": 0.49795302748680115, "train/loss_total": 0.5502139329910278 }, { "epoch": 2.3962062516697835, "step": 8969, "train/loss_ctc": 1.111953854560852, "train/loss_error": 0.4139460325241089, "train/loss_total": 0.5535476207733154 }, { "epoch": 2.3964734170451507, "grad_norm": 1.4089345932006836, "learning_rate": 1.5630777451242318e-05, "loss": 0.5398, "step": 8970 }, { "epoch": 2.3964734170451507, "step": 8970, "train/loss_ctc": 0.7114678621292114, "train/loss_error": 0.4625130593776703, "train/loss_total": 0.5123040676116943 }, { "epoch": 2.3967405824205184, "step": 8971, "train/loss_ctc": 0.5634312629699707, "train/loss_error": 0.4001464247703552, "train/loss_total": 0.4328033924102783 }, { "epoch": 2.3970077477958855, "step": 8972, "train/loss_ctc": 1.0893865823745728, "train/loss_error": 0.451503187417984, "train/loss_total": 0.5790798664093018 }, { "epoch": 2.397274913171253, "step": 8973, 
"train/loss_ctc": 0.5045075416564941, "train/loss_error": 0.45036962628364563, "train/loss_total": 0.46119722723960876 }, { "epoch": 2.3975420785466204, "step": 8974, "train/loss_ctc": 0.5677545666694641, "train/loss_error": 0.4219963252544403, "train/loss_total": 0.45114797353744507 }, { "epoch": 2.3978092439219876, "step": 8975, "train/loss_ctc": 0.9645604491233826, "train/loss_error": 0.4990563690662384, "train/loss_total": 0.5921571850776672 }, { "epoch": 2.398076409297355, "step": 8976, "train/loss_ctc": 0.6374106407165527, "train/loss_error": 0.4481639564037323, "train/loss_total": 0.4860132932662964 }, { "epoch": 2.3983435746727224, "step": 8977, "train/loss_ctc": 0.9425953030586243, "train/loss_error": 0.5112229585647583, "train/loss_total": 0.5974974632263184 }, { "epoch": 2.3986107400480896, "step": 8978, "train/loss_ctc": 0.974137544631958, "train/loss_error": 0.4131757915019989, "train/loss_total": 0.5253681540489197 }, { "epoch": 2.3988779054234572, "step": 8979, "train/loss_ctc": 0.38546866178512573, "train/loss_error": 0.3851117491722107, "train/loss_total": 0.3851831555366516 }, { "epoch": 2.3991450707988244, "grad_norm": 4.074772834777832, "learning_rate": 1.5614747528720276e-05, "loss": 0.5023, "step": 8980 }, { "epoch": 2.3991450707988244, "step": 8980, "train/loss_ctc": 0.7441490888595581, "train/loss_error": 0.4241075813770294, "train/loss_total": 0.4881158769130707 }, { "epoch": 2.3994122361741916, "step": 8981, "train/loss_ctc": 0.6572889089584351, "train/loss_error": 0.4816916882991791, "train/loss_total": 0.5168111324310303 }, { "epoch": 2.3996794015495593, "step": 8982, "train/loss_ctc": 0.569622278213501, "train/loss_error": 0.5229526162147522, "train/loss_total": 0.5322865843772888 }, { "epoch": 2.3999465669249265, "step": 8983, "train/loss_ctc": 0.6015806198120117, "train/loss_error": 0.4614610970020294, "train/loss_total": 0.4894849956035614 }, { "epoch": 2.400213732300294, "step": 8984, "train/loss_ctc": 0.4660133123397827, 
"train/loss_error": 0.38739070296287537, "train/loss_total": 0.4031152129173279 }, { "epoch": 2.4004808976756613, "step": 8985, "train/loss_ctc": 0.7577170133590698, "train/loss_error": 0.4235711991786957, "train/loss_total": 0.49040037393569946 }, { "epoch": 2.4007480630510285, "step": 8986, "train/loss_ctc": 0.3381898105144501, "train/loss_error": 0.387031614780426, "train/loss_total": 0.37726324796676636 }, { "epoch": 2.401015228426396, "step": 8987, "train/loss_ctc": 0.6971148252487183, "train/loss_error": 0.47172021865844727, "train/loss_total": 0.5167991518974304 }, { "epoch": 2.4012823938017633, "step": 8988, "train/loss_ctc": 0.4605148434638977, "train/loss_error": 0.3800928592681885, "train/loss_total": 0.3961772620677948 }, { "epoch": 2.4015495591771305, "step": 8989, "train/loss_ctc": 0.9284213185310364, "train/loss_error": 0.4702928066253662, "train/loss_total": 0.5619184970855713 }, { "epoch": 2.401816724552498, "grad_norm": 2.115234136581421, "learning_rate": 1.5598717606198238e-05, "loss": 0.4772, "step": 8990 }, { "epoch": 2.401816724552498, "step": 8990, "train/loss_ctc": 0.6802237033843994, "train/loss_error": 0.4712286591529846, "train/loss_total": 0.5130276679992676 }, { "epoch": 2.4020838899278654, "step": 8991, "train/loss_ctc": 0.42481160163879395, "train/loss_error": 0.46477964520454407, "train/loss_total": 0.45678603649139404 }, { "epoch": 2.4023510553032326, "step": 8992, "train/loss_ctc": 1.1905231475830078, "train/loss_error": 0.4553444981575012, "train/loss_total": 0.6023802161216736 }, { "epoch": 2.4026182206786, "step": 8993, "train/loss_ctc": 1.2614164352416992, "train/loss_error": 0.41332876682281494, "train/loss_total": 0.5829463005065918 }, { "epoch": 2.4028853860539674, "step": 8994, "train/loss_ctc": 1.1564936637878418, "train/loss_error": 0.47728148102760315, "train/loss_total": 0.613123893737793 }, { "epoch": 2.4031525514293346, "step": 8995, "train/loss_ctc": 0.6718668341636658, "train/loss_error": 0.438865602016449, 
"train/loss_total": 0.4854658544063568 }, { "epoch": 2.4034197168047022, "step": 8996, "train/loss_ctc": 0.6364676356315613, "train/loss_error": 0.514011561870575, "train/loss_total": 0.5385028123855591 }, { "epoch": 2.4036868821800694, "step": 8997, "train/loss_ctc": 0.914677619934082, "train/loss_error": 0.44541501998901367, "train/loss_total": 0.5392675399780273 }, { "epoch": 2.4039540475554366, "step": 8998, "train/loss_ctc": 0.790347695350647, "train/loss_error": 0.4512021541595459, "train/loss_total": 0.519031286239624 }, { "epoch": 2.4042212129308043, "step": 8999, "train/loss_ctc": 0.7643985748291016, "train/loss_error": 0.45304906368255615, "train/loss_total": 0.5153189897537231 }, { "epoch": 2.4044883783061715, "grad_norm": 3.4016406536102295, "learning_rate": 1.5582687683676196e-05, "loss": 0.5366, "step": 9000 }, { "epoch": 2.4044883783061715, "step": 9000, "train/loss_ctc": 0.5732405781745911, "train/loss_error": 0.4115886390209198, "train/loss_total": 0.4439190626144409 }, { "epoch": 2.4047555436815387, "step": 9001, "train/loss_ctc": 0.5822398662567139, "train/loss_error": 0.5332261323928833, "train/loss_total": 0.5430288910865784 }, { "epoch": 2.4050227090569063, "step": 9002, "train/loss_ctc": 0.5813132524490356, "train/loss_error": 0.45283663272857666, "train/loss_total": 0.47853195667266846 }, { "epoch": 2.4052898744322735, "step": 9003, "train/loss_ctc": 0.5671212673187256, "train/loss_error": 0.44213712215423584, "train/loss_total": 0.46713393926620483 }, { "epoch": 2.4055570398076407, "step": 9004, "train/loss_ctc": 0.42580193281173706, "train/loss_error": 0.4240823984146118, "train/loss_total": 0.4244263172149658 }, { "epoch": 2.4058242051830083, "step": 9005, "train/loss_ctc": 0.4579070806503296, "train/loss_error": 0.4999143183231354, "train/loss_total": 0.49151289463043213 }, { "epoch": 2.4060913705583755, "step": 9006, "train/loss_ctc": 0.9095335602760315, "train/loss_error": 0.4694463014602661, "train/loss_total": 0.5574637651443481 }, { 
"epoch": 2.406358535933743, "step": 9007, "train/loss_ctc": 1.1622549295425415, "train/loss_error": 0.4625411331653595, "train/loss_total": 0.602483868598938 }, { "epoch": 2.4066257013091104, "step": 9008, "train/loss_ctc": 0.6779060363769531, "train/loss_error": 0.4444043040275574, "train/loss_total": 0.4911046624183655 }, { "epoch": 2.4068928666844775, "step": 9009, "train/loss_ctc": 0.31537988781929016, "train/loss_error": 0.43035152554512024, "train/loss_total": 0.40735721588134766 }, { "epoch": 2.407160032059845, "grad_norm": 1.7841061353683472, "learning_rate": 1.5566657761154154e-05, "loss": 0.4907, "step": 9010 }, { "epoch": 2.407160032059845, "step": 9010, "train/loss_ctc": 0.5037299394607544, "train/loss_error": 0.4753926992416382, "train/loss_total": 0.4810601472854614 }, { "epoch": 2.4074271974352124, "step": 9011, "train/loss_ctc": 0.7683477401733398, "train/loss_error": 0.49382132291793823, "train/loss_total": 0.5487266182899475 }, { "epoch": 2.4076943628105796, "step": 9012, "train/loss_ctc": 0.5176993608474731, "train/loss_error": 0.5115613341331482, "train/loss_total": 0.5127889513969421 }, { "epoch": 2.407961528185947, "step": 9013, "train/loss_ctc": 1.0032007694244385, "train/loss_error": 0.4274628758430481, "train/loss_total": 0.5426104664802551 }, { "epoch": 2.4082286935613144, "step": 9014, "train/loss_ctc": 0.6617419719696045, "train/loss_error": 0.4085317850112915, "train/loss_total": 0.45917385816574097 }, { "epoch": 2.408495858936682, "step": 9015, "train/loss_ctc": 0.37499821186065674, "train/loss_error": 0.42129722237586975, "train/loss_total": 0.4120374321937561 }, { "epoch": 2.4087630243120493, "step": 9016, "train/loss_ctc": 0.6300997734069824, "train/loss_error": 0.42145708203315735, "train/loss_total": 0.4631856381893158 }, { "epoch": 2.4090301896874164, "step": 9017, "train/loss_ctc": 0.3224387764930725, "train/loss_error": 0.3651372194290161, "train/loss_total": 0.35659754276275635 }, { "epoch": 2.409297355062784, "step": 9018, 
"train/loss_ctc": 1.0285663604736328, "train/loss_error": 0.5218489766120911, "train/loss_total": 0.6231924295425415 }, { "epoch": 2.4095645204381513, "step": 9019, "train/loss_ctc": 0.2675586938858032, "train/loss_error": 0.447433739900589, "train/loss_total": 0.41145873069763184 }, { "epoch": 2.4098316858135185, "grad_norm": 2.398451805114746, "learning_rate": 1.5550627838632112e-05, "loss": 0.4811, "step": 9020 }, { "epoch": 2.4098316858135185, "step": 9020, "train/loss_ctc": 0.9470508694648743, "train/loss_error": 0.3461567461490631, "train/loss_total": 0.46633559465408325 }, { "epoch": 2.410098851188886, "step": 9021, "train/loss_ctc": 0.8045310974121094, "train/loss_error": 0.4701296091079712, "train/loss_total": 0.5370099544525146 }, { "epoch": 2.4103660165642533, "step": 9022, "train/loss_ctc": 1.2707715034484863, "train/loss_error": 0.4042908251285553, "train/loss_total": 0.5775869488716125 }, { "epoch": 2.4106331819396205, "step": 9023, "train/loss_ctc": 0.6282315254211426, "train/loss_error": 0.45052629709243774, "train/loss_total": 0.48606735467910767 }, { "epoch": 2.410900347314988, "step": 9024, "train/loss_ctc": 0.5777919888496399, "train/loss_error": 0.49769604206085205, "train/loss_total": 0.5137152671813965 }, { "epoch": 2.4111675126903553, "step": 9025, "train/loss_ctc": 0.5593401789665222, "train/loss_error": 0.4890635311603546, "train/loss_total": 0.5031188726425171 }, { "epoch": 2.4114346780657225, "step": 9026, "train/loss_ctc": 0.5418918132781982, "train/loss_error": 0.467788428068161, "train/loss_total": 0.4826090931892395 }, { "epoch": 2.41170184344109, "step": 9027, "train/loss_ctc": 1.2597867250442505, "train/loss_error": 0.4958250820636749, "train/loss_total": 0.6486173868179321 }, { "epoch": 2.4119690088164574, "step": 9028, "train/loss_ctc": 0.3069170415401459, "train/loss_error": 0.38681909441947937, "train/loss_total": 0.3708386719226837 }, { "epoch": 2.4122361741918246, "step": 9029, "train/loss_ctc": 0.9892286062240601, 
"train/loss_error": 0.3727320730686188, "train/loss_total": 0.49603140354156494 }, { "epoch": 2.412503339567192, "grad_norm": 13.048813819885254, "learning_rate": 1.553459791611007e-05, "loss": 0.5082, "step": 9030 }, { "epoch": 2.412503339567192, "step": 9030, "train/loss_ctc": 0.7616807818412781, "train/loss_error": 0.46113917231559753, "train/loss_total": 0.5212475061416626 }, { "epoch": 2.4127705049425594, "step": 9031, "train/loss_ctc": 0.7433012127876282, "train/loss_error": 0.4756883680820465, "train/loss_total": 0.5292109251022339 }, { "epoch": 2.4130376703179266, "step": 9032, "train/loss_ctc": 1.0326974391937256, "train/loss_error": 0.47836601734161377, "train/loss_total": 0.589232325553894 }, { "epoch": 2.4133048356932942, "step": 9033, "train/loss_ctc": 0.5964264869689941, "train/loss_error": 0.45379477739334106, "train/loss_total": 0.4823211431503296 }, { "epoch": 2.4135720010686614, "step": 9034, "train/loss_ctc": 0.7551835179328918, "train/loss_error": 0.48060280084609985, "train/loss_total": 0.5355189442634583 }, { "epoch": 2.4138391664440286, "step": 9035, "train/loss_ctc": 0.9819808006286621, "train/loss_error": 0.4509338140487671, "train/loss_total": 0.5571432113647461 }, { "epoch": 2.4141063318193963, "step": 9036, "train/loss_ctc": 0.4424620568752289, "train/loss_error": 0.5409480333328247, "train/loss_total": 0.52125084400177 }, { "epoch": 2.4143734971947635, "step": 9037, "train/loss_ctc": 0.8695678114891052, "train/loss_error": 0.48777687549591064, "train/loss_total": 0.5641350746154785 }, { "epoch": 2.414640662570131, "step": 9038, "train/loss_ctc": 0.7140277028083801, "train/loss_error": 0.4497297406196594, "train/loss_total": 0.5025893449783325 }, { "epoch": 2.4149078279454983, "step": 9039, "train/loss_ctc": 0.7664343118667603, "train/loss_error": 0.5313103199005127, "train/loss_total": 0.5783351063728333 }, { "epoch": 2.4151749933208655, "grad_norm": 1.15925931930542, "learning_rate": 1.5518567993588028e-05, "loss": 0.5381, "step": 9040 
}, { "epoch": 2.4151749933208655, "step": 9040, "train/loss_ctc": 0.24329522252082825, "train/loss_error": 0.40713077783584595, "train/loss_total": 0.3743636906147003 }, { "epoch": 2.415442158696233, "step": 9041, "train/loss_ctc": 0.578736424446106, "train/loss_error": 0.47518277168273926, "train/loss_total": 0.49589353799819946 }, { "epoch": 2.4157093240716003, "step": 9042, "train/loss_ctc": 0.4389342665672302, "train/loss_error": 0.43633556365966797, "train/loss_total": 0.4368553161621094 }, { "epoch": 2.4159764894469675, "step": 9043, "train/loss_ctc": 0.8399797081947327, "train/loss_error": 0.515022337436676, "train/loss_total": 0.5800138115882874 }, { "epoch": 2.416243654822335, "step": 9044, "train/loss_ctc": 1.3079628944396973, "train/loss_error": 0.48321273922920227, "train/loss_total": 0.6481627821922302 }, { "epoch": 2.4165108201977024, "step": 9045, "train/loss_ctc": 0.5713715553283691, "train/loss_error": 0.44936180114746094, "train/loss_total": 0.47376376390457153 }, { "epoch": 2.4167779855730696, "step": 9046, "train/loss_ctc": 0.8125132918357849, "train/loss_error": 0.5153653621673584, "train/loss_total": 0.5747949481010437 }, { "epoch": 2.417045150948437, "step": 9047, "train/loss_ctc": 0.48918014764785767, "train/loss_error": 0.44482073187828064, "train/loss_total": 0.45369261503219604 }, { "epoch": 2.4173123163238044, "step": 9048, "train/loss_ctc": 0.7859365940093994, "train/loss_error": 0.43359261751174927, "train/loss_total": 0.5040614008903503 }, { "epoch": 2.417579481699172, "step": 9049, "train/loss_ctc": 0.8393857479095459, "train/loss_error": 0.5122162699699402, "train/loss_total": 0.5776501893997192 }, { "epoch": 2.4178466470745392, "grad_norm": 3.3769593238830566, "learning_rate": 1.550253807106599e-05, "loss": 0.5119, "step": 9050 }, { "epoch": 2.4178466470745392, "step": 9050, "train/loss_ctc": 0.18821218609809875, "train/loss_error": 0.39866891503334045, "train/loss_total": 0.3565775752067566 }, { "epoch": 2.4181138124499064, 
"step": 9051, "train/loss_ctc": 1.266664981842041, "train/loss_error": 0.5536276698112488, "train/loss_total": 0.696235179901123 }, { "epoch": 2.418380977825274, "step": 9052, "train/loss_ctc": 0.5871955156326294, "train/loss_error": 0.43046218156814575, "train/loss_total": 0.46180886030197144 }, { "epoch": 2.4186481432006413, "step": 9053, "train/loss_ctc": 0.6154877543449402, "train/loss_error": 0.45161181688308716, "train/loss_total": 0.48438704013824463 }, { "epoch": 2.4189153085760084, "step": 9054, "train/loss_ctc": 0.7910863161087036, "train/loss_error": 0.4774497449398041, "train/loss_total": 0.540177047252655 }, { "epoch": 2.419182473951376, "step": 9055, "train/loss_ctc": 0.8558899760246277, "train/loss_error": 0.41395244002342224, "train/loss_total": 0.5023399591445923 }, { "epoch": 2.4194496393267433, "step": 9056, "train/loss_ctc": 1.0552072525024414, "train/loss_error": 0.5100743174552917, "train/loss_total": 0.6191009283065796 }, { "epoch": 2.4197168047021105, "step": 9057, "train/loss_ctc": 1.7225208282470703, "train/loss_error": 0.4453686773777008, "train/loss_total": 0.7007991075515747 }, { "epoch": 2.419983970077478, "step": 9058, "train/loss_ctc": 0.2910909056663513, "train/loss_error": 0.411573588848114, "train/loss_total": 0.3874770700931549 }, { "epoch": 2.4202511354528453, "step": 9059, "train/loss_ctc": 0.652947187423706, "train/loss_error": 0.430734783411026, "train/loss_total": 0.4751772880554199 }, { "epoch": 2.4205183008282125, "grad_norm": 2.709035634994507, "learning_rate": 1.548650814854395e-05, "loss": 0.5224, "step": 9060 }, { "epoch": 2.4205183008282125, "step": 9060, "train/loss_ctc": 0.9115457534790039, "train/loss_error": 0.4540177583694458, "train/loss_total": 0.5455234050750732 }, { "epoch": 2.42078546620358, "step": 9061, "train/loss_ctc": 1.0383880138397217, "train/loss_error": 0.5439254641532898, "train/loss_total": 0.6428179740905762 }, { "epoch": 2.4210526315789473, "step": 9062, "train/loss_ctc": 0.3040972948074341, 
"train/loss_error": 0.5509980916976929, "train/loss_total": 0.501617968082428 }, { "epoch": 2.4213197969543145, "step": 9063, "train/loss_ctc": 0.5529671311378479, "train/loss_error": 0.4571380019187927, "train/loss_total": 0.4763038456439972 }, { "epoch": 2.421586962329682, "step": 9064, "train/loss_ctc": 0.9054356813430786, "train/loss_error": 0.4371642768383026, "train/loss_total": 0.5308185815811157 }, { "epoch": 2.4218541277050494, "step": 9065, "train/loss_ctc": 0.5139827728271484, "train/loss_error": 0.4164409637451172, "train/loss_total": 0.43594932556152344 }, { "epoch": 2.4221212930804166, "step": 9066, "train/loss_ctc": 0.732248067855835, "train/loss_error": 0.4590640366077423, "train/loss_total": 0.5137008428573608 }, { "epoch": 2.422388458455784, "step": 9067, "train/loss_ctc": 1.1690343618392944, "train/loss_error": 0.47851356863975525, "train/loss_total": 0.616617739200592 }, { "epoch": 2.4226556238311514, "step": 9068, "train/loss_ctc": 0.786953330039978, "train/loss_error": 0.3837972581386566, "train/loss_total": 0.46442848443984985 }, { "epoch": 2.4229227892065186, "step": 9069, "train/loss_ctc": 0.43706780672073364, "train/loss_error": 0.4247046113014221, "train/loss_total": 0.4271772503852844 }, { "epoch": 2.4231899545818862, "grad_norm": 1.6044061183929443, "learning_rate": 1.547047822602191e-05, "loss": 0.5155, "step": 9070 }, { "epoch": 2.4231899545818862, "step": 9070, "train/loss_ctc": 0.34775835275650024, "train/loss_error": 0.5017927289009094, "train/loss_total": 0.47098585963249207 }, { "epoch": 2.4234571199572534, "step": 9071, "train/loss_ctc": 0.9207496643066406, "train/loss_error": 0.45095568895339966, "train/loss_total": 0.5449144840240479 }, { "epoch": 2.423724285332621, "step": 9072, "train/loss_ctc": 0.5551434755325317, "train/loss_error": 0.375684529542923, "train/loss_total": 0.4115763306617737 }, { "epoch": 2.4239914507079883, "step": 9073, "train/loss_ctc": 0.8903206586837769, "train/loss_error": 0.48827415704727173, 
"train/loss_total": 0.5686835050582886 }, { "epoch": 2.4242586160833555, "step": 9074, "train/loss_ctc": 0.6849173307418823, "train/loss_error": 0.48883646726608276, "train/loss_total": 0.5280526280403137 }, { "epoch": 2.424525781458723, "step": 9075, "train/loss_ctc": 0.5998228788375854, "train/loss_error": 0.4363344609737396, "train/loss_total": 0.4690321385860443 }, { "epoch": 2.4247929468340903, "step": 9076, "train/loss_ctc": 0.8356368541717529, "train/loss_error": 0.4533141553401947, "train/loss_total": 0.5297787189483643 }, { "epoch": 2.4250601122094575, "step": 9077, "train/loss_ctc": 1.4954843521118164, "train/loss_error": 0.5000455975532532, "train/loss_total": 0.6991333961486816 }, { "epoch": 2.425327277584825, "step": 9078, "train/loss_ctc": 0.6857540607452393, "train/loss_error": 0.4526180624961853, "train/loss_total": 0.499245285987854 }, { "epoch": 2.4255944429601923, "step": 9079, "train/loss_ctc": 0.5023662447929382, "train/loss_error": 0.33193618059158325, "train/loss_total": 0.3660221993923187 }, { "epoch": 2.4258616083355595, "grad_norm": 1.3172054290771484, "learning_rate": 1.5454448303499867e-05, "loss": 0.5087, "step": 9080 }, { "epoch": 2.4258616083355595, "step": 9080, "train/loss_ctc": 0.6803572177886963, "train/loss_error": 0.47928470373153687, "train/loss_total": 0.5194991827011108 }, { "epoch": 2.426128773710927, "step": 9081, "train/loss_ctc": 0.2993653416633606, "train/loss_error": 0.46088942885398865, "train/loss_total": 0.42858463525772095 }, { "epoch": 2.4263959390862944, "step": 9082, "train/loss_ctc": 0.6196599006652832, "train/loss_error": 0.45222708582878113, "train/loss_total": 0.4857136607170105 }, { "epoch": 2.426663104461662, "step": 9083, "train/loss_ctc": 0.5887343883514404, "train/loss_error": 0.43005692958831787, "train/loss_total": 0.4617924392223358 }, { "epoch": 2.426930269837029, "step": 9084, "train/loss_ctc": 0.6783754229545593, "train/loss_error": 0.4350287616252899, "train/loss_total": 0.48369812965393066 }, { 
"epoch": 2.4271974352123964, "step": 9085, "train/loss_ctc": 0.47273147106170654, "train/loss_error": 0.4712720215320587, "train/loss_total": 0.4715639054775238 }, { "epoch": 2.427464600587764, "step": 9086, "train/loss_ctc": 0.5730319619178772, "train/loss_error": 0.44020307064056396, "train/loss_total": 0.46676886081695557 }, { "epoch": 2.4277317659631312, "step": 9087, "train/loss_ctc": 0.5464560985565186, "train/loss_error": 0.4507921040058136, "train/loss_total": 0.4699249267578125 }, { "epoch": 2.4279989313384984, "step": 9088, "train/loss_ctc": 1.4066650867462158, "train/loss_error": 0.5299494862556458, "train/loss_total": 0.7052925825119019 }, { "epoch": 2.428266096713866, "step": 9089, "train/loss_ctc": 0.416475772857666, "train/loss_error": 0.49138855934143066, "train/loss_total": 0.4764060080051422 }, { "epoch": 2.4285332620892333, "grad_norm": 2.596073865890503, "learning_rate": 1.5438418380977825e-05, "loss": 0.4969, "step": 9090 }, { "epoch": 2.4285332620892333, "step": 9090, "train/loss_ctc": 0.48584967851638794, "train/loss_error": 0.44677743315696716, "train/loss_total": 0.45459190011024475 }, { "epoch": 2.4288004274646005, "step": 9091, "train/loss_ctc": 0.4793277680873871, "train/loss_error": 0.44663745164871216, "train/loss_total": 0.45317551493644714 }, { "epoch": 2.429067592839968, "step": 9092, "train/loss_ctc": 0.5199800729751587, "train/loss_error": 0.4157702624797821, "train/loss_total": 0.43661221861839294 }, { "epoch": 2.4293347582153353, "step": 9093, "train/loss_ctc": 0.5092406272888184, "train/loss_error": 0.4362345337867737, "train/loss_total": 0.4508357644081116 }, { "epoch": 2.4296019235907025, "step": 9094, "train/loss_ctc": 1.2662849426269531, "train/loss_error": 0.49766236543655396, "train/loss_total": 0.6513869166374207 }, { "epoch": 2.42986908896607, "step": 9095, "train/loss_ctc": 0.8320835828781128, "train/loss_error": 0.48068711161613464, "train/loss_total": 0.5509664416313171 }, { "epoch": 2.4301362543414373, "step": 9096, 
"train/loss_ctc": 0.40719175338745117, "train/loss_error": 0.4182228147983551, "train/loss_total": 0.4160166084766388 }, { "epoch": 2.4304034197168045, "step": 9097, "train/loss_ctc": 0.8561059236526489, "train/loss_error": 0.44236546754837036, "train/loss_total": 0.525113582611084 }, { "epoch": 2.430670585092172, "step": 9098, "train/loss_ctc": 0.6821677684783936, "train/loss_error": 0.4321766495704651, "train/loss_total": 0.4821748733520508 }, { "epoch": 2.4309377504675393, "step": 9099, "train/loss_ctc": 0.8712894916534424, "train/loss_error": 0.5107067823410034, "train/loss_total": 0.5828233361244202 }, { "epoch": 2.4312049158429065, "grad_norm": 2.041922092437744, "learning_rate": 1.5422388458455787e-05, "loss": 0.5004, "step": 9100 }, { "epoch": 2.4312049158429065, "step": 9100, "train/loss_ctc": 0.6099515557289124, "train/loss_error": 0.46098193526268005, "train/loss_total": 0.4907758831977844 }, { "epoch": 2.431472081218274, "step": 9101, "train/loss_ctc": 1.084377408027649, "train/loss_error": 0.4694567918777466, "train/loss_total": 0.5924409031867981 }, { "epoch": 2.4317392465936414, "step": 9102, "train/loss_ctc": 0.6031240224838257, "train/loss_error": 0.5105419158935547, "train/loss_total": 0.5290583372116089 }, { "epoch": 2.4320064119690086, "step": 9103, "train/loss_ctc": 0.6089065670967102, "train/loss_error": 0.42565256357192993, "train/loss_total": 0.46230337023735046 }, { "epoch": 2.432273577344376, "step": 9104, "train/loss_ctc": 0.954999566078186, "train/loss_error": 0.5196914672851562, "train/loss_total": 0.6067531108856201 }, { "epoch": 2.4325407427197434, "step": 9105, "train/loss_ctc": 0.7204663753509521, "train/loss_error": 0.43701785802841187, "train/loss_total": 0.4937075674533844 }, { "epoch": 2.432807908095111, "step": 9106, "train/loss_ctc": 0.42936408519744873, "train/loss_error": 0.4446142613887787, "train/loss_total": 0.4415642321109772 }, { "epoch": 2.4330750734704782, "step": 9107, "train/loss_ctc": 1.2151355743408203, 
"train/loss_error": 0.5273929834365845, "train/loss_total": 0.6649415493011475 }, { "epoch": 2.4333422388458454, "step": 9108, "train/loss_ctc": 1.0798383951187134, "train/loss_error": 0.4639737010002136, "train/loss_total": 0.5871466398239136 }, { "epoch": 2.433609404221213, "step": 9109, "train/loss_ctc": 0.8637218475341797, "train/loss_error": 0.3689408302307129, "train/loss_total": 0.46789705753326416 }, { "epoch": 2.4338765695965803, "grad_norm": 6.09454870223999, "learning_rate": 1.5406358535933745e-05, "loss": 0.5337, "step": 9110 }, { "epoch": 2.4338765695965803, "step": 9110, "train/loss_ctc": 1.1133904457092285, "train/loss_error": 0.4879758358001709, "train/loss_total": 0.6130588054656982 }, { "epoch": 2.4341437349719475, "step": 9111, "train/loss_ctc": 1.3115224838256836, "train/loss_error": 0.4363761842250824, "train/loss_total": 0.6114054918289185 }, { "epoch": 2.434410900347315, "step": 9112, "train/loss_ctc": 0.9556905031204224, "train/loss_error": 0.4290279448032379, "train/loss_total": 0.5343604683876038 }, { "epoch": 2.4346780657226823, "step": 9113, "train/loss_ctc": 1.0825966596603394, "train/loss_error": 0.5157408118247986, "train/loss_total": 0.6291120052337646 }, { "epoch": 2.4349452310980495, "step": 9114, "train/loss_ctc": 0.6612380743026733, "train/loss_error": 0.49737784266471863, "train/loss_total": 0.5301498770713806 }, { "epoch": 2.435212396473417, "step": 9115, "train/loss_ctc": 0.8498539924621582, "train/loss_error": 0.44494113326072693, "train/loss_total": 0.5259237289428711 }, { "epoch": 2.4354795618487843, "step": 9116, "train/loss_ctc": 0.6536152362823486, "train/loss_error": 0.46836844086647034, "train/loss_total": 0.5054178237915039 }, { "epoch": 2.435746727224152, "step": 9117, "train/loss_ctc": 0.34661418199539185, "train/loss_error": 0.4734671711921692, "train/loss_total": 0.4480965733528137 }, { "epoch": 2.436013892599519, "step": 9118, "train/loss_ctc": 0.6183946132659912, "train/loss_error": 0.43869665265083313, 
"train/loss_total": 0.4746362566947937 }, { "epoch": 2.4362810579748864, "step": 9119, "train/loss_ctc": 0.891080915927887, "train/loss_error": 0.4581616520881653, "train/loss_total": 0.5447455048561096 }, { "epoch": 2.436548223350254, "grad_norm": 1.681800365447998, "learning_rate": 1.5390328613411703e-05, "loss": 0.5417, "step": 9120 }, { "epoch": 2.436548223350254, "step": 9120, "train/loss_ctc": 1.401758074760437, "train/loss_error": 0.4507577121257782, "train/loss_total": 0.640957772731781 }, { "epoch": 2.436815388725621, "step": 9121, "train/loss_ctc": 0.5680598616600037, "train/loss_error": 0.4568409323692322, "train/loss_total": 0.47908473014831543 }, { "epoch": 2.4370825541009884, "step": 9122, "train/loss_ctc": 0.7728517651557922, "train/loss_error": 0.5227459669113159, "train/loss_total": 0.5727671384811401 }, { "epoch": 2.437349719476356, "step": 9123, "train/loss_ctc": 0.6086862087249756, "train/loss_error": 0.47466158866882324, "train/loss_total": 0.5014665126800537 }, { "epoch": 2.4376168848517232, "step": 9124, "train/loss_ctc": 0.5254898071289062, "train/loss_error": 0.43217265605926514, "train/loss_total": 0.4508361220359802 }, { "epoch": 2.4378840502270904, "step": 9125, "train/loss_ctc": 0.978897213935852, "train/loss_error": 0.3945298492908478, "train/loss_total": 0.5114033222198486 }, { "epoch": 2.438151215602458, "step": 9126, "train/loss_ctc": 0.6410201787948608, "train/loss_error": 0.43046289682388306, "train/loss_total": 0.4725743532180786 }, { "epoch": 2.4384183809778253, "step": 9127, "train/loss_ctc": 1.4430650472640991, "train/loss_error": 0.4393824338912964, "train/loss_total": 0.6401189565658569 }, { "epoch": 2.4386855463531925, "step": 9128, "train/loss_ctc": 0.8233945369720459, "train/loss_error": 0.43492430448532104, "train/loss_total": 0.512618362903595 }, { "epoch": 2.43895271172856, "step": 9129, "train/loss_ctc": 0.622744083404541, "train/loss_error": 0.42579036951065063, "train/loss_total": 0.4651811122894287 }, { "epoch": 
2.4392198771039273, "grad_norm": 3.7722043991088867, "learning_rate": 1.537429869088966e-05, "loss": 0.5247, "step": 9130 }, { "epoch": 2.4392198771039273, "step": 9130, "train/loss_ctc": 0.7943919897079468, "train/loss_error": 0.49311313033103943, "train/loss_total": 0.5533689260482788 }, { "epoch": 2.4394870424792945, "step": 9131, "train/loss_ctc": 0.9739316701889038, "train/loss_error": 0.42948582768440247, "train/loss_total": 0.5383750200271606 }, { "epoch": 2.439754207854662, "step": 9132, "train/loss_ctc": 1.371199369430542, "train/loss_error": 0.4566040635108948, "train/loss_total": 0.6395231485366821 }, { "epoch": 2.4400213732300293, "step": 9133, "train/loss_ctc": 2.1189894676208496, "train/loss_error": 0.5350337028503418, "train/loss_total": 0.8518248796463013 }, { "epoch": 2.4402885386053965, "step": 9134, "train/loss_ctc": 1.2388426065444946, "train/loss_error": 0.4306705594062805, "train/loss_total": 0.5923049449920654 }, { "epoch": 2.440555703980764, "step": 9135, "train/loss_ctc": 0.4257834255695343, "train/loss_error": 0.4752845764160156, "train/loss_total": 0.4653843343257904 }, { "epoch": 2.4408228693561314, "step": 9136, "train/loss_ctc": 1.0192739963531494, "train/loss_error": 0.46332231163978577, "train/loss_total": 0.5745126605033875 }, { "epoch": 2.4410900347314985, "step": 9137, "train/loss_ctc": 0.41911619901657104, "train/loss_error": 0.4212226867675781, "train/loss_total": 0.42080140113830566 }, { "epoch": 2.441357200106866, "step": 9138, "train/loss_ctc": 0.35385340452194214, "train/loss_error": 0.4335615932941437, "train/loss_total": 0.4176199734210968 }, { "epoch": 2.4416243654822334, "step": 9139, "train/loss_ctc": 0.6961803436279297, "train/loss_error": 0.4445902109146118, "train/loss_total": 0.49490827322006226 }, { "epoch": 2.441891530857601, "grad_norm": 1.6758689880371094, "learning_rate": 1.535826876836762e-05, "loss": 0.5549, "step": 9140 }, { "epoch": 2.441891530857601, "step": 9140, "train/loss_ctc": 0.48298606276512146, 
"train/loss_error": 0.4185805320739746, "train/loss_total": 0.4314616620540619 }, { "epoch": 2.442158696232968, "step": 9141, "train/loss_ctc": 0.5025110840797424, "train/loss_error": 0.4078589081764221, "train/loss_total": 0.4267893433570862 }, { "epoch": 2.4424258616083354, "step": 9142, "train/loss_ctc": 0.6677731275558472, "train/loss_error": 0.4537396728992462, "train/loss_total": 0.4965463876724243 }, { "epoch": 2.442693026983703, "step": 9143, "train/loss_ctc": 1.0874255895614624, "train/loss_error": 0.49322310090065, "train/loss_total": 0.6120635867118835 }, { "epoch": 2.4429601923590702, "step": 9144, "train/loss_ctc": 0.5067290663719177, "train/loss_error": 0.45592010021209717, "train/loss_total": 0.4660818874835968 }, { "epoch": 2.4432273577344374, "step": 9145, "train/loss_ctc": 0.8973620533943176, "train/loss_error": 0.45147213339805603, "train/loss_total": 0.5406501293182373 }, { "epoch": 2.443494523109805, "step": 9146, "train/loss_ctc": 1.8042593002319336, "train/loss_error": 0.4715680480003357, "train/loss_total": 0.7381063103675842 }, { "epoch": 2.4437616884851723, "step": 9147, "train/loss_ctc": 0.5048542618751526, "train/loss_error": 0.4090770184993744, "train/loss_total": 0.42823249101638794 }, { "epoch": 2.44402885386054, "step": 9148, "train/loss_ctc": 0.5908308625221252, "train/loss_error": 0.48509350419044495, "train/loss_total": 0.506240963935852 }, { "epoch": 2.444296019235907, "step": 9149, "train/loss_ctc": 1.307370662689209, "train/loss_error": 0.45795953273773193, "train/loss_total": 0.6278417706489563 }, { "epoch": 2.4445631846112743, "grad_norm": 3.3555753231048584, "learning_rate": 1.5342238845845577e-05, "loss": 0.5274, "step": 9150 }, { "epoch": 2.4445631846112743, "step": 9150, "train/loss_ctc": 0.9580427408218384, "train/loss_error": 0.4649091958999634, "train/loss_total": 0.5635359287261963 }, { "epoch": 2.444830349986642, "step": 9151, "train/loss_ctc": 0.6944926977157593, "train/loss_error": 0.43725746870040894, 
"train/loss_total": 0.48870450258255005 }, { "epoch": 2.445097515362009, "step": 9152, "train/loss_ctc": 1.1839427947998047, "train/loss_error": 0.42260077595710754, "train/loss_total": 0.5748691558837891 }, { "epoch": 2.4453646807373763, "step": 9153, "train/loss_ctc": 0.508641242980957, "train/loss_error": 0.4651707410812378, "train/loss_total": 0.4738648533821106 }, { "epoch": 2.445631846112744, "step": 9154, "train/loss_ctc": 1.0479060411453247, "train/loss_error": 0.478763222694397, "train/loss_total": 0.5925918221473694 }, { "epoch": 2.445899011488111, "step": 9155, "train/loss_ctc": 1.30635666847229, "train/loss_error": 0.4204586446285248, "train/loss_total": 0.5976382493972778 }, { "epoch": 2.4461661768634784, "step": 9156, "train/loss_ctc": 0.9613265991210938, "train/loss_error": 0.4514690041542053, "train/loss_total": 0.553440511226654 }, { "epoch": 2.446433342238846, "step": 9157, "train/loss_ctc": 0.3996332883834839, "train/loss_error": 0.43792885541915894, "train/loss_total": 0.4302697479724884 }, { "epoch": 2.446700507614213, "step": 9158, "train/loss_ctc": 0.6938343048095703, "train/loss_error": 0.432753324508667, "train/loss_total": 0.4849695563316345 }, { "epoch": 2.4469676729895804, "step": 9159, "train/loss_ctc": 1.0659191608428955, "train/loss_error": 0.4166584312915802, "train/loss_total": 0.5465105772018433 }, { "epoch": 2.447234838364948, "grad_norm": 1.8202486038208008, "learning_rate": 1.532620892332354e-05, "loss": 0.5306, "step": 9160 }, { "epoch": 2.447234838364948, "step": 9160, "train/loss_ctc": 0.850692093372345, "train/loss_error": 0.4343504309654236, "train/loss_total": 0.5176187753677368 }, { "epoch": 2.4475020037403152, "step": 9161, "train/loss_ctc": 0.5823301076889038, "train/loss_error": 0.47984111309051514, "train/loss_total": 0.5003389120101929 }, { "epoch": 2.4477691691156824, "step": 9162, "train/loss_ctc": 1.6318550109863281, "train/loss_error": 0.4994819462299347, "train/loss_total": 0.7259565591812134 }, { "epoch": 
2.44803633449105, "step": 9163, "train/loss_ctc": 0.4702451527118683, "train/loss_error": 0.4712470769882202, "train/loss_total": 0.47104671597480774 }, { "epoch": 2.4483034998664173, "step": 9164, "train/loss_ctc": 0.5351256132125854, "train/loss_error": 0.4446164071559906, "train/loss_total": 0.46271824836730957 }, { "epoch": 2.4485706652417845, "step": 9165, "train/loss_ctc": 0.384168803691864, "train/loss_error": 0.386289119720459, "train/loss_total": 0.38586506247520447 }, { "epoch": 2.448837830617152, "step": 9166, "train/loss_ctc": 0.9431310892105103, "train/loss_error": 0.4455489218235016, "train/loss_total": 0.5450653433799744 }, { "epoch": 2.4491049959925193, "step": 9167, "train/loss_ctc": 1.1825031042099, "train/loss_error": 0.4386257231235504, "train/loss_total": 0.5874012112617493 }, { "epoch": 2.4493721613678865, "step": 9168, "train/loss_ctc": 0.8748775720596313, "train/loss_error": 0.43738165497779846, "train/loss_total": 0.5248808860778809 }, { "epoch": 2.449639326743254, "step": 9169, "train/loss_ctc": 0.4185263514518738, "train/loss_error": 0.4891461133956909, "train/loss_total": 0.47502216696739197 }, { "epoch": 2.4499064921186213, "grad_norm": 1.3778331279754639, "learning_rate": 1.5310179000801497e-05, "loss": 0.5196, "step": 9170 }, { "epoch": 2.4499064921186213, "step": 9170, "train/loss_ctc": 1.3047139644622803, "train/loss_error": 0.5226588845252991, "train/loss_total": 0.6790698766708374 }, { "epoch": 2.450173657493989, "step": 9171, "train/loss_ctc": 1.1253304481506348, "train/loss_error": 0.40942123532295227, "train/loss_total": 0.5526031255722046 }, { "epoch": 2.450440822869356, "step": 9172, "train/loss_ctc": 0.7374705672264099, "train/loss_error": 0.4333934783935547, "train/loss_total": 0.4942089319229126 }, { "epoch": 2.4507079882447234, "step": 9173, "train/loss_ctc": 0.5003796219825745, "train/loss_error": 0.48319360613822937, "train/loss_total": 0.4866308271884918 }, { "epoch": 2.450975153620091, "step": 9174, "train/loss_ctc": 
0.5573493242263794, "train/loss_error": 0.4440564215183258, "train/loss_total": 0.466715008020401 }, { "epoch": 2.451242318995458, "step": 9175, "train/loss_ctc": 0.7975061535835266, "train/loss_error": 0.47576239705085754, "train/loss_total": 0.5401111841201782 }, { "epoch": 2.4515094843708254, "step": 9176, "train/loss_ctc": 0.2913798987865448, "train/loss_error": 0.4572884440422058, "train/loss_total": 0.4241067171096802 }, { "epoch": 2.451776649746193, "step": 9177, "train/loss_ctc": 1.3109455108642578, "train/loss_error": 0.4778915047645569, "train/loss_total": 0.6445023417472839 }, { "epoch": 2.45204381512156, "step": 9178, "train/loss_ctc": 1.2180933952331543, "train/loss_error": 0.5360461473464966, "train/loss_total": 0.6724556088447571 }, { "epoch": 2.4523109804969274, "step": 9179, "train/loss_ctc": 0.8186280131340027, "train/loss_error": 0.43319571018218994, "train/loss_total": 0.5102821588516235 }, { "epoch": 2.452578145872295, "grad_norm": 3.358529806137085, "learning_rate": 1.5294149078279455e-05, "loss": 0.5471, "step": 9180 }, { "epoch": 2.452578145872295, "step": 9180, "train/loss_ctc": 0.9310215711593628, "train/loss_error": 0.48372119665145874, "train/loss_total": 0.5731812715530396 }, { "epoch": 2.4528453112476623, "step": 9181, "train/loss_ctc": 0.48786842823028564, "train/loss_error": 0.45888546109199524, "train/loss_total": 0.46468207240104675 }, { "epoch": 2.45311247662303, "step": 9182, "train/loss_ctc": 0.5665050745010376, "train/loss_error": 0.47872042655944824, "train/loss_total": 0.4962773621082306 }, { "epoch": 2.453379641998397, "step": 9183, "train/loss_ctc": 0.6230435371398926, "train/loss_error": 0.5423274040222168, "train/loss_total": 0.5584706664085388 }, { "epoch": 2.4536468073737643, "step": 9184, "train/loss_ctc": 1.0375754833221436, "train/loss_error": 0.4650314152240753, "train/loss_total": 0.5795402526855469 }, { "epoch": 2.453913972749132, "step": 9185, "train/loss_ctc": 1.4226621389389038, "train/loss_error": 
0.4578121602535248, "train/loss_total": 0.6507821679115295 }, { "epoch": 2.454181138124499, "step": 9186, "train/loss_ctc": 0.3353364169597626, "train/loss_error": 0.377743661403656, "train/loss_total": 0.3692622184753418 }, { "epoch": 2.4544483034998663, "step": 9187, "train/loss_ctc": 0.678106963634491, "train/loss_error": 0.5035459399223328, "train/loss_total": 0.5384581685066223 }, { "epoch": 2.454715468875234, "step": 9188, "train/loss_ctc": 1.0355629920959473, "train/loss_error": 0.43171602487564087, "train/loss_total": 0.5524854063987732 }, { "epoch": 2.454982634250601, "step": 9189, "train/loss_ctc": 0.9881619215011597, "train/loss_error": 0.4979468286037445, "train/loss_total": 0.5959898829460144 }, { "epoch": 2.4552497996259683, "grad_norm": 1.6108795404434204, "learning_rate": 1.5278119155757413e-05, "loss": 0.5379, "step": 9190 }, { "epoch": 2.4552497996259683, "step": 9190, "train/loss_ctc": 1.2757384777069092, "train/loss_error": 0.46244779229164124, "train/loss_total": 0.6251059770584106 }, { "epoch": 2.455516965001336, "step": 9191, "train/loss_ctc": 1.395577311515808, "train/loss_error": 0.5022752285003662, "train/loss_total": 0.6809356212615967 }, { "epoch": 2.455784130376703, "step": 9192, "train/loss_ctc": 0.27369314432144165, "train/loss_error": 0.40927353501319885, "train/loss_total": 0.38215747475624084 }, { "epoch": 2.4560512957520704, "step": 9193, "train/loss_ctc": 0.8545121550559998, "train/loss_error": 0.3869948089122772, "train/loss_total": 0.4804982841014862 }, { "epoch": 2.456318461127438, "step": 9194, "train/loss_ctc": 0.9353819489479065, "train/loss_error": 0.4404981732368469, "train/loss_total": 0.5394749641418457 }, { "epoch": 2.456585626502805, "step": 9195, "train/loss_ctc": 1.0815229415893555, "train/loss_error": 0.5025884509086609, "train/loss_total": 0.6183753609657288 }, { "epoch": 2.4568527918781724, "step": 9196, "train/loss_ctc": 0.6649999618530273, "train/loss_error": 0.4017810821533203, "train/loss_total": 
0.4544248580932617 }, { "epoch": 2.45711995725354, "step": 9197, "train/loss_ctc": 0.3794028162956238, "train/loss_error": 0.45593947172164917, "train/loss_total": 0.4406321346759796 }, { "epoch": 2.4573871226289072, "step": 9198, "train/loss_ctc": 0.508392333984375, "train/loss_error": 0.46242064237594604, "train/loss_total": 0.4716149866580963 }, { "epoch": 2.4576542880042744, "step": 9199, "train/loss_ctc": 0.3459216356277466, "train/loss_error": 0.49414435029029846, "train/loss_total": 0.464499831199646 }, { "epoch": 2.457921453379642, "grad_norm": 2.0381245613098145, "learning_rate": 1.526208923323537e-05, "loss": 0.5158, "step": 9200 }, { "epoch": 2.457921453379642, "step": 9200, "train/loss_ctc": 1.3638995885849, "train/loss_error": 0.43801406025886536, "train/loss_total": 0.6231911778450012 }, { "epoch": 2.4581886187550093, "step": 9201, "train/loss_ctc": 0.45074957609176636, "train/loss_error": 0.38917332887649536, "train/loss_total": 0.4014885723590851 }, { "epoch": 2.4584557841303765, "step": 9202, "train/loss_ctc": 0.9143980741500854, "train/loss_error": 0.511085033416748, "train/loss_total": 0.5917476415634155 }, { "epoch": 2.458722949505744, "step": 9203, "train/loss_ctc": 0.5813388824462891, "train/loss_error": 0.39215466380119324, "train/loss_total": 0.4299915134906769 }, { "epoch": 2.4589901148811113, "step": 9204, "train/loss_ctc": 1.346726894378662, "train/loss_error": 0.4314431846141815, "train/loss_total": 0.6144999265670776 }, { "epoch": 2.459257280256479, "step": 9205, "train/loss_ctc": 0.5759460926055908, "train/loss_error": 0.44899284839630127, "train/loss_total": 0.47438350319862366 }, { "epoch": 2.459524445631846, "step": 9206, "train/loss_ctc": 0.36838221549987793, "train/loss_error": 0.5352156758308411, "train/loss_total": 0.5018489956855774 }, { "epoch": 2.4597916110072133, "step": 9207, "train/loss_ctc": 0.8387852907180786, "train/loss_error": 0.4705423414707184, "train/loss_total": 0.5441909432411194 }, { "epoch": 2.460058776382581, 
"step": 9208, "train/loss_ctc": 1.0029213428497314, "train/loss_error": 0.4618503153324127, "train/loss_total": 0.5700645446777344 }, { "epoch": 2.460325941757948, "step": 9209, "train/loss_ctc": 1.358892560005188, "train/loss_error": 0.4476422071456909, "train/loss_total": 0.6298922896385193 }, { "epoch": 2.4605931071333154, "grad_norm": 2.0679187774658203, "learning_rate": 1.5246059310713331e-05, "loss": 0.5381, "step": 9210 }, { "epoch": 2.4605931071333154, "step": 9210, "train/loss_ctc": 0.9645170569419861, "train/loss_error": 0.4586389660835266, "train/loss_total": 0.5598145723342896 }, { "epoch": 2.460860272508683, "step": 9211, "train/loss_ctc": 0.8470377922058105, "train/loss_error": 0.5225118398666382, "train/loss_total": 0.5874170660972595 }, { "epoch": 2.46112743788405, "step": 9212, "train/loss_ctc": 0.659783124923706, "train/loss_error": 0.4365382492542267, "train/loss_total": 0.48118722438812256 }, { "epoch": 2.4613946032594174, "step": 9213, "train/loss_ctc": 1.824573278427124, "train/loss_error": 0.48114004731178284, "train/loss_total": 0.7498266696929932 }, { "epoch": 2.461661768634785, "step": 9214, "train/loss_ctc": 0.8792873620986938, "train/loss_error": 0.45454320311546326, "train/loss_total": 0.5394920110702515 }, { "epoch": 2.4619289340101522, "step": 9215, "train/loss_ctc": 0.3926103115081787, "train/loss_error": 0.4291493892669678, "train/loss_total": 0.421841561794281 }, { "epoch": 2.46219609938552, "step": 9216, "train/loss_ctc": 2.0832927227020264, "train/loss_error": 0.5417080521583557, "train/loss_total": 0.8500249981880188 }, { "epoch": 2.462463264760887, "step": 9217, "train/loss_ctc": 0.9495061039924622, "train/loss_error": 0.4347017705440521, "train/loss_total": 0.5376626253128052 }, { "epoch": 2.4627304301362543, "step": 9218, "train/loss_ctc": 0.436168909072876, "train/loss_error": 0.4184360206127167, "train/loss_total": 0.42198261618614197 }, { "epoch": 2.462997595511622, "step": 9219, "train/loss_ctc": 0.22682973742485046, 
"train/loss_error": 0.530872642993927, "train/loss_total": 0.47006407380104065 }, { "epoch": 2.463264760886989, "grad_norm": 1.2696127891540527, "learning_rate": 1.5230029388191292e-05, "loss": 0.5619, "step": 9220 }, { "epoch": 2.463264760886989, "step": 9220, "train/loss_ctc": 0.49326443672180176, "train/loss_error": 0.44998353719711304, "train/loss_total": 0.4586397409439087 }, { "epoch": 2.4635319262623563, "step": 9221, "train/loss_ctc": 0.5245181918144226, "train/loss_error": 0.4407258927822113, "train/loss_total": 0.4574843645095825 }, { "epoch": 2.463799091637724, "step": 9222, "train/loss_ctc": 1.0602848529815674, "train/loss_error": 0.45283302664756775, "train/loss_total": 0.5743234157562256 }, { "epoch": 2.464066257013091, "step": 9223, "train/loss_ctc": 0.5848987102508545, "train/loss_error": 0.47010037302970886, "train/loss_total": 0.49306005239486694 }, { "epoch": 2.4643334223884583, "step": 9224, "train/loss_ctc": 0.6313905715942383, "train/loss_error": 0.47913381457328796, "train/loss_total": 0.5095852017402649 }, { "epoch": 2.464600587763826, "step": 9225, "train/loss_ctc": 0.6073924899101257, "train/loss_error": 0.4622843563556671, "train/loss_total": 0.49130597710609436 }, { "epoch": 2.464867753139193, "step": 9226, "train/loss_ctc": 0.37367963790893555, "train/loss_error": 0.4242047369480133, "train/loss_total": 0.41409972310066223 }, { "epoch": 2.4651349185145603, "step": 9227, "train/loss_ctc": 0.9155153036117554, "train/loss_error": 0.4671860337257385, "train/loss_total": 0.5568519234657288 }, { "epoch": 2.465402083889928, "step": 9228, "train/loss_ctc": 0.5191129446029663, "train/loss_error": 0.4986530840396881, "train/loss_total": 0.5027450323104858 }, { "epoch": 2.465669249265295, "step": 9229, "train/loss_ctc": 0.5285369157791138, "train/loss_error": 0.4749879240989685, "train/loss_total": 0.4856977164745331 }, { "epoch": 2.4659364146406624, "grad_norm": 4.130960464477539, "learning_rate": 1.521399946566925e-05, "loss": 0.4944, "step": 
9230 }, { "epoch": 2.4659364146406624, "step": 9230, "train/loss_ctc": 0.457806259393692, "train/loss_error": 0.4757646322250366, "train/loss_total": 0.47217297554016113 }, { "epoch": 2.46620358001603, "step": 9231, "train/loss_ctc": 1.0363104343414307, "train/loss_error": 0.47017401456832886, "train/loss_total": 0.5834013223648071 }, { "epoch": 2.466470745391397, "step": 9232, "train/loss_ctc": 0.7241321802139282, "train/loss_error": 0.409717321395874, "train/loss_total": 0.4726003110408783 }, { "epoch": 2.4667379107667644, "step": 9233, "train/loss_ctc": 0.8048104047775269, "train/loss_error": 0.43729323148727417, "train/loss_total": 0.5107966661453247 }, { "epoch": 2.467005076142132, "step": 9234, "train/loss_ctc": 0.6129311919212341, "train/loss_error": 0.49776703119277954, "train/loss_total": 0.5207998752593994 }, { "epoch": 2.4672722415174992, "step": 9235, "train/loss_ctc": 0.5218415260314941, "train/loss_error": 0.43950769305229187, "train/loss_total": 0.4559744596481323 }, { "epoch": 2.4675394068928664, "step": 9236, "train/loss_ctc": 0.7328321933746338, "train/loss_error": 0.41470101475715637, "train/loss_total": 0.47832727432250977 }, { "epoch": 2.467806572268234, "step": 9237, "train/loss_ctc": 0.49003878235816956, "train/loss_error": 0.3693579435348511, "train/loss_total": 0.3934941291809082 }, { "epoch": 2.4680737376436013, "step": 9238, "train/loss_ctc": 0.8563099503517151, "train/loss_error": 0.5166893601417542, "train/loss_total": 0.5846135020256042 }, { "epoch": 2.468340903018969, "step": 9239, "train/loss_ctc": 0.6524012088775635, "train/loss_error": 0.4352276623249054, "train/loss_total": 0.478662371635437 }, { "epoch": 2.468608068394336, "grad_norm": 2.36834979057312, "learning_rate": 1.5197969543147209e-05, "loss": 0.4951, "step": 9240 }, { "epoch": 2.468608068394336, "step": 9240, "train/loss_ctc": 0.5832839012145996, "train/loss_error": 0.41467973589897156, "train/loss_total": 0.4484005868434906 }, { "epoch": 2.4688752337697033, "step": 
9241, "train/loss_ctc": 0.587178647518158, "train/loss_error": 0.45169752836227417, "train/loss_total": 0.478793740272522 }, { "epoch": 2.469142399145071, "step": 9242, "train/loss_ctc": 0.4670667052268982, "train/loss_error": 0.4402317404747009, "train/loss_total": 0.4455987513065338 }, { "epoch": 2.469409564520438, "step": 9243, "train/loss_ctc": 1.0995550155639648, "train/loss_error": 0.46846404671669006, "train/loss_total": 0.5946822166442871 }, { "epoch": 2.4696767298958053, "step": 9244, "train/loss_ctc": 0.554039716720581, "train/loss_error": 0.41773098707199097, "train/loss_total": 0.4449927508831024 }, { "epoch": 2.469943895271173, "step": 9245, "train/loss_ctc": 0.5897877216339111, "train/loss_error": 0.5176995992660522, "train/loss_total": 0.5321172475814819 }, { "epoch": 2.47021106064654, "step": 9246, "train/loss_ctc": 0.8981135487556458, "train/loss_error": 0.5645866990089417, "train/loss_total": 0.6312921047210693 }, { "epoch": 2.470478226021908, "step": 9247, "train/loss_ctc": 0.5115811824798584, "train/loss_error": 0.4818444550037384, "train/loss_total": 0.4877918064594269 }, { "epoch": 2.470745391397275, "step": 9248, "train/loss_ctc": 0.7542023658752441, "train/loss_error": 0.4512532651424408, "train/loss_total": 0.5118430852890015 }, { "epoch": 2.471012556772642, "step": 9249, "train/loss_ctc": 0.9125192165374756, "train/loss_error": 0.5005763173103333, "train/loss_total": 0.5829648971557617 }, { "epoch": 2.47127972214801, "grad_norm": 4.005443572998047, "learning_rate": 1.5181939620625167e-05, "loss": 0.5158, "step": 9250 }, { "epoch": 2.47127972214801, "step": 9250, "train/loss_ctc": 0.7316011190414429, "train/loss_error": 0.4807227551937103, "train/loss_total": 0.5308984518051147 }, { "epoch": 2.471546887523377, "step": 9251, "train/loss_ctc": 0.4980253279209137, "train/loss_error": 0.48287224769592285, "train/loss_total": 0.4859028458595276 }, { "epoch": 2.4718140528987442, "step": 9252, "train/loss_ctc": 0.4757983684539795, 
"train/loss_error": 0.38447105884552, "train/loss_total": 0.4027365446090698 }, { "epoch": 2.472081218274112, "step": 9253, "train/loss_ctc": 1.0105210542678833, "train/loss_error": 0.46475157141685486, "train/loss_total": 0.5739054679870605 }, { "epoch": 2.472348383649479, "step": 9254, "train/loss_ctc": 0.4834553003311157, "train/loss_error": 0.5189215540885925, "train/loss_total": 0.5118283033370972 }, { "epoch": 2.4726155490248463, "step": 9255, "train/loss_ctc": 0.9403713941574097, "train/loss_error": 0.42153120040893555, "train/loss_total": 0.5252992510795593 }, { "epoch": 2.472882714400214, "step": 9256, "train/loss_ctc": 0.5503202676773071, "train/loss_error": 0.4864141047000885, "train/loss_total": 0.4991953372955322 }, { "epoch": 2.473149879775581, "step": 9257, "train/loss_ctc": 1.505778193473816, "train/loss_error": 0.4291747510433197, "train/loss_total": 0.6444954872131348 }, { "epoch": 2.4734170451509483, "step": 9258, "train/loss_ctc": 0.9608743190765381, "train/loss_error": 0.4608926773071289, "train/loss_total": 0.5608890056610107 }, { "epoch": 2.473684210526316, "step": 9259, "train/loss_ctc": 0.31376826763153076, "train/loss_error": 0.3982500731945038, "train/loss_total": 0.3813537061214447 }, { "epoch": 2.473951375901683, "grad_norm": 5.582956314086914, "learning_rate": 1.5165909698103125e-05, "loss": 0.5117, "step": 9260 }, { "epoch": 2.473951375901683, "step": 9260, "train/loss_ctc": 1.6395305395126343, "train/loss_error": 0.47267237305641174, "train/loss_total": 0.7060440182685852 }, { "epoch": 2.4742185412770503, "step": 9261, "train/loss_ctc": 0.7127863168716431, "train/loss_error": 0.5424937605857849, "train/loss_total": 0.5765522718429565 }, { "epoch": 2.474485706652418, "step": 9262, "train/loss_ctc": 0.9705090522766113, "train/loss_error": 0.4211469888687134, "train/loss_total": 0.5310194492340088 }, { "epoch": 2.474752872027785, "step": 9263, "train/loss_ctc": 0.4322884976863861, "train/loss_error": 0.4483347237110138, 
"train/loss_total": 0.4451254904270172 }, { "epoch": 2.4750200374031524, "step": 9264, "train/loss_ctc": 0.9980629682540894, "train/loss_error": 0.4088255763053894, "train/loss_total": 0.5266730785369873 }, { "epoch": 2.47528720277852, "step": 9265, "train/loss_ctc": 0.5091472864151001, "train/loss_error": 0.4370494782924652, "train/loss_total": 0.4514690637588501 }, { "epoch": 2.475554368153887, "step": 9266, "train/loss_ctc": 0.7082902193069458, "train/loss_error": 0.4973379969596863, "train/loss_total": 0.5395284295082092 }, { "epoch": 2.4758215335292544, "step": 9267, "train/loss_ctc": 0.5499000549316406, "train/loss_error": 0.43598783016204834, "train/loss_total": 0.4587702751159668 }, { "epoch": 2.476088698904622, "step": 9268, "train/loss_ctc": 0.517922043800354, "train/loss_error": 0.48034319281578064, "train/loss_total": 0.48785898089408875 }, { "epoch": 2.476355864279989, "step": 9269, "train/loss_ctc": 0.23322007060050964, "train/loss_error": 0.47017812728881836, "train/loss_total": 0.42278650403022766 }, { "epoch": 2.476623029655357, "grad_norm": 1.5525810718536377, "learning_rate": 1.5149879775581083e-05, "loss": 0.5146, "step": 9270 }, { "epoch": 2.476623029655357, "step": 9270, "train/loss_ctc": 0.5405360460281372, "train/loss_error": 0.39838001132011414, "train/loss_total": 0.42681121826171875 }, { "epoch": 2.476890195030724, "step": 9271, "train/loss_ctc": 0.6044655442237854, "train/loss_error": 0.4051443934440613, "train/loss_total": 0.44500863552093506 }, { "epoch": 2.4771573604060912, "step": 9272, "train/loss_ctc": 0.6236693859100342, "train/loss_error": 0.44091716408729553, "train/loss_total": 0.4774675965309143 }, { "epoch": 2.477424525781459, "step": 9273, "train/loss_ctc": 0.4969867765903473, "train/loss_error": 0.5432119369506836, "train/loss_total": 0.5339668989181519 }, { "epoch": 2.477691691156826, "step": 9274, "train/loss_ctc": 0.5519598126411438, "train/loss_error": 0.4086695611476898, "train/loss_total": 0.43732762336730957 }, { 
"epoch": 2.4779588565321933, "step": 9275, "train/loss_ctc": 0.5299972295761108, "train/loss_error": 0.4650965929031372, "train/loss_total": 0.4780767261981964 }, { "epoch": 2.478226021907561, "step": 9276, "train/loss_ctc": 0.7716949582099915, "train/loss_error": 0.50266033411026, "train/loss_total": 0.5564672946929932 }, { "epoch": 2.478493187282928, "step": 9277, "train/loss_ctc": 0.37262842059135437, "train/loss_error": 0.46214985847473145, "train/loss_total": 0.4442455768585205 }, { "epoch": 2.4787603526582953, "step": 9278, "train/loss_ctc": 0.5410791635513306, "train/loss_error": 0.4376751482486725, "train/loss_total": 0.45835596323013306 }, { "epoch": 2.479027518033663, "step": 9279, "train/loss_ctc": 0.33429551124572754, "train/loss_error": 0.48789694905281067, "train/loss_total": 0.45717665553092957 }, { "epoch": 2.47929468340903, "grad_norm": 4.046041011810303, "learning_rate": 1.5133849853059046e-05, "loss": 0.4715, "step": 9280 }, { "epoch": 2.47929468340903, "step": 9280, "train/loss_ctc": 0.35926222801208496, "train/loss_error": 0.447971910238266, "train/loss_total": 0.4302299916744232 }, { "epoch": 2.479561848784398, "step": 9281, "train/loss_ctc": 1.6721965074539185, "train/loss_error": 0.4830474257469177, "train/loss_total": 0.7208772897720337 }, { "epoch": 2.479829014159765, "step": 9282, "train/loss_ctc": 0.3311113119125366, "train/loss_error": 0.47056257724761963, "train/loss_total": 0.4426723122596741 }, { "epoch": 2.480096179535132, "step": 9283, "train/loss_ctc": 1.4274741411209106, "train/loss_error": 0.41334083676338196, "train/loss_total": 0.6161675453186035 }, { "epoch": 2.4803633449105, "step": 9284, "train/loss_ctc": 1.2588036060333252, "train/loss_error": 0.49902573227882385, "train/loss_total": 0.6509813070297241 }, { "epoch": 2.480630510285867, "step": 9285, "train/loss_ctc": 0.3758862018585205, "train/loss_error": 0.4307727515697479, "train/loss_total": 0.4197954535484314 }, { "epoch": 2.480897675661234, "step": 9286, 
"train/loss_ctc": 0.8321330547332764, "train/loss_error": 0.43731123208999634, "train/loss_total": 0.5162755846977234 }, { "epoch": 2.481164841036602, "step": 9287, "train/loss_ctc": 0.6280949711799622, "train/loss_error": 0.4621815085411072, "train/loss_total": 0.4953642189502716 }, { "epoch": 2.481432006411969, "step": 9288, "train/loss_ctc": 0.50294029712677, "train/loss_error": 0.48381105065345764, "train/loss_total": 0.48763689398765564 }, { "epoch": 2.4816991717873362, "step": 9289, "train/loss_ctc": 0.4712866246700287, "train/loss_error": 0.4748555123806, "train/loss_total": 0.4741417467594147 }, { "epoch": 2.481966337162704, "grad_norm": 3.126473903656006, "learning_rate": 1.5117819930537004e-05, "loss": 0.5254, "step": 9290 }, { "epoch": 2.481966337162704, "step": 9290, "train/loss_ctc": 0.49930712580680847, "train/loss_error": 0.4359816312789917, "train/loss_total": 0.4486467242240906 }, { "epoch": 2.482233502538071, "step": 9291, "train/loss_ctc": 0.3137766718864441, "train/loss_error": 0.49427974224090576, "train/loss_total": 0.45817914605140686 }, { "epoch": 2.4825006679134383, "step": 9292, "train/loss_ctc": 0.4727534055709839, "train/loss_error": 0.45736590027809143, "train/loss_total": 0.4604434370994568 }, { "epoch": 2.482767833288806, "step": 9293, "train/loss_ctc": 0.47964951395988464, "train/loss_error": 0.40267953276634216, "train/loss_total": 0.41807353496551514 }, { "epoch": 2.483034998664173, "step": 9294, "train/loss_ctc": 0.9625906348228455, "train/loss_error": 0.4506170451641083, "train/loss_total": 0.5530117750167847 }, { "epoch": 2.4833021640395403, "step": 9295, "train/loss_ctc": 1.3413091897964478, "train/loss_error": 0.530267059803009, "train/loss_total": 0.6924754977226257 }, { "epoch": 2.483569329414908, "step": 9296, "train/loss_ctc": 0.6173421740531921, "train/loss_error": 0.468716025352478, "train/loss_total": 0.49844124913215637 }, { "epoch": 2.483836494790275, "step": 9297, "train/loss_ctc": 0.759964108467102, 
"train/loss_error": 0.4756909906864166, "train/loss_total": 0.5325456261634827 }, { "epoch": 2.4841036601656423, "step": 9298, "train/loss_ctc": 1.0678585767745972, "train/loss_error": 0.40970897674560547, "train/loss_total": 0.5413389205932617 }, { "epoch": 2.48437082554101, "step": 9299, "train/loss_ctc": 0.5519641637802124, "train/loss_error": 0.4600304067134857, "train/loss_total": 0.47841715812683105 }, { "epoch": 2.484637990916377, "grad_norm": 1.4473094940185547, "learning_rate": 1.5101790008014962e-05, "loss": 0.5082, "step": 9300 }, { "epoch": 2.484637990916377, "step": 9300, "train/loss_ctc": 0.6405333876609802, "train/loss_error": 0.40168774127960205, "train/loss_total": 0.4494568705558777 }, { "epoch": 2.4849051562917444, "step": 9301, "train/loss_ctc": 0.5015711188316345, "train/loss_error": 0.44278642535209656, "train/loss_total": 0.4545433819293976 }, { "epoch": 2.485172321667112, "step": 9302, "train/loss_ctc": 1.630083680152893, "train/loss_error": 0.5144944787025452, "train/loss_total": 0.7376123666763306 }, { "epoch": 2.485439487042479, "step": 9303, "train/loss_ctc": 0.9634318947792053, "train/loss_error": 0.4213404655456543, "train/loss_total": 0.5297587513923645 }, { "epoch": 2.485706652417847, "step": 9304, "train/loss_ctc": 0.5551797151565552, "train/loss_error": 0.4374551475048065, "train/loss_total": 0.4610000550746918 }, { "epoch": 2.485973817793214, "step": 9305, "train/loss_ctc": 0.9674831628799438, "train/loss_error": 0.39490580558776855, "train/loss_total": 0.5094212889671326 }, { "epoch": 2.486240983168581, "step": 9306, "train/loss_ctc": 0.6353594064712524, "train/loss_error": 0.4915052652359009, "train/loss_total": 0.5202760696411133 }, { "epoch": 2.486508148543949, "step": 9307, "train/loss_ctc": 0.705562949180603, "train/loss_error": 0.428930401802063, "train/loss_total": 0.48425692319869995 }, { "epoch": 2.486775313919316, "step": 9308, "train/loss_ctc": 0.4798300266265869, "train/loss_error": 0.42755016684532166, 
"train/loss_total": 0.4380061626434326 }, { "epoch": 2.4870424792946833, "step": 9309, "train/loss_ctc": 0.6354559659957886, "train/loss_error": 0.46793657541275024, "train/loss_total": 0.5014404654502869 }, { "epoch": 2.487309644670051, "grad_norm": 3.4923770427703857, "learning_rate": 1.508576008549292e-05, "loss": 0.5086, "step": 9310 }, { "epoch": 2.487309644670051, "step": 9310, "train/loss_ctc": 0.4447854161262512, "train/loss_error": 0.47503453493118286, "train/loss_total": 0.4689847230911255 }, { "epoch": 2.487576810045418, "step": 9311, "train/loss_ctc": 0.46307605504989624, "train/loss_error": 0.4568052291870117, "train/loss_total": 0.4580594003200531 }, { "epoch": 2.4878439754207853, "step": 9312, "train/loss_ctc": 0.8223638534545898, "train/loss_error": 0.43748196959495544, "train/loss_total": 0.5144583582878113 }, { "epoch": 2.488111140796153, "step": 9313, "train/loss_ctc": 0.4185846149921417, "train/loss_error": 0.45804378390312195, "train/loss_total": 0.4501519501209259 }, { "epoch": 2.48837830617152, "step": 9314, "train/loss_ctc": 0.7693749070167542, "train/loss_error": 0.46407467126846313, "train/loss_total": 0.5251347422599792 }, { "epoch": 2.4886454715468878, "step": 9315, "train/loss_ctc": 1.024630069732666, "train/loss_error": 0.3978107273578644, "train/loss_total": 0.5231746435165405 }, { "epoch": 2.488912636922255, "step": 9316, "train/loss_ctc": 0.6121760606765747, "train/loss_error": 0.38730356097221375, "train/loss_total": 0.4322780668735504 }, { "epoch": 2.489179802297622, "step": 9317, "train/loss_ctc": 0.8391512632369995, "train/loss_error": 0.49855348467826843, "train/loss_total": 0.5666730403900146 }, { "epoch": 2.48944696767299, "step": 9318, "train/loss_ctc": 0.9073750972747803, "train/loss_error": 0.3862065374851227, "train/loss_total": 0.4904402494430542 }, { "epoch": 2.489714133048357, "step": 9319, "train/loss_ctc": 1.0730116367340088, "train/loss_error": 0.44182565808296204, "train/loss_total": 0.5680628418922424 }, { 
"epoch": 2.489981298423724, "grad_norm": 1.135919451713562, "learning_rate": 1.5069730162970878e-05, "loss": 0.4997, "step": 9320 }, { "epoch": 2.489981298423724, "step": 9320, "train/loss_ctc": 0.5487881898880005, "train/loss_error": 0.4623039662837982, "train/loss_total": 0.47960081696510315 }, { "epoch": 2.490248463799092, "step": 9321, "train/loss_ctc": 0.5752438306808472, "train/loss_error": 0.45182016491889954, "train/loss_total": 0.4765048921108246 }, { "epoch": 2.490515629174459, "step": 9322, "train/loss_ctc": 0.49794071912765503, "train/loss_error": 0.42596715688705444, "train/loss_total": 0.440361887216568 }, { "epoch": 2.490782794549826, "step": 9323, "train/loss_ctc": 1.1547296047210693, "train/loss_error": 0.4978911876678467, "train/loss_total": 0.6292588710784912 }, { "epoch": 2.491049959925194, "step": 9324, "train/loss_ctc": 0.5344043970108032, "train/loss_error": 0.4772423505783081, "train/loss_total": 0.48867475986480713 }, { "epoch": 2.491317125300561, "step": 9325, "train/loss_ctc": 1.0225692987442017, "train/loss_error": 0.45972663164138794, "train/loss_total": 0.5722951889038086 }, { "epoch": 2.4915842906759282, "step": 9326, "train/loss_ctc": 0.5589461922645569, "train/loss_error": 0.5484449863433838, "train/loss_total": 0.5505452156066895 }, { "epoch": 2.491851456051296, "step": 9327, "train/loss_ctc": 1.0329582691192627, "train/loss_error": 0.47577357292175293, "train/loss_total": 0.5872105360031128 }, { "epoch": 2.492118621426663, "step": 9328, "train/loss_ctc": 1.4649262428283691, "train/loss_error": 0.4274898171424866, "train/loss_total": 0.6349771022796631 }, { "epoch": 2.4923857868020303, "step": 9329, "train/loss_ctc": 0.5514726042747498, "train/loss_error": 0.4411648213863373, "train/loss_total": 0.4632263779640198 }, { "epoch": 2.492652952177398, "grad_norm": 3.2870585918426514, "learning_rate": 1.5053700240448836e-05, "loss": 0.5323, "step": 9330 }, { "epoch": 2.492652952177398, "step": 9330, "train/loss_ctc": 0.9018294811248779, 
"train/loss_error": 0.4967365860939026, "train/loss_total": 0.5777551531791687 }, { "epoch": 2.492920117552765, "step": 9331, "train/loss_ctc": 1.0404114723205566, "train/loss_error": 0.5092450976371765, "train/loss_total": 0.6154783964157104 }, { "epoch": 2.4931872829281323, "step": 9332, "train/loss_ctc": 0.527046263217926, "train/loss_error": 0.4527636170387268, "train/loss_total": 0.4676201641559601 }, { "epoch": 2.4934544483035, "step": 9333, "train/loss_ctc": 1.6661697626113892, "train/loss_error": 0.4715811014175415, "train/loss_total": 0.7104988098144531 }, { "epoch": 2.493721613678867, "step": 9334, "train/loss_ctc": 0.9984681606292725, "train/loss_error": 0.4871899485588074, "train/loss_total": 0.5894455909729004 }, { "epoch": 2.4939887790542343, "step": 9335, "train/loss_ctc": 0.5368741750717163, "train/loss_error": 0.43415525555610657, "train/loss_total": 0.4546990692615509 }, { "epoch": 2.494255944429602, "step": 9336, "train/loss_ctc": 0.9151520729064941, "train/loss_error": 0.4677744507789612, "train/loss_total": 0.5572499632835388 }, { "epoch": 2.494523109804969, "step": 9337, "train/loss_ctc": 0.6879469156265259, "train/loss_error": 0.44835835695266724, "train/loss_total": 0.4962760806083679 }, { "epoch": 2.494790275180337, "step": 9338, "train/loss_ctc": 0.6915449500083923, "train/loss_error": 0.3914318084716797, "train/loss_total": 0.4514544606208801 }, { "epoch": 2.495057440555704, "step": 9339, "train/loss_ctc": 1.0009357929229736, "train/loss_error": 0.46662062406539917, "train/loss_total": 0.5734836459159851 }, { "epoch": 2.495324605931071, "grad_norm": 2.088775634765625, "learning_rate": 1.5037670317926798e-05, "loss": 0.5494, "step": 9340 }, { "epoch": 2.495324605931071, "step": 9340, "train/loss_ctc": 0.41768407821655273, "train/loss_error": 0.5769585371017456, "train/loss_total": 0.5451036691665649 }, { "epoch": 2.495591771306439, "step": 9341, "train/loss_ctc": 0.5974227786064148, "train/loss_error": 0.4275115430355072, 
"train/loss_total": 0.4614937901496887 }, { "epoch": 2.495858936681806, "step": 9342, "train/loss_ctc": 1.0468595027923584, "train/loss_error": 0.43332916498184204, "train/loss_total": 0.5560352206230164 }, { "epoch": 2.4961261020571732, "step": 9343, "train/loss_ctc": 1.3811159133911133, "train/loss_error": 0.48010924458503723, "train/loss_total": 0.6603106260299683 }, { "epoch": 2.496393267432541, "step": 9344, "train/loss_ctc": 1.0254169702529907, "train/loss_error": 0.5429008603096008, "train/loss_total": 0.6394040584564209 }, { "epoch": 2.496660432807908, "step": 9345, "train/loss_ctc": 1.4124081134796143, "train/loss_error": 0.4224044978618622, "train/loss_total": 0.6204052567481995 }, { "epoch": 2.4969275981832753, "step": 9346, "train/loss_ctc": 0.7633363008499146, "train/loss_error": 0.4589594602584839, "train/loss_total": 0.5198348164558411 }, { "epoch": 2.497194763558643, "step": 9347, "train/loss_ctc": 0.8162254095077515, "train/loss_error": 0.5170570015907288, "train/loss_total": 0.5768907070159912 }, { "epoch": 2.49746192893401, "step": 9348, "train/loss_ctc": 0.6592594385147095, "train/loss_error": 0.5320908427238464, "train/loss_total": 0.557524561882019 }, { "epoch": 2.4977290943093777, "step": 9349, "train/loss_ctc": 0.7357943058013916, "train/loss_error": 0.4471031427383423, "train/loss_total": 0.5048413872718811 }, { "epoch": 2.497996259684745, "grad_norm": 1.453761100769043, "learning_rate": 1.5021640395404756e-05, "loss": 0.5642, "step": 9350 }, { "epoch": 2.497996259684745, "step": 9350, "train/loss_ctc": 0.6879647970199585, "train/loss_error": 0.4460276961326599, "train/loss_total": 0.4944151043891907 }, { "epoch": 2.498263425060112, "step": 9351, "train/loss_ctc": 0.49927160143852234, "train/loss_error": 0.49976253509521484, "train/loss_total": 0.4996643662452698 }, { "epoch": 2.4985305904354798, "step": 9352, "train/loss_ctc": 0.4554430842399597, "train/loss_error": 0.44413700699806213, "train/loss_total": 0.44639822840690613 }, { "epoch": 
2.498797755810847, "step": 9353, "train/loss_ctc": 0.6225487589836121, "train/loss_error": 0.4928094148635864, "train/loss_total": 0.5187572836875916 }, { "epoch": 2.499064921186214, "step": 9354, "train/loss_ctc": 0.5491901636123657, "train/loss_error": 0.4551403522491455, "train/loss_total": 0.4739503264427185 }, { "epoch": 2.499332086561582, "step": 9355, "train/loss_ctc": 0.5291457772254944, "train/loss_error": 0.424736887216568, "train/loss_total": 0.4456186592578888 }, { "epoch": 2.499599251936949, "step": 9356, "train/loss_ctc": 0.5011764764785767, "train/loss_error": 0.43875136971473694, "train/loss_total": 0.45123642683029175 }, { "epoch": 2.499866417312316, "step": 9357, "train/loss_ctc": 0.6451167464256287, "train/loss_error": 0.446515828371048, "train/loss_total": 0.486236035823822 }, { "epoch": 2.500133582687684, "step": 9358, "train/loss_ctc": 0.6948730945587158, "train/loss_error": 0.48028701543807983, "train/loss_total": 0.5232042670249939 }, { "epoch": 2.500400748063051, "step": 9359, "train/loss_ctc": 0.5635108947753906, "train/loss_error": 0.4356726109981537, "train/loss_total": 0.461240291595459 }, { "epoch": 2.500667913438418, "grad_norm": 6.999894618988037, "learning_rate": 1.5005610472882714e-05, "loss": 0.4801, "step": 9360 }, { "epoch": 2.500667913438418, "step": 9360, "train/loss_ctc": 1.2536756992340088, "train/loss_error": 0.4492870569229126, "train/loss_total": 0.6101647615432739 }, { "epoch": 2.500935078813786, "step": 9361, "train/loss_ctc": 0.41803187131881714, "train/loss_error": 0.43534475564956665, "train/loss_total": 0.43188220262527466 }, { "epoch": 2.501202244189153, "step": 9362, "train/loss_ctc": 0.4250950217247009, "train/loss_error": 0.4711039066314697, "train/loss_total": 0.4619021415710449 }, { "epoch": 2.5014694095645202, "step": 9363, "train/loss_ctc": 0.660265326499939, "train/loss_error": 0.4775334894657135, "train/loss_total": 0.5140798687934875 }, { "epoch": 2.501736574939888, "step": 9364, "train/loss_ctc": 
0.5524958372116089, "train/loss_error": 0.44256913661956787, "train/loss_total": 0.46455448865890503 }, { "epoch": 2.502003740315255, "step": 9365, "train/loss_ctc": 0.7048191428184509, "train/loss_error": 0.42881515622138977, "train/loss_total": 0.48401594161987305 }, { "epoch": 2.5022709056906223, "step": 9366, "train/loss_ctc": 0.6418442726135254, "train/loss_error": 0.425237238407135, "train/loss_total": 0.4685586392879486 }, { "epoch": 2.50253807106599, "step": 9367, "train/loss_ctc": 0.6426703929901123, "train/loss_error": 0.4842512905597687, "train/loss_total": 0.5159351229667664 }, { "epoch": 2.502805236441357, "step": 9368, "train/loss_ctc": 0.7990248203277588, "train/loss_error": 0.544159471988678, "train/loss_total": 0.59513258934021 }, { "epoch": 2.5030724018167243, "step": 9369, "train/loss_ctc": 0.6255295276641846, "train/loss_error": 0.4491065442562103, "train/loss_total": 0.48439115285873413 }, { "epoch": 2.503339567192092, "grad_norm": 2.1619701385498047, "learning_rate": 1.4989580550360674e-05, "loss": 0.5031, "step": 9370 }, { "epoch": 2.503339567192092, "step": 9370, "train/loss_ctc": 0.7786513566970825, "train/loss_error": 0.5035772919654846, "train/loss_total": 0.5585920810699463 }, { "epoch": 2.503606732567459, "step": 9371, "train/loss_ctc": 0.5800734162330627, "train/loss_error": 0.4457104504108429, "train/loss_total": 0.4725830554962158 }, { "epoch": 2.5038738979428268, "step": 9372, "train/loss_ctc": 1.1408504247665405, "train/loss_error": 0.4814513027667999, "train/loss_total": 0.613331139087677 }, { "epoch": 2.504141063318194, "step": 9373, "train/loss_ctc": 1.2700912952423096, "train/loss_error": 0.4338452219963074, "train/loss_total": 0.6010944843292236 }, { "epoch": 2.504408228693561, "step": 9374, "train/loss_ctc": 0.9176249504089355, "train/loss_error": 0.4711759388446808, "train/loss_total": 0.5604657530784607 }, { "epoch": 2.504675394068929, "step": 9375, "train/loss_ctc": 1.1511359214782715, "train/loss_error": 
0.46403613686561584, "train/loss_total": 0.6014561057090759 }, { "epoch": 2.504942559444296, "step": 9376, "train/loss_ctc": 0.3896964192390442, "train/loss_error": 0.45023179054260254, "train/loss_total": 0.43812471628189087 }, { "epoch": 2.5052097248196636, "step": 9377, "train/loss_ctc": 0.6928301453590393, "train/loss_error": 0.35538825392723083, "train/loss_total": 0.42287665605545044 }, { "epoch": 2.505476890195031, "step": 9378, "train/loss_ctc": 0.6402784585952759, "train/loss_error": 0.4411947727203369, "train/loss_total": 0.4810115098953247 }, { "epoch": 2.505744055570398, "step": 9379, "train/loss_ctc": 0.6844276785850525, "train/loss_error": 0.48215287923812866, "train/loss_total": 0.5226078629493713 }, { "epoch": 2.5060112209457657, "grad_norm": 2.0207552909851074, "learning_rate": 1.4973550627838634e-05, "loss": 0.5272, "step": 9380 }, { "epoch": 2.5060112209457657, "step": 9380, "train/loss_ctc": 0.9731742143630981, "train/loss_error": 0.4848310649394989, "train/loss_total": 0.5824996829032898 }, { "epoch": 2.506278386321133, "step": 9381, "train/loss_ctc": 0.7583959102630615, "train/loss_error": 0.40474849939346313, "train/loss_total": 0.47547799348831177 }, { "epoch": 2.5065455516965, "step": 9382, "train/loss_ctc": 0.8637843132019043, "train/loss_error": 0.3498800992965698, "train/loss_total": 0.4526609778404236 }, { "epoch": 2.5068127170718677, "step": 9383, "train/loss_ctc": 0.3690262734889984, "train/loss_error": 0.42108476161956787, "train/loss_total": 0.4106730818748474 }, { "epoch": 2.507079882447235, "step": 9384, "train/loss_ctc": 0.7589313387870789, "train/loss_error": 0.4756374657154083, "train/loss_total": 0.5322962403297424 }, { "epoch": 2.507347047822602, "step": 9385, "train/loss_ctc": 0.6811881065368652, "train/loss_error": 0.41784584522247314, "train/loss_total": 0.47051429748535156 }, { "epoch": 2.5076142131979697, "step": 9386, "train/loss_ctc": 0.8513767719268799, "train/loss_error": 0.46225622296333313, "train/loss_total": 
0.5400803089141846 }, { "epoch": 2.507881378573337, "step": 9387, "train/loss_ctc": 0.39212101697921753, "train/loss_error": 0.5460290908813477, "train/loss_total": 0.5152474641799927 }, { "epoch": 2.508148543948704, "step": 9388, "train/loss_ctc": 0.8008142113685608, "train/loss_error": 0.4852774739265442, "train/loss_total": 0.5483848452568054 }, { "epoch": 2.5084157093240718, "step": 9389, "train/loss_ctc": 0.4492177963256836, "train/loss_error": 0.45718154311180115, "train/loss_total": 0.45558881759643555 }, { "epoch": 2.508682874699439, "grad_norm": 1.5385446548461914, "learning_rate": 1.4957520705316592e-05, "loss": 0.4983, "step": 9390 }, { "epoch": 2.508682874699439, "step": 9390, "train/loss_ctc": 0.34208229184150696, "train/loss_error": 0.41432854533195496, "train/loss_total": 0.3998792767524719 }, { "epoch": 2.508950040074806, "step": 9391, "train/loss_ctc": 0.7275495529174805, "train/loss_error": 0.46485015749931335, "train/loss_total": 0.5173900723457336 }, { "epoch": 2.509217205450174, "step": 9392, "train/loss_ctc": 0.5222710967063904, "train/loss_error": 0.37389513850212097, "train/loss_total": 0.4035703241825104 }, { "epoch": 2.509484370825541, "step": 9393, "train/loss_ctc": 1.010878086090088, "train/loss_error": 0.4372716248035431, "train/loss_total": 0.5519928932189941 }, { "epoch": 2.509751536200908, "step": 9394, "train/loss_ctc": 0.8564633131027222, "train/loss_error": 0.45893701910972595, "train/loss_total": 0.5384422540664673 }, { "epoch": 2.510018701576276, "step": 9395, "train/loss_ctc": 0.7078915238380432, "train/loss_error": 0.4923768639564514, "train/loss_total": 0.5354797840118408 }, { "epoch": 2.510285866951643, "step": 9396, "train/loss_ctc": 1.532225251197815, "train/loss_error": 0.46222901344299316, "train/loss_total": 0.6762282848358154 }, { "epoch": 2.51055303232701, "step": 9397, "train/loss_ctc": 0.6856981515884399, "train/loss_error": 0.48475342988967896, "train/loss_total": 0.5249423980712891 }, { "epoch": 2.510820197702378, 
"step": 9398, "train/loss_ctc": 0.6171145439147949, "train/loss_error": 0.44414424896240234, "train/loss_total": 0.47873833775520325 }, { "epoch": 2.511087363077745, "step": 9399, "train/loss_ctc": 0.6468453407287598, "train/loss_error": 0.4358823299407959, "train/loss_total": 0.47807490825653076 }, { "epoch": 2.5113545284531122, "grad_norm": 3.4699947834014893, "learning_rate": 1.494149078279455e-05, "loss": 0.5105, "step": 9400 }, { "epoch": 2.5113545284531122, "step": 9400, "train/loss_ctc": 0.8555399775505066, "train/loss_error": 0.49944373965263367, "train/loss_total": 0.5706629753112793 }, { "epoch": 2.51162169382848, "step": 9401, "train/loss_ctc": 0.2557274103164673, "train/loss_error": 0.48571711778640747, "train/loss_total": 0.43971920013427734 }, { "epoch": 2.511888859203847, "step": 9402, "train/loss_ctc": 1.0515934228897095, "train/loss_error": 0.4426054358482361, "train/loss_total": 0.5644030570983887 }, { "epoch": 2.5121560245792143, "step": 9403, "train/loss_ctc": 1.3016822338104248, "train/loss_error": 0.43177470564842224, "train/loss_total": 0.6057562232017517 }, { "epoch": 2.512423189954582, "step": 9404, "train/loss_ctc": 0.5326756238937378, "train/loss_error": 0.4565456211566925, "train/loss_total": 0.47177162766456604 }, { "epoch": 2.512690355329949, "step": 9405, "train/loss_ctc": 1.0465924739837646, "train/loss_error": 0.3920683264732361, "train/loss_total": 0.5229731798171997 }, { "epoch": 2.5129575207053167, "step": 9406, "train/loss_ctc": 0.6919267773628235, "train/loss_error": 0.4789993166923523, "train/loss_total": 0.5215848088264465 }, { "epoch": 2.513224686080684, "step": 9407, "train/loss_ctc": 1.102789044380188, "train/loss_error": 0.4229346513748169, "train/loss_total": 0.5589055418968201 }, { "epoch": 2.513491851456051, "step": 9408, "train/loss_ctc": 0.38991934061050415, "train/loss_error": 0.4272631108760834, "train/loss_total": 0.41979438066482544 }, { "epoch": 2.513759016831419, "step": 9409, "train/loss_ctc": 
0.5082204341888428, "train/loss_error": 0.4846515357494354, "train/loss_total": 0.4893653392791748 }, { "epoch": 2.514026182206786, "grad_norm": 2.5740363597869873, "learning_rate": 1.492546086027251e-05, "loss": 0.5165, "step": 9410 }, { "epoch": 2.514026182206786, "step": 9410, "train/loss_ctc": 0.5173968076705933, "train/loss_error": 0.4128527045249939, "train/loss_total": 0.4337615370750427 }, { "epoch": 2.5142933475821536, "step": 9411, "train/loss_ctc": 0.7340564727783203, "train/loss_error": 0.42426562309265137, "train/loss_total": 0.48622381687164307 }, { "epoch": 2.514560512957521, "step": 9412, "train/loss_ctc": 0.6611074209213257, "train/loss_error": 0.3972706198692322, "train/loss_total": 0.45003798604011536 }, { "epoch": 2.514827678332888, "step": 9413, "train/loss_ctc": 0.5824706554412842, "train/loss_error": 0.4720952808856964, "train/loss_total": 0.4941703677177429 }, { "epoch": 2.5150948437082556, "step": 9414, "train/loss_ctc": 0.8040677309036255, "train/loss_error": 0.4398212730884552, "train/loss_total": 0.5126705765724182 }, { "epoch": 2.515362009083623, "step": 9415, "train/loss_ctc": 0.5065182447433472, "train/loss_error": 0.4540197253227234, "train/loss_total": 0.4645194411277771 }, { "epoch": 2.51562917445899, "step": 9416, "train/loss_ctc": 0.7050496935844421, "train/loss_error": 0.38575035333633423, "train/loss_total": 0.44961023330688477 }, { "epoch": 2.5158963398343577, "step": 9417, "train/loss_ctc": 0.2857489287853241, "train/loss_error": 0.40317144989967346, "train/loss_total": 0.37968695163726807 }, { "epoch": 2.516163505209725, "step": 9418, "train/loss_ctc": 0.829006552696228, "train/loss_error": 0.5351893901824951, "train/loss_total": 0.5939528346061707 }, { "epoch": 2.516430670585092, "step": 9419, "train/loss_ctc": 0.9035027027130127, "train/loss_error": 0.510556161403656, "train/loss_total": 0.5891454815864563 }, { "epoch": 2.5166978359604597, "grad_norm": 1.5446968078613281, "learning_rate": 1.4909430937750468e-05, "loss": 
0.4854, "step": 9420 }, { "epoch": 2.5166978359604597, "step": 9420, "train/loss_ctc": 0.6153931617736816, "train/loss_error": 0.464572548866272, "train/loss_total": 0.4947367012500763 }, { "epoch": 2.516965001335827, "step": 9421, "train/loss_ctc": 0.7223868370056152, "train/loss_error": 0.5080008506774902, "train/loss_total": 0.5508780479431152 }, { "epoch": 2.517232166711194, "step": 9422, "train/loss_ctc": 0.546518862247467, "train/loss_error": 0.48317328095436096, "train/loss_total": 0.4958423972129822 }, { "epoch": 2.5174993320865617, "step": 9423, "train/loss_ctc": 0.4133451581001282, "train/loss_error": 0.4295698404312134, "train/loss_total": 0.42632490396499634 }, { "epoch": 2.517766497461929, "step": 9424, "train/loss_ctc": 0.9854722619056702, "train/loss_error": 0.43389204144477844, "train/loss_total": 0.5442081093788147 }, { "epoch": 2.518033662837296, "step": 9425, "train/loss_ctc": 0.5680005550384521, "train/loss_error": 0.4605428874492645, "train/loss_total": 0.4820344150066376 }, { "epoch": 2.5183008282126638, "step": 9426, "train/loss_ctc": 1.047572374343872, "train/loss_error": 0.47636690735816956, "train/loss_total": 0.5906080007553101 }, { "epoch": 2.518567993588031, "step": 9427, "train/loss_ctc": 0.5211532711982727, "train/loss_error": 0.48564133048057556, "train/loss_total": 0.49274373054504395 }, { "epoch": 2.518835158963398, "step": 9428, "train/loss_ctc": 0.39782893657684326, "train/loss_error": 0.3926544785499573, "train/loss_total": 0.3936893939971924 }, { "epoch": 2.519102324338766, "step": 9429, "train/loss_ctc": 1.0514765977859497, "train/loss_error": 0.3938939869403839, "train/loss_total": 0.525410532951355 }, { "epoch": 2.519369489714133, "grad_norm": 1.1234443187713623, "learning_rate": 1.4893401015228426e-05, "loss": 0.4996, "step": 9430 }, { "epoch": 2.519369489714133, "step": 9430, "train/loss_ctc": 0.45959416031837463, "train/loss_error": 0.44810739159584045, "train/loss_total": 0.45040473341941833 }, { "epoch": 
2.5196366550895, "step": 9431, "train/loss_ctc": 0.4513680934906006, "train/loss_error": 0.4154101610183716, "train/loss_total": 0.42260175943374634 }, { "epoch": 2.519903820464868, "step": 9432, "train/loss_ctc": 1.062979817390442, "train/loss_error": 0.5495262742042542, "train/loss_total": 0.6522170305252075 }, { "epoch": 2.520170985840235, "step": 9433, "train/loss_ctc": 0.9332782030105591, "train/loss_error": 0.4173583388328552, "train/loss_total": 0.520542323589325 }, { "epoch": 2.520438151215602, "step": 9434, "train/loss_ctc": 0.5818215608596802, "train/loss_error": 0.4063069522380829, "train/loss_total": 0.4414098858833313 }, { "epoch": 2.52070531659097, "step": 9435, "train/loss_ctc": 0.32924699783325195, "train/loss_error": 0.46513277292251587, "train/loss_total": 0.4379556179046631 }, { "epoch": 2.520972481966337, "step": 9436, "train/loss_ctc": 0.7466849088668823, "train/loss_error": 0.4890810549259186, "train/loss_total": 0.5406018495559692 }, { "epoch": 2.5212396473417042, "step": 9437, "train/loss_ctc": 0.4082579016685486, "train/loss_error": 0.3788641393184662, "train/loss_total": 0.3847429156303406 }, { "epoch": 2.521506812717072, "step": 9438, "train/loss_ctc": 0.6583866477012634, "train/loss_error": 0.4429055154323578, "train/loss_total": 0.48600175976753235 }, { "epoch": 2.521773978092439, "step": 9439, "train/loss_ctc": 1.2482019662857056, "train/loss_error": 0.5219526886940002, "train/loss_total": 0.6672025322914124 }, { "epoch": 2.5220411434678067, "grad_norm": 2.939816951751709, "learning_rate": 1.4877371092706385e-05, "loss": 0.5004, "step": 9440 }, { "epoch": 2.5220411434678067, "step": 9440, "train/loss_ctc": 0.7006420493125916, "train/loss_error": 0.40523767471313477, "train/loss_total": 0.46431857347488403 }, { "epoch": 2.522308308843174, "step": 9441, "train/loss_ctc": 1.156385898590088, "train/loss_error": 0.4129752814769745, "train/loss_total": 0.5616574287414551 }, { "epoch": 2.522575474218541, "step": 9442, "train/loss_ctc": 
0.6608984470367432, "train/loss_error": 0.5506690740585327, "train/loss_total": 0.5727149844169617 }, { "epoch": 2.5228426395939088, "step": 9443, "train/loss_ctc": 0.5204905271530151, "train/loss_error": 0.48631003499031067, "train/loss_total": 0.493146151304245 }, { "epoch": 2.523109804969276, "step": 9444, "train/loss_ctc": 1.1128857135772705, "train/loss_error": 0.49591630697250366, "train/loss_total": 0.619310200214386 }, { "epoch": 2.5233769703446436, "step": 9445, "train/loss_ctc": 0.6051380038261414, "train/loss_error": 0.4691143035888672, "train/loss_total": 0.496319055557251 }, { "epoch": 2.523644135720011, "step": 9446, "train/loss_ctc": 0.6144546270370483, "train/loss_error": 0.492023229598999, "train/loss_total": 0.5165095329284668 }, { "epoch": 2.523911301095378, "step": 9447, "train/loss_ctc": 0.5690559148788452, "train/loss_error": 0.3963583707809448, "train/loss_total": 0.43089789152145386 }, { "epoch": 2.5241784664707456, "step": 9448, "train/loss_ctc": 0.2635175287723541, "train/loss_error": 0.4331878125667572, "train/loss_total": 0.3992537558078766 }, { "epoch": 2.524445631846113, "step": 9449, "train/loss_ctc": 0.5315353870391846, "train/loss_error": 0.41967177391052246, "train/loss_total": 0.44204452633857727 }, { "epoch": 2.52471279722148, "grad_norm": 1.6350001096725464, "learning_rate": 1.4861341170184344e-05, "loss": 0.4996, "step": 9450 }, { "epoch": 2.52471279722148, "step": 9450, "train/loss_ctc": 1.2928814888000488, "train/loss_error": 0.47921353578567505, "train/loss_total": 0.6419471502304077 }, { "epoch": 2.5249799625968476, "step": 9451, "train/loss_ctc": 0.38492780923843384, "train/loss_error": 0.4401741027832031, "train/loss_total": 0.4291248619556427 }, { "epoch": 2.525247127972215, "step": 9452, "train/loss_ctc": 0.7712306976318359, "train/loss_error": 0.506295919418335, "train/loss_total": 0.5592828989028931 }, { "epoch": 2.525514293347582, "step": 9453, "train/loss_ctc": 0.5566571354866028, "train/loss_error": 
0.4741852879524231, "train/loss_total": 0.49067968130111694 }, { "epoch": 2.5257814587229497, "step": 9454, "train/loss_ctc": 0.6474428176879883, "train/loss_error": 0.41036081314086914, "train/loss_total": 0.457777202129364 }, { "epoch": 2.526048624098317, "step": 9455, "train/loss_ctc": 0.5389133095741272, "train/loss_error": 0.4735868275165558, "train/loss_total": 0.486652135848999 }, { "epoch": 2.526315789473684, "step": 9456, "train/loss_ctc": 0.6589109897613525, "train/loss_error": 0.46381184458732605, "train/loss_total": 0.5028316974639893 }, { "epoch": 2.5265829548490517, "step": 9457, "train/loss_ctc": 1.1314021348953247, "train/loss_error": 0.4886595904827118, "train/loss_total": 0.6172081232070923 }, { "epoch": 2.526850120224419, "step": 9458, "train/loss_ctc": 0.5346459746360779, "train/loss_error": 0.384758859872818, "train/loss_total": 0.414736270904541 }, { "epoch": 2.527117285599786, "step": 9459, "train/loss_ctc": 0.8359707593917847, "train/loss_error": 0.43444401025772095, "train/loss_total": 0.5147494077682495 }, { "epoch": 2.5273844509751537, "grad_norm": 2.2370967864990234, "learning_rate": 1.4845311247662303e-05, "loss": 0.5115, "step": 9460 }, { "epoch": 2.5273844509751537, "step": 9460, "train/loss_ctc": 1.6036348342895508, "train/loss_error": 0.4823872148990631, "train/loss_total": 0.7066367268562317 }, { "epoch": 2.527651616350521, "step": 9461, "train/loss_ctc": 0.5798896551132202, "train/loss_error": 0.49877801537513733, "train/loss_total": 0.5150003433227539 }, { "epoch": 2.527918781725888, "step": 9462, "train/loss_ctc": 0.9727898836135864, "train/loss_error": 0.4887983798980713, "train/loss_total": 0.5855966806411743 }, { "epoch": 2.5281859471012558, "step": 9463, "train/loss_ctc": 0.5039576292037964, "train/loss_error": 0.40544670820236206, "train/loss_total": 0.4251489043235779 }, { "epoch": 2.528453112476623, "step": 9464, "train/loss_ctc": 0.4218858480453491, "train/loss_error": 0.4849955439567566, "train/loss_total": 
0.4723736047744751 }, { "epoch": 2.52872027785199, "step": 9465, "train/loss_ctc": 0.8708692789077759, "train/loss_error": 0.4977441728115082, "train/loss_total": 0.5723692178726196 }, { "epoch": 2.528987443227358, "step": 9466, "train/loss_ctc": 1.1462082862854004, "train/loss_error": 0.4352112114429474, "train/loss_total": 0.5774106383323669 }, { "epoch": 2.529254608602725, "step": 9467, "train/loss_ctc": 0.6385296583175659, "train/loss_error": 0.41301849484443665, "train/loss_total": 0.458120733499527 }, { "epoch": 2.529521773978092, "step": 9468, "train/loss_ctc": 0.7826852798461914, "train/loss_error": 0.5268071889877319, "train/loss_total": 0.5779827833175659 }, { "epoch": 2.52978893935346, "step": 9469, "train/loss_ctc": 0.6426206231117249, "train/loss_error": 0.37651944160461426, "train/loss_total": 0.42973968386650085 }, { "epoch": 2.530056104728827, "grad_norm": 2.6474475860595703, "learning_rate": 1.4829281325140263e-05, "loss": 0.532, "step": 9470 }, { "epoch": 2.530056104728827, "step": 9470, "train/loss_ctc": 0.9634788632392883, "train/loss_error": 0.49946328997612, "train/loss_total": 0.5922664403915405 }, { "epoch": 2.5303232701041942, "step": 9471, "train/loss_ctc": 0.44438454508781433, "train/loss_error": 0.4086531102657318, "train/loss_total": 0.41579940915107727 }, { "epoch": 2.530590435479562, "step": 9472, "train/loss_ctc": 0.6912893056869507, "train/loss_error": 0.44690731167793274, "train/loss_total": 0.4957837164402008 }, { "epoch": 2.530857600854929, "step": 9473, "train/loss_ctc": 0.467498779296875, "train/loss_error": 0.507258415222168, "train/loss_total": 0.49930649995803833 }, { "epoch": 2.5311247662302967, "step": 9474, "train/loss_ctc": 0.6986446380615234, "train/loss_error": 0.48868054151535034, "train/loss_total": 0.5306733846664429 }, { "epoch": 2.531391931605664, "step": 9475, "train/loss_ctc": 0.38360896706581116, "train/loss_error": 0.4426461458206177, "train/loss_total": 0.4308387041091919 }, { "epoch": 2.5316590969810315, 
"step": 9476, "train/loss_ctc": 0.7554413080215454, "train/loss_error": 0.46917885541915894, "train/loss_total": 0.5264313220977783 }, { "epoch": 2.5319262623563987, "step": 9477, "train/loss_ctc": 1.2429287433624268, "train/loss_error": 0.4602201282978058, "train/loss_total": 0.6167618632316589 }, { "epoch": 2.532193427731766, "step": 9478, "train/loss_ctc": 0.5828344225883484, "train/loss_error": 0.44188472628593445, "train/loss_total": 0.47007468342781067 }, { "epoch": 2.5324605931071336, "step": 9479, "train/loss_ctc": 0.4227811396121979, "train/loss_error": 0.5593268871307373, "train/loss_total": 0.5320177674293518 }, { "epoch": 2.5327277584825008, "grad_norm": 2.6378979682922363, "learning_rate": 1.4813251402618221e-05, "loss": 0.511, "step": 9480 }, { "epoch": 2.5327277584825008, "step": 9480, "train/loss_ctc": 0.3215346038341522, "train/loss_error": 0.4642981290817261, "train/loss_total": 0.4357454180717468 }, { "epoch": 2.532994923857868, "step": 9481, "train/loss_ctc": 0.6100638508796692, "train/loss_error": 0.4907403886318207, "train/loss_total": 0.5146051049232483 }, { "epoch": 2.5332620892332356, "step": 9482, "train/loss_ctc": 0.8777879476547241, "train/loss_error": 0.4826396405696869, "train/loss_total": 0.5616692900657654 }, { "epoch": 2.533529254608603, "step": 9483, "train/loss_ctc": 0.6780511140823364, "train/loss_error": 0.4683414697647095, "train/loss_total": 0.5102834105491638 }, { "epoch": 2.53379641998397, "step": 9484, "train/loss_ctc": 0.2768620252609253, "train/loss_error": 0.42017513513565063, "train/loss_total": 0.39151254296302795 }, { "epoch": 2.5340635853593376, "step": 9485, "train/loss_ctc": 0.4437997341156006, "train/loss_error": 0.49412739276885986, "train/loss_total": 0.4840618669986725 }, { "epoch": 2.534330750734705, "step": 9486, "train/loss_ctc": 0.6241843104362488, "train/loss_error": 0.42332950234413147, "train/loss_total": 0.4635004699230194 }, { "epoch": 2.534597916110072, "step": 9487, "train/loss_ctc": 
0.39099040627479553, "train/loss_error": 0.46328914165496826, "train/loss_total": 0.44882938265800476 }, { "epoch": 2.5348650814854397, "step": 9488, "train/loss_ctc": 1.3871835470199585, "train/loss_error": 0.4543457627296448, "train/loss_total": 0.6409133076667786 }, { "epoch": 2.535132246860807, "step": 9489, "train/loss_ctc": 1.482877492904663, "train/loss_error": 0.4683774411678314, "train/loss_total": 0.6712774634361267 }, { "epoch": 2.535399412236174, "grad_norm": 1.450471043586731, "learning_rate": 1.479722148009618e-05, "loss": 0.5122, "step": 9490 }, { "epoch": 2.535399412236174, "step": 9490, "train/loss_ctc": 0.7647779583930969, "train/loss_error": 0.46377718448638916, "train/loss_total": 0.5239773392677307 }, { "epoch": 2.5356665776115417, "step": 9491, "train/loss_ctc": 0.9288322329521179, "train/loss_error": 0.4632602334022522, "train/loss_total": 0.5563746094703674 }, { "epoch": 2.535933742986909, "step": 9492, "train/loss_ctc": 0.4913998246192932, "train/loss_error": 0.4264774024486542, "train/loss_total": 0.439461886882782 }, { "epoch": 2.536200908362276, "step": 9493, "train/loss_ctc": 0.6336182355880737, "train/loss_error": 0.4493741989135742, "train/loss_total": 0.4862230122089386 }, { "epoch": 2.5364680737376437, "step": 9494, "train/loss_ctc": 1.3569042682647705, "train/loss_error": 0.4802562892436981, "train/loss_total": 0.6555858850479126 }, { "epoch": 2.536735239113011, "step": 9495, "train/loss_ctc": 0.4724828600883484, "train/loss_error": 0.4241727888584137, "train/loss_total": 0.43383482098579407 }, { "epoch": 2.537002404488378, "step": 9496, "train/loss_ctc": 1.0381237268447876, "train/loss_error": 0.44279220700263977, "train/loss_total": 0.5618585348129272 }, { "epoch": 2.5372695698637457, "step": 9497, "train/loss_ctc": 1.542933702468872, "train/loss_error": 0.48384177684783936, "train/loss_total": 0.6956601738929749 }, { "epoch": 2.537536735239113, "step": 9498, "train/loss_ctc": 0.7831602096557617, "train/loss_error": 
0.45020806789398193, "train/loss_total": 0.5167984962463379 }, { "epoch": 2.53780390061448, "step": 9499, "train/loss_ctc": 0.9351890683174133, "train/loss_error": 0.46300065517425537, "train/loss_total": 0.557438313961029 }, { "epoch": 2.5380710659898478, "grad_norm": 1.4035414457321167, "learning_rate": 1.4781191557574139e-05, "loss": 0.5427, "step": 9500 }, { "epoch": 2.5380710659898478, "step": 9500, "train/loss_ctc": 0.37546518445014954, "train/loss_error": 0.3955139219760895, "train/loss_total": 0.391504168510437 }, { "epoch": 2.538338231365215, "step": 9501, "train/loss_ctc": 0.5202174782752991, "train/loss_error": 0.5181134343147278, "train/loss_total": 0.518534243106842 }, { "epoch": 2.538605396740582, "step": 9502, "train/loss_ctc": 0.4115822911262512, "train/loss_error": 0.3968985676765442, "train/loss_total": 0.3998353183269501 }, { "epoch": 2.53887256211595, "step": 9503, "train/loss_ctc": 1.2180256843566895, "train/loss_error": 0.4247685968875885, "train/loss_total": 0.5834200382232666 }, { "epoch": 2.539139727491317, "step": 9504, "train/loss_ctc": 1.216639757156372, "train/loss_error": 0.518717885017395, "train/loss_total": 0.6583022475242615 }, { "epoch": 2.5394068928666846, "step": 9505, "train/loss_ctc": 0.6371245384216309, "train/loss_error": 0.4502391517162323, "train/loss_total": 0.48761624097824097 }, { "epoch": 2.539674058242052, "step": 9506, "train/loss_ctc": 0.5417466759681702, "train/loss_error": 0.48676908016204834, "train/loss_total": 0.49776458740234375 }, { "epoch": 2.539941223617419, "step": 9507, "train/loss_ctc": 1.2594304084777832, "train/loss_error": 0.5096337199211121, "train/loss_total": 0.6595931053161621 }, { "epoch": 2.5402083889927867, "step": 9508, "train/loss_ctc": 0.7612093687057495, "train/loss_error": 0.4497801959514618, "train/loss_total": 0.5120660066604614 }, { "epoch": 2.540475554368154, "step": 9509, "train/loss_ctc": 0.895916759967804, "train/loss_error": 0.45683741569519043, "train/loss_total": 
0.5446532964706421 }, { "epoch": 2.5407427197435215, "grad_norm": 3.262617588043213, "learning_rate": 1.4765161635052097e-05, "loss": 0.5253, "step": 9510 }, { "epoch": 2.5407427197435215, "step": 9510, "train/loss_ctc": 0.9461688995361328, "train/loss_error": 0.5732371807098389, "train/loss_total": 0.6478235721588135 }, { "epoch": 2.5410098851188887, "step": 9511, "train/loss_ctc": 0.5810755491256714, "train/loss_error": 0.48497772216796875, "train/loss_total": 0.5041972994804382 }, { "epoch": 2.541277050494256, "step": 9512, "train/loss_ctc": 0.7449100017547607, "train/loss_error": 0.44800040125846863, "train/loss_total": 0.507382333278656 }, { "epoch": 2.5415442158696235, "step": 9513, "train/loss_ctc": 1.2711597681045532, "train/loss_error": 0.4548967480659485, "train/loss_total": 0.6181493997573853 }, { "epoch": 2.5418113812449907, "step": 9514, "train/loss_ctc": 0.23140767216682434, "train/loss_error": 0.4494524598121643, "train/loss_total": 0.4058435261249542 }, { "epoch": 2.542078546620358, "step": 9515, "train/loss_ctc": 0.9573544859886169, "train/loss_error": 0.44771209359169006, "train/loss_total": 0.5496405959129333 }, { "epoch": 2.5423457119957256, "step": 9516, "train/loss_ctc": 1.6537232398986816, "train/loss_error": 0.4531392753124237, "train/loss_total": 0.6932560801506042 }, { "epoch": 2.5426128773710928, "step": 9517, "train/loss_ctc": 0.5553455352783203, "train/loss_error": 0.46685945987701416, "train/loss_total": 0.4845566749572754 }, { "epoch": 2.54288004274646, "step": 9518, "train/loss_ctc": 0.5830057859420776, "train/loss_error": 0.43290555477142334, "train/loss_total": 0.46292561292648315 }, { "epoch": 2.5431472081218276, "step": 9519, "train/loss_ctc": 0.8240585327148438, "train/loss_error": 0.49960559606552124, "train/loss_total": 0.5644962191581726 }, { "epoch": 2.543414373497195, "grad_norm": 7.924184799194336, "learning_rate": 1.4749131712530055e-05, "loss": 0.5438, "step": 9520 }, { "epoch": 2.543414373497195, "step": 9520, 
"train/loss_ctc": 0.7192115187644958, "train/loss_error": 0.4734613597393036, "train/loss_total": 0.5226113796234131 }, { "epoch": 2.543681538872562, "step": 9521, "train/loss_ctc": 0.42045265436172485, "train/loss_error": 0.4323714077472687, "train/loss_total": 0.42998766899108887 }, { "epoch": 2.5439487042479296, "step": 9522, "train/loss_ctc": 0.5603440403938293, "train/loss_error": 0.48446500301361084, "train/loss_total": 0.4996408224105835 }, { "epoch": 2.544215869623297, "step": 9523, "train/loss_ctc": 0.48018574714660645, "train/loss_error": 0.5002796053886414, "train/loss_total": 0.4962608516216278 }, { "epoch": 2.544483034998664, "step": 9524, "train/loss_ctc": 0.756636917591095, "train/loss_error": 0.5088358521461487, "train/loss_total": 0.5583961009979248 }, { "epoch": 2.5447502003740317, "step": 9525, "train/loss_ctc": 0.6315330862998962, "train/loss_error": 0.4664860665798187, "train/loss_total": 0.4994954764842987 }, { "epoch": 2.545017365749399, "step": 9526, "train/loss_ctc": 0.7269454002380371, "train/loss_error": 0.4506714940071106, "train/loss_total": 0.505926251411438 }, { "epoch": 2.545284531124766, "step": 9527, "train/loss_ctc": 0.2381107211112976, "train/loss_error": 0.3569272756576538, "train/loss_total": 0.3331639766693115 }, { "epoch": 2.5455516965001337, "step": 9528, "train/loss_ctc": 0.5440413951873779, "train/loss_error": 0.48294198513031006, "train/loss_total": 0.49516186118125916 }, { "epoch": 2.545818861875501, "step": 9529, "train/loss_ctc": 0.6154270768165588, "train/loss_error": 0.3960333466529846, "train/loss_total": 0.4399120807647705 }, { "epoch": 2.546086027250868, "grad_norm": 2.18766713142395, "learning_rate": 1.4733101790008015e-05, "loss": 0.4781, "step": 9530 }, { "epoch": 2.546086027250868, "step": 9530, "train/loss_ctc": 0.9427801966667175, "train/loss_error": 0.5316756367683411, "train/loss_total": 0.6138965487480164 }, { "epoch": 2.5463531926262357, "step": 9531, "train/loss_ctc": 0.5709970593452454, 
"train/loss_error": 0.39456111192703247, "train/loss_total": 0.429848313331604 }, { "epoch": 2.546620358001603, "step": 9532, "train/loss_ctc": 0.7390118837356567, "train/loss_error": 0.47145959734916687, "train/loss_total": 0.5249700546264648 }, { "epoch": 2.54688752337697, "step": 9533, "train/loss_ctc": 0.9597628712654114, "train/loss_error": 0.4523867070674896, "train/loss_total": 0.5538619160652161 }, { "epoch": 2.5471546887523377, "step": 9534, "train/loss_ctc": 0.936040997505188, "train/loss_error": 0.43588942289352417, "train/loss_total": 0.5359197854995728 }, { "epoch": 2.547421854127705, "step": 9535, "train/loss_ctc": 0.34983310103416443, "train/loss_error": 0.4564700722694397, "train/loss_total": 0.4351426661014557 }, { "epoch": 2.547689019503072, "step": 9536, "train/loss_ctc": 0.8191718459129333, "train/loss_error": 0.41781213879585266, "train/loss_total": 0.49808406829833984 }, { "epoch": 2.54795618487844, "step": 9537, "train/loss_ctc": 1.308422327041626, "train/loss_error": 0.5163537263870239, "train/loss_total": 0.6747674942016602 }, { "epoch": 2.548223350253807, "step": 9538, "train/loss_ctc": 0.34885409474372864, "train/loss_error": 0.4911673367023468, "train/loss_total": 0.46270468831062317 }, { "epoch": 2.5484905156291746, "step": 9539, "train/loss_ctc": 0.508515477180481, "train/loss_error": 0.4586387872695923, "train/loss_total": 0.4686141312122345 }, { "epoch": 2.548757681004542, "grad_norm": 1.4454106092453003, "learning_rate": 1.4717071867485973e-05, "loss": 0.5198, "step": 9540 }, { "epoch": 2.548757681004542, "step": 9540, "train/loss_ctc": 1.2329866886138916, "train/loss_error": 0.4493905007839203, "train/loss_total": 0.6061097383499146 }, { "epoch": 2.549024846379909, "step": 9541, "train/loss_ctc": 0.9978733062744141, "train/loss_error": 0.44685545563697815, "train/loss_total": 0.5570590496063232 }, { "epoch": 2.5492920117552766, "step": 9542, "train/loss_ctc": 0.7449262142181396, "train/loss_error": 0.4259961247444153, 
"train/loss_total": 0.4897821545600891 }, { "epoch": 2.549559177130644, "step": 9543, "train/loss_ctc": 1.084634780883789, "train/loss_error": 0.43652865290641785, "train/loss_total": 0.566149890422821 }, { "epoch": 2.5498263425060115, "step": 9544, "train/loss_ctc": 0.40596723556518555, "train/loss_error": 0.41634607315063477, "train/loss_total": 0.4142703115940094 }, { "epoch": 2.5500935078813787, "step": 9545, "train/loss_ctc": 1.4135680198669434, "train/loss_error": 0.41994190216064453, "train/loss_total": 0.6186671257019043 }, { "epoch": 2.550360673256746, "step": 9546, "train/loss_ctc": 1.1539844274520874, "train/loss_error": 0.4106956124305725, "train/loss_total": 0.5593534111976624 }, { "epoch": 2.5506278386321135, "step": 9547, "train/loss_ctc": 0.580436646938324, "train/loss_error": 0.5158812403678894, "train/loss_total": 0.5287923216819763 }, { "epoch": 2.5508950040074807, "step": 9548, "train/loss_ctc": 0.36265572905540466, "train/loss_error": 0.43452271819114685, "train/loss_total": 0.4201493263244629 }, { "epoch": 2.551162169382848, "step": 9549, "train/loss_ctc": 0.3893052339553833, "train/loss_error": 0.513051450252533, "train/loss_total": 0.48830220103263855 }, { "epoch": 2.5514293347582155, "grad_norm": 3.015955686569214, "learning_rate": 1.4701041944963935e-05, "loss": 0.5249, "step": 9550 }, { "epoch": 2.5514293347582155, "step": 9550, "train/loss_ctc": 0.8759094476699829, "train/loss_error": 0.4332011342048645, "train/loss_total": 0.5217428207397461 }, { "epoch": 2.5516965001335827, "step": 9551, "train/loss_ctc": 1.1405071020126343, "train/loss_error": 0.46005165576934814, "train/loss_total": 0.5961427688598633 }, { "epoch": 2.55196366550895, "step": 9552, "train/loss_ctc": 0.37381094694137573, "train/loss_error": 0.4099319279193878, "train/loss_total": 0.4027077555656433 }, { "epoch": 2.5522308308843176, "step": 9553, "train/loss_ctc": 0.5978383421897888, "train/loss_error": 0.4497726261615753, "train/loss_total": 0.47938576340675354 }, { 
"epoch": 2.5524979962596848, "step": 9554, "train/loss_ctc": 0.5686355829238892, "train/loss_error": 0.4072042405605316, "train/loss_total": 0.43949052691459656 }, { "epoch": 2.552765161635052, "step": 9555, "train/loss_ctc": 0.3133772611618042, "train/loss_error": 0.40615418553352356, "train/loss_total": 0.38759881258010864 }, { "epoch": 2.5530323270104196, "step": 9556, "train/loss_ctc": 0.8670576810836792, "train/loss_error": 0.40419599413871765, "train/loss_total": 0.49676835536956787 }, { "epoch": 2.553299492385787, "step": 9557, "train/loss_ctc": 1.4325578212738037, "train/loss_error": 0.490085631608963, "train/loss_total": 0.6785800457000732 }, { "epoch": 2.553566657761154, "step": 9558, "train/loss_ctc": 0.8039056062698364, "train/loss_error": 0.4800397455692291, "train/loss_total": 0.5448129177093506 }, { "epoch": 2.5538338231365216, "step": 9559, "train/loss_ctc": 0.500685453414917, "train/loss_error": 0.4581340253353119, "train/loss_total": 0.4666443169116974 }, { "epoch": 2.554100988511889, "grad_norm": 3.5982353687286377, "learning_rate": 1.4685012022441893e-05, "loss": 0.5014, "step": 9560 }, { "epoch": 2.554100988511889, "step": 9560, "train/loss_ctc": 0.46124333143234253, "train/loss_error": 0.38429102301597595, "train/loss_total": 0.3996815085411072 }, { "epoch": 2.554368153887256, "step": 9561, "train/loss_ctc": 0.4018302857875824, "train/loss_error": 0.410467267036438, "train/loss_total": 0.4087398648262024 }, { "epoch": 2.5546353192626237, "step": 9562, "train/loss_ctc": 0.6998421549797058, "train/loss_error": 0.46285614371299744, "train/loss_total": 0.510253369808197 }, { "epoch": 2.554902484637991, "step": 9563, "train/loss_ctc": 0.800819993019104, "train/loss_error": 0.4373962879180908, "train/loss_total": 0.5100810527801514 }, { "epoch": 2.555169650013358, "step": 9564, "train/loss_ctc": 1.4790339469909668, "train/loss_error": 0.4599105417728424, "train/loss_total": 0.6637352705001831 }, { "epoch": 2.5554368153887257, "step": 9565, 
"train/loss_ctc": 0.6886785626411438, "train/loss_error": 0.42362746596336365, "train/loss_total": 0.47663766145706177 }, { "epoch": 2.555703980764093, "step": 9566, "train/loss_ctc": 1.295849084854126, "train/loss_error": 0.4800763726234436, "train/loss_total": 0.6432309150695801 }, { "epoch": 2.55597114613946, "step": 9567, "train/loss_ctc": 1.3695744276046753, "train/loss_error": 0.4385768175125122, "train/loss_total": 0.6247763633728027 }, { "epoch": 2.5562383115148277, "step": 9568, "train/loss_ctc": 0.6758847236633301, "train/loss_error": 0.5058997869491577, "train/loss_total": 0.5398967862129211 }, { "epoch": 2.556505476890195, "step": 9569, "train/loss_ctc": 0.37972909212112427, "train/loss_error": 0.42509540915489197, "train/loss_total": 0.4160221517086029 }, { "epoch": 2.556772642265562, "grad_norm": 2.012996196746826, "learning_rate": 1.466898209991985e-05, "loss": 0.5193, "step": 9570 }, { "epoch": 2.556772642265562, "step": 9570, "train/loss_ctc": 1.1415061950683594, "train/loss_error": 0.3818378448486328, "train/loss_total": 0.5337715148925781 }, { "epoch": 2.5570398076409298, "step": 9571, "train/loss_ctc": 0.38767170906066895, "train/loss_error": 0.41123807430267334, "train/loss_total": 0.40652480721473694 }, { "epoch": 2.557306973016297, "step": 9572, "train/loss_ctc": 0.8988524675369263, "train/loss_error": 0.5035736560821533, "train/loss_total": 0.5826294422149658 }, { "epoch": 2.5575741383916646, "step": 9573, "train/loss_ctc": 1.3209997415542603, "train/loss_error": 0.4432099759578705, "train/loss_total": 0.6187679171562195 }, { "epoch": 2.557841303767032, "step": 9574, "train/loss_ctc": 1.0366946458816528, "train/loss_error": 0.5110474228858948, "train/loss_total": 0.6161768436431885 }, { "epoch": 2.5581084691423994, "step": 9575, "train/loss_ctc": 0.2779437303543091, "train/loss_error": 0.4236927926540375, "train/loss_total": 0.39454299211502075 }, { "epoch": 2.5583756345177666, "step": 9576, "train/loss_ctc": 0.9964183568954468, 
"train/loss_error": 0.43173080682754517, "train/loss_total": 0.5446683168411255 }, { "epoch": 2.558642799893134, "step": 9577, "train/loss_ctc": 0.46181994676589966, "train/loss_error": 0.3978465795516968, "train/loss_total": 0.41064125299453735 }, { "epoch": 2.5589099652685015, "step": 9578, "train/loss_ctc": 0.7311967611312866, "train/loss_error": 0.42863479256629944, "train/loss_total": 0.4891471862792969 }, { "epoch": 2.5591771306438686, "step": 9579, "train/loss_ctc": 0.9818236231803894, "train/loss_error": 0.47230473160743713, "train/loss_total": 0.5742084980010986 }, { "epoch": 2.559444296019236, "grad_norm": 2.4347012042999268, "learning_rate": 1.465295217739781e-05, "loss": 0.5171, "step": 9580 }, { "epoch": 2.559444296019236, "step": 9580, "train/loss_ctc": 0.5233144760131836, "train/loss_error": 0.5378931164741516, "train/loss_total": 0.5349774360656738 }, { "epoch": 2.5597114613946035, "step": 9581, "train/loss_ctc": 0.9447528123855591, "train/loss_error": 0.47435665130615234, "train/loss_total": 0.5684359073638916 }, { "epoch": 2.5599786267699707, "step": 9582, "train/loss_ctc": 0.8842006921768188, "train/loss_error": 0.47465020418167114, "train/loss_total": 0.5565603375434875 }, { "epoch": 2.560245792145338, "step": 9583, "train/loss_ctc": 0.4829196035861969, "train/loss_error": 0.4256417155265808, "train/loss_total": 0.43709731101989746 }, { "epoch": 2.5605129575207055, "step": 9584, "train/loss_ctc": 0.5170480608940125, "train/loss_error": 0.5365225672721863, "train/loss_total": 0.5326276421546936 }, { "epoch": 2.5607801228960727, "step": 9585, "train/loss_ctc": 0.6834321618080139, "train/loss_error": 0.3730742335319519, "train/loss_total": 0.4351457953453064 }, { "epoch": 2.56104728827144, "step": 9586, "train/loss_ctc": 0.5724294185638428, "train/loss_error": 0.48018231987953186, "train/loss_total": 0.4986317455768585 }, { "epoch": 2.5613144536468075, "step": 9587, "train/loss_ctc": 0.6106172204017639, "train/loss_error": 0.4504638910293579, 
"train/loss_total": 0.4824945628643036 }, { "epoch": 2.5615816190221747, "step": 9588, "train/loss_ctc": 1.3458552360534668, "train/loss_error": 0.4474290609359741, "train/loss_total": 0.6271142959594727 }, { "epoch": 2.561848784397542, "step": 9589, "train/loss_ctc": 0.4962305426597595, "train/loss_error": 0.42448827624320984, "train/loss_total": 0.4388367533683777 }, { "epoch": 2.5621159497729096, "grad_norm": 1.8578263521194458, "learning_rate": 1.4636922254875769e-05, "loss": 0.5112, "step": 9590 }, { "epoch": 2.5621159497729096, "step": 9590, "train/loss_ctc": 0.5692028999328613, "train/loss_error": 0.40884557366371155, "train/loss_total": 0.440917044878006 }, { "epoch": 2.5623831151482768, "step": 9591, "train/loss_ctc": 0.9755316972732544, "train/loss_error": 0.4369853436946869, "train/loss_total": 0.5446946024894714 }, { "epoch": 2.562650280523644, "step": 9592, "train/loss_ctc": 0.6755209565162659, "train/loss_error": 0.4370000660419464, "train/loss_total": 0.48470425605773926 }, { "epoch": 2.5629174458990116, "step": 9593, "train/loss_ctc": 0.8981557488441467, "train/loss_error": 0.43316635489463806, "train/loss_total": 0.5261642336845398 }, { "epoch": 2.563184611274379, "step": 9594, "train/loss_ctc": 0.8241369128227234, "train/loss_error": 0.480521559715271, "train/loss_total": 0.5492446422576904 }, { "epoch": 2.563451776649746, "step": 9595, "train/loss_ctc": 0.7208936214447021, "train/loss_error": 0.4180864989757538, "train/loss_total": 0.47864794731140137 }, { "epoch": 2.5637189420251136, "step": 9596, "train/loss_ctc": 1.3218995332717896, "train/loss_error": 0.43063247203826904, "train/loss_total": 0.6088858842849731 }, { "epoch": 2.563986107400481, "step": 9597, "train/loss_ctc": 0.5790780782699585, "train/loss_error": 0.4995456039905548, "train/loss_total": 0.5154520869255066 }, { "epoch": 2.564253272775848, "step": 9598, "train/loss_ctc": 0.4292123317718506, "train/loss_error": 0.4377327561378479, "train/loss_total": 0.4360286593437195 }, { 
"epoch": 2.5645204381512157, "step": 9599, "train/loss_ctc": 1.1148769855499268, "train/loss_error": 0.41782721877098083, "train/loss_total": 0.5572371482849121 }, { "epoch": 2.564787603526583, "grad_norm": 3.1291983127593994, "learning_rate": 1.4620892332353727e-05, "loss": 0.5142, "step": 9600 }, { "epoch": 2.564787603526583, "step": 9600, "train/loss_ctc": 0.4717910587787628, "train/loss_error": 0.4647141098976135, "train/loss_total": 0.46612951159477234 }, { "epoch": 2.56505476890195, "step": 9601, "train/loss_ctc": 0.856456458568573, "train/loss_error": 0.4352110028266907, "train/loss_total": 0.5194600820541382 }, { "epoch": 2.5653219342773177, "step": 9602, "train/loss_ctc": 0.9229875802993774, "train/loss_error": 0.47163334488868713, "train/loss_total": 0.5619041919708252 }, { "epoch": 2.565589099652685, "step": 9603, "train/loss_ctc": 0.4053037166595459, "train/loss_error": 0.4585581421852112, "train/loss_total": 0.4479072690010071 }, { "epoch": 2.5658562650280525, "step": 9604, "train/loss_ctc": 0.4297989010810852, "train/loss_error": 0.5034824013710022, "train/loss_total": 0.48874571919441223 }, { "epoch": 2.5661234304034197, "step": 9605, "train/loss_ctc": 0.5955810546875, "train/loss_error": 0.38569918274879456, "train/loss_total": 0.4276755750179291 }, { "epoch": 2.566390595778787, "step": 9606, "train/loss_ctc": 1.0732204914093018, "train/loss_error": 0.41318538784980774, "train/loss_total": 0.5451924204826355 }, { "epoch": 2.5666577611541546, "step": 9607, "train/loss_ctc": 0.5705198049545288, "train/loss_error": 0.4640851318836212, "train/loss_total": 0.4853720963001251 }, { "epoch": 2.5669249265295218, "step": 9608, "train/loss_ctc": 1.0226397514343262, "train/loss_error": 0.39979568123817444, "train/loss_total": 0.5243645310401917 }, { "epoch": 2.5671920919048894, "step": 9609, "train/loss_ctc": 0.517548143863678, "train/loss_error": 0.4076058566570282, "train/loss_total": 0.42959433794021606 }, { "epoch": 2.5674592572802566, "grad_norm": 
5.611142158508301, "learning_rate": 1.4604862409831686e-05, "loss": 0.4896, "step": 9610 }, { "epoch": 2.5674592572802566, "step": 9610, "train/loss_ctc": 0.31204459071159363, "train/loss_error": 0.435341477394104, "train/loss_total": 0.4106821119785309 }, { "epoch": 2.567726422655624, "step": 9611, "train/loss_ctc": 0.27979356050491333, "train/loss_error": 0.4827200174331665, "train/loss_total": 0.4421347379684448 }, { "epoch": 2.5679935880309914, "step": 9612, "train/loss_ctc": 0.5137763023376465, "train/loss_error": 0.3448988199234009, "train/loss_total": 0.37867432832717896 }, { "epoch": 2.5682607534063586, "step": 9613, "train/loss_ctc": 0.9771920442581177, "train/loss_error": 0.4602762758731842, "train/loss_total": 0.5636594295501709 }, { "epoch": 2.568527918781726, "step": 9614, "train/loss_ctc": 1.2381396293640137, "train/loss_error": 0.4627418518066406, "train/loss_total": 0.6178213953971863 }, { "epoch": 2.5687950841570935, "step": 9615, "train/loss_ctc": 0.55976402759552, "train/loss_error": 0.4040789306163788, "train/loss_total": 0.43521595001220703 }, { "epoch": 2.5690622495324607, "step": 9616, "train/loss_ctc": 0.5030674934387207, "train/loss_error": 0.4297390282154083, "train/loss_total": 0.4444047212600708 }, { "epoch": 2.569329414907828, "step": 9617, "train/loss_ctc": 1.3930552005767822, "train/loss_error": 0.4718531668186188, "train/loss_total": 0.6560935974121094 }, { "epoch": 2.5695965802831955, "step": 9618, "train/loss_ctc": 0.6561654806137085, "train/loss_error": 0.5620602369308472, "train/loss_total": 0.5808812975883484 }, { "epoch": 2.5698637456585627, "step": 9619, "train/loss_ctc": 0.88658607006073, "train/loss_error": 0.49302759766578674, "train/loss_total": 0.5717393159866333 }, { "epoch": 2.57013091103393, "grad_norm": 3.348534345626831, "learning_rate": 1.4588832487309645e-05, "loss": 0.5101, "step": 9620 }, { "epoch": 2.57013091103393, "step": 9620, "train/loss_ctc": 0.42708033323287964, "train/loss_error": 0.5179163217544556, 
"train/loss_total": 0.49974915385246277 }, { "epoch": 2.5703980764092975, "step": 9621, "train/loss_ctc": 0.7412451505661011, "train/loss_error": 0.40788936614990234, "train/loss_total": 0.47456052899360657 }, { "epoch": 2.5706652417846647, "step": 9622, "train/loss_ctc": 1.1739146709442139, "train/loss_error": 0.5705229043960571, "train/loss_total": 0.6912012696266174 }, { "epoch": 2.570932407160032, "step": 9623, "train/loss_ctc": 0.42172279953956604, "train/loss_error": 0.4079292118549347, "train/loss_total": 0.41068795323371887 }, { "epoch": 2.5711995725353995, "step": 9624, "train/loss_ctc": 0.6132066249847412, "train/loss_error": 0.4742412269115448, "train/loss_total": 0.5020343065261841 }, { "epoch": 2.5714667379107667, "step": 9625, "train/loss_ctc": 0.5481656193733215, "train/loss_error": 0.46272996068000793, "train/loss_total": 0.47981709241867065 }, { "epoch": 2.571733903286134, "step": 9626, "train/loss_ctc": 1.928628921508789, "train/loss_error": 0.5689388513565063, "train/loss_total": 0.8408768773078918 }, { "epoch": 2.5720010686615016, "step": 9627, "train/loss_ctc": 0.9731882214546204, "train/loss_error": 0.4204949736595154, "train/loss_total": 0.5310336351394653 }, { "epoch": 2.5722682340368688, "step": 9628, "train/loss_ctc": 0.8677958846092224, "train/loss_error": 0.4976142644882202, "train/loss_total": 0.5716505646705627 }, { "epoch": 2.572535399412236, "step": 9629, "train/loss_ctc": 0.33427053689956665, "train/loss_error": 0.4837486445903778, "train/loss_total": 0.453853040933609 }, { "epoch": 2.5728025647876036, "grad_norm": 18.332366943359375, "learning_rate": 1.4572802564787603e-05, "loss": 0.5455, "step": 9630 }, { "epoch": 2.5728025647876036, "step": 9630, "train/loss_ctc": 0.881994366645813, "train/loss_error": 0.4672880470752716, "train/loss_total": 0.5502293109893799 }, { "epoch": 2.573069730162971, "step": 9631, "train/loss_ctc": 1.0251245498657227, "train/loss_error": 0.498509019613266, "train/loss_total": 0.6038321256637573 }, { 
"epoch": 2.573336895538338, "step": 9632, "train/loss_ctc": 0.5562304854393005, "train/loss_error": 0.41326460242271423, "train/loss_total": 0.44185778498649597 }, { "epoch": 2.5736040609137056, "step": 9633, "train/loss_ctc": 0.359364777803421, "train/loss_error": 0.3900792598724365, "train/loss_total": 0.3839363753795624 }, { "epoch": 2.573871226289073, "step": 9634, "train/loss_ctc": 0.5648083686828613, "train/loss_error": 0.4578922390937805, "train/loss_total": 0.4792754650115967 }, { "epoch": 2.57413839166444, "step": 9635, "train/loss_ctc": 1.0661224126815796, "train/loss_error": 0.47643357515335083, "train/loss_total": 0.5943713784217834 }, { "epoch": 2.5744055570398077, "step": 9636, "train/loss_ctc": 0.5873117446899414, "train/loss_error": 0.423419713973999, "train/loss_total": 0.45619815587997437 }, { "epoch": 2.574672722415175, "step": 9637, "train/loss_ctc": 0.76677405834198, "train/loss_error": 0.3965403735637665, "train/loss_total": 0.4705871343612671 }, { "epoch": 2.5749398877905425, "step": 9638, "train/loss_ctc": 1.2208970785140991, "train/loss_error": 0.4756106436252594, "train/loss_total": 0.6246679425239563 }, { "epoch": 2.5752070531659097, "step": 9639, "train/loss_ctc": 1.4579408168792725, "train/loss_error": 0.5795393586158752, "train/loss_total": 0.7552196383476257 }, { "epoch": 2.575474218541277, "grad_norm": 2.542559862136841, "learning_rate": 1.4556772642265564e-05, "loss": 0.536, "step": 9640 }, { "epoch": 2.575474218541277, "step": 9640, "train/loss_ctc": 0.8271896839141846, "train/loss_error": 0.4584979712963104, "train/loss_total": 0.5322363376617432 }, { "epoch": 2.5757413839166445, "step": 9641, "train/loss_ctc": 0.8079708218574524, "train/loss_error": 0.5045635104179382, "train/loss_total": 0.5652449727058411 }, { "epoch": 2.5760085492920117, "step": 9642, "train/loss_ctc": 0.8323279619216919, "train/loss_error": 0.5003376603126526, "train/loss_total": 0.5667357444763184 }, { "epoch": 2.5762757146673794, "step": 9643, 
"train/loss_ctc": 0.39594995975494385, "train/loss_error": 0.4883185029029846, "train/loss_total": 0.469844788312912 }, { "epoch": 2.5765428800427466, "step": 9644, "train/loss_ctc": 0.6677196621894836, "train/loss_error": 0.4983677864074707, "train/loss_total": 0.5322381854057312 }, { "epoch": 2.5768100454181138, "step": 9645, "train/loss_ctc": 0.594028115272522, "train/loss_error": 0.42258220911026, "train/loss_total": 0.4568713903427124 }, { "epoch": 2.5770772107934814, "step": 9646, "train/loss_ctc": 0.4790729284286499, "train/loss_error": 0.4307854473590851, "train/loss_total": 0.4404429495334625 }, { "epoch": 2.5773443761688486, "step": 9647, "train/loss_ctc": 0.5267552137374878, "train/loss_error": 0.4172896444797516, "train/loss_total": 0.43918275833129883 }, { "epoch": 2.577611541544216, "step": 9648, "train/loss_ctc": 1.2058277130126953, "train/loss_error": 0.4831914007663727, "train/loss_total": 0.6277186870574951 }, { "epoch": 2.5778787069195834, "step": 9649, "train/loss_ctc": 0.2714148759841919, "train/loss_error": 0.43742960691452026, "train/loss_total": 0.4042266607284546 }, { "epoch": 2.5781458722949506, "grad_norm": 5.097647190093994, "learning_rate": 1.4540742719743522e-05, "loss": 0.5035, "step": 9650 }, { "epoch": 2.5781458722949506, "step": 9650, "train/loss_ctc": 1.2871809005737305, "train/loss_error": 0.5075418949127197, "train/loss_total": 0.663469672203064 }, { "epoch": 2.578413037670318, "step": 9651, "train/loss_ctc": 0.5512550473213196, "train/loss_error": 0.4238932132720947, "train/loss_total": 0.4493655860424042 }, { "epoch": 2.5786802030456855, "step": 9652, "train/loss_ctc": 0.488861083984375, "train/loss_error": 0.4974970519542694, "train/loss_total": 0.4957698583602905 }, { "epoch": 2.5789473684210527, "step": 9653, "train/loss_ctc": 0.49433454871177673, "train/loss_error": 0.4640927016735077, "train/loss_total": 0.47014108300209045 }, { "epoch": 2.57921453379642, "step": 9654, "train/loss_ctc": 0.6723780632019043, 
"train/loss_error": 0.4102954566478729, "train/loss_total": 0.46271198987960815 }, { "epoch": 2.5794816991717875, "step": 9655, "train/loss_ctc": 0.9670175909996033, "train/loss_error": 0.4291796088218689, "train/loss_total": 0.5367472171783447 }, { "epoch": 2.5797488645471547, "step": 9656, "train/loss_ctc": 1.0722646713256836, "train/loss_error": 0.44334855675697327, "train/loss_total": 0.5691317915916443 }, { "epoch": 2.580016029922522, "step": 9657, "train/loss_ctc": 2.00378680229187, "train/loss_error": 0.47537490725517273, "train/loss_total": 0.7810572981834412 }, { "epoch": 2.5802831952978895, "step": 9658, "train/loss_ctc": 0.669898509979248, "train/loss_error": 0.40696170926094055, "train/loss_total": 0.45954906940460205 }, { "epoch": 2.5805503606732567, "step": 9659, "train/loss_ctc": 0.30927005410194397, "train/loss_error": 0.45564889907836914, "train/loss_total": 0.42637312412261963 }, { "epoch": 2.580817526048624, "grad_norm": 1.1010055541992188, "learning_rate": 1.452471279722148e-05, "loss": 0.5314, "step": 9660 }, { "epoch": 2.580817526048624, "step": 9660, "train/loss_ctc": 1.2505145072937012, "train/loss_error": 0.4599432349205017, "train/loss_total": 0.6180574893951416 }, { "epoch": 2.5810846914239916, "step": 9661, "train/loss_ctc": 0.7741259932518005, "train/loss_error": 0.4711652100086212, "train/loss_total": 0.5317573547363281 }, { "epoch": 2.5813518567993587, "step": 9662, "train/loss_ctc": 0.5407737493515015, "train/loss_error": 0.4337711036205292, "train/loss_total": 0.4551716446876526 }, { "epoch": 2.581619022174726, "step": 9663, "train/loss_ctc": 0.5650813579559326, "train/loss_error": 0.4058523178100586, "train/loss_total": 0.4376981258392334 }, { "epoch": 2.5818861875500936, "step": 9664, "train/loss_ctc": 0.8798746466636658, "train/loss_error": 0.4582080841064453, "train/loss_total": 0.5425413846969604 }, { "epoch": 2.5821533529254608, "step": 9665, "train/loss_ctc": 0.6745913028717041, "train/loss_error": 0.4940347671508789, 
"train/loss_total": 0.530146062374115 }, { "epoch": 2.582420518300828, "step": 9666, "train/loss_ctc": 1.0094397068023682, "train/loss_error": 0.48798438906669617, "train/loss_total": 0.5922754406929016 }, { "epoch": 2.5826876836761956, "step": 9667, "train/loss_ctc": 0.49302926659584045, "train/loss_error": 0.45783329010009766, "train/loss_total": 0.46487247943878174 }, { "epoch": 2.582954849051563, "step": 9668, "train/loss_ctc": 0.9151483774185181, "train/loss_error": 0.4382209777832031, "train/loss_total": 0.5336064696311951 }, { "epoch": 2.58322201442693, "step": 9669, "train/loss_ctc": 0.5903069376945496, "train/loss_error": 0.47442588210105896, "train/loss_total": 0.49760210514068604 }, { "epoch": 2.5834891798022976, "grad_norm": 2.527409315109253, "learning_rate": 1.450868287469944e-05, "loss": 0.5204, "step": 9670 }, { "epoch": 2.5834891798022976, "step": 9670, "train/loss_ctc": 0.6270408630371094, "train/loss_error": 0.4125099182128906, "train/loss_total": 0.45541611313819885 }, { "epoch": 2.583756345177665, "step": 9671, "train/loss_ctc": 1.082045078277588, "train/loss_error": 0.49117639660835266, "train/loss_total": 0.6093501448631287 }, { "epoch": 2.5840235105530325, "step": 9672, "train/loss_ctc": 0.5062909722328186, "train/loss_error": 0.3898915946483612, "train/loss_total": 0.4131714701652527 }, { "epoch": 2.5842906759283997, "step": 9673, "train/loss_ctc": 0.6734943389892578, "train/loss_error": 0.5400315523147583, "train/loss_total": 0.5667241215705872 }, { "epoch": 2.584557841303767, "step": 9674, "train/loss_ctc": 0.31855469942092896, "train/loss_error": 0.4616018533706665, "train/loss_total": 0.43299245834350586 }, { "epoch": 2.5848250066791345, "step": 9675, "train/loss_ctc": 0.6499841213226318, "train/loss_error": 0.43176889419555664, "train/loss_total": 0.47541195154190063 }, { "epoch": 2.5850921720545017, "step": 9676, "train/loss_ctc": 0.8767081499099731, "train/loss_error": 0.4677623510360718, "train/loss_total": 0.5495514869689941 }, { 
"epoch": 2.5853593374298693, "step": 9677, "train/loss_ctc": 0.6394364833831787, "train/loss_error": 0.5390009880065918, "train/loss_total": 0.5590881109237671 }, { "epoch": 2.5856265028052365, "step": 9678, "train/loss_ctc": 0.4476715326309204, "train/loss_error": 0.46557968854904175, "train/loss_total": 0.4619980752468109 }, { "epoch": 2.5858936681806037, "step": 9679, "train/loss_ctc": 1.4240227937698364, "train/loss_error": 0.46991968154907227, "train/loss_total": 0.660740315914154 }, { "epoch": 2.5861608335559714, "grad_norm": 3.7165238857269287, "learning_rate": 1.4492652952177398e-05, "loss": 0.5184, "step": 9680 }, { "epoch": 2.5861608335559714, "step": 9680, "train/loss_ctc": 0.3702775835990906, "train/loss_error": 0.41142740845680237, "train/loss_total": 0.4031974673271179 }, { "epoch": 2.5864279989313386, "step": 9681, "train/loss_ctc": 1.1999388933181763, "train/loss_error": 0.4663676619529724, "train/loss_total": 0.6130819320678711 }, { "epoch": 2.5866951643067058, "step": 9682, "train/loss_ctc": 0.29728859663009644, "train/loss_error": 0.37395310401916504, "train/loss_total": 0.35862019658088684 }, { "epoch": 2.5869623296820734, "step": 9683, "train/loss_ctc": 0.9605262279510498, "train/loss_error": 0.4573157727718353, "train/loss_total": 0.5579578876495361 }, { "epoch": 2.5872294950574406, "step": 9684, "train/loss_ctc": 0.3891649842262268, "train/loss_error": 0.49223408102989197, "train/loss_total": 0.47162026166915894 }, { "epoch": 2.587496660432808, "step": 9685, "train/loss_ctc": 0.42845776677131653, "train/loss_error": 0.4348262846469879, "train/loss_total": 0.4335525631904602 }, { "epoch": 2.5877638258081754, "step": 9686, "train/loss_ctc": 0.5816044807434082, "train/loss_error": 0.46441227197647095, "train/loss_total": 0.48785072565078735 }, { "epoch": 2.5880309911835426, "step": 9687, "train/loss_ctc": 0.7478201389312744, "train/loss_error": 0.4363210201263428, "train/loss_total": 0.4986208379268646 }, { "epoch": 2.58829815655891, "step": 
9688, "train/loss_ctc": 1.0253922939300537, "train/loss_error": 0.48662474751472473, "train/loss_total": 0.5943782925605774 }, { "epoch": 2.5885653219342775, "step": 9689, "train/loss_ctc": 0.8404765129089355, "train/loss_error": 0.4317656457424164, "train/loss_total": 0.5135078430175781 }, { "epoch": 2.5888324873096447, "grad_norm": 1.5040310621261597, "learning_rate": 1.4476623029655356e-05, "loss": 0.4932, "step": 9690 }, { "epoch": 2.5888324873096447, "step": 9690, "train/loss_ctc": 0.7497243881225586, "train/loss_error": 0.39123302698135376, "train/loss_total": 0.4629313349723816 }, { "epoch": 2.589099652685012, "step": 9691, "train/loss_ctc": 0.7496042251586914, "train/loss_error": 0.43425217270851135, "train/loss_total": 0.49732258915901184 }, { "epoch": 2.5893668180603795, "step": 9692, "train/loss_ctc": 0.362729012966156, "train/loss_error": 0.4561346471309662, "train/loss_total": 0.4374535083770752 }, { "epoch": 2.5896339834357467, "step": 9693, "train/loss_ctc": 1.068745732307434, "train/loss_error": 0.5459781885147095, "train/loss_total": 0.6505317091941833 }, { "epoch": 2.589901148811114, "step": 9694, "train/loss_ctc": 1.1491200923919678, "train/loss_error": 0.5246310234069824, "train/loss_total": 0.6495288610458374 }, { "epoch": 2.5901683141864815, "step": 9695, "train/loss_ctc": 0.38182333111763, "train/loss_error": 0.46735167503356934, "train/loss_total": 0.45024600625038147 }, { "epoch": 2.5904354795618487, "step": 9696, "train/loss_ctc": 0.701452374458313, "train/loss_error": 0.4194892346858978, "train/loss_total": 0.4758818745613098 }, { "epoch": 2.590702644937216, "step": 9697, "train/loss_ctc": 1.1931405067443848, "train/loss_error": 0.46096742153167725, "train/loss_total": 0.6074020266532898 }, { "epoch": 2.5909698103125836, "step": 9698, "train/loss_ctc": 0.7399924993515015, "train/loss_error": 0.45654845237731934, "train/loss_total": 0.5132372379302979 }, { "epoch": 2.5912369756879507, "step": 9699, "train/loss_ctc": 1.2939863204956055, 
"train/loss_error": 0.4556427299976349, "train/loss_total": 0.623311460018158 }, { "epoch": 2.591504141063318, "grad_norm": 1.4351228475570679, "learning_rate": 1.4460593107133316e-05, "loss": 0.5368, "step": 9700 }, { "epoch": 2.591504141063318, "step": 9700, "train/loss_ctc": 1.0998237133026123, "train/loss_error": 0.3888776898384094, "train/loss_total": 0.53106689453125 }, { "epoch": 2.5917713064386856, "step": 9701, "train/loss_ctc": 0.582190215587616, "train/loss_error": 0.5190359354019165, "train/loss_total": 0.5316668152809143 }, { "epoch": 2.592038471814053, "step": 9702, "train/loss_ctc": 0.9191805124282837, "train/loss_error": 0.4648111164569855, "train/loss_total": 0.5556850433349609 }, { "epoch": 2.59230563718942, "step": 9703, "train/loss_ctc": 0.561001718044281, "train/loss_error": 0.3993307948112488, "train/loss_total": 0.43166500329971313 }, { "epoch": 2.5925728025647876, "step": 9704, "train/loss_ctc": 0.7124695777893066, "train/loss_error": 0.5535910725593567, "train/loss_total": 0.5853667855262756 }, { "epoch": 2.592839967940155, "step": 9705, "train/loss_ctc": 0.5840637683868408, "train/loss_error": 0.5236303806304932, "train/loss_total": 0.5357170701026917 }, { "epoch": 2.5931071333155225, "step": 9706, "train/loss_ctc": 0.46857190132141113, "train/loss_error": 0.38742396235466003, "train/loss_total": 0.4036535620689392 }, { "epoch": 2.5933742986908896, "step": 9707, "train/loss_ctc": 0.8444818258285522, "train/loss_error": 0.4724081754684448, "train/loss_total": 0.5468229055404663 }, { "epoch": 2.5936414640662573, "step": 9708, "train/loss_ctc": 0.9246228933334351, "train/loss_error": 0.46560537815093994, "train/loss_total": 0.55740886926651 }, { "epoch": 2.5939086294416245, "step": 9709, "train/loss_ctc": 0.49442362785339355, "train/loss_error": 0.43626368045806885, "train/loss_total": 0.44789567589759827 }, { "epoch": 2.5941757948169917, "grad_norm": 2.1130428314208984, "learning_rate": 1.4444563184611274e-05, "loss": 0.5127, "step": 9710 }, 
{ "epoch": 2.5941757948169917, "step": 9710, "train/loss_ctc": 0.5521528720855713, "train/loss_error": 0.44304367899894714, "train/loss_total": 0.464865505695343 }, { "epoch": 2.5944429601923593, "step": 9711, "train/loss_ctc": 1.120100736618042, "train/loss_error": 0.5067610144615173, "train/loss_total": 0.6294289827346802 }, { "epoch": 2.5947101255677265, "step": 9712, "train/loss_ctc": 0.419650137424469, "train/loss_error": 0.41369661688804626, "train/loss_total": 0.41488730907440186 }, { "epoch": 2.5949772909430937, "step": 9713, "train/loss_ctc": 0.640442967414856, "train/loss_error": 0.47910112142562866, "train/loss_total": 0.5113694667816162 }, { "epoch": 2.5952444563184613, "step": 9714, "train/loss_ctc": 1.1575407981872559, "train/loss_error": 0.45370426774024963, "train/loss_total": 0.5944715738296509 }, { "epoch": 2.5955116216938285, "step": 9715, "train/loss_ctc": 0.8275485038757324, "train/loss_error": 0.39539456367492676, "train/loss_total": 0.4818253517150879 }, { "epoch": 2.5957787870691957, "step": 9716, "train/loss_ctc": 0.4047172963619232, "train/loss_error": 0.4035162925720215, "train/loss_total": 0.4037564992904663 }, { "epoch": 2.5960459524445634, "step": 9717, "train/loss_ctc": 0.5001553297042847, "train/loss_error": 0.4981171488761902, "train/loss_total": 0.49852481484413147 }, { "epoch": 2.5963131178199306, "step": 9718, "train/loss_ctc": 0.4451763331890106, "train/loss_error": 0.4950711131095886, "train/loss_total": 0.4850921630859375 }, { "epoch": 2.5965802831952978, "step": 9719, "train/loss_ctc": 0.7062324285507202, "train/loss_error": 0.45500776171684265, "train/loss_total": 0.5052527189254761 }, { "epoch": 2.5968474485706654, "grad_norm": 1.2240400314331055, "learning_rate": 1.4428533262089232e-05, "loss": 0.4989, "step": 9720 }, { "epoch": 2.5968474485706654, "step": 9720, "train/loss_ctc": 0.8757376074790955, "train/loss_error": 0.470931738615036, "train/loss_total": 0.5518929362297058 }, { "epoch": 2.5971146139460326, "step": 9721, 
"train/loss_ctc": 0.913168728351593, "train/loss_error": 0.4860703647136688, "train/loss_total": 0.5714900493621826 }, { "epoch": 2.5973817793214, "step": 9722, "train/loss_ctc": 1.0535444021224976, "train/loss_error": 0.434481143951416, "train/loss_total": 0.5582938194274902 }, { "epoch": 2.5976489446967674, "step": 9723, "train/loss_ctc": 0.6449613571166992, "train/loss_error": 0.48732632398605347, "train/loss_total": 0.5188533663749695 }, { "epoch": 2.5979161100721346, "step": 9724, "train/loss_ctc": 0.7702846527099609, "train/loss_error": 0.4531354010105133, "train/loss_total": 0.5165652632713318 }, { "epoch": 2.598183275447502, "step": 9725, "train/loss_ctc": 1.4597606658935547, "train/loss_error": 0.35480040311813354, "train/loss_total": 0.5757924318313599 }, { "epoch": 2.5984504408228695, "step": 9726, "train/loss_ctc": 0.16121652722358704, "train/loss_error": 0.36908450722694397, "train/loss_total": 0.32751092314720154 }, { "epoch": 2.5987176061982367, "step": 9727, "train/loss_ctc": 0.6489790678024292, "train/loss_error": 0.4312358796596527, "train/loss_total": 0.4747845232486725 }, { "epoch": 2.598984771573604, "step": 9728, "train/loss_ctc": 0.7293722629547119, "train/loss_error": 0.5187225937843323, "train/loss_total": 0.5608525276184082 }, { "epoch": 2.5992519369489715, "step": 9729, "train/loss_ctc": 0.7601323127746582, "train/loss_error": 0.42291080951690674, "train/loss_total": 0.49035513401031494 }, { "epoch": 2.5995191023243387, "grad_norm": 1.7063945531845093, "learning_rate": 1.4412503339567194e-05, "loss": 0.5146, "step": 9730 }, { "epoch": 2.5995191023243387, "step": 9730, "train/loss_ctc": 1.209007978439331, "train/loss_error": 0.46856316924095154, "train/loss_total": 0.6166521310806274 }, { "epoch": 2.599786267699706, "step": 9731, "train/loss_ctc": 0.9268746376037598, "train/loss_error": 0.4242457449436188, "train/loss_total": 0.524771511554718 }, { "epoch": 2.6000534330750735, "step": 9732, "train/loss_ctc": 0.628082275390625, 
"train/loss_error": 0.4959794580936432, "train/loss_total": 0.5224000215530396 }, { "epoch": 2.6003205984504407, "step": 9733, "train/loss_ctc": 1.0035731792449951, "train/loss_error": 0.47094792127609253, "train/loss_total": 0.577472984790802 }, { "epoch": 2.600587763825808, "step": 9734, "train/loss_ctc": 0.188913494348526, "train/loss_error": 0.44191327691078186, "train/loss_total": 0.3913133144378662 }, { "epoch": 2.6008549292011756, "step": 9735, "train/loss_ctc": 1.1114883422851562, "train/loss_error": 0.4983600378036499, "train/loss_total": 0.620985746383667 }, { "epoch": 2.6011220945765428, "step": 9736, "train/loss_ctc": 0.7215714454650879, "train/loss_error": 0.43015363812446594, "train/loss_total": 0.4884372055530548 }, { "epoch": 2.6013892599519104, "step": 9737, "train/loss_ctc": 1.0091415643692017, "train/loss_error": 0.4634804129600525, "train/loss_total": 0.5726126432418823 }, { "epoch": 2.6016564253272776, "step": 9738, "train/loss_ctc": 0.7267435789108276, "train/loss_error": 0.4716808795928955, "train/loss_total": 0.522693395614624 }, { "epoch": 2.601923590702645, "step": 9739, "train/loss_ctc": 0.6034338474273682, "train/loss_error": 0.38943609595298767, "train/loss_total": 0.4322356581687927 }, { "epoch": 2.6021907560780124, "grad_norm": 2.2867939472198486, "learning_rate": 1.4396473417045152e-05, "loss": 0.527, "step": 9740 }, { "epoch": 2.6021907560780124, "step": 9740, "train/loss_ctc": 0.44353026151657104, "train/loss_error": 0.4193858206272125, "train/loss_total": 0.4242147207260132 }, { "epoch": 2.6024579214533796, "step": 9741, "train/loss_ctc": 0.6432682275772095, "train/loss_error": 0.5014188289642334, "train/loss_total": 0.5297887325286865 }, { "epoch": 2.6027250868287473, "step": 9742, "train/loss_ctc": 0.616257905960083, "train/loss_error": 0.41804638504981995, "train/loss_total": 0.45768868923187256 }, { "epoch": 2.6029922522041145, "step": 9743, "train/loss_ctc": 0.8398462533950806, "train/loss_error": 0.4476602375507355, 
"train/loss_total": 0.5260974168777466 }, { "epoch": 2.6032594175794816, "step": 9744, "train/loss_ctc": 0.8746426701545715, "train/loss_error": 0.44213050603866577, "train/loss_total": 0.5286329388618469 }, { "epoch": 2.6035265829548493, "step": 9745, "train/loss_ctc": 0.4888401925563812, "train/loss_error": 0.47885212302207947, "train/loss_total": 0.4808497428894043 }, { "epoch": 2.6037937483302165, "step": 9746, "train/loss_ctc": 1.1219687461853027, "train/loss_error": 0.4396313726902008, "train/loss_total": 0.5760988593101501 }, { "epoch": 2.6040609137055837, "step": 9747, "train/loss_ctc": 0.36034199595451355, "train/loss_error": 0.4541783630847931, "train/loss_total": 0.43541109561920166 }, { "epoch": 2.6043280790809513, "step": 9748, "train/loss_ctc": 0.9892821311950684, "train/loss_error": 0.5094830393791199, "train/loss_total": 0.6054428815841675 }, { "epoch": 2.6045952444563185, "step": 9749, "train/loss_ctc": 0.6288314461708069, "train/loss_error": 0.45747458934783936, "train/loss_total": 0.4917459487915039 }, { "epoch": 2.6048624098316857, "grad_norm": 1.6769919395446777, "learning_rate": 1.438044349452311e-05, "loss": 0.5056, "step": 9750 }, { "epoch": 2.6048624098316857, "step": 9750, "train/loss_ctc": 0.7978754043579102, "train/loss_error": 0.45701488852500916, "train/loss_total": 0.5251870155334473 }, { "epoch": 2.6051295752070533, "step": 9751, "train/loss_ctc": 0.6131621599197388, "train/loss_error": 0.5248401165008545, "train/loss_total": 0.5425045490264893 }, { "epoch": 2.6053967405824205, "step": 9752, "train/loss_ctc": 0.28328773379325867, "train/loss_error": 0.42321205139160156, "train/loss_total": 0.39522719383239746 }, { "epoch": 2.6056639059577877, "step": 9753, "train/loss_ctc": 0.6903153657913208, "train/loss_error": 0.4066329002380371, "train/loss_total": 0.4633693993091583 }, { "epoch": 2.6059310713331554, "step": 9754, "train/loss_ctc": 0.8784291744232178, "train/loss_error": 0.41220158338546753, "train/loss_total": 0.5054470896720886 
}, { "epoch": 2.6061982367085226, "step": 9755, "train/loss_ctc": 0.5997352600097656, "train/loss_error": 0.4488210082054138, "train/loss_total": 0.4790038466453552 }, { "epoch": 2.6064654020838898, "step": 9756, "train/loss_ctc": 0.7326347827911377, "train/loss_error": 0.47883322834968567, "train/loss_total": 0.5295935869216919 }, { "epoch": 2.6067325674592574, "step": 9757, "train/loss_ctc": 1.115013837814331, "train/loss_error": 0.42915138602256775, "train/loss_total": 0.5663238763809204 }, { "epoch": 2.6069997328346246, "step": 9758, "train/loss_ctc": 0.973231852054596, "train/loss_error": 0.45920470356941223, "train/loss_total": 0.5620101690292358 }, { "epoch": 2.607266898209992, "step": 9759, "train/loss_ctc": 0.6581918001174927, "train/loss_error": 0.4521237313747406, "train/loss_total": 0.49333733320236206 }, { "epoch": 2.6075340635853594, "grad_norm": 1.623794674873352, "learning_rate": 1.436441357200107e-05, "loss": 0.5062, "step": 9760 }, { "epoch": 2.6075340635853594, "step": 9760, "train/loss_ctc": 0.6485799551010132, "train/loss_error": 0.4829370975494385, "train/loss_total": 0.5160656571388245 }, { "epoch": 2.6078012289607266, "step": 9761, "train/loss_ctc": 0.36757075786590576, "train/loss_error": 0.42438894510269165, "train/loss_total": 0.4130253195762634 }, { "epoch": 2.608068394336094, "step": 9762, "train/loss_ctc": 0.8843034505844116, "train/loss_error": 0.47460898756980896, "train/loss_total": 0.5565478801727295 }, { "epoch": 2.6083355597114615, "step": 9763, "train/loss_ctc": 1.4281818866729736, "train/loss_error": 0.4035078287124634, "train/loss_total": 0.6084426641464233 }, { "epoch": 2.6086027250868287, "step": 9764, "train/loss_ctc": 0.84333735704422, "train/loss_error": 0.4969674050807953, "train/loss_total": 0.5662413835525513 }, { "epoch": 2.608869890462196, "step": 9765, "train/loss_ctc": 0.4849134087562561, "train/loss_error": 0.4282754957675934, "train/loss_total": 0.4396030902862549 }, { "epoch": 2.6091370558375635, "step": 9766, 
"train/loss_ctc": 0.3581736385822296, "train/loss_error": 0.4625551998615265, "train/loss_total": 0.441678911447525 }, { "epoch": 2.6094042212129307, "step": 9767, "train/loss_ctc": 0.668331503868103, "train/loss_error": 0.5253025889396667, "train/loss_total": 0.5539083480834961 }, { "epoch": 2.609671386588298, "step": 9768, "train/loss_ctc": 1.5994141101837158, "train/loss_error": 0.4111449420452118, "train/loss_total": 0.6487988233566284 }, { "epoch": 2.6099385519636655, "step": 9769, "train/loss_ctc": 0.49120259284973145, "train/loss_error": 0.4617237150669098, "train/loss_total": 0.46761950850486755 }, { "epoch": 2.6102057173390327, "grad_norm": 2.0312273502349854, "learning_rate": 1.4348383649479028e-05, "loss": 0.5212, "step": 9770 }, { "epoch": 2.6102057173390327, "step": 9770, "train/loss_ctc": 0.7863168120384216, "train/loss_error": 0.4895751178264618, "train/loss_total": 0.5489234924316406 }, { "epoch": 2.6104728827144004, "step": 9771, "train/loss_ctc": 0.7121293544769287, "train/loss_error": 0.45268502831459045, "train/loss_total": 0.5045738816261292 }, { "epoch": 2.6107400480897676, "step": 9772, "train/loss_ctc": 0.9282580018043518, "train/loss_error": 0.49179866909980774, "train/loss_total": 0.5790905356407166 }, { "epoch": 2.6110072134651348, "step": 9773, "train/loss_ctc": 1.1213682889938354, "train/loss_error": 0.43896007537841797, "train/loss_total": 0.5754417181015015 }, { "epoch": 2.6112743788405024, "step": 9774, "train/loss_ctc": 0.6186779737472534, "train/loss_error": 0.45136314630508423, "train/loss_total": 0.48482611775398254 }, { "epoch": 2.6115415442158696, "step": 9775, "train/loss_ctc": 0.5377453565597534, "train/loss_error": 0.4603724479675293, "train/loss_total": 0.4758470356464386 }, { "epoch": 2.6118087095912372, "step": 9776, "train/loss_ctc": 0.780051052570343, "train/loss_error": 0.46214786171913147, "train/loss_total": 0.5257284641265869 }, { "epoch": 2.6120758749666044, "step": 9777, "train/loss_ctc": 0.5274184942245483, 
"train/loss_error": 0.43752649426460266, "train/loss_total": 0.4555049240589142 }, { "epoch": 2.6123430403419716, "step": 9778, "train/loss_ctc": 0.8091833591461182, "train/loss_error": 0.49725526571273804, "train/loss_total": 0.5596408843994141 }, { "epoch": 2.6126102057173393, "step": 9779, "train/loss_ctc": 0.4484260678291321, "train/loss_error": 0.5339601635932922, "train/loss_total": 0.5168533325195312 }, { "epoch": 2.6128773710927065, "grad_norm": 5.926982879638672, "learning_rate": 1.4332353726956986e-05, "loss": 0.5226, "step": 9780 }, { "epoch": 2.6128773710927065, "step": 9780, "train/loss_ctc": 0.6980553269386292, "train/loss_error": 0.45025834441185, "train/loss_total": 0.49981775879859924 }, { "epoch": 2.6131445364680737, "step": 9781, "train/loss_ctc": 0.6140179634094238, "train/loss_error": 0.4456390142440796, "train/loss_total": 0.47931480407714844 }, { "epoch": 2.6134117018434413, "step": 9782, "train/loss_ctc": 0.5061835050582886, "train/loss_error": 0.4594644606113434, "train/loss_total": 0.46880826354026794 }, { "epoch": 2.6136788672188085, "step": 9783, "train/loss_ctc": 0.4041733741760254, "train/loss_error": 0.4075442850589752, "train/loss_total": 0.40687012672424316 }, { "epoch": 2.6139460325941757, "step": 9784, "train/loss_ctc": 1.0711116790771484, "train/loss_error": 0.4802279472351074, "train/loss_total": 0.5984047055244446 }, { "epoch": 2.6142131979695433, "step": 9785, "train/loss_ctc": 0.3658783435821533, "train/loss_error": 0.40580227971076965, "train/loss_total": 0.3978174924850464 }, { "epoch": 2.6144803633449105, "step": 9786, "train/loss_ctc": 1.2190961837768555, "train/loss_error": 0.4628392457962036, "train/loss_total": 0.6140906810760498 }, { "epoch": 2.6147475287202777, "step": 9787, "train/loss_ctc": 1.4306371212005615, "train/loss_error": 0.46492257714271545, "train/loss_total": 0.6580654978752136 }, { "epoch": 2.6150146940956454, "step": 9788, "train/loss_ctc": 0.7992411255836487, "train/loss_error": 0.5332386493682861, 
"train/loss_total": 0.5864391326904297 }, { "epoch": 2.6152818594710125, "step": 9789, "train/loss_ctc": 0.4224100112915039, "train/loss_error": 0.41761061549186707, "train/loss_total": 0.41857051849365234 }, { "epoch": 2.6155490248463797, "grad_norm": 2.053851842880249, "learning_rate": 1.4316323804434945e-05, "loss": 0.5128, "step": 9790 }, { "epoch": 2.6155490248463797, "step": 9790, "train/loss_ctc": 0.48748254776000977, "train/loss_error": 0.4516680836677551, "train/loss_total": 0.45883098244667053 }, { "epoch": 2.6158161902217474, "step": 9791, "train/loss_ctc": 0.6205470561981201, "train/loss_error": 0.40208151936531067, "train/loss_total": 0.445774644613266 }, { "epoch": 2.6160833555971146, "step": 9792, "train/loss_ctc": 1.440782070159912, "train/loss_error": 0.4430112838745117, "train/loss_total": 0.6425654888153076 }, { "epoch": 2.6163505209724818, "step": 9793, "train/loss_ctc": 0.8632751107215881, "train/loss_error": 0.5158393383026123, "train/loss_total": 0.5853264927864075 }, { "epoch": 2.6166176863478494, "step": 9794, "train/loss_ctc": 1.1225054264068604, "train/loss_error": 0.4166652262210846, "train/loss_total": 0.5578332543373108 }, { "epoch": 2.6168848517232166, "step": 9795, "train/loss_ctc": 1.6470911502838135, "train/loss_error": 0.4290173053741455, "train/loss_total": 0.672632098197937 }, { "epoch": 2.617152017098584, "step": 9796, "train/loss_ctc": 0.910823404788971, "train/loss_error": 0.4274771809577942, "train/loss_total": 0.5241464376449585 }, { "epoch": 2.6174191824739514, "step": 9797, "train/loss_ctc": 0.7987542748451233, "train/loss_error": 0.3841460347175598, "train/loss_total": 0.4670677185058594 }, { "epoch": 2.6176863478493186, "step": 9798, "train/loss_ctc": 0.4993875026702881, "train/loss_error": 0.4995063543319702, "train/loss_total": 0.4994826018810272 }, { "epoch": 2.617953513224686, "step": 9799, "train/loss_ctc": 0.4534400701522827, "train/loss_error": 0.43653830885887146, "train/loss_total": 0.4399186670780182 }, { 
"epoch": 2.6182206786000535, "grad_norm": 3.001373052597046, "learning_rate": 1.4300293881912904e-05, "loss": 0.5294, "step": 9800 }, { "epoch": 2.6182206786000535, "step": 9800, "train/loss_ctc": 0.8157907724380493, "train/loss_error": 0.4572184979915619, "train/loss_total": 0.5289329886436462 }, { "epoch": 2.6184878439754207, "step": 9801, "train/loss_ctc": 0.7741410732269287, "train/loss_error": 0.5080775022506714, "train/loss_total": 0.5612902641296387 }, { "epoch": 2.618755009350788, "step": 9802, "train/loss_ctc": 0.838719367980957, "train/loss_error": 0.43803688883781433, "train/loss_total": 0.5181733965873718 }, { "epoch": 2.6190221747261555, "step": 9803, "train/loss_ctc": 0.5912154316902161, "train/loss_error": 0.5160412192344666, "train/loss_total": 0.5310760736465454 }, { "epoch": 2.6192893401015227, "step": 9804, "train/loss_ctc": 0.6672240495681763, "train/loss_error": 0.4382556974887848, "train/loss_total": 0.48404937982559204 }, { "epoch": 2.6195565054768903, "step": 9805, "train/loss_ctc": 0.33801186084747314, "train/loss_error": 0.48240387439727783, "train/loss_total": 0.45352548360824585 }, { "epoch": 2.6198236708522575, "step": 9806, "train/loss_ctc": 0.6218804121017456, "train/loss_error": 0.42208409309387207, "train/loss_total": 0.4620433747768402 }, { "epoch": 2.620090836227625, "step": 9807, "train/loss_ctc": 1.9050477743148804, "train/loss_error": 0.4665902256965637, "train/loss_total": 0.754281759262085 }, { "epoch": 2.6203580016029924, "step": 9808, "train/loss_ctc": 1.2916345596313477, "train/loss_error": 0.4525943398475647, "train/loss_total": 0.6204023957252502 }, { "epoch": 2.6206251669783596, "step": 9809, "train/loss_ctc": 0.528159499168396, "train/loss_error": 0.4315638244152069, "train/loss_total": 0.4508829712867737 }, { "epoch": 2.620892332353727, "grad_norm": 1.8908119201660156, "learning_rate": 1.4284263959390862e-05, "loss": 0.5365, "step": 9810 }, { "epoch": 2.620892332353727, "step": 9810, "train/loss_ctc": 
0.44459784030914307, "train/loss_error": 0.4801678955554962, "train/loss_total": 0.473053902387619 }, { "epoch": 2.6211594977290944, "step": 9811, "train/loss_ctc": 0.4532533586025238, "train/loss_error": 0.3412988483905792, "train/loss_total": 0.36368975043296814 }, { "epoch": 2.6214266631044616, "step": 9812, "train/loss_ctc": 0.6705074906349182, "train/loss_error": 0.4099905490875244, "train/loss_total": 0.46209394931793213 }, { "epoch": 2.6216938284798292, "step": 9813, "train/loss_ctc": 0.7826417684555054, "train/loss_error": 0.40024396777153015, "train/loss_total": 0.4767235219478607 }, { "epoch": 2.6219609938551964, "step": 9814, "train/loss_ctc": 0.8140462636947632, "train/loss_error": 0.44142135977745056, "train/loss_total": 0.5159463882446289 }, { "epoch": 2.6222281592305636, "step": 9815, "train/loss_ctc": 0.6430990695953369, "train/loss_error": 0.36512255668640137, "train/loss_total": 0.42071786522865295 }, { "epoch": 2.6224953246059313, "step": 9816, "train/loss_ctc": 0.8204250335693359, "train/loss_error": 0.48812252283096313, "train/loss_total": 0.5545830130577087 }, { "epoch": 2.6227624899812985, "step": 9817, "train/loss_ctc": 0.7485415935516357, "train/loss_error": 0.49879175424575806, "train/loss_total": 0.5487416982650757 }, { "epoch": 2.6230296553566657, "step": 9818, "train/loss_ctc": 1.0407178401947021, "train/loss_error": 0.47959625720977783, "train/loss_total": 0.5918205976486206 }, { "epoch": 2.6232968207320333, "step": 9819, "train/loss_ctc": 0.8963872194290161, "train/loss_error": 0.5200573801994324, "train/loss_total": 0.5953233242034912 }, { "epoch": 2.6235639861074005, "grad_norm": 1.748848557472229, "learning_rate": 1.4268234036868823e-05, "loss": 0.5003, "step": 9820 }, { "epoch": 2.6235639861074005, "step": 9820, "train/loss_ctc": 0.5573872327804565, "train/loss_error": 0.5822390913963318, "train/loss_total": 0.5772687196731567 }, { "epoch": 2.6238311514827677, "step": 9821, "train/loss_ctc": 0.5227543115615845, "train/loss_error": 
0.42471641302108765, "train/loss_total": 0.4443240165710449 }, { "epoch": 2.6240983168581353, "step": 9822, "train/loss_ctc": 0.21189957857131958, "train/loss_error": 0.4172152280807495, "train/loss_total": 0.3761520981788635 }, { "epoch": 2.6243654822335025, "step": 9823, "train/loss_ctc": 0.8484057188034058, "train/loss_error": 0.522650420665741, "train/loss_total": 0.5878015160560608 }, { "epoch": 2.6246326476088697, "step": 9824, "train/loss_ctc": 0.43934065103530884, "train/loss_error": 0.45562997460365295, "train/loss_total": 0.45237210392951965 }, { "epoch": 2.6248998129842374, "step": 9825, "train/loss_ctc": 1.005284309387207, "train/loss_error": 0.48280102014541626, "train/loss_total": 0.5872976779937744 }, { "epoch": 2.6251669783596046, "step": 9826, "train/loss_ctc": 0.9124284982681274, "train/loss_error": 0.48510274291038513, "train/loss_total": 0.5705679059028625 }, { "epoch": 2.6254341437349717, "step": 9827, "train/loss_ctc": 0.9426872730255127, "train/loss_error": 0.47357985377311707, "train/loss_total": 0.5674013495445251 }, { "epoch": 2.6257013091103394, "step": 9828, "train/loss_ctc": 0.7484440207481384, "train/loss_error": 0.4051973223686218, "train/loss_total": 0.4738466739654541 }, { "epoch": 2.6259684744857066, "step": 9829, "train/loss_ctc": 0.6817036867141724, "train/loss_error": 0.4638234078884125, "train/loss_total": 0.5073994398117065 }, { "epoch": 2.626235639861074, "grad_norm": 2.5234813690185547, "learning_rate": 1.4252204114346781e-05, "loss": 0.5144, "step": 9830 }, { "epoch": 2.626235639861074, "step": 9830, "train/loss_ctc": 0.6197153329849243, "train/loss_error": 0.4190112054347992, "train/loss_total": 0.4591520428657532 }, { "epoch": 2.6265028052364414, "step": 9831, "train/loss_ctc": 0.7080169320106506, "train/loss_error": 0.5098271369934082, "train/loss_total": 0.5494651198387146 }, { "epoch": 2.6267699706118086, "step": 9832, "train/loss_ctc": 0.4996228516101837, "train/loss_error": 0.45172229409217834, "train/loss_total": 
0.46130239963531494 }, { "epoch": 2.627037135987176, "step": 9833, "train/loss_ctc": 0.6701115369796753, "train/loss_error": 0.49544528126716614, "train/loss_total": 0.530378520488739 }, { "epoch": 2.6273043013625434, "step": 9834, "train/loss_ctc": 0.5993603467941284, "train/loss_error": 0.3826315402984619, "train/loss_total": 0.42597728967666626 }, { "epoch": 2.6275714667379106, "step": 9835, "train/loss_ctc": 0.9474942088127136, "train/loss_error": 0.38531970977783203, "train/loss_total": 0.49775460362434387 }, { "epoch": 2.6278386321132783, "step": 9836, "train/loss_ctc": 0.6294358372688293, "train/loss_error": 0.5357639789581299, "train/loss_total": 0.5544983744621277 }, { "epoch": 2.6281057974886455, "step": 9837, "train/loss_ctc": 0.361217200756073, "train/loss_error": 0.40406355261802673, "train/loss_total": 0.3954943120479584 }, { "epoch": 2.6283729628640127, "step": 9838, "train/loss_ctc": 0.43037256598472595, "train/loss_error": 0.405855268239975, "train/loss_total": 0.41075873374938965 }, { "epoch": 2.6286401282393803, "step": 9839, "train/loss_ctc": 1.0919790267944336, "train/loss_error": 0.5319223999977112, "train/loss_total": 0.6439337134361267 }, { "epoch": 2.6289072936147475, "grad_norm": 4.824332237243652, "learning_rate": 1.4236174191824741e-05, "loss": 0.4929, "step": 9840 }, { "epoch": 2.6289072936147475, "step": 9840, "train/loss_ctc": 1.0136888027191162, "train/loss_error": 0.5242986083030701, "train/loss_total": 0.6221766471862793 }, { "epoch": 2.629174458990115, "step": 9841, "train/loss_ctc": 0.989098072052002, "train/loss_error": 0.4447229206562042, "train/loss_total": 0.5535979270935059 }, { "epoch": 2.6294416243654823, "step": 9842, "train/loss_ctc": 0.7732314467430115, "train/loss_error": 0.48823392391204834, "train/loss_total": 0.545233428478241 }, { "epoch": 2.6297087897408495, "step": 9843, "train/loss_ctc": 0.6808691620826721, "train/loss_error": 0.45783406496047974, "train/loss_total": 0.5024411082267761 }, { "epoch": 
2.629975955116217, "step": 9844, "train/loss_ctc": 0.734827995300293, "train/loss_error": 0.3857743740081787, "train/loss_total": 0.4555851221084595 }, { "epoch": 2.6302431204915844, "step": 9845, "train/loss_ctc": 0.6991039514541626, "train/loss_error": 0.4516899883747101, "train/loss_total": 0.5011727809906006 }, { "epoch": 2.6305102858669516, "step": 9846, "train/loss_ctc": 0.9865871667861938, "train/loss_error": 0.405142605304718, "train/loss_total": 0.5214315056800842 }, { "epoch": 2.630777451242319, "step": 9847, "train/loss_ctc": 0.4586383104324341, "train/loss_error": 0.5201615691184998, "train/loss_total": 0.5078569054603577 }, { "epoch": 2.6310446166176864, "step": 9848, "train/loss_ctc": 0.6789499521255493, "train/loss_error": 0.4610196352005005, "train/loss_total": 0.5046057105064392 }, { "epoch": 2.6313117819930536, "step": 9849, "train/loss_ctc": 0.24682441353797913, "train/loss_error": 0.49149972200393677, "train/loss_total": 0.4425646662712097 }, { "epoch": 2.6315789473684212, "grad_norm": 1.8188217878341675, "learning_rate": 1.4220144269302699e-05, "loss": 0.5157, "step": 9850 }, { "epoch": 2.6315789473684212, "step": 9850, "train/loss_ctc": 0.9012637138366699, "train/loss_error": 0.49593234062194824, "train/loss_total": 0.5769986510276794 }, { "epoch": 2.6318461127437884, "step": 9851, "train/loss_ctc": 0.7777488827705383, "train/loss_error": 0.47598230838775635, "train/loss_total": 0.5363356471061707 }, { "epoch": 2.6321132781191556, "step": 9852, "train/loss_ctc": 0.4432098865509033, "train/loss_error": 0.43774670362472534, "train/loss_total": 0.4388393461704254 }, { "epoch": 2.6323804434945233, "step": 9853, "train/loss_ctc": 0.49082326889038086, "train/loss_error": 0.4703461527824402, "train/loss_total": 0.4744415879249573 }, { "epoch": 2.6326476088698905, "step": 9854, "train/loss_ctc": 0.7337661385536194, "train/loss_error": 0.4067718982696533, "train/loss_total": 0.47217077016830444 }, { "epoch": 2.6329147742452577, "step": 9855, 
"train/loss_ctc": 0.72512286901474, "train/loss_error": 0.49722158908843994, "train/loss_total": 0.5428018569946289 }, { "epoch": 2.6331819396206253, "step": 9856, "train/loss_ctc": 1.1043856143951416, "train/loss_error": 0.4299304187297821, "train/loss_total": 0.5648214817047119 }, { "epoch": 2.6334491049959925, "step": 9857, "train/loss_ctc": 0.9654892086982727, "train/loss_error": 0.44576823711395264, "train/loss_total": 0.5497124195098877 }, { "epoch": 2.6337162703713597, "step": 9858, "train/loss_ctc": 0.9682905673980713, "train/loss_error": 0.5036773085594177, "train/loss_total": 0.5965999364852905 }, { "epoch": 2.6339834357467273, "step": 9859, "train/loss_ctc": 0.7262192964553833, "train/loss_error": 0.48243793845176697, "train/loss_total": 0.5311942100524902 }, { "epoch": 2.6342506011220945, "grad_norm": 2.136413812637329, "learning_rate": 1.4204114346780657e-05, "loss": 0.5284, "step": 9860 }, { "epoch": 2.6342506011220945, "step": 9860, "train/loss_ctc": 0.5924854278564453, "train/loss_error": 0.485539972782135, "train/loss_total": 0.5069290995597839 }, { "epoch": 2.6345177664974617, "step": 9861, "train/loss_ctc": 1.7485491037368774, "train/loss_error": 0.4544820189476013, "train/loss_total": 0.7132954597473145 }, { "epoch": 2.6347849318728294, "step": 9862, "train/loss_ctc": 0.3415864109992981, "train/loss_error": 0.427910178899765, "train/loss_total": 0.410645455121994 }, { "epoch": 2.6350520972481966, "step": 9863, "train/loss_ctc": 1.0839550495147705, "train/loss_error": 0.4445553123950958, "train/loss_total": 0.5724352598190308 }, { "epoch": 2.6353192626235638, "step": 9864, "train/loss_ctc": 0.7300016283988953, "train/loss_error": 0.4825703203678131, "train/loss_total": 0.5320565700531006 }, { "epoch": 2.6355864279989314, "step": 9865, "train/loss_ctc": 0.5258513689041138, "train/loss_error": 0.4270440638065338, "train/loss_total": 0.44680553674697876 }, { "epoch": 2.6358535933742986, "step": 9866, "train/loss_ctc": 1.053501844406128, 
"train/loss_error": 0.4968307316303253, "train/loss_total": 0.6081649661064148 }, { "epoch": 2.636120758749666, "step": 9867, "train/loss_ctc": 0.7850064039230347, "train/loss_error": 0.4342977702617645, "train/loss_total": 0.5044394731521606 }, { "epoch": 2.6363879241250334, "step": 9868, "train/loss_ctc": 0.7057053446769714, "train/loss_error": 0.4644494354724884, "train/loss_total": 0.512700617313385 }, { "epoch": 2.6366550895004006, "step": 9869, "train/loss_ctc": 0.4041992127895355, "train/loss_error": 0.4732014238834381, "train/loss_total": 0.4594009816646576 }, { "epoch": 2.6369222548757683, "grad_norm": 5.708249568939209, "learning_rate": 1.4188084424258617e-05, "loss": 0.5267, "step": 9870 }, { "epoch": 2.6369222548757683, "step": 9870, "train/loss_ctc": 1.3200782537460327, "train/loss_error": 0.44468626379966736, "train/loss_total": 0.6197646856307983 }, { "epoch": 2.6371894202511355, "step": 9871, "train/loss_ctc": 0.48745667934417725, "train/loss_error": 0.3648829460144043, "train/loss_total": 0.38939768075942993 }, { "epoch": 2.6374565856265026, "step": 9872, "train/loss_ctc": 0.7386965155601501, "train/loss_error": 0.5127639174461365, "train/loss_total": 0.5579504370689392 }, { "epoch": 2.6377237510018703, "step": 9873, "train/loss_ctc": 0.8296729922294617, "train/loss_error": 0.45929402112960815, "train/loss_total": 0.5333698391914368 }, { "epoch": 2.6379909163772375, "step": 9874, "train/loss_ctc": 0.9623384475708008, "train/loss_error": 0.37188348174095154, "train/loss_total": 0.4899744689464569 }, { "epoch": 2.638258081752605, "step": 9875, "train/loss_ctc": 0.35714930295944214, "train/loss_error": 0.40205061435699463, "train/loss_total": 0.3930703401565552 }, { "epoch": 2.6385252471279723, "step": 9876, "train/loss_ctc": 0.5411506295204163, "train/loss_error": 0.4852134585380554, "train/loss_total": 0.4964008927345276 }, { "epoch": 2.6387924125033395, "step": 9877, "train/loss_ctc": 0.7366514205932617, "train/loss_error": 0.44190114736557007, 
"train/loss_total": 0.5008512139320374 }, { "epoch": 2.639059577878707, "step": 9878, "train/loss_ctc": 0.7478044033050537, "train/loss_error": 0.44762086868286133, "train/loss_total": 0.5076575875282288 }, { "epoch": 2.6393267432540743, "step": 9879, "train/loss_ctc": 0.8996784687042236, "train/loss_error": 0.421444833278656, "train/loss_total": 0.5170915722846985 }, { "epoch": 2.6395939086294415, "grad_norm": 1.4672538042068481, "learning_rate": 1.4172054501736575e-05, "loss": 0.5006, "step": 9880 }, { "epoch": 2.6395939086294415, "step": 9880, "train/loss_ctc": 0.9071458578109741, "train/loss_error": 0.45588862895965576, "train/loss_total": 0.5461400747299194 }, { "epoch": 2.639861074004809, "step": 9881, "train/loss_ctc": 0.4287387430667877, "train/loss_error": 0.48846614360809326, "train/loss_total": 0.4765206575393677 }, { "epoch": 2.6401282393801764, "step": 9882, "train/loss_ctc": 0.30260783433914185, "train/loss_error": 0.4037437438964844, "train/loss_total": 0.3835165798664093 }, { "epoch": 2.6403954047555436, "step": 9883, "train/loss_ctc": 1.6361010074615479, "train/loss_error": 0.5115867853164673, "train/loss_total": 0.7364896535873413 }, { "epoch": 2.640662570130911, "step": 9884, "train/loss_ctc": 0.3734463155269623, "train/loss_error": 0.43284088373184204, "train/loss_total": 0.42096197605133057 }, { "epoch": 2.6409297355062784, "step": 9885, "train/loss_ctc": 0.809546947479248, "train/loss_error": 0.4634651243686676, "train/loss_total": 0.5326814651489258 }, { "epoch": 2.6411969008816456, "step": 9886, "train/loss_ctc": 0.6047502756118774, "train/loss_error": 0.5117078423500061, "train/loss_total": 0.5303163528442383 }, { "epoch": 2.6414640662570132, "step": 9887, "train/loss_ctc": 0.8184468746185303, "train/loss_error": 0.44994083046913147, "train/loss_total": 0.5236420631408691 }, { "epoch": 2.6417312316323804, "step": 9888, "train/loss_ctc": 0.5635912418365479, "train/loss_error": 0.4197458028793335, "train/loss_total": 0.4485149085521698 }, { 
"epoch": 2.6419983970077476, "step": 9889, "train/loss_ctc": 1.3060669898986816, "train/loss_error": 0.44812852144241333, "train/loss_total": 0.619716227054596 }, { "epoch": 2.6422655623831153, "grad_norm": 2.0941712856292725, "learning_rate": 1.4156024579214533e-05, "loss": 0.5218, "step": 9890 }, { "epoch": 2.6422655623831153, "step": 9890, "train/loss_ctc": 0.47576087713241577, "train/loss_error": 0.438529372215271, "train/loss_total": 0.445975661277771 }, { "epoch": 2.6425327277584825, "step": 9891, "train/loss_ctc": 0.5106775760650635, "train/loss_error": 0.43367800116539, "train/loss_total": 0.44907790422439575 }, { "epoch": 2.6427998931338497, "step": 9892, "train/loss_ctc": 0.558527946472168, "train/loss_error": 0.39920076727867126, "train/loss_total": 0.43106621503829956 }, { "epoch": 2.6430670585092173, "step": 9893, "train/loss_ctc": 0.9033485651016235, "train/loss_error": 0.4636727571487427, "train/loss_total": 0.5516079068183899 }, { "epoch": 2.6433342238845845, "step": 9894, "train/loss_ctc": 0.5686328411102295, "train/loss_error": 0.4447178244590759, "train/loss_total": 0.4695008397102356 }, { "epoch": 2.6436013892599517, "step": 9895, "train/loss_ctc": 0.34558558464050293, "train/loss_error": 0.4680473804473877, "train/loss_total": 0.4435550272464752 }, { "epoch": 2.6438685546353193, "step": 9896, "train/loss_ctc": 0.3277302384376526, "train/loss_error": 0.39279794692993164, "train/loss_total": 0.37978440523147583 }, { "epoch": 2.6441357200106865, "step": 9897, "train/loss_ctc": 0.6599055528640747, "train/loss_error": 0.37095779180526733, "train/loss_total": 0.42874735593795776 }, { "epoch": 2.6444028853860537, "step": 9898, "train/loss_ctc": 0.7773313522338867, "train/loss_error": 0.4442870020866394, "train/loss_total": 0.5108959078788757 }, { "epoch": 2.6446700507614214, "step": 9899, "train/loss_ctc": 0.5290278792381287, "train/loss_error": 0.409999281167984, "train/loss_total": 0.43380501866340637 }, { "epoch": 2.6449372161367886, "grad_norm": 
1.2897273302078247, "learning_rate": 1.4139994656692493e-05, "loss": 0.4544, "step": 9900 }, { "epoch": 2.6449372161367886, "step": 9900, "train/loss_ctc": 1.0874223709106445, "train/loss_error": 0.5368616580963135, "train/loss_total": 0.6469738483428955 }, { "epoch": 2.6452043815121558, "step": 9901, "train/loss_ctc": 0.8341138362884521, "train/loss_error": 0.444460928440094, "train/loss_total": 0.5223914980888367 }, { "epoch": 2.6454715468875234, "step": 9902, "train/loss_ctc": 1.1777112483978271, "train/loss_error": 0.48105448484420776, "train/loss_total": 0.6203858256340027 }, { "epoch": 2.6457387122628906, "step": 9903, "train/loss_ctc": 0.6056133508682251, "train/loss_error": 0.48459479212760925, "train/loss_total": 0.5087985396385193 }, { "epoch": 2.6460058776382582, "step": 9904, "train/loss_ctc": 0.2949492931365967, "train/loss_error": 0.5044176578521729, "train/loss_total": 0.4625239968299866 }, { "epoch": 2.6462730430136254, "step": 9905, "train/loss_ctc": 0.5013723373413086, "train/loss_error": 0.49613770842552185, "train/loss_total": 0.4971846342086792 }, { "epoch": 2.6465402083889926, "step": 9906, "train/loss_ctc": 0.5186330080032349, "train/loss_error": 0.429479718208313, "train/loss_total": 0.4473103880882263 }, { "epoch": 2.6468073737643603, "step": 9907, "train/loss_ctc": 0.8005956411361694, "train/loss_error": 0.4433671236038208, "train/loss_total": 0.5148128271102905 }, { "epoch": 2.6470745391397275, "step": 9908, "train/loss_ctc": 1.3408234119415283, "train/loss_error": 0.4905996024608612, "train/loss_total": 0.6606444120407104 }, { "epoch": 2.647341704515095, "step": 9909, "train/loss_ctc": 1.168348789215088, "train/loss_error": 0.4868238568305969, "train/loss_total": 0.6231288909912109 }, { "epoch": 2.6476088698904623, "grad_norm": 2.07476544380188, "learning_rate": 1.4123964734170453e-05, "loss": 0.5504, "step": 9910 }, { "epoch": 2.6476088698904623, "step": 9910, "train/loss_ctc": 0.5710108876228333, "train/loss_error": 0.4985803961753845, 
"train/loss_total": 0.5130665302276611 }, { "epoch": 2.6478760352658295, "step": 9911, "train/loss_ctc": 1.1133646965026855, "train/loss_error": 0.4918113052845001, "train/loss_total": 0.6161220073699951 }, { "epoch": 2.648143200641197, "step": 9912, "train/loss_ctc": 0.7514474987983704, "train/loss_error": 0.42706361413002014, "train/loss_total": 0.4919404089450836 }, { "epoch": 2.6484103660165643, "step": 9913, "train/loss_ctc": 0.4982476532459259, "train/loss_error": 0.4068042039871216, "train/loss_total": 0.425092875957489 }, { "epoch": 2.6486775313919315, "step": 9914, "train/loss_ctc": 1.1836551427841187, "train/loss_error": 0.389198362827301, "train/loss_total": 0.5480897426605225 }, { "epoch": 2.648944696767299, "step": 9915, "train/loss_ctc": 1.1920965909957886, "train/loss_error": 0.40845605731010437, "train/loss_total": 0.5651841759681702 }, { "epoch": 2.6492118621426664, "step": 9916, "train/loss_ctc": 0.7167582511901855, "train/loss_error": 0.392467200756073, "train/loss_total": 0.45732539892196655 }, { "epoch": 2.6494790275180335, "step": 9917, "train/loss_ctc": 0.7078120708465576, "train/loss_error": 0.42626580595970154, "train/loss_total": 0.48257505893707275 }, { "epoch": 2.649746192893401, "step": 9918, "train/loss_ctc": 0.9514780044555664, "train/loss_error": 0.5527856945991516, "train/loss_total": 0.6325241327285767 }, { "epoch": 2.6500133582687684, "step": 9919, "train/loss_ctc": 0.6208053231239319, "train/loss_error": 0.4057186245918274, "train/loss_total": 0.4487359821796417 }, { "epoch": 2.6502805236441356, "grad_norm": 1.8487318754196167, "learning_rate": 1.410793481164841e-05, "loss": 0.5181, "step": 9920 }, { "epoch": 2.6502805236441356, "step": 9920, "train/loss_ctc": 0.5918779969215393, "train/loss_error": 0.43210485577583313, "train/loss_total": 0.4640595018863678 }, { "epoch": 2.650547689019503, "step": 9921, "train/loss_ctc": 1.0484380722045898, "train/loss_error": 0.4712648093700409, "train/loss_total": 0.5866994857788086 }, { 
"epoch": 2.6508148543948704, "step": 9922, "train/loss_ctc": 0.5039361715316772, "train/loss_error": 0.42570003867149353, "train/loss_total": 0.44134730100631714 }, { "epoch": 2.6510820197702376, "step": 9923, "train/loss_ctc": 0.5648117661476135, "train/loss_error": 0.4463901221752167, "train/loss_total": 0.47007447481155396 }, { "epoch": 2.6513491851456052, "step": 9924, "train/loss_ctc": 0.9071040153503418, "train/loss_error": 0.5048108100891113, "train/loss_total": 0.5852694511413574 }, { "epoch": 2.6516163505209724, "step": 9925, "train/loss_ctc": 0.5643404126167297, "train/loss_error": 0.47333279252052307, "train/loss_total": 0.49153435230255127 }, { "epoch": 2.6518835158963396, "step": 9926, "train/loss_ctc": 0.7490819692611694, "train/loss_error": 0.5136584043502808, "train/loss_total": 0.5607430934906006 }, { "epoch": 2.6521506812717073, "step": 9927, "train/loss_ctc": 0.8266088962554932, "train/loss_error": 0.3913242220878601, "train/loss_total": 0.4783811569213867 }, { "epoch": 2.6524178466470745, "step": 9928, "train/loss_ctc": 1.0771737098693848, "train/loss_error": 0.5100165009498596, "train/loss_total": 0.6234479546546936 }, { "epoch": 2.6526850120224417, "step": 9929, "train/loss_ctc": 0.6409914493560791, "train/loss_error": 0.43154364824295044, "train/loss_total": 0.4734332263469696 }, { "epoch": 2.6529521773978093, "grad_norm": 2.953845977783203, "learning_rate": 1.409190488912637e-05, "loss": 0.5175, "step": 9930 }, { "epoch": 2.6529521773978093, "step": 9930, "train/loss_ctc": 0.5553776025772095, "train/loss_error": 0.48071756958961487, "train/loss_total": 0.4956495761871338 }, { "epoch": 2.6532193427731765, "step": 9931, "train/loss_ctc": 1.0433740615844727, "train/loss_error": 0.42518100142478943, "train/loss_total": 0.5488196611404419 }, { "epoch": 2.6534865081485437, "step": 9932, "train/loss_ctc": 0.5309934020042419, "train/loss_error": 0.36817681789398193, "train/loss_total": 0.4007401466369629 }, { "epoch": 2.6537536735239113, "step": 
9933, "train/loss_ctc": 0.16425946354866028, "train/loss_error": 0.45113569498062134, "train/loss_total": 0.39376047253608704 }, { "epoch": 2.6540208388992785, "step": 9934, "train/loss_ctc": 0.28256699442863464, "train/loss_error": 0.44122228026390076, "train/loss_total": 0.40949124097824097 }, { "epoch": 2.6542880042746457, "step": 9935, "train/loss_ctc": 0.4540860652923584, "train/loss_error": 0.3803568184375763, "train/loss_total": 0.39510267972946167 }, { "epoch": 2.6545551696500134, "step": 9936, "train/loss_ctc": 0.5005896091461182, "train/loss_error": 0.4425623118877411, "train/loss_total": 0.45416778326034546 }, { "epoch": 2.6548223350253806, "step": 9937, "train/loss_ctc": 0.599281907081604, "train/loss_error": 0.41184642910957336, "train/loss_total": 0.4493335485458374 }, { "epoch": 2.655089500400748, "step": 9938, "train/loss_ctc": 0.3870839774608612, "train/loss_error": 0.4646686911582947, "train/loss_total": 0.44915175437927246 }, { "epoch": 2.6553566657761154, "step": 9939, "train/loss_ctc": 0.7441203594207764, "train/loss_error": 0.45271188020706177, "train/loss_total": 0.5109935998916626 }, { "epoch": 2.655623831151483, "grad_norm": 1.4163042306900024, "learning_rate": 1.4075874966604329e-05, "loss": 0.4507, "step": 9940 }, { "epoch": 2.655623831151483, "step": 9940, "train/loss_ctc": 0.4077153205871582, "train/loss_error": 0.46821147203445435, "train/loss_total": 0.45611223578453064 }, { "epoch": 2.6558909965268502, "step": 9941, "train/loss_ctc": 0.7131959199905396, "train/loss_error": 0.4433716833591461, "train/loss_total": 0.4973365366458893 }, { "epoch": 2.6561581619022174, "step": 9942, "train/loss_ctc": 2.9252400398254395, "train/loss_error": 0.4462062418460846, "train/loss_total": 0.9420130252838135 }, { "epoch": 2.656425327277585, "step": 9943, "train/loss_ctc": 1.2173200845718384, "train/loss_error": 0.43837296962738037, "train/loss_total": 0.5941624045372009 }, { "epoch": 2.6566924926529523, "step": 9944, "train/loss_ctc": 
0.34687936305999756, "train/loss_error": 0.4086502194404602, "train/loss_total": 0.39629605412483215 }, { "epoch": 2.6569596580283195, "step": 9945, "train/loss_ctc": 0.6299921274185181, "train/loss_error": 0.4988320767879486, "train/loss_total": 0.5250641107559204 }, { "epoch": 2.657226823403687, "step": 9946, "train/loss_ctc": 0.327667236328125, "train/loss_error": 0.4797903299331665, "train/loss_total": 0.4493657350540161 }, { "epoch": 2.6574939887790543, "step": 9947, "train/loss_ctc": 0.7107663750648499, "train/loss_error": 0.40188848972320557, "train/loss_total": 0.46366405487060547 }, { "epoch": 2.6577611541544215, "step": 9948, "train/loss_ctc": 0.6525171995162964, "train/loss_error": 0.4695959687232971, "train/loss_total": 0.5061802268028259 }, { "epoch": 2.658028319529789, "step": 9949, "train/loss_ctc": 0.7934167385101318, "train/loss_error": 0.37823107838630676, "train/loss_total": 0.46126818656921387 }, { "epoch": 2.6582954849051563, "grad_norm": 5.132875919342041, "learning_rate": 1.4059845044082287e-05, "loss": 0.5291, "step": 9950 }, { "epoch": 2.6582954849051563, "step": 9950, "train/loss_ctc": 0.626029372215271, "train/loss_error": 0.4966402053833008, "train/loss_total": 0.5225180387496948 }, { "epoch": 2.6585626502805235, "step": 9951, "train/loss_ctc": 0.7344404458999634, "train/loss_error": 0.4892623722553253, "train/loss_total": 0.5382980108261108 }, { "epoch": 2.658829815655891, "step": 9952, "train/loss_ctc": 1.0760140419006348, "train/loss_error": 0.4845065474510193, "train/loss_total": 0.6028080582618713 }, { "epoch": 2.6590969810312584, "step": 9953, "train/loss_ctc": 0.5252820253372192, "train/loss_error": 0.49347054958343506, "train/loss_total": 0.4998328387737274 }, { "epoch": 2.6593641464066256, "step": 9954, "train/loss_ctc": 0.2733445167541504, "train/loss_error": 0.5169029235839844, "train/loss_total": 0.4681912362575531 }, { "epoch": 2.659631311781993, "step": 9955, "train/loss_ctc": 0.9432728290557861, "train/loss_error": 
0.45481446385383606, "train/loss_total": 0.552506148815155 }, { "epoch": 2.6598984771573604, "step": 9956, "train/loss_ctc": 0.2876605987548828, "train/loss_error": 0.4349451959133148, "train/loss_total": 0.4054882824420929 }, { "epoch": 2.6601656425327276, "step": 9957, "train/loss_ctc": 0.47011175751686096, "train/loss_error": 0.42768049240112305, "train/loss_total": 0.43616676330566406 }, { "epoch": 2.660432807908095, "step": 9958, "train/loss_ctc": 0.7210274934768677, "train/loss_error": 0.4997875690460205, "train/loss_total": 0.5440355539321899 }, { "epoch": 2.6606999732834624, "step": 9959, "train/loss_ctc": 1.239898681640625, "train/loss_error": 0.40534651279449463, "train/loss_total": 0.5722569823265076 }, { "epoch": 2.6609671386588296, "grad_norm": 4.90590763092041, "learning_rate": 1.4043815121560246e-05, "loss": 0.5142, "step": 9960 }, { "epoch": 2.6609671386588296, "step": 9960, "train/loss_ctc": 0.9432259202003479, "train/loss_error": 0.41403016448020935, "train/loss_total": 0.519869327545166 }, { "epoch": 2.6612343040341973, "step": 9961, "train/loss_ctc": 1.4519333839416504, "train/loss_error": 0.5153091549873352, "train/loss_total": 0.7026339769363403 }, { "epoch": 2.6615014694095644, "step": 9962, "train/loss_ctc": 1.030333161354065, "train/loss_error": 0.4338435232639313, "train/loss_total": 0.5531414747238159 }, { "epoch": 2.6617686347849316, "step": 9963, "train/loss_ctc": 0.8781938552856445, "train/loss_error": 0.4873389005661011, "train/loss_total": 0.5655099153518677 }, { "epoch": 2.6620358001602993, "step": 9964, "train/loss_ctc": 0.4569248557090759, "train/loss_error": 0.4412439167499542, "train/loss_total": 0.44438010454177856 }, { "epoch": 2.6623029655356665, "step": 9965, "train/loss_ctc": 0.44878244400024414, "train/loss_error": 0.4504944384098053, "train/loss_total": 0.45015203952789307 }, { "epoch": 2.6625701309110337, "step": 9966, "train/loss_ctc": 0.38161659240722656, "train/loss_error": 0.4505990445613861, "train/loss_total": 
0.43680256605148315 }, { "epoch": 2.6628372962864013, "step": 9967, "train/loss_ctc": 0.6276110410690308, "train/loss_error": 0.42167383432388306, "train/loss_total": 0.4628612995147705 }, { "epoch": 2.6631044616617685, "step": 9968, "train/loss_ctc": 0.9862191677093506, "train/loss_error": 0.45203644037246704, "train/loss_total": 0.5588729977607727 }, { "epoch": 2.663371627037136, "step": 9969, "train/loss_ctc": 0.46294525265693665, "train/loss_error": 0.5558879375457764, "train/loss_total": 0.537299394607544 }, { "epoch": 2.6636387924125033, "grad_norm": 2.514864921569824, "learning_rate": 1.4027785199038205e-05, "loss": 0.5232, "step": 9970 }, { "epoch": 2.6636387924125033, "step": 9970, "train/loss_ctc": 1.4522387981414795, "train/loss_error": 0.4833628833293915, "train/loss_total": 0.677138090133667 }, { "epoch": 2.6639059577878705, "step": 9971, "train/loss_ctc": 0.46665066480636597, "train/loss_error": 0.48051509261131287, "train/loss_total": 0.4777422249317169 }, { "epoch": 2.664173123163238, "step": 9972, "train/loss_ctc": 0.5318067073822021, "train/loss_error": 0.4543897211551666, "train/loss_total": 0.4698731303215027 }, { "epoch": 2.6644402885386054, "step": 9973, "train/loss_ctc": 0.8400377035140991, "train/loss_error": 0.4353715479373932, "train/loss_total": 0.5163047909736633 }, { "epoch": 2.664707453913973, "step": 9974, "train/loss_ctc": 0.8520660996437073, "train/loss_error": 0.4760831296443939, "train/loss_total": 0.5512797236442566 }, { "epoch": 2.66497461928934, "step": 9975, "train/loss_ctc": 0.576409101486206, "train/loss_error": 0.43447181582450867, "train/loss_total": 0.46285927295684814 }, { "epoch": 2.6652417846647074, "step": 9976, "train/loss_ctc": 0.6681076288223267, "train/loss_error": 0.5022059679031372, "train/loss_total": 0.535386323928833 }, { "epoch": 2.665508950040075, "step": 9977, "train/loss_ctc": 1.1382899284362793, "train/loss_error": 0.4696364998817444, "train/loss_total": 0.6033672094345093 }, { "epoch": 
2.6657761154154422, "step": 9978, "train/loss_ctc": 0.42628443241119385, "train/loss_error": 0.40524959564208984, "train/loss_total": 0.4094565510749817 }, { "epoch": 2.6660432807908094, "step": 9979, "train/loss_ctc": 0.9345605969429016, "train/loss_error": 0.41457951068878174, "train/loss_total": 0.5185757279396057 }, { "epoch": 2.666310446166177, "grad_norm": 3.066594362258911, "learning_rate": 1.4011755276516163e-05, "loss": 0.5222, "step": 9980 }, { "epoch": 2.666310446166177, "step": 9980, "train/loss_ctc": 0.7074844241142273, "train/loss_error": 0.43558111786842346, "train/loss_total": 0.48996180295944214 }, { "epoch": 2.6665776115415443, "step": 9981, "train/loss_ctc": 0.5699511766433716, "train/loss_error": 0.4187301993370056, "train/loss_total": 0.4489744007587433 }, { "epoch": 2.6668447769169115, "step": 9982, "train/loss_ctc": 0.5756293535232544, "train/loss_error": 0.37319427728652954, "train/loss_total": 0.413681298494339 }, { "epoch": 2.667111942292279, "step": 9983, "train/loss_ctc": 0.5253947973251343, "train/loss_error": 0.3997780382633209, "train/loss_total": 0.42490139603614807 }, { "epoch": 2.6673791076676463, "step": 9984, "train/loss_ctc": 0.779062032699585, "train/loss_error": 0.5012814402580261, "train/loss_total": 0.5568375587463379 }, { "epoch": 2.6676462730430135, "step": 9985, "train/loss_ctc": 0.5807210803031921, "train/loss_error": 0.44904056191444397, "train/loss_total": 0.4753766655921936 }, { "epoch": 2.667913438418381, "step": 9986, "train/loss_ctc": 0.6434779763221741, "train/loss_error": 0.49252110719680786, "train/loss_total": 0.5227124691009521 }, { "epoch": 2.6681806037937483, "step": 9987, "train/loss_ctc": 0.3857250213623047, "train/loss_error": 0.48843449354171753, "train/loss_total": 0.4678926169872284 }, { "epoch": 2.6684477691691155, "step": 9988, "train/loss_ctc": 0.3047400712966919, "train/loss_error": 0.47276511788368225, "train/loss_total": 0.4391601085662842 }, { "epoch": 2.668714934544483, "step": 9989, 
"train/loss_ctc": 0.7127248048782349, "train/loss_error": 0.43291306495666504, "train/loss_total": 0.48887544870376587 }, { "epoch": 2.6689820999198504, "grad_norm": 2.156761884689331, "learning_rate": 1.3995725353994122e-05, "loss": 0.4728, "step": 9990 }, { "epoch": 2.6689820999198504, "step": 9990, "train/loss_ctc": 0.6819072961807251, "train/loss_error": 0.35033658146858215, "train/loss_total": 0.4166507124900818 }, { "epoch": 2.6692492652952176, "step": 9991, "train/loss_ctc": 0.6291005611419678, "train/loss_error": 0.4831399619579315, "train/loss_total": 0.5123320817947388 }, { "epoch": 2.669516430670585, "step": 9992, "train/loss_ctc": 0.26622116565704346, "train/loss_error": 0.45391687750816345, "train/loss_total": 0.4163777530193329 }, { "epoch": 2.6697835960459524, "step": 9993, "train/loss_ctc": 0.7199465036392212, "train/loss_error": 0.4674111604690552, "train/loss_total": 0.5179182291030884 }, { "epoch": 2.6700507614213196, "step": 9994, "train/loss_ctc": 0.575504720211029, "train/loss_error": 0.41416382789611816, "train/loss_total": 0.4464320242404938 }, { "epoch": 2.6703179267966872, "step": 9995, "train/loss_ctc": 0.5076508522033691, "train/loss_error": 0.47594308853149414, "train/loss_total": 0.48228463530540466 }, { "epoch": 2.6705850921720544, "step": 9996, "train/loss_ctc": 0.6736701130867004, "train/loss_error": 0.48703449964523315, "train/loss_total": 0.5243616104125977 }, { "epoch": 2.6708522575474216, "step": 9997, "train/loss_ctc": 0.9010272026062012, "train/loss_error": 0.4077410101890564, "train/loss_total": 0.5063982605934143 }, { "epoch": 2.6711194229227893, "step": 9998, "train/loss_ctc": 0.9394989609718323, "train/loss_error": 0.3993310034275055, "train/loss_total": 0.5073646306991577 }, { "epoch": 2.6713865882981565, "step": 9999, "train/loss_ctc": 0.7607293128967285, "train/loss_error": 0.4454462230205536, "train/loss_total": 0.5085028409957886 }, { "epoch": 2.6716537536735236, "grad_norm": 2.3930702209472656, "learning_rate": 
1.3979695431472082e-05, "loss": 0.4839, "step": 10000 }, { "epoch": 2.6716537536735236, "step": 10000, "train/loss_ctc": 0.8327140808105469, "train/loss_error": 0.44448161125183105, "train/loss_total": 0.5221281051635742 }, { "epoch": 2.6719209190488913, "step": 10001, "train/loss_ctc": 0.7206964492797852, "train/loss_error": 0.47787851095199585, "train/loss_total": 0.5264421105384827 }, { "epoch": 2.6721880844242585, "step": 10002, "train/loss_ctc": 0.883743941783905, "train/loss_error": 0.4796775281429291, "train/loss_total": 0.5604908466339111 }, { "epoch": 2.672455249799626, "step": 10003, "train/loss_ctc": 0.6552371978759766, "train/loss_error": 0.43400245904922485, "train/loss_total": 0.4782494306564331 }, { "epoch": 2.6727224151749933, "step": 10004, "train/loss_ctc": 1.2549827098846436, "train/loss_error": 0.3888179361820221, "train/loss_total": 0.5620509386062622 }, { "epoch": 2.6729895805503605, "step": 10005, "train/loss_ctc": 0.7872569561004639, "train/loss_error": 0.41068822145462036, "train/loss_total": 0.48600196838378906 }, { "epoch": 2.673256745925728, "step": 10006, "train/loss_ctc": 1.0683501958847046, "train/loss_error": 0.4887288510799408, "train/loss_total": 0.6046531200408936 }, { "epoch": 2.6735239113010953, "step": 10007, "train/loss_ctc": 0.7056101560592651, "train/loss_error": 0.48880699276924133, "train/loss_total": 0.5321676135063171 }, { "epoch": 2.673791076676463, "step": 10008, "train/loss_ctc": 1.3164936304092407, "train/loss_error": 0.5186225771903992, "train/loss_total": 0.6781967878341675 }, { "epoch": 2.67405824205183, "step": 10009, "train/loss_ctc": 1.1685936450958252, "train/loss_error": 0.5572567582130432, "train/loss_total": 0.6795241236686707 }, { "epoch": 2.6743254074271974, "grad_norm": 2.8836286067962646, "learning_rate": 1.396366550895004e-05, "loss": 0.563, "step": 10010 }, { "epoch": 2.6743254074271974, "step": 10010, "train/loss_ctc": 0.7299509644508362, "train/loss_error": 0.4038585424423218, "train/loss_total": 
0.4690770208835602 }, { "epoch": 2.674592572802565, "step": 10011, "train/loss_ctc": 1.3084708452224731, "train/loss_error": 0.4812202453613281, "train/loss_total": 0.6466703414916992 }, { "epoch": 2.674859738177932, "step": 10012, "train/loss_ctc": 0.49549829959869385, "train/loss_error": 0.4197169840335846, "train/loss_total": 0.4348732531070709 }, { "epoch": 2.6751269035532994, "step": 10013, "train/loss_ctc": 0.9107611179351807, "train/loss_error": 0.4709591269493103, "train/loss_total": 0.5589195489883423 }, { "epoch": 2.675394068928667, "step": 10014, "train/loss_ctc": 0.4014767110347748, "train/loss_error": 0.41080647706985474, "train/loss_total": 0.40894055366516113 }, { "epoch": 2.6756612343040342, "step": 10015, "train/loss_ctc": 0.43116313219070435, "train/loss_error": 0.5196313858032227, "train/loss_total": 0.501937747001648 }, { "epoch": 2.6759283996794014, "step": 10016, "train/loss_ctc": 0.43039560317993164, "train/loss_error": 0.5168507099151611, "train/loss_total": 0.4995597004890442 }, { "epoch": 2.676195565054769, "step": 10017, "train/loss_ctc": 0.5087001323699951, "train/loss_error": 0.525342583656311, "train/loss_total": 0.5220140814781189 }, { "epoch": 2.6764627304301363, "step": 10018, "train/loss_ctc": 0.5998112559318542, "train/loss_error": 0.3507840037345886, "train/loss_total": 0.4005894660949707 }, { "epoch": 2.6767298958055035, "step": 10019, "train/loss_ctc": 0.8004528284072876, "train/loss_error": 0.44972023367881775, "train/loss_total": 0.5198667645454407 }, { "epoch": 2.676997061180871, "grad_norm": 1.3371042013168335, "learning_rate": 1.3947635586428e-05, "loss": 0.4962, "step": 10020 }, { "epoch": 2.676997061180871, "step": 10020, "train/loss_ctc": 0.9830800294876099, "train/loss_error": 0.48462340235710144, "train/loss_total": 0.58431476354599 }, { "epoch": 2.6772642265562383, "step": 10021, "train/loss_ctc": 0.7727307081222534, "train/loss_error": 0.4280392825603485, "train/loss_total": 0.4969775676727295 }, { "epoch": 
2.6775313919316055, "step": 10022, "train/loss_ctc": 1.0150328874588013, "train/loss_error": 0.48550325632095337, "train/loss_total": 0.5914092063903809 }, { "epoch": 2.677798557306973, "step": 10023, "train/loss_ctc": 0.4849677085876465, "train/loss_error": 0.43376192450523376, "train/loss_total": 0.44400307536125183 }, { "epoch": 2.6780657226823403, "step": 10024, "train/loss_ctc": 0.9297140836715698, "train/loss_error": 0.4387262761592865, "train/loss_total": 0.5369238257408142 }, { "epoch": 2.6783328880577075, "step": 10025, "train/loss_ctc": 0.9146245121955872, "train/loss_error": 0.43734198808670044, "train/loss_total": 0.5327985286712646 }, { "epoch": 2.678600053433075, "step": 10026, "train/loss_ctc": 0.5552235245704651, "train/loss_error": 0.44524216651916504, "train/loss_total": 0.4672384560108185 }, { "epoch": 2.6788672188084424, "step": 10027, "train/loss_ctc": 0.9495829343795776, "train/loss_error": 0.44761165976524353, "train/loss_total": 0.5480059385299683 }, { "epoch": 2.6791343841838096, "step": 10028, "train/loss_ctc": 0.1761976182460785, "train/loss_error": 0.41231822967529297, "train/loss_total": 0.3650940954685211 }, { "epoch": 2.679401549559177, "step": 10029, "train/loss_ctc": 0.9135657548904419, "train/loss_error": 0.45454707741737366, "train/loss_total": 0.5463508367538452 }, { "epoch": 2.6796687149345444, "grad_norm": 2.163074254989624, "learning_rate": 1.3931605663905958e-05, "loss": 0.5113, "step": 10030 }, { "epoch": 2.6796687149345444, "step": 10030, "train/loss_ctc": 0.9675813913345337, "train/loss_error": 0.4219411313533783, "train/loss_total": 0.5310691595077515 }, { "epoch": 2.6799358803099116, "step": 10031, "train/loss_ctc": 0.6004884839057922, "train/loss_error": 0.5088901519775391, "train/loss_total": 0.5272098183631897 }, { "epoch": 2.6802030456852792, "step": 10032, "train/loss_ctc": 0.39803579449653625, "train/loss_error": 0.474842369556427, "train/loss_total": 0.45948106050491333 }, { "epoch": 2.6804702110606464, "step": 
10033, "train/loss_ctc": 0.5438115000724792, "train/loss_error": 0.4124181866645813, "train/loss_total": 0.43869686126708984 }, { "epoch": 2.6807373764360136, "step": 10034, "train/loss_ctc": 0.3101465404033661, "train/loss_error": 0.433820515871048, "train/loss_total": 0.4090857207775116 }, { "epoch": 2.6810045418113813, "step": 10035, "train/loss_ctc": 0.4124943017959595, "train/loss_error": 0.40279343724250793, "train/loss_total": 0.4047335982322693 }, { "epoch": 2.6812717071867485, "step": 10036, "train/loss_ctc": 0.7148041129112244, "train/loss_error": 0.41939303278923035, "train/loss_total": 0.47847527265548706 }, { "epoch": 2.681538872562116, "step": 10037, "train/loss_ctc": 0.797348141670227, "train/loss_error": 0.4233279526233673, "train/loss_total": 0.49813199043273926 }, { "epoch": 2.6818060379374833, "step": 10038, "train/loss_ctc": 1.619517207145691, "train/loss_error": 0.4460373818874359, "train/loss_total": 0.680733323097229 }, { "epoch": 2.682073203312851, "step": 10039, "train/loss_ctc": 0.7745537757873535, "train/loss_error": 0.4154631495475769, "train/loss_total": 0.48728126287460327 }, { "epoch": 2.682340368688218, "grad_norm": 2.377852201461792, "learning_rate": 1.3915575741383916e-05, "loss": 0.4915, "step": 10040 }, { "epoch": 2.682340368688218, "step": 10040, "train/loss_ctc": 1.401200294494629, "train/loss_error": 0.4903740882873535, "train/loss_total": 0.6725393533706665 }, { "epoch": 2.6826075340635853, "step": 10041, "train/loss_ctc": 0.6250509023666382, "train/loss_error": 0.4054405987262726, "train/loss_total": 0.4493626356124878 }, { "epoch": 2.682874699438953, "step": 10042, "train/loss_ctc": 1.7203736305236816, "train/loss_error": 0.4585345387458801, "train/loss_total": 0.7109023332595825 }, { "epoch": 2.68314186481432, "step": 10043, "train/loss_ctc": 0.31105154752731323, "train/loss_error": 0.461670458316803, "train/loss_total": 0.431546688079834 }, { "epoch": 2.6834090301896873, "step": 10044, "train/loss_ctc": 
0.5451366901397705, "train/loss_error": 0.4204324781894684, "train/loss_total": 0.4453733563423157 }, { "epoch": 2.683676195565055, "step": 10045, "train/loss_ctc": 1.2525413036346436, "train/loss_error": 0.3882635235786438, "train/loss_total": 0.5611190795898438 }, { "epoch": 2.683943360940422, "step": 10046, "train/loss_ctc": 0.3273790180683136, "train/loss_error": 0.4619245231151581, "train/loss_total": 0.4350154399871826 }, { "epoch": 2.6842105263157894, "step": 10047, "train/loss_ctc": 0.26069414615631104, "train/loss_error": 0.43284308910369873, "train/loss_total": 0.3984133005142212 }, { "epoch": 2.684477691691157, "step": 10048, "train/loss_ctc": 0.8300191164016724, "train/loss_error": 0.51188725233078, "train/loss_total": 0.5755136013031006 }, { "epoch": 2.684744857066524, "step": 10049, "train/loss_ctc": 0.4047721028327942, "train/loss_error": 0.43460139632225037, "train/loss_total": 0.4286355674266815 }, { "epoch": 2.6850120224418914, "grad_norm": 1.3502837419509888, "learning_rate": 1.3899545818861876e-05, "loss": 0.5108, "step": 10050 }, { "epoch": 2.6850120224418914, "step": 10050, "train/loss_ctc": 0.6529968976974487, "train/loss_error": 0.44247350096702576, "train/loss_total": 0.4845781922340393 }, { "epoch": 2.685279187817259, "step": 10051, "train/loss_ctc": 0.4146443009376526, "train/loss_error": 0.4000512361526489, "train/loss_total": 0.4029698669910431 }, { "epoch": 2.6855463531926262, "step": 10052, "train/loss_ctc": 0.5651825666427612, "train/loss_error": 0.47046977281570435, "train/loss_total": 0.4894123375415802 }, { "epoch": 2.6858135185679934, "step": 10053, "train/loss_ctc": 0.6708285212516785, "train/loss_error": 0.3817041218280792, "train/loss_total": 0.4395290017127991 }, { "epoch": 2.686080683943361, "step": 10054, "train/loss_ctc": 0.6264788508415222, "train/loss_error": 0.450809508562088, "train/loss_total": 0.48594337701797485 }, { "epoch": 2.6863478493187283, "step": 10055, "train/loss_ctc": 0.6405028104782104, 
"train/loss_error": 0.38420096039772034, "train/loss_total": 0.4354613423347473 }, { "epoch": 2.6866150146940955, "step": 10056, "train/loss_ctc": 0.6558569073677063, "train/loss_error": 0.42738229036331177, "train/loss_total": 0.4730772376060486 }, { "epoch": 2.686882180069463, "step": 10057, "train/loss_ctc": 1.9677891731262207, "train/loss_error": 0.4535049796104431, "train/loss_total": 0.7563618421554565 }, { "epoch": 2.6871493454448303, "step": 10058, "train/loss_ctc": 0.41161927580833435, "train/loss_error": 0.5053638815879822, "train/loss_total": 0.48661497235298157 }, { "epoch": 2.6874165108201975, "step": 10059, "train/loss_ctc": 0.6192055940628052, "train/loss_error": 0.5376669764518738, "train/loss_total": 0.5539746880531311 }, { "epoch": 2.687683676195565, "grad_norm": 1.714676856994629, "learning_rate": 1.3883515896339834e-05, "loss": 0.5008, "step": 10060 }, { "epoch": 2.687683676195565, "step": 10060, "train/loss_ctc": 0.4065384268760681, "train/loss_error": 0.396592915058136, "train/loss_total": 0.39858201146125793 }, { "epoch": 2.6879508415709323, "step": 10061, "train/loss_ctc": 1.2199161052703857, "train/loss_error": 0.4271186590194702, "train/loss_total": 0.5856781601905823 }, { "epoch": 2.6882180069462995, "step": 10062, "train/loss_ctc": 0.41989728808403015, "train/loss_error": 0.47367942333221436, "train/loss_total": 0.46292299032211304 }, { "epoch": 2.688485172321667, "step": 10063, "train/loss_ctc": 0.9269049167633057, "train/loss_error": 0.3856908082962036, "train/loss_total": 0.49393361806869507 }, { "epoch": 2.6887523376970344, "step": 10064, "train/loss_ctc": 0.2689904570579529, "train/loss_error": 0.360834002494812, "train/loss_total": 0.34246528148651123 }, { "epoch": 2.6890195030724016, "step": 10065, "train/loss_ctc": 0.7966304421424866, "train/loss_error": 0.4058411717414856, "train/loss_total": 0.48399901390075684 }, { "epoch": 2.689286668447769, "step": 10066, "train/loss_ctc": 0.9135485291481018, "train/loss_error": 
0.4276326894760132, "train/loss_total": 0.5248158574104309 }, { "epoch": 2.6895538338231364, "step": 10067, "train/loss_ctc": 0.5454084873199463, "train/loss_error": 0.39280346035957336, "train/loss_total": 0.42332446575164795 }, { "epoch": 2.689820999198504, "step": 10068, "train/loss_ctc": 0.8291221857070923, "train/loss_error": 0.47966238856315613, "train/loss_total": 0.5495543479919434 }, { "epoch": 2.6900881645738712, "step": 10069, "train/loss_ctc": 0.5172224044799805, "train/loss_error": 0.4439372718334198, "train/loss_total": 0.45859432220458984 }, { "epoch": 2.6903553299492384, "grad_norm": 3.4094011783599854, "learning_rate": 1.3867485973817792e-05, "loss": 0.4724, "step": 10070 }, { "epoch": 2.6903553299492384, "step": 10070, "train/loss_ctc": 0.5181170105934143, "train/loss_error": 0.4395151436328888, "train/loss_total": 0.4552355408668518 }, { "epoch": 2.690622495324606, "step": 10071, "train/loss_ctc": 1.3881691694259644, "train/loss_error": 0.4241432547569275, "train/loss_total": 0.6169484853744507 }, { "epoch": 2.6908896606999733, "step": 10072, "train/loss_ctc": 0.9538886547088623, "train/loss_error": 0.5241464376449585, "train/loss_total": 0.6100949048995972 }, { "epoch": 2.691156826075341, "step": 10073, "train/loss_ctc": 0.8695437908172607, "train/loss_error": 0.5113696455955505, "train/loss_total": 0.5830044746398926 }, { "epoch": 2.691423991450708, "step": 10074, "train/loss_ctc": 0.6013745069503784, "train/loss_error": 0.44342827796936035, "train/loss_total": 0.4750175178050995 }, { "epoch": 2.6916911568260753, "step": 10075, "train/loss_ctc": 0.9404626488685608, "train/loss_error": 0.37602007389068604, "train/loss_total": 0.488908588886261 }, { "epoch": 2.691958322201443, "step": 10076, "train/loss_ctc": 0.2790334224700928, "train/loss_error": 0.48119568824768066, "train/loss_total": 0.4407632648944855 }, { "epoch": 2.69222548757681, "step": 10077, "train/loss_ctc": 1.0332401990890503, "train/loss_error": 0.4722967743873596, 
"train/loss_total": 0.5844854712486267 }, { "epoch": 2.6924926529521773, "step": 10078, "train/loss_ctc": 1.192724585533142, "train/loss_error": 0.48966184258461, "train/loss_total": 0.6302744150161743 }, { "epoch": 2.692759818327545, "step": 10079, "train/loss_ctc": 0.9209614396095276, "train/loss_error": 0.40595167875289917, "train/loss_total": 0.5089536309242249 }, { "epoch": 2.693026983702912, "grad_norm": 1.8882240056991577, "learning_rate": 1.3851456051295752e-05, "loss": 0.5394, "step": 10080 }, { "epoch": 2.693026983702912, "step": 10080, "train/loss_ctc": 0.7888745069503784, "train/loss_error": 0.4919315278530121, "train/loss_total": 0.5513201355934143 }, { "epoch": 2.6932941490782794, "step": 10081, "train/loss_ctc": 1.1011244058609009, "train/loss_error": 0.4331750273704529, "train/loss_total": 0.5667649507522583 }, { "epoch": 2.693561314453647, "step": 10082, "train/loss_ctc": 0.4784929156303406, "train/loss_error": 0.41167131066322327, "train/loss_total": 0.42503565549850464 }, { "epoch": 2.693828479829014, "step": 10083, "train/loss_ctc": 0.7257299423217773, "train/loss_error": 0.48505309224128723, "train/loss_total": 0.5331884622573853 }, { "epoch": 2.6940956452043814, "step": 10084, "train/loss_ctc": 0.769143283367157, "train/loss_error": 0.448225736618042, "train/loss_total": 0.5124092698097229 }, { "epoch": 2.694362810579749, "step": 10085, "train/loss_ctc": 0.9528549909591675, "train/loss_error": 0.5216275453567505, "train/loss_total": 0.6078730225563049 }, { "epoch": 2.694629975955116, "step": 10086, "train/loss_ctc": 0.5644410848617554, "train/loss_error": 0.43322622776031494, "train/loss_total": 0.459469199180603 }, { "epoch": 2.6948971413304834, "step": 10087, "train/loss_ctc": 1.2021499872207642, "train/loss_error": 0.4474702477455139, "train/loss_total": 0.598406195640564 }, { "epoch": 2.695164306705851, "step": 10088, "train/loss_ctc": 1.1752197742462158, "train/loss_error": 0.47122713923454285, "train/loss_total": 0.6120256781578064 }, { 
"epoch": 2.6954314720812182, "step": 10089, "train/loss_ctc": 0.6773191690444946, "train/loss_error": 0.4542734920978546, "train/loss_total": 0.49888262152671814 }, { "epoch": 2.6956986374565854, "grad_norm": 1.0929254293441772, "learning_rate": 1.3835426128773712e-05, "loss": 0.5365, "step": 10090 }, { "epoch": 2.6956986374565854, "step": 10090, "train/loss_ctc": 0.5330203175544739, "train/loss_error": 0.42431971430778503, "train/loss_total": 0.44605985283851624 }, { "epoch": 2.695965802831953, "step": 10091, "train/loss_ctc": 0.3708265423774719, "train/loss_error": 0.42940428853034973, "train/loss_total": 0.4176887571811676 }, { "epoch": 2.6962329682073203, "step": 10092, "train/loss_ctc": 1.0669281482696533, "train/loss_error": 0.4577750265598297, "train/loss_total": 0.5796056389808655 }, { "epoch": 2.6965001335826875, "step": 10093, "train/loss_ctc": 0.4382287859916687, "train/loss_error": 0.4949447214603424, "train/loss_total": 0.48360154032707214 }, { "epoch": 2.696767298958055, "step": 10094, "train/loss_ctc": 0.46375736594200134, "train/loss_error": 0.4447135925292969, "train/loss_total": 0.4485223591327667 }, { "epoch": 2.6970344643334223, "step": 10095, "train/loss_ctc": 1.117959976196289, "train/loss_error": 0.44342631101608276, "train/loss_total": 0.5783330798149109 }, { "epoch": 2.6973016297087895, "step": 10096, "train/loss_ctc": 0.41570937633514404, "train/loss_error": 0.39787381887435913, "train/loss_total": 0.40144091844558716 }, { "epoch": 2.697568795084157, "step": 10097, "train/loss_ctc": 0.7714236974716187, "train/loss_error": 0.4552207291126251, "train/loss_total": 0.5184613466262817 }, { "epoch": 2.6978359604595243, "step": 10098, "train/loss_ctc": 0.6955657005310059, "train/loss_error": 0.4479963481426239, "train/loss_total": 0.49751025438308716 }, { "epoch": 2.6981031258348915, "step": 10099, "train/loss_ctc": 0.4828375577926636, "train/loss_error": 0.4092165231704712, "train/loss_total": 0.4239407479763031 }, { "epoch": 2.698370291210259, 
"grad_norm": 2.4872517585754395, "learning_rate": 1.381939620625167e-05, "loss": 0.4795, "step": 10100 }, { "epoch": 2.698370291210259, "step": 10100, "train/loss_ctc": 0.7566202878952026, "train/loss_error": 0.5683314800262451, "train/loss_total": 0.6059892177581787 }, { "epoch": 2.6986374565856264, "step": 10101, "train/loss_ctc": 0.43179264664649963, "train/loss_error": 0.49933382868766785, "train/loss_total": 0.4858255982398987 }, { "epoch": 2.698904621960994, "step": 10102, "train/loss_ctc": 0.855663537979126, "train/loss_error": 0.43230199813842773, "train/loss_total": 0.5169743299484253 }, { "epoch": 2.699171787336361, "step": 10103, "train/loss_ctc": 0.573576033115387, "train/loss_error": 0.4772050976753235, "train/loss_total": 0.4964793026447296 }, { "epoch": 2.6994389527117284, "step": 10104, "train/loss_ctc": 0.6588447093963623, "train/loss_error": 0.4227350056171417, "train/loss_total": 0.4699569642543793 }, { "epoch": 2.699706118087096, "step": 10105, "train/loss_ctc": 1.09761643409729, "train/loss_error": 0.4843073785305023, "train/loss_total": 0.6069691777229309 }, { "epoch": 2.6999732834624632, "step": 10106, "train/loss_ctc": 1.1996111869812012, "train/loss_error": 0.48745444416999817, "train/loss_total": 0.6298857927322388 }, { "epoch": 2.700240448837831, "step": 10107, "train/loss_ctc": 0.8671445250511169, "train/loss_error": 0.4746864140033722, "train/loss_total": 0.553178071975708 }, { "epoch": 2.700507614213198, "step": 10108, "train/loss_ctc": 0.8267430067062378, "train/loss_error": 0.3739785850048065, "train/loss_total": 0.46453148126602173 }, { "epoch": 2.7007747795885653, "step": 10109, "train/loss_ctc": 0.9765664339065552, "train/loss_error": 0.4794471263885498, "train/loss_total": 0.5788710117340088 }, { "epoch": 2.701041944963933, "grad_norm": 1.6097571849822998, "learning_rate": 1.380336628372963e-05, "loss": 0.5409, "step": 10110 }, { "epoch": 2.701041944963933, "step": 10110, "train/loss_ctc": 0.29730767011642456, "train/loss_error": 
0.409663587808609, "train/loss_total": 0.38719239830970764 }, { "epoch": 2.7013091103393, "step": 10111, "train/loss_ctc": 0.9526078104972839, "train/loss_error": 0.4047032296657562, "train/loss_total": 0.5142841339111328 }, { "epoch": 2.7015762757146673, "step": 10112, "train/loss_ctc": 1.152071237564087, "train/loss_error": 0.43949297070503235, "train/loss_total": 0.5820086598396301 }, { "epoch": 2.701843441090035, "step": 10113, "train/loss_ctc": 1.3968825340270996, "train/loss_error": 0.43090271949768066, "train/loss_total": 0.6240986585617065 }, { "epoch": 2.702110606465402, "step": 10114, "train/loss_ctc": 0.5811479687690735, "train/loss_error": 0.4730381667613983, "train/loss_total": 0.4946601390838623 }, { "epoch": 2.7023777718407693, "step": 10115, "train/loss_ctc": 0.9551475048065186, "train/loss_error": 0.46554189920425415, "train/loss_total": 0.563463032245636 }, { "epoch": 2.702644937216137, "step": 10116, "train/loss_ctc": 0.931297242641449, "train/loss_error": 0.38946962356567383, "train/loss_total": 0.4978351593017578 }, { "epoch": 2.702912102591504, "step": 10117, "train/loss_ctc": 0.6214086413383484, "train/loss_error": 0.4741396903991699, "train/loss_total": 0.5035935044288635 }, { "epoch": 2.7031792679668714, "step": 10118, "train/loss_ctc": 1.0673068761825562, "train/loss_error": 0.4436569809913635, "train/loss_total": 0.568386971950531 }, { "epoch": 2.703446433342239, "step": 10119, "train/loss_ctc": 0.8366846442222595, "train/loss_error": 0.4035055935382843, "train/loss_total": 0.4901413917541504 }, { "epoch": 2.703713598717606, "grad_norm": 1.7766011953353882, "learning_rate": 1.3787336361207588e-05, "loss": 0.5226, "step": 10120 }, { "epoch": 2.703713598717606, "step": 10120, "train/loss_ctc": 0.599912166595459, "train/loss_error": 0.3909645974636078, "train/loss_total": 0.4327540993690491 }, { "epoch": 2.7039807640929734, "step": 10121, "train/loss_ctc": 0.5636512637138367, "train/loss_error": 0.5305039286613464, "train/loss_total": 
0.5371333956718445 }, { "epoch": 2.704247929468341, "step": 10122, "train/loss_ctc": 0.9623416662216187, "train/loss_error": 0.45241767168045044, "train/loss_total": 0.5544024705886841 }, { "epoch": 2.7045150948437082, "step": 10123, "train/loss_ctc": 0.624870777130127, "train/loss_error": 0.47592446208000183, "train/loss_total": 0.505713701248169 }, { "epoch": 2.7047822602190754, "step": 10124, "train/loss_ctc": 0.5843513607978821, "train/loss_error": 0.38640087842941284, "train/loss_total": 0.4259909987449646 }, { "epoch": 2.705049425594443, "step": 10125, "train/loss_ctc": 0.4627327620983124, "train/loss_error": 0.4229373633861542, "train/loss_total": 0.43089646100997925 }, { "epoch": 2.7053165909698103, "step": 10126, "train/loss_ctc": 0.3318273425102234, "train/loss_error": 0.453739196062088, "train/loss_total": 0.4293568432331085 }, { "epoch": 2.7055837563451774, "step": 10127, "train/loss_ctc": 0.979668915271759, "train/loss_error": 0.45814400911331177, "train/loss_total": 0.5624489784240723 }, { "epoch": 2.705850921720545, "step": 10128, "train/loss_ctc": 0.3972916603088379, "train/loss_error": 0.49051353335380554, "train/loss_total": 0.47186917066574097 }, { "epoch": 2.7061180870959123, "step": 10129, "train/loss_ctc": 0.9340221881866455, "train/loss_error": 0.40410181879997253, "train/loss_total": 0.510085940361023 }, { "epoch": 2.7063852524712795, "grad_norm": 8.904211044311523, "learning_rate": 1.3771306438685547e-05, "loss": 0.4861, "step": 10130 }, { "epoch": 2.7063852524712795, "step": 10130, "train/loss_ctc": 0.45998045802116394, "train/loss_error": 0.42734426259994507, "train/loss_total": 0.4338715076446533 }, { "epoch": 2.706652417846647, "step": 10131, "train/loss_ctc": 0.9706446528434753, "train/loss_error": 0.44921964406967163, "train/loss_total": 0.5535046458244324 }, { "epoch": 2.7069195832220143, "step": 10132, "train/loss_ctc": 0.47551339864730835, "train/loss_error": 0.42873507738113403, "train/loss_total": 0.4380907416343689 }, { "epoch": 
2.7071867485973815, "step": 10133, "train/loss_ctc": 0.5381301641464233, "train/loss_error": 0.42830419540405273, "train/loss_total": 0.4502694010734558 }, { "epoch": 2.707453913972749, "step": 10134, "train/loss_ctc": 0.818089485168457, "train/loss_error": 0.43749532103538513, "train/loss_total": 0.5136141777038574 }, { "epoch": 2.7077210793481163, "step": 10135, "train/loss_ctc": 0.9765228629112244, "train/loss_error": 0.5306700468063354, "train/loss_total": 0.6198406219482422 }, { "epoch": 2.707988244723484, "step": 10136, "train/loss_ctc": 1.231157660484314, "train/loss_error": 0.43452969193458557, "train/loss_total": 0.5938553214073181 }, { "epoch": 2.708255410098851, "step": 10137, "train/loss_ctc": 0.4999401867389679, "train/loss_error": 0.4015914797782898, "train/loss_total": 0.4212612211704254 }, { "epoch": 2.708522575474219, "step": 10138, "train/loss_ctc": 0.7275824546813965, "train/loss_error": 0.4415193796157837, "train/loss_total": 0.4987320303916931 }, { "epoch": 2.708789740849586, "step": 10139, "train/loss_ctc": 0.3827160596847534, "train/loss_error": 0.4396817982196808, "train/loss_total": 0.42828866839408875 }, { "epoch": 2.709056906224953, "grad_norm": 1.7022662162780762, "learning_rate": 1.3755276516163506e-05, "loss": 0.4951, "step": 10140 }, { "epoch": 2.709056906224953, "step": 10140, "train/loss_ctc": 0.980553150177002, "train/loss_error": 0.46267327666282654, "train/loss_total": 0.5662492513656616 }, { "epoch": 2.709324071600321, "step": 10141, "train/loss_ctc": 0.6144120097160339, "train/loss_error": 0.4792301654815674, "train/loss_total": 0.5062665343284607 }, { "epoch": 2.709591236975688, "step": 10142, "train/loss_ctc": 0.7412419319152832, "train/loss_error": 0.4233439862728119, "train/loss_total": 0.48692357540130615 }, { "epoch": 2.7098584023510552, "step": 10143, "train/loss_ctc": 0.5310021042823792, "train/loss_error": 0.4257824718952179, "train/loss_total": 0.44682639837265015 }, { "epoch": 2.710125567726423, "step": 10144, 
"train/loss_ctc": 0.593043327331543, "train/loss_error": 0.4966805577278137, "train/loss_total": 0.5159531235694885 }, { "epoch": 2.71039273310179, "step": 10145, "train/loss_ctc": 0.7485355734825134, "train/loss_error": 0.47026219964027405, "train/loss_total": 0.5259168744087219 }, { "epoch": 2.7106598984771573, "step": 10146, "train/loss_ctc": 0.6312621831893921, "train/loss_error": 0.47079652547836304, "train/loss_total": 0.5028896331787109 }, { "epoch": 2.710927063852525, "step": 10147, "train/loss_ctc": 0.8249393701553345, "train/loss_error": 0.4770453870296478, "train/loss_total": 0.5466241836547852 }, { "epoch": 2.711194229227892, "step": 10148, "train/loss_ctc": 0.9330083131790161, "train/loss_error": 0.4257412254810333, "train/loss_total": 0.527194619178772 }, { "epoch": 2.7114613946032593, "step": 10149, "train/loss_ctc": 0.4585760533809662, "train/loss_error": 0.4305121600627899, "train/loss_total": 0.4361249506473541 }, { "epoch": 2.711728559978627, "grad_norm": 1.6805250644683838, "learning_rate": 1.3739246593641464e-05, "loss": 0.5061, "step": 10150 }, { "epoch": 2.711728559978627, "step": 10150, "train/loss_ctc": 0.6479572057723999, "train/loss_error": 0.4421452283859253, "train/loss_total": 0.4833076596260071 }, { "epoch": 2.711995725353994, "step": 10151, "train/loss_ctc": 0.5725560188293457, "train/loss_error": 0.4034512937068939, "train/loss_total": 0.43727225065231323 }, { "epoch": 2.7122628907293613, "step": 10152, "train/loss_ctc": 0.47020581364631653, "train/loss_error": 0.47661638259887695, "train/loss_total": 0.4753342866897583 }, { "epoch": 2.712530056104729, "step": 10153, "train/loss_ctc": 0.43749916553497314, "train/loss_error": 0.42274922132492065, "train/loss_total": 0.4256992042064667 }, { "epoch": 2.712797221480096, "step": 10154, "train/loss_ctc": 0.6502195596694946, "train/loss_error": 0.4423334300518036, "train/loss_total": 0.4839106798171997 }, { "epoch": 2.7130643868554634, "step": 10155, "train/loss_ctc": 0.6244035959243774, 
"train/loss_error": 0.3739515244960785, "train/loss_total": 0.4240419566631317 }, { "epoch": 2.713331552230831, "step": 10156, "train/loss_ctc": 0.6129335165023804, "train/loss_error": 0.49193471670150757, "train/loss_total": 0.51613450050354 }, { "epoch": 2.713598717606198, "step": 10157, "train/loss_ctc": 1.0845649242401123, "train/loss_error": 0.41652539372444153, "train/loss_total": 0.5501333475112915 }, { "epoch": 2.7138658829815654, "step": 10158, "train/loss_ctc": 0.9190133810043335, "train/loss_error": 0.3746089041233063, "train/loss_total": 0.4834898114204407 }, { "epoch": 2.714133048356933, "step": 10159, "train/loss_ctc": 1.14383065700531, "train/loss_error": 0.4959019124507904, "train/loss_total": 0.6254876852035522 }, { "epoch": 2.7144002137323002, "grad_norm": 3.507937431335449, "learning_rate": 1.3723216671119423e-05, "loss": 0.4905, "step": 10160 }, { "epoch": 2.7144002137323002, "step": 10160, "train/loss_ctc": 0.7013721466064453, "train/loss_error": 0.4698612689971924, "train/loss_total": 0.5161634683609009 }, { "epoch": 2.7146673791076674, "step": 10161, "train/loss_ctc": 1.0826616287231445, "train/loss_error": 0.4255768954753876, "train/loss_total": 0.556993842124939 }, { "epoch": 2.714934544483035, "step": 10162, "train/loss_ctc": 0.9383528232574463, "train/loss_error": 0.5155864953994751, "train/loss_total": 0.6001397967338562 }, { "epoch": 2.7152017098584023, "step": 10163, "train/loss_ctc": 0.4774720072746277, "train/loss_error": 0.5155360698699951, "train/loss_total": 0.5079232454299927 }, { "epoch": 2.7154688752337695, "step": 10164, "train/loss_ctc": 0.48144808411598206, "train/loss_error": 0.432626873254776, "train/loss_total": 0.4423910975456238 }, { "epoch": 2.715736040609137, "step": 10165, "train/loss_ctc": 0.7795336246490479, "train/loss_error": 0.4350956976413727, "train/loss_total": 0.5039832592010498 }, { "epoch": 2.7160032059845043, "step": 10166, "train/loss_ctc": 0.5036559104919434, "train/loss_error": 0.3611034154891968, 
"train/loss_total": 0.38961392641067505 }, { "epoch": 2.716270371359872, "step": 10167, "train/loss_ctc": 0.5992500185966492, "train/loss_error": 0.4457804560661316, "train/loss_total": 0.4764743745326996 }, { "epoch": 2.716537536735239, "step": 10168, "train/loss_ctc": 0.5253092050552368, "train/loss_error": 0.46496808528900146, "train/loss_total": 0.4770362973213196 }, { "epoch": 2.7168047021106063, "step": 10169, "train/loss_ctc": 0.8378511071205139, "train/loss_error": 0.42843711376190186, "train/loss_total": 0.5103198885917664 }, { "epoch": 2.717071867485974, "grad_norm": 3.7695424556732178, "learning_rate": 1.3707186748597383e-05, "loss": 0.4981, "step": 10170 }, { "epoch": 2.717071867485974, "step": 10170, "train/loss_ctc": 1.0698658227920532, "train/loss_error": 0.44812947511672974, "train/loss_total": 0.5724767446517944 }, { "epoch": 2.717339032861341, "step": 10171, "train/loss_ctc": 0.561009407043457, "train/loss_error": 0.44238898158073425, "train/loss_total": 0.4661130905151367 }, { "epoch": 2.717606198236709, "step": 10172, "train/loss_ctc": 0.42991340160369873, "train/loss_error": 0.45798084139823914, "train/loss_total": 0.45236736536026 }, { "epoch": 2.717873363612076, "step": 10173, "train/loss_ctc": 0.6506361365318298, "train/loss_error": 0.48019883036613464, "train/loss_total": 0.5142862796783447 }, { "epoch": 2.718140528987443, "step": 10174, "train/loss_ctc": 0.7747787833213806, "train/loss_error": 0.39402976632118225, "train/loss_total": 0.47017955780029297 }, { "epoch": 2.718407694362811, "step": 10175, "train/loss_ctc": 0.7118727564811707, "train/loss_error": 0.44143998622894287, "train/loss_total": 0.4955265522003174 }, { "epoch": 2.718674859738178, "step": 10176, "train/loss_ctc": 0.955184280872345, "train/loss_error": 0.5067816972732544, "train/loss_total": 0.5964622497558594 }, { "epoch": 2.718942025113545, "step": 10177, "train/loss_ctc": 0.6066125631332397, "train/loss_error": 0.43749016523361206, "train/loss_total": 0.4713146388530731 
}, { "epoch": 2.719209190488913, "step": 10178, "train/loss_ctc": 1.3982452154159546, "train/loss_error": 0.5159090757369995, "train/loss_total": 0.6923763155937195 }, { "epoch": 2.71947635586428, "step": 10179, "train/loss_ctc": 0.42033329606056213, "train/loss_error": 0.39624521136283875, "train/loss_total": 0.40106284618377686 }, { "epoch": 2.7197435212396472, "grad_norm": 1.5391086339950562, "learning_rate": 1.3691156826075341e-05, "loss": 0.5132, "step": 10180 }, { "epoch": 2.7197435212396472, "step": 10180, "train/loss_ctc": 0.8606266975402832, "train/loss_error": 0.39562249183654785, "train/loss_total": 0.48862335085868835 }, { "epoch": 2.720010686615015, "step": 10181, "train/loss_ctc": 1.2498037815093994, "train/loss_error": 0.4945386052131653, "train/loss_total": 0.645591676235199 }, { "epoch": 2.720277851990382, "step": 10182, "train/loss_ctc": 1.1705660820007324, "train/loss_error": 0.42600804567337036, "train/loss_total": 0.5749197006225586 }, { "epoch": 2.7205450173657493, "step": 10183, "train/loss_ctc": 0.4705747663974762, "train/loss_error": 0.4157775044441223, "train/loss_total": 0.426736980676651 }, { "epoch": 2.720812182741117, "step": 10184, "train/loss_ctc": 0.6881377696990967, "train/loss_error": 0.4665467441082001, "train/loss_total": 0.5108649730682373 }, { "epoch": 2.721079348116484, "step": 10185, "train/loss_ctc": 0.49487531185150146, "train/loss_error": 0.4105350375175476, "train/loss_total": 0.4274030923843384 }, { "epoch": 2.7213465134918513, "step": 10186, "train/loss_ctc": 0.4600921869277954, "train/loss_error": 0.4106742739677429, "train/loss_total": 0.4205578565597534 }, { "epoch": 2.721613678867219, "step": 10187, "train/loss_ctc": 0.6305721402168274, "train/loss_error": 0.5038348436355591, "train/loss_total": 0.5291823148727417 }, { "epoch": 2.721880844242586, "step": 10188, "train/loss_ctc": 0.6730616092681885, "train/loss_error": 0.43471214175224304, "train/loss_total": 0.48238202929496765 }, { "epoch": 2.7221480096179533, 
"step": 10189, "train/loss_ctc": 0.5993240475654602, "train/loss_error": 0.35910263657569885, "train/loss_total": 0.4071469306945801 }, { "epoch": 2.722415174993321, "grad_norm": 7.995317459106445, "learning_rate": 1.3675126903553301e-05, "loss": 0.4913, "step": 10190 }, { "epoch": 2.722415174993321, "step": 10190, "train/loss_ctc": 0.34622713923454285, "train/loss_error": 0.3858794867992401, "train/loss_total": 0.3779490292072296 }, { "epoch": 2.722682340368688, "step": 10191, "train/loss_ctc": 0.6879945993423462, "train/loss_error": 0.39872461557388306, "train/loss_total": 0.4565786123275757 }, { "epoch": 2.7229495057440554, "step": 10192, "train/loss_ctc": 1.393813133239746, "train/loss_error": 0.4947471618652344, "train/loss_total": 0.6745603680610657 }, { "epoch": 2.723216671119423, "step": 10193, "train/loss_ctc": 0.6287108659744263, "train/loss_error": 0.42035990953445435, "train/loss_total": 0.4620301127433777 }, { "epoch": 2.72348383649479, "step": 10194, "train/loss_ctc": 0.7796270847320557, "train/loss_error": 0.4600239098072052, "train/loss_total": 0.5239445567131042 }, { "epoch": 2.7237510018701574, "step": 10195, "train/loss_ctc": 1.1281449794769287, "train/loss_error": 0.5089931488037109, "train/loss_total": 0.6328235268592834 }, { "epoch": 2.724018167245525, "step": 10196, "train/loss_ctc": 0.8340023756027222, "train/loss_error": 0.44570621848106384, "train/loss_total": 0.5233654379844666 }, { "epoch": 2.7242853326208922, "step": 10197, "train/loss_ctc": 1.0836713314056396, "train/loss_error": 0.4186537563800812, "train/loss_total": 0.5516572594642639 }, { "epoch": 2.7245524979962594, "step": 10198, "train/loss_ctc": 0.33902543783187866, "train/loss_error": 0.525028645992279, "train/loss_total": 0.48782801628112793 }, { "epoch": 2.724819663371627, "step": 10199, "train/loss_ctc": 0.6787978410720825, "train/loss_error": 0.4927259087562561, "train/loss_total": 0.5299403071403503 }, { "epoch": 2.7250868287469943, "grad_norm": 6.21245813369751, 
"learning_rate": 1.3659096981031259e-05, "loss": 0.5221, "step": 10200 }, { "epoch": 2.7250868287469943, "step": 10200, "train/loss_ctc": 1.0612618923187256, "train/loss_error": 0.5411888957023621, "train/loss_total": 0.6452034711837769 }, { "epoch": 2.725353994122362, "step": 10201, "train/loss_ctc": 1.309572458267212, "train/loss_error": 0.49833783507347107, "train/loss_total": 0.6605848073959351 }, { "epoch": 2.725621159497729, "step": 10202, "train/loss_ctc": 0.943220853805542, "train/loss_error": 0.5002357959747314, "train/loss_total": 0.5888328552246094 }, { "epoch": 2.7258883248730963, "step": 10203, "train/loss_ctc": 0.33737555146217346, "train/loss_error": 0.5229985117912292, "train/loss_total": 0.4858739376068115 }, { "epoch": 2.726155490248464, "step": 10204, "train/loss_ctc": 0.5520791411399841, "train/loss_error": 0.45762869715690613, "train/loss_total": 0.47651880979537964 }, { "epoch": 2.726422655623831, "step": 10205, "train/loss_ctc": 0.6683300733566284, "train/loss_error": 0.4293117821216583, "train/loss_total": 0.4771154522895813 }, { "epoch": 2.7266898209991988, "step": 10206, "train/loss_ctc": 1.1317763328552246, "train/loss_error": 0.3838455080986023, "train/loss_total": 0.5334317088127136 }, { "epoch": 2.726956986374566, "step": 10207, "train/loss_ctc": 0.8677563667297363, "train/loss_error": 0.4047297537326813, "train/loss_total": 0.4973350763320923 }, { "epoch": 2.727224151749933, "step": 10208, "train/loss_ctc": 1.195494532585144, "train/loss_error": 0.46080100536346436, "train/loss_total": 0.6077396869659424 }, { "epoch": 2.727491317125301, "step": 10209, "train/loss_ctc": 0.4096807837486267, "train/loss_error": 0.4293937683105469, "train/loss_total": 0.4254511594772339 }, { "epoch": 2.727758482500668, "grad_norm": 1.3864291906356812, "learning_rate": 1.3643067058509217e-05, "loss": 0.5398, "step": 10210 }, { "epoch": 2.727758482500668, "step": 10210, "train/loss_ctc": 0.40003371238708496, "train/loss_error": 0.41930246353149414, 
"train/loss_total": 0.41544872522354126 }, { "epoch": 2.728025647876035, "step": 10211, "train/loss_ctc": 0.3707122206687927, "train/loss_error": 0.4079033136367798, "train/loss_total": 0.40046510100364685 }, { "epoch": 2.728292813251403, "step": 10212, "train/loss_ctc": 0.7237294912338257, "train/loss_error": 0.46374058723449707, "train/loss_total": 0.5157383680343628 }, { "epoch": 2.72855997862677, "step": 10213, "train/loss_ctc": 0.5581147074699402, "train/loss_error": 0.4625485837459564, "train/loss_total": 0.4816617965698242 }, { "epoch": 2.728827144002137, "step": 10214, "train/loss_ctc": 0.47211772203445435, "train/loss_error": 0.40333256125450134, "train/loss_total": 0.417089581489563 }, { "epoch": 2.729094309377505, "step": 10215, "train/loss_ctc": 0.8508962988853455, "train/loss_error": 0.37576138973236084, "train/loss_total": 0.4707883596420288 }, { "epoch": 2.729361474752872, "step": 10216, "train/loss_ctc": 0.6367841958999634, "train/loss_error": 0.3620004653930664, "train/loss_total": 0.41695719957351685 }, { "epoch": 2.7296286401282392, "step": 10217, "train/loss_ctc": 0.7188338041305542, "train/loss_error": 0.5234227180480957, "train/loss_total": 0.5625049471855164 }, { "epoch": 2.729895805503607, "step": 10218, "train/loss_ctc": 0.7114323377609253, "train/loss_error": 0.4235292673110962, "train/loss_total": 0.4811098575592041 }, { "epoch": 2.730162970878974, "step": 10219, "train/loss_ctc": 1.0635713338851929, "train/loss_error": 0.4738273620605469, "train/loss_total": 0.5917761921882629 }, { "epoch": 2.7304301362543413, "grad_norm": 2.3056743144989014, "learning_rate": 1.3627037135987177e-05, "loss": 0.4754, "step": 10220 }, { "epoch": 2.7304301362543413, "step": 10220, "train/loss_ctc": 0.9856400489807129, "train/loss_error": 0.48730525374412537, "train/loss_total": 0.5869722366333008 }, { "epoch": 2.730697301629709, "step": 10221, "train/loss_ctc": 1.0206485986709595, "train/loss_error": 0.4133949279785156, "train/loss_total": 0.5348457098007202 
}, { "epoch": 2.730964467005076, "step": 10222, "train/loss_ctc": 0.2136313021183014, "train/loss_error": 0.5432993769645691, "train/loss_total": 0.47736576199531555 }, { "epoch": 2.7312316323804433, "step": 10223, "train/loss_ctc": 0.8296452760696411, "train/loss_error": 0.4745573401451111, "train/loss_total": 0.545574963092804 }, { "epoch": 2.731498797755811, "step": 10224, "train/loss_ctc": 0.7028322219848633, "train/loss_error": 0.4409414827823639, "train/loss_total": 0.49331963062286377 }, { "epoch": 2.731765963131178, "step": 10225, "train/loss_ctc": 0.8393615484237671, "train/loss_error": 0.5098829865455627, "train/loss_total": 0.5757787227630615 }, { "epoch": 2.7320331285065453, "step": 10226, "train/loss_ctc": 1.144214153289795, "train/loss_error": 0.49421679973602295, "train/loss_total": 0.6242162585258484 }, { "epoch": 2.732300293881913, "step": 10227, "train/loss_ctc": 1.6650266647338867, "train/loss_error": 0.48870792984962463, "train/loss_total": 0.7239717245101929 }, { "epoch": 2.73256745925728, "step": 10228, "train/loss_ctc": 0.5765444040298462, "train/loss_error": 0.4820556640625, "train/loss_total": 0.5009534358978271 }, { "epoch": 2.7328346246326474, "step": 10229, "train/loss_ctc": 0.6829288005828857, "train/loss_error": 0.41597747802734375, "train/loss_total": 0.46936774253845215 }, { "epoch": 2.733101790008015, "grad_norm": 2.5176913738250732, "learning_rate": 1.3611007213465135e-05, "loss": 0.5532, "step": 10230 }, { "epoch": 2.733101790008015, "step": 10230, "train/loss_ctc": 0.883527398109436, "train/loss_error": 0.4508148431777954, "train/loss_total": 0.5373573303222656 }, { "epoch": 2.733368955383382, "step": 10231, "train/loss_ctc": 1.0938589572906494, "train/loss_error": 0.4460372030735016, "train/loss_total": 0.5756015777587891 }, { "epoch": 2.7336361207587494, "step": 10232, "train/loss_ctc": 0.7712342739105225, "train/loss_error": 0.4972365200519562, "train/loss_total": 0.5520360469818115 }, { "epoch": 2.733903286134117, "step": 
10233, "train/loss_ctc": 1.2457588911056519, "train/loss_error": 0.49471697211265564, "train/loss_total": 0.6449253559112549 }, { "epoch": 2.7341704515094842, "step": 10234, "train/loss_ctc": 0.6516078114509583, "train/loss_error": 0.49080777168273926, "train/loss_total": 0.5229678153991699 }, { "epoch": 2.734437616884852, "step": 10235, "train/loss_ctc": 0.34842777252197266, "train/loss_error": 0.4149514436721802, "train/loss_total": 0.4016467034816742 }, { "epoch": 2.734704782260219, "step": 10236, "train/loss_ctc": 0.8115664720535278, "train/loss_error": 0.460942804813385, "train/loss_total": 0.5310675501823425 }, { "epoch": 2.7349719476355863, "step": 10237, "train/loss_ctc": 1.1145130395889282, "train/loss_error": 0.4379499852657318, "train/loss_total": 0.573262631893158 }, { "epoch": 2.735239113010954, "step": 10238, "train/loss_ctc": 0.5988886952400208, "train/loss_error": 0.49604716897010803, "train/loss_total": 0.5166155099868774 }, { "epoch": 2.735506278386321, "step": 10239, "train/loss_ctc": 0.9771328568458557, "train/loss_error": 0.4755719006061554, "train/loss_total": 0.5758841037750244 }, { "epoch": 2.7357734437616887, "grad_norm": 2.6735587120056152, "learning_rate": 1.3594977290943093e-05, "loss": 0.5431, "step": 10240 }, { "epoch": 2.7357734437616887, "step": 10240, "train/loss_ctc": 0.6908581256866455, "train/loss_error": 0.4583520293235779, "train/loss_total": 0.5048532485961914 }, { "epoch": 2.736040609137056, "step": 10241, "train/loss_ctc": 0.7162675261497498, "train/loss_error": 0.46277326345443726, "train/loss_total": 0.5134720802307129 }, { "epoch": 2.736307774512423, "step": 10242, "train/loss_ctc": 1.5497621297836304, "train/loss_error": 0.46879908442497253, "train/loss_total": 0.684991717338562 }, { "epoch": 2.7365749398877908, "step": 10243, "train/loss_ctc": 0.28475967049598694, "train/loss_error": 0.4825246334075928, "train/loss_total": 0.4429716467857361 }, { "epoch": 2.736842105263158, "step": 10244, "train/loss_ctc": 
0.6023241281509399, "train/loss_error": 0.5508438944816589, "train/loss_total": 0.5611399412155151 }, { "epoch": 2.737109270638525, "step": 10245, "train/loss_ctc": 0.6739886403083801, "train/loss_error": 0.4424786865711212, "train/loss_total": 0.488780677318573 }, { "epoch": 2.737376436013893, "step": 10246, "train/loss_ctc": 0.8395330309867859, "train/loss_error": 0.504988431930542, "train/loss_total": 0.5718973875045776 }, { "epoch": 2.73764360138926, "step": 10247, "train/loss_ctc": 0.5225343108177185, "train/loss_error": 0.3658081293106079, "train/loss_total": 0.397153377532959 }, { "epoch": 2.737910766764627, "step": 10248, "train/loss_ctc": 0.36549049615859985, "train/loss_error": 0.3351781368255615, "train/loss_total": 0.34124061465263367 }, { "epoch": 2.738177932139995, "step": 10249, "train/loss_ctc": 1.1380960941314697, "train/loss_error": 0.3945809006690979, "train/loss_total": 0.5432839393615723 }, { "epoch": 2.738445097515362, "grad_norm": 1.437743902206421, "learning_rate": 1.3578947368421053e-05, "loss": 0.505, "step": 10250 }, { "epoch": 2.738445097515362, "step": 10250, "train/loss_ctc": 1.1884846687316895, "train/loss_error": 0.5255466103553772, "train/loss_total": 0.6581342220306396 }, { "epoch": 2.738712262890729, "step": 10251, "train/loss_ctc": 0.8555479645729065, "train/loss_error": 0.38422948122024536, "train/loss_total": 0.47849321365356445 }, { "epoch": 2.738979428266097, "step": 10252, "train/loss_ctc": 0.46223241090774536, "train/loss_error": 0.42883995175361633, "train/loss_total": 0.43551844358444214 }, { "epoch": 2.739246593641464, "step": 10253, "train/loss_ctc": 1.078380823135376, "train/loss_error": 0.41165342926979065, "train/loss_total": 0.5449989438056946 }, { "epoch": 2.7395137590168313, "step": 10254, "train/loss_ctc": 0.47413337230682373, "train/loss_error": 0.44810912013053894, "train/loss_total": 0.4533139765262604 }, { "epoch": 2.739780924392199, "step": 10255, "train/loss_ctc": 0.8058953285217285, "train/loss_error": 
0.44230300188064575, "train/loss_total": 0.5150214433670044 }, { "epoch": 2.740048089767566, "step": 10256, "train/loss_ctc": 0.6786545515060425, "train/loss_error": 0.40582823753356934, "train/loss_total": 0.460393488407135 }, { "epoch": 2.7403152551429333, "step": 10257, "train/loss_ctc": 0.550580620765686, "train/loss_error": 0.4771749973297119, "train/loss_total": 0.4918561279773712 }, { "epoch": 2.740582420518301, "step": 10258, "train/loss_ctc": 0.2783302366733551, "train/loss_error": 0.3960017263889313, "train/loss_total": 0.3724674582481384 }, { "epoch": 2.740849585893668, "step": 10259, "train/loss_ctc": 0.8544155359268188, "train/loss_error": 0.3984966278076172, "train/loss_total": 0.4896804094314575 }, { "epoch": 2.7411167512690353, "grad_norm": 1.3486659526824951, "learning_rate": 1.3562917445899013e-05, "loss": 0.49, "step": 10260 }, { "epoch": 2.7411167512690353, "step": 10260, "train/loss_ctc": 0.6869945526123047, "train/loss_error": 0.4464960992336273, "train/loss_total": 0.49459582567214966 }, { "epoch": 2.741383916644403, "step": 10261, "train/loss_ctc": 1.0289497375488281, "train/loss_error": 0.4954603910446167, "train/loss_total": 0.6021583080291748 }, { "epoch": 2.74165108201977, "step": 10262, "train/loss_ctc": 1.1069084405899048, "train/loss_error": 0.4615720808506012, "train/loss_total": 0.5906393527984619 }, { "epoch": 2.7419182473951373, "step": 10263, "train/loss_ctc": 0.3585112690925598, "train/loss_error": 0.5056073069572449, "train/loss_total": 0.47618812322616577 }, { "epoch": 2.742185412770505, "step": 10264, "train/loss_ctc": 0.5741395950317383, "train/loss_error": 0.5217882394790649, "train/loss_total": 0.5322585105895996 }, { "epoch": 2.742452578145872, "step": 10265, "train/loss_ctc": 0.4855058193206787, "train/loss_error": 0.45128336548805237, "train/loss_total": 0.45812785625457764 }, { "epoch": 2.7427197435212394, "step": 10266, "train/loss_ctc": 0.23868589103221893, "train/loss_error": 0.429307758808136, "train/loss_total": 
0.39118340611457825 }, { "epoch": 2.742986908896607, "step": 10267, "train/loss_ctc": 1.037614345550537, "train/loss_error": 0.4022541046142578, "train/loss_total": 0.5293262004852295 }, { "epoch": 2.743254074271974, "step": 10268, "train/loss_ctc": 0.4922427535057068, "train/loss_error": 0.4735448360443115, "train/loss_total": 0.47728443145751953 }, { "epoch": 2.743521239647342, "step": 10269, "train/loss_ctc": 0.30383071303367615, "train/loss_error": 0.43064719438552856, "train/loss_total": 0.4052838981151581 }, { "epoch": 2.743788405022709, "grad_norm": 1.9855425357818604, "learning_rate": 1.354688752337697e-05, "loss": 0.4957, "step": 10270 }, { "epoch": 2.743788405022709, "step": 10270, "train/loss_ctc": 0.22884497046470642, "train/loss_error": 0.3858444392681122, "train/loss_total": 0.35444456338882446 }, { "epoch": 2.7440555703980767, "step": 10271, "train/loss_ctc": 0.742964506149292, "train/loss_error": 0.48742467164993286, "train/loss_total": 0.5385326743125916 }, { "epoch": 2.744322735773444, "step": 10272, "train/loss_ctc": 0.7455164790153503, "train/loss_error": 0.3782912790775299, "train/loss_total": 0.45173633098602295 }, { "epoch": 2.744589901148811, "step": 10273, "train/loss_ctc": 1.0280647277832031, "train/loss_error": 0.5007538795471191, "train/loss_total": 0.6062160730361938 }, { "epoch": 2.7448570665241787, "step": 10274, "train/loss_ctc": 0.5030739903450012, "train/loss_error": 0.498301237821579, "train/loss_total": 0.4992557764053345 }, { "epoch": 2.745124231899546, "step": 10275, "train/loss_ctc": 0.5856205224990845, "train/loss_error": 0.44793808460235596, "train/loss_total": 0.47547459602355957 }, { "epoch": 2.745391397274913, "step": 10276, "train/loss_ctc": 0.8256106376647949, "train/loss_error": 0.45021510124206543, "train/loss_total": 0.5252942442893982 }, { "epoch": 2.7456585626502807, "step": 10277, "train/loss_ctc": 0.5711629986763, "train/loss_error": 0.4789750277996063, "train/loss_total": 0.49741262197494507 }, { "epoch": 
2.745925728025648, "step": 10278, "train/loss_ctc": 0.5402662754058838, "train/loss_error": 0.5419432520866394, "train/loss_total": 0.5416078567504883 }, { "epoch": 2.746192893401015, "step": 10279, "train/loss_ctc": 0.4568732678890228, "train/loss_error": 0.4583137035369873, "train/loss_total": 0.45802563428878784 }, { "epoch": 2.7464600587763828, "grad_norm": 1.2692385911941528, "learning_rate": 1.353085760085493e-05, "loss": 0.4948, "step": 10280 }, { "epoch": 2.7464600587763828, "step": 10280, "train/loss_ctc": 0.7152957916259766, "train/loss_error": 0.4944106638431549, "train/loss_total": 0.5385876893997192 }, { "epoch": 2.74672722415175, "step": 10281, "train/loss_ctc": 0.4163331091403961, "train/loss_error": 0.4900801479816437, "train/loss_total": 0.47533074021339417 }, { "epoch": 2.746994389527117, "step": 10282, "train/loss_ctc": 0.5035020709037781, "train/loss_error": 0.4477156400680542, "train/loss_total": 0.45887291431427 }, { "epoch": 2.747261554902485, "step": 10283, "train/loss_ctc": 0.8814247846603394, "train/loss_error": 0.49876484274864197, "train/loss_total": 0.5752968192100525 }, { "epoch": 2.747528720277852, "step": 10284, "train/loss_ctc": 0.7006586194038391, "train/loss_error": 0.44639745354652405, "train/loss_total": 0.4972497224807739 }, { "epoch": 2.747795885653219, "step": 10285, "train/loss_ctc": 0.5358814001083374, "train/loss_error": 0.45567840337753296, "train/loss_total": 0.47171899676322937 }, { "epoch": 2.748063051028587, "step": 10286, "train/loss_ctc": 0.4080739915370941, "train/loss_error": 0.457614928483963, "train/loss_total": 0.4477067291736603 }, { "epoch": 2.748330216403954, "step": 10287, "train/loss_ctc": 0.7893681526184082, "train/loss_error": 0.4329100549221039, "train/loss_total": 0.5042016506195068 }, { "epoch": 2.7485973817793212, "step": 10288, "train/loss_ctc": 0.4351390600204468, "train/loss_error": 0.44490116834640503, "train/loss_total": 0.44294875860214233 }, { "epoch": 2.748864547154689, "step": 10289, 
"train/loss_ctc": 0.4307796061038971, "train/loss_error": 0.46957921981811523, "train/loss_total": 0.46181929111480713 }, { "epoch": 2.749131712530056, "grad_norm": 2.5264244079589844, "learning_rate": 1.3514827678332889e-05, "loss": 0.4874, "step": 10290 }, { "epoch": 2.749131712530056, "step": 10290, "train/loss_ctc": 0.38303864002227783, "train/loss_error": 0.4606809616088867, "train/loss_total": 0.44515252113342285 }, { "epoch": 2.7493988779054233, "step": 10291, "train/loss_ctc": 1.1684365272521973, "train/loss_error": 0.42934450507164, "train/loss_total": 0.5771629214286804 }, { "epoch": 2.749666043280791, "step": 10292, "train/loss_ctc": 1.0173453092575073, "train/loss_error": 0.46814873814582825, "train/loss_total": 0.5779880285263062 }, { "epoch": 2.749933208656158, "step": 10293, "train/loss_ctc": 0.6823591589927673, "train/loss_error": 0.45868635177612305, "train/loss_total": 0.5034209489822388 }, { "epoch": 2.7502003740315253, "step": 10294, "train/loss_ctc": 0.4977003335952759, "train/loss_error": 0.4907046854496002, "train/loss_total": 0.49210381507873535 }, { "epoch": 2.750467539406893, "step": 10295, "train/loss_ctc": 0.9995235800743103, "train/loss_error": 0.4061059355735779, "train/loss_total": 0.5247894525527954 }, { "epoch": 2.75073470478226, "step": 10296, "train/loss_ctc": 0.8747696876525879, "train/loss_error": 0.389262318611145, "train/loss_total": 0.48636379837989807 }, { "epoch": 2.7510018701576273, "step": 10297, "train/loss_ctc": 0.2891126275062561, "train/loss_error": 0.4654841423034668, "train/loss_total": 0.43020984530448914 }, { "epoch": 2.751269035532995, "step": 10298, "train/loss_ctc": 0.5334110260009766, "train/loss_error": 0.43181943893432617, "train/loss_total": 0.4521377682685852 }, { "epoch": 2.751536200908362, "step": 10299, "train/loss_ctc": 0.5018960237503052, "train/loss_error": 0.41059401631355286, "train/loss_total": 0.42885440587997437 }, { "epoch": 2.75180336628373, "grad_norm": 1.7875301837921143, "learning_rate": 
1.3498797755810847e-05, "loss": 0.4918, "step": 10300 }, { "epoch": 2.75180336628373, "step": 10300, "train/loss_ctc": 1.0620224475860596, "train/loss_error": 0.4620630741119385, "train/loss_total": 0.5820549726486206 }, { "epoch": 2.752070531659097, "step": 10301, "train/loss_ctc": 0.7092807292938232, "train/loss_error": 0.4189070165157318, "train/loss_total": 0.4769817590713501 }, { "epoch": 2.752337697034464, "step": 10302, "train/loss_ctc": 0.9723295569419861, "train/loss_error": 0.511395275592804, "train/loss_total": 0.6035821437835693 }, { "epoch": 2.752604862409832, "step": 10303, "train/loss_ctc": 0.40900295972824097, "train/loss_error": 0.4495548605918884, "train/loss_total": 0.4414445161819458 }, { "epoch": 2.752872027785199, "step": 10304, "train/loss_ctc": 1.0673294067382812, "train/loss_error": 0.44432851672172546, "train/loss_total": 0.5689287185668945 }, { "epoch": 2.7531391931605667, "step": 10305, "train/loss_ctc": 0.4404996335506439, "train/loss_error": 0.46676620841026306, "train/loss_total": 0.46151289343833923 }, { "epoch": 2.753406358535934, "step": 10306, "train/loss_ctc": 0.5550857186317444, "train/loss_error": 0.4785480499267578, "train/loss_total": 0.4938555955886841 }, { "epoch": 2.753673523911301, "step": 10307, "train/loss_ctc": 1.3627521991729736, "train/loss_error": 0.47581127285957336, "train/loss_total": 0.6531994342803955 }, { "epoch": 2.7539406892866687, "step": 10308, "train/loss_ctc": 0.8538225889205933, "train/loss_error": 0.4949150085449219, "train/loss_total": 0.5666965246200562 }, { "epoch": 2.754207854662036, "step": 10309, "train/loss_ctc": 0.7660871744155884, "train/loss_error": 0.46290045976638794, "train/loss_total": 0.523537814617157 }, { "epoch": 2.754475020037403, "grad_norm": 2.4935314655303955, "learning_rate": 1.3482767833288806e-05, "loss": 0.5372, "step": 10310 }, { "epoch": 2.754475020037403, "step": 10310, "train/loss_ctc": 0.5363714694976807, "train/loss_error": 0.41871416568756104, "train/loss_total": 
0.44224563241004944 }, { "epoch": 2.7547421854127707, "step": 10311, "train/loss_ctc": 0.5505799055099487, "train/loss_error": 0.4985663890838623, "train/loss_total": 0.5089691281318665 }, { "epoch": 2.755009350788138, "step": 10312, "train/loss_ctc": 0.8226540684700012, "train/loss_error": 0.5197412967681885, "train/loss_total": 0.5803238153457642 }, { "epoch": 2.755276516163505, "step": 10313, "train/loss_ctc": 0.9106042385101318, "train/loss_error": 0.46019288897514343, "train/loss_total": 0.5502751469612122 }, { "epoch": 2.7555436815388727, "step": 10314, "train/loss_ctc": 0.5330963134765625, "train/loss_error": 0.4659722149372101, "train/loss_total": 0.4793970584869385 }, { "epoch": 2.75581084691424, "step": 10315, "train/loss_ctc": 1.0216584205627441, "train/loss_error": 0.45164206624031067, "train/loss_total": 0.5656453371047974 }, { "epoch": 2.756078012289607, "step": 10316, "train/loss_ctc": 0.3761785626411438, "train/loss_error": 0.3758224844932556, "train/loss_total": 0.3758937120437622 }, { "epoch": 2.7563451776649748, "step": 10317, "train/loss_ctc": 0.959017813205719, "train/loss_error": 0.4023497998714447, "train/loss_total": 0.5136833786964417 }, { "epoch": 2.756612343040342, "step": 10318, "train/loss_ctc": 0.8289129137992859, "train/loss_error": 0.49929603934288025, "train/loss_total": 0.5652194023132324 }, { "epoch": 2.756879508415709, "step": 10319, "train/loss_ctc": 0.8212815523147583, "train/loss_error": 0.460286408662796, "train/loss_total": 0.5324854254722595 }, { "epoch": 2.757146673791077, "grad_norm": 1.7366148233413696, "learning_rate": 1.3466737910766765e-05, "loss": 0.5114, "step": 10320 }, { "epoch": 2.757146673791077, "step": 10320, "train/loss_ctc": 0.5990349054336548, "train/loss_error": 0.48474186658859253, "train/loss_total": 0.5076004862785339 }, { "epoch": 2.757413839166444, "step": 10321, "train/loss_ctc": 1.021558403968811, "train/loss_error": 0.4544495940208435, "train/loss_total": 0.5678713917732239 }, { "epoch": 
2.757681004541811, "step": 10322, "train/loss_ctc": 0.5705310106277466, "train/loss_error": 0.39896997809410095, "train/loss_total": 0.43328219652175903 }, { "epoch": 2.757948169917179, "step": 10323, "train/loss_ctc": 1.1604177951812744, "train/loss_error": 0.42544662952423096, "train/loss_total": 0.5724408626556396 }, { "epoch": 2.758215335292546, "step": 10324, "train/loss_ctc": 0.6782200932502747, "train/loss_error": 0.4006419777870178, "train/loss_total": 0.4561575949192047 }, { "epoch": 2.7584825006679132, "step": 10325, "train/loss_ctc": 0.6366262435913086, "train/loss_error": 0.4035090506076813, "train/loss_total": 0.45013248920440674 }, { "epoch": 2.758749666043281, "step": 10326, "train/loss_ctc": 0.3410415053367615, "train/loss_error": 0.4463910758495331, "train/loss_total": 0.42532116174697876 }, { "epoch": 2.759016831418648, "step": 10327, "train/loss_ctc": 0.5018308162689209, "train/loss_error": 0.4194283187389374, "train/loss_total": 0.43590882420539856 }, { "epoch": 2.7592839967940153, "step": 10328, "train/loss_ctc": 1.004707932472229, "train/loss_error": 0.4251839518547058, "train/loss_total": 0.5410887598991394 }, { "epoch": 2.759551162169383, "step": 10329, "train/loss_ctc": 0.5351477861404419, "train/loss_error": 0.48453274369239807, "train/loss_total": 0.4946557581424713 }, { "epoch": 2.75981832754475, "grad_norm": 1.7829492092132568, "learning_rate": 1.3450707988244723e-05, "loss": 0.4884, "step": 10330 }, { "epoch": 2.75981832754475, "step": 10330, "train/loss_ctc": 0.5546322464942932, "train/loss_error": 0.42784374952316284, "train/loss_total": 0.45320144295692444 }, { "epoch": 2.7600854929201173, "step": 10331, "train/loss_ctc": 0.29861950874328613, "train/loss_error": 0.4030498266220093, "train/loss_total": 0.38216379284858704 }, { "epoch": 2.760352658295485, "step": 10332, "train/loss_ctc": 0.7982916831970215, "train/loss_error": 0.4868413507938385, "train/loss_total": 0.5491313934326172 }, { "epoch": 2.760619823670852, "step": 10333, 
"train/loss_ctc": 0.4595367908477783, "train/loss_error": 0.5432729721069336, "train/loss_total": 0.5265257358551025 }, { "epoch": 2.7608869890462198, "step": 10334, "train/loss_ctc": 0.7679842710494995, "train/loss_error": 0.4215998351573944, "train/loss_total": 0.4908767342567444 }, { "epoch": 2.761154154421587, "step": 10335, "train/loss_ctc": 0.48147517442703247, "train/loss_error": 0.46964454650878906, "train/loss_total": 0.47201067209243774 }, { "epoch": 2.761421319796954, "step": 10336, "train/loss_ctc": 0.37599581480026245, "train/loss_error": 0.4431437849998474, "train/loss_total": 0.4297142028808594 }, { "epoch": 2.761688485172322, "step": 10337, "train/loss_ctc": 0.4083159565925598, "train/loss_error": 0.4169730246067047, "train/loss_total": 0.41524162888526917 }, { "epoch": 2.761955650547689, "step": 10338, "train/loss_ctc": 1.2595019340515137, "train/loss_error": 0.4219456911087036, "train/loss_total": 0.5894569754600525 }, { "epoch": 2.7622228159230566, "step": 10339, "train/loss_ctc": 0.679572343826294, "train/loss_error": 0.5758798122406006, "train/loss_total": 0.5966182947158813 }, { "epoch": 2.762489981298424, "grad_norm": 4.265928745269775, "learning_rate": 1.3434678065722682e-05, "loss": 0.4905, "step": 10340 }, { "epoch": 2.762489981298424, "step": 10340, "train/loss_ctc": 0.2865748405456543, "train/loss_error": 0.4575751721858978, "train/loss_total": 0.42337509989738464 }, { "epoch": 2.762757146673791, "step": 10341, "train/loss_ctc": 0.890825629234314, "train/loss_error": 0.4684792160987854, "train/loss_total": 0.5529484748840332 }, { "epoch": 2.7630243120491587, "step": 10342, "train/loss_ctc": 0.27100253105163574, "train/loss_error": 0.41549888253211975, "train/loss_total": 0.386599600315094 }, { "epoch": 2.763291477424526, "step": 10343, "train/loss_ctc": 1.114478349685669, "train/loss_error": 0.5376972556114197, "train/loss_total": 0.6530534625053406 }, { "epoch": 2.763558642799893, "step": 10344, "train/loss_ctc": 0.7260615825653076, 
"train/loss_error": 0.4799242317676544, "train/loss_total": 0.5291516780853271 }, { "epoch": 2.7638258081752607, "step": 10345, "train/loss_ctc": 1.1248400211334229, "train/loss_error": 0.437797874212265, "train/loss_total": 0.5752062797546387 }, { "epoch": 2.764092973550628, "step": 10346, "train/loss_ctc": 0.4010854959487915, "train/loss_error": 0.4343167245388031, "train/loss_total": 0.4276704788208008 }, { "epoch": 2.764360138925995, "step": 10347, "train/loss_ctc": 0.7301279306411743, "train/loss_error": 0.4641420543193817, "train/loss_total": 0.5173392295837402 }, { "epoch": 2.7646273043013627, "step": 10348, "train/loss_ctc": 0.22232559323310852, "train/loss_error": 0.3898490369319916, "train/loss_total": 0.3563443720340729 }, { "epoch": 2.76489446967673, "step": 10349, "train/loss_ctc": 0.8628520965576172, "train/loss_error": 0.4793914258480072, "train/loss_total": 0.5560835599899292 }, { "epoch": 2.765161635052097, "grad_norm": 1.8026422262191772, "learning_rate": 1.3418648143200642e-05, "loss": 0.4978, "step": 10350 }, { "epoch": 2.765161635052097, "step": 10350, "train/loss_ctc": 1.1950762271881104, "train/loss_error": 0.4458214342594147, "train/loss_total": 0.5956723690032959 }, { "epoch": 2.7654288004274648, "step": 10351, "train/loss_ctc": 0.35375696420669556, "train/loss_error": 0.41891637444496155, "train/loss_total": 0.4058845043182373 }, { "epoch": 2.765695965802832, "step": 10352, "train/loss_ctc": 0.8218992352485657, "train/loss_error": 0.5358031988143921, "train/loss_total": 0.5930224061012268 }, { "epoch": 2.765963131178199, "step": 10353, "train/loss_ctc": 0.5136284828186035, "train/loss_error": 0.4063640832901001, "train/loss_total": 0.4278169870376587 }, { "epoch": 2.766230296553567, "step": 10354, "train/loss_ctc": 0.9957382678985596, "train/loss_error": 0.39602336287498474, "train/loss_total": 0.5159663558006287 }, { "epoch": 2.766497461928934, "step": 10355, "train/loss_ctc": 0.5216426849365234, "train/loss_error": 0.4631814956665039, 
"train/loss_total": 0.47487375140190125 }, { "epoch": 2.766764627304301, "step": 10356, "train/loss_ctc": 0.7784161567687988, "train/loss_error": 0.48582178354263306, "train/loss_total": 0.5443406701087952 }, { "epoch": 2.767031792679669, "step": 10357, "train/loss_ctc": 1.6796108484268188, "train/loss_error": 0.502419650554657, "train/loss_total": 0.7378579378128052 }, { "epoch": 2.767298958055036, "step": 10358, "train/loss_ctc": 0.6151589751243591, "train/loss_error": 0.4132780134677887, "train/loss_total": 0.4536541998386383 }, { "epoch": 2.767566123430403, "step": 10359, "train/loss_ctc": 0.5540226697921753, "train/loss_error": 0.39121103286743164, "train/loss_total": 0.4237733483314514 }, { "epoch": 2.767833288805771, "grad_norm": 1.042266607284546, "learning_rate": 1.34026182206786e-05, "loss": 0.5173, "step": 10360 }, { "epoch": 2.767833288805771, "step": 10360, "train/loss_ctc": 0.8524946570396423, "train/loss_error": 0.4721490442752838, "train/loss_total": 0.5482181906700134 }, { "epoch": 2.768100454181138, "step": 10361, "train/loss_ctc": 1.3358228206634521, "train/loss_error": 0.4560476243495941, "train/loss_total": 0.6320026516914368 }, { "epoch": 2.7683676195565052, "step": 10362, "train/loss_ctc": 0.8424522280693054, "train/loss_error": 0.39864903688430786, "train/loss_total": 0.48740971088409424 }, { "epoch": 2.768634784931873, "step": 10363, "train/loss_ctc": 0.7626844644546509, "train/loss_error": 0.4467500150203705, "train/loss_total": 0.5099369287490845 }, { "epoch": 2.76890195030724, "step": 10364, "train/loss_ctc": 0.8578760027885437, "train/loss_error": 0.4687716066837311, "train/loss_total": 0.5465924739837646 }, { "epoch": 2.7691691156826073, "step": 10365, "train/loss_ctc": 0.5978184342384338, "train/loss_error": 0.410702645778656, "train/loss_total": 0.44812580943107605 }, { "epoch": 2.769436281057975, "step": 10366, "train/loss_ctc": 0.2531535029411316, "train/loss_error": 0.494350403547287, "train/loss_total": 0.4461110234260559 }, { 
"epoch": 2.769703446433342, "step": 10367, "train/loss_ctc": 0.6986408233642578, "train/loss_error": 0.3780193030834198, "train/loss_total": 0.44214361906051636 }, { "epoch": 2.7699706118087097, "step": 10368, "train/loss_ctc": 0.5224584341049194, "train/loss_error": 0.49415111541748047, "train/loss_total": 0.4998125731945038 }, { "epoch": 2.770237777184077, "step": 10369, "train/loss_ctc": 0.44462352991104126, "train/loss_error": 0.4602743089199066, "train/loss_total": 0.457144170999527 }, { "epoch": 2.7705049425594446, "grad_norm": 5.884192943572998, "learning_rate": 1.338658829815656e-05, "loss": 0.5017, "step": 10370 }, { "epoch": 2.7705049425594446, "step": 10370, "train/loss_ctc": 0.959997296333313, "train/loss_error": 0.4587790369987488, "train/loss_total": 0.5590226650238037 }, { "epoch": 2.7707721079348118, "step": 10371, "train/loss_ctc": 0.3308945298194885, "train/loss_error": 0.4039100408554077, "train/loss_total": 0.3893069624900818 }, { "epoch": 2.771039273310179, "step": 10372, "train/loss_ctc": 1.4446836709976196, "train/loss_error": 0.4487532377243042, "train/loss_total": 0.6479393243789673 }, { "epoch": 2.7713064386855466, "step": 10373, "train/loss_ctc": 1.2180395126342773, "train/loss_error": 0.5366382002830505, "train/loss_total": 0.672918438911438 }, { "epoch": 2.771573604060914, "step": 10374, "train/loss_ctc": 0.26104509830474854, "train/loss_error": 0.44563743472099304, "train/loss_total": 0.4087189733982086 }, { "epoch": 2.771840769436281, "step": 10375, "train/loss_ctc": 1.0631334781646729, "train/loss_error": 0.556601345539093, "train/loss_total": 0.6579077839851379 }, { "epoch": 2.7721079348116486, "step": 10376, "train/loss_ctc": 0.7044894695281982, "train/loss_error": 0.49730184674263, "train/loss_total": 0.5387393832206726 }, { "epoch": 2.772375100187016, "step": 10377, "train/loss_ctc": 0.6838271617889404, "train/loss_error": 0.4049215316772461, "train/loss_total": 0.46070265769958496 }, { "epoch": 2.772642265562383, "step": 10378, 
"train/loss_ctc": 0.3909136652946472, "train/loss_error": 0.43071678280830383, "train/loss_total": 0.422756165266037 }, { "epoch": 2.7729094309377507, "step": 10379, "train/loss_ctc": 0.670724630355835, "train/loss_error": 0.4745517075061798, "train/loss_total": 0.5137863159179688 }, { "epoch": 2.773176596313118, "grad_norm": 2.5770838260650635, "learning_rate": 1.3370558375634518e-05, "loss": 0.5272, "step": 10380 }, { "epoch": 2.773176596313118, "step": 10380, "train/loss_ctc": 0.8952347040176392, "train/loss_error": 0.48067519068717957, "train/loss_total": 0.5635871291160583 }, { "epoch": 2.773443761688485, "step": 10381, "train/loss_ctc": 0.8512156009674072, "train/loss_error": 0.43339693546295166, "train/loss_total": 0.5169606804847717 }, { "epoch": 2.7737109270638527, "step": 10382, "train/loss_ctc": 0.44371598958969116, "train/loss_error": 0.45625221729278564, "train/loss_total": 0.4537449777126312 }, { "epoch": 2.77397809243922, "step": 10383, "train/loss_ctc": 0.4373607635498047, "train/loss_error": 0.3909718692302704, "train/loss_total": 0.4002496600151062 }, { "epoch": 2.774245257814587, "step": 10384, "train/loss_ctc": 0.7362160086631775, "train/loss_error": 0.49583199620246887, "train/loss_total": 0.5439088344573975 }, { "epoch": 2.7745124231899547, "step": 10385, "train/loss_ctc": 1.1324493885040283, "train/loss_error": 0.515821099281311, "train/loss_total": 0.6391467452049255 }, { "epoch": 2.774779588565322, "step": 10386, "train/loss_ctc": 0.9483813047409058, "train/loss_error": 0.5075138807296753, "train/loss_total": 0.5956873893737793 }, { "epoch": 2.775046753940689, "step": 10387, "train/loss_ctc": 0.8634098768234253, "train/loss_error": 0.45728370547294617, "train/loss_total": 0.538508951663971 }, { "epoch": 2.7753139193160568, "step": 10388, "train/loss_ctc": 0.897375226020813, "train/loss_error": 0.4299422800540924, "train/loss_total": 0.5234288573265076 }, { "epoch": 2.775581084691424, "step": 10389, "train/loss_ctc": 0.9229626655578613, 
"train/loss_error": 0.39948737621307373, "train/loss_total": 0.5041824579238892 }, { "epoch": 2.775848250066791, "grad_norm": 3.633458137512207, "learning_rate": 1.3354528453112476e-05, "loss": 0.5279, "step": 10390 }, { "epoch": 2.775848250066791, "step": 10390, "train/loss_ctc": 0.4988727569580078, "train/loss_error": 0.5184134244918823, "train/loss_total": 0.5145053267478943 }, { "epoch": 2.776115415442159, "step": 10391, "train/loss_ctc": 0.7904874086380005, "train/loss_error": 0.40615779161453247, "train/loss_total": 0.4830237030982971 }, { "epoch": 2.776382580817526, "step": 10392, "train/loss_ctc": 0.5850080251693726, "train/loss_error": 0.41999948024749756, "train/loss_total": 0.4530012011528015 }, { "epoch": 2.776649746192893, "step": 10393, "train/loss_ctc": 0.6586591005325317, "train/loss_error": 0.49589017033576965, "train/loss_total": 0.5284439921379089 }, { "epoch": 2.776916911568261, "step": 10394, "train/loss_ctc": 0.9539393186569214, "train/loss_error": 0.4390288293361664, "train/loss_total": 0.5420109629631042 }, { "epoch": 2.777184076943628, "step": 10395, "train/loss_ctc": 0.6431589126586914, "train/loss_error": 0.4428335726261139, "train/loss_total": 0.48289865255355835 }, { "epoch": 2.777451242318995, "step": 10396, "train/loss_ctc": 0.6959816813468933, "train/loss_error": 0.4164050221443176, "train/loss_total": 0.4723203480243683 }, { "epoch": 2.777718407694363, "step": 10397, "train/loss_ctc": 0.5627482533454895, "train/loss_error": 0.3907172381877899, "train/loss_total": 0.4251234531402588 }, { "epoch": 2.77798557306973, "step": 10398, "train/loss_ctc": 0.9651809930801392, "train/loss_error": 0.4414059519767761, "train/loss_total": 0.5461609363555908 }, { "epoch": 2.7782527384450977, "step": 10399, "train/loss_ctc": 0.8596075773239136, "train/loss_error": 0.44430679082870483, "train/loss_total": 0.5273669958114624 }, { "epoch": 2.778519903820465, "grad_norm": 1.7318885326385498, "learning_rate": 1.3338498530590436e-05, "loss": 0.4975, 
"step": 10400 }, { "epoch": 2.778519903820465, "step": 10400, "train/loss_ctc": 0.386854887008667, "train/loss_error": 0.44727569818496704, "train/loss_total": 0.4351915419101715 }, { "epoch": 2.778787069195832, "step": 10401, "train/loss_ctc": 1.5394624471664429, "train/loss_error": 0.5112608075141907, "train/loss_total": 0.7169011831283569 }, { "epoch": 2.7790542345711997, "step": 10402, "train/loss_ctc": 0.4681773781776428, "train/loss_error": 0.4657753109931946, "train/loss_total": 0.46625572443008423 }, { "epoch": 2.779321399946567, "step": 10403, "train/loss_ctc": 0.6919605731964111, "train/loss_error": 0.42457959055900574, "train/loss_total": 0.47805580496788025 }, { "epoch": 2.7795885653219345, "step": 10404, "train/loss_ctc": 1.2599514722824097, "train/loss_error": 0.4777846932411194, "train/loss_total": 0.6342180371284485 }, { "epoch": 2.7798557306973017, "step": 10405, "train/loss_ctc": 0.6936135292053223, "train/loss_error": 0.4465816020965576, "train/loss_total": 0.49598801136016846 }, { "epoch": 2.780122896072669, "step": 10406, "train/loss_ctc": 0.6697201728820801, "train/loss_error": 0.4309949278831482, "train/loss_total": 0.47873997688293457 }, { "epoch": 2.7803900614480366, "step": 10407, "train/loss_ctc": 0.477144718170166, "train/loss_error": 0.4368076026439667, "train/loss_total": 0.444875031709671 }, { "epoch": 2.7806572268234038, "step": 10408, "train/loss_ctc": 0.4839468002319336, "train/loss_error": 0.4333457946777344, "train/loss_total": 0.4434660077095032 }, { "epoch": 2.780924392198771, "step": 10409, "train/loss_ctc": 1.1577224731445312, "train/loss_error": 0.5192995667457581, "train/loss_total": 0.6469841599464417 }, { "epoch": 2.7811915575741386, "grad_norm": 3.129770278930664, "learning_rate": 1.3322468608068394e-05, "loss": 0.5241, "step": 10410 }, { "epoch": 2.7811915575741386, "step": 10410, "train/loss_ctc": 1.0076533555984497, "train/loss_error": 0.47430282831192017, "train/loss_total": 0.5809729695320129 }, { "epoch": 
2.781458722949506, "step": 10411, "train/loss_ctc": 0.687161922454834, "train/loss_error": 0.43748554587364197, "train/loss_total": 0.48742079734802246 }, { "epoch": 2.781725888324873, "step": 10412, "train/loss_ctc": 0.4115554094314575, "train/loss_error": 0.46106380224227905, "train/loss_total": 0.4511621296405792 }, { "epoch": 2.7819930537002406, "step": 10413, "train/loss_ctc": 0.9528995156288147, "train/loss_error": 0.41119349002838135, "train/loss_total": 0.519534707069397 }, { "epoch": 2.782260219075608, "step": 10414, "train/loss_ctc": 0.6288490295410156, "train/loss_error": 0.4908798933029175, "train/loss_total": 0.518473744392395 }, { "epoch": 2.782527384450975, "step": 10415, "train/loss_ctc": 1.1440842151641846, "train/loss_error": 0.46709805727005005, "train/loss_total": 0.6024953126907349 }, { "epoch": 2.7827945498263427, "step": 10416, "train/loss_ctc": 1.0601181983947754, "train/loss_error": 0.4174617528915405, "train/loss_total": 0.5459930896759033 }, { "epoch": 2.78306171520171, "step": 10417, "train/loss_ctc": 0.9558703899383545, "train/loss_error": 0.4677996337413788, "train/loss_total": 0.565413773059845 }, { "epoch": 2.783328880577077, "step": 10418, "train/loss_ctc": 0.8114514350891113, "train/loss_error": 0.4379574954509735, "train/loss_total": 0.5126562714576721 }, { "epoch": 2.7835960459524447, "step": 10419, "train/loss_ctc": 0.5938603281974792, "train/loss_error": 0.4926813840866089, "train/loss_total": 0.512917160987854 }, { "epoch": 2.783863211327812, "grad_norm": 2.6522929668426514, "learning_rate": 1.3306438685546354e-05, "loss": 0.5297, "step": 10420 }, { "epoch": 2.783863211327812, "step": 10420, "train/loss_ctc": 0.6963499784469604, "train/loss_error": 0.4443041682243347, "train/loss_total": 0.49471330642700195 }, { "epoch": 2.784130376703179, "step": 10421, "train/loss_ctc": 0.6313046813011169, "train/loss_error": 0.38305002450942993, "train/loss_total": 0.4327009618282318 }, { "epoch": 2.7843975420785467, "step": 10422, 
"train/loss_ctc": 0.6092332601547241, "train/loss_error": 0.3867340385913849, "train/loss_total": 0.43123388290405273 }, { "epoch": 2.784664707453914, "step": 10423, "train/loss_ctc": 0.708958625793457, "train/loss_error": 0.4466402232646942, "train/loss_total": 0.4991039037704468 }, { "epoch": 2.784931872829281, "step": 10424, "train/loss_ctc": 0.3701423108577728, "train/loss_error": 0.3649531304836273, "train/loss_total": 0.3659909665584564 }, { "epoch": 2.7851990382046488, "step": 10425, "train/loss_ctc": 0.43166518211364746, "train/loss_error": 0.531204104423523, "train/loss_total": 0.5112963318824768 }, { "epoch": 2.785466203580016, "step": 10426, "train/loss_ctc": 0.8836667537689209, "train/loss_error": 0.4478570520877838, "train/loss_total": 0.5350189805030823 }, { "epoch": 2.785733368955383, "step": 10427, "train/loss_ctc": 0.7881381511688232, "train/loss_error": 0.4350690245628357, "train/loss_total": 0.5056828260421753 }, { "epoch": 2.786000534330751, "step": 10428, "train/loss_ctc": 0.6591271758079529, "train/loss_error": 0.43730661273002625, "train/loss_total": 0.4816707372665405 }, { "epoch": 2.786267699706118, "step": 10429, "train/loss_ctc": 0.656944215297699, "train/loss_error": 0.41933831572532654, "train/loss_total": 0.46685948967933655 }, { "epoch": 2.786534865081485, "grad_norm": 3.303759813308716, "learning_rate": 1.3290408763024312e-05, "loss": 0.4724, "step": 10430 }, { "epoch": 2.786534865081485, "step": 10430, "train/loss_ctc": 0.5680826902389526, "train/loss_error": 0.45614463090896606, "train/loss_total": 0.47853225469589233 }, { "epoch": 2.786802030456853, "step": 10431, "train/loss_ctc": 0.4947403073310852, "train/loss_error": 0.4824022054672241, "train/loss_total": 0.4848698377609253 }, { "epoch": 2.78706919583222, "step": 10432, "train/loss_ctc": 0.9114898443222046, "train/loss_error": 0.449324369430542, "train/loss_total": 0.5417574644088745 }, { "epoch": 2.7873363612075877, "step": 10433, "train/loss_ctc": 0.8079090118408203, 
"train/loss_error": 0.3878539204597473, "train/loss_total": 0.4718649387359619 }, { "epoch": 2.787603526582955, "step": 10434, "train/loss_ctc": 0.7906458377838135, "train/loss_error": 0.40119606256484985, "train/loss_total": 0.4790860414505005 }, { "epoch": 2.787870691958322, "step": 10435, "train/loss_ctc": 0.28136056661605835, "train/loss_error": 0.39682191610336304, "train/loss_total": 0.3737296462059021 }, { "epoch": 2.7881378573336897, "step": 10436, "train/loss_ctc": 0.7419345378875732, "train/loss_error": 0.44691288471221924, "train/loss_total": 0.5059172511100769 }, { "epoch": 2.788405022709057, "step": 10437, "train/loss_ctc": 0.7713392972946167, "train/loss_error": 0.4277101457118988, "train/loss_total": 0.4964359998703003 }, { "epoch": 2.7886721880844245, "step": 10438, "train/loss_ctc": 0.9762780666351318, "train/loss_error": 0.4787903130054474, "train/loss_total": 0.5782878994941711 }, { "epoch": 2.7889393534597917, "step": 10439, "train/loss_ctc": 0.6513388156890869, "train/loss_error": 0.42314469814300537, "train/loss_total": 0.46878352761268616 }, { "epoch": 2.789206518835159, "grad_norm": 1.6766098737716675, "learning_rate": 1.3274378840502272e-05, "loss": 0.4879, "step": 10440 }, { "epoch": 2.789206518835159, "step": 10440, "train/loss_ctc": 0.45804017782211304, "train/loss_error": 0.453721284866333, "train/loss_total": 0.45458507537841797 }, { "epoch": 2.7894736842105265, "step": 10441, "train/loss_ctc": 1.0679504871368408, "train/loss_error": 0.407103031873703, "train/loss_total": 0.5392725467681885 }, { "epoch": 2.7897408495858937, "step": 10442, "train/loss_ctc": 1.0140469074249268, "train/loss_error": 0.5071942806243896, "train/loss_total": 0.6085647940635681 }, { "epoch": 2.790008014961261, "step": 10443, "train/loss_ctc": 0.9043264389038086, "train/loss_error": 0.37892386317253113, "train/loss_total": 0.4840043783187866 }, { "epoch": 2.7902751803366286, "step": 10444, "train/loss_ctc": 0.7511852383613586, "train/loss_error": 
0.4102964997291565, "train/loss_total": 0.4784742593765259 }, { "epoch": 2.7905423457119958, "step": 10445, "train/loss_ctc": 0.8822640180587769, "train/loss_error": 0.42904427647590637, "train/loss_total": 0.5196882486343384 }, { "epoch": 2.790809511087363, "step": 10446, "train/loss_ctc": 0.5046277046203613, "train/loss_error": 0.5458518266677856, "train/loss_total": 0.5376070141792297 }, { "epoch": 2.7910766764627306, "step": 10447, "train/loss_ctc": 1.013543963432312, "train/loss_error": 0.441159725189209, "train/loss_total": 0.5556365847587585 }, { "epoch": 2.791343841838098, "step": 10448, "train/loss_ctc": 0.5429341793060303, "train/loss_error": 0.4110366404056549, "train/loss_total": 0.4374161660671234 }, { "epoch": 2.791611007213465, "step": 10449, "train/loss_ctc": 0.24777819216251373, "train/loss_error": 0.41222280263900757, "train/loss_total": 0.37933388352394104 }, { "epoch": 2.7918781725888326, "grad_norm": 1.406442403793335, "learning_rate": 1.3258348917980231e-05, "loss": 0.4995, "step": 10450 }, { "epoch": 2.7918781725888326, "step": 10450, "train/loss_ctc": 0.6722744703292847, "train/loss_error": 0.43301060795783997, "train/loss_total": 0.48086339235305786 }, { "epoch": 2.7921453379642, "step": 10451, "train/loss_ctc": 1.0455515384674072, "train/loss_error": 0.4174143373966217, "train/loss_total": 0.5430417656898499 }, { "epoch": 2.792412503339567, "step": 10452, "train/loss_ctc": 0.6852904558181763, "train/loss_error": 0.4119514524936676, "train/loss_total": 0.46661925315856934 }, { "epoch": 2.7926796687149347, "step": 10453, "train/loss_ctc": 0.542072057723999, "train/loss_error": 0.4176270365715027, "train/loss_total": 0.4425160586833954 }, { "epoch": 2.792946834090302, "step": 10454, "train/loss_ctc": 0.6877947449684143, "train/loss_error": 0.47157523036003113, "train/loss_total": 0.5148191452026367 }, { "epoch": 2.793213999465669, "step": 10455, "train/loss_ctc": 0.6428635120391846, "train/loss_error": 0.4227175712585449, "train/loss_total": 
0.4667467772960663 }, { "epoch": 2.7934811648410367, "step": 10456, "train/loss_ctc": 0.653927743434906, "train/loss_error": 0.43196579813957214, "train/loss_total": 0.47635820508003235 }, { "epoch": 2.793748330216404, "step": 10457, "train/loss_ctc": 0.4693382978439331, "train/loss_error": 0.43495696783065796, "train/loss_total": 0.4418332278728485 }, { "epoch": 2.794015495591771, "step": 10458, "train/loss_ctc": 0.7561631202697754, "train/loss_error": 0.427742600440979, "train/loss_total": 0.49342671036720276 }, { "epoch": 2.7942826609671387, "step": 10459, "train/loss_ctc": 0.7340039014816284, "train/loss_error": 0.44126632809638977, "train/loss_total": 0.49981385469436646 }, { "epoch": 2.794549826342506, "grad_norm": 4.613345623016357, "learning_rate": 1.324231899545819e-05, "loss": 0.4826, "step": 10460 }, { "epoch": 2.794549826342506, "step": 10460, "train/loss_ctc": 0.2809014916419983, "train/loss_error": 0.4033428132534027, "train/loss_total": 0.37885454297065735 }, { "epoch": 2.794816991717873, "step": 10461, "train/loss_ctc": 0.45662736892700195, "train/loss_error": 0.3949069380760193, "train/loss_total": 0.4072510600090027 }, { "epoch": 2.7950841570932408, "step": 10462, "train/loss_ctc": 0.3707084059715271, "train/loss_error": 0.4402688145637512, "train/loss_total": 0.4263567328453064 }, { "epoch": 2.795351322468608, "step": 10463, "train/loss_ctc": 0.7615038752555847, "train/loss_error": 0.44074127078056335, "train/loss_total": 0.5048937797546387 }, { "epoch": 2.795618487843975, "step": 10464, "train/loss_ctc": 0.5903387069702148, "train/loss_error": 0.4734787046909332, "train/loss_total": 0.4968506991863251 }, { "epoch": 2.795885653219343, "step": 10465, "train/loss_ctc": 1.0394775867462158, "train/loss_error": 0.4600415527820587, "train/loss_total": 0.575928807258606 }, { "epoch": 2.79615281859471, "step": 10466, "train/loss_ctc": 1.4450874328613281, "train/loss_error": 0.4966234564781189, "train/loss_total": 0.6863162517547607 }, { "epoch": 
2.7964199839700776, "step": 10467, "train/loss_ctc": 0.7731496095657349, "train/loss_error": 0.44431811571121216, "train/loss_total": 0.5100844502449036 }, { "epoch": 2.796687149345445, "step": 10468, "train/loss_ctc": 0.8270008563995361, "train/loss_error": 0.4850354790687561, "train/loss_total": 0.5534285306930542 }, { "epoch": 2.796954314720812, "step": 10469, "train/loss_ctc": 0.5545955896377563, "train/loss_error": 0.4222244322299957, "train/loss_total": 0.44869866967201233 }, { "epoch": 2.7972214800961797, "grad_norm": 1.6303601264953613, "learning_rate": 1.3226289072936148e-05, "loss": 0.4989, "step": 10470 }, { "epoch": 2.7972214800961797, "step": 10470, "train/loss_ctc": 0.7640153169631958, "train/loss_error": 0.4438152313232422, "train/loss_total": 0.5078552961349487 }, { "epoch": 2.797488645471547, "step": 10471, "train/loss_ctc": 0.3869166374206543, "train/loss_error": 0.4002512991428375, "train/loss_total": 0.39758437871932983 }, { "epoch": 2.7977558108469145, "step": 10472, "train/loss_ctc": 0.5444470643997192, "train/loss_error": 0.42049407958984375, "train/loss_total": 0.4452846646308899 }, { "epoch": 2.7980229762222817, "step": 10473, "train/loss_ctc": 0.41073471307754517, "train/loss_error": 0.42966151237487793, "train/loss_total": 0.4258761703968048 }, { "epoch": 2.798290141597649, "step": 10474, "train/loss_ctc": 0.7274633646011353, "train/loss_error": 0.403472900390625, "train/loss_total": 0.4682709872722626 }, { "epoch": 2.7985573069730165, "step": 10475, "train/loss_ctc": 0.7387297749519348, "train/loss_error": 0.5091677308082581, "train/loss_total": 0.5550801157951355 }, { "epoch": 2.7988244723483837, "step": 10476, "train/loss_ctc": 0.4881010949611664, "train/loss_error": 0.45991936326026917, "train/loss_total": 0.46555572748184204 }, { "epoch": 2.799091637723751, "step": 10477, "train/loss_ctc": 0.8122251033782959, "train/loss_error": 0.465719997882843, "train/loss_total": 0.5350210070610046 }, { "epoch": 2.7993588030991186, "step": 10478, 
"train/loss_ctc": 0.6941594481468201, "train/loss_error": 0.4633336365222931, "train/loss_total": 0.5094988346099854 }, { "epoch": 2.7996259684744857, "step": 10479, "train/loss_ctc": 0.5864872932434082, "train/loss_error": 0.399432510137558, "train/loss_total": 0.43684348464012146 }, { "epoch": 2.799893133849853, "grad_norm": 1.7855234146118164, "learning_rate": 1.3210259150414107e-05, "loss": 0.4747, "step": 10480 }, { "epoch": 2.799893133849853, "step": 10480, "train/loss_ctc": 0.3911983072757721, "train/loss_error": 0.4292869567871094, "train/loss_total": 0.42166924476623535 }, { "epoch": 2.8001602992252206, "step": 10481, "train/loss_ctc": 1.1216700077056885, "train/loss_error": 0.43234172463417053, "train/loss_total": 0.5702073574066162 }, { "epoch": 2.800427464600588, "step": 10482, "train/loss_ctc": 1.3734925985336304, "train/loss_error": 0.5167596936225891, "train/loss_total": 0.6881062984466553 }, { "epoch": 2.800694629975955, "step": 10483, "train/loss_ctc": 0.3464314937591553, "train/loss_error": 0.5056737065315247, "train/loss_total": 0.47382527589797974 }, { "epoch": 2.8009617953513226, "step": 10484, "train/loss_ctc": 0.3911653161048889, "train/loss_error": 0.4947827458381653, "train/loss_total": 0.47405925393104553 }, { "epoch": 2.80122896072669, "step": 10485, "train/loss_ctc": 0.5541452765464783, "train/loss_error": 0.5324534177780151, "train/loss_total": 0.5367918014526367 }, { "epoch": 2.801496126102057, "step": 10486, "train/loss_ctc": 0.9185487031936646, "train/loss_error": 0.40571391582489014, "train/loss_total": 0.508280873298645 }, { "epoch": 2.8017632914774246, "step": 10487, "train/loss_ctc": 0.6928907632827759, "train/loss_error": 0.45948004722595215, "train/loss_total": 0.5061622262001038 }, { "epoch": 2.802030456852792, "step": 10488, "train/loss_ctc": 0.5367703437805176, "train/loss_error": 0.4561009109020233, "train/loss_total": 0.4722348153591156 }, { "epoch": 2.802297622228159, "step": 10489, "train/loss_ctc": 0.7956585884094238, 
"train/loss_error": 0.4238114356994629, "train/loss_total": 0.4981808662414551 }, { "epoch": 2.8025647876035267, "grad_norm": 3.0390355587005615, "learning_rate": 1.3194229227892066e-05, "loss": 0.515, "step": 10490 }, { "epoch": 2.8025647876035267, "step": 10490, "train/loss_ctc": 0.7883810997009277, "train/loss_error": 0.5252853035926819, "train/loss_total": 0.577904462814331 }, { "epoch": 2.802831952978894, "step": 10491, "train/loss_ctc": 0.6712382435798645, "train/loss_error": 0.423043817281723, "train/loss_total": 0.4726827144622803 }, { "epoch": 2.803099118354261, "step": 10492, "train/loss_ctc": 0.9099472761154175, "train/loss_error": 0.4403878152370453, "train/loss_total": 0.5342997312545776 }, { "epoch": 2.8033662837296287, "step": 10493, "train/loss_ctc": 0.6238412261009216, "train/loss_error": 0.4448094964027405, "train/loss_total": 0.48061585426330566 }, { "epoch": 2.803633449104996, "step": 10494, "train/loss_ctc": 0.9077988266944885, "train/loss_error": 0.5221990346908569, "train/loss_total": 0.5993189811706543 }, { "epoch": 2.803900614480363, "step": 10495, "train/loss_ctc": 0.9385498762130737, "train/loss_error": 0.41863104701042175, "train/loss_total": 0.5226148366928101 }, { "epoch": 2.8041677798557307, "step": 10496, "train/loss_ctc": 0.3413606584072113, "train/loss_error": 0.4161919951438904, "train/loss_total": 0.401225745677948 }, { "epoch": 2.804434945231098, "step": 10497, "train/loss_ctc": 0.8605334162712097, "train/loss_error": 0.5081450343132019, "train/loss_total": 0.5786226987838745 }, { "epoch": 2.804702110606465, "step": 10498, "train/loss_ctc": 0.44025561213493347, "train/loss_error": 0.4762021601200104, "train/loss_total": 0.4690128564834595 }, { "epoch": 2.8049692759818328, "step": 10499, "train/loss_ctc": 0.6498523950576782, "train/loss_error": 0.40531256794929504, "train/loss_total": 0.4542205333709717 }, { "epoch": 2.8052364413572, "grad_norm": 2.6689114570617676, "learning_rate": 1.3178199305370024e-05, "loss": 0.5091, "step": 
10500 }, { "epoch": 2.8052364413572, "step": 10500, "train/loss_ctc": 0.49302938580513, "train/loss_error": 0.45528852939605713, "train/loss_total": 0.46283671259880066 }, { "epoch": 2.8055036067325676, "step": 10501, "train/loss_ctc": 1.19748055934906, "train/loss_error": 0.48718079924583435, "train/loss_total": 0.6292407512664795 }, { "epoch": 2.805770772107935, "step": 10502, "train/loss_ctc": 0.4757612347602844, "train/loss_error": 0.4782080054283142, "train/loss_total": 0.47771868109703064 }, { "epoch": 2.8060379374833024, "step": 10503, "train/loss_ctc": 0.835543155670166, "train/loss_error": 0.4528246223926544, "train/loss_total": 0.5293683409690857 }, { "epoch": 2.8063051028586696, "step": 10504, "train/loss_ctc": 1.1686371564865112, "train/loss_error": 0.4675159454345703, "train/loss_total": 0.6077402234077454 }, { "epoch": 2.806572268234037, "step": 10505, "train/loss_ctc": 0.7717750072479248, "train/loss_error": 0.4652113914489746, "train/loss_total": 0.5265241265296936 }, { "epoch": 2.8068394336094045, "step": 10506, "train/loss_ctc": 0.9726348519325256, "train/loss_error": 0.47067517042160034, "train/loss_total": 0.5710670948028564 }, { "epoch": 2.8071065989847717, "step": 10507, "train/loss_ctc": 0.8055760860443115, "train/loss_error": 0.4669584631919861, "train/loss_total": 0.5346819758415222 }, { "epoch": 2.807373764360139, "step": 10508, "train/loss_ctc": 1.1814603805541992, "train/loss_error": 0.5415412187576294, "train/loss_total": 0.6695250868797302 }, { "epoch": 2.8076409297355065, "step": 10509, "train/loss_ctc": 0.7260146737098694, "train/loss_error": 0.4232266843318939, "train/loss_total": 0.4837842881679535 }, { "epoch": 2.8079080951108737, "grad_norm": 1.611035704612732, "learning_rate": 1.3162169382847983e-05, "loss": 0.5492, "step": 10510 }, { "epoch": 2.8079080951108737, "step": 10510, "train/loss_ctc": 0.8012783527374268, "train/loss_error": 0.38816365599632263, "train/loss_total": 0.47078660130500793 }, { "epoch": 2.808175260486241, 
"step": 10511, "train/loss_ctc": 1.076164960861206, "train/loss_error": 0.47417616844177246, "train/loss_total": 0.594573974609375 }, { "epoch": 2.8084424258616085, "step": 10512, "train/loss_ctc": 0.7857093214988708, "train/loss_error": 0.5008569359779358, "train/loss_total": 0.5578274130821228 }, { "epoch": 2.8087095912369757, "step": 10513, "train/loss_ctc": 0.4636109173297882, "train/loss_error": 0.44112324714660645, "train/loss_total": 0.4456207752227783 }, { "epoch": 2.808976756612343, "step": 10514, "train/loss_ctc": 0.24322427809238434, "train/loss_error": 0.4421391785144806, "train/loss_total": 0.40235620737075806 }, { "epoch": 2.8092439219877106, "step": 10515, "train/loss_ctc": 1.2764123678207397, "train/loss_error": 0.48865529894828796, "train/loss_total": 0.6462067365646362 }, { "epoch": 2.8095110873630778, "step": 10516, "train/loss_ctc": 0.7127203345298767, "train/loss_error": 0.4439619481563568, "train/loss_total": 0.4977136254310608 }, { "epoch": 2.809778252738445, "step": 10517, "train/loss_ctc": 0.8598125576972961, "train/loss_error": 0.49210649728775024, "train/loss_total": 0.5656477212905884 }, { "epoch": 2.8100454181138126, "step": 10518, "train/loss_ctc": 0.37164247035980225, "train/loss_error": 0.4549788534641266, "train/loss_total": 0.4383115768432617 }, { "epoch": 2.81031258348918, "step": 10519, "train/loss_ctc": 0.5036637187004089, "train/loss_error": 0.48411113023757935, "train/loss_total": 0.4880216419696808 }, { "epoch": 2.810579748864547, "grad_norm": 1.579764723777771, "learning_rate": 1.3147742452578146e-05, "loss": 0.5107, "step": 10520 }, { "epoch": 2.810579748864547, "step": 10520, "train/loss_ctc": 1.2791545391082764, "train/loss_error": 0.4581889510154724, "train/loss_total": 0.6223820447921753 }, { "epoch": 2.8108469142399146, "step": 10521, "train/loss_ctc": 0.7703832387924194, "train/loss_error": 0.4992140829563141, "train/loss_total": 0.5534479022026062 }, { "epoch": 2.811114079615282, "step": 10522, "train/loss_ctc": 
1.475846529006958, "train/loss_error": 0.5036906599998474, "train/loss_total": 0.6981218457221985 }, { "epoch": 2.811381244990649, "step": 10523, "train/loss_ctc": 1.5379948616027832, "train/loss_error": 0.526328444480896, "train/loss_total": 0.7286617755889893 }, { "epoch": 2.8116484103660166, "step": 10524, "train/loss_ctc": 0.633800745010376, "train/loss_error": 0.44693392515182495, "train/loss_total": 0.48430728912353516 }, { "epoch": 2.811915575741384, "step": 10525, "train/loss_ctc": 0.7773245573043823, "train/loss_error": 0.4527861177921295, "train/loss_total": 0.517693817615509 }, { "epoch": 2.812182741116751, "step": 10526, "train/loss_ctc": 0.7641124725341797, "train/loss_error": 0.4571700990200043, "train/loss_total": 0.5185586214065552 }, { "epoch": 2.8124499064921187, "step": 10527, "train/loss_ctc": 1.0216262340545654, "train/loss_error": 0.4807440936565399, "train/loss_total": 0.588920533657074 }, { "epoch": 2.812717071867486, "step": 10528, "train/loss_ctc": 1.0030150413513184, "train/loss_error": 0.4389997124671936, "train/loss_total": 0.5518027544021606 }, { "epoch": 2.812984237242853, "step": 10529, "train/loss_ctc": 0.5181175470352173, "train/loss_error": 0.5057556629180908, "train/loss_total": 0.508228063583374 }, { "epoch": 2.8132514026182207, "grad_norm": 1.596815824508667, "learning_rate": 1.3131712530056106e-05, "loss": 0.5772, "step": 10530 }, { "epoch": 2.8132514026182207, "step": 10530, "train/loss_ctc": 1.2516298294067383, "train/loss_error": 0.461003839969635, "train/loss_total": 0.6191290616989136 }, { "epoch": 2.813518567993588, "step": 10531, "train/loss_ctc": 0.48178336024284363, "train/loss_error": 0.40663716197013855, "train/loss_total": 0.42166638374328613 }, { "epoch": 2.8137857333689555, "step": 10532, "train/loss_ctc": 0.5775090456008911, "train/loss_error": 0.421161413192749, "train/loss_total": 0.45243096351623535 }, { "epoch": 2.8140528987443227, "step": 10533, "train/loss_ctc": 0.524389922618866, "train/loss_error": 
0.4092805087566376, "train/loss_total": 0.4323023855686188 }, { "epoch": 2.81432006411969, "step": 10534, "train/loss_ctc": 0.5017440319061279, "train/loss_error": 0.473905086517334, "train/loss_total": 0.4794728755950928 }, { "epoch": 2.8145872294950576, "step": 10535, "train/loss_ctc": 0.7011562585830688, "train/loss_error": 0.3916972279548645, "train/loss_total": 0.4535890519618988 }, { "epoch": 2.8148543948704248, "step": 10536, "train/loss_ctc": 0.870761513710022, "train/loss_error": 0.46409180760383606, "train/loss_total": 0.5454257726669312 }, { "epoch": 2.8151215602457924, "step": 10537, "train/loss_ctc": 0.8842791318893433, "train/loss_error": 0.4374265670776367, "train/loss_total": 0.5267970561981201 }, { "epoch": 2.8153887256211596, "step": 10538, "train/loss_ctc": 0.8483859896659851, "train/loss_error": 0.3607570230960846, "train/loss_total": 0.45828282833099365 }, { "epoch": 2.815655890996527, "step": 10539, "train/loss_ctc": 0.45320361852645874, "train/loss_error": 0.5051192045211792, "train/loss_total": 0.49473610520362854 }, { "epoch": 2.8159230563718944, "grad_norm": 2.254848003387451, "learning_rate": 1.3115682607534064e-05, "loss": 0.4884, "step": 10540 }, { "epoch": 2.8159230563718944, "step": 10540, "train/loss_ctc": 0.8751727342605591, "train/loss_error": 0.4012421667575836, "train/loss_total": 0.4960283041000366 }, { "epoch": 2.8161902217472616, "step": 10541, "train/loss_ctc": 0.6437852382659912, "train/loss_error": 0.4619993269443512, "train/loss_total": 0.49835652112960815 }, { "epoch": 2.816457387122629, "step": 10542, "train/loss_ctc": 0.7275717854499817, "train/loss_error": 0.5055415034294128, "train/loss_total": 0.5499475598335266 }, { "epoch": 2.8167245524979965, "step": 10543, "train/loss_ctc": 0.5443700551986694, "train/loss_error": 0.4299420118331909, "train/loss_total": 0.4528276324272156 }, { "epoch": 2.8169917178733637, "step": 10544, "train/loss_ctc": 0.6941856145858765, "train/loss_error": 0.42730310559272766, 
"train/loss_total": 0.48067963123321533 }, { "epoch": 2.817258883248731, "step": 10545, "train/loss_ctc": 1.1078357696533203, "train/loss_error": 0.480185329914093, "train/loss_total": 0.6057153940200806 }, { "epoch": 2.8175260486240985, "step": 10546, "train/loss_ctc": 0.7921301126480103, "train/loss_error": 0.4648737907409668, "train/loss_total": 0.5303250551223755 }, { "epoch": 2.8177932139994657, "step": 10547, "train/loss_ctc": 0.358224093914032, "train/loss_error": 0.37592390179634094, "train/loss_total": 0.3723839521408081 }, { "epoch": 2.818060379374833, "step": 10548, "train/loss_ctc": 0.78322434425354, "train/loss_error": 0.4494723975658417, "train/loss_total": 0.5162227749824524 }, { "epoch": 2.8183275447502005, "step": 10549, "train/loss_ctc": 1.1003221273422241, "train/loss_error": 0.4810110628604889, "train/loss_total": 0.6048732995986938 }, { "epoch": 2.8185947101255677, "grad_norm": 4.870266914367676, "learning_rate": 1.3099652685012024e-05, "loss": 0.5107, "step": 10550 }, { "epoch": 2.8185947101255677, "step": 10550, "train/loss_ctc": 0.8046297430992126, "train/loss_error": 0.4663444757461548, "train/loss_total": 0.5340015292167664 }, { "epoch": 2.818861875500935, "step": 10551, "train/loss_ctc": 0.9255409240722656, "train/loss_error": 0.4437997043132782, "train/loss_total": 0.5401479601860046 }, { "epoch": 2.8191290408763026, "step": 10552, "train/loss_ctc": 0.6554895639419556, "train/loss_error": 0.4661746919155121, "train/loss_total": 0.5040376782417297 }, { "epoch": 2.8193962062516698, "step": 10553, "train/loss_ctc": 0.813403844833374, "train/loss_error": 0.4836568236351013, "train/loss_total": 0.549606204032898 }, { "epoch": 2.819663371627037, "step": 10554, "train/loss_ctc": 1.290547490119934, "train/loss_error": 0.47135797142982483, "train/loss_total": 0.6351958513259888 }, { "epoch": 2.8199305370024046, "step": 10555, "train/loss_ctc": 0.44946467876434326, "train/loss_error": 0.4754921793937683, "train/loss_total": 0.47028666734695435 }, 
{ "epoch": 2.820197702377772, "step": 10556, "train/loss_ctc": 0.4444441497325897, "train/loss_error": 0.3459377884864807, "train/loss_total": 0.3656390607357025 }, { "epoch": 2.820464867753139, "step": 10557, "train/loss_ctc": 1.0533560514450073, "train/loss_error": 0.4385407567024231, "train/loss_total": 0.5615038275718689 }, { "epoch": 2.8207320331285066, "step": 10558, "train/loss_ctc": 0.7709997296333313, "train/loss_error": 0.41321641206741333, "train/loss_total": 0.48477309942245483 }, { "epoch": 2.820999198503874, "step": 10559, "train/loss_ctc": 0.9857031106948853, "train/loss_error": 0.4273276627063751, "train/loss_total": 0.5390027761459351 }, { "epoch": 2.821266363879241, "grad_norm": 1.4010401964187622, "learning_rate": 1.3083622762489982e-05, "loss": 0.5184, "step": 10560 }, { "epoch": 2.821266363879241, "step": 10560, "train/loss_ctc": 0.5416550636291504, "train/loss_error": 0.43794381618499756, "train/loss_total": 0.45868608355522156 }, { "epoch": 2.8215335292546087, "step": 10561, "train/loss_ctc": 0.7256501913070679, "train/loss_error": 0.4132969081401825, "train/loss_total": 0.475767582654953 }, { "epoch": 2.821800694629976, "step": 10562, "train/loss_ctc": 0.799891471862793, "train/loss_error": 0.41349175572395325, "train/loss_total": 0.49077171087265015 }, { "epoch": 2.822067860005343, "step": 10563, "train/loss_ctc": 0.5117775201797485, "train/loss_error": 0.46069130301475525, "train/loss_total": 0.4709085524082184 }, { "epoch": 2.8223350253807107, "step": 10564, "train/loss_ctc": 0.7524252533912659, "train/loss_error": 0.44730880856513977, "train/loss_total": 0.5083321332931519 }, { "epoch": 2.822602190756078, "step": 10565, "train/loss_ctc": 0.5721968412399292, "train/loss_error": 0.46549251675605774, "train/loss_total": 0.486833393573761 }, { "epoch": 2.8228693561314455, "step": 10566, "train/loss_ctc": 0.6506386995315552, "train/loss_error": 0.47854629158973694, "train/loss_total": 0.5129647850990295 }, { "epoch": 2.8231365215068127, 
"step": 10567, "train/loss_ctc": 0.8077131509780884, "train/loss_error": 0.4516693651676178, "train/loss_total": 0.522878110408783 }, { "epoch": 2.82340368688218, "step": 10568, "train/loss_ctc": 0.8336123824119568, "train/loss_error": 0.4908180236816406, "train/loss_total": 0.5593768954277039 }, { "epoch": 2.8236708522575475, "step": 10569, "train/loss_ctc": 0.5114028453826904, "train/loss_error": 0.4481409192085266, "train/loss_total": 0.46079331636428833 }, { "epoch": 2.8239380176329147, "grad_norm": 1.9703623056411743, "learning_rate": 1.306759283996794e-05, "loss": 0.4947, "step": 10570 }, { "epoch": 2.8239380176329147, "step": 10570, "train/loss_ctc": 0.48477911949157715, "train/loss_error": 0.43561920523643494, "train/loss_total": 0.44545120000839233 }, { "epoch": 2.8242051830082824, "step": 10571, "train/loss_ctc": 1.5800837278366089, "train/loss_error": 0.4858185052871704, "train/loss_total": 0.7046715617179871 }, { "epoch": 2.8244723483836496, "step": 10572, "train/loss_ctc": 1.0800371170043945, "train/loss_error": 0.46197420358657837, "train/loss_total": 0.5855867862701416 }, { "epoch": 2.8247395137590168, "step": 10573, "train/loss_ctc": 0.7375342845916748, "train/loss_error": 0.4845399856567383, "train/loss_total": 0.5351388454437256 }, { "epoch": 2.8250066791343844, "step": 10574, "train/loss_ctc": 0.5711494088172913, "train/loss_error": 0.3922162353992462, "train/loss_total": 0.42800289392471313 }, { "epoch": 2.8252738445097516, "step": 10575, "train/loss_ctc": 0.2853083312511444, "train/loss_error": 0.41839128732681274, "train/loss_total": 0.3917747139930725 }, { "epoch": 2.825541009885119, "step": 10576, "train/loss_ctc": 1.1771348714828491, "train/loss_error": 0.48256823420524597, "train/loss_total": 0.6214815974235535 }, { "epoch": 2.8258081752604864, "step": 10577, "train/loss_ctc": 0.5186057090759277, "train/loss_error": 0.42460882663726807, "train/loss_total": 0.44340822100639343 }, { "epoch": 2.8260753406358536, "step": 10578, 
"train/loss_ctc": 0.5726612210273743, "train/loss_error": 0.46919500827789307, "train/loss_total": 0.4898882508277893 }, { "epoch": 2.826342506011221, "step": 10579, "train/loss_ctc": 0.5073215961456299, "train/loss_error": 0.44549044966697693, "train/loss_total": 0.457856684923172 }, { "epoch": 2.8266096713865885, "grad_norm": 2.6457314491271973, "learning_rate": 1.30515629174459e-05, "loss": 0.5103, "step": 10580 }, { "epoch": 2.8266096713865885, "step": 10580, "train/loss_ctc": 0.6441507339477539, "train/loss_error": 0.4125077724456787, "train/loss_total": 0.4588363766670227 }, { "epoch": 2.8268768367619557, "step": 10581, "train/loss_ctc": 0.36910977959632874, "train/loss_error": 0.4410671889781952, "train/loss_total": 0.4266757071018219 }, { "epoch": 2.827144002137323, "step": 10582, "train/loss_ctc": 0.9161780476570129, "train/loss_error": 0.5072103142738342, "train/loss_total": 0.58900386095047 }, { "epoch": 2.8274111675126905, "step": 10583, "train/loss_ctc": 1.2436622381210327, "train/loss_error": 0.49149951338768005, "train/loss_total": 0.6419320702552795 }, { "epoch": 2.8276783328880577, "step": 10584, "train/loss_ctc": 2.400120496749878, "train/loss_error": 0.5277345180511475, "train/loss_total": 0.9022117257118225 }, { "epoch": 2.827945498263425, "step": 10585, "train/loss_ctc": 0.4156413674354553, "train/loss_error": 0.43976354598999023, "train/loss_total": 0.43493911623954773 }, { "epoch": 2.8282126636387925, "step": 10586, "train/loss_ctc": 0.8930115699768066, "train/loss_error": 0.5370588898658752, "train/loss_total": 0.6082494258880615 }, { "epoch": 2.8284798290141597, "step": 10587, "train/loss_ctc": 0.3858107626438141, "train/loss_error": 0.38697922229766846, "train/loss_total": 0.38674554228782654 }, { "epoch": 2.828746994389527, "step": 10588, "train/loss_ctc": 0.38168132305145264, "train/loss_error": 0.42723432183265686, "train/loss_total": 0.418123722076416 }, { "epoch": 2.8290141597648946, "step": 10589, "train/loss_ctc": 
0.42177093029022217, "train/loss_error": 0.4137207865715027, "train/loss_total": 0.41533082723617554 }, { "epoch": 2.8292813251402618, "grad_norm": 2.4194748401641846, "learning_rate": 1.3035532994923858e-05, "loss": 0.5282, "step": 10590 }, { "epoch": 2.8292813251402618, "step": 10590, "train/loss_ctc": 0.3904193341732025, "train/loss_error": 0.38644036650657654, "train/loss_total": 0.38723617792129517 }, { "epoch": 2.829548490515629, "step": 10591, "train/loss_ctc": 0.7038357853889465, "train/loss_error": 0.5269617438316345, "train/loss_total": 0.5623365640640259 }, { "epoch": 2.8298156558909966, "step": 10592, "train/loss_ctc": 0.397918701171875, "train/loss_error": 0.4284206032752991, "train/loss_total": 0.4223202168941498 }, { "epoch": 2.830082821266364, "step": 10593, "train/loss_ctc": 0.5693581104278564, "train/loss_error": 0.43225255608558655, "train/loss_total": 0.459673672914505 }, { "epoch": 2.830349986641731, "step": 10594, "train/loss_ctc": 0.6724147796630859, "train/loss_error": 0.4352998435497284, "train/loss_total": 0.48272281885147095 }, { "epoch": 2.8306171520170986, "step": 10595, "train/loss_ctc": 1.0094358921051025, "train/loss_error": 0.44026821851730347, "train/loss_total": 0.5541017651557922 }, { "epoch": 2.830884317392466, "step": 10596, "train/loss_ctc": 0.349098265171051, "train/loss_error": 0.48573237657546997, "train/loss_total": 0.4584055542945862 }, { "epoch": 2.831151482767833, "step": 10597, "train/loss_ctc": 1.2328615188598633, "train/loss_error": 0.4783741533756256, "train/loss_total": 0.6292716264724731 }, { "epoch": 2.8314186481432007, "step": 10598, "train/loss_ctc": 0.5434228181838989, "train/loss_error": 0.45389625430107117, "train/loss_total": 0.4718015789985657 }, { "epoch": 2.831685813518568, "step": 10599, "train/loss_ctc": 0.9457191824913025, "train/loss_error": 0.4650774896144867, "train/loss_total": 0.5612058639526367 }, { "epoch": 2.8319529788939355, "grad_norm": 20.05660057067871, "learning_rate": 
1.3019503072401816e-05, "loss": 0.4989, "step": 10600 }, { "epoch": 2.8319529788939355, "step": 10600, "train/loss_ctc": 0.9814764261245728, "train/loss_error": 0.43597787618637085, "train/loss_total": 0.5450775623321533 }, { "epoch": 2.8322201442693027, "step": 10601, "train/loss_ctc": 1.2384471893310547, "train/loss_error": 0.42024046182632446, "train/loss_total": 0.5838817954063416 }, { "epoch": 2.8324873096446703, "step": 10602, "train/loss_ctc": 0.9996258020401001, "train/loss_error": 0.4760916531085968, "train/loss_total": 0.5807985067367554 }, { "epoch": 2.8327544750200375, "step": 10603, "train/loss_ctc": 0.6954782009124756, "train/loss_error": 0.43805286288261414, "train/loss_total": 0.48953795433044434 }, { "epoch": 2.8330216403954047, "step": 10604, "train/loss_ctc": 0.5134918689727783, "train/loss_error": 0.3863025903701782, "train/loss_total": 0.4117404520511627 }, { "epoch": 2.8332888057707724, "step": 10605, "train/loss_ctc": 0.7553333044052124, "train/loss_error": 0.42428091168403625, "train/loss_total": 0.4904913902282715 }, { "epoch": 2.8335559711461396, "step": 10606, "train/loss_ctc": 2.187922716140747, "train/loss_error": 0.4595663845539093, "train/loss_total": 0.8052376508712769 }, { "epoch": 2.8338231365215067, "step": 10607, "train/loss_ctc": 0.2736436724662781, "train/loss_error": 0.4651908874511719, "train/loss_total": 0.42688146233558655 }, { "epoch": 2.8340903018968744, "step": 10608, "train/loss_ctc": 0.26486703753471375, "train/loss_error": 0.39411789178848267, "train/loss_total": 0.3682677447795868 }, { "epoch": 2.8343574672722416, "step": 10609, "train/loss_ctc": 0.5032908916473389, "train/loss_error": 0.37841397523880005, "train/loss_total": 0.4033893644809723 }, { "epoch": 2.8346246326476088, "grad_norm": 2.0021512508392334, "learning_rate": 1.3003473149879776e-05, "loss": 0.5105, "step": 10610 }, { "epoch": 2.8346246326476088, "step": 10610, "train/loss_ctc": 0.45960870385169983, "train/loss_error": 0.37089625000953674, 
"train/loss_total": 0.38863876461982727 }, { "epoch": 2.8348917980229764, "step": 10611, "train/loss_ctc": 0.4432738721370697, "train/loss_error": 0.40017595887184143, "train/loss_total": 0.408795565366745 }, { "epoch": 2.8351589633983436, "step": 10612, "train/loss_ctc": 0.8162444829940796, "train/loss_error": 0.4767228364944458, "train/loss_total": 0.5446271896362305 }, { "epoch": 2.835426128773711, "step": 10613, "train/loss_ctc": 0.9162319302558899, "train/loss_error": 0.4298054277896881, "train/loss_total": 0.5270907282829285 }, { "epoch": 2.8356932941490784, "step": 10614, "train/loss_ctc": 0.6634520292282104, "train/loss_error": 0.47936320304870605, "train/loss_total": 0.5161809921264648 }, { "epoch": 2.8359604595244456, "step": 10615, "train/loss_ctc": 1.6297128200531006, "train/loss_error": 0.5197283029556274, "train/loss_total": 0.7417252063751221 }, { "epoch": 2.836227624899813, "step": 10616, "train/loss_ctc": 0.3708294928073883, "train/loss_error": 0.4485546946525574, "train/loss_total": 0.43300968408584595 }, { "epoch": 2.8364947902751805, "step": 10617, "train/loss_ctc": 0.8476341366767883, "train/loss_error": 0.4593648314476013, "train/loss_total": 0.5370187163352966 }, { "epoch": 2.8367619556505477, "step": 10618, "train/loss_ctc": 1.1639456748962402, "train/loss_error": 0.527338445186615, "train/loss_total": 0.6546599268913269 }, { "epoch": 2.837029121025915, "step": 10619, "train/loss_ctc": 0.5521723031997681, "train/loss_error": 0.45605573058128357, "train/loss_total": 0.4752790629863739 }, { "epoch": 2.8372962864012825, "grad_norm": 6.177260875701904, "learning_rate": 1.2987443227357736e-05, "loss": 0.5227, "step": 10620 }, { "epoch": 2.8372962864012825, "step": 10620, "train/loss_ctc": 0.933080792427063, "train/loss_error": 0.4272425174713135, "train/loss_total": 0.5284101963043213 }, { "epoch": 2.8375634517766497, "step": 10621, "train/loss_ctc": 0.5218113660812378, "train/loss_error": 0.3839989900588989, "train/loss_total": 
0.4115614891052246 }, { "epoch": 2.837830617152017, "step": 10622, "train/loss_ctc": 0.903315544128418, "train/loss_error": 0.454785019159317, "train/loss_total": 0.544491171836853 }, { "epoch": 2.8380977825273845, "step": 10623, "train/loss_ctc": 0.6292592287063599, "train/loss_error": 0.4402700960636139, "train/loss_total": 0.47806793451309204 }, { "epoch": 2.8383649479027517, "step": 10624, "train/loss_ctc": 1.0674490928649902, "train/loss_error": 0.4961974322795868, "train/loss_total": 0.6104477643966675 }, { "epoch": 2.838632113278119, "step": 10625, "train/loss_ctc": 0.9675571918487549, "train/loss_error": 0.42410707473754883, "train/loss_total": 0.53279709815979 }, { "epoch": 2.8388992786534866, "step": 10626, "train/loss_ctc": 0.4488258361816406, "train/loss_error": 0.48239314556121826, "train/loss_total": 0.4756796956062317 }, { "epoch": 2.8391664440288538, "step": 10627, "train/loss_ctc": 0.6773088574409485, "train/loss_error": 0.424991637468338, "train/loss_total": 0.475455105304718 }, { "epoch": 2.839433609404221, "step": 10628, "train/loss_ctc": 1.5099530220031738, "train/loss_error": 0.4990309178829193, "train/loss_total": 0.7012153267860413 }, { "epoch": 2.8397007747795886, "step": 10629, "train/loss_ctc": 0.8392060995101929, "train/loss_error": 0.44202014803886414, "train/loss_total": 0.521457314491272 }, { "epoch": 2.839967940154956, "grad_norm": 3.942153215408325, "learning_rate": 1.2971413304835694e-05, "loss": 0.528, "step": 10630 }, { "epoch": 2.839967940154956, "step": 10630, "train/loss_ctc": 1.1167140007019043, "train/loss_error": 0.4675827622413635, "train/loss_total": 0.5974090099334717 }, { "epoch": 2.8402351055303234, "step": 10631, "train/loss_ctc": 1.5458145141601562, "train/loss_error": 0.501674234867096, "train/loss_total": 0.7105022668838501 }, { "epoch": 2.8405022709056906, "step": 10632, "train/loss_ctc": 0.7011311650276184, "train/loss_error": 0.36521729826927185, "train/loss_total": 0.43240004777908325 }, { "epoch": 
2.840769436281058, "step": 10633, "train/loss_ctc": 0.5017861127853394, "train/loss_error": 0.4835182726383209, "train/loss_total": 0.48717185854911804 }, { "epoch": 2.8410366016564255, "step": 10634, "train/loss_ctc": 0.46738606691360474, "train/loss_error": 0.4522850811481476, "train/loss_total": 0.455305278301239 }, { "epoch": 2.8413037670317927, "step": 10635, "train/loss_ctc": 0.5579430460929871, "train/loss_error": 0.40798547863960266, "train/loss_total": 0.43797701597213745 }, { "epoch": 2.8415709324071603, "step": 10636, "train/loss_ctc": 0.3471503257751465, "train/loss_error": 0.40961751341819763, "train/loss_total": 0.39712411165237427 }, { "epoch": 2.8418380977825275, "step": 10637, "train/loss_ctc": 0.8648002743721008, "train/loss_error": 0.4304622411727905, "train/loss_total": 0.5173298716545105 }, { "epoch": 2.8421052631578947, "step": 10638, "train/loss_ctc": 0.5607432126998901, "train/loss_error": 0.4349152445793152, "train/loss_total": 0.4600808322429657 }, { "epoch": 2.8423724285332623, "step": 10639, "train/loss_ctc": 0.4501338601112366, "train/loss_error": 0.37187105417251587, "train/loss_total": 0.3875236213207245 }, { "epoch": 2.8426395939086295, "grad_norm": 1.3829209804534912, "learning_rate": 1.2955383382313653e-05, "loss": 0.4883, "step": 10640 }, { "epoch": 2.8426395939086295, "step": 10640, "train/loss_ctc": 0.225102037191391, "train/loss_error": 0.48658472299575806, "train/loss_total": 0.4342881739139557 }, { "epoch": 2.8429067592839967, "step": 10641, "train/loss_ctc": 0.7856721878051758, "train/loss_error": 0.5482391715049744, "train/loss_total": 0.5957257747650146 }, { "epoch": 2.8431739246593644, "step": 10642, "train/loss_ctc": 0.4508496820926666, "train/loss_error": 0.3978559970855713, "train/loss_total": 0.4084547460079193 }, { "epoch": 2.8434410900347316, "step": 10643, "train/loss_ctc": 0.6250829696655273, "train/loss_error": 0.4519155025482178, "train/loss_total": 0.4865490198135376 }, { "epoch": 2.8437082554100988, "step": 
10644, "train/loss_ctc": 0.8224151730537415, "train/loss_error": 0.38475358486175537, "train/loss_total": 0.4722859263420105 }, { "epoch": 2.8439754207854664, "step": 10645, "train/loss_ctc": 0.6348839998245239, "train/loss_error": 0.4876916706562042, "train/loss_total": 0.5171301364898682 }, { "epoch": 2.8442425861608336, "step": 10646, "train/loss_ctc": 0.9214262962341309, "train/loss_error": 0.5155702829360962, "train/loss_total": 0.5967414975166321 }, { "epoch": 2.844509751536201, "step": 10647, "train/loss_ctc": 1.0841033458709717, "train/loss_error": 0.46168142557144165, "train/loss_total": 0.5861658453941345 }, { "epoch": 2.8447769169115684, "step": 10648, "train/loss_ctc": 0.6776994466781616, "train/loss_error": 0.4187133014202118, "train/loss_total": 0.4705105423927307 }, { "epoch": 2.8450440822869356, "step": 10649, "train/loss_ctc": 1.3728781938552856, "train/loss_error": 0.4530301094055176, "train/loss_total": 0.6369997262954712 }, { "epoch": 2.845311247662303, "grad_norm": 2.6303417682647705, "learning_rate": 1.2939353459791612e-05, "loss": 0.5205, "step": 10650 }, { "epoch": 2.845311247662303, "step": 10650, "train/loss_ctc": 0.1927584409713745, "train/loss_error": 0.394768089056015, "train/loss_total": 0.35436615347862244 }, { "epoch": 2.8455784130376705, "step": 10651, "train/loss_ctc": 0.6281591653823853, "train/loss_error": 0.4681762456893921, "train/loss_total": 0.5001728534698486 }, { "epoch": 2.8458455784130376, "step": 10652, "train/loss_ctc": 0.6728814840316772, "train/loss_error": 0.4600909352302551, "train/loss_total": 0.5026490688323975 }, { "epoch": 2.846112743788405, "step": 10653, "train/loss_ctc": 0.4612676501274109, "train/loss_error": 0.4514443278312683, "train/loss_total": 0.45340901613235474 }, { "epoch": 2.8463799091637725, "step": 10654, "train/loss_ctc": 0.6798277497291565, "train/loss_error": 0.4225211441516876, "train/loss_total": 0.47398248314857483 }, { "epoch": 2.8466470745391397, "step": 10655, "train/loss_ctc": 
0.8673654794692993, "train/loss_error": 0.4234426021575928, "train/loss_total": 0.5122271776199341 }, { "epoch": 2.846914239914507, "step": 10656, "train/loss_ctc": 0.4141501188278198, "train/loss_error": 0.3630787134170532, "train/loss_total": 0.3732929825782776 }, { "epoch": 2.8471814052898745, "step": 10657, "train/loss_ctc": 0.6819326877593994, "train/loss_error": 0.45795947313308716, "train/loss_total": 0.5027540922164917 }, { "epoch": 2.8474485706652417, "step": 10658, "train/loss_ctc": 1.3833295106887817, "train/loss_error": 0.4686239957809448, "train/loss_total": 0.6515650749206543 }, { "epoch": 2.847715736040609, "step": 10659, "train/loss_ctc": 0.7810057401657104, "train/loss_error": 0.46715688705444336, "train/loss_total": 0.5299266576766968 }, { "epoch": 2.8479829014159765, "grad_norm": 4.674605846405029, "learning_rate": 1.292332353726957e-05, "loss": 0.4854, "step": 10660 }, { "epoch": 2.8479829014159765, "step": 10660, "train/loss_ctc": 0.33925074338912964, "train/loss_error": 0.4351744055747986, "train/loss_total": 0.4159896671772003 }, { "epoch": 2.8482500667913437, "step": 10661, "train/loss_ctc": 0.7011052966117859, "train/loss_error": 0.4889201819896698, "train/loss_total": 0.5313571691513062 }, { "epoch": 2.848517232166711, "step": 10662, "train/loss_ctc": 0.7228411436080933, "train/loss_error": 0.45033371448516846, "train/loss_total": 0.5048352479934692 }, { "epoch": 2.8487843975420786, "step": 10663, "train/loss_ctc": 0.5119122266769409, "train/loss_error": 0.35926613211631775, "train/loss_total": 0.38979536294937134 }, { "epoch": 2.8490515629174458, "step": 10664, "train/loss_ctc": 0.4417986273765564, "train/loss_error": 0.47880819439888, "train/loss_total": 0.4714062809944153 }, { "epoch": 2.8493187282928134, "step": 10665, "train/loss_ctc": 1.0080604553222656, "train/loss_error": 0.42945754528045654, "train/loss_total": 0.5451781153678894 }, { "epoch": 2.8495858936681806, "step": 10666, "train/loss_ctc": 0.7196813225746155, 
"train/loss_error": 0.3824959099292755, "train/loss_total": 0.4499329924583435 }, { "epoch": 2.849853059043548, "step": 10667, "train/loss_ctc": 0.3600948452949524, "train/loss_error": 0.4596897065639496, "train/loss_total": 0.43977075815200806 }, { "epoch": 2.8501202244189154, "step": 10668, "train/loss_ctc": 0.8133901357650757, "train/loss_error": 0.48813337087631226, "train/loss_total": 0.5531847476959229 }, { "epoch": 2.8503873897942826, "step": 10669, "train/loss_ctc": 0.4359263777732849, "train/loss_error": 0.45740410685539246, "train/loss_total": 0.453108549118042 }, { "epoch": 2.8506545551696503, "grad_norm": 2.3687493801116943, "learning_rate": 1.290729361474753e-05, "loss": 0.4755, "step": 10670 }, { "epoch": 2.8506545551696503, "step": 10670, "train/loss_ctc": 0.548157274723053, "train/loss_error": 0.501952588558197, "train/loss_total": 0.5111935138702393 }, { "epoch": 2.8509217205450175, "step": 10671, "train/loss_ctc": 0.8237167596817017, "train/loss_error": 0.4902438521385193, "train/loss_total": 0.5569384098052979 }, { "epoch": 2.8511888859203847, "step": 10672, "train/loss_ctc": 1.1804475784301758, "train/loss_error": 0.45341023802757263, "train/loss_total": 0.5988177061080933 }, { "epoch": 2.8514560512957523, "step": 10673, "train/loss_ctc": 0.5521308183670044, "train/loss_error": 0.4394156038761139, "train/loss_total": 0.461958646774292 }, { "epoch": 2.8517232166711195, "step": 10674, "train/loss_ctc": 0.6744198203086853, "train/loss_error": 0.43012067675590515, "train/loss_total": 0.47898051142692566 }, { "epoch": 2.8519903820464867, "step": 10675, "train/loss_ctc": 0.6421205997467041, "train/loss_error": 0.45111083984375, "train/loss_total": 0.4893128275871277 }, { "epoch": 2.8522575474218543, "step": 10676, "train/loss_ctc": 0.8448565006256104, "train/loss_error": 0.43698734045028687, "train/loss_total": 0.5185611844062805 }, { "epoch": 2.8525247127972215, "step": 10677, "train/loss_ctc": 0.3103266954421997, "train/loss_error": 
0.44862523674964905, "train/loss_total": 0.4209655225276947 }, { "epoch": 2.8527918781725887, "step": 10678, "train/loss_ctc": 0.43631547689437866, "train/loss_error": 0.45307061076164246, "train/loss_total": 0.4497196078300476 }, { "epoch": 2.8530590435479564, "step": 10679, "train/loss_ctc": 0.6388974189758301, "train/loss_error": 0.4133780896663666, "train/loss_total": 0.45848196744918823 }, { "epoch": 2.8533262089233236, "grad_norm": 2.3762500286102295, "learning_rate": 1.2891263692225488e-05, "loss": 0.4945, "step": 10680 }, { "epoch": 2.8533262089233236, "step": 10680, "train/loss_ctc": 0.515121579170227, "train/loss_error": 0.4831058084964752, "train/loss_total": 0.4895089864730835 }, { "epoch": 2.8535933742986908, "step": 10681, "train/loss_ctc": 0.9222668409347534, "train/loss_error": 0.5693146586418152, "train/loss_total": 0.6399050951004028 }, { "epoch": 2.8538605396740584, "step": 10682, "train/loss_ctc": 0.6306346654891968, "train/loss_error": 0.505922257900238, "train/loss_total": 0.5308647155761719 }, { "epoch": 2.8541277050494256, "step": 10683, "train/loss_ctc": 0.8113397359848022, "train/loss_error": 0.39350706338882446, "train/loss_total": 0.477073609828949 }, { "epoch": 2.854394870424793, "step": 10684, "train/loss_ctc": 0.42518365383148193, "train/loss_error": 0.44646018743515015, "train/loss_total": 0.44220489263534546 }, { "epoch": 2.8546620358001604, "step": 10685, "train/loss_ctc": 0.6249133348464966, "train/loss_error": 0.44438374042510986, "train/loss_total": 0.48048967123031616 }, { "epoch": 2.8549292011755276, "step": 10686, "train/loss_ctc": 0.4720541834831238, "train/loss_error": 0.4764346778392792, "train/loss_total": 0.4755585789680481 }, { "epoch": 2.855196366550895, "step": 10687, "train/loss_ctc": 0.7630955576896667, "train/loss_error": 0.4301140606403351, "train/loss_total": 0.4967103600502014 }, { "epoch": 2.8554635319262625, "step": 10688, "train/loss_ctc": 0.8231937885284424, "train/loss_error": 0.41605696082115173, 
"train/loss_total": 0.49748432636260986 }, { "epoch": 2.8557306973016297, "step": 10689, "train/loss_ctc": 1.2452234029769897, "train/loss_error": 0.46547701954841614, "train/loss_total": 0.6214263439178467 }, { "epoch": 2.855997862676997, "grad_norm": 1.893830418586731, "learning_rate": 1.2875233769703446e-05, "loss": 0.5151, "step": 10690 }, { "epoch": 2.855997862676997, "step": 10690, "train/loss_ctc": 0.8616095185279846, "train/loss_error": 0.5021846890449524, "train/loss_total": 0.5740696787834167 }, { "epoch": 2.8562650280523645, "step": 10691, "train/loss_ctc": 0.8586795330047607, "train/loss_error": 0.4471488893032074, "train/loss_total": 0.5294550657272339 }, { "epoch": 2.8565321934277317, "step": 10692, "train/loss_ctc": 0.6097343564033508, "train/loss_error": 0.45411717891693115, "train/loss_total": 0.4852406084537506 }, { "epoch": 2.856799358803099, "step": 10693, "train/loss_ctc": 0.5062481164932251, "train/loss_error": 0.40616583824157715, "train/loss_total": 0.4261822998523712 }, { "epoch": 2.8570665241784665, "step": 10694, "train/loss_ctc": 0.95570969581604, "train/loss_error": 0.44254714250564575, "train/loss_total": 0.5451796650886536 }, { "epoch": 2.8573336895538337, "step": 10695, "train/loss_ctc": 0.48288285732269287, "train/loss_error": 0.4808022379875183, "train/loss_total": 0.4812183678150177 }, { "epoch": 2.857600854929201, "step": 10696, "train/loss_ctc": 0.9749222993850708, "train/loss_error": 0.5141653418540955, "train/loss_total": 0.6063167452812195 }, { "epoch": 2.8578680203045685, "step": 10697, "train/loss_ctc": 0.7434170246124268, "train/loss_error": 0.4248369634151459, "train/loss_total": 0.488552987575531 }, { "epoch": 2.8581351856799357, "step": 10698, "train/loss_ctc": 0.7770911455154419, "train/loss_error": 0.4047016203403473, "train/loss_total": 0.4791795611381531 }, { "epoch": 2.8584023510553034, "step": 10699, "train/loss_ctc": 0.8605148792266846, "train/loss_error": 0.45766305923461914, "train/loss_total": 
0.5382333993911743 }, { "epoch": 2.8586695164306706, "grad_norm": 1.9997135400772095, "learning_rate": 1.2859203847181405e-05, "loss": 0.5154, "step": 10700 }, { "epoch": 2.8586695164306706, "step": 10700, "train/loss_ctc": 0.7430521249771118, "train/loss_error": 0.3953694701194763, "train/loss_total": 0.4649060368537903 }, { "epoch": 2.8589366818060378, "step": 10701, "train/loss_ctc": 0.550715446472168, "train/loss_error": 0.42484143376350403, "train/loss_total": 0.4500162601470947 }, { "epoch": 2.8592038471814054, "step": 10702, "train/loss_ctc": 0.48604297637939453, "train/loss_error": 0.47185346484184265, "train/loss_total": 0.47469136118888855 }, { "epoch": 2.8594710125567726, "step": 10703, "train/loss_ctc": 0.5798121690750122, "train/loss_error": 0.4317132234573364, "train/loss_total": 0.4613330364227295 }, { "epoch": 2.8597381779321402, "step": 10704, "train/loss_ctc": 0.9623426198959351, "train/loss_error": 0.4775070548057556, "train/loss_total": 0.5744742155075073 }, { "epoch": 2.8600053433075074, "step": 10705, "train/loss_ctc": 0.778490424156189, "train/loss_error": 0.5099447965621948, "train/loss_total": 0.5636539459228516 }, { "epoch": 2.8602725086828746, "step": 10706, "train/loss_ctc": 1.0735490322113037, "train/loss_error": 0.5165379047393799, "train/loss_total": 0.6279401183128357 }, { "epoch": 2.8605396740582423, "step": 10707, "train/loss_ctc": 1.1515271663665771, "train/loss_error": 0.4820761978626251, "train/loss_total": 0.6159664392471313 }, { "epoch": 2.8608068394336095, "step": 10708, "train/loss_ctc": 0.5688612461090088, "train/loss_error": 0.3868873119354248, "train/loss_total": 0.42328208684921265 }, { "epoch": 2.8610740048089767, "step": 10709, "train/loss_ctc": 0.4791374206542969, "train/loss_error": 0.5296566486358643, "train/loss_total": 0.5195528268814087 }, { "epoch": 2.8613411701843443, "grad_norm": 1.712502121925354, "learning_rate": 1.2843173924659365e-05, "loss": 0.5176, "step": 10710 }, { "epoch": 2.8613411701843443, "step": 
10710, "train/loss_ctc": 0.9822428226470947, "train/loss_error": 0.4918440282344818, "train/loss_total": 0.5899237990379333 }, { "epoch": 2.8616083355597115, "step": 10711, "train/loss_ctc": 0.5871556401252747, "train/loss_error": 0.5308742523193359, "train/loss_total": 0.5421305298805237 }, { "epoch": 2.8618755009350787, "step": 10712, "train/loss_ctc": 0.5424623489379883, "train/loss_error": 0.4808771312236786, "train/loss_total": 0.4931941628456116 }, { "epoch": 2.8621426663104463, "step": 10713, "train/loss_ctc": 0.44603532552719116, "train/loss_error": 0.4655076861381531, "train/loss_total": 0.4616132378578186 }, { "epoch": 2.8624098316858135, "step": 10714, "train/loss_ctc": 0.7078171968460083, "train/loss_error": 0.4197501838207245, "train/loss_total": 0.47736358642578125 }, { "epoch": 2.8626769970611807, "step": 10715, "train/loss_ctc": 0.435843288898468, "train/loss_error": 0.42993900179862976, "train/loss_total": 0.4311198592185974 }, { "epoch": 2.8629441624365484, "step": 10716, "train/loss_ctc": 0.5193284153938293, "train/loss_error": 0.4211692214012146, "train/loss_total": 0.44080105423927307 }, { "epoch": 2.8632113278119156, "step": 10717, "train/loss_ctc": 0.8880785703659058, "train/loss_error": 0.43219202756881714, "train/loss_total": 0.5233693718910217 }, { "epoch": 2.8634784931872828, "step": 10718, "train/loss_ctc": 0.6955534219741821, "train/loss_error": 0.45006969571113586, "train/loss_total": 0.49916645884513855 }, { "epoch": 2.8637456585626504, "step": 10719, "train/loss_ctc": 0.7332108020782471, "train/loss_error": 0.4498686194419861, "train/loss_total": 0.5065370798110962 }, { "epoch": 2.8640128239380176, "grad_norm": 1.5612770318984985, "learning_rate": 1.2827144002137323e-05, "loss": 0.4965, "step": 10720 }, { "epoch": 2.8640128239380176, "step": 10720, "train/loss_ctc": 0.6985570192337036, "train/loss_error": 0.4534517526626587, "train/loss_total": 0.5024728178977966 }, { "epoch": 2.864279989313385, "step": 10721, "train/loss_ctc": 
1.6505907773971558, "train/loss_error": 0.4848729372024536, "train/loss_total": 0.718016505241394 }, { "epoch": 2.8645471546887524, "step": 10722, "train/loss_ctc": 1.1042364835739136, "train/loss_error": 0.44625407457351685, "train/loss_total": 0.5778505802154541 }, { "epoch": 2.8648143200641196, "step": 10723, "train/loss_ctc": 0.7688106298446655, "train/loss_error": 0.4418568015098572, "train/loss_total": 0.5072475671768188 }, { "epoch": 2.865081485439487, "step": 10724, "train/loss_ctc": 0.9509595632553101, "train/loss_error": 0.4090615510940552, "train/loss_total": 0.5174411535263062 }, { "epoch": 2.8653486508148545, "step": 10725, "train/loss_ctc": 0.3928243815898895, "train/loss_error": 0.4386621415615082, "train/loss_total": 0.42949458956718445 }, { "epoch": 2.8656158161902217, "step": 10726, "train/loss_ctc": 0.9010564088821411, "train/loss_error": 0.46721741557121277, "train/loss_total": 0.5539852380752563 }, { "epoch": 2.865882981565589, "step": 10727, "train/loss_ctc": 0.5904939770698547, "train/loss_error": 0.42268291115760803, "train/loss_total": 0.4562451243400574 }, { "epoch": 2.8661501469409565, "step": 10728, "train/loss_ctc": 0.7277146577835083, "train/loss_error": 0.4992334842681885, "train/loss_total": 0.5449297428131104 }, { "epoch": 2.8664173123163237, "step": 10729, "train/loss_ctc": 0.7200090289115906, "train/loss_error": 0.44337964057922363, "train/loss_total": 0.49870553612709045 }, { "epoch": 2.866684477691691, "grad_norm": 1.6294211149215698, "learning_rate": 1.2811114079615283e-05, "loss": 0.5306, "step": 10730 }, { "epoch": 2.866684477691691, "step": 10730, "train/loss_ctc": 0.5498067736625671, "train/loss_error": 0.352164089679718, "train/loss_total": 0.3916926383972168 }, { "epoch": 2.8669516430670585, "step": 10731, "train/loss_ctc": 0.9455386400222778, "train/loss_error": 0.4531497657299042, "train/loss_total": 0.5516275763511658 }, { "epoch": 2.8672188084424257, "step": 10732, "train/loss_ctc": 0.6703344583511353, 
"train/loss_error": 0.3938307464122772, "train/loss_total": 0.44913148880004883 }, { "epoch": 2.8674859738177934, "step": 10733, "train/loss_ctc": 0.5507771372795105, "train/loss_error": 0.4443139135837555, "train/loss_total": 0.46560657024383545 }, { "epoch": 2.8677531391931605, "step": 10734, "train/loss_ctc": 0.7031421661376953, "train/loss_error": 0.4599161446094513, "train/loss_total": 0.508561372756958 }, { "epoch": 2.868020304568528, "step": 10735, "train/loss_ctc": 0.5418729186058044, "train/loss_error": 0.4892214834690094, "train/loss_total": 0.4997517764568329 }, { "epoch": 2.8682874699438954, "step": 10736, "train/loss_ctc": 0.34311866760253906, "train/loss_error": 0.39885449409484863, "train/loss_total": 0.38770735263824463 }, { "epoch": 2.8685546353192626, "step": 10737, "train/loss_ctc": 1.211146593093872, "train/loss_error": 0.47051194310188293, "train/loss_total": 0.6186388731002808 }, { "epoch": 2.86882180069463, "step": 10738, "train/loss_ctc": 0.42950308322906494, "train/loss_error": 0.4338328242301941, "train/loss_total": 0.4329668879508972 }, { "epoch": 2.8690889660699974, "step": 10739, "train/loss_ctc": 0.7247976064682007, "train/loss_error": 0.42475056648254395, "train/loss_total": 0.48475998640060425 }, { "epoch": 2.8693561314453646, "grad_norm": 2.69275164604187, "learning_rate": 1.2795084157093241e-05, "loss": 0.479, "step": 10740 }, { "epoch": 2.8693561314453646, "step": 10740, "train/loss_ctc": 0.43971478939056396, "train/loss_error": 0.40646904706954956, "train/loss_total": 0.4131182134151459 }, { "epoch": 2.8696232968207323, "step": 10741, "train/loss_ctc": 0.4431472718715668, "train/loss_error": 0.40469253063201904, "train/loss_total": 0.412383496761322 }, { "epoch": 2.8698904621960994, "step": 10742, "train/loss_ctc": 0.6719797849655151, "train/loss_error": 0.3895739018917084, "train/loss_total": 0.4460550844669342 }, { "epoch": 2.8701576275714666, "step": 10743, "train/loss_ctc": 0.38254594802856445, "train/loss_error": 
0.37082746624946594, "train/loss_total": 0.3731711506843567 }, { "epoch": 2.8704247929468343, "step": 10744, "train/loss_ctc": 0.5480556488037109, "train/loss_error": 0.4682067036628723, "train/loss_total": 0.48417648673057556 }, { "epoch": 2.8706919583222015, "step": 10745, "train/loss_ctc": 0.29917871952056885, "train/loss_error": 0.4272095561027527, "train/loss_total": 0.4016033709049225 }, { "epoch": 2.8709591236975687, "step": 10746, "train/loss_ctc": 0.43058884143829346, "train/loss_error": 0.4136241376399994, "train/loss_total": 0.4170171022415161 }, { "epoch": 2.8712262890729363, "step": 10747, "train/loss_ctc": 0.39775675535202026, "train/loss_error": 0.41201645135879517, "train/loss_total": 0.40916454792022705 }, { "epoch": 2.8714934544483035, "step": 10748, "train/loss_ctc": 0.7813550233840942, "train/loss_error": 0.4720054864883423, "train/loss_total": 0.5338754057884216 }, { "epoch": 2.8717606198236707, "step": 10749, "train/loss_ctc": 0.6477656364440918, "train/loss_error": 0.4355044364929199, "train/loss_total": 0.4779566526412964 }, { "epoch": 2.8720277851990383, "grad_norm": 2.204608678817749, "learning_rate": 1.27790542345712e-05, "loss": 0.4369, "step": 10750 }, { "epoch": 2.8720277851990383, "step": 10750, "train/loss_ctc": 0.41819560527801514, "train/loss_error": 0.4300735592842102, "train/loss_total": 0.42769795656204224 }, { "epoch": 2.8722949505744055, "step": 10751, "train/loss_ctc": 2.2564573287963867, "train/loss_error": 0.5241057872772217, "train/loss_total": 0.8705761432647705 }, { "epoch": 2.8725621159497727, "step": 10752, "train/loss_ctc": 0.3846736550331116, "train/loss_error": 0.45068761706352234, "train/loss_total": 0.43748483061790466 }, { "epoch": 2.8728292813251404, "step": 10753, "train/loss_ctc": 0.6729708313941956, "train/loss_error": 0.43711256980895996, "train/loss_total": 0.4842842221260071 }, { "epoch": 2.8730964467005076, "step": 10754, "train/loss_ctc": 1.4302093982696533, "train/loss_error": 0.4781479835510254, 
"train/loss_total": 0.668560266494751 }, { "epoch": 2.8733636120758748, "step": 10755, "train/loss_ctc": 0.900149941444397, "train/loss_error": 0.4416612386703491, "train/loss_total": 0.5333589911460876 }, { "epoch": 2.8736307774512424, "step": 10756, "train/loss_ctc": 0.3397194743156433, "train/loss_error": 0.4933595061302185, "train/loss_total": 0.4626315236091614 }, { "epoch": 2.8738979428266096, "step": 10757, "train/loss_ctc": 0.7412088513374329, "train/loss_error": 0.46531689167022705, "train/loss_total": 0.5204952955245972 }, { "epoch": 2.874165108201977, "step": 10758, "train/loss_ctc": 1.1279385089874268, "train/loss_error": 0.4514940083026886, "train/loss_total": 0.5867829322814941 }, { "epoch": 2.8744322735773444, "step": 10759, "train/loss_ctc": 0.47397980093955994, "train/loss_error": 0.5230668187141418, "train/loss_total": 0.513249397277832 }, { "epoch": 2.8746994389527116, "grad_norm": 1.9411507844924927, "learning_rate": 1.2763024312049159e-05, "loss": 0.5505, "step": 10760 }, { "epoch": 2.8746994389527116, "step": 10760, "train/loss_ctc": 0.7033857107162476, "train/loss_error": 0.47484922409057617, "train/loss_total": 0.5205565094947815 }, { "epoch": 2.874966604328079, "step": 10761, "train/loss_ctc": 0.4127347767353058, "train/loss_error": 0.4553248882293701, "train/loss_total": 0.4468068778514862 }, { "epoch": 2.8752337697034465, "step": 10762, "train/loss_ctc": 1.2907280921936035, "train/loss_error": 0.3957357406616211, "train/loss_total": 0.5747342109680176 }, { "epoch": 2.8755009350788137, "step": 10763, "train/loss_ctc": 0.5587907433509827, "train/loss_error": 0.3940536081790924, "train/loss_total": 0.427001029253006 }, { "epoch": 2.8757681004541813, "step": 10764, "train/loss_ctc": 1.1380152702331543, "train/loss_error": 0.4098338782787323, "train/loss_total": 0.5554701685905457 }, { "epoch": 2.8760352658295485, "step": 10765, "train/loss_ctc": 0.502408504486084, "train/loss_error": 0.3900255858898163, "train/loss_total": 0.4125021696090698 
}, { "epoch": 2.8763024312049157, "step": 10766, "train/loss_ctc": 0.5538978576660156, "train/loss_error": 0.44915854930877686, "train/loss_total": 0.47010642290115356 }, { "epoch": 2.8765695965802833, "step": 10767, "train/loss_ctc": 0.5408060550689697, "train/loss_error": 0.46206918358802795, "train/loss_total": 0.47781655192375183 }, { "epoch": 2.8768367619556505, "step": 10768, "train/loss_ctc": 0.3742472231388092, "train/loss_error": 0.44004902243614197, "train/loss_total": 0.42688867449760437 }, { "epoch": 2.877103927331018, "step": 10769, "train/loss_ctc": 0.38772451877593994, "train/loss_error": 0.4229145050048828, "train/loss_total": 0.41587650775909424 }, { "epoch": 2.8773710927063854, "grad_norm": 2.9981961250305176, "learning_rate": 1.2746994389527117e-05, "loss": 0.4728, "step": 10770 }, { "epoch": 2.8773710927063854, "step": 10770, "train/loss_ctc": 0.5831418633460999, "train/loss_error": 0.42924022674560547, "train/loss_total": 0.4600205719470978 }, { "epoch": 2.8776382580817526, "step": 10771, "train/loss_ctc": 0.4515143036842346, "train/loss_error": 0.4122978448867798, "train/loss_total": 0.4201411306858063 }, { "epoch": 2.87790542345712, "step": 10772, "train/loss_ctc": 0.49250712990760803, "train/loss_error": 0.36836349964141846, "train/loss_total": 0.39319223165512085 }, { "epoch": 2.8781725888324874, "step": 10773, "train/loss_ctc": 0.36754873394966125, "train/loss_error": 0.4378567934036255, "train/loss_total": 0.4237951934337616 }, { "epoch": 2.8784397542078546, "step": 10774, "train/loss_ctc": 0.39699721336364746, "train/loss_error": 0.40789103507995605, "train/loss_total": 0.4057122766971588 }, { "epoch": 2.8787069195832222, "step": 10775, "train/loss_ctc": 0.8053817749023438, "train/loss_error": 0.44785818457603455, "train/loss_total": 0.5193629264831543 }, { "epoch": 2.8789740849585894, "step": 10776, "train/loss_ctc": 0.6161552667617798, "train/loss_error": 0.5291398167610168, "train/loss_total": 0.5465428829193115 }, { "epoch": 
2.8792412503339566, "step": 10777, "train/loss_ctc": 0.793609619140625, "train/loss_error": 0.49582618474960327, "train/loss_total": 0.5553828477859497 }, { "epoch": 2.8795084157093243, "step": 10778, "train/loss_ctc": 0.5529887080192566, "train/loss_error": 0.46633613109588623, "train/loss_total": 0.48366665840148926 }, { "epoch": 2.8797755810846914, "step": 10779, "train/loss_ctc": 0.9449492692947388, "train/loss_error": 0.4884684681892395, "train/loss_total": 0.5797646641731262 }, { "epoch": 2.8800427464600586, "grad_norm": 3.8022706508636475, "learning_rate": 1.2730964467005075e-05, "loss": 0.4788, "step": 10780 }, { "epoch": 2.8800427464600586, "step": 10780, "train/loss_ctc": 1.5269243717193604, "train/loss_error": 0.4439961016178131, "train/loss_total": 0.6605817675590515 }, { "epoch": 2.8803099118354263, "step": 10781, "train/loss_ctc": 0.505429744720459, "train/loss_error": 0.4678371846675873, "train/loss_total": 0.47535571455955505 }, { "epoch": 2.8805770772107935, "step": 10782, "train/loss_ctc": 0.4345508813858032, "train/loss_error": 0.4098801016807556, "train/loss_total": 0.4148142635822296 }, { "epoch": 2.8808442425861607, "step": 10783, "train/loss_ctc": 0.8075672388076782, "train/loss_error": 0.4607144892215729, "train/loss_total": 0.5300850868225098 }, { "epoch": 2.8811114079615283, "step": 10784, "train/loss_ctc": 0.7624433040618896, "train/loss_error": 0.44130027294158936, "train/loss_total": 0.5055288672447205 }, { "epoch": 2.8813785733368955, "step": 10785, "train/loss_ctc": 0.4828947186470032, "train/loss_error": 0.3992970883846283, "train/loss_total": 0.4160166382789612 }, { "epoch": 2.8816457387122627, "step": 10786, "train/loss_ctc": 0.29530155658721924, "train/loss_error": 0.37167975306510925, "train/loss_total": 0.3564041256904602 }, { "epoch": 2.8819129040876303, "step": 10787, "train/loss_ctc": 1.2672655582427979, "train/loss_error": 0.4635058343410492, "train/loss_total": 0.6242578029632568 }, { "epoch": 2.8821800694629975, "step": 
10788, "train/loss_ctc": 0.9800503849983215, "train/loss_error": 0.46540045738220215, "train/loss_total": 0.5683304667472839 }, { "epoch": 2.8824472348383647, "step": 10789, "train/loss_ctc": 0.5535012483596802, "train/loss_error": 0.42460814118385315, "train/loss_total": 0.45038676261901855 }, { "epoch": 2.8827144002137324, "grad_norm": 2.0949618816375732, "learning_rate": 1.2714934544483035e-05, "loss": 0.5002, "step": 10790 }, { "epoch": 2.8827144002137324, "step": 10790, "train/loss_ctc": 1.2416954040527344, "train/loss_error": 0.4851977527141571, "train/loss_total": 0.6364972591400146 }, { "epoch": 2.8829815655890996, "step": 10791, "train/loss_ctc": 1.378125786781311, "train/loss_error": 0.49388018250465393, "train/loss_total": 0.6707292795181274 }, { "epoch": 2.8832487309644668, "step": 10792, "train/loss_ctc": 0.3193173408508301, "train/loss_error": 0.4031081795692444, "train/loss_total": 0.38635003566741943 }, { "epoch": 2.8835158963398344, "step": 10793, "train/loss_ctc": 0.8674189448356628, "train/loss_error": 0.48174721002578735, "train/loss_total": 0.5588815212249756 }, { "epoch": 2.8837830617152016, "step": 10794, "train/loss_ctc": 0.801882266998291, "train/loss_error": 0.4668332636356354, "train/loss_total": 0.5338430404663086 }, { "epoch": 2.884050227090569, "step": 10795, "train/loss_ctc": 0.9115644693374634, "train/loss_error": 0.4683493673801422, "train/loss_total": 0.5569924116134644 }, { "epoch": 2.8843173924659364, "step": 10796, "train/loss_ctc": 0.8274222612380981, "train/loss_error": 0.47895216941833496, "train/loss_total": 0.5486462116241455 }, { "epoch": 2.8845845578413036, "step": 10797, "train/loss_ctc": 0.3779950439929962, "train/loss_error": 0.42634111642837524, "train/loss_total": 0.41667190194129944 }, { "epoch": 2.8848517232166713, "step": 10798, "train/loss_ctc": 0.5934270620346069, "train/loss_error": 0.45713862776756287, "train/loss_total": 0.4843963384628296 }, { "epoch": 2.8851188885920385, "step": 10799, "train/loss_ctc": 
0.7188681364059448, "train/loss_error": 0.4588586688041687, "train/loss_total": 0.5108605623245239 }, { "epoch": 2.8853860539674057, "grad_norm": 3.2467458248138428, "learning_rate": 1.2698904621960995e-05, "loss": 0.5304, "step": 10800 }, { "epoch": 2.8853860539674057, "step": 10800, "train/loss_ctc": 1.1058533191680908, "train/loss_error": 0.42403140664100647, "train/loss_total": 0.5603958368301392 }, { "epoch": 2.8856532193427733, "step": 10801, "train/loss_ctc": 0.6067841649055481, "train/loss_error": 0.48581230640411377, "train/loss_total": 0.5100066661834717 }, { "epoch": 2.8859203847181405, "step": 10802, "train/loss_ctc": 0.39757290482521057, "train/loss_error": 0.4320876896381378, "train/loss_total": 0.4251847565174103 }, { "epoch": 2.886187550093508, "step": 10803, "train/loss_ctc": 1.083767294883728, "train/loss_error": 0.4060786962509155, "train/loss_total": 0.5416164398193359 }, { "epoch": 2.8864547154688753, "step": 10804, "train/loss_ctc": 0.654725193977356, "train/loss_error": 0.3619895875453949, "train/loss_total": 0.42053669691085815 }, { "epoch": 2.8867218808442425, "step": 10805, "train/loss_ctc": 0.5905177593231201, "train/loss_error": 0.5047969222068787, "train/loss_total": 0.521941065788269 }, { "epoch": 2.88698904621961, "step": 10806, "train/loss_ctc": 0.6931875944137573, "train/loss_error": 0.41174402832984924, "train/loss_total": 0.4680327773094177 }, { "epoch": 2.8872562115949774, "step": 10807, "train/loss_ctc": 0.3584883213043213, "train/loss_error": 0.3968314230442047, "train/loss_total": 0.3891628384590149 }, { "epoch": 2.8875233769703446, "step": 10808, "train/loss_ctc": 1.6940054893493652, "train/loss_error": 0.447890043258667, "train/loss_total": 0.6971131563186646 }, { "epoch": 2.887790542345712, "step": 10809, "train/loss_ctc": 0.883855402469635, "train/loss_error": 0.4038057029247284, "train/loss_total": 0.4998156428337097 }, { "epoch": 2.8880577077210794, "grad_norm": 2.8894755840301514, "learning_rate": 
1.2682874699438954e-05, "loss": 0.5034, "step": 10810 }, { "epoch": 2.8880577077210794, "step": 10810, "train/loss_ctc": 0.8483438491821289, "train/loss_error": 0.4532330334186554, "train/loss_total": 0.532255232334137 }, { "epoch": 2.8883248730964466, "step": 10811, "train/loss_ctc": 0.6430768370628357, "train/loss_error": 0.4537408649921417, "train/loss_total": 0.4916080832481384 }, { "epoch": 2.8885920384718142, "step": 10812, "train/loss_ctc": 1.2284294366836548, "train/loss_error": 0.548951268196106, "train/loss_total": 0.6848469376564026 }, { "epoch": 2.8888592038471814, "step": 10813, "train/loss_ctc": 0.8794013261795044, "train/loss_error": 0.41865795850753784, "train/loss_total": 0.5108066201210022 }, { "epoch": 2.8891263692225486, "step": 10814, "train/loss_ctc": 0.5208055973052979, "train/loss_error": 0.456031858921051, "train/loss_total": 0.4689866006374359 }, { "epoch": 2.8893935345979163, "step": 10815, "train/loss_ctc": 1.4943435192108154, "train/loss_error": 0.4742002785205841, "train/loss_total": 0.6782289743423462 }, { "epoch": 2.8896606999732835, "step": 10816, "train/loss_ctc": 0.585954487323761, "train/loss_error": 0.4120616912841797, "train/loss_total": 0.4468402564525604 }, { "epoch": 2.8899278653486506, "step": 10817, "train/loss_ctc": 0.6142905354499817, "train/loss_error": 0.475125253200531, "train/loss_total": 0.5029582977294922 }, { "epoch": 2.8901950307240183, "step": 10818, "train/loss_ctc": 0.2500545382499695, "train/loss_error": 0.4245661199092865, "train/loss_total": 0.38966381549835205 }, { "epoch": 2.8904621960993855, "step": 10819, "train/loss_ctc": 0.5812395811080933, "train/loss_error": 0.43938204646110535, "train/loss_total": 0.4677535593509674 }, { "epoch": 2.8907293614747527, "grad_norm": 2.0128519535064697, "learning_rate": 1.2666844776916913e-05, "loss": 0.5174, "step": 10820 }, { "epoch": 2.8907293614747527, "step": 10820, "train/loss_ctc": 0.7904351949691772, "train/loss_error": 0.4876728951931, "train/loss_total": 
0.5482254028320312 }, { "epoch": 2.8909965268501203, "step": 10821, "train/loss_ctc": 0.5098244547843933, "train/loss_error": 0.3973747789859772, "train/loss_total": 0.4198647141456604 }, { "epoch": 2.8912636922254875, "step": 10822, "train/loss_ctc": 0.2964272201061249, "train/loss_error": 0.4862728714942932, "train/loss_total": 0.4483037292957306 }, { "epoch": 2.8915308576008547, "step": 10823, "train/loss_ctc": 0.8455549478530884, "train/loss_error": 0.4759455919265747, "train/loss_total": 0.5498674511909485 }, { "epoch": 2.8917980229762223, "step": 10824, "train/loss_ctc": 0.530501127243042, "train/loss_error": 0.40851038694381714, "train/loss_total": 0.4329085350036621 }, { "epoch": 2.8920651883515895, "step": 10825, "train/loss_ctc": 0.9642991423606873, "train/loss_error": 0.4978087842464447, "train/loss_total": 0.5911068916320801 }, { "epoch": 2.8923323537269567, "step": 10826, "train/loss_ctc": 0.6301440000534058, "train/loss_error": 0.4408451020717621, "train/loss_total": 0.47870489954948425 }, { "epoch": 2.8925995191023244, "step": 10827, "train/loss_ctc": 0.7705816030502319, "train/loss_error": 0.4355403780937195, "train/loss_total": 0.5025486350059509 }, { "epoch": 2.8928666844776916, "step": 10828, "train/loss_ctc": 0.8584771752357483, "train/loss_error": 0.4948843717575073, "train/loss_total": 0.5676029324531555 }, { "epoch": 2.8931338498530588, "step": 10829, "train/loss_ctc": 0.6833095550537109, "train/loss_error": 0.4384720027446747, "train/loss_total": 0.48743951320648193 }, { "epoch": 2.8934010152284264, "grad_norm": 2.871577739715576, "learning_rate": 1.265081485439487e-05, "loss": 0.5027, "step": 10830 }, { "epoch": 2.8934010152284264, "step": 10830, "train/loss_ctc": 0.720244824886322, "train/loss_error": 0.402843713760376, "train/loss_total": 0.46632397174835205 }, { "epoch": 2.8936681806037936, "step": 10831, "train/loss_ctc": 0.6172986030578613, "train/loss_error": 0.5236829519271851, "train/loss_total": 0.5424060821533203 }, { "epoch": 
2.8939353459791612, "step": 10832, "train/loss_ctc": 0.6120375394821167, "train/loss_error": 0.3549620807170868, "train/loss_total": 0.4063771963119507 }, { "epoch": 2.8942025113545284, "step": 10833, "train/loss_ctc": 0.9069766998291016, "train/loss_error": 0.45713382959365845, "train/loss_total": 0.5471023917198181 }, { "epoch": 2.894469676729896, "step": 10834, "train/loss_ctc": 0.8443474769592285, "train/loss_error": 0.4774540066719055, "train/loss_total": 0.5508327484130859 }, { "epoch": 2.8947368421052633, "step": 10835, "train/loss_ctc": 0.4263138473033905, "train/loss_error": 0.48594436049461365, "train/loss_total": 0.47401827573776245 }, { "epoch": 2.8950040074806305, "step": 10836, "train/loss_ctc": 0.46875935792922974, "train/loss_error": 0.42344334721565247, "train/loss_total": 0.4325065612792969 }, { "epoch": 2.895271172855998, "step": 10837, "train/loss_ctc": 0.5007756352424622, "train/loss_error": 0.4962124228477478, "train/loss_total": 0.4971250891685486 }, { "epoch": 2.8955383382313653, "step": 10838, "train/loss_ctc": 0.7152208089828491, "train/loss_error": 0.5042115449905396, "train/loss_total": 0.5464134216308594 }, { "epoch": 2.8958055036067325, "step": 10839, "train/loss_ctc": 0.19673451781272888, "train/loss_error": 0.427010715007782, "train/loss_total": 0.3809554874897003 }, { "epoch": 2.8960726689821, "grad_norm": 1.8515666723251343, "learning_rate": 1.263478493187283e-05, "loss": 0.4844, "step": 10840 }, { "epoch": 2.8960726689821, "step": 10840, "train/loss_ctc": 0.5360362529754639, "train/loss_error": 0.41324958205223083, "train/loss_total": 0.4378069043159485 }, { "epoch": 2.8963398343574673, "step": 10841, "train/loss_ctc": 0.2862340807914734, "train/loss_error": 0.541175365447998, "train/loss_total": 0.4901871085166931 }, { "epoch": 2.8966069997328345, "step": 10842, "train/loss_ctc": 0.40656036138534546, "train/loss_error": 0.42828813195228577, "train/loss_total": 0.42394256591796875 }, { "epoch": 2.896874165108202, "step": 10843, 
"train/loss_ctc": 0.9504109025001526, "train/loss_error": 0.4456460475921631, "train/loss_total": 0.5465990304946899 }, { "epoch": 2.8971413304835694, "step": 10844, "train/loss_ctc": 0.9407371282577515, "train/loss_error": 0.48482567071914673, "train/loss_total": 0.5760079622268677 }, { "epoch": 2.8974084958589366, "step": 10845, "train/loss_ctc": 0.3722173273563385, "train/loss_error": 0.483816534280777, "train/loss_total": 0.4614966809749603 }, { "epoch": 2.897675661234304, "step": 10846, "train/loss_ctc": 0.7634356617927551, "train/loss_error": 0.45091813802719116, "train/loss_total": 0.5134216547012329 }, { "epoch": 2.8979428266096714, "step": 10847, "train/loss_ctc": 0.7207282781600952, "train/loss_error": 0.41485396027565, "train/loss_total": 0.47602880001068115 }, { "epoch": 2.8982099919850386, "step": 10848, "train/loss_ctc": 0.7739633321762085, "train/loss_error": 0.3960469365119934, "train/loss_total": 0.4716302156448364 }, { "epoch": 2.8984771573604062, "step": 10849, "train/loss_ctc": 0.7016286849975586, "train/loss_error": 0.4641287922859192, "train/loss_total": 0.5116288065910339 }, { "epoch": 2.8987443227357734, "grad_norm": 1.5437939167022705, "learning_rate": 1.2618755009350788e-05, "loss": 0.4909, "step": 10850 }, { "epoch": 2.8987443227357734, "step": 10850, "train/loss_ctc": 0.4435764253139496, "train/loss_error": 0.4735924303531647, "train/loss_total": 0.46758922934532166 }, { "epoch": 2.8990114881111406, "step": 10851, "train/loss_ctc": 0.6930506229400635, "train/loss_error": 0.496715784072876, "train/loss_total": 0.5359827280044556 }, { "epoch": 2.8992786534865083, "step": 10852, "train/loss_ctc": 0.7096620202064514, "train/loss_error": 0.39848631620407104, "train/loss_total": 0.460721492767334 }, { "epoch": 2.8995458188618755, "step": 10853, "train/loss_ctc": 0.6276143789291382, "train/loss_error": 0.4173685312271118, "train/loss_total": 0.4594177007675171 }, { "epoch": 2.8998129842372427, "step": 10854, "train/loss_ctc": 
1.2045543193817139, "train/loss_error": 0.4889971911907196, "train/loss_total": 0.6321086287498474 }, { "epoch": 2.9000801496126103, "step": 10855, "train/loss_ctc": 0.24687650799751282, "train/loss_error": 0.3989649713039398, "train/loss_total": 0.3685472905635834 }, { "epoch": 2.9003473149879775, "step": 10856, "train/loss_ctc": 1.7758088111877441, "train/loss_error": 0.4676758646965027, "train/loss_total": 0.7293024659156799 }, { "epoch": 2.9006144803633447, "step": 10857, "train/loss_ctc": 0.8167330026626587, "train/loss_error": 0.4534325897693634, "train/loss_total": 0.5260927081108093 }, { "epoch": 2.9008816457387123, "step": 10858, "train/loss_ctc": 0.4330294728279114, "train/loss_error": 0.46469131112098694, "train/loss_total": 0.4583589732646942 }, { "epoch": 2.9011488111140795, "step": 10859, "train/loss_ctc": 0.6642978191375732, "train/loss_error": 0.38699594140052795, "train/loss_total": 0.44245630502700806 }, { "epoch": 2.9014159764894467, "grad_norm": 2.4238085746765137, "learning_rate": 1.2602725086828747e-05, "loss": 0.5081, "step": 10860 }, { "epoch": 2.9014159764894467, "step": 10860, "train/loss_ctc": 0.4554823338985443, "train/loss_error": 0.4402495324611664, "train/loss_total": 0.4432961046695709 }, { "epoch": 2.9016831418648144, "step": 10861, "train/loss_ctc": 1.0589451789855957, "train/loss_error": 0.4806971251964569, "train/loss_total": 0.5963467359542847 }, { "epoch": 2.9019503072401815, "step": 10862, "train/loss_ctc": 0.5292276740074158, "train/loss_error": 0.4024660289287567, "train/loss_total": 0.4278183579444885 }, { "epoch": 2.902217472615549, "step": 10863, "train/loss_ctc": 0.5800784230232239, "train/loss_error": 0.41240522265434265, "train/loss_total": 0.44593989849090576 }, { "epoch": 2.9024846379909164, "step": 10864, "train/loss_ctc": 0.5125185251235962, "train/loss_error": 0.42810866236686707, "train/loss_total": 0.4449906647205353 }, { "epoch": 2.9027518033662836, "step": 10865, "train/loss_ctc": 0.704269289970398, 
"train/loss_error": 0.4434448778629303, "train/loss_total": 0.49560976028442383 }, { "epoch": 2.903018968741651, "step": 10866, "train/loss_ctc": 0.8862901926040649, "train/loss_error": 0.4103662967681885, "train/loss_total": 0.5055510997772217 }, { "epoch": 2.9032861341170184, "step": 10867, "train/loss_ctc": 0.9736626744270325, "train/loss_error": 0.47453317046165466, "train/loss_total": 0.5743590593338013 }, { "epoch": 2.903553299492386, "step": 10868, "train/loss_ctc": 1.346092939376831, "train/loss_error": 0.5092504620552063, "train/loss_total": 0.6766189336776733 }, { "epoch": 2.9038204648677532, "step": 10869, "train/loss_ctc": 0.4352928400039673, "train/loss_error": 0.42951011657714844, "train/loss_total": 0.4306666851043701 }, { "epoch": 2.9040876302431204, "grad_norm": 3.594482898712158, "learning_rate": 1.2586695164306706e-05, "loss": 0.5041, "step": 10870 }, { "epoch": 2.9040876302431204, "step": 10870, "train/loss_ctc": 0.6294045448303223, "train/loss_error": 0.48367446660995483, "train/loss_total": 0.5128204822540283 }, { "epoch": 2.904354795618488, "step": 10871, "train/loss_ctc": 0.6466598510742188, "train/loss_error": 0.46396082639694214, "train/loss_total": 0.5005006790161133 }, { "epoch": 2.9046219609938553, "step": 10872, "train/loss_ctc": 0.5747291445732117, "train/loss_error": 0.45164838433265686, "train/loss_total": 0.4762645363807678 }, { "epoch": 2.9048891263692225, "step": 10873, "train/loss_ctc": 1.1668740510940552, "train/loss_error": 0.4383573532104492, "train/loss_total": 0.5840607285499573 }, { "epoch": 2.90515629174459, "step": 10874, "train/loss_ctc": 0.8577145934104919, "train/loss_error": 0.45017555356025696, "train/loss_total": 0.5316833853721619 }, { "epoch": 2.9054234571199573, "step": 10875, "train/loss_ctc": 0.7437069416046143, "train/loss_error": 0.477131187915802, "train/loss_total": 0.5304463505744934 }, { "epoch": 2.9056906224953245, "step": 10876, "train/loss_ctc": 0.9441421031951904, "train/loss_error": 
0.4912169277667999, "train/loss_total": 0.5818019509315491 }, { "epoch": 2.905957787870692, "step": 10877, "train/loss_ctc": 0.5490015149116516, "train/loss_error": 0.41653910279273987, "train/loss_total": 0.4430316090583801 }, { "epoch": 2.9062249532460593, "step": 10878, "train/loss_ctc": 1.003401517868042, "train/loss_error": 0.465370774269104, "train/loss_total": 0.5729769468307495 }, { "epoch": 2.9064921186214265, "step": 10879, "train/loss_ctc": 1.1453222036361694, "train/loss_error": 0.4470823109149933, "train/loss_total": 0.5867303013801575 }, { "epoch": 2.906759283996794, "grad_norm": 2.2378804683685303, "learning_rate": 1.2570665241784664e-05, "loss": 0.532, "step": 10880 }, { "epoch": 2.906759283996794, "step": 10880, "train/loss_ctc": 0.41305777430534363, "train/loss_error": 0.34174269437789917, "train/loss_total": 0.3560057282447815 }, { "epoch": 2.9070264493721614, "step": 10881, "train/loss_ctc": 0.7216261029243469, "train/loss_error": 0.4885901212692261, "train/loss_total": 0.5351973176002502 }, { "epoch": 2.9072936147475286, "step": 10882, "train/loss_ctc": 0.9075067043304443, "train/loss_error": 0.4084866940975189, "train/loss_total": 0.508290708065033 }, { "epoch": 2.907560780122896, "step": 10883, "train/loss_ctc": 0.8484673500061035, "train/loss_error": 0.48134005069732666, "train/loss_total": 0.554765522480011 }, { "epoch": 2.9078279454982634, "step": 10884, "train/loss_ctc": 0.4159863591194153, "train/loss_error": 0.4279966354370117, "train/loss_total": 0.4255945682525635 }, { "epoch": 2.9080951108736306, "step": 10885, "train/loss_ctc": 0.5106453895568848, "train/loss_error": 0.3474697768688202, "train/loss_total": 0.3801048994064331 }, { "epoch": 2.9083622762489982, "step": 10886, "train/loss_ctc": 0.9455752372741699, "train/loss_error": 0.4331129789352417, "train/loss_total": 0.5356054306030273 }, { "epoch": 2.9086294416243654, "step": 10887, "train/loss_ctc": 0.5282186269760132, "train/loss_error": 0.4520054757595062, "train/loss_total": 
0.4672481119632721 }, { "epoch": 2.9088966069997326, "step": 10888, "train/loss_ctc": 0.2900332808494568, "train/loss_error": 0.4218120276927948, "train/loss_total": 0.3954562842845917 }, { "epoch": 2.9091637723751003, "step": 10889, "train/loss_ctc": 0.9663933515548706, "train/loss_error": 0.4133746027946472, "train/loss_total": 0.5239783525466919 }, { "epoch": 2.9094309377504675, "grad_norm": 1.8519774675369263, "learning_rate": 1.2554635319262624e-05, "loss": 0.4682, "step": 10890 }, { "epoch": 2.9094309377504675, "step": 10890, "train/loss_ctc": 0.399112731218338, "train/loss_error": 0.3116634786128998, "train/loss_total": 0.3291533291339874 }, { "epoch": 2.9096981031258347, "step": 10891, "train/loss_ctc": 0.9187338948249817, "train/loss_error": 0.451134592294693, "train/loss_total": 0.5446544885635376 }, { "epoch": 2.9099652685012023, "step": 10892, "train/loss_ctc": 0.7358271479606628, "train/loss_error": 0.3883468210697174, "train/loss_total": 0.4578428864479065 }, { "epoch": 2.9102324338765695, "step": 10893, "train/loss_ctc": 1.0353645086288452, "train/loss_error": 0.46124446392059326, "train/loss_total": 0.5760684609413147 }, { "epoch": 2.9104995992519367, "step": 10894, "train/loss_ctc": 0.803674042224884, "train/loss_error": 0.485871285200119, "train/loss_total": 0.5494318604469299 }, { "epoch": 2.9107667646273043, "step": 10895, "train/loss_ctc": 0.5126279592514038, "train/loss_error": 0.5079684257507324, "train/loss_total": 0.5089003443717957 }, { "epoch": 2.9110339300026715, "step": 10896, "train/loss_ctc": 0.9889750480651855, "train/loss_error": 0.49374011158943176, "train/loss_total": 0.5927870869636536 }, { "epoch": 2.911301095378039, "step": 10897, "train/loss_ctc": 0.6002908945083618, "train/loss_error": 0.44916144013404846, "train/loss_total": 0.4793873429298401 }, { "epoch": 2.9115682607534064, "step": 10898, "train/loss_ctc": 0.4205823540687561, "train/loss_error": 0.6018483638763428, "train/loss_total": 0.5655951499938965 }, { "epoch": 
2.9118354261287736, "step": 10899, "train/loss_ctc": 1.3389599323272705, "train/loss_error": 0.5927319526672363, "train/loss_total": 0.7419775724411011 }, { "epoch": 2.912102591504141, "grad_norm": 3.676933526992798, "learning_rate": 1.2538605396740584e-05, "loss": 0.5346, "step": 10900 }, { "epoch": 2.912102591504141, "step": 10900, "train/loss_ctc": 0.6854101419448853, "train/loss_error": 0.4332936108112335, "train/loss_total": 0.4837169051170349 }, { "epoch": 2.9123697568795084, "step": 10901, "train/loss_ctc": 0.7946335077285767, "train/loss_error": 0.47711408138275146, "train/loss_total": 0.5406180024147034 }, { "epoch": 2.912636922254876, "step": 10902, "train/loss_ctc": 0.8828448057174683, "train/loss_error": 0.5176195502281189, "train/loss_total": 0.5906646251678467 }, { "epoch": 2.912904087630243, "step": 10903, "train/loss_ctc": 0.747032642364502, "train/loss_error": 0.49694883823394775, "train/loss_total": 0.5469655990600586 }, { "epoch": 2.9131712530056104, "step": 10904, "train/loss_ctc": 0.4546236991882324, "train/loss_error": 0.432250440120697, "train/loss_total": 0.43672510981559753 }, { "epoch": 2.913438418380978, "step": 10905, "train/loss_ctc": 0.8131937980651855, "train/loss_error": 0.47671452164649963, "train/loss_total": 0.5440104007720947 }, { "epoch": 2.9137055837563453, "step": 10906, "train/loss_ctc": 0.8217982053756714, "train/loss_error": 0.462056428194046, "train/loss_total": 0.534004807472229 }, { "epoch": 2.9139727491317124, "step": 10907, "train/loss_ctc": 0.8373980522155762, "train/loss_error": 0.3918834626674652, "train/loss_total": 0.48098641633987427 }, { "epoch": 2.91423991450708, "step": 10908, "train/loss_ctc": 0.7008342742919922, "train/loss_error": 0.4461081326007843, "train/loss_total": 0.4970533847808838 }, { "epoch": 2.9145070798824473, "step": 10909, "train/loss_ctc": 0.7217257022857666, "train/loss_error": 0.41554492712020874, "train/loss_total": 0.47678107023239136 }, { "epoch": 2.9147742452578145, "grad_norm": 
1.346960186958313, "learning_rate": 1.2522575474218542e-05, "loss": 0.5132, "step": 10910 }, { "epoch": 2.9147742452578145, "step": 10910, "train/loss_ctc": 0.6134380102157593, "train/loss_error": 0.44434142112731934, "train/loss_total": 0.4781607389450073 }, { "epoch": 2.915041410633182, "step": 10911, "train/loss_ctc": 0.7627570629119873, "train/loss_error": 0.440470427274704, "train/loss_total": 0.5049277544021606 }, { "epoch": 2.9153085760085493, "step": 10912, "train/loss_ctc": 0.5689980983734131, "train/loss_error": 0.4630410969257355, "train/loss_total": 0.48423251509666443 }, { "epoch": 2.9155757413839165, "step": 10913, "train/loss_ctc": 0.9225221276283264, "train/loss_error": 0.4421831965446472, "train/loss_total": 0.5382509827613831 }, { "epoch": 2.915842906759284, "step": 10914, "train/loss_ctc": 0.911707878112793, "train/loss_error": 0.46596407890319824, "train/loss_total": 0.5551128387451172 }, { "epoch": 2.9161100721346513, "step": 10915, "train/loss_ctc": 0.9803733825683594, "train/loss_error": 0.4793846607208252, "train/loss_total": 0.5795823931694031 }, { "epoch": 2.9163772375100185, "step": 10916, "train/loss_ctc": 0.23410682380199432, "train/loss_error": 0.43319231271743774, "train/loss_total": 0.3933752179145813 }, { "epoch": 2.916644402885386, "step": 10917, "train/loss_ctc": 1.3594181537628174, "train/loss_error": 0.45118704438209534, "train/loss_total": 0.6328332424163818 }, { "epoch": 2.9169115682607534, "step": 10918, "train/loss_ctc": 1.3039791584014893, "train/loss_error": 0.4050067663192749, "train/loss_total": 0.5848012566566467 }, { "epoch": 2.9171787336361206, "step": 10919, "train/loss_ctc": 0.9415470361709595, "train/loss_error": 0.5326906442642212, "train/loss_total": 0.6144619584083557 }, { "epoch": 2.917445899011488, "grad_norm": 1.6152676343917847, "learning_rate": 1.25065455516965e-05, "loss": 0.5366, "step": 10920 }, { "epoch": 2.917445899011488, "step": 10920, "train/loss_ctc": 0.32406336069107056, "train/loss_error": 
0.40685924887657166, "train/loss_total": 0.39030009508132935 }, { "epoch": 2.9177130643868554, "step": 10921, "train/loss_ctc": 1.094212293624878, "train/loss_error": 0.453263521194458, "train/loss_total": 0.581453263759613 }, { "epoch": 2.9179802297622226, "step": 10922, "train/loss_ctc": 0.7574091553688049, "train/loss_error": 0.4233388900756836, "train/loss_total": 0.4901529550552368 }, { "epoch": 2.9182473951375902, "step": 10923, "train/loss_ctc": 0.8391105532646179, "train/loss_error": 0.5216619968414307, "train/loss_total": 0.585151731967926 }, { "epoch": 2.9185145605129574, "step": 10924, "train/loss_ctc": 0.8127225637435913, "train/loss_error": 0.45562636852264404, "train/loss_total": 0.5270456075668335 }, { "epoch": 2.9187817258883246, "step": 10925, "train/loss_ctc": 1.1655058860778809, "train/loss_error": 0.5206720232963562, "train/loss_total": 0.6496387720108032 }, { "epoch": 2.9190488912636923, "step": 10926, "train/loss_ctc": 0.8102425932884216, "train/loss_error": 0.4254542887210846, "train/loss_total": 0.502411961555481 }, { "epoch": 2.9193160566390595, "step": 10927, "train/loss_ctc": 0.7764692306518555, "train/loss_error": 0.5090406537055969, "train/loss_total": 0.5625263452529907 }, { "epoch": 2.9195832220144267, "step": 10928, "train/loss_ctc": 0.6959877610206604, "train/loss_error": 0.4883645176887512, "train/loss_total": 0.5298891663551331 }, { "epoch": 2.9198503873897943, "step": 10929, "train/loss_ctc": 0.8072913885116577, "train/loss_error": 0.5087963342666626, "train/loss_total": 0.5684953927993774 }, { "epoch": 2.9201175527651615, "grad_norm": 2.1596617698669434, "learning_rate": 1.249051562917446e-05, "loss": 0.5387, "step": 10930 }, { "epoch": 2.9201175527651615, "step": 10930, "train/loss_ctc": 0.469418466091156, "train/loss_error": 0.3932190537452698, "train/loss_total": 0.408458948135376 }, { "epoch": 2.920384718140529, "step": 10931, "train/loss_ctc": 0.7727049589157104, "train/loss_error": 0.4713251292705536, "train/loss_total": 
0.5316011309623718 }, { "epoch": 2.9206518835158963, "step": 10932, "train/loss_ctc": 0.5313748121261597, "train/loss_error": 0.5069988369941711, "train/loss_total": 0.5118740797042847 }, { "epoch": 2.920919048891264, "step": 10933, "train/loss_ctc": 0.7513916492462158, "train/loss_error": 0.47877585887908936, "train/loss_total": 0.5332990288734436 }, { "epoch": 2.921186214266631, "step": 10934, "train/loss_ctc": 0.8416491150856018, "train/loss_error": 0.3978009521961212, "train/loss_total": 0.4865705966949463 }, { "epoch": 2.9214533796419984, "step": 10935, "train/loss_ctc": 0.49061280488967896, "train/loss_error": 0.48985108733177185, "train/loss_total": 0.49000343680381775 }, { "epoch": 2.921720545017366, "step": 10936, "train/loss_ctc": 0.79459547996521, "train/loss_error": 0.42164936661720276, "train/loss_total": 0.4962385892868042 }, { "epoch": 2.921987710392733, "step": 10937, "train/loss_ctc": 0.48928022384643555, "train/loss_error": 0.4630347192287445, "train/loss_total": 0.46828383207321167 }, { "epoch": 2.9222548757681004, "step": 10938, "train/loss_ctc": 0.47818267345428467, "train/loss_error": 0.4046664834022522, "train/loss_total": 0.41936972737312317 }, { "epoch": 2.922522041143468, "step": 10939, "train/loss_ctc": 0.9133257865905762, "train/loss_error": 0.4496113359928131, "train/loss_total": 0.5423542261123657 }, { "epoch": 2.9227892065188352, "grad_norm": 1.8631893396377563, "learning_rate": 1.2474485706652418e-05, "loss": 0.4888, "step": 10940 }, { "epoch": 2.9227892065188352, "step": 10940, "train/loss_ctc": 0.8476929068565369, "train/loss_error": 0.455818772315979, "train/loss_total": 0.5341936349868774 }, { "epoch": 2.9230563718942024, "step": 10941, "train/loss_ctc": 0.7660022974014282, "train/loss_error": 0.5298475623130798, "train/loss_total": 0.5770785212516785 }, { "epoch": 2.92332353726957, "step": 10942, "train/loss_ctc": 0.6299201250076294, "train/loss_error": 0.5136492848396301, "train/loss_total": 0.536903440952301 }, { "epoch": 
2.9235907026449373, "step": 10943, "train/loss_ctc": 1.1089205741882324, "train/loss_error": 0.43162283301353455, "train/loss_total": 0.567082405090332 }, { "epoch": 2.9238578680203045, "step": 10944, "train/loss_ctc": 0.3309609889984131, "train/loss_error": 0.4171070456504822, "train/loss_total": 0.3998778462409973 }, { "epoch": 2.924125033395672, "step": 10945, "train/loss_ctc": 0.49076950550079346, "train/loss_error": 0.4121042490005493, "train/loss_total": 0.4278373122215271 }, { "epoch": 2.9243921987710393, "step": 10946, "train/loss_ctc": 0.9245210886001587, "train/loss_error": 0.49132993817329407, "train/loss_total": 0.577968180179596 }, { "epoch": 2.9246593641464065, "step": 10947, "train/loss_ctc": 1.3046435117721558, "train/loss_error": 0.45933350920677185, "train/loss_total": 0.6283955574035645 }, { "epoch": 2.924926529521774, "step": 10948, "train/loss_ctc": 0.5976747274398804, "train/loss_error": 0.4572417438030243, "train/loss_total": 0.48532834649086 }, { "epoch": 2.9251936948971413, "step": 10949, "train/loss_ctc": 0.9899823665618896, "train/loss_error": 0.389206200838089, "train/loss_total": 0.5093614459037781 }, { "epoch": 2.9254608602725085, "grad_norm": 3.2927558422088623, "learning_rate": 1.2458455784130376e-05, "loss": 0.5244, "step": 10950 }, { "epoch": 2.9254608602725085, "step": 10950, "train/loss_ctc": 1.046390175819397, "train/loss_error": 0.4332616925239563, "train/loss_total": 0.5558874011039734 }, { "epoch": 2.925728025647876, "step": 10951, "train/loss_ctc": 0.5737920999526978, "train/loss_error": 0.45093533396720886, "train/loss_total": 0.4755066931247711 }, { "epoch": 2.9259951910232433, "step": 10952, "train/loss_ctc": 0.3075237274169922, "train/loss_error": 0.5113585591316223, "train/loss_total": 0.47059160470962524 }, { "epoch": 2.9262623563986105, "step": 10953, "train/loss_ctc": 1.0525777339935303, "train/loss_error": 0.5148992538452148, "train/loss_total": 0.6224349737167358 }, { "epoch": 2.926529521773978, "step": 10954, 
"train/loss_ctc": 1.018968105316162, "train/loss_error": 0.4079093635082245, "train/loss_total": 0.5301211476325989 }, { "epoch": 2.9267966871493454, "step": 10955, "train/loss_ctc": 0.4861014485359192, "train/loss_error": 0.4464670419692993, "train/loss_total": 0.4543939530849457 }, { "epoch": 2.9270638525247126, "step": 10956, "train/loss_ctc": 0.6886001229286194, "train/loss_error": 0.4515872299671173, "train/loss_total": 0.4989898204803467 }, { "epoch": 2.92733101790008, "step": 10957, "train/loss_ctc": 1.193739652633667, "train/loss_error": 0.36338919401168823, "train/loss_total": 0.5294592976570129 }, { "epoch": 2.9275981832754474, "step": 10958, "train/loss_ctc": 0.8976250886917114, "train/loss_error": 0.4798133671283722, "train/loss_total": 0.56337571144104 }, { "epoch": 2.9278653486508146, "step": 10959, "train/loss_ctc": 0.6568797826766968, "train/loss_error": 0.4590582549571991, "train/loss_total": 0.4986225366592407 }, { "epoch": 2.9281325140261822, "grad_norm": 1.8468185663223267, "learning_rate": 1.2442425861608336e-05, "loss": 0.5199, "step": 10960 }, { "epoch": 2.9281325140261822, "step": 10960, "train/loss_ctc": 0.7043951749801636, "train/loss_error": 0.5093801617622375, "train/loss_total": 0.5483831763267517 }, { "epoch": 2.9283996794015494, "step": 10961, "train/loss_ctc": 0.6077873706817627, "train/loss_error": 0.40798693895339966, "train/loss_total": 0.44794702529907227 }, { "epoch": 2.928666844776917, "step": 10962, "train/loss_ctc": 0.673241138458252, "train/loss_error": 0.38980579376220703, "train/loss_total": 0.44649288058280945 }, { "epoch": 2.9289340101522843, "step": 10963, "train/loss_ctc": 0.4487752914428711, "train/loss_error": 0.42410799860954285, "train/loss_total": 0.42904147505760193 }, { "epoch": 2.9292011755276515, "step": 10964, "train/loss_ctc": 0.7283434867858887, "train/loss_error": 0.4972383975982666, "train/loss_total": 0.543459415435791 }, { "epoch": 2.929468340903019, "step": 10965, "train/loss_ctc": 0.5756658315658569, 
"train/loss_error": 0.4379599392414093, "train/loss_total": 0.4655011296272278 }, { "epoch": 2.9297355062783863, "step": 10966, "train/loss_ctc": 1.2333316802978516, "train/loss_error": 0.4973118007183075, "train/loss_total": 0.6445157527923584 }, { "epoch": 2.930002671653754, "step": 10967, "train/loss_ctc": 1.1302382946014404, "train/loss_error": 0.4012036919593811, "train/loss_total": 0.5470106601715088 }, { "epoch": 2.930269837029121, "step": 10968, "train/loss_ctc": 0.4854509234428406, "train/loss_error": 0.41477304697036743, "train/loss_total": 0.4289086163043976 }, { "epoch": 2.9305370024044883, "step": 10969, "train/loss_ctc": 0.7232171297073364, "train/loss_error": 0.4219149649143219, "train/loss_total": 0.48217540979385376 }, { "epoch": 2.930804167779856, "grad_norm": 1.8019262552261353, "learning_rate": 1.2426395939086294e-05, "loss": 0.4983, "step": 10970 }, { "epoch": 2.930804167779856, "step": 10970, "train/loss_ctc": 0.547652542591095, "train/loss_error": 0.4885898232460022, "train/loss_total": 0.5004023313522339 }, { "epoch": 2.931071333155223, "step": 10971, "train/loss_ctc": 1.8164089918136597, "train/loss_error": 0.4902782142162323, "train/loss_total": 0.7555043697357178 }, { "epoch": 2.9313384985305904, "step": 10972, "train/loss_ctc": 0.40056073665618896, "train/loss_error": 0.42499101161956787, "train/loss_total": 0.42010498046875 }, { "epoch": 2.931605663905958, "step": 10973, "train/loss_ctc": 0.6469407081604004, "train/loss_error": 0.44441351294517517, "train/loss_total": 0.4849189519882202 }, { "epoch": 2.931872829281325, "step": 10974, "train/loss_ctc": 0.6096394062042236, "train/loss_error": 0.5011088848114014, "train/loss_total": 0.5228149890899658 }, { "epoch": 2.9321399946566924, "step": 10975, "train/loss_ctc": 0.6783787608146667, "train/loss_error": 0.39870327711105347, "train/loss_total": 0.45463839173316956 }, { "epoch": 2.93240716003206, "step": 10976, "train/loss_ctc": 0.6043829917907715, "train/loss_error": 0.43716269731521606, 
"train/loss_total": 0.4706067740917206 }, { "epoch": 2.9326743254074272, "step": 10977, "train/loss_ctc": 1.2294807434082031, "train/loss_error": 0.4566285014152527, "train/loss_total": 0.6111989617347717 }, { "epoch": 2.9329414907827944, "step": 10978, "train/loss_ctc": 0.8302504420280457, "train/loss_error": 0.5256240963935852, "train/loss_total": 0.5865494012832642 }, { "epoch": 2.933208656158162, "step": 10979, "train/loss_ctc": 0.2540286183357239, "train/loss_error": 0.46828216314315796, "train/loss_total": 0.4254314601421356 }, { "epoch": 2.9334758215335293, "grad_norm": 1.7627063989639282, "learning_rate": 1.2410366016564254e-05, "loss": 0.5232, "step": 10980 }, { "epoch": 2.9334758215335293, "step": 10980, "train/loss_ctc": 1.3069061040878296, "train/loss_error": 0.4592641592025757, "train/loss_total": 0.6287925839424133 }, { "epoch": 2.9337429869088965, "step": 10981, "train/loss_ctc": 0.6450028419494629, "train/loss_error": 0.4290880858898163, "train/loss_total": 0.47227105498313904 }, { "epoch": 2.934010152284264, "step": 10982, "train/loss_ctc": 0.5263233184814453, "train/loss_error": 0.40038755536079407, "train/loss_total": 0.42557471990585327 }, { "epoch": 2.9342773176596313, "step": 10983, "train/loss_ctc": 0.9912105798721313, "train/loss_error": 0.4643286168575287, "train/loss_total": 0.5697050094604492 }, { "epoch": 2.9345444830349985, "step": 10984, "train/loss_ctc": 0.41429996490478516, "train/loss_error": 0.5067515969276428, "train/loss_total": 0.48826128244400024 }, { "epoch": 2.934811648410366, "step": 10985, "train/loss_ctc": 0.577927827835083, "train/loss_error": 0.45151764154434204, "train/loss_total": 0.47679969668388367 }, { "epoch": 2.9350788137857333, "step": 10986, "train/loss_ctc": 1.0887062549591064, "train/loss_error": 0.4908846914768219, "train/loss_total": 0.6104490160942078 }, { "epoch": 2.9353459791611005, "step": 10987, "train/loss_ctc": 0.8399189710617065, "train/loss_error": 0.503986120223999, "train/loss_total": 
0.5711727142333984 }, { "epoch": 2.935613144536468, "step": 10988, "train/loss_ctc": 0.31743600964546204, "train/loss_error": 0.3915264308452606, "train/loss_total": 0.37670835852622986 }, { "epoch": 2.9358803099118354, "step": 10989, "train/loss_ctc": 0.4391395151615143, "train/loss_error": 0.5273000597953796, "train/loss_total": 0.5096679329872131 }, { "epoch": 2.9361474752872025, "grad_norm": 1.6992634534835815, "learning_rate": 1.2394336094042214e-05, "loss": 0.5129, "step": 10990 }, { "epoch": 2.9361474752872025, "step": 10990, "train/loss_ctc": 1.2752575874328613, "train/loss_error": 0.48160022497177124, "train/loss_total": 0.6403317451477051 }, { "epoch": 2.93641464066257, "step": 10991, "train/loss_ctc": 0.8019782304763794, "train/loss_error": 0.45649293065071106, "train/loss_total": 0.5255900025367737 }, { "epoch": 2.9366818060379374, "step": 10992, "train/loss_ctc": 0.6111610531806946, "train/loss_error": 0.45394399762153625, "train/loss_total": 0.4853874444961548 }, { "epoch": 2.9369489714133046, "step": 10993, "train/loss_ctc": 0.3033320903778076, "train/loss_error": 0.36920684576034546, "train/loss_total": 0.3560318946838379 }, { "epoch": 2.937216136788672, "step": 10994, "train/loss_ctc": 0.5612605810165405, "train/loss_error": 0.46722668409347534, "train/loss_total": 0.48603346943855286 }, { "epoch": 2.9374833021640394, "step": 10995, "train/loss_ctc": 0.9178798794746399, "train/loss_error": 0.49292412400245667, "train/loss_total": 0.5779152512550354 }, { "epoch": 2.937750467539407, "step": 10996, "train/loss_ctc": 0.7260090112686157, "train/loss_error": 0.5308356881141663, "train/loss_total": 0.5698703527450562 }, { "epoch": 2.9380176329147742, "step": 10997, "train/loss_ctc": 0.8324056267738342, "train/loss_error": 0.40061521530151367, "train/loss_total": 0.48697328567504883 }, { "epoch": 2.9382847982901414, "step": 10998, "train/loss_ctc": 1.2471461296081543, "train/loss_error": 0.4503794312477112, "train/loss_total": 0.6097327470779419 }, { 
"epoch": 2.938551963665509, "step": 10999, "train/loss_ctc": 0.5618144273757935, "train/loss_error": 0.4303094744682312, "train/loss_total": 0.45661047101020813 }, { "epoch": 2.9388191290408763, "grad_norm": 1.155971646308899, "learning_rate": 1.2378306171520172e-05, "loss": 0.5194, "step": 11000 }, { "epoch": 2.9388191290408763, "step": 11000, "train/loss_ctc": 1.4526276588439941, "train/loss_error": 0.41874203085899353, "train/loss_total": 0.6255191564559937 }, { "epoch": 2.939086294416244, "step": 11001, "train/loss_ctc": 1.006117582321167, "train/loss_error": 0.46102192997932434, "train/loss_total": 0.5700410604476929 }, { "epoch": 2.939353459791611, "step": 11002, "train/loss_ctc": 0.5550066232681274, "train/loss_error": 0.4287976622581482, "train/loss_total": 0.45403945446014404 }, { "epoch": 2.9396206251669783, "step": 11003, "train/loss_ctc": 1.1190418004989624, "train/loss_error": 0.3814031779766083, "train/loss_total": 0.5289309024810791 }, { "epoch": 2.939887790542346, "step": 11004, "train/loss_ctc": 0.5944563746452332, "train/loss_error": 0.4238419830799103, "train/loss_total": 0.45796486735343933 }, { "epoch": 2.940154955917713, "step": 11005, "train/loss_ctc": 0.7998392581939697, "train/loss_error": 0.4678116738796234, "train/loss_total": 0.5342171788215637 }, { "epoch": 2.9404221212930803, "step": 11006, "train/loss_ctc": 0.43025124073028564, "train/loss_error": 0.502610445022583, "train/loss_total": 0.4881386160850525 }, { "epoch": 2.940689286668448, "step": 11007, "train/loss_ctc": 1.2299253940582275, "train/loss_error": 0.37929514050483704, "train/loss_total": 0.5494211912155151 }, { "epoch": 2.940956452043815, "step": 11008, "train/loss_ctc": 0.46044570207595825, "train/loss_error": 0.4919450879096985, "train/loss_total": 0.48564523458480835 }, { "epoch": 2.9412236174191824, "step": 11009, "train/loss_ctc": 0.8584011793136597, "train/loss_error": 0.49293261766433716, "train/loss_total": 0.5660263299942017 }, { "epoch": 2.94149078279455, 
"grad_norm": 2.0484583377838135, "learning_rate": 1.236227624899813e-05, "loss": 0.526, "step": 11010 }, { "epoch": 2.94149078279455, "step": 11010, "train/loss_ctc": 0.6432440876960754, "train/loss_error": 0.4530259668827057, "train/loss_total": 0.49106958508491516 }, { "epoch": 2.941757948169917, "step": 11011, "train/loss_ctc": 0.4989997446537018, "train/loss_error": 0.43138325214385986, "train/loss_total": 0.4449065625667572 }, { "epoch": 2.9420251135452844, "step": 11012, "train/loss_ctc": 0.5960429906845093, "train/loss_error": 0.4625339210033417, "train/loss_total": 0.4892357587814331 }, { "epoch": 2.942292278920652, "step": 11013, "train/loss_ctc": 0.5067917704582214, "train/loss_error": 0.43542179465293884, "train/loss_total": 0.44969579577445984 }, { "epoch": 2.9425594442960192, "step": 11014, "train/loss_ctc": 0.7626216411590576, "train/loss_error": 0.5003218054771423, "train/loss_total": 0.5527817606925964 }, { "epoch": 2.9428266096713864, "step": 11015, "train/loss_ctc": 0.503870964050293, "train/loss_error": 0.46792861819267273, "train/loss_total": 0.4751170873641968 }, { "epoch": 2.943093775046754, "step": 11016, "train/loss_ctc": 0.37325602769851685, "train/loss_error": 0.3942963480949402, "train/loss_total": 0.39008828997612 }, { "epoch": 2.9433609404221213, "step": 11017, "train/loss_ctc": 0.3434443175792694, "train/loss_error": 0.4202132523059845, "train/loss_total": 0.4048594832420349 }, { "epoch": 2.9436281057974885, "step": 11018, "train/loss_ctc": 0.8106325268745422, "train/loss_error": 0.3942830264568329, "train/loss_total": 0.47755295038223267 }, { "epoch": 2.943895271172856, "step": 11019, "train/loss_ctc": 0.8354165554046631, "train/loss_error": 0.4327426254749298, "train/loss_total": 0.5132774114608765 }, { "epoch": 2.9441624365482233, "grad_norm": 4.694834232330322, "learning_rate": 1.234624632647609e-05, "loss": 0.4689, "step": 11020 }, { "epoch": 2.9441624365482233, "step": 11020, "train/loss_ctc": 0.42705637216567993, 
"train/loss_error": 0.4579426050186157, "train/loss_total": 0.45176535844802856 }, { "epoch": 2.9444296019235905, "step": 11021, "train/loss_ctc": 0.8603995442390442, "train/loss_error": 0.4850105047225952, "train/loss_total": 0.5600883364677429 }, { "epoch": 2.944696767298958, "step": 11022, "train/loss_ctc": 0.7089492082595825, "train/loss_error": 0.3810413181781769, "train/loss_total": 0.44662290811538696 }, { "epoch": 2.9449639326743253, "step": 11023, "train/loss_ctc": 0.8881409168243408, "train/loss_error": 0.41958293318748474, "train/loss_total": 0.5132945775985718 }, { "epoch": 2.9452310980496925, "step": 11024, "train/loss_ctc": 0.4727269113063812, "train/loss_error": 0.39368730783462524, "train/loss_total": 0.4094952344894409 }, { "epoch": 2.94549826342506, "step": 11025, "train/loss_ctc": 0.3646513521671295, "train/loss_error": 0.44085225462913513, "train/loss_total": 0.42561209201812744 }, { "epoch": 2.9457654288004274, "step": 11026, "train/loss_ctc": 0.5400731563568115, "train/loss_error": 0.35969793796539307, "train/loss_total": 0.3957729935646057 }, { "epoch": 2.9460325941757946, "step": 11027, "train/loss_ctc": 0.7103589177131653, "train/loss_error": 0.4991057515144348, "train/loss_total": 0.5413563847541809 }, { "epoch": 2.946299759551162, "step": 11028, "train/loss_ctc": 1.3183813095092773, "train/loss_error": 0.4539102017879486, "train/loss_total": 0.6268044114112854 }, { "epoch": 2.9465669249265294, "step": 11029, "train/loss_ctc": 1.5631117820739746, "train/loss_error": 0.4985332787036896, "train/loss_total": 0.7114490270614624 }, { "epoch": 2.946834090301897, "grad_norm": 2.8421006202697754, "learning_rate": 1.2330216403954048e-05, "loss": 0.5082, "step": 11030 }, { "epoch": 2.946834090301897, "step": 11030, "train/loss_ctc": 0.5066792964935303, "train/loss_error": 0.4209511876106262, "train/loss_total": 0.438096821308136 }, { "epoch": 2.947101255677264, "step": 11031, "train/loss_ctc": 0.6019474267959595, "train/loss_error": 
0.4509325623512268, "train/loss_total": 0.4811355471611023 }, { "epoch": 2.9473684210526314, "step": 11032, "train/loss_ctc": 1.0575568675994873, "train/loss_error": 0.48760882019996643, "train/loss_total": 0.6015984416007996 }, { "epoch": 2.947635586427999, "step": 11033, "train/loss_ctc": 0.5339586734771729, "train/loss_error": 0.45187997817993164, "train/loss_total": 0.46829572319984436 }, { "epoch": 2.9479027518033663, "step": 11034, "train/loss_ctc": 0.9305626153945923, "train/loss_error": 0.4495526850223541, "train/loss_total": 0.5457546710968018 }, { "epoch": 2.948169917178734, "step": 11035, "train/loss_ctc": 0.5315610766410828, "train/loss_error": 0.40556496381759644, "train/loss_total": 0.43076419830322266 }, { "epoch": 2.948437082554101, "step": 11036, "train/loss_ctc": 0.5034317970275879, "train/loss_error": 0.4178391098976135, "train/loss_total": 0.4349576532840729 }, { "epoch": 2.9487042479294683, "step": 11037, "train/loss_ctc": 0.8160685300827026, "train/loss_error": 0.4354637563228607, "train/loss_total": 0.5115846991539001 }, { "epoch": 2.948971413304836, "step": 11038, "train/loss_ctc": 0.8040239810943604, "train/loss_error": 0.4056603014469147, "train/loss_total": 0.48533302545547485 }, { "epoch": 2.949238578680203, "step": 11039, "train/loss_ctc": 1.0482807159423828, "train/loss_error": 0.5026828050613403, "train/loss_total": 0.6118023991584778 }, { "epoch": 2.9495057440555703, "grad_norm": 4.531277656555176, "learning_rate": 1.2314186481432006e-05, "loss": 0.5009, "step": 11040 }, { "epoch": 2.9495057440555703, "step": 11040, "train/loss_ctc": 0.4243800938129425, "train/loss_error": 0.4576747417449951, "train/loss_total": 0.451015830039978 }, { "epoch": 2.949772909430938, "step": 11041, "train/loss_ctc": 0.8547214865684509, "train/loss_error": 0.431675523519516, "train/loss_total": 0.516284704208374 }, { "epoch": 2.950040074806305, "step": 11042, "train/loss_ctc": 1.051878809928894, "train/loss_error": 0.4649337828159332, "train/loss_total": 
0.5823227763175964 }, { "epoch": 2.9503072401816723, "step": 11043, "train/loss_ctc": 0.9224857091903687, "train/loss_error": 0.4292743504047394, "train/loss_total": 0.527916669845581 }, { "epoch": 2.95057440555704, "step": 11044, "train/loss_ctc": 0.36960601806640625, "train/loss_error": 0.46145278215408325, "train/loss_total": 0.44308343529701233 }, { "epoch": 2.950841570932407, "step": 11045, "train/loss_ctc": 0.6163437962532043, "train/loss_error": 0.3939806818962097, "train/loss_total": 0.4384533166885376 }, { "epoch": 2.9511087363077744, "step": 11046, "train/loss_ctc": 0.3695133328437805, "train/loss_error": 0.4523630440235138, "train/loss_total": 0.43579310178756714 }, { "epoch": 2.951375901683142, "step": 11047, "train/loss_ctc": 0.5654793977737427, "train/loss_error": 0.4413808286190033, "train/loss_total": 0.4662005603313446 }, { "epoch": 2.951643067058509, "step": 11048, "train/loss_ctc": 1.0724775791168213, "train/loss_error": 0.4988766610622406, "train/loss_total": 0.6135968565940857 }, { "epoch": 2.9519102324338764, "step": 11049, "train/loss_ctc": 1.360897183418274, "train/loss_error": 0.4693601429462433, "train/loss_total": 0.6476675868034363 }, { "epoch": 2.952177397809244, "grad_norm": 1.5806161165237427, "learning_rate": 1.2298156558909965e-05, "loss": 0.5122, "step": 11050 }, { "epoch": 2.952177397809244, "step": 11050, "train/loss_ctc": 0.7365642786026001, "train/loss_error": 0.5126005411148071, "train/loss_total": 0.5573933124542236 }, { "epoch": 2.9524445631846112, "step": 11051, "train/loss_ctc": 0.41414815187454224, "train/loss_error": 0.428922563791275, "train/loss_total": 0.4259676933288574 }, { "epoch": 2.9527117285599784, "step": 11052, "train/loss_ctc": 1.1243860721588135, "train/loss_error": 0.44780024886131287, "train/loss_total": 0.5831174254417419 }, { "epoch": 2.952978893935346, "step": 11053, "train/loss_ctc": 0.55202317237854, "train/loss_error": 0.49313849210739136, "train/loss_total": 0.5049154162406921 }, { "epoch": 
2.9532460593107133, "step": 11054, "train/loss_ctc": 1.0031239986419678, "train/loss_error": 0.45398685336112976, "train/loss_total": 0.5638142824172974 }, { "epoch": 2.9535132246860805, "step": 11055, "train/loss_ctc": 0.5543398261070251, "train/loss_error": 0.4172115623950958, "train/loss_total": 0.4446372389793396 }, { "epoch": 2.953780390061448, "step": 11056, "train/loss_ctc": 0.9718416929244995, "train/loss_error": 0.4963046908378601, "train/loss_total": 0.5914121270179749 }, { "epoch": 2.9540475554368153, "step": 11057, "train/loss_ctc": 0.379497230052948, "train/loss_error": 0.49968570470809937, "train/loss_total": 0.47564801573753357 }, { "epoch": 2.9543147208121825, "step": 11058, "train/loss_ctc": 0.35314247012138367, "train/loss_error": 0.450119286775589, "train/loss_total": 0.4307239353656769 }, { "epoch": 2.95458188618755, "step": 11059, "train/loss_ctc": 0.7933732271194458, "train/loss_error": 0.40273797512054443, "train/loss_total": 0.4808650016784668 }, { "epoch": 2.9548490515629173, "grad_norm": 1.519289493560791, "learning_rate": 1.2282126636387923e-05, "loss": 0.5058, "step": 11060 }, { "epoch": 2.9548490515629173, "step": 11060, "train/loss_ctc": 1.2718558311462402, "train/loss_error": 0.5505583882331848, "train/loss_total": 0.6948179006576538 }, { "epoch": 2.9551162169382845, "step": 11061, "train/loss_ctc": 0.5346310138702393, "train/loss_error": 0.48290568590164185, "train/loss_total": 0.4932507574558258 }, { "epoch": 2.955383382313652, "step": 11062, "train/loss_ctc": 0.9435846209526062, "train/loss_error": 0.36398565769195557, "train/loss_total": 0.47990548610687256 }, { "epoch": 2.9556505476890194, "step": 11063, "train/loss_ctc": 0.4625847637653351, "train/loss_error": 0.45416492223739624, "train/loss_total": 0.45584890246391296 }, { "epoch": 2.955917713064387, "step": 11064, "train/loss_ctc": 1.0239620208740234, "train/loss_error": 0.5330579280853271, "train/loss_total": 0.6312387585639954 }, { "epoch": 2.956184878439754, "step": 11065, 
"train/loss_ctc": 1.3304991722106934, "train/loss_error": 0.4957372844219208, "train/loss_total": 0.6626896858215332 }, { "epoch": 2.956452043815122, "step": 11066, "train/loss_ctc": 0.4785577654838562, "train/loss_error": 0.46844327449798584, "train/loss_total": 0.4704661965370178 }, { "epoch": 2.956719209190489, "step": 11067, "train/loss_ctc": 1.2004081010818481, "train/loss_error": 0.41183221340179443, "train/loss_total": 0.5695474147796631 }, { "epoch": 2.9569863745658562, "step": 11068, "train/loss_ctc": 0.6577882766723633, "train/loss_error": 0.4300936460494995, "train/loss_total": 0.47563260793685913 }, { "epoch": 2.957253539941224, "step": 11069, "train/loss_ctc": 0.7635905146598816, "train/loss_error": 0.4641414284706116, "train/loss_total": 0.5240312814712524 }, { "epoch": 2.957520705316591, "grad_norm": 1.8259350061416626, "learning_rate": 1.2266096713865883e-05, "loss": 0.5457, "step": 11070 }, { "epoch": 2.957520705316591, "step": 11070, "train/loss_ctc": 0.5172630548477173, "train/loss_error": 0.43065086007118225, "train/loss_total": 0.4479733109474182 }, { "epoch": 2.9577878706919583, "step": 11071, "train/loss_ctc": 1.4356638193130493, "train/loss_error": 0.4681057035923004, "train/loss_total": 0.6616173386573792 }, { "epoch": 2.958055036067326, "step": 11072, "train/loss_ctc": 0.29215019941329956, "train/loss_error": 0.5232532024383545, "train/loss_total": 0.4770326018333435 }, { "epoch": 2.958322201442693, "step": 11073, "train/loss_ctc": 0.8640788793563843, "train/loss_error": 0.4713517725467682, "train/loss_total": 0.5498971939086914 }, { "epoch": 2.9585893668180603, "step": 11074, "train/loss_ctc": 0.5205156803131104, "train/loss_error": 0.44830432534217834, "train/loss_total": 0.46274662017822266 }, { "epoch": 2.958856532193428, "step": 11075, "train/loss_ctc": 0.8288547396659851, "train/loss_error": 0.4306918680667877, "train/loss_total": 0.5103244781494141 }, { "epoch": 2.959123697568795, "step": 11076, "train/loss_ctc": 1.1307957172393799, 
"train/loss_error": 0.4686085283756256, "train/loss_total": 0.6010459661483765 }, { "epoch": 2.9593908629441623, "step": 11077, "train/loss_ctc": 1.0164138078689575, "train/loss_error": 0.470112144947052, "train/loss_total": 0.5793724656105042 }, { "epoch": 2.95965802831953, "step": 11078, "train/loss_ctc": 0.5753881931304932, "train/loss_error": 0.4489709436893463, "train/loss_total": 0.47425439953804016 }, { "epoch": 2.959925193694897, "step": 11079, "train/loss_ctc": 0.31502002477645874, "train/loss_error": 0.4251200556755066, "train/loss_total": 0.40310007333755493 }, { "epoch": 2.9601923590702643, "grad_norm": 1.7424167394638062, "learning_rate": 1.2250066791343843e-05, "loss": 0.5167, "step": 11080 }, { "epoch": 2.9601923590702643, "step": 11080, "train/loss_ctc": 0.798080325126648, "train/loss_error": 0.4695335030555725, "train/loss_total": 0.5352428555488586 }, { "epoch": 2.960459524445632, "step": 11081, "train/loss_ctc": 0.9067797064781189, "train/loss_error": 0.49088770151138306, "train/loss_total": 0.5740661025047302 }, { "epoch": 2.960726689820999, "step": 11082, "train/loss_ctc": 0.6429032683372498, "train/loss_error": 0.4103809595108032, "train/loss_total": 0.456885427236557 }, { "epoch": 2.9609938551963664, "step": 11083, "train/loss_ctc": 1.1777293682098389, "train/loss_error": 0.45336464047431946, "train/loss_total": 0.5982376337051392 }, { "epoch": 2.961261020571734, "step": 11084, "train/loss_ctc": 0.9524866342544556, "train/loss_error": 0.4047439992427826, "train/loss_total": 0.5142925381660461 }, { "epoch": 2.961528185947101, "step": 11085, "train/loss_ctc": 0.970413088798523, "train/loss_error": 0.4093579649925232, "train/loss_total": 0.521569013595581 }, { "epoch": 2.9617953513224684, "step": 11086, "train/loss_ctc": 0.7007999420166016, "train/loss_error": 0.4818403720855713, "train/loss_total": 0.5256322622299194 }, { "epoch": 2.962062516697836, "step": 11087, "train/loss_ctc": 0.9267034530639648, "train/loss_error": 0.4429457187652588, 
"train/loss_total": 0.5396972894668579 }, { "epoch": 2.9623296820732032, "step": 11088, "train/loss_ctc": 0.559158980846405, "train/loss_error": 0.4295508861541748, "train/loss_total": 0.45547252893447876 }, { "epoch": 2.9625968474485704, "step": 11089, "train/loss_ctc": 0.6744970083236694, "train/loss_error": 0.46494412422180176, "train/loss_total": 0.5068547129631042 }, { "epoch": 2.962864012823938, "grad_norm": 1.3429393768310547, "learning_rate": 1.2234036868821801e-05, "loss": 0.5228, "step": 11090 }, { "epoch": 2.962864012823938, "step": 11090, "train/loss_ctc": 0.36613813042640686, "train/loss_error": 0.4453747272491455, "train/loss_total": 0.4295274019241333 }, { "epoch": 2.9631311781993053, "step": 11091, "train/loss_ctc": 1.1073591709136963, "train/loss_error": 0.47283488512039185, "train/loss_total": 0.5997397303581238 }, { "epoch": 2.9633983435746725, "step": 11092, "train/loss_ctc": 0.9274135231971741, "train/loss_error": 0.45012298226356506, "train/loss_total": 0.5455811023712158 }, { "epoch": 2.96366550895004, "step": 11093, "train/loss_ctc": 1.0472241640090942, "train/loss_error": 0.45215484499931335, "train/loss_total": 0.5711687207221985 }, { "epoch": 2.9639326743254073, "step": 11094, "train/loss_ctc": 0.6041224002838135, "train/loss_error": 0.47071704268455505, "train/loss_total": 0.49739813804626465 }, { "epoch": 2.964199839700775, "step": 11095, "train/loss_ctc": 0.6170000433921814, "train/loss_error": 0.4084240198135376, "train/loss_total": 0.45013922452926636 }, { "epoch": 2.964467005076142, "step": 11096, "train/loss_ctc": 0.660291314125061, "train/loss_error": 0.46566852927207947, "train/loss_total": 0.5045931339263916 }, { "epoch": 2.9647341704515093, "step": 11097, "train/loss_ctc": 0.34339141845703125, "train/loss_error": 0.41065987944602966, "train/loss_total": 0.39720618724823 }, { "epoch": 2.965001335826877, "step": 11098, "train/loss_ctc": 1.0183088779449463, "train/loss_error": 0.41318854689598083, "train/loss_total": 
0.5342126488685608 }, { "epoch": 2.965268501202244, "step": 11099, "train/loss_ctc": 0.7584995031356812, "train/loss_error": 0.4026583135128021, "train/loss_total": 0.4738265573978424 }, { "epoch": 2.965535666577612, "grad_norm": 1.6377348899841309, "learning_rate": 1.2218006946299761e-05, "loss": 0.5003, "step": 11100 }, { "epoch": 2.965535666577612, "step": 11100, "train/loss_ctc": 0.9092223644256592, "train/loss_error": 0.43133291602134705, "train/loss_total": 0.5269107818603516 }, { "epoch": 2.965802831952979, "step": 11101, "train/loss_ctc": 0.5269269347190857, "train/loss_error": 0.4973995089530945, "train/loss_total": 0.5033050179481506 }, { "epoch": 2.966069997328346, "step": 11102, "train/loss_ctc": 0.9779900312423706, "train/loss_error": 0.4104903042316437, "train/loss_total": 0.523990273475647 }, { "epoch": 2.966337162703714, "step": 11103, "train/loss_ctc": 0.629965603351593, "train/loss_error": 0.46032771468162537, "train/loss_total": 0.49425530433654785 }, { "epoch": 2.966604328079081, "step": 11104, "train/loss_ctc": 0.4917057156562805, "train/loss_error": 0.4099842309951782, "train/loss_total": 0.42632853984832764 }, { "epoch": 2.9668714934544482, "step": 11105, "train/loss_ctc": 1.0432344675064087, "train/loss_error": 0.44283705949783325, "train/loss_total": 0.5629165172576904 }, { "epoch": 2.967138658829816, "step": 11106, "train/loss_ctc": 0.4484744966030121, "train/loss_error": 0.46386173367500305, "train/loss_total": 0.46078431606292725 }, { "epoch": 2.967405824205183, "step": 11107, "train/loss_ctc": 0.7168247699737549, "train/loss_error": 0.4737544357776642, "train/loss_total": 0.5223684906959534 }, { "epoch": 2.9676729895805503, "step": 11108, "train/loss_ctc": 1.098679542541504, "train/loss_error": 0.4422667622566223, "train/loss_total": 0.5735493302345276 }, { "epoch": 2.967940154955918, "step": 11109, "train/loss_ctc": 0.6957294344902039, "train/loss_error": 0.4574039578437805, "train/loss_total": 0.5050690770149231 }, { "epoch": 
2.968207320331285, "grad_norm": 1.6027731895446777, "learning_rate": 1.2201977023777719e-05, "loss": 0.5099, "step": 11110 }, { "epoch": 2.968207320331285, "step": 11110, "train/loss_ctc": 0.31561794877052307, "train/loss_error": 0.43505859375, "train/loss_total": 0.41117045283317566 }, { "epoch": 2.9684744857066523, "step": 11111, "train/loss_ctc": 0.8658780455589294, "train/loss_error": 0.453257292509079, "train/loss_total": 0.5357814431190491 }, { "epoch": 2.96874165108202, "step": 11112, "train/loss_ctc": 0.6840776205062866, "train/loss_error": 0.5020030736923218, "train/loss_total": 0.5384179949760437 }, { "epoch": 2.969008816457387, "step": 11113, "train/loss_ctc": 0.7772409915924072, "train/loss_error": 0.4794287085533142, "train/loss_total": 0.5389912128448486 }, { "epoch": 2.9692759818327543, "step": 11114, "train/loss_ctc": 0.7124593257904053, "train/loss_error": 0.45229339599609375, "train/loss_total": 0.504326581954956 }, { "epoch": 2.969543147208122, "step": 11115, "train/loss_ctc": 0.4738500714302063, "train/loss_error": 0.46207520365715027, "train/loss_total": 0.46443018317222595 }, { "epoch": 2.969810312583489, "step": 11116, "train/loss_ctc": 0.5817102789878845, "train/loss_error": 0.4491247534751892, "train/loss_total": 0.4756418764591217 }, { "epoch": 2.9700774779588563, "step": 11117, "train/loss_ctc": 1.444543719291687, "train/loss_error": 0.4663738012313843, "train/loss_total": 0.6620078086853027 }, { "epoch": 2.970344643334224, "step": 11118, "train/loss_ctc": 0.32448482513427734, "train/loss_error": 0.4944246709346771, "train/loss_total": 0.46043670177459717 }, { "epoch": 2.970611808709591, "step": 11119, "train/loss_ctc": 0.6354212760925293, "train/loss_error": 0.4462289810180664, "train/loss_total": 0.484067440032959 }, { "epoch": 2.9708789740849584, "grad_norm": 2.475771427154541, "learning_rate": 1.2185947101255677e-05, "loss": 0.5075, "step": 11120 }, { "epoch": 2.9708789740849584, "step": 11120, "train/loss_ctc": 1.2576789855957031, 
"train/loss_error": 0.4818173050880432, "train/loss_total": 0.6369896531105042 }, { "epoch": 2.971146139460326, "step": 11121, "train/loss_ctc": 0.4277257025241852, "train/loss_error": 0.4470186233520508, "train/loss_total": 0.4431600570678711 }, { "epoch": 2.971413304835693, "step": 11122, "train/loss_ctc": 0.556638240814209, "train/loss_error": 0.44385844469070435, "train/loss_total": 0.4664144217967987 }, { "epoch": 2.9716804702110604, "step": 11123, "train/loss_ctc": 0.5191965103149414, "train/loss_error": 0.44266611337661743, "train/loss_total": 0.4579721987247467 }, { "epoch": 2.971947635586428, "step": 11124, "train/loss_ctc": 0.7263821363449097, "train/loss_error": 0.4921387732028961, "train/loss_total": 0.5389874577522278 }, { "epoch": 2.9722148009617952, "step": 11125, "train/loss_ctc": 0.6960501074790955, "train/loss_error": 0.4829454720020294, "train/loss_total": 0.5255663990974426 }, { "epoch": 2.9724819663371624, "step": 11126, "train/loss_ctc": 0.6360583305358887, "train/loss_error": 0.44254225492477417, "train/loss_total": 0.4812454581260681 }, { "epoch": 2.97274913171253, "step": 11127, "train/loss_ctc": 0.30959033966064453, "train/loss_error": 0.42008331418037415, "train/loss_total": 0.39798474311828613 }, { "epoch": 2.9730162970878973, "step": 11128, "train/loss_ctc": 0.40620216727256775, "train/loss_error": 0.41548991203308105, "train/loss_total": 0.4136323928833008 }, { "epoch": 2.973283462463265, "step": 11129, "train/loss_ctc": 1.8523306846618652, "train/loss_error": 0.44650787115097046, "train/loss_total": 0.7276724576950073 }, { "epoch": 2.973550627838632, "grad_norm": 1.404811143875122, "learning_rate": 1.2169917178733637e-05, "loss": 0.509, "step": 11130 }, { "epoch": 2.973550627838632, "step": 11130, "train/loss_ctc": 0.8222594261169434, "train/loss_error": 0.48984694480895996, "train/loss_total": 0.5563294291496277 }, { "epoch": 2.9738177932139993, "step": 11131, "train/loss_ctc": 0.9567655920982361, "train/loss_error": 
0.43908849358558655, "train/loss_total": 0.5426239371299744 }, { "epoch": 2.974084958589367, "step": 11132, "train/loss_ctc": 0.5368441343307495, "train/loss_error": 0.5114239454269409, "train/loss_total": 0.5165079832077026 }, { "epoch": 2.974352123964734, "step": 11133, "train/loss_ctc": 0.45711642503738403, "train/loss_error": 0.38681355118751526, "train/loss_total": 0.40087413787841797 }, { "epoch": 2.974619289340102, "step": 11134, "train/loss_ctc": 1.2185919284820557, "train/loss_error": 0.5019499659538269, "train/loss_total": 0.6452783346176147 }, { "epoch": 2.974886454715469, "step": 11135, "train/loss_ctc": 0.8426082134246826, "train/loss_error": 0.4335787892341614, "train/loss_total": 0.5153846740722656 }, { "epoch": 2.975153620090836, "step": 11136, "train/loss_ctc": 0.6938390135765076, "train/loss_error": 0.4686203598976135, "train/loss_total": 0.5136641263961792 }, { "epoch": 2.975420785466204, "step": 11137, "train/loss_ctc": 1.0500375032424927, "train/loss_error": 0.43998926877975464, "train/loss_total": 0.5619989037513733 }, { "epoch": 2.975687950841571, "step": 11138, "train/loss_ctc": 0.5130083560943604, "train/loss_error": 0.40540677309036255, "train/loss_total": 0.4269270896911621 }, { "epoch": 2.975955116216938, "step": 11139, "train/loss_ctc": 0.7666183710098267, "train/loss_error": 0.4443455636501312, "train/loss_total": 0.5088001489639282 }, { "epoch": 2.976222281592306, "grad_norm": 1.8966224193572998, "learning_rate": 1.2153887256211595e-05, "loss": 0.5188, "step": 11140 }, { "epoch": 2.976222281592306, "step": 11140, "train/loss_ctc": 0.49020570516586304, "train/loss_error": 0.4624320864677429, "train/loss_total": 0.4679868221282959 }, { "epoch": 2.976489446967673, "step": 11141, "train/loss_ctc": 0.6409931182861328, "train/loss_error": 0.4559643268585205, "train/loss_total": 0.4929700791835785 }, { "epoch": 2.9767566123430402, "step": 11142, "train/loss_ctc": 0.4930383861064911, "train/loss_error": 0.5561500787734985, "train/loss_total": 
0.5435277223587036 }, { "epoch": 2.977023777718408, "step": 11143, "train/loss_ctc": 0.7684375047683716, "train/loss_error": 0.48433083295822144, "train/loss_total": 0.5411521792411804 }, { "epoch": 2.977290943093775, "step": 11144, "train/loss_ctc": 1.0888595581054688, "train/loss_error": 0.44739240407943726, "train/loss_total": 0.5756858587265015 }, { "epoch": 2.9775581084691423, "step": 11145, "train/loss_ctc": 1.1028926372528076, "train/loss_error": 0.4456155598163605, "train/loss_total": 0.5770710110664368 }, { "epoch": 2.97782527384451, "step": 11146, "train/loss_ctc": 0.788926362991333, "train/loss_error": 0.4826628267765045, "train/loss_total": 0.5439155697822571 }, { "epoch": 2.978092439219877, "step": 11147, "train/loss_ctc": 0.47317740321159363, "train/loss_error": 0.3896552622318268, "train/loss_total": 0.4063597023487091 }, { "epoch": 2.9783596045952443, "step": 11148, "train/loss_ctc": 0.3821585476398468, "train/loss_error": 0.4368528723716736, "train/loss_total": 0.4259140193462372 }, { "epoch": 2.978626769970612, "step": 11149, "train/loss_ctc": 0.43274736404418945, "train/loss_error": 0.4269162118434906, "train/loss_total": 0.4280824661254883 }, { "epoch": 2.978893935345979, "grad_norm": 1.658908486366272, "learning_rate": 1.2137857333689553e-05, "loss": 0.5003, "step": 11150 }, { "epoch": 2.978893935345979, "step": 11150, "train/loss_ctc": 0.16671371459960938, "train/loss_error": 0.42212429642677307, "train/loss_total": 0.3710421919822693 }, { "epoch": 2.9791611007213463, "step": 11151, "train/loss_ctc": 0.4325246214866638, "train/loss_error": 0.446810781955719, "train/loss_total": 0.44395357370376587 }, { "epoch": 2.979428266096714, "step": 11152, "train/loss_ctc": 0.7320026755332947, "train/loss_error": 0.41533929109573364, "train/loss_total": 0.47867196798324585 }, { "epoch": 2.979695431472081, "step": 11153, "train/loss_ctc": 0.6344971060752869, "train/loss_error": 0.46056145429611206, "train/loss_total": 0.49534860253334045 }, { "epoch": 
2.9799625968474484, "step": 11154, "train/loss_ctc": 0.9841102957725525, "train/loss_error": 0.4355265200138092, "train/loss_total": 0.5452432632446289 }, { "epoch": 2.980229762222816, "step": 11155, "train/loss_ctc": 0.4209890067577362, "train/loss_error": 0.3569919764995575, "train/loss_total": 0.3697913885116577 }, { "epoch": 2.980496927598183, "step": 11156, "train/loss_ctc": 0.46894022822380066, "train/loss_error": 0.41884154081344604, "train/loss_total": 0.4288612902164459 }, { "epoch": 2.9807640929735504, "step": 11157, "train/loss_ctc": 0.44560667872428894, "train/loss_error": 0.45537102222442627, "train/loss_total": 0.45341816544532776 }, { "epoch": 2.981031258348918, "step": 11158, "train/loss_ctc": 0.3560018539428711, "train/loss_error": 0.46163231134414673, "train/loss_total": 0.4405062198638916 }, { "epoch": 2.981298423724285, "step": 11159, "train/loss_ctc": 0.6535032391548157, "train/loss_error": 0.4613502025604248, "train/loss_total": 0.4997808039188385 }, { "epoch": 2.9815655890996524, "grad_norm": 1.3120172023773193, "learning_rate": 1.2121827411167514e-05, "loss": 0.4527, "step": 11160 }, { "epoch": 2.9815655890996524, "step": 11160, "train/loss_ctc": 1.2221137285232544, "train/loss_error": 0.4321296513080597, "train/loss_total": 0.5901264548301697 }, { "epoch": 2.98183275447502, "step": 11161, "train/loss_ctc": 0.8364006876945496, "train/loss_error": 0.49935707449913025, "train/loss_total": 0.5667657852172852 }, { "epoch": 2.9820999198503872, "step": 11162, "train/loss_ctc": 0.5166564583778381, "train/loss_error": 0.4912338852882385, "train/loss_total": 0.49631839990615845 }, { "epoch": 2.982367085225755, "step": 11163, "train/loss_ctc": 0.9307500720024109, "train/loss_error": 0.4306430220603943, "train/loss_total": 0.5306644439697266 }, { "epoch": 2.982634250601122, "step": 11164, "train/loss_ctc": 0.9671643972396851, "train/loss_error": 0.48077327013015747, "train/loss_total": 0.5780515074729919 }, { "epoch": 2.9829014159764897, "step": 11165, 
"train/loss_ctc": 0.796891450881958, "train/loss_error": 0.4074989855289459, "train/loss_total": 0.4853774905204773 }, { "epoch": 2.983168581351857, "step": 11166, "train/loss_ctc": 0.6734776496887207, "train/loss_error": 0.4026933014392853, "train/loss_total": 0.45685017108917236 }, { "epoch": 2.983435746727224, "step": 11167, "train/loss_ctc": 0.522005558013916, "train/loss_error": 0.4296521842479706, "train/loss_total": 0.44812285900115967 }, { "epoch": 2.9837029121025918, "step": 11168, "train/loss_ctc": 0.635513424873352, "train/loss_error": 0.4111171066761017, "train/loss_total": 0.45599639415740967 }, { "epoch": 2.983970077477959, "step": 11169, "train/loss_ctc": 1.089853286743164, "train/loss_error": 0.4769052267074585, "train/loss_total": 0.5994948148727417 }, { "epoch": 2.984237242853326, "grad_norm": 1.8294110298156738, "learning_rate": 1.2105797488645473e-05, "loss": 0.5208, "step": 11170 }, { "epoch": 2.984237242853326, "step": 11170, "train/loss_ctc": 0.7164920568466187, "train/loss_error": 0.4304797351360321, "train/loss_total": 0.4876822233200073 }, { "epoch": 2.984504408228694, "step": 11171, "train/loss_ctc": 0.9485658407211304, "train/loss_error": 0.4647127091884613, "train/loss_total": 0.5614833235740662 }, { "epoch": 2.984771573604061, "step": 11172, "train/loss_ctc": 0.6654312610626221, "train/loss_error": 0.4611872732639313, "train/loss_total": 0.5020360946655273 }, { "epoch": 2.985038738979428, "step": 11173, "train/loss_ctc": 0.9318671226501465, "train/loss_error": 0.44220080971717834, "train/loss_total": 0.540134072303772 }, { "epoch": 2.985305904354796, "step": 11174, "train/loss_ctc": 1.2995764017105103, "train/loss_error": 0.4260730445384979, "train/loss_total": 0.6007736921310425 }, { "epoch": 2.985573069730163, "step": 11175, "train/loss_ctc": 0.8955255746841431, "train/loss_error": 0.5251218676567078, "train/loss_total": 0.5992026329040527 }, { "epoch": 2.98584023510553, "step": 11176, "train/loss_ctc": 0.5783958435058594, 
"train/loss_error": 0.456159383058548, "train/loss_total": 0.48060667514801025 }, { "epoch": 2.986107400480898, "step": 11177, "train/loss_ctc": 0.5872843861579895, "train/loss_error": 0.45838555693626404, "train/loss_total": 0.48416534066200256 }, { "epoch": 2.986374565856265, "step": 11178, "train/loss_ctc": 0.6451812982559204, "train/loss_error": 0.5027315616607666, "train/loss_total": 0.5312215089797974 }, { "epoch": 2.9866417312316322, "step": 11179, "train/loss_ctc": 0.26048752665519714, "train/loss_error": 0.3758614659309387, "train/loss_total": 0.35278668999671936 }, { "epoch": 2.986908896607, "grad_norm": 1.0751656293869019, "learning_rate": 1.208976756612343e-05, "loss": 0.514, "step": 11180 }, { "epoch": 2.986908896607, "step": 11180, "train/loss_ctc": 0.531298816204071, "train/loss_error": 0.3721986413002014, "train/loss_total": 0.40401867032051086 }, { "epoch": 2.987176061982367, "step": 11181, "train/loss_ctc": 0.41721630096435547, "train/loss_error": 0.42623722553253174, "train/loss_total": 0.42443305253982544 }, { "epoch": 2.9874432273577343, "step": 11182, "train/loss_ctc": 0.8586169481277466, "train/loss_error": 0.5174033045768738, "train/loss_total": 0.5856460332870483 }, { "epoch": 2.987710392733102, "step": 11183, "train/loss_ctc": 1.480928659439087, "train/loss_error": 0.4328426420688629, "train/loss_total": 0.6424598693847656 }, { "epoch": 2.987977558108469, "step": 11184, "train/loss_ctc": 0.6248076558113098, "train/loss_error": 0.4486877918243408, "train/loss_total": 0.48391175270080566 }, { "epoch": 2.9882447234838363, "step": 11185, "train/loss_ctc": 0.6907011270523071, "train/loss_error": 0.40030938386917114, "train/loss_total": 0.45838773250579834 }, { "epoch": 2.988511888859204, "step": 11186, "train/loss_ctc": 1.1734483242034912, "train/loss_error": 0.4508114159107208, "train/loss_total": 0.5953388214111328 }, { "epoch": 2.988779054234571, "step": 11187, "train/loss_ctc": 0.36551135778427124, "train/loss_error": 0.42276448011398315, 
"train/loss_total": 0.41131386160850525 }, { "epoch": 2.9890462196099383, "step": 11188, "train/loss_ctc": 0.9327266216278076, "train/loss_error": 0.4490368366241455, "train/loss_total": 0.5457748174667358 }, { "epoch": 2.989313384985306, "step": 11189, "train/loss_ctc": 1.258521318435669, "train/loss_error": 0.41368746757507324, "train/loss_total": 0.5826542377471924 }, { "epoch": 2.989580550360673, "grad_norm": 1.627030611038208, "learning_rate": 1.207373764360139e-05, "loss": 0.5134, "step": 11190 }, { "epoch": 2.989580550360673, "step": 11190, "train/loss_ctc": 0.8981078863143921, "train/loss_error": 0.46753960847854614, "train/loss_total": 0.5536532402038574 }, { "epoch": 2.9898477157360404, "step": 11191, "train/loss_ctc": 0.36366036534309387, "train/loss_error": 0.42741793394088745, "train/loss_total": 0.41466641426086426 }, { "epoch": 2.990114881111408, "step": 11192, "train/loss_ctc": 0.43161502480506897, "train/loss_error": 0.4576701819896698, "train/loss_total": 0.4524591565132141 }, { "epoch": 2.990382046486775, "step": 11193, "train/loss_ctc": 0.8938431739807129, "train/loss_error": 0.4532795548439026, "train/loss_total": 0.5413923263549805 }, { "epoch": 2.990649211862143, "step": 11194, "train/loss_ctc": 0.29834240674972534, "train/loss_error": 0.4367133378982544, "train/loss_total": 0.409039169549942 }, { "epoch": 2.99091637723751, "step": 11195, "train/loss_ctc": 0.7756994962692261, "train/loss_error": 0.46733778715133667, "train/loss_total": 0.5290101170539856 }, { "epoch": 2.991183542612877, "step": 11196, "train/loss_ctc": 1.0303984880447388, "train/loss_error": 0.456825315952301, "train/loss_total": 0.5715399384498596 }, { "epoch": 2.991450707988245, "step": 11197, "train/loss_ctc": 0.9130252599716187, "train/loss_error": 0.4154795706272125, "train/loss_total": 0.5149887204170227 }, { "epoch": 2.991717873363612, "step": 11198, "train/loss_ctc": 1.3054449558258057, "train/loss_error": 0.4098275601863861, "train/loss_total": 0.588951051235199 }, { 
"epoch": 2.9919850387389797, "step": 11199, "train/loss_ctc": 1.3610124588012695, "train/loss_error": 0.46934306621551514, "train/loss_total": 0.647676944732666 }, { "epoch": 2.992252204114347, "grad_norm": 4.488957405090332, "learning_rate": 1.2057707721079348e-05, "loss": 0.5223, "step": 11200 }, { "epoch": 2.992252204114347, "step": 11200, "train/loss_ctc": 0.9022787809371948, "train/loss_error": 0.5199562907218933, "train/loss_total": 0.5964208245277405 }, { "epoch": 2.992519369489714, "step": 11201, "train/loss_ctc": 1.405895709991455, "train/loss_error": 0.42597997188568115, "train/loss_total": 0.6219631433486938 }, { "epoch": 2.9927865348650817, "step": 11202, "train/loss_ctc": 0.5380988717079163, "train/loss_error": 0.45782461762428284, "train/loss_total": 0.47387945652008057 }, { "epoch": 2.993053700240449, "step": 11203, "train/loss_ctc": 0.834342360496521, "train/loss_error": 0.46851614117622375, "train/loss_total": 0.5416814088821411 }, { "epoch": 2.993320865615816, "step": 11204, "train/loss_ctc": 0.5423438549041748, "train/loss_error": 0.5048825740814209, "train/loss_total": 0.5123748779296875 }, { "epoch": 2.9935880309911838, "step": 11205, "train/loss_ctc": 0.877180814743042, "train/loss_error": 0.49098941683769226, "train/loss_total": 0.5682277083396912 }, { "epoch": 2.993855196366551, "step": 11206, "train/loss_ctc": 1.0912325382232666, "train/loss_error": 0.4202297329902649, "train/loss_total": 0.5544303059577942 }, { "epoch": 2.994122361741918, "step": 11207, "train/loss_ctc": 0.8575193285942078, "train/loss_error": 0.43419861793518066, "train/loss_total": 0.518862783908844 }, { "epoch": 2.994389527117286, "step": 11208, "train/loss_ctc": 0.5936012268066406, "train/loss_error": 0.40426191687583923, "train/loss_total": 0.44212979078292847 }, { "epoch": 2.994656692492653, "step": 11209, "train/loss_ctc": 0.5604517459869385, "train/loss_error": 0.44118568301200867, "train/loss_total": 0.46503889560699463 }, { "epoch": 2.99492385786802, "grad_norm": 
2.48173451423645, "learning_rate": 1.2041677798557307e-05, "loss": 0.5295, "step": 11210 }, { "epoch": 2.99492385786802, "step": 11210, "train/loss_ctc": 0.4215749502182007, "train/loss_error": 0.4672207534313202, "train/loss_total": 0.4580916166305542 }, { "epoch": 2.995191023243388, "step": 11211, "train/loss_ctc": 0.44461268186569214, "train/loss_error": 0.3903503119945526, "train/loss_total": 0.4012027978897095 }, { "epoch": 2.995458188618755, "step": 11212, "train/loss_ctc": 0.31801778078079224, "train/loss_error": 0.4859432280063629, "train/loss_total": 0.4523581266403198 }, { "epoch": 2.995725353994122, "step": 11213, "train/loss_ctc": 0.5318514108657837, "train/loss_error": 0.4157264530658722, "train/loss_total": 0.43895143270492554 }, { "epoch": 2.99599251936949, "step": 11214, "train/loss_ctc": 0.7518962025642395, "train/loss_error": 0.41525721549987793, "train/loss_total": 0.48258501291275024 }, { "epoch": 2.996259684744857, "step": 11215, "train/loss_ctc": 0.5939260721206665, "train/loss_error": 0.42965272068977356, "train/loss_total": 0.462507426738739 }, { "epoch": 2.9965268501202242, "step": 11216, "train/loss_ctc": 1.5140026807785034, "train/loss_error": 0.47494813799858093, "train/loss_total": 0.6827590465545654 }, { "epoch": 2.996794015495592, "step": 11217, "train/loss_ctc": 0.44164687395095825, "train/loss_error": 0.4634301960468292, "train/loss_total": 0.459073543548584 }, { "epoch": 2.997061180870959, "step": 11218, "train/loss_ctc": 0.5473041534423828, "train/loss_error": 0.4391652047634125, "train/loss_total": 0.46079298853874207 }, { "epoch": 2.9973283462463263, "step": 11219, "train/loss_ctc": 0.4296957552433014, "train/loss_error": 0.5171571969985962, "train/loss_total": 0.49966490268707275 }, { "epoch": 2.997595511621694, "grad_norm": 2.126162052154541, "learning_rate": 1.2025647876035266e-05, "loss": 0.4798, "step": 11220 }, { "epoch": 2.997595511621694, "step": 11220, "train/loss_ctc": 0.6862373352050781, "train/loss_error": 
0.3689904808998108, "train/loss_total": 0.4324398636817932 }, { "epoch": 2.997862676997061, "step": 11221, "train/loss_ctc": 0.8641709089279175, "train/loss_error": 0.46562883257865906, "train/loss_total": 0.5453372597694397 }, { "epoch": 2.9981298423724283, "step": 11222, "train/loss_ctc": 0.614260196685791, "train/loss_error": 0.45298242568969727, "train/loss_total": 0.4852380156517029 }, { "epoch": 2.998397007747796, "step": 11223, "train/loss_ctc": 0.772311806678772, "train/loss_error": 0.5027798414230347, "train/loss_total": 0.556686282157898 }, { "epoch": 2.998664173123163, "step": 11224, "train/loss_ctc": 1.1838147640228271, "train/loss_error": 0.48463183641433716, "train/loss_total": 0.6244684457778931 }, { "epoch": 2.9989313384985303, "step": 11225, "train/loss_ctc": 0.5375871658325195, "train/loss_error": 0.45143046975135803, "train/loss_total": 0.4686618447303772 }, { "epoch": 2.999198503873898, "step": 11226, "train/loss_ctc": 0.5991509556770325, "train/loss_error": 0.48357781767845154, "train/loss_total": 0.5066924095153809 }, { "epoch": 2.999465669249265, "step": 11227, "train/loss_ctc": 0.7262211441993713, "train/loss_error": 0.43065041303634644, "train/loss_total": 0.48976457118988037 }, { "epoch": 2.999732834624633, "step": 11228, "train/loss_ctc": 0.2739168405532837, "train/loss_error": 0.4518117308616638, "train/loss_total": 0.41623276472091675 }, { "epoch": 3.0, "eval_eval/f1_0": 0.6183615326881409, "eval_eval/f1_1": 0.8201773762702942, "eval_eval/precision_0": 0.7684265375137329, "eval_eval/precision_1": 0.7510663270950317, "eval_eval/recall_0": 0.5173323750495911, "eval_eval/recall_1": 0.9032962322235107, "eval_eval/wer": 0.16187498273046105, "eval_runtime": 35.9922, "eval_samples_per_second": 12.753, "eval_steps_per_second": 12.753, "step": 11229 }, { "epoch": 3.0, "step": 11229, "train/loss_ctc": 1.15470552444458, "train/loss_error": 0.46368637681007385, "train/loss_total": 0.6018902063369751 }, { "epoch": 3.000267165375367, "grad_norm": 
1.3334033489227295, "learning_rate": 1.2009617953513224e-05, "loss": 0.5127, "step": 11230 }, { "epoch": 3.000267165375367, "step": 11230, "train/loss_ctc": 0.6597281098365784, "train/loss_error": 0.44404757022857666, "train/loss_total": 0.48718369007110596 }, { "epoch": 3.000534330750735, "step": 11231, "train/loss_ctc": 0.5183937549591064, "train/loss_error": 0.408309668302536, "train/loss_total": 0.4303264915943146 }, { "epoch": 3.000801496126102, "step": 11232, "train/loss_ctc": 0.370108425617218, "train/loss_error": 0.43699637055397034, "train/loss_total": 0.42361879348754883 }, { "epoch": 3.0010686615014692, "step": 11233, "train/loss_ctc": 0.5488440990447998, "train/loss_error": 0.42754051089286804, "train/loss_total": 0.45180124044418335 }, { "epoch": 3.001335826876837, "step": 11234, "train/loss_ctc": 0.729166567325592, "train/loss_error": 0.42537790536880493, "train/loss_total": 0.4861356317996979 }, { "epoch": 3.001602992252204, "step": 11235, "train/loss_ctc": 0.4620145559310913, "train/loss_error": 0.4113815724849701, "train/loss_total": 0.42150816321372986 }, { "epoch": 3.0018701576275713, "step": 11236, "train/loss_ctc": 0.7199889421463013, "train/loss_error": 0.418975830078125, "train/loss_total": 0.47917845845222473 }, { "epoch": 3.002137323002939, "step": 11237, "train/loss_ctc": 1.001696228981018, "train/loss_error": 0.46089980006217957, "train/loss_total": 0.5690590739250183 }, { "epoch": 3.002404488378306, "step": 11238, "train/loss_ctc": 0.6827785968780518, "train/loss_error": 0.4839480221271515, "train/loss_total": 0.5237141251564026 }, { "epoch": 3.0026716537536737, "step": 11239, "train/loss_ctc": 0.5092869997024536, "train/loss_error": 0.4950176775455475, "train/loss_total": 0.4978715479373932 }, { "epoch": 3.002938819129041, "grad_norm": 2.7118313312530518, "learning_rate": 1.1993588030991183e-05, "loss": 0.477, "step": 11240 }, { "epoch": 3.002938819129041, "step": 11240, "train/loss_ctc": 0.8848058581352234, "train/loss_error": 
0.43087783455848694, "train/loss_total": 0.5216634273529053 }, { "epoch": 3.003205984504408, "step": 11241, "train/loss_ctc": 0.8920335173606873, "train/loss_error": 0.4419609010219574, "train/loss_total": 0.5319754481315613 }, { "epoch": 3.0034731498797758, "step": 11242, "train/loss_ctc": 0.6626054048538208, "train/loss_error": 0.4178505837917328, "train/loss_total": 0.4668015241622925 }, { "epoch": 3.003740315255143, "step": 11243, "train/loss_ctc": 0.29921281337738037, "train/loss_error": 0.44768059253692627, "train/loss_total": 0.41798704862594604 }, { "epoch": 3.00400748063051, "step": 11244, "train/loss_ctc": 0.6733719110488892, "train/loss_error": 0.44825252890586853, "train/loss_total": 0.4932764172554016 }, { "epoch": 3.004274646005878, "step": 11245, "train/loss_ctc": 0.9954628348350525, "train/loss_error": 0.4827379882335663, "train/loss_total": 0.5852829217910767 }, { "epoch": 3.004541811381245, "step": 11246, "train/loss_ctc": 1.118675947189331, "train/loss_error": 0.429422527551651, "train/loss_total": 0.5672731995582581 }, { "epoch": 3.004808976756612, "step": 11247, "train/loss_ctc": 0.850013017654419, "train/loss_error": 0.44564810395240784, "train/loss_total": 0.5265210866928101 }, { "epoch": 3.00507614213198, "step": 11248, "train/loss_ctc": 0.7415085434913635, "train/loss_error": 0.442205548286438, "train/loss_total": 0.5020661354064941 }, { "epoch": 3.005343307507347, "step": 11249, "train/loss_ctc": 0.7560365796089172, "train/loss_error": 0.4551979899406433, "train/loss_total": 0.515365719795227 }, { "epoch": 3.005610472882714, "grad_norm": 1.814314842224121, "learning_rate": 1.1977558108469144e-05, "loss": 0.5128, "step": 11250 }, { "epoch": 3.005610472882714, "step": 11250, "train/loss_ctc": 0.40790271759033203, "train/loss_error": 0.48930078744888306, "train/loss_total": 0.47302117943763733 }, { "epoch": 3.005877638258082, "step": 11251, "train/loss_ctc": 0.5843942761421204, "train/loss_error": 0.4466557204723358, "train/loss_total": 
0.4742034375667572 }, { "epoch": 3.006144803633449, "step": 11252, "train/loss_ctc": 1.4971966743469238, "train/loss_error": 0.4249981641769409, "train/loss_total": 0.6394379138946533 }, { "epoch": 3.0064119690088162, "step": 11253, "train/loss_ctc": 1.156519889831543, "train/loss_error": 0.42614230513572693, "train/loss_total": 0.5722178220748901 }, { "epoch": 3.006679134384184, "step": 11254, "train/loss_ctc": 1.218399167060852, "train/loss_error": 0.4230077266693115, "train/loss_total": 0.5820860266685486 }, { "epoch": 3.006946299759551, "step": 11255, "train/loss_ctc": 0.5120913982391357, "train/loss_error": 0.38251686096191406, "train/loss_total": 0.4084317684173584 }, { "epoch": 3.0072134651349187, "step": 11256, "train/loss_ctc": 0.22932197153568268, "train/loss_error": 0.42694544792175293, "train/loss_total": 0.38742077350616455 }, { "epoch": 3.007480630510286, "step": 11257, "train/loss_ctc": 0.7776873111724854, "train/loss_error": 0.4300943613052368, "train/loss_total": 0.4996129870414734 }, { "epoch": 3.007747795885653, "step": 11258, "train/loss_ctc": 0.7386677861213684, "train/loss_error": 0.45991677045822144, "train/loss_total": 0.5156669616699219 }, { "epoch": 3.0080149612610207, "step": 11259, "train/loss_ctc": 0.5965264439582825, "train/loss_error": 0.3819420635700226, "train/loss_total": 0.4248589277267456 }, { "epoch": 3.008282126636388, "grad_norm": 2.685567617416382, "learning_rate": 1.1961528185947102e-05, "loss": 0.4977, "step": 11260 }, { "epoch": 3.008282126636388, "step": 11260, "train/loss_ctc": 0.6636925339698792, "train/loss_error": 0.4761786162853241, "train/loss_total": 0.5136814117431641 }, { "epoch": 3.008549292011755, "step": 11261, "train/loss_ctc": 0.8918852210044861, "train/loss_error": 0.4019886553287506, "train/loss_total": 0.4999679923057556 }, { "epoch": 3.008816457387123, "step": 11262, "train/loss_ctc": 0.2359546571969986, "train/loss_error": 0.4446423649787903, "train/loss_total": 0.40290483832359314 }, { "epoch": 
3.00908362276249, "step": 11263, "train/loss_ctc": 0.5785176157951355, "train/loss_error": 0.42566293478012085, "train/loss_total": 0.4562338888645172 }, { "epoch": 3.009350788137857, "step": 11264, "train/loss_ctc": 0.6101280450820923, "train/loss_error": 0.4618697762489319, "train/loss_total": 0.4915214478969574 }, { "epoch": 3.009617953513225, "step": 11265, "train/loss_ctc": 0.840865969657898, "train/loss_error": 0.42735835909843445, "train/loss_total": 0.5100598931312561 }, { "epoch": 3.009885118888592, "step": 11266, "train/loss_ctc": 0.6628203988075256, "train/loss_error": 0.4531179666519165, "train/loss_total": 0.49505847692489624 }, { "epoch": 3.010152284263959, "step": 11267, "train/loss_ctc": 0.6565659046173096, "train/loss_error": 0.5114647150039673, "train/loss_total": 0.5404849648475647 }, { "epoch": 3.010419449639327, "step": 11268, "train/loss_ctc": 1.0054469108581543, "train/loss_error": 0.4486162066459656, "train/loss_total": 0.5599823594093323 }, { "epoch": 3.010686615014694, "step": 11269, "train/loss_ctc": 0.7327485680580139, "train/loss_error": 0.4813541769981384, "train/loss_total": 0.5316330790519714 }, { "epoch": 3.0109537803900612, "grad_norm": 1.466989517211914, "learning_rate": 1.194549826342506e-05, "loss": 0.5002, "step": 11270 }, { "epoch": 3.0109537803900612, "step": 11270, "train/loss_ctc": 0.416086882352829, "train/loss_error": 0.42196908593177795, "train/loss_total": 0.42079266905784607 }, { "epoch": 3.011220945765429, "step": 11271, "train/loss_ctc": 0.7902554273605347, "train/loss_error": 0.4263163208961487, "train/loss_total": 0.4991041421890259 }, { "epoch": 3.011488111140796, "step": 11272, "train/loss_ctc": 0.7347559332847595, "train/loss_error": 0.4747490882873535, "train/loss_total": 0.5267504453659058 }, { "epoch": 3.0117552765161637, "step": 11273, "train/loss_ctc": 1.3155596256256104, "train/loss_error": 0.5293961763381958, "train/loss_total": 0.6866288781166077 }, { "epoch": 3.012022441891531, "step": 11274, 
"train/loss_ctc": 0.9455630779266357, "train/loss_error": 0.4517035186290741, "train/loss_total": 0.5504754185676575 }, { "epoch": 3.012289607266898, "step": 11275, "train/loss_ctc": 0.9413434267044067, "train/loss_error": 0.4928208291530609, "train/loss_total": 0.582525372505188 }, { "epoch": 3.0125567726422657, "step": 11276, "train/loss_ctc": 0.3945122957229614, "train/loss_error": 0.44213542342185974, "train/loss_total": 0.43261080980300903 }, { "epoch": 3.012823938017633, "step": 11277, "train/loss_ctc": 1.367218255996704, "train/loss_error": 0.43946608901023865, "train/loss_total": 0.6250165700912476 }, { "epoch": 3.013091103393, "step": 11278, "train/loss_ctc": 1.0066595077514648, "train/loss_error": 0.48630961775779724, "train/loss_total": 0.5903795957565308 }, { "epoch": 3.0133582687683678, "step": 11279, "train/loss_ctc": 1.2182644605636597, "train/loss_error": 0.5682485103607178, "train/loss_total": 0.6982517242431641 }, { "epoch": 3.013625434143735, "grad_norm": 3.3547680377960205, "learning_rate": 1.192946834090302e-05, "loss": 0.5613, "step": 11280 }, { "epoch": 3.013625434143735, "step": 11280, "train/loss_ctc": 0.7400115728378296, "train/loss_error": 0.5148115158081055, "train/loss_total": 0.5598515272140503 }, { "epoch": 3.013892599519102, "step": 11281, "train/loss_ctc": 0.4510023295879364, "train/loss_error": 0.4084816873073578, "train/loss_total": 0.41698580980300903 }, { "epoch": 3.01415976489447, "step": 11282, "train/loss_ctc": 0.6070421934127808, "train/loss_error": 0.5144936442375183, "train/loss_total": 0.5330033898353577 }, { "epoch": 3.014426930269837, "step": 11283, "train/loss_ctc": 0.5210345387458801, "train/loss_error": 0.46650058031082153, "train/loss_total": 0.47740739583969116 }, { "epoch": 3.014694095645204, "step": 11284, "train/loss_ctc": 1.0918288230895996, "train/loss_error": 0.48120906949043274, "train/loss_total": 0.603333055973053 }, { "epoch": 3.014961261020572, "step": 11285, "train/loss_ctc": 0.48648810386657715, 
"train/loss_error": 0.48208457231521606, "train/loss_total": 0.48296529054641724 }, { "epoch": 3.015228426395939, "step": 11286, "train/loss_ctc": 0.3887765407562256, "train/loss_error": 0.38832372426986694, "train/loss_total": 0.38841429352760315 }, { "epoch": 3.015495591771306, "step": 11287, "train/loss_ctc": 2.0078845024108887, "train/loss_error": 0.5173871517181396, "train/loss_total": 0.8154866695404053 }, { "epoch": 3.015762757146674, "step": 11288, "train/loss_ctc": 0.44792231917381287, "train/loss_error": 0.4351927638053894, "train/loss_total": 0.43773868680000305 }, { "epoch": 3.016029922522041, "step": 11289, "train/loss_ctc": 0.8586825728416443, "train/loss_error": 0.419996052980423, "train/loss_total": 0.5077333450317383 }, { "epoch": 3.0162970878974087, "grad_norm": 1.534807562828064, "learning_rate": 1.1913438418380978e-05, "loss": 0.5223, "step": 11290 }, { "epoch": 3.0162970878974087, "step": 11290, "train/loss_ctc": 1.2018585205078125, "train/loss_error": 0.45798981189727783, "train/loss_total": 0.6067636013031006 }, { "epoch": 3.016564253272776, "step": 11291, "train/loss_ctc": 0.8340429067611694, "train/loss_error": 0.5075050592422485, "train/loss_total": 0.5728126168251038 }, { "epoch": 3.016831418648143, "step": 11292, "train/loss_ctc": 0.839232325553894, "train/loss_error": 0.41506558656692505, "train/loss_total": 0.4998989403247833 }, { "epoch": 3.0170985840235107, "step": 11293, "train/loss_ctc": 1.1265126466751099, "train/loss_error": 0.4254106283187866, "train/loss_total": 0.5656310319900513 }, { "epoch": 3.017365749398878, "step": 11294, "train/loss_ctc": 0.5816396474838257, "train/loss_error": 0.45008453726768494, "train/loss_total": 0.4763955771923065 }, { "epoch": 3.017632914774245, "step": 11295, "train/loss_ctc": 0.6503255367279053, "train/loss_error": 0.3647654950618744, "train/loss_total": 0.42187750339508057 }, { "epoch": 3.0179000801496128, "step": 11296, "train/loss_ctc": 1.1262297630310059, "train/loss_error": 
0.4748225212097168, "train/loss_total": 0.6051039695739746 }, { "epoch": 3.01816724552498, "step": 11297, "train/loss_ctc": 0.5171505212783813, "train/loss_error": 0.4194589853286743, "train/loss_total": 0.4389973282814026 }, { "epoch": 3.018434410900347, "step": 11298, "train/loss_ctc": 0.5505640506744385, "train/loss_error": 0.44349756836891174, "train/loss_total": 0.4649108648300171 }, { "epoch": 3.018701576275715, "step": 11299, "train/loss_ctc": 1.308694839477539, "train/loss_error": 0.4405888617038727, "train/loss_total": 0.6142100691795349 }, { "epoch": 3.018968741651082, "grad_norm": 1.204438328742981, "learning_rate": 1.1897408495858936e-05, "loss": 0.5267, "step": 11300 }, { "epoch": 3.018968741651082, "step": 11300, "train/loss_ctc": 1.1214628219604492, "train/loss_error": 0.44202810525894165, "train/loss_total": 0.5779150724411011 }, { "epoch": 3.019235907026449, "step": 11301, "train/loss_ctc": 0.21748997271060944, "train/loss_error": 0.4677444398403168, "train/loss_total": 0.417693555355072 }, { "epoch": 3.019503072401817, "step": 11302, "train/loss_ctc": 0.7543889284133911, "train/loss_error": 0.4881261885166168, "train/loss_total": 0.5413787364959717 }, { "epoch": 3.019770237777184, "step": 11303, "train/loss_ctc": 1.0551886558532715, "train/loss_error": 0.45027709007263184, "train/loss_total": 0.5712594389915466 }, { "epoch": 3.0200374031525516, "step": 11304, "train/loss_ctc": 1.0828193426132202, "train/loss_error": 0.4487004280090332, "train/loss_total": 0.5755242109298706 }, { "epoch": 3.020304568527919, "step": 11305, "train/loss_ctc": 1.040602207183838, "train/loss_error": 0.44859153032302856, "train/loss_total": 0.5669936537742615 }, { "epoch": 3.020571733903286, "step": 11306, "train/loss_ctc": 0.633577823638916, "train/loss_error": 0.4403516948223114, "train/loss_total": 0.4789969325065613 }, { "epoch": 3.0208388992786537, "step": 11307, "train/loss_ctc": 1.252805233001709, "train/loss_error": 0.4380296766757965, "train/loss_total": 
0.6009848117828369 }, { "epoch": 3.021106064654021, "step": 11308, "train/loss_ctc": 0.7066981792449951, "train/loss_error": 0.3887825310230255, "train/loss_total": 0.4523656964302063 }, { "epoch": 3.021373230029388, "step": 11309, "train/loss_ctc": 0.45055949687957764, "train/loss_error": 0.5313424468040466, "train/loss_total": 0.5151858329772949 }, { "epoch": 3.0216403954047557, "grad_norm": 2.163968324661255, "learning_rate": 1.1881378573336896e-05, "loss": 0.5298, "step": 11310 }, { "epoch": 3.0216403954047557, "step": 11310, "train/loss_ctc": 0.8268905878067017, "train/loss_error": 0.49052131175994873, "train/loss_total": 0.5577951669692993 }, { "epoch": 3.021907560780123, "step": 11311, "train/loss_ctc": 0.6093176007270813, "train/loss_error": 0.5113812685012817, "train/loss_total": 0.5309685468673706 }, { "epoch": 3.02217472615549, "step": 11312, "train/loss_ctc": 0.4115299582481384, "train/loss_error": 0.4041173458099365, "train/loss_total": 0.4055998921394348 }, { "epoch": 3.0224418915308577, "step": 11313, "train/loss_ctc": 0.5478557348251343, "train/loss_error": 0.42376142740249634, "train/loss_total": 0.4485802948474884 }, { "epoch": 3.022709056906225, "step": 11314, "train/loss_ctc": 0.5747140645980835, "train/loss_error": 0.4223916530609131, "train/loss_total": 0.4528561532497406 }, { "epoch": 3.022976222281592, "step": 11315, "train/loss_ctc": 0.8281921148300171, "train/loss_error": 0.4244518578052521, "train/loss_total": 0.5051999092102051 }, { "epoch": 3.0232433876569598, "step": 11316, "train/loss_ctc": 1.245185375213623, "train/loss_error": 0.46894729137420654, "train/loss_total": 0.6241949200630188 }, { "epoch": 3.023510553032327, "step": 11317, "train/loss_ctc": 0.5223978757858276, "train/loss_error": 0.4039819538593292, "train/loss_total": 0.4276651442050934 }, { "epoch": 3.023777718407694, "step": 11318, "train/loss_ctc": 1.1167330741882324, "train/loss_error": 0.4314277470111847, "train/loss_total": 0.5684888362884521 }, { "epoch": 
3.024044883783062, "step": 11319, "train/loss_ctc": 0.7493057250976562, "train/loss_error": 0.4794114828109741, "train/loss_total": 0.5333903431892395 }, { "epoch": 3.024312049158429, "grad_norm": 1.6211631298065186, "learning_rate": 1.1865348650814854e-05, "loss": 0.5055, "step": 11320 }, { "epoch": 3.024312049158429, "step": 11320, "train/loss_ctc": 0.44843894243240356, "train/loss_error": 0.3909524083137512, "train/loss_total": 0.40244972705841064 }, { "epoch": 3.0245792145337966, "step": 11321, "train/loss_ctc": 0.496736079454422, "train/loss_error": 0.4059372544288635, "train/loss_total": 0.4240970015525818 }, { "epoch": 3.024846379909164, "step": 11322, "train/loss_ctc": 0.7063368558883667, "train/loss_error": 0.43274518847465515, "train/loss_total": 0.4874635338783264 }, { "epoch": 3.025113545284531, "step": 11323, "train/loss_ctc": 0.4799234867095947, "train/loss_error": 0.4093514680862427, "train/loss_total": 0.42346587777137756 }, { "epoch": 3.0253807106598987, "step": 11324, "train/loss_ctc": 0.7931600213050842, "train/loss_error": 0.43971359729766846, "train/loss_total": 0.5104029178619385 }, { "epoch": 3.025647876035266, "step": 11325, "train/loss_ctc": 0.8260063529014587, "train/loss_error": 0.48145079612731934, "train/loss_total": 0.5503619313240051 }, { "epoch": 3.025915041410633, "step": 11326, "train/loss_ctc": 0.7285752296447754, "train/loss_error": 0.4877414107322693, "train/loss_total": 0.5359081625938416 }, { "epoch": 3.0261822067860007, "step": 11327, "train/loss_ctc": 0.6126056909561157, "train/loss_error": 0.48609596490859985, "train/loss_total": 0.5113978981971741 }, { "epoch": 3.026449372161368, "step": 11328, "train/loss_ctc": 1.034776210784912, "train/loss_error": 0.434222549200058, "train/loss_total": 0.5543332695960999 }, { "epoch": 3.026716537536735, "step": 11329, "train/loss_ctc": 0.5168704390525818, "train/loss_error": 0.49917924404144287, "train/loss_total": 0.5027174949645996 }, { "epoch": 3.0269837029121027, "grad_norm": 
2.84338641166687, "learning_rate": 1.1849318728292812e-05, "loss": 0.4903, "step": 11330 }, { "epoch": 3.0269837029121027, "step": 11330, "train/loss_ctc": 1.6723694801330566, "train/loss_error": 0.4384765028953552, "train/loss_total": 0.6852551102638245 }, { "epoch": 3.02725086828747, "step": 11331, "train/loss_ctc": 0.5285773873329163, "train/loss_error": 0.4447108805179596, "train/loss_total": 0.4614841938018799 }, { "epoch": 3.027518033662837, "step": 11332, "train/loss_ctc": 1.1429544687271118, "train/loss_error": 0.5197439789772034, "train/loss_total": 0.6443860530853271 }, { "epoch": 3.0277851990382048, "step": 11333, "train/loss_ctc": 1.3796645402908325, "train/loss_error": 0.4947096109390259, "train/loss_total": 0.6717005968093872 }, { "epoch": 3.028052364413572, "step": 11334, "train/loss_ctc": 0.7760422229766846, "train/loss_error": 0.48921623826026917, "train/loss_total": 0.5465814471244812 }, { "epoch": 3.028319529788939, "step": 11335, "train/loss_ctc": 0.8469811677932739, "train/loss_error": 0.463845431804657, "train/loss_total": 0.5404725670814514 }, { "epoch": 3.028586695164307, "step": 11336, "train/loss_ctc": 0.4587438106536865, "train/loss_error": 0.37770870327949524, "train/loss_total": 0.39391574263572693 }, { "epoch": 3.028853860539674, "step": 11337, "train/loss_ctc": 0.4802909791469574, "train/loss_error": 0.44150105118751526, "train/loss_total": 0.44925904273986816 }, { "epoch": 3.0291210259150416, "step": 11338, "train/loss_ctc": 1.170454502105713, "train/loss_error": 0.4481045603752136, "train/loss_total": 0.5925745368003845 }, { "epoch": 3.029388191290409, "step": 11339, "train/loss_ctc": 1.2278294563293457, "train/loss_error": 0.5420429706573486, "train/loss_total": 0.679200291633606 }, { "epoch": 3.029655356665776, "grad_norm": 2.947780132293701, "learning_rate": 1.1833288805770774e-05, "loss": 0.5665, "step": 11340 }, { "epoch": 3.029655356665776, "step": 11340, "train/loss_ctc": 0.3193967640399933, "train/loss_error": 
0.4027012288570404, "train/loss_total": 0.3860403299331665 }, { "epoch": 3.0299225220411437, "step": 11341, "train/loss_ctc": 0.5467254519462585, "train/loss_error": 0.39672034978866577, "train/loss_total": 0.42672139406204224 }, { "epoch": 3.030189687416511, "step": 11342, "train/loss_ctc": 0.7187683582305908, "train/loss_error": 0.4494623839855194, "train/loss_total": 0.5033235549926758 }, { "epoch": 3.030456852791878, "step": 11343, "train/loss_ctc": 1.7212607860565186, "train/loss_error": 0.4901776909828186, "train/loss_total": 0.7363942861557007 }, { "epoch": 3.0307240181672457, "step": 11344, "train/loss_ctc": 0.9246756434440613, "train/loss_error": 0.4703371524810791, "train/loss_total": 0.5612048506736755 }, { "epoch": 3.030991183542613, "step": 11345, "train/loss_ctc": 0.4986657500267029, "train/loss_error": 0.47904545068740845, "train/loss_total": 0.4829695224761963 }, { "epoch": 3.03125834891798, "step": 11346, "train/loss_ctc": 1.3007317781448364, "train/loss_error": 0.5290759205818176, "train/loss_total": 0.6834070682525635 }, { "epoch": 3.0315255142933477, "step": 11347, "train/loss_ctc": 0.5275347232818604, "train/loss_error": 0.4680151641368866, "train/loss_total": 0.47991910576820374 }, { "epoch": 3.031792679668715, "step": 11348, "train/loss_ctc": 0.9337853193283081, "train/loss_error": 0.42499735951423645, "train/loss_total": 0.5267549753189087 }, { "epoch": 3.032059845044082, "step": 11349, "train/loss_ctc": 1.4392454624176025, "train/loss_error": 0.5250052809715271, "train/loss_total": 0.7078533172607422 }, { "epoch": 3.0323270104194497, "grad_norm": 1.6273256540298462, "learning_rate": 1.1817258883248732e-05, "loss": 0.5495, "step": 11350 }, { "epoch": 3.0323270104194497, "step": 11350, "train/loss_ctc": 0.5947275161743164, "train/loss_error": 0.5008818507194519, "train/loss_total": 0.5196509957313538 }, { "epoch": 3.032594175794817, "step": 11351, "train/loss_ctc": 0.6078780889511108, "train/loss_error": 0.4486372172832489, 
"train/loss_total": 0.4804854094982147 }, { "epoch": 3.032861341170184, "step": 11352, "train/loss_ctc": 0.5899996161460876, "train/loss_error": 0.4115539789199829, "train/loss_total": 0.4472430944442749 }, { "epoch": 3.0331285065455518, "step": 11353, "train/loss_ctc": 0.7581233978271484, "train/loss_error": 0.4529060423374176, "train/loss_total": 0.5139495134353638 }, { "epoch": 3.033395671920919, "step": 11354, "train/loss_ctc": 0.3772730827331543, "train/loss_error": 0.40674471855163574, "train/loss_total": 0.40085041522979736 }, { "epoch": 3.0336628372962866, "step": 11355, "train/loss_ctc": 0.5827678442001343, "train/loss_error": 0.4915691018104553, "train/loss_total": 0.5098088383674622 }, { "epoch": 3.033930002671654, "step": 11356, "train/loss_ctc": 0.5374824404716492, "train/loss_error": 0.43419864773750305, "train/loss_total": 0.45485541224479675 }, { "epoch": 3.034197168047021, "step": 11357, "train/loss_ctc": 0.7241160869598389, "train/loss_error": 0.4245283007621765, "train/loss_total": 0.48444586992263794 }, { "epoch": 3.0344643334223886, "step": 11358, "train/loss_ctc": 0.8154166340827942, "train/loss_error": 0.5207420587539673, "train/loss_total": 0.5796769857406616 }, { "epoch": 3.034731498797756, "step": 11359, "train/loss_ctc": 0.9920902252197266, "train/loss_error": 0.44736844301223755, "train/loss_total": 0.5563127994537354 }, { "epoch": 3.034998664173123, "grad_norm": 1.6346442699432373, "learning_rate": 1.180122896072669e-05, "loss": 0.4947, "step": 11360 }, { "epoch": 3.034998664173123, "step": 11360, "train/loss_ctc": 0.9833845496177673, "train/loss_error": 0.49562880396842957, "train/loss_total": 0.5931799411773682 }, { "epoch": 3.0352658295484907, "step": 11361, "train/loss_ctc": 0.4946322739124298, "train/loss_error": 0.5324339866638184, "train/loss_total": 0.5248736143112183 }, { "epoch": 3.035532994923858, "step": 11362, "train/loss_ctc": 0.7492784261703491, "train/loss_error": 0.4455908536911011, "train/loss_total": 
0.5063284039497375 }, { "epoch": 3.035800160299225, "step": 11363, "train/loss_ctc": 0.8367218971252441, "train/loss_error": 0.44346264004707336, "train/loss_total": 0.5221145153045654 }, { "epoch": 3.0360673256745927, "step": 11364, "train/loss_ctc": 0.5753336548805237, "train/loss_error": 0.5026047825813293, "train/loss_total": 0.5171505808830261 }, { "epoch": 3.03633449104996, "step": 11365, "train/loss_ctc": 0.9968048930168152, "train/loss_error": 0.44138503074645996, "train/loss_total": 0.55246901512146 }, { "epoch": 3.036601656425327, "step": 11366, "train/loss_ctc": 0.44904837012290955, "train/loss_error": 0.42180949449539185, "train/loss_total": 0.4272572994232178 }, { "epoch": 3.0368688218006947, "step": 11367, "train/loss_ctc": 0.3677309453487396, "train/loss_error": 0.43713971972465515, "train/loss_total": 0.423257976770401 }, { "epoch": 3.037135987176062, "step": 11368, "train/loss_ctc": 0.761802613735199, "train/loss_error": 0.4966980814933777, "train/loss_total": 0.549718976020813 }, { "epoch": 3.037403152551429, "step": 11369, "train/loss_ctc": 1.40067458152771, "train/loss_error": 0.4549810290336609, "train/loss_total": 0.6441197395324707 }, { "epoch": 3.0376703179267968, "grad_norm": 2.072246789932251, "learning_rate": 1.178519903820465e-05, "loss": 0.526, "step": 11370 }, { "epoch": 3.0376703179267968, "step": 11370, "train/loss_ctc": 0.6186339855194092, "train/loss_error": 0.4492781460285187, "train/loss_total": 0.48314934968948364 }, { "epoch": 3.037937483302164, "step": 11371, "train/loss_ctc": 0.657605767250061, "train/loss_error": 0.4209555983543396, "train/loss_total": 0.46828562021255493 }, { "epoch": 3.0382046486775316, "step": 11372, "train/loss_ctc": 1.4168264865875244, "train/loss_error": 0.375262975692749, "train/loss_total": 0.5835757255554199 }, { "epoch": 3.038471814052899, "step": 11373, "train/loss_ctc": 0.3281356692314148, "train/loss_error": 0.3951072692871094, "train/loss_total": 0.381712943315506 }, { "epoch": 
3.038738979428266, "step": 11374, "train/loss_ctc": 0.7044528722763062, "train/loss_error": 0.5084770321846008, "train/loss_total": 0.5476722121238708 }, { "epoch": 3.0390061448036336, "step": 11375, "train/loss_ctc": 0.4750557839870453, "train/loss_error": 0.43126150965690613, "train/loss_total": 0.4400203824043274 }, { "epoch": 3.039273310179001, "step": 11376, "train/loss_ctc": 0.38698333501815796, "train/loss_error": 0.4139110743999481, "train/loss_total": 0.4085255265235901 }, { "epoch": 3.039540475554368, "step": 11377, "train/loss_ctc": 0.5018138289451599, "train/loss_error": 0.41486793756484985, "train/loss_total": 0.43225714564323425 }, { "epoch": 3.0398076409297357, "step": 11378, "train/loss_ctc": 0.4403516948223114, "train/loss_error": 0.4302413761615753, "train/loss_total": 0.43226343393325806 }, { "epoch": 3.040074806305103, "step": 11379, "train/loss_ctc": 1.1892189979553223, "train/loss_error": 0.4545002281665802, "train/loss_total": 0.6014440059661865 }, { "epoch": 3.04034197168047, "grad_norm": 1.4414327144622803, "learning_rate": 1.1769169115682608e-05, "loss": 0.4779, "step": 11380 }, { "epoch": 3.04034197168047, "step": 11380, "train/loss_ctc": 0.4813176393508911, "train/loss_error": 0.487474650144577, "train/loss_total": 0.48624324798583984 }, { "epoch": 3.0406091370558377, "step": 11381, "train/loss_ctc": 0.7801649570465088, "train/loss_error": 0.4310404360294342, "train/loss_total": 0.5008653402328491 }, { "epoch": 3.040876302431205, "step": 11382, "train/loss_ctc": 1.0058979988098145, "train/loss_error": 0.5036266446113586, "train/loss_total": 0.6040809154510498 }, { "epoch": 3.041143467806572, "step": 11383, "train/loss_ctc": 1.0887839794158936, "train/loss_error": 0.45669564604759216, "train/loss_total": 0.5831133127212524 }, { "epoch": 3.0414106331819397, "step": 11384, "train/loss_ctc": 1.0868349075317383, "train/loss_error": 0.47479790449142456, "train/loss_total": 0.5972052812576294 }, { "epoch": 3.041677798557307, "step": 11385, 
"train/loss_ctc": 0.5221692323684692, "train/loss_error": 0.46123746037483215, "train/loss_total": 0.4734238386154175 }, { "epoch": 3.041944963932674, "step": 11386, "train/loss_ctc": 1.001741647720337, "train/loss_error": 0.46197572350502014, "train/loss_total": 0.5699289441108704 }, { "epoch": 3.0422121293080417, "step": 11387, "train/loss_ctc": 0.33075273036956787, "train/loss_error": 0.43024858832359314, "train/loss_total": 0.41034942865371704 }, { "epoch": 3.042479294683409, "step": 11388, "train/loss_ctc": 0.9720563888549805, "train/loss_error": 0.4632794260978699, "train/loss_total": 0.5650348663330078 }, { "epoch": 3.0427464600587766, "step": 11389, "train/loss_ctc": 0.34920841455459595, "train/loss_error": 0.4551745355129242, "train/loss_total": 0.433981329202652 }, { "epoch": 3.0430136254341438, "grad_norm": 2.0440988540649414, "learning_rate": 1.1753139193160567e-05, "loss": 0.5224, "step": 11390 }, { "epoch": 3.0430136254341438, "step": 11390, "train/loss_ctc": 0.5040437579154968, "train/loss_error": 0.4526467025279999, "train/loss_total": 0.46292614936828613 }, { "epoch": 3.043280790809511, "step": 11391, "train/loss_ctc": 0.8153759241104126, "train/loss_error": 0.41895392537117004, "train/loss_total": 0.49823832511901855 }, { "epoch": 3.0435479561848786, "step": 11392, "train/loss_ctc": 1.1947219371795654, "train/loss_error": 0.4808049499988556, "train/loss_total": 0.6235883831977844 }, { "epoch": 3.043815121560246, "step": 11393, "train/loss_ctc": 0.4962776303291321, "train/loss_error": 0.4145667552947998, "train/loss_total": 0.4309089481830597 }, { "epoch": 3.044082286935613, "step": 11394, "train/loss_ctc": 0.3766053318977356, "train/loss_error": 0.5296357274055481, "train/loss_total": 0.49902966618537903 }, { "epoch": 3.0443494523109806, "step": 11395, "train/loss_ctc": 0.40509259700775146, "train/loss_error": 0.39796170592308044, "train/loss_total": 0.3993878960609436 }, { "epoch": 3.044616617686348, "step": 11396, "train/loss_ctc": 
1.0250284671783447, "train/loss_error": 0.4465068578720093, "train/loss_total": 0.5622111558914185 }, { "epoch": 3.044883783061715, "step": 11397, "train/loss_ctc": 0.5938258171081543, "train/loss_error": 0.4634793698787689, "train/loss_total": 0.4895486831665039 }, { "epoch": 3.0451509484370827, "step": 11398, "train/loss_ctc": 0.7386594414710999, "train/loss_error": 0.48794490098953247, "train/loss_total": 0.538087785243988 }, { "epoch": 3.04541811381245, "step": 11399, "train/loss_ctc": 0.7955898642539978, "train/loss_error": 0.4476754665374756, "train/loss_total": 0.51725834608078 }, { "epoch": 3.045685279187817, "grad_norm": 2.728590965270996, "learning_rate": 1.1737109270638525e-05, "loss": 0.5021, "step": 11400 }, { "epoch": 3.045685279187817, "step": 11400, "train/loss_ctc": 0.6229573488235474, "train/loss_error": 0.42929747700691223, "train/loss_total": 0.4680294692516327 }, { "epoch": 3.0459524445631847, "step": 11401, "train/loss_ctc": 0.6657439470291138, "train/loss_error": 0.46719563007354736, "train/loss_total": 0.5069053173065186 }, { "epoch": 3.046219609938552, "step": 11402, "train/loss_ctc": 0.7981189489364624, "train/loss_error": 0.4316256046295166, "train/loss_total": 0.5049242973327637 }, { "epoch": 3.0464867753139195, "step": 11403, "train/loss_ctc": 1.2822591066360474, "train/loss_error": 0.4523576498031616, "train/loss_total": 0.6183379292488098 }, { "epoch": 3.0467539406892867, "step": 11404, "train/loss_ctc": 0.637930691242218, "train/loss_error": 0.5225216746330261, "train/loss_total": 0.5456035137176514 }, { "epoch": 3.047021106064654, "step": 11405, "train/loss_ctc": 0.4888503849506378, "train/loss_error": 0.46785953640937805, "train/loss_total": 0.4720577001571655 }, { "epoch": 3.0472882714400216, "step": 11406, "train/loss_ctc": 1.3972433805465698, "train/loss_error": 0.4935837984085083, "train/loss_total": 0.6743156909942627 }, { "epoch": 3.0475554368153888, "step": 11407, "train/loss_ctc": 0.8507654666900635, "train/loss_error": 
0.38841569423675537, "train/loss_total": 0.48088568449020386 }, { "epoch": 3.047822602190756, "step": 11408, "train/loss_ctc": 1.2314720153808594, "train/loss_error": 0.5082639455795288, "train/loss_total": 0.6529055833816528 }, { "epoch": 3.0480897675661236, "step": 11409, "train/loss_ctc": 0.5088034868240356, "train/loss_error": 0.3972996473312378, "train/loss_total": 0.4196004271507263 }, { "epoch": 3.048356932941491, "grad_norm": 3.121084451675415, "learning_rate": 1.1721079348116483e-05, "loss": 0.5344, "step": 11410 }, { "epoch": 3.048356932941491, "step": 11410, "train/loss_ctc": 1.1243339776992798, "train/loss_error": 0.5172396302223206, "train/loss_total": 0.6386585235595703 }, { "epoch": 3.048624098316858, "step": 11411, "train/loss_ctc": 0.4958730638027191, "train/loss_error": 0.4672633707523346, "train/loss_total": 0.47298532724380493 }, { "epoch": 3.0488912636922256, "step": 11412, "train/loss_ctc": 0.8222485780715942, "train/loss_error": 0.4207456707954407, "train/loss_total": 0.5010462999343872 }, { "epoch": 3.049158429067593, "step": 11413, "train/loss_ctc": 1.3327831029891968, "train/loss_error": 0.47470638155937195, "train/loss_total": 0.6463217735290527 }, { "epoch": 3.04942559444296, "step": 11414, "train/loss_ctc": 0.382310152053833, "train/loss_error": 0.4051031172275543, "train/loss_total": 0.40054452419281006 }, { "epoch": 3.0496927598183277, "step": 11415, "train/loss_ctc": 0.8644015192985535, "train/loss_error": 0.39615440368652344, "train/loss_total": 0.48980385065078735 }, { "epoch": 3.049959925193695, "step": 11416, "train/loss_ctc": 0.8370405435562134, "train/loss_error": 0.4891780614852905, "train/loss_total": 0.558750569820404 }, { "epoch": 3.050227090569062, "step": 11417, "train/loss_ctc": 0.7756315469741821, "train/loss_error": 0.40723466873168945, "train/loss_total": 0.48091405630111694 }, { "epoch": 3.0504942559444297, "step": 11418, "train/loss_ctc": 0.40559321641921997, "train/loss_error": 0.48713064193725586, 
"train/loss_total": 0.47082316875457764 }, { "epoch": 3.050761421319797, "step": 11419, "train/loss_ctc": 0.5955952405929565, "train/loss_error": 0.4897049069404602, "train/loss_total": 0.5108829736709595 }, { "epoch": 3.0510285866951645, "grad_norm": 1.4101392030715942, "learning_rate": 1.1705049425594443e-05, "loss": 0.5171, "step": 11420 }, { "epoch": 3.0510285866951645, "step": 11420, "train/loss_ctc": 0.8215817213058472, "train/loss_error": 0.43064257502555847, "train/loss_total": 0.5088304281234741 }, { "epoch": 3.0512957520705317, "step": 11421, "train/loss_ctc": 0.4430217742919922, "train/loss_error": 0.5020784139633179, "train/loss_total": 0.4902670979499817 }, { "epoch": 3.051562917445899, "step": 11422, "train/loss_ctc": 0.44044822454452515, "train/loss_error": 0.4312673509120941, "train/loss_total": 0.4331035315990448 }, { "epoch": 3.0518300828212666, "step": 11423, "train/loss_ctc": 0.46472734212875366, "train/loss_error": 0.45841777324676514, "train/loss_total": 0.4596797227859497 }, { "epoch": 3.0520972481966337, "step": 11424, "train/loss_ctc": 0.4018704295158386, "train/loss_error": 0.48095807433128357, "train/loss_total": 0.46514055132865906 }, { "epoch": 3.052364413572001, "step": 11425, "train/loss_ctc": 0.6873083114624023, "train/loss_error": 0.4195845425128937, "train/loss_total": 0.4731293022632599 }, { "epoch": 3.0526315789473686, "step": 11426, "train/loss_ctc": 1.1571084260940552, "train/loss_error": 0.41976025700569153, "train/loss_total": 0.5672299265861511 }, { "epoch": 3.052898744322736, "step": 11427, "train/loss_ctc": 0.3809378147125244, "train/loss_error": 0.5054876208305359, "train/loss_total": 0.48057764768600464 }, { "epoch": 3.053165909698103, "step": 11428, "train/loss_ctc": 0.4378175735473633, "train/loss_error": 0.42138898372650146, "train/loss_total": 0.42467471957206726 }, { "epoch": 3.0534330750734706, "step": 11429, "train/loss_ctc": 1.1186498403549194, "train/loss_error": 0.5062462091445923, "train/loss_total": 
0.6287269592285156 }, { "epoch": 3.053700240448838, "grad_norm": 1.9099678993225098, "learning_rate": 1.1689019503072403e-05, "loss": 0.4931, "step": 11430 }, { "epoch": 3.053700240448838, "step": 11430, "train/loss_ctc": 0.40830475091934204, "train/loss_error": 0.4938093423843384, "train/loss_total": 0.47670844197273254 }, { "epoch": 3.053967405824205, "step": 11431, "train/loss_ctc": 0.8658658266067505, "train/loss_error": 0.44795963168144226, "train/loss_total": 0.5315408706665039 }, { "epoch": 3.0542345711995726, "step": 11432, "train/loss_ctc": 0.7113559246063232, "train/loss_error": 0.45030638575553894, "train/loss_total": 0.5025162696838379 }, { "epoch": 3.05450173657494, "step": 11433, "train/loss_ctc": 0.2979828715324402, "train/loss_error": 0.4766996502876282, "train/loss_total": 0.4409562945365906 }, { "epoch": 3.054768901950307, "step": 11434, "train/loss_ctc": 0.381166934967041, "train/loss_error": 0.418211430311203, "train/loss_total": 0.41080254316329956 }, { "epoch": 3.0550360673256747, "step": 11435, "train/loss_ctc": 0.7817611694335938, "train/loss_error": 0.43746304512023926, "train/loss_total": 0.5063226819038391 }, { "epoch": 3.055303232701042, "step": 11436, "train/loss_ctc": 0.6751746535301208, "train/loss_error": 0.3816995322704315, "train/loss_total": 0.4403945803642273 }, { "epoch": 3.0555703980764095, "step": 11437, "train/loss_ctc": 0.15932071208953857, "train/loss_error": 0.43637821078300476, "train/loss_total": 0.3809667229652405 }, { "epoch": 3.0558375634517767, "step": 11438, "train/loss_ctc": 1.0335988998413086, "train/loss_error": 0.439022421836853, "train/loss_total": 0.557937741279602 }, { "epoch": 3.056104728827144, "step": 11439, "train/loss_ctc": 0.895363986492157, "train/loss_error": 0.5413715839385986, "train/loss_total": 0.6121701002120972 }, { "epoch": 3.0563718942025115, "grad_norm": 3.1473538875579834, "learning_rate": 1.1672989580550361e-05, "loss": 0.486, "step": 11440 }, { "epoch": 3.0563718942025115, "step": 11440, 
"train/loss_ctc": 0.4571439027786255, "train/loss_error": 0.40899696946144104, "train/loss_total": 0.4186263680458069 }, { "epoch": 3.0566390595778787, "step": 11441, "train/loss_ctc": 0.550282895565033, "train/loss_error": 0.4725382924079895, "train/loss_total": 0.4880872070789337 }, { "epoch": 3.056906224953246, "step": 11442, "train/loss_ctc": 1.0484668016433716, "train/loss_error": 0.4753398895263672, "train/loss_total": 0.589965283870697 }, { "epoch": 3.0571733903286136, "step": 11443, "train/loss_ctc": 0.40673762559890747, "train/loss_error": 0.3917836844921112, "train/loss_total": 0.394774466753006 }, { "epoch": 3.0574405557039808, "step": 11444, "train/loss_ctc": 0.6650072336196899, "train/loss_error": 0.4033322036266327, "train/loss_total": 0.4556672275066376 }, { "epoch": 3.057707721079348, "step": 11445, "train/loss_ctc": 1.1947495937347412, "train/loss_error": 0.4975353479385376, "train/loss_total": 0.6369782090187073 }, { "epoch": 3.0579748864547156, "step": 11446, "train/loss_ctc": 0.88724684715271, "train/loss_error": 0.495094895362854, "train/loss_total": 0.5735253095626831 }, { "epoch": 3.058242051830083, "step": 11447, "train/loss_ctc": 0.21663354337215424, "train/loss_error": 0.4424370527267456, "train/loss_total": 0.3972763419151306 }, { "epoch": 3.05850921720545, "step": 11448, "train/loss_ctc": 0.6612153053283691, "train/loss_error": 0.479046493768692, "train/loss_total": 0.5154802799224854 }, { "epoch": 3.0587763825808176, "step": 11449, "train/loss_ctc": 0.3107442259788513, "train/loss_error": 0.4620782136917114, "train/loss_total": 0.4318114221096039 }, { "epoch": 3.059043547956185, "grad_norm": 2.2264251708984375, "learning_rate": 1.1656959658028321e-05, "loss": 0.4902, "step": 11450 }, { "epoch": 3.059043547956185, "step": 11450, "train/loss_ctc": 0.8722098469734192, "train/loss_error": 0.49360817670822144, "train/loss_total": 0.5693285465240479 }, { "epoch": 3.059310713331552, "step": 11451, "train/loss_ctc": 0.29957810044288635, 
"train/loss_error": 0.41516217589378357, "train/loss_total": 0.39204537868499756 }, { "epoch": 3.0595778787069197, "step": 11452, "train/loss_ctc": 0.3400224447250366, "train/loss_error": 0.4529116153717041, "train/loss_total": 0.43033379316329956 }, { "epoch": 3.059845044082287, "step": 11453, "train/loss_ctc": 1.4771122932434082, "train/loss_error": 0.5036166310310364, "train/loss_total": 0.6983157396316528 }, { "epoch": 3.0601122094576545, "step": 11454, "train/loss_ctc": 0.7529693841934204, "train/loss_error": 0.479805588722229, "train/loss_total": 0.5344383716583252 }, { "epoch": 3.0603793748330217, "step": 11455, "train/loss_ctc": 1.1117000579833984, "train/loss_error": 0.4777202606201172, "train/loss_total": 0.6045162677764893 }, { "epoch": 3.060646540208389, "step": 11456, "train/loss_ctc": 0.665833592414856, "train/loss_error": 0.40459147095680237, "train/loss_total": 0.456839919090271 }, { "epoch": 3.0609137055837565, "step": 11457, "train/loss_ctc": 0.6064062118530273, "train/loss_error": 0.4196947515010834, "train/loss_total": 0.4570370316505432 }, { "epoch": 3.0611808709591237, "step": 11458, "train/loss_ctc": 0.8154212832450867, "train/loss_error": 0.5049229264259338, "train/loss_total": 0.5670226216316223 }, { "epoch": 3.061448036334491, "step": 11459, "train/loss_ctc": 0.5404691696166992, "train/loss_error": 0.5077934861183167, "train/loss_total": 0.51432865858078 }, { "epoch": 3.0617152017098586, "grad_norm": 3.076049327850342, "learning_rate": 1.1640929735506279e-05, "loss": 0.5224, "step": 11460 }, { "epoch": 3.0617152017098586, "step": 11460, "train/loss_ctc": 0.656430184841156, "train/loss_error": 0.4448460638523102, "train/loss_total": 0.48716288805007935 }, { "epoch": 3.0619823670852258, "step": 11461, "train/loss_ctc": 1.2597628831863403, "train/loss_error": 0.4429311156272888, "train/loss_total": 0.606297492980957 }, { "epoch": 3.062249532460593, "step": 11462, "train/loss_ctc": 0.7000841498374939, "train/loss_error": 0.44016769528388977, 
"train/loss_total": 0.49215102195739746 }, { "epoch": 3.0625166978359606, "step": 11463, "train/loss_ctc": 0.7355701923370361, "train/loss_error": 0.4342643916606903, "train/loss_total": 0.4945255517959595 }, { "epoch": 3.062783863211328, "step": 11464, "train/loss_ctc": 0.41304612159729004, "train/loss_error": 0.4218929409980774, "train/loss_total": 0.4201236069202423 }, { "epoch": 3.063051028586695, "step": 11465, "train/loss_ctc": 1.0591306686401367, "train/loss_error": 0.44059523940086365, "train/loss_total": 0.5643023252487183 }, { "epoch": 3.0633181939620626, "step": 11466, "train/loss_ctc": 0.7653936147689819, "train/loss_error": 0.44333377480506897, "train/loss_total": 0.5077457427978516 }, { "epoch": 3.06358535933743, "step": 11467, "train/loss_ctc": 0.46429845690727234, "train/loss_error": 0.4169587790966034, "train/loss_total": 0.4264267086982727 }, { "epoch": 3.063852524712797, "step": 11468, "train/loss_ctc": 0.885484516620636, "train/loss_error": 0.46499961614608765, "train/loss_total": 0.5490965843200684 }, { "epoch": 3.0641196900881646, "step": 11469, "train/loss_ctc": 1.2282249927520752, "train/loss_error": 0.46925532817840576, "train/loss_total": 0.6210492849349976 }, { "epoch": 3.064386855463532, "grad_norm": 1.7257847785949707, "learning_rate": 1.1624899812984237e-05, "loss": 0.5169, "step": 11470 }, { "epoch": 3.064386855463532, "step": 11470, "train/loss_ctc": 0.4367047846317291, "train/loss_error": 0.4298097491264343, "train/loss_total": 0.43118876218795776 }, { "epoch": 3.0646540208388995, "step": 11471, "train/loss_ctc": 1.020094871520996, "train/loss_error": 0.459576278924942, "train/loss_total": 0.5716800093650818 }, { "epoch": 3.0649211862142667, "step": 11472, "train/loss_ctc": 0.7708989381790161, "train/loss_error": 0.5271748304367065, "train/loss_total": 0.5759196877479553 }, { "epoch": 3.065188351589634, "step": 11473, "train/loss_ctc": 0.31719768047332764, "train/loss_error": 0.3762291967868805, "train/loss_total": 
0.36442291736602783 }, { "epoch": 3.0654555169650015, "step": 11474, "train/loss_ctc": 1.0674480199813843, "train/loss_error": 0.43705081939697266, "train/loss_total": 0.563130259513855 }, { "epoch": 3.0657226823403687, "step": 11475, "train/loss_ctc": 1.2822256088256836, "train/loss_error": 0.4237668514251709, "train/loss_total": 0.5954586267471313 }, { "epoch": 3.065989847715736, "step": 11476, "train/loss_ctc": 0.6742242574691772, "train/loss_error": 0.4219643175601959, "train/loss_total": 0.47241634130477905 }, { "epoch": 3.0662570130911035, "step": 11477, "train/loss_ctc": 0.9566041827201843, "train/loss_error": 0.4331423044204712, "train/loss_total": 0.537834644317627 }, { "epoch": 3.0665241784664707, "step": 11478, "train/loss_ctc": 0.9606063365936279, "train/loss_error": 0.41600632667541504, "train/loss_total": 0.5249263048171997 }, { "epoch": 3.066791343841838, "step": 11479, "train/loss_ctc": 0.4499266445636749, "train/loss_error": 0.5333570837974548, "train/loss_total": 0.5166710019111633 }, { "epoch": 3.0670585092172056, "grad_norm": 2.4195027351379395, "learning_rate": 1.1608869890462197e-05, "loss": 0.5154, "step": 11480 }, { "epoch": 3.0670585092172056, "step": 11480, "train/loss_ctc": 0.40990009903907776, "train/loss_error": 0.4714416265487671, "train/loss_total": 0.4591333270072937 }, { "epoch": 3.0673256745925728, "step": 11481, "train/loss_ctc": 0.5196866989135742, "train/loss_error": 0.4131728410720825, "train/loss_total": 0.4344756007194519 }, { "epoch": 3.06759283996794, "step": 11482, "train/loss_ctc": 0.8569447994232178, "train/loss_error": 0.41293472051620483, "train/loss_total": 0.5017367601394653 }, { "epoch": 3.0678600053433076, "step": 11483, "train/loss_ctc": 0.7092339396476746, "train/loss_error": 0.3939632177352905, "train/loss_total": 0.45701736211776733 }, { "epoch": 3.068127170718675, "step": 11484, "train/loss_ctc": 0.5300837755203247, "train/loss_error": 0.521365225315094, "train/loss_total": 0.523108959197998 }, { "epoch": 
3.068394336094042, "step": 11485, "train/loss_ctc": 0.7892458438873291, "train/loss_error": 0.5111563205718994, "train/loss_total": 0.5667742490768433 }, { "epoch": 3.0686615014694096, "step": 11486, "train/loss_ctc": 1.1446466445922852, "train/loss_error": 0.4356502294540405, "train/loss_total": 0.5774495005607605 }, { "epoch": 3.068928666844777, "step": 11487, "train/loss_ctc": 1.0262526273727417, "train/loss_error": 0.4165465235710144, "train/loss_total": 0.5384877920150757 }, { "epoch": 3.0691958322201445, "step": 11488, "train/loss_ctc": 0.3284480571746826, "train/loss_error": 0.4609178900718689, "train/loss_total": 0.43442395329475403 }, { "epoch": 3.0694629975955117, "step": 11489, "train/loss_ctc": 0.40933096408843994, "train/loss_error": 0.4348335564136505, "train/loss_total": 0.4297330677509308 }, { "epoch": 3.069730162970879, "grad_norm": 2.1386594772338867, "learning_rate": 1.1592839967940155e-05, "loss": 0.4922, "step": 11490 }, { "epoch": 3.069730162970879, "step": 11490, "train/loss_ctc": 0.841184675693512, "train/loss_error": 0.4407435953617096, "train/loss_total": 0.520831823348999 }, { "epoch": 3.0699973283462465, "step": 11491, "train/loss_ctc": 0.7696795463562012, "train/loss_error": 0.3691708743572235, "train/loss_total": 0.44927260279655457 }, { "epoch": 3.0702644937216137, "step": 11492, "train/loss_ctc": 0.9041467308998108, "train/loss_error": 0.42143189907073975, "train/loss_total": 0.517974853515625 }, { "epoch": 3.070531659096981, "step": 11493, "train/loss_ctc": 1.0869698524475098, "train/loss_error": 0.49936434626579285, "train/loss_total": 0.6168854832649231 }, { "epoch": 3.0707988244723485, "step": 11494, "train/loss_ctc": 1.0635937452316284, "train/loss_error": 0.4607064127922058, "train/loss_total": 0.5812839269638062 }, { "epoch": 3.0710659898477157, "step": 11495, "train/loss_ctc": 0.2690989673137665, "train/loss_error": 0.504052996635437, "train/loss_total": 0.4570622146129608 }, { "epoch": 3.071333155223083, "step": 11496, 
"train/loss_ctc": 0.42061948776245117, "train/loss_error": 0.3824761211872101, "train/loss_total": 0.3901048004627228 }, { "epoch": 3.0716003205984506, "step": 11497, "train/loss_ctc": 0.6007688045501709, "train/loss_error": 0.48957088589668274, "train/loss_total": 0.5118104815483093 }, { "epoch": 3.0718674859738178, "step": 11498, "train/loss_ctc": 0.40936243534088135, "train/loss_error": 0.46706491708755493, "train/loss_total": 0.4555244445800781 }, { "epoch": 3.072134651349185, "step": 11499, "train/loss_ctc": 0.9831442832946777, "train/loss_error": 0.37921199202537537, "train/loss_total": 0.49999845027923584 }, { "epoch": 3.0724018167245526, "grad_norm": 2.0572657585144043, "learning_rate": 1.1576810045418113e-05, "loss": 0.5001, "step": 11500 }, { "epoch": 3.0724018167245526, "step": 11500, "train/loss_ctc": 1.0746569633483887, "train/loss_error": 0.4561666250228882, "train/loss_total": 0.5798647403717041 }, { "epoch": 3.07266898209992, "step": 11501, "train/loss_ctc": 1.1221604347229004, "train/loss_error": 0.42590779066085815, "train/loss_total": 0.5651583075523376 }, { "epoch": 3.0729361474752874, "step": 11502, "train/loss_ctc": 1.1987252235412598, "train/loss_error": 0.41693341732025146, "train/loss_total": 0.5732917785644531 }, { "epoch": 3.0732033128506546, "step": 11503, "train/loss_ctc": 0.5696067810058594, "train/loss_error": 0.41299277544021606, "train/loss_total": 0.4443156123161316 }, { "epoch": 3.073470478226022, "step": 11504, "train/loss_ctc": 0.6459083557128906, "train/loss_error": 0.4491194486618042, "train/loss_total": 0.4884772300720215 }, { "epoch": 3.0737376436013895, "step": 11505, "train/loss_ctc": 0.6208330392837524, "train/loss_error": 0.4693901538848877, "train/loss_total": 0.49967873096466064 }, { "epoch": 3.0740048089767567, "step": 11506, "train/loss_ctc": 0.41777196526527405, "train/loss_error": 0.40699467062950134, "train/loss_total": 0.4091501235961914 }, { "epoch": 3.074271974352124, "step": 11507, "train/loss_ctc": 
1.1365480422973633, "train/loss_error": 0.5438821911811829, "train/loss_total": 0.6624153852462769 }, { "epoch": 3.0745391397274915, "step": 11508, "train/loss_ctc": 0.7244378328323364, "train/loss_error": 0.4650672674179077, "train/loss_total": 0.5169414281845093 }, { "epoch": 3.0748063051028587, "step": 11509, "train/loss_ctc": 0.46026936173439026, "train/loss_error": 0.40059733390808105, "train/loss_total": 0.4125317335128784 }, { "epoch": 3.075073470478226, "grad_norm": 2.381082773208618, "learning_rate": 1.1560780122896073e-05, "loss": 0.5152, "step": 11510 }, { "epoch": 3.075073470478226, "step": 11510, "train/loss_ctc": 0.6183760166168213, "train/loss_error": 0.46897709369659424, "train/loss_total": 0.49885687232017517 }, { "epoch": 3.0753406358535935, "step": 11511, "train/loss_ctc": 0.5734661221504211, "train/loss_error": 0.432391881942749, "train/loss_total": 0.46060672402381897 }, { "epoch": 3.0756078012289607, "step": 11512, "train/loss_ctc": 0.4299081563949585, "train/loss_error": 0.5081294775009155, "train/loss_total": 0.4924852252006531 }, { "epoch": 3.075874966604328, "step": 11513, "train/loss_ctc": 0.34699416160583496, "train/loss_error": 0.425121933221817, "train/loss_total": 0.40949636697769165 }, { "epoch": 3.0761421319796955, "step": 11514, "train/loss_ctc": 1.1965422630310059, "train/loss_error": 0.42139020562171936, "train/loss_total": 0.5764206051826477 }, { "epoch": 3.0764092973550627, "step": 11515, "train/loss_ctc": 1.0156316757202148, "train/loss_error": 0.4437831938266754, "train/loss_total": 0.5581529140472412 }, { "epoch": 3.07667646273043, "step": 11516, "train/loss_ctc": 0.3332822024822235, "train/loss_error": 0.44063571095466614, "train/loss_total": 0.4191650152206421 }, { "epoch": 3.0769436281057976, "step": 11517, "train/loss_ctc": 0.9328931570053101, "train/loss_error": 0.4727451503276825, "train/loss_total": 0.564774751663208 }, { "epoch": 3.0772107934811648, "step": 11518, "train/loss_ctc": 0.9793573617935181, 
"train/loss_error": 0.508543848991394, "train/loss_total": 0.6027065515518188 }, { "epoch": 3.077477958856532, "step": 11519, "train/loss_ctc": 0.9561454653739929, "train/loss_error": 0.475280225276947, "train/loss_total": 0.5714532732963562 }, { "epoch": 3.0777451242318996, "grad_norm": 1.8201464414596558, "learning_rate": 1.1544750200374033e-05, "loss": 0.5154, "step": 11520 }, { "epoch": 3.0777451242318996, "step": 11520, "train/loss_ctc": 0.7614153623580933, "train/loss_error": 0.49767428636550903, "train/loss_total": 0.5504225492477417 }, { "epoch": 3.078012289607267, "step": 11521, "train/loss_ctc": 0.41823601722717285, "train/loss_error": 0.41168826818466187, "train/loss_total": 0.412997841835022 }, { "epoch": 3.0782794549826344, "step": 11522, "train/loss_ctc": 0.789257287979126, "train/loss_error": 0.4780494272708893, "train/loss_total": 0.5402910113334656 }, { "epoch": 3.0785466203580016, "step": 11523, "train/loss_ctc": 0.9219741821289062, "train/loss_error": 0.46849432587623596, "train/loss_total": 0.5591902732849121 }, { "epoch": 3.078813785733369, "step": 11524, "train/loss_ctc": 0.9451338052749634, "train/loss_error": 0.452114075422287, "train/loss_total": 0.5507180094718933 }, { "epoch": 3.0790809511087365, "step": 11525, "train/loss_ctc": 0.3149503171443939, "train/loss_error": 0.4292853772640228, "train/loss_total": 0.4064183831214905 }, { "epoch": 3.0793481164841037, "step": 11526, "train/loss_ctc": 0.5319844484329224, "train/loss_error": 0.461887925863266, "train/loss_total": 0.47590723633766174 }, { "epoch": 3.079615281859471, "step": 11527, "train/loss_ctc": 0.512904942035675, "train/loss_error": 0.36386439204216003, "train/loss_total": 0.39367252588272095 }, { "epoch": 3.0798824472348385, "step": 11528, "train/loss_ctc": 1.2613121271133423, "train/loss_error": 0.44174784421920776, "train/loss_total": 0.6056607365608215 }, { "epoch": 3.0801496126102057, "step": 11529, "train/loss_ctc": 1.1781532764434814, "train/loss_error": 
0.4201164245605469, "train/loss_total": 0.5717238187789917 }, { "epoch": 3.080416777985573, "grad_norm": 2.234261989593506, "learning_rate": 1.152872027785199e-05, "loss": 0.5067, "step": 11530 }, { "epoch": 3.080416777985573, "step": 11530, "train/loss_ctc": 0.3689565360546112, "train/loss_error": 0.43686532974243164, "train/loss_total": 0.42328357696533203 }, { "epoch": 3.0806839433609405, "step": 11531, "train/loss_ctc": 0.8509715795516968, "train/loss_error": 0.47459742426872253, "train/loss_total": 0.5498722791671753 }, { "epoch": 3.0809511087363077, "step": 11532, "train/loss_ctc": 0.5161333680152893, "train/loss_error": 0.49904704093933105, "train/loss_total": 0.5024642944335938 }, { "epoch": 3.081218274111675, "step": 11533, "train/loss_ctc": 0.9636836051940918, "train/loss_error": 0.41098731756210327, "train/loss_total": 0.521526575088501 }, { "epoch": 3.0814854394870426, "step": 11534, "train/loss_ctc": 0.27512621879577637, "train/loss_error": 0.4584719240665436, "train/loss_total": 0.4218027889728546 }, { "epoch": 3.0817526048624098, "step": 11535, "train/loss_ctc": 0.7893093824386597, "train/loss_error": 0.4148629903793335, "train/loss_total": 0.48975229263305664 }, { "epoch": 3.0820197702377774, "step": 11536, "train/loss_ctc": 0.21599769592285156, "train/loss_error": 0.44978538155555725, "train/loss_total": 0.40302786231040955 }, { "epoch": 3.0822869356131446, "step": 11537, "train/loss_ctc": 0.9771036505699158, "train/loss_error": 0.4184664785861969, "train/loss_total": 0.5301939249038696 }, { "epoch": 3.082554100988512, "step": 11538, "train/loss_ctc": 0.2276250422000885, "train/loss_error": 0.47743165493011475, "train/loss_total": 0.4274703562259674 }, { "epoch": 3.0828212663638794, "step": 11539, "train/loss_ctc": 0.36235255002975464, "train/loss_error": 0.4464172124862671, "train/loss_total": 0.42960429191589355 }, { "epoch": 3.0830884317392466, "grad_norm": 2.352034091949463, "learning_rate": 1.151269035532995e-05, "loss": 0.4699, "step": 11540 
}, { "epoch": 3.0830884317392466, "step": 11540, "train/loss_ctc": 0.5179377794265747, "train/loss_error": 0.44271120429039, "train/loss_total": 0.45775654911994934 }, { "epoch": 3.083355597114614, "step": 11541, "train/loss_ctc": 0.5534816980361938, "train/loss_error": 0.4375712275505066, "train/loss_total": 0.46075332164764404 }, { "epoch": 3.0836227624899815, "step": 11542, "train/loss_ctc": 0.5987241268157959, "train/loss_error": 0.47071653604507446, "train/loss_total": 0.4963180720806122 }, { "epoch": 3.0838899278653487, "step": 11543, "train/loss_ctc": 0.7692326903343201, "train/loss_error": 0.4634222984313965, "train/loss_total": 0.5245844125747681 }, { "epoch": 3.084157093240716, "step": 11544, "train/loss_ctc": 0.653434693813324, "train/loss_error": 0.4248996675014496, "train/loss_total": 0.4706066846847534 }, { "epoch": 3.0844242586160835, "step": 11545, "train/loss_ctc": 0.5496844053268433, "train/loss_error": 0.41146737337112427, "train/loss_total": 0.43911078572273254 }, { "epoch": 3.0846914239914507, "step": 11546, "train/loss_ctc": 0.5275071263313293, "train/loss_error": 0.40697208046913147, "train/loss_total": 0.43107908964157104 }, { "epoch": 3.084958589366818, "step": 11547, "train/loss_ctc": 0.9781777858734131, "train/loss_error": 0.42125341296195984, "train/loss_total": 0.5326383113861084 }, { "epoch": 3.0852257547421855, "step": 11548, "train/loss_ctc": 1.2746450901031494, "train/loss_error": 0.42610597610473633, "train/loss_total": 0.5958138108253479 }, { "epoch": 3.0854929201175527, "step": 11549, "train/loss_ctc": 0.7226063013076782, "train/loss_error": 0.4640178382396698, "train/loss_total": 0.5157355070114136 }, { "epoch": 3.08576008549292, "grad_norm": 1.5406579971313477, "learning_rate": 1.1496660432807909e-05, "loss": 0.4924, "step": 11550 }, { "epoch": 3.08576008549292, "step": 11550, "train/loss_ctc": 0.560295581817627, "train/loss_error": 0.4307123124599457, "train/loss_total": 0.4566289782524109 }, { "epoch": 3.0860272508682876, 
"step": 11551, "train/loss_ctc": 1.7544180154800415, "train/loss_error": 0.4417438209056854, "train/loss_total": 0.7042787075042725 }, { "epoch": 3.0862944162436547, "step": 11552, "train/loss_ctc": 0.7588018178939819, "train/loss_error": 0.4409707486629486, "train/loss_total": 0.5045369863510132 }, { "epoch": 3.0865615816190224, "step": 11553, "train/loss_ctc": 0.5415660738945007, "train/loss_error": 0.5200197696685791, "train/loss_total": 0.5243290662765503 }, { "epoch": 3.0868287469943896, "step": 11554, "train/loss_ctc": 1.8912287950515747, "train/loss_error": 0.4353379011154175, "train/loss_total": 0.7265161275863647 }, { "epoch": 3.087095912369757, "step": 11555, "train/loss_ctc": 0.49194371700286865, "train/loss_error": 0.42907342314720154, "train/loss_total": 0.441647469997406 }, { "epoch": 3.0873630777451244, "step": 11556, "train/loss_ctc": 0.41818222403526306, "train/loss_error": 0.45228809118270874, "train/loss_total": 0.44546693563461304 }, { "epoch": 3.0876302431204916, "step": 11557, "train/loss_ctc": 0.8004881739616394, "train/loss_error": 0.4417482018470764, "train/loss_total": 0.5134962201118469 }, { "epoch": 3.087897408495859, "step": 11558, "train/loss_ctc": 1.4717671871185303, "train/loss_error": 0.49992460012435913, "train/loss_total": 0.6942931413650513 }, { "epoch": 3.0881645738712264, "step": 11559, "train/loss_ctc": 0.4946051239967346, "train/loss_error": 0.4562121331691742, "train/loss_total": 0.4638907313346863 }, { "epoch": 3.0884317392465936, "grad_norm": 2.8721301555633545, "learning_rate": 1.1480630510285867e-05, "loss": 0.5475, "step": 11560 }, { "epoch": 3.0884317392465936, "step": 11560, "train/loss_ctc": 0.6517618298530579, "train/loss_error": 0.3979126214981079, "train/loss_total": 0.4486824870109558 }, { "epoch": 3.088698904621961, "step": 11561, "train/loss_ctc": 0.39706602692604065, "train/loss_error": 0.39258337020874023, "train/loss_total": 0.3934799134731293 }, { "epoch": 3.0889660699973285, "step": 11562, 
"train/loss_ctc": 0.24375030398368835, "train/loss_error": 0.42856767773628235, "train/loss_total": 0.3916042149066925 }, { "epoch": 3.0892332353726957, "step": 11563, "train/loss_ctc": 0.7871105074882507, "train/loss_error": 0.48785191774368286, "train/loss_total": 0.5477036237716675 }, { "epoch": 3.089500400748063, "step": 11564, "train/loss_ctc": 0.7547698020935059, "train/loss_error": 0.4406057596206665, "train/loss_total": 0.5034385919570923 }, { "epoch": 3.0897675661234305, "step": 11565, "train/loss_ctc": 0.4392539858818054, "train/loss_error": 0.40392184257507324, "train/loss_total": 0.41098830103874207 }, { "epoch": 3.0900347314987977, "step": 11566, "train/loss_ctc": 1.2616301774978638, "train/loss_error": 0.4645882844924927, "train/loss_total": 0.6239966750144958 }, { "epoch": 3.090301896874165, "step": 11567, "train/loss_ctc": 1.047396183013916, "train/loss_error": 0.44414278864860535, "train/loss_total": 0.5647934675216675 }, { "epoch": 3.0905690622495325, "step": 11568, "train/loss_ctc": 1.1069141626358032, "train/loss_error": 0.4737034738063812, "train/loss_total": 0.6003456115722656 }, { "epoch": 3.0908362276248997, "step": 11569, "train/loss_ctc": 0.3534996509552002, "train/loss_error": 0.4293016195297241, "train/loss_total": 0.4141412377357483 }, { "epoch": 3.0911033930002674, "grad_norm": 2.197781562805176, "learning_rate": 1.1464600587763826e-05, "loss": 0.4899, "step": 11570 }, { "epoch": 3.0911033930002674, "step": 11570, "train/loss_ctc": 0.6769522428512573, "train/loss_error": 0.4011586904525757, "train/loss_total": 0.4563174247741699 }, { "epoch": 3.0913705583756346, "step": 11571, "train/loss_ctc": 0.9130305051803589, "train/loss_error": 0.48747745156288147, "train/loss_total": 0.5725880861282349 }, { "epoch": 3.0916377237510018, "step": 11572, "train/loss_ctc": 0.45990458130836487, "train/loss_error": 0.4831346571445465, "train/loss_total": 0.47848862409591675 }, { "epoch": 3.0919048891263694, "step": 11573, "train/loss_ctc": 
0.9876621961593628, "train/loss_error": 0.5251892805099487, "train/loss_total": 0.6176838874816895 }, { "epoch": 3.0921720545017366, "step": 11574, "train/loss_ctc": 0.7188223600387573, "train/loss_error": 0.4449924826622009, "train/loss_total": 0.4997584819793701 }, { "epoch": 3.092439219877104, "step": 11575, "train/loss_ctc": 0.7175727486610413, "train/loss_error": 0.48109474778175354, "train/loss_total": 0.5283903479576111 }, { "epoch": 3.0927063852524714, "step": 11576, "train/loss_ctc": 0.7527338266372681, "train/loss_error": 0.4340307414531708, "train/loss_total": 0.49777138233184814 }, { "epoch": 3.0929735506278386, "step": 11577, "train/loss_ctc": 0.42435216903686523, "train/loss_error": 0.3796875774860382, "train/loss_total": 0.3886204957962036 }, { "epoch": 3.093240716003206, "step": 11578, "train/loss_ctc": 0.45614734292030334, "train/loss_error": 0.45502495765686035, "train/loss_total": 0.4552494287490845 }, { "epoch": 3.0935078813785735, "step": 11579, "train/loss_ctc": 0.4194401204586029, "train/loss_error": 0.4839823544025421, "train/loss_total": 0.4710739254951477 }, { "epoch": 3.0937750467539407, "grad_norm": 3.5809834003448486, "learning_rate": 1.1448570665241784e-05, "loss": 0.4966, "step": 11580 }, { "epoch": 3.0937750467539407, "step": 11580, "train/loss_ctc": 1.1930378675460815, "train/loss_error": 0.38161829113960266, "train/loss_total": 0.5439022183418274 }, { "epoch": 3.094042212129308, "step": 11581, "train/loss_ctc": 0.6027334332466125, "train/loss_error": 0.41434386372566223, "train/loss_total": 0.4520218074321747 }, { "epoch": 3.0943093775046755, "step": 11582, "train/loss_ctc": 0.9463626146316528, "train/loss_error": 0.539252519607544, "train/loss_total": 0.6206745505332947 }, { "epoch": 3.0945765428800427, "step": 11583, "train/loss_ctc": 0.29891884326934814, "train/loss_error": 0.4526280462741852, "train/loss_total": 0.4218862056732178 }, { "epoch": 3.09484370825541, "step": 11584, "train/loss_ctc": 0.21434584259986877, 
"train/loss_error": 0.4399975836277008, "train/loss_total": 0.3948672413825989 }, { "epoch": 3.0951108736307775, "step": 11585, "train/loss_ctc": 0.5894161462783813, "train/loss_error": 0.48891448974609375, "train/loss_total": 0.5090148448944092 }, { "epoch": 3.0953780390061447, "step": 11586, "train/loss_ctc": 0.1097182184457779, "train/loss_error": 0.40138861536979675, "train/loss_total": 0.34305453300476074 }, { "epoch": 3.0956452043815124, "step": 11587, "train/loss_ctc": 0.38952764868736267, "train/loss_error": 0.46052929759025574, "train/loss_total": 0.4463289678096771 }, { "epoch": 3.0959123697568796, "step": 11588, "train/loss_ctc": 1.0326452255249023, "train/loss_error": 0.4233996868133545, "train/loss_total": 0.545248806476593 }, { "epoch": 3.0961795351322468, "step": 11589, "train/loss_ctc": 0.6406701803207397, "train/loss_error": 0.45610371232032776, "train/loss_total": 0.4930170178413391 }, { "epoch": 3.0964467005076144, "grad_norm": 3.3143470287323, "learning_rate": 1.1432540742719743e-05, "loss": 0.477, "step": 11590 }, { "epoch": 3.0964467005076144, "step": 11590, "train/loss_ctc": 1.2105464935302734, "train/loss_error": 0.41957083344459534, "train/loss_total": 0.577765941619873 }, { "epoch": 3.0967138658829816, "step": 11591, "train/loss_ctc": 0.767854630947113, "train/loss_error": 0.4363945424556732, "train/loss_total": 0.5026865601539612 }, { "epoch": 3.096981031258349, "step": 11592, "train/loss_ctc": 0.7838965058326721, "train/loss_error": 0.4711665213108063, "train/loss_total": 0.5337125062942505 }, { "epoch": 3.0972481966337164, "step": 11593, "train/loss_ctc": 0.45176905393600464, "train/loss_error": 0.433971107006073, "train/loss_total": 0.4375306963920593 }, { "epoch": 3.0975153620090836, "step": 11594, "train/loss_ctc": 0.5491477251052856, "train/loss_error": 0.4444245994091034, "train/loss_total": 0.46536922454833984 }, { "epoch": 3.097782527384451, "step": 11595, "train/loss_ctc": 0.4364473223686218, "train/loss_error": 
0.4651881754398346, "train/loss_total": 0.4594399929046631 }, { "epoch": 3.0980496927598185, "step": 11596, "train/loss_ctc": 0.4979405999183655, "train/loss_error": 0.38412269949913025, "train/loss_total": 0.4068862795829773 }, { "epoch": 3.0983168581351856, "step": 11597, "train/loss_ctc": 0.6866458654403687, "train/loss_error": 0.4426334798336029, "train/loss_total": 0.4914359450340271 }, { "epoch": 3.098584023510553, "step": 11598, "train/loss_ctc": 0.46000635623931885, "train/loss_error": 0.41643309593200684, "train/loss_total": 0.42514777183532715 }, { "epoch": 3.0988511888859205, "step": 11599, "train/loss_ctc": 0.36178481578826904, "train/loss_error": 0.4416981339454651, "train/loss_total": 0.42571547627449036 }, { "epoch": 3.0991183542612877, "grad_norm": 3.0959742069244385, "learning_rate": 1.1416510820197702e-05, "loss": 0.4726, "step": 11600 }, { "epoch": 3.0991183542612877, "step": 11600, "train/loss_ctc": 0.8593149185180664, "train/loss_error": 0.4131571054458618, "train/loss_total": 0.5023887157440186 }, { "epoch": 3.0993855196366553, "step": 11601, "train/loss_ctc": 0.5380281209945679, "train/loss_error": 0.45736968517303467, "train/loss_total": 0.47350138425827026 }, { "epoch": 3.0996526850120225, "step": 11602, "train/loss_ctc": 0.5941776037216187, "train/loss_error": 0.4358348846435547, "train/loss_total": 0.4675034284591675 }, { "epoch": 3.0999198503873897, "step": 11603, "train/loss_ctc": 0.3807544410228729, "train/loss_error": 0.4557991921901703, "train/loss_total": 0.4407902657985687 }, { "epoch": 3.1001870157627573, "step": 11604, "train/loss_ctc": 0.6955533027648926, "train/loss_error": 0.46931853890419006, "train/loss_total": 0.5145655274391174 }, { "epoch": 3.1004541811381245, "step": 11605, "train/loss_ctc": 0.5562222003936768, "train/loss_error": 0.425639808177948, "train/loss_total": 0.4517562985420227 }, { "epoch": 3.1007213465134917, "step": 11606, "train/loss_ctc": 1.6855590343475342, "train/loss_error": 0.5000545978546143, 
"train/loss_total": 0.7371554970741272 }, { "epoch": 3.1009885118888594, "step": 11607, "train/loss_ctc": 0.502546489238739, "train/loss_error": 0.3837467133998871, "train/loss_total": 0.40750670433044434 }, { "epoch": 3.1012556772642266, "step": 11608, "train/loss_ctc": 0.4562285244464874, "train/loss_error": 0.47058042883872986, "train/loss_total": 0.46771004796028137 }, { "epoch": 3.1015228426395938, "step": 11609, "train/loss_ctc": 0.6971436738967896, "train/loss_error": 0.46150222420692444, "train/loss_total": 0.5086305141448975 }, { "epoch": 3.1017900080149614, "grad_norm": 1.6350096464157104, "learning_rate": 1.1400480897675662e-05, "loss": 0.4972, "step": 11610 }, { "epoch": 3.1017900080149614, "step": 11610, "train/loss_ctc": 0.4118577837944031, "train/loss_error": 0.3973177671432495, "train/loss_total": 0.40022578835487366 }, { "epoch": 3.1020571733903286, "step": 11611, "train/loss_ctc": 0.6116161942481995, "train/loss_error": 0.4834611117839813, "train/loss_total": 0.5090921521186829 }, { "epoch": 3.102324338765696, "step": 11612, "train/loss_ctc": 0.5878700017929077, "train/loss_error": 0.4283498227596283, "train/loss_total": 0.46025386452674866 }, { "epoch": 3.1025915041410634, "step": 11613, "train/loss_ctc": 0.6926581859588623, "train/loss_error": 0.43209290504455566, "train/loss_total": 0.484205961227417 }, { "epoch": 3.1028586695164306, "step": 11614, "train/loss_ctc": 0.5573731660842896, "train/loss_error": 0.49807047843933105, "train/loss_total": 0.5099310278892517 }, { "epoch": 3.103125834891798, "step": 11615, "train/loss_ctc": 0.7399284839630127, "train/loss_error": 0.4672386050224304, "train/loss_total": 0.521776556968689 }, { "epoch": 3.1033930002671655, "step": 11616, "train/loss_ctc": 0.6000020503997803, "train/loss_error": 0.44044429063796997, "train/loss_total": 0.4723558723926544 }, { "epoch": 3.1036601656425327, "step": 11617, "train/loss_ctc": 0.505056619644165, "train/loss_error": 0.39041030406951904, "train/loss_total": 
0.4133395850658417 }, { "epoch": 3.1039273310179, "step": 11618, "train/loss_ctc": 0.5587292313575745, "train/loss_error": 0.4420972764492035, "train/loss_total": 0.46542370319366455 }, { "epoch": 3.1041944963932675, "step": 11619, "train/loss_ctc": 0.5495851635932922, "train/loss_error": 0.4216362237930298, "train/loss_total": 0.44722601771354675 }, { "epoch": 3.1044616617686347, "grad_norm": 1.946277379989624, "learning_rate": 1.138445097515362e-05, "loss": 0.4684, "step": 11620 }, { "epoch": 3.1044616617686347, "step": 11620, "train/loss_ctc": 0.605115532875061, "train/loss_error": 0.38586246967315674, "train/loss_total": 0.42971310019493103 }, { "epoch": 3.1047288271440023, "step": 11621, "train/loss_ctc": 0.40724775195121765, "train/loss_error": 0.49034765362739563, "train/loss_total": 0.4737277030944824 }, { "epoch": 3.1049959925193695, "step": 11622, "train/loss_ctc": 0.5359123945236206, "train/loss_error": 0.4269174635410309, "train/loss_total": 0.4487164616584778 }, { "epoch": 3.1052631578947367, "step": 11623, "train/loss_ctc": 0.48176735639572144, "train/loss_error": 0.4400123655796051, "train/loss_total": 0.44836336374282837 }, { "epoch": 3.1055303232701044, "step": 11624, "train/loss_ctc": 0.5253440737724304, "train/loss_error": 0.4866273105144501, "train/loss_total": 0.494370698928833 }, { "epoch": 3.1057974886454716, "step": 11625, "train/loss_ctc": 0.7114479541778564, "train/loss_error": 0.3734630048274994, "train/loss_total": 0.44106000661849976 }, { "epoch": 3.1060646540208388, "step": 11626, "train/loss_ctc": 0.28663164377212524, "train/loss_error": 0.499420166015625, "train/loss_total": 0.4568624496459961 }, { "epoch": 3.1063318193962064, "step": 11627, "train/loss_ctc": 0.8862175345420837, "train/loss_error": 0.4452403485774994, "train/loss_total": 0.5334358215332031 }, { "epoch": 3.1065989847715736, "step": 11628, "train/loss_ctc": 0.3278127610683441, "train/loss_error": 0.39735040068626404, "train/loss_total": 0.38344287872314453 }, { 
"epoch": 3.106866150146941, "step": 11629, "train/loss_ctc": 0.46478763222694397, "train/loss_error": 0.43986111879348755, "train/loss_total": 0.44484642148017883 }, { "epoch": 3.1071333155223084, "grad_norm": 1.819520354270935, "learning_rate": 1.136842105263158e-05, "loss": 0.4555, "step": 11630 }, { "epoch": 3.1071333155223084, "step": 11630, "train/loss_ctc": 1.0135222673416138, "train/loss_error": 0.42886924743652344, "train/loss_total": 0.5457998514175415 }, { "epoch": 3.1074004808976756, "step": 11631, "train/loss_ctc": 0.8948732614517212, "train/loss_error": 0.4888608157634735, "train/loss_total": 0.5700633525848389 }, { "epoch": 3.107667646273043, "step": 11632, "train/loss_ctc": 1.0184133052825928, "train/loss_error": 0.4811556041240692, "train/loss_total": 0.5886071920394897 }, { "epoch": 3.1079348116484105, "step": 11633, "train/loss_ctc": 0.6039732694625854, "train/loss_error": 0.4249124228954315, "train/loss_total": 0.4607245922088623 }, { "epoch": 3.1082019770237777, "step": 11634, "train/loss_ctc": 1.181898832321167, "train/loss_error": 0.5631884336471558, "train/loss_total": 0.6869305372238159 }, { "epoch": 3.1084691423991453, "step": 11635, "train/loss_ctc": 0.5549570322036743, "train/loss_error": 0.4570233225822449, "train/loss_total": 0.47661009430885315 }, { "epoch": 3.1087363077745125, "step": 11636, "train/loss_ctc": 0.18032650649547577, "train/loss_error": 0.4341353178024292, "train/loss_total": 0.38337355852127075 }, { "epoch": 3.1090034731498797, "step": 11637, "train/loss_ctc": 0.8761662244796753, "train/loss_error": 0.4622774124145508, "train/loss_total": 0.5450551509857178 }, { "epoch": 3.1092706385252473, "step": 11638, "train/loss_ctc": 0.754635214805603, "train/loss_error": 0.3956718146800995, "train/loss_total": 0.46746450662612915 }, { "epoch": 3.1095378039006145, "step": 11639, "train/loss_ctc": 0.3387014865875244, "train/loss_error": 0.42879924178123474, "train/loss_total": 0.4107796847820282 }, { "epoch": 3.1098049692759817, 
"grad_norm": 1.886471152305603, "learning_rate": 1.1352391130109538e-05, "loss": 0.5135, "step": 11640 }, { "epoch": 3.1098049692759817, "step": 11640, "train/loss_ctc": 0.494570791721344, "train/loss_error": 0.4875029921531677, "train/loss_total": 0.4889165759086609 }, { "epoch": 3.1100721346513494, "step": 11641, "train/loss_ctc": 0.537600040435791, "train/loss_error": 0.44615718722343445, "train/loss_total": 0.4644457697868347 }, { "epoch": 3.1103393000267165, "step": 11642, "train/loss_ctc": 0.4205414950847626, "train/loss_error": 0.46046480536460876, "train/loss_total": 0.45248013734817505 }, { "epoch": 3.1106064654020837, "step": 11643, "train/loss_ctc": 0.885600209236145, "train/loss_error": 0.4752245843410492, "train/loss_total": 0.5572997331619263 }, { "epoch": 3.1108736307774514, "step": 11644, "train/loss_ctc": 0.494884192943573, "train/loss_error": 0.5072188973426819, "train/loss_total": 0.504751980304718 }, { "epoch": 3.1111407961528186, "step": 11645, "train/loss_ctc": 0.42690569162368774, "train/loss_error": 0.44222399592399597, "train/loss_total": 0.4391603469848633 }, { "epoch": 3.1114079615281858, "step": 11646, "train/loss_ctc": 0.7582005262374878, "train/loss_error": 0.4636455774307251, "train/loss_total": 0.5225565433502197 }, { "epoch": 3.1116751269035534, "step": 11647, "train/loss_ctc": 0.4536416530609131, "train/loss_error": 0.4530843198299408, "train/loss_total": 0.45319581031799316 }, { "epoch": 3.1119422922789206, "step": 11648, "train/loss_ctc": 0.5546037554740906, "train/loss_error": 0.443887323141098, "train/loss_total": 0.46603062748908997 }, { "epoch": 3.112209457654288, "step": 11649, "train/loss_ctc": 1.4849306344985962, "train/loss_error": 0.4330848753452301, "train/loss_total": 0.6434540748596191 }, { "epoch": 3.1124766230296554, "grad_norm": 2.8695387840270996, "learning_rate": 1.1336361207587496e-05, "loss": 0.4992, "step": 11650 }, { "epoch": 3.1124766230296554, "step": 11650, "train/loss_ctc": 0.460463285446167, 
"train/loss_error": 0.4207749664783478, "train/loss_total": 0.4287126362323761 }, { "epoch": 3.1127437884050226, "step": 11651, "train/loss_ctc": 1.3142937421798706, "train/loss_error": 0.5358772873878479, "train/loss_total": 0.6915606260299683 }, { "epoch": 3.11301095378039, "step": 11652, "train/loss_ctc": 0.9075807929039001, "train/loss_error": 0.4527701735496521, "train/loss_total": 0.5437322854995728 }, { "epoch": 3.1132781191557575, "step": 11653, "train/loss_ctc": 0.4232017397880554, "train/loss_error": 0.4801698923110962, "train/loss_total": 0.46877628564834595 }, { "epoch": 3.1135452845311247, "step": 11654, "train/loss_ctc": 0.5914822220802307, "train/loss_error": 0.410234659910202, "train/loss_total": 0.44648417830467224 }, { "epoch": 3.1138124499064923, "step": 11655, "train/loss_ctc": 0.30710768699645996, "train/loss_error": 0.4498961865901947, "train/loss_total": 0.4213384985923767 }, { "epoch": 3.1140796152818595, "step": 11656, "train/loss_ctc": 0.9221329689025879, "train/loss_error": 0.46717262268066406, "train/loss_total": 0.5581647157669067 }, { "epoch": 3.1143467806572267, "step": 11657, "train/loss_ctc": 0.45061594247817993, "train/loss_error": 0.48793914914131165, "train/loss_total": 0.4804745316505432 }, { "epoch": 3.1146139460325943, "step": 11658, "train/loss_ctc": 0.7739700078964233, "train/loss_error": 0.4286121726036072, "train/loss_total": 0.4976837635040283 }, { "epoch": 3.1148811114079615, "step": 11659, "train/loss_ctc": 0.6756571531295776, "train/loss_error": 0.45309290289878845, "train/loss_total": 0.49760574102401733 }, { "epoch": 3.1151482767833287, "grad_norm": 1.4198445081710815, "learning_rate": 1.1320331285065456e-05, "loss": 0.5035, "step": 11660 }, { "epoch": 3.1151482767833287, "step": 11660, "train/loss_ctc": 0.7502255439758301, "train/loss_error": 0.40760672092437744, "train/loss_total": 0.47613048553466797 }, { "epoch": 3.1154154421586964, "step": 11661, "train/loss_ctc": 0.4492233991622925, "train/loss_error": 
0.41022831201553345, "train/loss_total": 0.4180273413658142 }, { "epoch": 3.1156826075340636, "step": 11662, "train/loss_ctc": 0.9808403849601746, "train/loss_error": 0.47627872228622437, "train/loss_total": 0.5771910548210144 }, { "epoch": 3.1159497729094308, "step": 11663, "train/loss_ctc": 1.2667194604873657, "train/loss_error": 0.4170096218585968, "train/loss_total": 0.5869516134262085 }, { "epoch": 3.1162169382847984, "step": 11664, "train/loss_ctc": 1.0221288204193115, "train/loss_error": 0.4353904724121094, "train/loss_total": 0.5527381300926208 }, { "epoch": 3.1164841036601656, "step": 11665, "train/loss_ctc": 0.4628404676914215, "train/loss_error": 0.46189701557159424, "train/loss_total": 0.4620857238769531 }, { "epoch": 3.116751269035533, "step": 11666, "train/loss_ctc": 0.9464111924171448, "train/loss_error": 0.48174846172332764, "train/loss_total": 0.5746810436248779 }, { "epoch": 3.1170184344109004, "step": 11667, "train/loss_ctc": 0.3191874921321869, "train/loss_error": 0.4729674756526947, "train/loss_total": 0.44221147894859314 }, { "epoch": 3.1172855997862676, "step": 11668, "train/loss_ctc": 0.692747950553894, "train/loss_error": 0.5404031872749329, "train/loss_total": 0.5708721876144409 }, { "epoch": 3.1175527651616353, "step": 11669, "train/loss_ctc": 0.44231417775154114, "train/loss_error": 0.39565062522888184, "train/loss_total": 0.4049833416938782 }, { "epoch": 3.1178199305370025, "grad_norm": 2.453460216522217, "learning_rate": 1.1304301362543414e-05, "loss": 0.5066, "step": 11670 }, { "epoch": 3.1178199305370025, "step": 11670, "train/loss_ctc": 0.6880106925964355, "train/loss_error": 0.4844006299972534, "train/loss_total": 0.5251226425170898 }, { "epoch": 3.1180870959123697, "step": 11671, "train/loss_ctc": 0.7011467218399048, "train/loss_error": 0.4471009373664856, "train/loss_total": 0.49791011214256287 }, { "epoch": 3.1183542612877373, "step": 11672, "train/loss_ctc": 0.6434309482574463, "train/loss_error": 0.41435378789901733, 
"train/loss_total": 0.4601692259311676 }, { "epoch": 3.1186214266631045, "step": 11673, "train/loss_ctc": 0.5547769069671631, "train/loss_error": 0.41863977909088135, "train/loss_total": 0.4458672106266022 }, { "epoch": 3.1188885920384717, "step": 11674, "train/loss_ctc": 0.4011855125427246, "train/loss_error": 0.4343234896659851, "train/loss_total": 0.4276959300041199 }, { "epoch": 3.1191557574138393, "step": 11675, "train/loss_ctc": 0.8932366371154785, "train/loss_error": 0.49019262194633484, "train/loss_total": 0.5708014369010925 }, { "epoch": 3.1194229227892065, "step": 11676, "train/loss_ctc": 0.43753400444984436, "train/loss_error": 0.5224747061729431, "train/loss_total": 0.5054866075515747 }, { "epoch": 3.1196900881645737, "step": 11677, "train/loss_ctc": 1.1006097793579102, "train/loss_error": 0.4816683828830719, "train/loss_total": 0.6054566502571106 }, { "epoch": 3.1199572535399414, "step": 11678, "train/loss_ctc": 0.9674040079116821, "train/loss_error": 0.5041884779930115, "train/loss_total": 0.5968316197395325 }, { "epoch": 3.1202244189153086, "step": 11679, "train/loss_ctc": 0.9286960363388062, "train/loss_error": 0.3553764522075653, "train/loss_total": 0.47004038095474243 }, { "epoch": 3.1204915842906757, "grad_norm": 3.59970760345459, "learning_rate": 1.1288271440021374e-05, "loss": 0.5105, "step": 11680 }, { "epoch": 3.1204915842906757, "step": 11680, "train/loss_ctc": 1.1522302627563477, "train/loss_error": 0.435608446598053, "train/loss_total": 0.5789328217506409 }, { "epoch": 3.1207587496660434, "step": 11681, "train/loss_ctc": 1.5524876117706299, "train/loss_error": 0.41511350870132446, "train/loss_total": 0.6425883769989014 }, { "epoch": 3.1210259150414106, "step": 11682, "train/loss_ctc": 0.37920504808425903, "train/loss_error": 0.4261043071746826, "train/loss_total": 0.41672447323799133 }, { "epoch": 3.1212930804167778, "step": 11683, "train/loss_ctc": 0.46821969747543335, "train/loss_error": 0.3924347758293152, "train/loss_total": 
0.4075917601585388 }, { "epoch": 3.1215602457921454, "step": 11684, "train/loss_ctc": 0.2790277898311615, "train/loss_error": 0.42512303590774536, "train/loss_total": 0.395904004573822 }, { "epoch": 3.1218274111675126, "step": 11685, "train/loss_ctc": 0.6259545087814331, "train/loss_error": 0.34628698229789734, "train/loss_total": 0.4022204875946045 }, { "epoch": 3.1220945765428803, "step": 11686, "train/loss_ctc": 0.6548036336898804, "train/loss_error": 0.38033410906791687, "train/loss_total": 0.43522801995277405 }, { "epoch": 3.1223617419182474, "step": 11687, "train/loss_ctc": 0.7717779874801636, "train/loss_error": 0.4302198588848114, "train/loss_total": 0.4985315203666687 }, { "epoch": 3.1226289072936146, "step": 11688, "train/loss_ctc": 0.33097994327545166, "train/loss_error": 0.44144493341445923, "train/loss_total": 0.4193519353866577 }, { "epoch": 3.1228960726689823, "step": 11689, "train/loss_ctc": 0.8084028363227844, "train/loss_error": 0.49496591091156006, "train/loss_total": 0.5576533079147339 }, { "epoch": 3.1231632380443495, "grad_norm": 2.591456651687622, "learning_rate": 1.1272241517499332e-05, "loss": 0.4755, "step": 11690 }, { "epoch": 3.1231632380443495, "step": 11690, "train/loss_ctc": 1.2936749458312988, "train/loss_error": 0.5460155606269836, "train/loss_total": 0.6955474615097046 }, { "epoch": 3.1234304034197167, "step": 11691, "train/loss_ctc": 0.5503196120262146, "train/loss_error": 0.4551926851272583, "train/loss_total": 0.47421807050704956 }, { "epoch": 3.1236975687950843, "step": 11692, "train/loss_ctc": 1.0317819118499756, "train/loss_error": 0.46551889181137085, "train/loss_total": 0.5787715315818787 }, { "epoch": 3.1239647341704515, "step": 11693, "train/loss_ctc": 0.6025595664978027, "train/loss_error": 0.44335275888442993, "train/loss_total": 0.47519412636756897 }, { "epoch": 3.1242318995458187, "step": 11694, "train/loss_ctc": 0.9212068319320679, "train/loss_error": 0.561125636100769, "train/loss_total": 0.6331418752670288 }, { 
"epoch": 3.1244990649211863, "step": 11695, "train/loss_ctc": 0.7674411535263062, "train/loss_error": 0.43702298402786255, "train/loss_total": 0.5031066536903381 }, { "epoch": 3.1247662302965535, "step": 11696, "train/loss_ctc": 0.5834795832633972, "train/loss_error": 0.3863324820995331, "train/loss_total": 0.4257619082927704 }, { "epoch": 3.1250333956719207, "step": 11697, "train/loss_ctc": 0.48335325717926025, "train/loss_error": 0.49468058347702026, "train/loss_total": 0.4924151301383972 }, { "epoch": 3.1253005610472884, "step": 11698, "train/loss_ctc": 1.2725498676300049, "train/loss_error": 0.438689649105072, "train/loss_total": 0.6054617166519165 }, { "epoch": 3.1255677264226556, "step": 11699, "train/loss_ctc": 0.878506064414978, "train/loss_error": 0.43232205510139465, "train/loss_total": 0.5215588808059692 }, { "epoch": 3.125834891798023, "grad_norm": 2.8683648109436035, "learning_rate": 1.1256211594977292e-05, "loss": 0.5405, "step": 11700 }, { "epoch": 3.125834891798023, "step": 11700, "train/loss_ctc": 0.4692371189594269, "train/loss_error": 0.4014599025249481, "train/loss_total": 0.4150153398513794 }, { "epoch": 3.1261020571733904, "step": 11701, "train/loss_ctc": 0.29877254366874695, "train/loss_error": 0.4395968019962311, "train/loss_total": 0.4114319682121277 }, { "epoch": 3.1263692225487576, "step": 11702, "train/loss_ctc": 0.2747320532798767, "train/loss_error": 0.41331300139427185, "train/loss_total": 0.3855968415737152 }, { "epoch": 3.1266363879241252, "step": 11703, "train/loss_ctc": 0.2848528325557709, "train/loss_error": 0.4358865022659302, "train/loss_total": 0.40567976236343384 }, { "epoch": 3.1269035532994924, "step": 11704, "train/loss_ctc": 0.878119707107544, "train/loss_error": 0.44531500339508057, "train/loss_total": 0.5318759679794312 }, { "epoch": 3.1271707186748596, "step": 11705, "train/loss_ctc": 0.34979888796806335, "train/loss_error": 0.43289414048194885, "train/loss_total": 0.41627511382102966 }, { "epoch": 3.1274378840502273, 
"step": 11706, "train/loss_ctc": 0.9136751890182495, "train/loss_error": 0.41357818245887756, "train/loss_total": 0.5135976076126099 }, { "epoch": 3.1277050494255945, "step": 11707, "train/loss_ctc": 0.7279467582702637, "train/loss_error": 0.43578076362609863, "train/loss_total": 0.4942139685153961 }, { "epoch": 3.1279722148009617, "step": 11708, "train/loss_ctc": 1.1770819425582886, "train/loss_error": 0.42138171195983887, "train/loss_total": 0.5725217461585999 }, { "epoch": 3.1282393801763293, "step": 11709, "train/loss_ctc": 0.5035673379898071, "train/loss_error": 0.38936421275138855, "train/loss_total": 0.4122048318386078 }, { "epoch": 3.1285065455516965, "grad_norm": 1.4283714294433594, "learning_rate": 1.1240181672455251e-05, "loss": 0.4558, "step": 11710 }, { "epoch": 3.1285065455516965, "step": 11710, "train/loss_ctc": 0.7954369783401489, "train/loss_error": 0.4886888563632965, "train/loss_total": 0.5500385165214539 }, { "epoch": 3.1287737109270637, "step": 11711, "train/loss_ctc": 0.4586591422557831, "train/loss_error": 0.45377683639526367, "train/loss_total": 0.4547532796859741 }, { "epoch": 3.1290408763024313, "step": 11712, "train/loss_ctc": 1.1467101573944092, "train/loss_error": 0.4193422794342041, "train/loss_total": 0.564815878868103 }, { "epoch": 3.1293080416777985, "step": 11713, "train/loss_ctc": 0.6824730038642883, "train/loss_error": 0.44861841201782227, "train/loss_total": 0.49538934230804443 }, { "epoch": 3.1295752070531657, "step": 11714, "train/loss_ctc": 0.8064526319503784, "train/loss_error": 0.4961436688899994, "train/loss_total": 0.5582054853439331 }, { "epoch": 3.1298423724285334, "step": 11715, "train/loss_ctc": 0.9838207960128784, "train/loss_error": 0.4903131425380707, "train/loss_total": 0.5890146493911743 }, { "epoch": 3.1301095378039006, "step": 11716, "train/loss_ctc": 0.8579148650169373, "train/loss_error": 0.46346980333328247, "train/loss_total": 0.5423588156700134 }, { "epoch": 3.1303767031792677, "step": 11717, 
"train/loss_ctc": 0.881716251373291, "train/loss_error": 0.4581831097602844, "train/loss_total": 0.5428897142410278 }, { "epoch": 3.1306438685546354, "step": 11718, "train/loss_ctc": 0.9658225178718567, "train/loss_error": 0.4145486652851105, "train/loss_total": 0.5248034596443176 }, { "epoch": 3.1309110339300026, "step": 11719, "train/loss_ctc": 1.4977431297302246, "train/loss_error": 0.49780648946762085, "train/loss_total": 0.6977938413619995 }, { "epoch": 3.1311781993053702, "grad_norm": 4.672388076782227, "learning_rate": 1.122415174993321e-05, "loss": 0.552, "step": 11720 }, { "epoch": 3.1311781993053702, "step": 11720, "train/loss_ctc": 1.4965054988861084, "train/loss_error": 0.48353254795074463, "train/loss_total": 0.6861271858215332 }, { "epoch": 3.1314453646807374, "step": 11721, "train/loss_ctc": 1.2164138555526733, "train/loss_error": 0.4358470141887665, "train/loss_total": 0.5919603705406189 }, { "epoch": 3.1317125300561046, "step": 11722, "train/loss_ctc": 0.27061182260513306, "train/loss_error": 0.4251139461994171, "train/loss_total": 0.3942135274410248 }, { "epoch": 3.1319796954314723, "step": 11723, "train/loss_ctc": 1.1389281749725342, "train/loss_error": 0.43643835186958313, "train/loss_total": 0.5769363045692444 }, { "epoch": 3.1322468608068395, "step": 11724, "train/loss_ctc": 0.21529808640480042, "train/loss_error": 0.3635105490684509, "train/loss_total": 0.3338680565357208 }, { "epoch": 3.1325140261822066, "step": 11725, "train/loss_ctc": 0.5112981796264648, "train/loss_error": 0.44203445315361023, "train/loss_total": 0.45588719844818115 }, { "epoch": 3.1327811915575743, "step": 11726, "train/loss_ctc": 0.46965649724006653, "train/loss_error": 0.41780444979667664, "train/loss_total": 0.42817485332489014 }, { "epoch": 3.1330483569329415, "step": 11727, "train/loss_ctc": 0.7446503043174744, "train/loss_error": 0.4515995979309082, "train/loss_total": 0.5102097392082214 }, { "epoch": 3.1333155223083087, "step": 11728, "train/loss_ctc": 
0.7731795310974121, "train/loss_error": 0.4317570626735687, "train/loss_total": 0.5000416040420532 }, { "epoch": 3.1335826876836763, "step": 11729, "train/loss_ctc": 0.6492288112640381, "train/loss_error": 0.3866231441497803, "train/loss_total": 0.4391442835330963 }, { "epoch": 3.1338498530590435, "grad_norm": 2.0427029132843018, "learning_rate": 1.1208121827411168e-05, "loss": 0.4917, "step": 11730 }, { "epoch": 3.1338498530590435, "step": 11730, "train/loss_ctc": 1.1381862163543701, "train/loss_error": 0.4738474190235138, "train/loss_total": 0.606715202331543 }, { "epoch": 3.1341170184344107, "step": 11731, "train/loss_ctc": 0.8373711705207825, "train/loss_error": 0.476298451423645, "train/loss_total": 0.5485129952430725 }, { "epoch": 3.1343841838097783, "step": 11732, "train/loss_ctc": 0.5522565245628357, "train/loss_error": 0.3937995135784149, "train/loss_total": 0.4254909157752991 }, { "epoch": 3.1346513491851455, "step": 11733, "train/loss_ctc": 0.2851385772228241, "train/loss_error": 0.4313536584377289, "train/loss_total": 0.40211066603660583 }, { "epoch": 3.134918514560513, "step": 11734, "train/loss_ctc": 0.43291139602661133, "train/loss_error": 0.4018504023551941, "train/loss_total": 0.408062607049942 }, { "epoch": 3.1351856799358804, "step": 11735, "train/loss_ctc": 0.626040518283844, "train/loss_error": 0.40562519431114197, "train/loss_total": 0.4497082829475403 }, { "epoch": 3.1354528453112476, "step": 11736, "train/loss_ctc": 0.7608723044395447, "train/loss_error": 0.40205809473991394, "train/loss_total": 0.47382092475891113 }, { "epoch": 3.135720010686615, "step": 11737, "train/loss_ctc": 0.21482378244400024, "train/loss_error": 0.4131331145763397, "train/loss_total": 0.3734712600708008 }, { "epoch": 3.1359871760619824, "step": 11738, "train/loss_ctc": 0.8518862724304199, "train/loss_error": 0.3967980444431305, "train/loss_total": 0.4878157079219818 }, { "epoch": 3.1362543414373496, "step": 11739, "train/loss_ctc": 0.7370057106018066, 
"train/loss_error": 0.5040804743766785, "train/loss_total": 0.5506654977798462 }, { "epoch": 3.1365215068127172, "grad_norm": 5.5276408195495605, "learning_rate": 1.1192091904889127e-05, "loss": 0.4726, "step": 11740 }, { "epoch": 3.1365215068127172, "step": 11740, "train/loss_ctc": 0.3300783932209015, "train/loss_error": 0.4129785895347595, "train/loss_total": 0.3963985741138458 }, { "epoch": 3.1367886721880844, "step": 11741, "train/loss_ctc": 0.26761168241500854, "train/loss_error": 0.3807108700275421, "train/loss_total": 0.3580910563468933 }, { "epoch": 3.1370558375634516, "step": 11742, "train/loss_ctc": 1.0952579975128174, "train/loss_error": 0.48416459560394287, "train/loss_total": 0.6063833236694336 }, { "epoch": 3.1373230029388193, "step": 11743, "train/loss_ctc": 0.8320136666297913, "train/loss_error": 0.4743242561817169, "train/loss_total": 0.5458621382713318 }, { "epoch": 3.1375901683141865, "step": 11744, "train/loss_ctc": 0.4710410237312317, "train/loss_error": 0.4035089313983917, "train/loss_total": 0.4170153737068176 }, { "epoch": 3.1378573336895537, "step": 11745, "train/loss_ctc": 0.970967173576355, "train/loss_error": 0.5510967969894409, "train/loss_total": 0.6350708603858948 }, { "epoch": 3.1381244990649213, "step": 11746, "train/loss_ctc": 0.9024447202682495, "train/loss_error": 0.42061102390289307, "train/loss_total": 0.5169777870178223 }, { "epoch": 3.1383916644402885, "step": 11747, "train/loss_ctc": 0.7214028835296631, "train/loss_error": 0.4330379068851471, "train/loss_total": 0.49071091413497925 }, { "epoch": 3.1386588298156557, "step": 11748, "train/loss_ctc": 0.6076565980911255, "train/loss_error": 0.37746819853782654, "train/loss_total": 0.42350590229034424 }, { "epoch": 3.1389259951910233, "step": 11749, "train/loss_ctc": 0.6370093822479248, "train/loss_error": 0.45432427525520325, "train/loss_total": 0.49086129665374756 }, { "epoch": 3.1391931605663905, "grad_norm": 3.09045147895813, "learning_rate": 1.1176061982367085e-05, "loss": 
0.4881, "step": 11750 }, { "epoch": 3.1391931605663905, "step": 11750, "train/loss_ctc": 1.5289885997772217, "train/loss_error": 0.42945295572280884, "train/loss_total": 0.6493600606918335 }, { "epoch": 3.1394603259417577, "step": 11751, "train/loss_ctc": 0.8595930933952332, "train/loss_error": 0.4099747836589813, "train/loss_total": 0.49989843368530273 }, { "epoch": 3.1397274913171254, "step": 11752, "train/loss_ctc": 1.0753599405288696, "train/loss_error": 0.4146736264228821, "train/loss_total": 0.5468109250068665 }, { "epoch": 3.1399946566924926, "step": 11753, "train/loss_ctc": 0.5717860460281372, "train/loss_error": 0.4790048897266388, "train/loss_total": 0.49756112694740295 }, { "epoch": 3.14026182206786, "step": 11754, "train/loss_ctc": 1.2625603675842285, "train/loss_error": 0.4686203598976135, "train/loss_total": 0.6274083852767944 }, { "epoch": 3.1405289874432274, "step": 11755, "train/loss_ctc": 0.5733875632286072, "train/loss_error": 0.44186174869537354, "train/loss_total": 0.46816691756248474 }, { "epoch": 3.1407961528185946, "step": 11756, "train/loss_ctc": 0.9402494430541992, "train/loss_error": 0.4310539960861206, "train/loss_total": 0.5328931212425232 }, { "epoch": 3.1410633181939622, "step": 11757, "train/loss_ctc": 0.807148814201355, "train/loss_error": 0.3976902663707733, "train/loss_total": 0.4795819818973541 }, { "epoch": 3.1413304835693294, "step": 11758, "train/loss_ctc": 0.7075181603431702, "train/loss_error": 0.4182339906692505, "train/loss_total": 0.47609081864356995 }, { "epoch": 3.1415976489446966, "step": 11759, "train/loss_ctc": 0.35418057441711426, "train/loss_error": 0.4458962380886078, "train/loss_total": 0.42755311727523804 }, { "epoch": 3.1418648143200643, "grad_norm": 2.0752618312835693, "learning_rate": 1.1160032059845043e-05, "loss": 0.5205, "step": 11760 }, { "epoch": 3.1418648143200643, "step": 11760, "train/loss_ctc": 0.9963279962539673, "train/loss_error": 0.507477343082428, "train/loss_total": 0.6052474975585938 }, { 
"epoch": 3.1421319796954315, "step": 11761, "train/loss_ctc": 0.45805346965789795, "train/loss_error": 0.43108850717544556, "train/loss_total": 0.4364815056324005 }, { "epoch": 3.1423991450707986, "step": 11762, "train/loss_ctc": 0.8970775604248047, "train/loss_error": 0.43349796533584595, "train/loss_total": 0.5262138843536377 }, { "epoch": 3.1426663104461663, "step": 11763, "train/loss_ctc": 0.4657445549964905, "train/loss_error": 0.401308536529541, "train/loss_total": 0.4141957461833954 }, { "epoch": 3.1429334758215335, "step": 11764, "train/loss_ctc": 1.1593952178955078, "train/loss_error": 0.4821060001850128, "train/loss_total": 0.6175638437271118 }, { "epoch": 3.1432006411969007, "step": 11765, "train/loss_ctc": 1.2442593574523926, "train/loss_error": 0.5308915972709656, "train/loss_total": 0.673565149307251 }, { "epoch": 3.1434678065722683, "step": 11766, "train/loss_ctc": 0.7740970849990845, "train/loss_error": 0.38273605704307556, "train/loss_total": 0.4610082507133484 }, { "epoch": 3.1437349719476355, "step": 11767, "train/loss_ctc": 0.7091204524040222, "train/loss_error": 0.44386914372444153, "train/loss_total": 0.4969194233417511 }, { "epoch": 3.144002137323003, "step": 11768, "train/loss_ctc": 0.352914959192276, "train/loss_error": 0.5460714101791382, "train/loss_total": 0.5074401497840881 }, { "epoch": 3.1442693026983703, "step": 11769, "train/loss_ctc": 0.3428250551223755, "train/loss_error": 0.4859529137611389, "train/loss_total": 0.45732733607292175 }, { "epoch": 3.1445364680737375, "grad_norm": 2.129424571990967, "learning_rate": 1.1144002137323003e-05, "loss": 0.5196, "step": 11770 }, { "epoch": 3.1445364680737375, "step": 11770, "train/loss_ctc": 1.1114704608917236, "train/loss_error": 0.5369482636451721, "train/loss_total": 0.6518527269363403 }, { "epoch": 3.144803633449105, "step": 11771, "train/loss_ctc": 0.4421404004096985, "train/loss_error": 0.4165704846382141, "train/loss_total": 0.42168447375297546 }, { "epoch": 3.1450707988244724, 
"step": 11772, "train/loss_ctc": 0.6350060701370239, "train/loss_error": 0.46469447016716003, "train/loss_total": 0.4987567663192749 }, { "epoch": 3.1453379641998396, "step": 11773, "train/loss_ctc": 0.4153043031692505, "train/loss_error": 0.43773967027664185, "train/loss_total": 0.43325260281562805 }, { "epoch": 3.145605129575207, "step": 11774, "train/loss_ctc": 0.4440329670906067, "train/loss_error": 0.40919676423072815, "train/loss_total": 0.41616401076316833 }, { "epoch": 3.1458722949505744, "step": 11775, "train/loss_ctc": 0.6955652832984924, "train/loss_error": 0.4720321297645569, "train/loss_total": 0.516738772392273 }, { "epoch": 3.1461394603259416, "step": 11776, "train/loss_ctc": 0.5119526982307434, "train/loss_error": 0.4452449679374695, "train/loss_total": 0.45858651399612427 }, { "epoch": 3.1464066257013092, "step": 11777, "train/loss_ctc": 0.5801693797111511, "train/loss_error": 0.4059816300868988, "train/loss_total": 0.4408192038536072 }, { "epoch": 3.1466737910766764, "step": 11778, "train/loss_ctc": 1.6092348098754883, "train/loss_error": 0.4980125427246094, "train/loss_total": 0.720257043838501 }, { "epoch": 3.1469409564520436, "step": 11779, "train/loss_ctc": 0.3511446714401245, "train/loss_error": 0.47420769929885864, "train/loss_total": 0.4495950937271118 }, { "epoch": 3.1472081218274113, "grad_norm": 1.6476627588272095, "learning_rate": 1.1127972214800961e-05, "loss": 0.5008, "step": 11780 }, { "epoch": 3.1472081218274113, "step": 11780, "train/loss_ctc": 0.42516565322875977, "train/loss_error": 0.39733368158340454, "train/loss_total": 0.4029000997543335 }, { "epoch": 3.1474752872027785, "step": 11781, "train/loss_ctc": 1.7566331624984741, "train/loss_error": 0.48234403133392334, "train/loss_total": 0.7372018694877625 }, { "epoch": 3.1477424525781457, "step": 11782, "train/loss_ctc": 0.6398025155067444, "train/loss_error": 0.3727075755596161, "train/loss_total": 0.4261265695095062 }, { "epoch": 3.1480096179535133, "step": 11783, 
"train/loss_ctc": 0.6137269735336304, "train/loss_error": 0.43923330307006836, "train/loss_total": 0.4741320312023163 }, { "epoch": 3.1482767833288805, "step": 11784, "train/loss_ctc": 0.7466420531272888, "train/loss_error": 0.404391348361969, "train/loss_total": 0.4728415012359619 }, { "epoch": 3.1485439487042477, "step": 11785, "train/loss_ctc": 0.8512532711029053, "train/loss_error": 0.4206312596797943, "train/loss_total": 0.5067557096481323 }, { "epoch": 3.1488111140796153, "step": 11786, "train/loss_ctc": 1.3659484386444092, "train/loss_error": 0.4014642536640167, "train/loss_total": 0.5943610668182373 }, { "epoch": 3.1490782794549825, "step": 11787, "train/loss_ctc": 0.7881741523742676, "train/loss_error": 0.46251869201660156, "train/loss_total": 0.5276498198509216 }, { "epoch": 3.14934544483035, "step": 11788, "train/loss_ctc": 0.5714141130447388, "train/loss_error": 0.4524625539779663, "train/loss_total": 0.47625285387039185 }, { "epoch": 3.1496126102057174, "step": 11789, "train/loss_ctc": 0.4755134880542755, "train/loss_error": 0.46536457538604736, "train/loss_total": 0.4673943519592285 }, { "epoch": 3.1498797755810846, "grad_norm": 1.407334804534912, "learning_rate": 1.1111942292278921e-05, "loss": 0.5086, "step": 11790 }, { "epoch": 3.1498797755810846, "step": 11790, "train/loss_ctc": 0.7813411951065063, "train/loss_error": 0.43539729714393616, "train/loss_total": 0.5045861005783081 }, { "epoch": 3.150146940956452, "step": 11791, "train/loss_ctc": 0.8864392042160034, "train/loss_error": 0.45527565479278564, "train/loss_total": 0.5415083765983582 }, { "epoch": 3.1504141063318194, "step": 11792, "train/loss_ctc": 0.6532799005508423, "train/loss_error": 0.4167618751525879, "train/loss_total": 0.4640654921531677 }, { "epoch": 3.1506812717071866, "step": 11793, "train/loss_ctc": 0.8520917892456055, "train/loss_error": 0.4803336262702942, "train/loss_total": 0.5546852946281433 }, { "epoch": 3.1509484370825542, "step": 11794, "train/loss_ctc": 
0.8384972810745239, "train/loss_error": 0.45475614070892334, "train/loss_total": 0.5315043926239014 }, { "epoch": 3.1512156024579214, "step": 11795, "train/loss_ctc": 0.7643923163414001, "train/loss_error": 0.4826280176639557, "train/loss_total": 0.5389808416366577 }, { "epoch": 3.1514827678332886, "step": 11796, "train/loss_ctc": 0.7211918234825134, "train/loss_error": 0.4092446565628052, "train/loss_total": 0.4716340899467468 }, { "epoch": 3.1517499332086563, "step": 11797, "train/loss_ctc": 0.38547956943511963, "train/loss_error": 0.44694435596466064, "train/loss_total": 0.4346514046192169 }, { "epoch": 3.1520170985840235, "step": 11798, "train/loss_ctc": 0.3211124539375305, "train/loss_error": 0.44195619225502014, "train/loss_total": 0.41778743267059326 }, { "epoch": 3.152284263959391, "step": 11799, "train/loss_ctc": 0.9035469889640808, "train/loss_error": 0.4144286811351776, "train/loss_total": 0.5122523307800293 }, { "epoch": 3.1525514293347583, "grad_norm": 2.386924982070923, "learning_rate": 1.1095912369756881e-05, "loss": 0.4972, "step": 11800 }, { "epoch": 3.1525514293347583, "step": 11800, "train/loss_ctc": 1.6049423217773438, "train/loss_error": 0.48926249146461487, "train/loss_total": 0.7123984694480896 }, { "epoch": 3.1528185947101255, "step": 11801, "train/loss_ctc": 0.6983578205108643, "train/loss_error": 0.48688897490501404, "train/loss_total": 0.5291827917098999 }, { "epoch": 3.153085760085493, "step": 11802, "train/loss_ctc": 0.5952622890472412, "train/loss_error": 0.44889891147613525, "train/loss_total": 0.47817161679267883 }, { "epoch": 3.1533529254608603, "step": 11803, "train/loss_ctc": 0.8275040984153748, "train/loss_error": 0.4694715440273285, "train/loss_total": 0.5410780906677246 }, { "epoch": 3.1536200908362275, "step": 11804, "train/loss_ctc": 0.9588476419448853, "train/loss_error": 0.46133702993392944, "train/loss_total": 0.5608391761779785 }, { "epoch": 3.153887256211595, "step": 11805, "train/loss_ctc": 0.7017245888710022, 
"train/loss_error": 0.40286770462989807, "train/loss_total": 0.46263909339904785 }, { "epoch": 3.1541544215869624, "step": 11806, "train/loss_ctc": 0.9085614681243896, "train/loss_error": 0.4787267744541168, "train/loss_total": 0.5646936893463135 }, { "epoch": 3.1544215869623295, "step": 11807, "train/loss_ctc": 1.0843336582183838, "train/loss_error": 0.4836154282093048, "train/loss_total": 0.6037590503692627 }, { "epoch": 3.154688752337697, "step": 11808, "train/loss_ctc": 0.3731057941913605, "train/loss_error": 0.4420119822025299, "train/loss_total": 0.42823076248168945 }, { "epoch": 3.1549559177130644, "step": 11809, "train/loss_ctc": 0.42435985803604126, "train/loss_error": 0.40683597326278687, "train/loss_total": 0.4103407561779022 }, { "epoch": 3.1552230830884316, "grad_norm": 1.733948826789856, "learning_rate": 1.1079882447234839e-05, "loss": 0.5291, "step": 11810 }, { "epoch": 3.1552230830884316, "step": 11810, "train/loss_ctc": 0.503920316696167, "train/loss_error": 0.4666755199432373, "train/loss_total": 0.4741244912147522 }, { "epoch": 3.155490248463799, "step": 11811, "train/loss_ctc": 0.6886416077613831, "train/loss_error": 0.4038422107696533, "train/loss_total": 0.4608020782470703 }, { "epoch": 3.1557574138391664, "step": 11812, "train/loss_ctc": 0.287262886762619, "train/loss_error": 0.48293232917785645, "train/loss_total": 0.4437984526157379 }, { "epoch": 3.1560245792145336, "step": 11813, "train/loss_ctc": 1.0485883951187134, "train/loss_error": 0.4718503952026367, "train/loss_total": 0.58719801902771 }, { "epoch": 3.1562917445899012, "step": 11814, "train/loss_ctc": 0.327721506357193, "train/loss_error": 0.5142672061920166, "train/loss_total": 0.4769580662250519 }, { "epoch": 3.1565589099652684, "step": 11815, "train/loss_ctc": 0.5055623650550842, "train/loss_error": 0.41647663712501526, "train/loss_total": 0.43429380655288696 }, { "epoch": 3.1568260753406356, "step": 11816, "train/loss_ctc": 0.8433000445365906, "train/loss_error": 
0.4545184373855591, "train/loss_total": 0.5322747826576233 }, { "epoch": 3.1570932407160033, "step": 11817, "train/loss_ctc": 0.3173091411590576, "train/loss_error": 0.5036240816116333, "train/loss_total": 0.4663611054420471 }, { "epoch": 3.1573604060913705, "step": 11818, "train/loss_ctc": 0.415079265832901, "train/loss_error": 0.46853458881378174, "train/loss_total": 0.457843542098999 }, { "epoch": 3.157627571466738, "step": 11819, "train/loss_ctc": 0.9859150648117065, "train/loss_error": 0.4615597128868103, "train/loss_total": 0.5664308071136475 }, { "epoch": 3.1578947368421053, "grad_norm": 1.2890064716339111, "learning_rate": 1.1063852524712797e-05, "loss": 0.49, "step": 11820 }, { "epoch": 3.1578947368421053, "step": 11820, "train/loss_ctc": 0.5712875127792358, "train/loss_error": 0.43378084897994995, "train/loss_total": 0.4612821936607361 }, { "epoch": 3.1581619022174725, "step": 11821, "train/loss_ctc": 0.734937310218811, "train/loss_error": 0.45808178186416626, "train/loss_total": 0.5134528875350952 }, { "epoch": 3.15842906759284, "step": 11822, "train/loss_ctc": 0.4100285768508911, "train/loss_error": 0.5119001269340515, "train/loss_total": 0.4915258288383484 }, { "epoch": 3.1586962329682073, "step": 11823, "train/loss_ctc": 0.3387429714202881, "train/loss_error": 0.40730857849121094, "train/loss_total": 0.39359548687934875 }, { "epoch": 3.1589633983435745, "step": 11824, "train/loss_ctc": 0.30469825863838196, "train/loss_error": 0.41426846385002136, "train/loss_total": 0.39235442876815796 }, { "epoch": 3.159230563718942, "step": 11825, "train/loss_ctc": 0.6623888611793518, "train/loss_error": 0.4145064055919647, "train/loss_total": 0.46408289670944214 }, { "epoch": 3.1594977290943094, "step": 11826, "train/loss_ctc": 0.8542698621749878, "train/loss_error": 0.40597736835479736, "train/loss_total": 0.49563586711883545 }, { "epoch": 3.1597648944696766, "step": 11827, "train/loss_ctc": 0.8726335167884827, "train/loss_error": 0.4519328773021698, 
"train/loss_total": 0.5360730290412903 }, { "epoch": 3.160032059845044, "step": 11828, "train/loss_ctc": 0.31648576259613037, "train/loss_error": 0.44846367835998535, "train/loss_total": 0.4220680892467499 }, { "epoch": 3.1602992252204114, "step": 11829, "train/loss_ctc": 1.1630313396453857, "train/loss_error": 0.44020935893058777, "train/loss_total": 0.5847737789154053 }, { "epoch": 3.1605663905957786, "grad_norm": 3.30403995513916, "learning_rate": 1.1047822602190757e-05, "loss": 0.4755, "step": 11830 }, { "epoch": 3.1605663905957786, "step": 11830, "train/loss_ctc": 0.4131823778152466, "train/loss_error": 0.44047704339027405, "train/loss_total": 0.4350181221961975 }, { "epoch": 3.1608335559711462, "step": 11831, "train/loss_ctc": 1.2532374858856201, "train/loss_error": 0.4222121238708496, "train/loss_total": 0.5884172320365906 }, { "epoch": 3.1611007213465134, "step": 11832, "train/loss_ctc": 0.8100465536117554, "train/loss_error": 0.36686035990715027, "train/loss_total": 0.4554976224899292 }, { "epoch": 3.161367886721881, "step": 11833, "train/loss_ctc": 1.2123281955718994, "train/loss_error": 0.40485987067222595, "train/loss_total": 0.5663535594940186 }, { "epoch": 3.1616350520972483, "step": 11834, "train/loss_ctc": 0.6721171140670776, "train/loss_error": 0.3445509970188141, "train/loss_total": 0.4100642204284668 }, { "epoch": 3.1619022174726155, "step": 11835, "train/loss_ctc": 1.1714354753494263, "train/loss_error": 0.39560437202453613, "train/loss_total": 0.5507705807685852 }, { "epoch": 3.162169382847983, "step": 11836, "train/loss_ctc": 0.6743552684783936, "train/loss_error": 0.4466173052787781, "train/loss_total": 0.4921649098396301 }, { "epoch": 3.1624365482233503, "step": 11837, "train/loss_ctc": 0.5873887538909912, "train/loss_error": 0.43646806478500366, "train/loss_total": 0.4666522145271301 }, { "epoch": 3.1627037135987175, "step": 11838, "train/loss_ctc": 0.8829988241195679, "train/loss_error": 0.4301351308822632, "train/loss_total": 
0.5207078456878662 }, { "epoch": 3.162970878974085, "step": 11839, "train/loss_ctc": 0.33739763498306274, "train/loss_error": 0.4535454511642456, "train/loss_total": 0.43031588196754456 }, { "epoch": 3.1632380443494523, "grad_norm": 2.747856378555298, "learning_rate": 1.1031792679668715e-05, "loss": 0.4916, "step": 11840 }, { "epoch": 3.1632380443494523, "step": 11840, "train/loss_ctc": 0.5473990440368652, "train/loss_error": 0.4986509084701538, "train/loss_total": 0.508400559425354 }, { "epoch": 3.1635052097248195, "step": 11841, "train/loss_ctc": 0.30536389350891113, "train/loss_error": 0.4187399744987488, "train/loss_total": 0.39606475830078125 }, { "epoch": 3.163772375100187, "step": 11842, "train/loss_ctc": 0.552600085735321, "train/loss_error": 0.4206429719924927, "train/loss_total": 0.44703441858291626 }, { "epoch": 3.1640395404755544, "step": 11843, "train/loss_ctc": 0.29121994972229004, "train/loss_error": 0.4383024573326111, "train/loss_total": 0.4088859558105469 }, { "epoch": 3.1643067058509216, "step": 11844, "train/loss_ctc": 0.5456512570381165, "train/loss_error": 0.4295206665992737, "train/loss_total": 0.45274680852890015 }, { "epoch": 3.164573871226289, "step": 11845, "train/loss_ctc": 1.0493413209915161, "train/loss_error": 0.44656285643577576, "train/loss_total": 0.5671185851097107 }, { "epoch": 3.1648410366016564, "step": 11846, "train/loss_ctc": 1.0845650434494019, "train/loss_error": 0.4221056401729584, "train/loss_total": 0.5545974969863892 }, { "epoch": 3.1651082019770236, "step": 11847, "train/loss_ctc": 0.29306355118751526, "train/loss_error": 0.48063433170318604, "train/loss_total": 0.44312018156051636 }, { "epoch": 3.1653753673523912, "step": 11848, "train/loss_ctc": 0.6535964012145996, "train/loss_error": 0.375632107257843, "train/loss_total": 0.4312250018119812 }, { "epoch": 3.1656425327277584, "step": 11849, "train/loss_ctc": 0.5633502006530762, "train/loss_error": 0.44295069575309753, "train/loss_total": 0.4670305848121643 }, { 
"epoch": 3.1659096981031256, "grad_norm": 2.0134992599487305, "learning_rate": 1.1015762757146673e-05, "loss": 0.4676, "step": 11850 }, { "epoch": 3.1659096981031256, "step": 11850, "train/loss_ctc": 1.319732427597046, "train/loss_error": 0.3933744728565216, "train/loss_total": 0.5786460638046265 }, { "epoch": 3.1661768634784933, "step": 11851, "train/loss_ctc": 1.047282099723816, "train/loss_error": 0.515974223613739, "train/loss_total": 0.6222358345985413 }, { "epoch": 3.1664440288538604, "step": 11852, "train/loss_ctc": 1.05393385887146, "train/loss_error": 0.5717021226882935, "train/loss_total": 0.6681484580039978 }, { "epoch": 3.166711194229228, "step": 11853, "train/loss_ctc": 0.3847818970680237, "train/loss_error": 0.4650503695011139, "train/loss_total": 0.4489966928958893 }, { "epoch": 3.1669783596045953, "step": 11854, "train/loss_ctc": 0.9392801523208618, "train/loss_error": 0.42411094903945923, "train/loss_total": 0.5271447896957397 }, { "epoch": 3.1672455249799625, "step": 11855, "train/loss_ctc": 0.3326503038406372, "train/loss_error": 0.46543294191360474, "train/loss_total": 0.4388764500617981 }, { "epoch": 3.16751269035533, "step": 11856, "train/loss_ctc": 0.605627179145813, "train/loss_error": 0.4875865578651428, "train/loss_total": 0.5111947059631348 }, { "epoch": 3.1677798557306973, "step": 11857, "train/loss_ctc": 0.4187142252922058, "train/loss_error": 0.4282059967517853, "train/loss_total": 0.42630764842033386 }, { "epoch": 3.1680470211060645, "step": 11858, "train/loss_ctc": 0.6723852157592773, "train/loss_error": 0.42180582880973816, "train/loss_total": 0.4719217121601105 }, { "epoch": 3.168314186481432, "step": 11859, "train/loss_ctc": 1.4186127185821533, "train/loss_error": 0.47609955072402954, "train/loss_total": 0.6646021604537964 }, { "epoch": 3.1685813518567993, "grad_norm": 1.2079390287399292, "learning_rate": 1.0999732834624633e-05, "loss": 0.5358, "step": 11860 }, { "epoch": 3.1685813518567993, "step": 11860, "train/loss_ctc": 
0.5629491209983826, "train/loss_error": 0.46175700426101685, "train/loss_total": 0.48199543356895447 }, { "epoch": 3.1688485172321665, "step": 11861, "train/loss_ctc": 0.4143415093421936, "train/loss_error": 0.48100683093070984, "train/loss_total": 0.46767377853393555 }, { "epoch": 3.169115682607534, "step": 11862, "train/loss_ctc": 0.6319001317024231, "train/loss_error": 0.41553330421447754, "train/loss_total": 0.4588066637516022 }, { "epoch": 3.1693828479829014, "step": 11863, "train/loss_ctc": 0.7425969243049622, "train/loss_error": 0.48277217149734497, "train/loss_total": 0.5347371101379395 }, { "epoch": 3.1696500133582686, "step": 11864, "train/loss_ctc": 0.49961376190185547, "train/loss_error": 0.4178315997123718, "train/loss_total": 0.43418803811073303 }, { "epoch": 3.169917178733636, "step": 11865, "train/loss_ctc": 0.3762918710708618, "train/loss_error": 0.4045685827732086, "train/loss_total": 0.3989132344722748 }, { "epoch": 3.1701843441090034, "step": 11866, "train/loss_ctc": 0.3727240562438965, "train/loss_error": 0.3985067903995514, "train/loss_total": 0.3933502435684204 }, { "epoch": 3.170451509484371, "step": 11867, "train/loss_ctc": 0.8304892778396606, "train/loss_error": 0.46843868494033813, "train/loss_total": 0.5408487915992737 }, { "epoch": 3.1707186748597382, "step": 11868, "train/loss_ctc": 0.6364076137542725, "train/loss_error": 0.45998674631118774, "train/loss_total": 0.49527090787887573 }, { "epoch": 3.1709858402351054, "step": 11869, "train/loss_ctc": 0.4805924892425537, "train/loss_error": 0.38801464438438416, "train/loss_total": 0.4065302312374115 }, { "epoch": 3.171253005610473, "grad_norm": 2.4935882091522217, "learning_rate": 1.0983702912102591e-05, "loss": 0.4612, "step": 11870 }, { "epoch": 3.171253005610473, "step": 11870, "train/loss_ctc": 0.40503206849098206, "train/loss_error": 0.42851170897483826, "train/loss_total": 0.4238157868385315 }, { "epoch": 3.1715201709858403, "step": 11871, "train/loss_ctc": 1.8040893077850342, 
"train/loss_error": 0.4491405487060547, "train/loss_total": 0.7201303243637085 }, { "epoch": 3.1717873363612075, "step": 11872, "train/loss_ctc": 0.711844801902771, "train/loss_error": 0.508171796798706, "train/loss_total": 0.5489063858985901 }, { "epoch": 3.172054501736575, "step": 11873, "train/loss_ctc": 0.6310869455337524, "train/loss_error": 0.39551788568496704, "train/loss_total": 0.44263172149658203 }, { "epoch": 3.1723216671119423, "step": 11874, "train/loss_ctc": 0.5599756240844727, "train/loss_error": 0.4486437737941742, "train/loss_total": 0.4709101617336273 }, { "epoch": 3.1725888324873095, "step": 11875, "train/loss_ctc": 0.5882375240325928, "train/loss_error": 0.36863306164741516, "train/loss_total": 0.41255396604537964 }, { "epoch": 3.172855997862677, "step": 11876, "train/loss_ctc": 0.838706910610199, "train/loss_error": 0.40685582160949707, "train/loss_total": 0.4932260513305664 }, { "epoch": 3.1731231632380443, "step": 11877, "train/loss_ctc": 0.3996608853340149, "train/loss_error": 0.4694303274154663, "train/loss_total": 0.45547646284103394 }, { "epoch": 3.1733903286134115, "step": 11878, "train/loss_ctc": 1.0568974018096924, "train/loss_error": 0.4701961278915405, "train/loss_total": 0.5875363945960999 }, { "epoch": 3.173657493988779, "step": 11879, "train/loss_ctc": 0.3789122998714447, "train/loss_error": 0.4672996997833252, "train/loss_total": 0.4496222138404846 }, { "epoch": 3.1739246593641464, "grad_norm": 4.751404285430908, "learning_rate": 1.096767298958055e-05, "loss": 0.5005, "step": 11880 }, { "epoch": 3.1739246593641464, "step": 11880, "train/loss_ctc": 0.8508719205856323, "train/loss_error": 0.46813511848449707, "train/loss_total": 0.544682502746582 }, { "epoch": 3.1741918247395136, "step": 11881, "train/loss_ctc": 0.7492299675941467, "train/loss_error": 0.495103120803833, "train/loss_total": 0.5459284782409668 }, { "epoch": 3.174458990114881, "step": 11882, "train/loss_ctc": 0.13533274829387665, "train/loss_error": 
0.38473275303840637, "train/loss_total": 0.33485275506973267 }, { "epoch": 3.1747261554902484, "step": 11883, "train/loss_ctc": 2.104785680770874, "train/loss_error": 0.4768187999725342, "train/loss_total": 0.8024121522903442 }, { "epoch": 3.1749933208656156, "step": 11884, "train/loss_ctc": 0.6682618856430054, "train/loss_error": 0.4299114942550659, "train/loss_total": 0.47758156061172485 }, { "epoch": 3.1752604862409832, "step": 11885, "train/loss_ctc": 0.4319874942302704, "train/loss_error": 0.4043728709220886, "train/loss_total": 0.40989580750465393 }, { "epoch": 3.1755276516163504, "step": 11886, "train/loss_ctc": 1.0180896520614624, "train/loss_error": 0.46754705905914307, "train/loss_total": 0.577655553817749 }, { "epoch": 3.175794816991718, "step": 11887, "train/loss_ctc": 0.4816657602787018, "train/loss_error": 0.49980196356773376, "train/loss_total": 0.49617472290992737 }, { "epoch": 3.1760619823670853, "step": 11888, "train/loss_ctc": 1.1780118942260742, "train/loss_error": 0.5028815865516663, "train/loss_total": 0.6379076242446899 }, { "epoch": 3.1763291477424525, "step": 11889, "train/loss_ctc": 0.6823145151138306, "train/loss_error": 0.3837321400642395, "train/loss_total": 0.44344860315322876 }, { "epoch": 3.17659631311782, "grad_norm": 1.178454041481018, "learning_rate": 1.095164306705851e-05, "loss": 0.5271, "step": 11890 }, { "epoch": 3.17659631311782, "step": 11890, "train/loss_ctc": 0.46060559153556824, "train/loss_error": 0.47468236088752747, "train/loss_total": 0.47186702489852905 }, { "epoch": 3.1768634784931873, "step": 11891, "train/loss_ctc": 0.8807558417320251, "train/loss_error": 0.43007123470306396, "train/loss_total": 0.5202081799507141 }, { "epoch": 3.1771306438685545, "step": 11892, "train/loss_ctc": 0.6944962739944458, "train/loss_error": 0.512251079082489, "train/loss_total": 0.5487000942230225 }, { "epoch": 3.177397809243922, "step": 11893, "train/loss_ctc": 1.075976014137268, "train/loss_error": 0.48478949069976807, 
"train/loss_total": 0.603026807308197 }, { "epoch": 3.1776649746192893, "step": 11894, "train/loss_ctc": 0.9173725247383118, "train/loss_error": 0.3896830677986145, "train/loss_total": 0.49522095918655396 }, { "epoch": 3.1779321399946565, "step": 11895, "train/loss_ctc": 0.6685685515403748, "train/loss_error": 0.46298646926879883, "train/loss_total": 0.504102885723114 }, { "epoch": 3.178199305370024, "step": 11896, "train/loss_ctc": 0.8482007384300232, "train/loss_error": 0.4794852137565613, "train/loss_total": 0.5532283186912537 }, { "epoch": 3.1784664707453913, "step": 11897, "train/loss_ctc": 0.23956158757209778, "train/loss_error": 0.43399104475975037, "train/loss_total": 0.39510518312454224 }, { "epoch": 3.1787336361207585, "step": 11898, "train/loss_ctc": 1.4182586669921875, "train/loss_error": 0.4444553852081299, "train/loss_total": 0.6392160654067993 }, { "epoch": 3.179000801496126, "step": 11899, "train/loss_ctc": 1.591774821281433, "train/loss_error": 0.4870419502258301, "train/loss_total": 0.7079885005950928 }, { "epoch": 3.1792679668714934, "grad_norm": 2.690147638320923, "learning_rate": 1.0935613144536469e-05, "loss": 0.5439, "step": 11900 }, { "epoch": 3.1792679668714934, "step": 11900, "train/loss_ctc": 1.022907018661499, "train/loss_error": 0.49763166904449463, "train/loss_total": 0.6026867628097534 }, { "epoch": 3.179535132246861, "step": 11901, "train/loss_ctc": 0.7478123307228088, "train/loss_error": 0.46636709570884705, "train/loss_total": 0.5226561427116394 }, { "epoch": 3.179802297622228, "step": 11902, "train/loss_ctc": 0.9047051668167114, "train/loss_error": 0.4226895570755005, "train/loss_total": 0.5190926790237427 }, { "epoch": 3.1800694629975954, "step": 11903, "train/loss_ctc": 0.9516867399215698, "train/loss_error": 0.5163792371749878, "train/loss_total": 0.6034407615661621 }, { "epoch": 3.180336628372963, "step": 11904, "train/loss_ctc": 0.5249543190002441, "train/loss_error": 0.4114586114883423, "train/loss_total": 
0.43415775895118713 }, { "epoch": 3.1806037937483302, "step": 11905, "train/loss_ctc": 1.1476991176605225, "train/loss_error": 0.4578348696231842, "train/loss_total": 0.5958077311515808 }, { "epoch": 3.1808709591236974, "step": 11906, "train/loss_ctc": 0.4012943506240845, "train/loss_error": 0.30430397391319275, "train/loss_total": 0.32370203733444214 }, { "epoch": 3.181138124499065, "step": 11907, "train/loss_ctc": 0.8316049575805664, "train/loss_error": 0.45745596289634705, "train/loss_total": 0.532285749912262 }, { "epoch": 3.1814052898744323, "step": 11908, "train/loss_ctc": 0.6330552101135254, "train/loss_error": 0.4290676414966583, "train/loss_total": 0.4698651432991028 }, { "epoch": 3.1816724552497995, "step": 11909, "train/loss_ctc": 0.6639585494995117, "train/loss_error": 0.47539037466049194, "train/loss_total": 0.5131040215492249 }, { "epoch": 3.181939620625167, "grad_norm": 3.9257636070251465, "learning_rate": 1.0919583222014427e-05, "loss": 0.5117, "step": 11910 }, { "epoch": 3.181939620625167, "step": 11910, "train/loss_ctc": 1.4267687797546387, "train/loss_error": 0.47239574790000916, "train/loss_total": 0.6632703542709351 }, { "epoch": 3.1822067860005343, "step": 11911, "train/loss_ctc": 1.2067924737930298, "train/loss_error": 0.4981357455253601, "train/loss_total": 0.6398671269416809 }, { "epoch": 3.1824739513759015, "step": 11912, "train/loss_ctc": 1.1152716875076294, "train/loss_error": 0.45801717042922974, "train/loss_total": 0.5894680619239807 }, { "epoch": 3.182741116751269, "step": 11913, "train/loss_ctc": 0.5779100656509399, "train/loss_error": 0.37557798624038696, "train/loss_total": 0.4160444140434265 }, { "epoch": 3.1830082821266363, "step": 11914, "train/loss_ctc": 2.9807729721069336, "train/loss_error": 0.46801647543907166, "train/loss_total": 0.9705678224563599 }, { "epoch": 3.1832754475020035, "step": 11915, "train/loss_ctc": 1.1247618198394775, "train/loss_error": 0.49839285016059875, "train/loss_total": 0.6236666440963745 }, { 
"epoch": 3.183542612877371, "step": 11916, "train/loss_ctc": 0.950782060623169, "train/loss_error": 0.4797406792640686, "train/loss_total": 0.5739489793777466 }, { "epoch": 3.1838097782527384, "step": 11917, "train/loss_ctc": 1.1307435035705566, "train/loss_error": 0.4953049421310425, "train/loss_total": 0.6223926544189453 }, { "epoch": 3.184076943628106, "step": 11918, "train/loss_ctc": 0.5318039655685425, "train/loss_error": 0.5162938833236694, "train/loss_total": 0.5193959474563599 }, { "epoch": 3.184344109003473, "step": 11919, "train/loss_ctc": 0.7160520553588867, "train/loss_error": 0.36875060200691223, "train/loss_total": 0.4382109045982361 }, { "epoch": 3.1846112743788404, "grad_norm": 2.7236251831054688, "learning_rate": 1.0903553299492386e-05, "loss": 0.6057, "step": 11920 }, { "epoch": 3.1846112743788404, "step": 11920, "train/loss_ctc": 0.5492435097694397, "train/loss_error": 0.39104512333869934, "train/loss_total": 0.42268481850624084 }, { "epoch": 3.184878439754208, "step": 11921, "train/loss_ctc": 0.6824449300765991, "train/loss_error": 0.4241064786911011, "train/loss_total": 0.4757741689682007 }, { "epoch": 3.1851456051295752, "step": 11922, "train/loss_ctc": 0.8010462522506714, "train/loss_error": 0.4510002136230469, "train/loss_total": 0.5210094451904297 }, { "epoch": 3.1854127705049424, "step": 11923, "train/loss_ctc": 0.5871785879135132, "train/loss_error": 0.36642685532569885, "train/loss_total": 0.4105772078037262 }, { "epoch": 3.18567993588031, "step": 11924, "train/loss_ctc": 0.5357838869094849, "train/loss_error": 0.5445906519889832, "train/loss_total": 0.5428292751312256 }, { "epoch": 3.1859471012556773, "step": 11925, "train/loss_ctc": 0.4704345464706421, "train/loss_error": 0.502247154712677, "train/loss_total": 0.49588465690612793 }, { "epoch": 3.1862142666310445, "step": 11926, "train/loss_ctc": 0.20352467894554138, "train/loss_error": 0.3841567933559418, "train/loss_total": 0.3480303883552551 }, { "epoch": 3.186481432006412, "step": 
11927, "train/loss_ctc": 0.6956381797790527, "train/loss_error": 0.4998477101325989, "train/loss_total": 0.5390058159828186 }, { "epoch": 3.1867485973817793, "step": 11928, "train/loss_ctc": 0.6354038715362549, "train/loss_error": 0.4205135107040405, "train/loss_total": 0.46349161863327026 }, { "epoch": 3.1870157627571465, "step": 11929, "train/loss_ctc": 0.4226681888103485, "train/loss_error": 0.44733861088752747, "train/loss_total": 0.44240453839302063 }, { "epoch": 3.187282928132514, "grad_norm": 1.410638451576233, "learning_rate": 1.0887523376970344e-05, "loss": 0.4662, "step": 11930 }, { "epoch": 3.187282928132514, "step": 11930, "train/loss_ctc": 0.482190877199173, "train/loss_error": 0.44740140438079834, "train/loss_total": 0.4543592929840088 }, { "epoch": 3.1875500935078813, "step": 11931, "train/loss_ctc": 0.4051235020160675, "train/loss_error": 0.43024083971977234, "train/loss_total": 0.4252173900604248 }, { "epoch": 3.187817258883249, "step": 11932, "train/loss_ctc": 0.6298923492431641, "train/loss_error": 0.4377886950969696, "train/loss_total": 0.476209431886673 }, { "epoch": 3.188084424258616, "step": 11933, "train/loss_ctc": 0.9494591951370239, "train/loss_error": 0.4268980324268341, "train/loss_total": 0.531410276889801 }, { "epoch": 3.1883515896339834, "step": 11934, "train/loss_ctc": 0.631976842880249, "train/loss_error": 0.5023401975631714, "train/loss_total": 0.528267502784729 }, { "epoch": 3.188618755009351, "step": 11935, "train/loss_ctc": 0.5560289621353149, "train/loss_error": 0.4310860335826874, "train/loss_total": 0.45607462525367737 }, { "epoch": 3.188885920384718, "step": 11936, "train/loss_ctc": 0.36381039023399353, "train/loss_error": 0.36758947372436523, "train/loss_total": 0.3668336570262909 }, { "epoch": 3.1891530857600854, "step": 11937, "train/loss_ctc": 0.807443380355835, "train/loss_error": 0.4056377112865448, "train/loss_total": 0.48599886894226074 }, { "epoch": 3.189420251135453, "step": 11938, "train/loss_ctc": 
1.0867912769317627, "train/loss_error": 0.41380786895751953, "train/loss_total": 0.5484045743942261 }, { "epoch": 3.18968741651082, "step": 11939, "train/loss_ctc": 0.4974258244037628, "train/loss_error": 0.41738182306289673, "train/loss_total": 0.43339061737060547 }, { "epoch": 3.1899545818861874, "grad_norm": 3.343198776245117, "learning_rate": 1.0871493454448303e-05, "loss": 0.4706, "step": 11940 }, { "epoch": 3.1899545818861874, "step": 11940, "train/loss_ctc": 0.38433530926704407, "train/loss_error": 0.483669638633728, "train/loss_total": 0.4638027846813202 }, { "epoch": 3.190221747261555, "step": 11941, "train/loss_ctc": 0.3507732152938843, "train/loss_error": 0.4699290692806244, "train/loss_total": 0.4460979104042053 }, { "epoch": 3.1904889126369222, "step": 11942, "train/loss_ctc": 0.8649856448173523, "train/loss_error": 0.423578679561615, "train/loss_total": 0.5118600726127625 }, { "epoch": 3.1907560780122894, "step": 11943, "train/loss_ctc": 0.45795512199401855, "train/loss_error": 0.44740885496139526, "train/loss_total": 0.4495181143283844 }, { "epoch": 3.191023243387657, "step": 11944, "train/loss_ctc": 0.4740790128707886, "train/loss_error": 0.5247529149055481, "train/loss_total": 0.5146181583404541 }, { "epoch": 3.1912904087630243, "step": 11945, "train/loss_ctc": 0.7041152715682983, "train/loss_error": 0.3996853232383728, "train/loss_total": 0.4605712890625 }, { "epoch": 3.1915575741383915, "step": 11946, "train/loss_ctc": 0.6547287702560425, "train/loss_error": 0.40630701184272766, "train/loss_total": 0.45599138736724854 }, { "epoch": 3.191824739513759, "step": 11947, "train/loss_ctc": 0.7703691720962524, "train/loss_error": 0.4828857183456421, "train/loss_total": 0.5403823852539062 }, { "epoch": 3.1920919048891263, "step": 11948, "train/loss_ctc": 0.36684492230415344, "train/loss_error": 0.36295390129089355, "train/loss_total": 0.36373212933540344 }, { "epoch": 3.1923590702644935, "step": 11949, "train/loss_ctc": 0.42725786566734314, 
"train/loss_error": 0.4301357567310333, "train/loss_total": 0.42956018447875977 }, { "epoch": 3.192626235639861, "grad_norm": 1.2076326608657837, "learning_rate": 1.0855463531926262e-05, "loss": 0.4636, "step": 11950 }, { "epoch": 3.192626235639861, "step": 11950, "train/loss_ctc": 0.44723182916641235, "train/loss_error": 0.4750349819660187, "train/loss_total": 0.46947434544563293 }, { "epoch": 3.1928934010152283, "step": 11951, "train/loss_ctc": 0.6921207904815674, "train/loss_error": 0.41142964363098145, "train/loss_total": 0.46756789088249207 }, { "epoch": 3.193160566390596, "step": 11952, "train/loss_ctc": 1.2377309799194336, "train/loss_error": 0.4089837372303009, "train/loss_total": 0.5747331976890564 }, { "epoch": 3.193427731765963, "step": 11953, "train/loss_ctc": 0.7128843665122986, "train/loss_error": 0.46603819727897644, "train/loss_total": 0.5154074430465698 }, { "epoch": 3.1936948971413304, "step": 11954, "train/loss_ctc": 0.8726487159729004, "train/loss_error": 0.4096628427505493, "train/loss_total": 0.5022600293159485 }, { "epoch": 3.193962062516698, "step": 11955, "train/loss_ctc": 0.5728405117988586, "train/loss_error": 0.465790331363678, "train/loss_total": 0.48720037937164307 }, { "epoch": 3.194229227892065, "step": 11956, "train/loss_ctc": 1.009330153465271, "train/loss_error": 0.4526597261428833, "train/loss_total": 0.5639938116073608 }, { "epoch": 3.1944963932674324, "step": 11957, "train/loss_ctc": 0.7118571996688843, "train/loss_error": 0.4594411253929138, "train/loss_total": 0.5099243521690369 }, { "epoch": 3.1947635586428, "step": 11958, "train/loss_ctc": 1.0476112365722656, "train/loss_error": 0.5244173407554626, "train/loss_total": 0.6290560960769653 }, { "epoch": 3.1950307240181672, "step": 11959, "train/loss_ctc": 0.72638338804245, "train/loss_error": 0.4294191300868988, "train/loss_total": 0.4888119697570801 }, { "epoch": 3.1952978893935344, "grad_norm": 2.3754215240478516, "learning_rate": 1.083943360940422e-05, "loss": 0.5208, 
"step": 11960 }, { "epoch": 3.1952978893935344, "step": 11960, "train/loss_ctc": 0.7834210395812988, "train/loss_error": 0.5019958019256592, "train/loss_total": 0.5582808256149292 }, { "epoch": 3.195565054768902, "step": 11961, "train/loss_ctc": 0.6620838046073914, "train/loss_error": 0.3882240951061249, "train/loss_total": 0.4429960250854492 }, { "epoch": 3.1958322201442693, "step": 11962, "train/loss_ctc": 0.2597479224205017, "train/loss_error": 0.3847626745700836, "train/loss_total": 0.35975974798202515 }, { "epoch": 3.1960993855196365, "step": 11963, "train/loss_ctc": 0.5553118586540222, "train/loss_error": 0.4327862560749054, "train/loss_total": 0.4572913944721222 }, { "epoch": 3.196366550895004, "step": 11964, "train/loss_ctc": 0.57908034324646, "train/loss_error": 0.4625934660434723, "train/loss_total": 0.48589086532592773 }, { "epoch": 3.1966337162703713, "step": 11965, "train/loss_ctc": 0.7249941825866699, "train/loss_error": 0.4597812294960022, "train/loss_total": 0.5128238201141357 }, { "epoch": 3.196900881645739, "step": 11966, "train/loss_ctc": 1.0219194889068604, "train/loss_error": 0.4375445544719696, "train/loss_total": 0.5544195175170898 }, { "epoch": 3.197168047021106, "step": 11967, "train/loss_ctc": 0.4610665440559387, "train/loss_error": 0.4733002185821533, "train/loss_total": 0.4708534777164459 }, { "epoch": 3.1974352123964733, "step": 11968, "train/loss_ctc": 0.8489264249801636, "train/loss_error": 0.3855476379394531, "train/loss_total": 0.47822341322898865 }, { "epoch": 3.197702377771841, "step": 11969, "train/loss_ctc": 0.24351972341537476, "train/loss_error": 0.4335947334766388, "train/loss_total": 0.3955797255039215 }, { "epoch": 3.197969543147208, "grad_norm": 1.364090085029602, "learning_rate": 1.082340368688218e-05, "loss": 0.4716, "step": 11970 }, { "epoch": 3.197969543147208, "step": 11970, "train/loss_ctc": 0.7608879804611206, "train/loss_error": 0.372783899307251, "train/loss_total": 0.45040473341941833 }, { "epoch": 
3.1982367085225754, "step": 11971, "train/loss_ctc": 0.5961215496063232, "train/loss_error": 0.4840569794178009, "train/loss_total": 0.5064699053764343 }, { "epoch": 3.198503873897943, "step": 11972, "train/loss_ctc": 0.9250174760818481, "train/loss_error": 0.39882299304008484, "train/loss_total": 0.5040618777275085 }, { "epoch": 3.19877103927331, "step": 11973, "train/loss_ctc": 0.7349703311920166, "train/loss_error": 0.4481513500213623, "train/loss_total": 0.5055151581764221 }, { "epoch": 3.1990382046486774, "step": 11974, "train/loss_ctc": 0.7117387056350708, "train/loss_error": 0.43648678064346313, "train/loss_total": 0.4915371537208557 }, { "epoch": 3.199305370024045, "step": 11975, "train/loss_ctc": 0.37931692600250244, "train/loss_error": 0.5316038727760315, "train/loss_total": 0.5011464953422546 }, { "epoch": 3.199572535399412, "step": 11976, "train/loss_ctc": 0.46875154972076416, "train/loss_error": 0.45044833421707153, "train/loss_total": 0.4541090130805969 }, { "epoch": 3.1998397007747794, "step": 11977, "train/loss_ctc": 0.2896271049976349, "train/loss_error": 0.38815364241600037, "train/loss_total": 0.3684483468532562 }, { "epoch": 3.200106866150147, "step": 11978, "train/loss_ctc": 0.4809933304786682, "train/loss_error": 0.40941932797431946, "train/loss_total": 0.4237341582775116 }, { "epoch": 3.2003740315255143, "step": 11979, "train/loss_ctc": 0.8985031843185425, "train/loss_error": 0.4508618712425232, "train/loss_total": 0.540390133857727 }, { "epoch": 3.2006411969008814, "grad_norm": 1.6322643756866455, "learning_rate": 1.080737376436014e-05, "loss": 0.4746, "step": 11980 }, { "epoch": 3.2006411969008814, "step": 11980, "train/loss_ctc": 0.29666972160339355, "train/loss_error": 0.41811901330947876, "train/loss_total": 0.3938291668891907 }, { "epoch": 3.200908362276249, "step": 11981, "train/loss_ctc": 0.7129743099212646, "train/loss_error": 0.4674614369869232, "train/loss_total": 0.5165640115737915 }, { "epoch": 3.2011755276516163, "step": 11982, 
"train/loss_ctc": 0.8959251046180725, "train/loss_error": 0.48389261960983276, "train/loss_total": 0.5662991404533386 }, { "epoch": 3.2014426930269835, "step": 11983, "train/loss_ctc": 0.6939693689346313, "train/loss_error": 0.4520786702632904, "train/loss_total": 0.5004568099975586 }, { "epoch": 3.201709858402351, "step": 11984, "train/loss_ctc": 0.8780063390731812, "train/loss_error": 0.4975028336048126, "train/loss_total": 0.5736035108566284 }, { "epoch": 3.2019770237777183, "step": 11985, "train/loss_ctc": 0.4513380527496338, "train/loss_error": 0.49325573444366455, "train/loss_total": 0.4848722219467163 }, { "epoch": 3.202244189153086, "step": 11986, "train/loss_ctc": 0.5503594875335693, "train/loss_error": 0.40561041235923767, "train/loss_total": 0.43456023931503296 }, { "epoch": 3.202511354528453, "step": 11987, "train/loss_ctc": 0.6958868503570557, "train/loss_error": 0.45848315954208374, "train/loss_total": 0.505963921546936 }, { "epoch": 3.2027785199038203, "step": 11988, "train/loss_ctc": 0.8414708375930786, "train/loss_error": 0.43425482511520386, "train/loss_total": 0.5156980156898499 }, { "epoch": 3.203045685279188, "step": 11989, "train/loss_ctc": 2.212181806564331, "train/loss_error": 0.5214279890060425, "train/loss_total": 0.8595787286758423 }, { "epoch": 3.203312850654555, "grad_norm": 3.154127597808838, "learning_rate": 1.0791343841838098e-05, "loss": 0.5351, "step": 11990 }, { "epoch": 3.203312850654555, "step": 11990, "train/loss_ctc": 0.33101701736450195, "train/loss_error": 0.45041391253471375, "train/loss_total": 0.4265345633029938 }, { "epoch": 3.2035800160299224, "step": 11991, "train/loss_ctc": 0.3357106149196625, "train/loss_error": 0.4450312554836273, "train/loss_total": 0.4231671392917633 }, { "epoch": 3.20384718140529, "step": 11992, "train/loss_ctc": 0.5904385447502136, "train/loss_error": 0.46158650517463684, "train/loss_total": 0.48735693097114563 }, { "epoch": 3.204114346780657, "step": 11993, "train/loss_ctc": 0.6647529602050781, 
"train/loss_error": 0.49925240874290466, "train/loss_total": 0.5323525071144104 }, { "epoch": 3.2043815121560244, "step": 11994, "train/loss_ctc": 0.9494352340698242, "train/loss_error": 0.4524872899055481, "train/loss_total": 0.5518769025802612 }, { "epoch": 3.204648677531392, "step": 11995, "train/loss_ctc": 0.71260666847229, "train/loss_error": 0.4858347773551941, "train/loss_total": 0.5311891436576843 }, { "epoch": 3.2049158429067592, "step": 11996, "train/loss_ctc": 0.25428199768066406, "train/loss_error": 0.4911375045776367, "train/loss_total": 0.44376641511917114 }, { "epoch": 3.2051830082821264, "step": 11997, "train/loss_ctc": 0.700400710105896, "train/loss_error": 0.4775455892086029, "train/loss_total": 0.5221166014671326 }, { "epoch": 3.205450173657494, "step": 11998, "train/loss_ctc": 0.3570491671562195, "train/loss_error": 0.40375596284866333, "train/loss_total": 0.39441460371017456 }, { "epoch": 3.2057173390328613, "step": 11999, "train/loss_ctc": 0.2519424557685852, "train/loss_error": 0.4439510405063629, "train/loss_total": 0.4055493175983429 }, { "epoch": 3.205984504408229, "grad_norm": 1.8816205263137817, "learning_rate": 1.0775313919316058e-05, "loss": 0.4718, "step": 12000 }, { "epoch": 3.205984504408229, "step": 12000, "train/loss_ctc": 1.420762062072754, "train/loss_error": 0.43073296546936035, "train/loss_total": 0.6287387609481812 }, { "epoch": 3.206251669783596, "step": 12001, "train/loss_ctc": 0.9071083068847656, "train/loss_error": 0.4487370550632477, "train/loss_total": 0.5404113531112671 }, { "epoch": 3.2065188351589633, "step": 12002, "train/loss_ctc": 0.6533308029174805, "train/loss_error": 0.4303191006183624, "train/loss_total": 0.47492146492004395 }, { "epoch": 3.206786000534331, "step": 12003, "train/loss_ctc": 0.4158433675765991, "train/loss_error": 0.4423009157180786, "train/loss_total": 0.43700942397117615 }, { "epoch": 3.207053165909698, "step": 12004, "train/loss_ctc": 0.7035612463951111, "train/loss_error": 
0.44006261229515076, "train/loss_total": 0.49276232719421387 }, { "epoch": 3.2073203312850653, "step": 12005, "train/loss_ctc": 0.7748976945877075, "train/loss_error": 0.4817889630794525, "train/loss_total": 0.5404106974601746 }, { "epoch": 3.207587496660433, "step": 12006, "train/loss_ctc": 0.6787232756614685, "train/loss_error": 0.4627370536327362, "train/loss_total": 0.5059342980384827 }, { "epoch": 3.2078546620358, "step": 12007, "train/loss_ctc": 0.8868779540061951, "train/loss_error": 0.4455180764198303, "train/loss_total": 0.5337900519371033 }, { "epoch": 3.2081218274111674, "step": 12008, "train/loss_ctc": 0.5381546020507812, "train/loss_error": 0.42534321546554565, "train/loss_total": 0.4479054808616638 }, { "epoch": 3.208388992786535, "step": 12009, "train/loss_ctc": 0.8842734098434448, "train/loss_error": 0.4391978085041046, "train/loss_total": 0.5282129645347595 }, { "epoch": 3.208656158161902, "grad_norm": 3.126739740371704, "learning_rate": 1.0759283996794016e-05, "loss": 0.513, "step": 12010 }, { "epoch": 3.208656158161902, "step": 12010, "train/loss_ctc": 0.5893359184265137, "train/loss_error": 0.410368949174881, "train/loss_total": 0.4461623430252075 }, { "epoch": 3.2089233235372694, "step": 12011, "train/loss_ctc": 0.970126748085022, "train/loss_error": 0.4493488371372223, "train/loss_total": 0.5535044074058533 }, { "epoch": 3.209190488912637, "step": 12012, "train/loss_ctc": 0.5897345542907715, "train/loss_error": 0.4929411709308624, "train/loss_total": 0.5122998356819153 }, { "epoch": 3.2094576542880042, "step": 12013, "train/loss_ctc": 0.4809497594833374, "train/loss_error": 0.4583592712879181, "train/loss_total": 0.4628773629665375 }, { "epoch": 3.2097248196633714, "step": 12014, "train/loss_ctc": 1.4388536214828491, "train/loss_error": 0.42494022846221924, "train/loss_total": 0.6277229189872742 }, { "epoch": 3.209991985038739, "step": 12015, "train/loss_ctc": 0.7993313074111938, "train/loss_error": 0.4034777879714966, "train/loss_total": 
0.48264849185943604 }, { "epoch": 3.2102591504141063, "step": 12016, "train/loss_ctc": 0.6617432832717896, "train/loss_error": 0.47407811880111694, "train/loss_total": 0.5116111636161804 }, { "epoch": 3.2105263157894735, "step": 12017, "train/loss_ctc": 0.4095485806465149, "train/loss_error": 0.4428061842918396, "train/loss_total": 0.43615466356277466 }, { "epoch": 3.210793481164841, "step": 12018, "train/loss_ctc": 0.7107625007629395, "train/loss_error": 0.41198819875717163, "train/loss_total": 0.47174304723739624 }, { "epoch": 3.2110606465402083, "step": 12019, "train/loss_ctc": 0.4616659879684448, "train/loss_error": 0.46086496114730835, "train/loss_total": 0.4610251784324646 }, { "epoch": 3.211327811915576, "grad_norm": 1.348341464996338, "learning_rate": 1.0743254074271974e-05, "loss": 0.4966, "step": 12020 }, { "epoch": 3.211327811915576, "step": 12020, "train/loss_ctc": 1.0963103771209717, "train/loss_error": 0.4358764588832855, "train/loss_total": 0.5679632425308228 }, { "epoch": 3.211594977290943, "step": 12021, "train/loss_ctc": 0.9145751595497131, "train/loss_error": 0.3815188705921173, "train/loss_total": 0.488130122423172 }, { "epoch": 3.2118621426663103, "step": 12022, "train/loss_ctc": 0.8699690699577332, "train/loss_error": 0.45364660024642944, "train/loss_total": 0.5369110703468323 }, { "epoch": 3.212129308041678, "step": 12023, "train/loss_ctc": 0.523573637008667, "train/loss_error": 0.44597119092941284, "train/loss_total": 0.4614916741847992 }, { "epoch": 3.212396473417045, "step": 12024, "train/loss_ctc": 0.5204637050628662, "train/loss_error": 0.3860228359699249, "train/loss_total": 0.4129110276699066 }, { "epoch": 3.2126636387924123, "step": 12025, "train/loss_ctc": 0.44670000672340393, "train/loss_error": 0.3743753135204315, "train/loss_total": 0.3888402581214905 }, { "epoch": 3.21293080416778, "step": 12026, "train/loss_ctc": 0.734494149684906, "train/loss_error": 0.3958452045917511, "train/loss_total": 0.46357500553131104 }, { "epoch": 
3.213197969543147, "step": 12027, "train/loss_ctc": 0.6868284940719604, "train/loss_error": 0.49998611211776733, "train/loss_total": 0.537354588508606 }, { "epoch": 3.2134651349185144, "step": 12028, "train/loss_ctc": 0.3732355237007141, "train/loss_error": 0.4190346598625183, "train/loss_total": 0.409874826669693 }, { "epoch": 3.213732300293882, "step": 12029, "train/loss_ctc": 0.2026360034942627, "train/loss_error": 0.41098272800445557, "train/loss_total": 0.36931338906288147 }, { "epoch": 3.213999465669249, "grad_norm": 3.785379409790039, "learning_rate": 1.0727224151749934e-05, "loss": 0.4636, "step": 12030 }, { "epoch": 3.213999465669249, "step": 12030, "train/loss_ctc": 0.6088128685951233, "train/loss_error": 0.427333265542984, "train/loss_total": 0.46362918615341187 }, { "epoch": 3.214266631044617, "step": 12031, "train/loss_ctc": 0.9873245358467102, "train/loss_error": 0.4776737689971924, "train/loss_total": 0.579603910446167 }, { "epoch": 3.214533796419984, "step": 12032, "train/loss_ctc": 0.9615438580513, "train/loss_error": 0.4348755478858948, "train/loss_total": 0.5402092337608337 }, { "epoch": 3.2148009617953512, "step": 12033, "train/loss_ctc": 0.3916650414466858, "train/loss_error": 0.5075605511665344, "train/loss_total": 0.48438146710395813 }, { "epoch": 3.215068127170719, "step": 12034, "train/loss_ctc": 0.5504186153411865, "train/loss_error": 0.45860573649406433, "train/loss_total": 0.47696831822395325 }, { "epoch": 3.215335292546086, "step": 12035, "train/loss_ctc": 0.7562703490257263, "train/loss_error": 0.5543645620346069, "train/loss_total": 0.5947457551956177 }, { "epoch": 3.2156024579214533, "step": 12036, "train/loss_ctc": 0.7111270427703857, "train/loss_error": 0.3838811218738556, "train/loss_total": 0.44933032989501953 }, { "epoch": 3.215869623296821, "step": 12037, "train/loss_ctc": 2.015194892883301, "train/loss_error": 0.43553677201271057, "train/loss_total": 0.7514684200286865 }, { "epoch": 3.216136788672188, "step": 12038, 
"train/loss_ctc": 0.9773569107055664, "train/loss_error": 0.45971813797950745, "train/loss_total": 0.5632458925247192 }, { "epoch": 3.2164039540475553, "step": 12039, "train/loss_ctc": 0.5404550433158875, "train/loss_error": 0.47132790088653564, "train/loss_total": 0.48515331745147705 }, { "epoch": 3.216671119422923, "grad_norm": 2.208725929260254, "learning_rate": 1.0711194229227892e-05, "loss": 0.5389, "step": 12040 }, { "epoch": 3.216671119422923, "step": 12040, "train/loss_ctc": 0.6703149080276489, "train/loss_error": 0.47083356976509094, "train/loss_total": 0.5107298493385315 }, { "epoch": 3.21693828479829, "step": 12041, "train/loss_ctc": 0.3681541979312897, "train/loss_error": 0.49086201190948486, "train/loss_total": 0.4663204550743103 }, { "epoch": 3.2172054501736573, "step": 12042, "train/loss_ctc": 0.5096331238746643, "train/loss_error": 0.47302332520484924, "train/loss_total": 0.4803452789783478 }, { "epoch": 3.217472615549025, "step": 12043, "train/loss_ctc": 0.6896098852157593, "train/loss_error": 0.4279225766658783, "train/loss_total": 0.4802600145339966 }, { "epoch": 3.217739780924392, "step": 12044, "train/loss_ctc": 0.4675152897834778, "train/loss_error": 0.4068334698677063, "train/loss_total": 0.4189698398113251 }, { "epoch": 3.2180069462997594, "step": 12045, "train/loss_ctc": 1.3344497680664062, "train/loss_error": 0.5234025716781616, "train/loss_total": 0.6856120228767395 }, { "epoch": 3.218274111675127, "step": 12046, "train/loss_ctc": 0.37833553552627563, "train/loss_error": 0.37578240036964417, "train/loss_total": 0.37629303336143494 }, { "epoch": 3.218541277050494, "step": 12047, "train/loss_ctc": 1.0729581117630005, "train/loss_error": 0.4926137924194336, "train/loss_total": 0.6086826324462891 }, { "epoch": 3.2188084424258614, "step": 12048, "train/loss_ctc": 0.40592944622039795, "train/loss_error": 0.4416425824165344, "train/loss_total": 0.43449997901916504 }, { "epoch": 3.219075607801229, "step": 12049, "train/loss_ctc": 
1.1427816152572632, "train/loss_error": 0.4675590395927429, "train/loss_total": 0.602603554725647 }, { "epoch": 3.2193427731765962, "grad_norm": 9.146928787231445, "learning_rate": 1.069516430670585e-05, "loss": 0.5064, "step": 12050 }, { "epoch": 3.2193427731765962, "step": 12050, "train/loss_ctc": 0.9598621129989624, "train/loss_error": 0.5138791799545288, "train/loss_total": 0.6030757427215576 }, { "epoch": 3.219609938551964, "step": 12051, "train/loss_ctc": 0.7207139730453491, "train/loss_error": 0.472283273935318, "train/loss_total": 0.5219694375991821 }, { "epoch": 3.219877103927331, "step": 12052, "train/loss_ctc": 0.839846134185791, "train/loss_error": 0.4372606873512268, "train/loss_total": 0.5177778005599976 }, { "epoch": 3.2201442693026983, "step": 12053, "train/loss_ctc": 0.4353578984737396, "train/loss_error": 0.4104117453098297, "train/loss_total": 0.41540098190307617 }, { "epoch": 3.220411434678066, "step": 12054, "train/loss_ctc": 0.7136406898498535, "train/loss_error": 0.421823650598526, "train/loss_total": 0.4801870584487915 }, { "epoch": 3.220678600053433, "step": 12055, "train/loss_ctc": 1.4098148345947266, "train/loss_error": 0.4468195140361786, "train/loss_total": 0.6394186019897461 }, { "epoch": 3.2209457654288003, "step": 12056, "train/loss_ctc": 0.927005410194397, "train/loss_error": 0.447253942489624, "train/loss_total": 0.5432042479515076 }, { "epoch": 3.221212930804168, "step": 12057, "train/loss_ctc": 0.3873421549797058, "train/loss_error": 0.46541130542755127, "train/loss_total": 0.44979748129844666 }, { "epoch": 3.221480096179535, "step": 12058, "train/loss_ctc": 0.5830394625663757, "train/loss_error": 0.4037938714027405, "train/loss_total": 0.439642995595932 }, { "epoch": 3.2217472615549023, "step": 12059, "train/loss_ctc": 0.3761567771434784, "train/loss_error": 0.38739216327667236, "train/loss_total": 0.38514506816864014 }, { "epoch": 3.22201442693027, "grad_norm": 5.354017734527588, "learning_rate": 1.0679134384183811e-05, "loss": 
0.4996, "step": 12060 }, { "epoch": 3.22201442693027, "step": 12060, "train/loss_ctc": 0.5018616914749146, "train/loss_error": 0.3586443364620209, "train/loss_total": 0.38728782534599304 }, { "epoch": 3.222281592305637, "step": 12061, "train/loss_ctc": 0.9379919171333313, "train/loss_error": 0.5175073742866516, "train/loss_total": 0.6016042828559875 }, { "epoch": 3.2225487576810043, "step": 12062, "train/loss_ctc": 0.4803445637226105, "train/loss_error": 0.4315793216228485, "train/loss_total": 0.4413323700428009 }, { "epoch": 3.222815923056372, "step": 12063, "train/loss_ctc": 0.8197156190872192, "train/loss_error": 0.4390738308429718, "train/loss_total": 0.5152022242546082 }, { "epoch": 3.223083088431739, "step": 12064, "train/loss_ctc": 1.0692145824432373, "train/loss_error": 0.43978169560432434, "train/loss_total": 0.5656682848930359 }, { "epoch": 3.223350253807107, "step": 12065, "train/loss_ctc": 1.019901156425476, "train/loss_error": 0.46714240312576294, "train/loss_total": 0.5776941776275635 }, { "epoch": 3.223617419182474, "step": 12066, "train/loss_ctc": 0.3256179690361023, "train/loss_error": 0.4810119569301605, "train/loss_total": 0.44993317127227783 }, { "epoch": 3.223884584557841, "step": 12067, "train/loss_ctc": 0.577943742275238, "train/loss_error": 0.5123298168182373, "train/loss_total": 0.5254526138305664 }, { "epoch": 3.224151749933209, "step": 12068, "train/loss_ctc": 0.703879714012146, "train/loss_error": 0.4294891953468323, "train/loss_total": 0.484367311000824 }, { "epoch": 3.224418915308576, "step": 12069, "train/loss_ctc": 0.4125136733055115, "train/loss_error": 0.44200506806373596, "train/loss_total": 0.43610680103302 }, { "epoch": 3.2246860806839432, "grad_norm": 2.042487621307373, "learning_rate": 1.066310446166177e-05, "loss": 0.4985, "step": 12070 }, { "epoch": 3.2246860806839432, "step": 12070, "train/loss_ctc": 0.8536813259124756, "train/loss_error": 0.47580137848854065, "train/loss_total": 0.5513773560523987 }, { "epoch": 
3.224953246059311, "step": 12071, "train/loss_ctc": 1.118943452835083, "train/loss_error": 0.4839314818382263, "train/loss_total": 0.6109338998794556 }, { "epoch": 3.225220411434678, "step": 12072, "train/loss_ctc": 0.4991556406021118, "train/loss_error": 0.44752150774002075, "train/loss_total": 0.45784834027290344 }, { "epoch": 3.2254875768100453, "step": 12073, "train/loss_ctc": 1.0822908878326416, "train/loss_error": 0.48449498414993286, "train/loss_total": 0.6040541529655457 }, { "epoch": 3.225754742185413, "step": 12074, "train/loss_ctc": 0.2630411386489868, "train/loss_error": 0.42918816208839417, "train/loss_total": 0.3959587514400482 }, { "epoch": 3.22602190756078, "step": 12075, "train/loss_ctc": 0.7710279226303101, "train/loss_error": 0.38606885075569153, "train/loss_total": 0.4630606770515442 }, { "epoch": 3.2262890729361473, "step": 12076, "train/loss_ctc": 1.536884069442749, "train/loss_error": 0.5305094122886658, "train/loss_total": 0.7317843437194824 }, { "epoch": 3.226556238311515, "step": 12077, "train/loss_ctc": 1.5413373708724976, "train/loss_error": 0.4872075319290161, "train/loss_total": 0.6980335116386414 }, { "epoch": 3.226823403686882, "step": 12078, "train/loss_ctc": 0.39987194538116455, "train/loss_error": 0.4620624780654907, "train/loss_total": 0.44962435960769653 }, { "epoch": 3.2270905690622493, "step": 12079, "train/loss_ctc": 0.5505576133728027, "train/loss_error": 0.42701056599617004, "train/loss_total": 0.4517199993133545 }, { "epoch": 3.227357734437617, "grad_norm": 2.425959825515747, "learning_rate": 1.0647074539139728e-05, "loss": 0.5414, "step": 12080 }, { "epoch": 3.227357734437617, "step": 12080, "train/loss_ctc": 0.5374184250831604, "train/loss_error": 0.403766393661499, "train/loss_total": 0.43049681186676025 }, { "epoch": 3.227624899812984, "step": 12081, "train/loss_ctc": 0.830994725227356, "train/loss_error": 0.4991343319416046, "train/loss_total": 0.5655063986778259 }, { "epoch": 3.2278920651883514, "step": 12082, 
"train/loss_ctc": 0.6731610894203186, "train/loss_error": 0.42027124762535095, "train/loss_total": 0.4708492159843445 }, { "epoch": 3.228159230563719, "step": 12083, "train/loss_ctc": 0.812057375907898, "train/loss_error": 0.3926513195037842, "train/loss_total": 0.47653254866600037 }, { "epoch": 3.228426395939086, "step": 12084, "train/loss_ctc": 0.8162510991096497, "train/loss_error": 0.417845219373703, "train/loss_total": 0.4975264072418213 }, { "epoch": 3.228693561314454, "step": 12085, "train/loss_ctc": 0.7843761444091797, "train/loss_error": 0.450408935546875, "train/loss_total": 0.5172023773193359 }, { "epoch": 3.228960726689821, "step": 12086, "train/loss_ctc": 0.4741368591785431, "train/loss_error": 0.419988751411438, "train/loss_total": 0.4308183789253235 }, { "epoch": 3.2292278920651882, "step": 12087, "train/loss_ctc": 1.7862217426300049, "train/loss_error": 0.4767056107521057, "train/loss_total": 0.7386088371276855 }, { "epoch": 3.229495057440556, "step": 12088, "train/loss_ctc": 0.5497358441352844, "train/loss_error": 0.40437552332878113, "train/loss_total": 0.43344759941101074 }, { "epoch": 3.229762222815923, "step": 12089, "train/loss_ctc": 2.1281895637512207, "train/loss_error": 0.4924323856830597, "train/loss_total": 0.8195838332176208 }, { "epoch": 3.2300293881912903, "grad_norm": 3.040001392364502, "learning_rate": 1.0631044616617687e-05, "loss": 0.5381, "step": 12090 }, { "epoch": 3.2300293881912903, "step": 12090, "train/loss_ctc": 0.5387374758720398, "train/loss_error": 0.4719114303588867, "train/loss_total": 0.48527663946151733 }, { "epoch": 3.230296553566658, "step": 12091, "train/loss_ctc": 1.490645408630371, "train/loss_error": 0.4853922724723816, "train/loss_total": 0.6864429116249084 }, { "epoch": 3.230563718942025, "step": 12092, "train/loss_ctc": 0.645354688167572, "train/loss_error": 0.43049177527427673, "train/loss_total": 0.47346436977386475 }, { "epoch": 3.2308308843173923, "step": 12093, "train/loss_ctc": 0.9930712580680847, 
"train/loss_error": 0.474068284034729, "train/loss_total": 0.5778688788414001 }, { "epoch": 3.23109804969276, "step": 12094, "train/loss_ctc": 0.6217436194419861, "train/loss_error": 0.4663415551185608, "train/loss_total": 0.4974219799041748 }, { "epoch": 3.231365215068127, "step": 12095, "train/loss_ctc": 0.4468284249305725, "train/loss_error": 0.4281734824180603, "train/loss_total": 0.43190449476242065 }, { "epoch": 3.2316323804434943, "step": 12096, "train/loss_ctc": 0.5046355724334717, "train/loss_error": 0.41422155499458313, "train/loss_total": 0.43230435252189636 }, { "epoch": 3.231899545818862, "step": 12097, "train/loss_ctc": 0.7076188325881958, "train/loss_error": 0.36723920702934265, "train/loss_total": 0.4353151321411133 }, { "epoch": 3.232166711194229, "step": 12098, "train/loss_ctc": 0.9305149912834167, "train/loss_error": 0.47781965136528015, "train/loss_total": 0.5683587193489075 }, { "epoch": 3.232433876569597, "step": 12099, "train/loss_ctc": 1.0837972164154053, "train/loss_error": 0.4390629827976227, "train/loss_total": 0.5680098533630371 }, { "epoch": 3.232701041944964, "grad_norm": 3.12939715385437, "learning_rate": 1.0615014694095645e-05, "loss": 0.5156, "step": 12100 }, { "epoch": 3.232701041944964, "step": 12100, "train/loss_ctc": 0.8745365738868713, "train/loss_error": 0.4036029875278473, "train/loss_total": 0.4977896809577942 }, { "epoch": 3.232968207320331, "step": 12101, "train/loss_ctc": 1.2666419744491577, "train/loss_error": 0.38566121459007263, "train/loss_total": 0.5618574023246765 }, { "epoch": 3.233235372695699, "step": 12102, "train/loss_ctc": 0.4185379147529602, "train/loss_error": 0.40299341082572937, "train/loss_total": 0.4061022996902466 }, { "epoch": 3.233502538071066, "step": 12103, "train/loss_ctc": 1.1019482612609863, "train/loss_error": 0.4145209491252899, "train/loss_total": 0.5520064234733582 }, { "epoch": 3.233769703446433, "step": 12104, "train/loss_ctc": 0.3983030319213867, "train/loss_error": 0.418867826461792, 
"train/loss_total": 0.41475486755371094 }, { "epoch": 3.234036868821801, "step": 12105, "train/loss_ctc": 0.6908400058746338, "train/loss_error": 0.44946208596229553, "train/loss_total": 0.49773767590522766 }, { "epoch": 3.234304034197168, "step": 12106, "train/loss_ctc": 0.525439977645874, "train/loss_error": 0.4634946286678314, "train/loss_total": 0.47588369250297546 }, { "epoch": 3.2345711995725352, "step": 12107, "train/loss_ctc": 0.8650960326194763, "train/loss_error": 0.4116808772087097, "train/loss_total": 0.502363920211792 }, { "epoch": 3.234838364947903, "step": 12108, "train/loss_ctc": 0.5259354114532471, "train/loss_error": 0.4740125834941864, "train/loss_total": 0.48439717292785645 }, { "epoch": 3.23510553032327, "step": 12109, "train/loss_ctc": 0.7832269668579102, "train/loss_error": 0.4680593013763428, "train/loss_total": 0.5310928225517273 }, { "epoch": 3.2353726956986373, "grad_norm": 2.3002734184265137, "learning_rate": 1.0598984771573604e-05, "loss": 0.4924, "step": 12110 }, { "epoch": 3.2353726956986373, "step": 12110, "train/loss_ctc": 1.4071663618087769, "train/loss_error": 0.4974673092365265, "train/loss_total": 0.6794071197509766 }, { "epoch": 3.235639861074005, "step": 12111, "train/loss_ctc": 0.605144739151001, "train/loss_error": 0.36363375186920166, "train/loss_total": 0.411935955286026 }, { "epoch": 3.235907026449372, "step": 12112, "train/loss_ctc": 0.579727053642273, "train/loss_error": 0.4999479651451111, "train/loss_total": 0.5159037709236145 }, { "epoch": 3.2361741918247393, "step": 12113, "train/loss_ctc": 0.6791779398918152, "train/loss_error": 0.3676225543022156, "train/loss_total": 0.4299336373806 }, { "epoch": 3.236441357200107, "step": 12114, "train/loss_ctc": 0.8492143154144287, "train/loss_error": 0.42227134108543396, "train/loss_total": 0.507659912109375 }, { "epoch": 3.236708522575474, "step": 12115, "train/loss_ctc": 0.8187108039855957, "train/loss_error": 0.4621967077255249, "train/loss_total": 0.533499538898468 }, { 
"epoch": 3.2369756879508413, "step": 12116, "train/loss_ctc": 0.6244848966598511, "train/loss_error": 0.4368845224380493, "train/loss_total": 0.47440460324287415 }, { "epoch": 3.237242853326209, "step": 12117, "train/loss_ctc": 0.3903801441192627, "train/loss_error": 0.4204239845275879, "train/loss_total": 0.41441524028778076 }, { "epoch": 3.237510018701576, "step": 12118, "train/loss_ctc": 0.3670523464679718, "train/loss_error": 0.4784047305583954, "train/loss_total": 0.45613425970077515 }, { "epoch": 3.237777184076944, "step": 12119, "train/loss_ctc": 1.1373310089111328, "train/loss_error": 0.4453831613063812, "train/loss_total": 0.5837727189064026 }, { "epoch": 3.238044349452311, "grad_norm": 1.637576937675476, "learning_rate": 1.0582954849051563e-05, "loss": 0.5007, "step": 12120 }, { "epoch": 3.238044349452311, "step": 12120, "train/loss_ctc": 0.7671518325805664, "train/loss_error": 0.39482271671295166, "train/loss_total": 0.46928855776786804 }, { "epoch": 3.238311514827678, "step": 12121, "train/loss_ctc": 0.8300023078918457, "train/loss_error": 0.5095524191856384, "train/loss_total": 0.5736424326896667 }, { "epoch": 3.238578680203046, "step": 12122, "train/loss_ctc": 0.5135269165039062, "train/loss_error": 0.4363442361354828, "train/loss_total": 0.4517807960510254 }, { "epoch": 3.238845845578413, "step": 12123, "train/loss_ctc": 0.5336316823959351, "train/loss_error": 0.45761173963546753, "train/loss_total": 0.47281575202941895 }, { "epoch": 3.2391130109537802, "step": 12124, "train/loss_ctc": 0.8450499773025513, "train/loss_error": 0.41861873865127563, "train/loss_total": 0.5039049983024597 }, { "epoch": 3.239380176329148, "step": 12125, "train/loss_ctc": 0.8368061780929565, "train/loss_error": 0.4447349011898041, "train/loss_total": 0.5231491923332214 }, { "epoch": 3.239647341704515, "step": 12126, "train/loss_ctc": 0.7859277725219727, "train/loss_error": 0.3914486765861511, "train/loss_total": 0.47034451365470886 }, { "epoch": 3.2399145070798823, "step": 
12127, "train/loss_ctc": 0.6012007594108582, "train/loss_error": 0.4508211016654968, "train/loss_total": 0.48089703917503357 }, { "epoch": 3.24018167245525, "step": 12128, "train/loss_ctc": 0.7606632709503174, "train/loss_error": 0.4692540764808655, "train/loss_total": 0.5275359153747559 }, { "epoch": 3.240448837830617, "step": 12129, "train/loss_ctc": 0.3650561571121216, "train/loss_error": 0.47110477089881897, "train/loss_total": 0.44989508390426636 }, { "epoch": 3.2407160032059843, "grad_norm": 1.5561347007751465, "learning_rate": 1.0566924926529521e-05, "loss": 0.4923, "step": 12130 }, { "epoch": 3.2407160032059843, "step": 12130, "train/loss_ctc": 0.3875134587287903, "train/loss_error": 0.44608139991760254, "train/loss_total": 0.434367835521698 }, { "epoch": 3.240983168581352, "step": 12131, "train/loss_ctc": 0.5370248556137085, "train/loss_error": 0.39802882075309753, "train/loss_total": 0.4258280396461487 }, { "epoch": 3.241250333956719, "step": 12132, "train/loss_ctc": 0.7512187957763672, "train/loss_error": 0.5013645887374878, "train/loss_total": 0.5513354539871216 }, { "epoch": 3.2415174993320868, "step": 12133, "train/loss_ctc": 0.40700575709342957, "train/loss_error": 0.37858861684799194, "train/loss_total": 0.384272038936615 }, { "epoch": 3.241784664707454, "step": 12134, "train/loss_ctc": 0.5591887831687927, "train/loss_error": 0.4644111096858978, "train/loss_total": 0.4833666682243347 }, { "epoch": 3.242051830082821, "step": 12135, "train/loss_ctc": 0.4072401225566864, "train/loss_error": 0.4609225392341614, "train/loss_total": 0.4501860439777374 }, { "epoch": 3.242318995458189, "step": 12136, "train/loss_ctc": 0.514292299747467, "train/loss_error": 0.41963809728622437, "train/loss_total": 0.43856894969940186 }, { "epoch": 3.242586160833556, "step": 12137, "train/loss_ctc": 0.7747926712036133, "train/loss_error": 0.3918883800506592, "train/loss_total": 0.4684692621231079 }, { "epoch": 3.242853326208923, "step": 12138, "train/loss_ctc": 
0.13370555639266968, "train/loss_error": 0.3868672847747803, "train/loss_total": 0.3362349569797516 }, { "epoch": 3.243120491584291, "step": 12139, "train/loss_ctc": 1.1204829216003418, "train/loss_error": 0.4970024824142456, "train/loss_total": 0.6216985583305359 }, { "epoch": 3.243387656959658, "grad_norm": 3.5772902965545654, "learning_rate": 1.0550895004007481e-05, "loss": 0.4594, "step": 12140 }, { "epoch": 3.243387656959658, "step": 12140, "train/loss_ctc": 0.5475326180458069, "train/loss_error": 0.4898735582828522, "train/loss_total": 0.5014053583145142 }, { "epoch": 3.2436548223350252, "step": 12141, "train/loss_ctc": 1.3175557851791382, "train/loss_error": 0.495086669921875, "train/loss_total": 0.6595804691314697 }, { "epoch": 3.243921987710393, "step": 12142, "train/loss_ctc": 2.1547915935516357, "train/loss_error": 0.557614266872406, "train/loss_total": 0.8770497441291809 }, { "epoch": 3.24418915308576, "step": 12143, "train/loss_ctc": 1.2347197532653809, "train/loss_error": 0.48199546337127686, "train/loss_total": 0.6325403451919556 }, { "epoch": 3.2444563184611273, "step": 12144, "train/loss_ctc": 0.4655613601207733, "train/loss_error": 0.49728280305862427, "train/loss_total": 0.49093854427337646 }, { "epoch": 3.244723483836495, "step": 12145, "train/loss_ctc": 1.3222575187683105, "train/loss_error": 0.5216687917709351, "train/loss_total": 0.6817865371704102 }, { "epoch": 3.244990649211862, "step": 12146, "train/loss_ctc": 0.6553493738174438, "train/loss_error": 0.508578896522522, "train/loss_total": 0.5379329919815063 }, { "epoch": 3.2452578145872293, "step": 12147, "train/loss_ctc": 1.2381088733673096, "train/loss_error": 0.47717681527137756, "train/loss_total": 0.6293632388114929 }, { "epoch": 3.245524979962597, "step": 12148, "train/loss_ctc": 0.48997974395751953, "train/loss_error": 0.44135674834251404, "train/loss_total": 0.4510813355445862 }, { "epoch": 3.245792145337964, "step": 12149, "train/loss_ctc": 0.6420774459838867, "train/loss_error": 
0.4925382435321808, "train/loss_total": 0.5224460959434509 }, { "epoch": 3.2460593107133318, "grad_norm": 3.048828601837158, "learning_rate": 1.0534865081485441e-05, "loss": 0.5984, "step": 12150 }, { "epoch": 3.2460593107133318, "step": 12150, "train/loss_ctc": 0.8237390518188477, "train/loss_error": 0.44166168570518494, "train/loss_total": 0.5180771946907043 }, { "epoch": 3.246326476088699, "step": 12151, "train/loss_ctc": 0.5648857951164246, "train/loss_error": 0.397620290517807, "train/loss_total": 0.4310734272003174 }, { "epoch": 3.246593641464066, "step": 12152, "train/loss_ctc": 0.26063433289527893, "train/loss_error": 0.3715593218803406, "train/loss_total": 0.34937432408332825 }, { "epoch": 3.246860806839434, "step": 12153, "train/loss_ctc": 0.9351409673690796, "train/loss_error": 0.47653552889823914, "train/loss_total": 0.5682566165924072 }, { "epoch": 3.247127972214801, "step": 12154, "train/loss_ctc": 0.9858244061470032, "train/loss_error": 0.3955898880958557, "train/loss_total": 0.5136367678642273 }, { "epoch": 3.247395137590168, "step": 12155, "train/loss_ctc": 0.8451595306396484, "train/loss_error": 0.46255525946617126, "train/loss_total": 0.5390760898590088 }, { "epoch": 3.247662302965536, "step": 12156, "train/loss_ctc": 0.7292988300323486, "train/loss_error": 0.47346457839012146, "train/loss_total": 0.5246314406394958 }, { "epoch": 3.247929468340903, "step": 12157, "train/loss_ctc": 0.7021905183792114, "train/loss_error": 0.4148333668708801, "train/loss_total": 0.4723048210144043 }, { "epoch": 3.24819663371627, "step": 12158, "train/loss_ctc": 0.7558689117431641, "train/loss_error": 0.4143652617931366, "train/loss_total": 0.482666015625 }, { "epoch": 3.248463799091638, "step": 12159, "train/loss_ctc": 0.46137189865112305, "train/loss_error": 0.4408574104309082, "train/loss_total": 0.4449602961540222 }, { "epoch": 3.248730964467005, "grad_norm": 13.301658630371094, "learning_rate": 1.0518835158963399e-05, "loss": 0.4844, "step": 12160 }, { "epoch": 
3.248730964467005, "step": 12160, "train/loss_ctc": 1.1134766340255737, "train/loss_error": 0.4368113577365875, "train/loss_total": 0.5721444487571716 }, { "epoch": 3.2489981298423722, "step": 12161, "train/loss_ctc": 0.6439340710639954, "train/loss_error": 0.4895831048488617, "train/loss_total": 0.5204533338546753 }, { "epoch": 3.24926529521774, "step": 12162, "train/loss_ctc": 0.6231371164321899, "train/loss_error": 0.40234145522117615, "train/loss_total": 0.44650059938430786 }, { "epoch": 3.249532460593107, "step": 12163, "train/loss_ctc": 0.47619885206222534, "train/loss_error": 0.45939701795578003, "train/loss_total": 0.462757408618927 }, { "epoch": 3.2497996259684747, "step": 12164, "train/loss_ctc": 0.8608742952346802, "train/loss_error": 0.45277413725852966, "train/loss_total": 0.5343941450119019 }, { "epoch": 3.250066791343842, "step": 12165, "train/loss_ctc": 1.1363428831100464, "train/loss_error": 0.46432122588157654, "train/loss_total": 0.5987255573272705 }, { "epoch": 3.250333956719209, "step": 12166, "train/loss_ctc": 0.5672376155853271, "train/loss_error": 0.4666796922683716, "train/loss_total": 0.4867912828922272 }, { "epoch": 3.2506011220945767, "step": 12167, "train/loss_ctc": 0.6040167808532715, "train/loss_error": 0.4305960536003113, "train/loss_total": 0.4652802050113678 }, { "epoch": 3.250868287469944, "step": 12168, "train/loss_ctc": 0.479582816362381, "train/loss_error": 0.40635859966278076, "train/loss_total": 0.42100346088409424 }, { "epoch": 3.251135452845311, "step": 12169, "train/loss_ctc": 1.1926677227020264, "train/loss_error": 0.47962701320648193, "train/loss_total": 0.6222351789474487 }, { "epoch": 3.2514026182206788, "grad_norm": 2.5178894996643066, "learning_rate": 1.0502805236441357e-05, "loss": 0.513, "step": 12170 }, { "epoch": 3.2514026182206788, "step": 12170, "train/loss_ctc": 1.233488917350769, "train/loss_error": 0.447223424911499, "train/loss_total": 0.6044765710830688 }, { "epoch": 3.251669783596046, "step": 12171, 
"train/loss_ctc": 0.7940899729728699, "train/loss_error": 0.45050376653671265, "train/loss_total": 0.5192210078239441 }, { "epoch": 3.251936948971413, "step": 12172, "train/loss_ctc": 0.4993698000907898, "train/loss_error": 0.39475151896476746, "train/loss_total": 0.41567519307136536 }, { "epoch": 3.252204114346781, "step": 12173, "train/loss_ctc": 0.9524300694465637, "train/loss_error": 0.45929962396621704, "train/loss_total": 0.5579257011413574 }, { "epoch": 3.252471279722148, "step": 12174, "train/loss_ctc": 1.866729497909546, "train/loss_error": 0.4629824459552765, "train/loss_total": 0.7437318563461304 }, { "epoch": 3.252738445097515, "step": 12175, "train/loss_ctc": 0.9687294363975525, "train/loss_error": 0.5345860123634338, "train/loss_total": 0.6214147210121155 }, { "epoch": 3.253005610472883, "step": 12176, "train/loss_ctc": 1.0868310928344727, "train/loss_error": 0.45927855372428894, "train/loss_total": 0.5847890377044678 }, { "epoch": 3.25327277584825, "step": 12177, "train/loss_ctc": 1.1526012420654297, "train/loss_error": 0.505743145942688, "train/loss_total": 0.6351147890090942 }, { "epoch": 3.2535399412236172, "step": 12178, "train/loss_ctc": 0.7951363325119019, "train/loss_error": 0.4047263562679291, "train/loss_total": 0.48280835151672363 }, { "epoch": 3.253807106598985, "step": 12179, "train/loss_ctc": 0.7514604926109314, "train/loss_error": 0.41461631655693054, "train/loss_total": 0.4819851517677307 }, { "epoch": 3.254074271974352, "grad_norm": 12.115772247314453, "learning_rate": 1.0486775313919317e-05, "loss": 0.5647, "step": 12180 }, { "epoch": 3.254074271974352, "step": 12180, "train/loss_ctc": 0.8882465362548828, "train/loss_error": 0.41666728258132935, "train/loss_total": 0.5109831094741821 }, { "epoch": 3.2543414373497193, "step": 12181, "train/loss_ctc": 0.43880727887153625, "train/loss_error": 0.4113847315311432, "train/loss_total": 0.41686925292015076 }, { "epoch": 3.254608602725087, "step": 12182, "train/loss_ctc": 0.914451003074646, 
"train/loss_error": 0.48966309428215027, "train/loss_total": 0.5746207237243652 }, { "epoch": 3.254875768100454, "step": 12183, "train/loss_ctc": 1.3040169477462769, "train/loss_error": 0.4150165319442749, "train/loss_total": 0.5928165912628174 }, { "epoch": 3.2551429334758213, "step": 12184, "train/loss_ctc": 0.7311842441558838, "train/loss_error": 0.4834083020687103, "train/loss_total": 0.5329635143280029 }, { "epoch": 3.255410098851189, "step": 12185, "train/loss_ctc": 0.5956012010574341, "train/loss_error": 0.4379996955394745, "train/loss_total": 0.4695200026035309 }, { "epoch": 3.255677264226556, "step": 12186, "train/loss_ctc": 0.6522015333175659, "train/loss_error": 0.4327543377876282, "train/loss_total": 0.47664380073547363 }, { "epoch": 3.2559444296019238, "step": 12187, "train/loss_ctc": 0.23737594485282898, "train/loss_error": 0.407993882894516, "train/loss_total": 0.373870313167572 }, { "epoch": 3.256211594977291, "step": 12188, "train/loss_ctc": 1.3056447505950928, "train/loss_error": 0.40100574493408203, "train/loss_total": 0.5819335579872131 }, { "epoch": 3.256478760352658, "step": 12189, "train/loss_ctc": 0.5427801609039307, "train/loss_error": 0.47541606426239014, "train/loss_total": 0.4888888895511627 }, { "epoch": 3.256745925728026, "grad_norm": 2.2748427391052246, "learning_rate": 1.0470745391397275e-05, "loss": 0.5019, "step": 12190 }, { "epoch": 3.256745925728026, "step": 12190, "train/loss_ctc": 0.893394410610199, "train/loss_error": 0.48117968440055847, "train/loss_total": 0.5636226534843445 }, { "epoch": 3.257013091103393, "step": 12191, "train/loss_ctc": 0.6891046762466431, "train/loss_error": 0.4250149130821228, "train/loss_total": 0.4778328537940979 }, { "epoch": 3.25728025647876, "step": 12192, "train/loss_ctc": 0.3555936813354492, "train/loss_error": 0.45198196172714233, "train/loss_total": 0.4327043294906616 }, { "epoch": 3.257547421854128, "step": 12193, "train/loss_ctc": 1.0887656211853027, "train/loss_error": 0.4487384855747223, 
"train/loss_total": 0.5767439007759094 }, { "epoch": 3.257814587229495, "step": 12194, "train/loss_ctc": 0.511223554611206, "train/loss_error": 0.4027743339538574, "train/loss_total": 0.4244641661643982 }, { "epoch": 3.2580817526048627, "step": 12195, "train/loss_ctc": 0.6179380416870117, "train/loss_error": 0.4644288420677185, "train/loss_total": 0.4951306879520416 }, { "epoch": 3.25834891798023, "step": 12196, "train/loss_ctc": 0.6709978580474854, "train/loss_error": 0.5151121616363525, "train/loss_total": 0.546289324760437 }, { "epoch": 3.258616083355597, "step": 12197, "train/loss_ctc": 0.4312900900840759, "train/loss_error": 0.43997350335121155, "train/loss_total": 0.4382368326187134 }, { "epoch": 3.2588832487309647, "step": 12198, "train/loss_ctc": 0.8044552803039551, "train/loss_error": 0.4587063789367676, "train/loss_total": 0.527856171131134 }, { "epoch": 3.259150414106332, "step": 12199, "train/loss_ctc": 0.534544825553894, "train/loss_error": 0.4920828938484192, "train/loss_total": 0.5005753040313721 }, { "epoch": 3.259417579481699, "grad_norm": 1.794873833656311, "learning_rate": 1.0454715468875233e-05, "loss": 0.4983, "step": 12200 }, { "epoch": 3.259417579481699, "step": 12200, "train/loss_ctc": 1.0141031742095947, "train/loss_error": 0.5153110027313232, "train/loss_total": 0.6150694489479065 }, { "epoch": 3.2596847448570667, "step": 12201, "train/loss_ctc": 0.8237002491950989, "train/loss_error": 0.39775699377059937, "train/loss_total": 0.48294565081596375 }, { "epoch": 3.259951910232434, "step": 12202, "train/loss_ctc": 1.583916187286377, "train/loss_error": 0.5058748126029968, "train/loss_total": 0.7214831113815308 }, { "epoch": 3.260219075607801, "step": 12203, "train/loss_ctc": 0.47697460651397705, "train/loss_error": 0.42750659584999084, "train/loss_total": 0.437400221824646 }, { "epoch": 3.2604862409831687, "step": 12204, "train/loss_ctc": 0.8558509945869446, "train/loss_error": 0.3888106346130371, "train/loss_total": 0.4822187125682831 }, { 
"epoch": 3.260753406358536, "step": 12205, "train/loss_ctc": 1.1648459434509277, "train/loss_error": 0.43110474944114685, "train/loss_total": 0.5778529644012451 }, { "epoch": 3.261020571733903, "step": 12206, "train/loss_ctc": 0.8260532021522522, "train/loss_error": 0.433586061000824, "train/loss_total": 0.5120794773101807 }, { "epoch": 3.261287737109271, "step": 12207, "train/loss_ctc": 0.9248452186584473, "train/loss_error": 0.4339354634284973, "train/loss_total": 0.5321174263954163 }, { "epoch": 3.261554902484638, "step": 12208, "train/loss_ctc": 0.8520944118499756, "train/loss_error": 0.4523468017578125, "train/loss_total": 0.5322962999343872 }, { "epoch": 3.261822067860005, "step": 12209, "train/loss_ctc": 0.7690390348434448, "train/loss_error": 0.40523651242256165, "train/loss_total": 0.4779970049858093 }, { "epoch": 3.262089233235373, "grad_norm": 1.9563353061676025, "learning_rate": 1.0438685546353193e-05, "loss": 0.5371, "step": 12210 }, { "epoch": 3.262089233235373, "step": 12210, "train/loss_ctc": 0.5279998779296875, "train/loss_error": 0.48944514989852905, "train/loss_total": 0.4971560835838318 }, { "epoch": 3.26235639861074, "step": 12211, "train/loss_ctc": 0.8617339730262756, "train/loss_error": 0.47372984886169434, "train/loss_total": 0.5513306856155396 }, { "epoch": 3.262623563986107, "step": 12212, "train/loss_ctc": 0.7918224334716797, "train/loss_error": 0.4183521866798401, "train/loss_total": 0.49304622411727905 }, { "epoch": 3.262890729361475, "step": 12213, "train/loss_ctc": 0.5241031646728516, "train/loss_error": 0.45481252670288086, "train/loss_total": 0.46867066621780396 }, { "epoch": 3.263157894736842, "step": 12214, "train/loss_ctc": 0.7530378699302673, "train/loss_error": 0.40780970454216003, "train/loss_total": 0.4768553376197815 }, { "epoch": 3.2634250601122092, "step": 12215, "train/loss_ctc": 1.2956290245056152, "train/loss_error": 0.4667375683784485, "train/loss_total": 0.6325158476829529 }, { "epoch": 3.263692225487577, "step": 
12216, "train/loss_ctc": 0.8611166477203369, "train/loss_error": 0.49694740772247314, "train/loss_total": 0.5697813034057617 }, { "epoch": 3.263959390862944, "step": 12217, "train/loss_ctc": 1.0462497472763062, "train/loss_error": 0.39627471566200256, "train/loss_total": 0.5262697339057922 }, { "epoch": 3.2642265562383117, "step": 12218, "train/loss_ctc": 0.6004478931427002, "train/loss_error": 0.45679670572280884, "train/loss_total": 0.4855269491672516 }, { "epoch": 3.264493721613679, "step": 12219, "train/loss_ctc": 0.8749335408210754, "train/loss_error": 0.46983012557029724, "train/loss_total": 0.5508508086204529 }, { "epoch": 3.264760886989046, "grad_norm": 2.643120050430298, "learning_rate": 1.0422655623831151e-05, "loss": 0.5252, "step": 12220 }, { "epoch": 3.264760886989046, "step": 12220, "train/loss_ctc": 1.236872911453247, "train/loss_error": 0.471676230430603, "train/loss_total": 0.6247155666351318 }, { "epoch": 3.2650280523644137, "step": 12221, "train/loss_ctc": 1.1365301609039307, "train/loss_error": 0.45203348994255066, "train/loss_total": 0.5889328718185425 }, { "epoch": 3.265295217739781, "step": 12222, "train/loss_ctc": 0.4791974425315857, "train/loss_error": 0.4023984372615814, "train/loss_total": 0.4177582561969757 }, { "epoch": 3.265562383115148, "step": 12223, "train/loss_ctc": 0.6927392482757568, "train/loss_error": 0.4621469974517822, "train/loss_total": 0.5082654356956482 }, { "epoch": 3.2658295484905158, "step": 12224, "train/loss_ctc": 0.45151758193969727, "train/loss_error": 0.42384859919548035, "train/loss_total": 0.4293823838233948 }, { "epoch": 3.266096713865883, "step": 12225, "train/loss_ctc": 0.5191934108734131, "train/loss_error": 0.43416717648506165, "train/loss_total": 0.45117244124412537 }, { "epoch": 3.26636387924125, "step": 12226, "train/loss_ctc": 1.0087047815322876, "train/loss_error": 0.4571225345134735, "train/loss_total": 0.5674390196800232 }, { "epoch": 3.266631044616618, "step": 12227, "train/loss_ctc": 
0.9052841663360596, "train/loss_error": 0.41954633593559265, "train/loss_total": 0.5166938900947571 }, { "epoch": 3.266898209991985, "step": 12228, "train/loss_ctc": 0.5863431692123413, "train/loss_error": 0.37266984581947327, "train/loss_total": 0.4154044985771179 }, { "epoch": 3.2671653753673526, "step": 12229, "train/loss_ctc": 0.7746537923812866, "train/loss_error": 0.48190295696258545, "train/loss_total": 0.5404531359672546 }, { "epoch": 3.26743254074272, "grad_norm": 3.3896899223327637, "learning_rate": 1.040662570130911e-05, "loss": 0.506, "step": 12230 }, { "epoch": 3.26743254074272, "step": 12230, "train/loss_ctc": 0.6338176131248474, "train/loss_error": 0.46166232228279114, "train/loss_total": 0.49609339237213135 }, { "epoch": 3.267699706118087, "step": 12231, "train/loss_ctc": 0.5148963928222656, "train/loss_error": 0.43604403734207153, "train/loss_total": 0.4518145024776459 }, { "epoch": 3.2679668714934547, "step": 12232, "train/loss_ctc": 0.6672970056533813, "train/loss_error": 0.43848904967308044, "train/loss_total": 0.48425066471099854 }, { "epoch": 3.268234036868822, "step": 12233, "train/loss_ctc": 0.2698732614517212, "train/loss_error": 0.43623319268226624, "train/loss_total": 0.40296122431755066 }, { "epoch": 3.268501202244189, "step": 12234, "train/loss_ctc": 0.7751405835151672, "train/loss_error": 0.44876089692115784, "train/loss_total": 0.5140368342399597 }, { "epoch": 3.2687683676195567, "step": 12235, "train/loss_ctc": 0.48302149772644043, "train/loss_error": 0.46346810460090637, "train/loss_total": 0.46737879514694214 }, { "epoch": 3.269035532994924, "step": 12236, "train/loss_ctc": 1.152936577796936, "train/loss_error": 0.42154034972190857, "train/loss_total": 0.5678195953369141 }, { "epoch": 3.269302698370291, "step": 12237, "train/loss_ctc": 0.8628427982330322, "train/loss_error": 0.42486000061035156, "train/loss_total": 0.5124565362930298 }, { "epoch": 3.2695698637456587, "step": 12238, "train/loss_ctc": 1.1971089839935303, 
"train/loss_error": 0.42904797196388245, "train/loss_total": 0.5826601982116699 }, { "epoch": 3.269837029121026, "step": 12239, "train/loss_ctc": 0.23507949709892273, "train/loss_error": 0.4097631275653839, "train/loss_total": 0.3748264014720917 }, { "epoch": 3.270104194496393, "grad_norm": 5.14388370513916, "learning_rate": 1.039059577878707e-05, "loss": 0.4854, "step": 12240 }, { "epoch": 3.270104194496393, "step": 12240, "train/loss_ctc": 0.6354731917381287, "train/loss_error": 0.49766141176223755, "train/loss_total": 0.5252237915992737 }, { "epoch": 3.2703713598717608, "step": 12241, "train/loss_ctc": 0.5944095849990845, "train/loss_error": 0.43582943081855774, "train/loss_total": 0.46754544973373413 }, { "epoch": 3.270638525247128, "step": 12242, "train/loss_ctc": 0.6163767576217651, "train/loss_error": 0.3881244659423828, "train/loss_total": 0.4337749481201172 }, { "epoch": 3.270905690622495, "step": 12243, "train/loss_ctc": 1.0552277565002441, "train/loss_error": 0.4690777361392975, "train/loss_total": 0.5863077640533447 }, { "epoch": 3.271172855997863, "step": 12244, "train/loss_ctc": 0.7899773120880127, "train/loss_error": 0.40364113450050354, "train/loss_total": 0.4809083640575409 }, { "epoch": 3.27144002137323, "step": 12245, "train/loss_ctc": 1.455039620399475, "train/loss_error": 0.4651586711406708, "train/loss_total": 0.6631348729133606 }, { "epoch": 3.271707186748597, "step": 12246, "train/loss_ctc": 0.8707229495048523, "train/loss_error": 0.39823195338249207, "train/loss_total": 0.49273017048835754 }, { "epoch": 3.271974352123965, "step": 12247, "train/loss_ctc": 0.7162467241287231, "train/loss_error": 0.40805816650390625, "train/loss_total": 0.4696958661079407 }, { "epoch": 3.272241517499332, "step": 12248, "train/loss_ctc": 0.9507438540458679, "train/loss_error": 0.426895409822464, "train/loss_total": 0.5316650867462158 }, { "epoch": 3.272508682874699, "step": 12249, "train/loss_ctc": 1.3060522079467773, "train/loss_error": 0.43381041288375854, 
"train/loss_total": 0.6082587838172913 }, { "epoch": 3.272775848250067, "grad_norm": 2.1295337677001953, "learning_rate": 1.0374565856265029e-05, "loss": 0.5259, "step": 12250 }, { "epoch": 3.272775848250067, "step": 12250, "train/loss_ctc": 0.37708109617233276, "train/loss_error": 0.433858722448349, "train/loss_total": 0.4225032329559326 }, { "epoch": 3.273043013625434, "step": 12251, "train/loss_ctc": 0.7051122784614563, "train/loss_error": 0.40643244981765747, "train/loss_total": 0.4661684036254883 }, { "epoch": 3.2733101790008017, "step": 12252, "train/loss_ctc": 0.5469973087310791, "train/loss_error": 0.46493402123451233, "train/loss_total": 0.4813466966152191 }, { "epoch": 3.273577344376169, "step": 12253, "train/loss_ctc": 1.1077589988708496, "train/loss_error": 0.46549761295318604, "train/loss_total": 0.5939499139785767 }, { "epoch": 3.273844509751536, "step": 12254, "train/loss_ctc": 0.7295761108398438, "train/loss_error": 0.42947378754615784, "train/loss_total": 0.489494264125824 }, { "epoch": 3.2741116751269037, "step": 12255, "train/loss_ctc": 1.2191627025604248, "train/loss_error": 0.5191643834114075, "train/loss_total": 0.6591640710830688 }, { "epoch": 3.274378840502271, "step": 12256, "train/loss_ctc": 0.8920135498046875, "train/loss_error": 0.4548882246017456, "train/loss_total": 0.542313277721405 }, { "epoch": 3.274646005877638, "step": 12257, "train/loss_ctc": 0.6948871612548828, "train/loss_error": 0.4515032172203064, "train/loss_total": 0.5001800060272217 }, { "epoch": 3.2749131712530057, "step": 12258, "train/loss_ctc": 1.1188948154449463, "train/loss_error": 0.45984891057014465, "train/loss_total": 0.5916581153869629 }, { "epoch": 3.275180336628373, "step": 12259, "train/loss_ctc": 0.8316130042076111, "train/loss_error": 0.4310527741909027, "train/loss_total": 0.5111648440361023 }, { "epoch": 3.27544750200374, "grad_norm": 2.9236648082733154, "learning_rate": 1.0358535933742987e-05, "loss": 0.5258, "step": 12260 }, { "epoch": 3.27544750200374, 
"step": 12260, "train/loss_ctc": 0.7978899478912354, "train/loss_error": 0.43413281440734863, "train/loss_total": 0.5068842172622681 }, { "epoch": 3.2757146673791078, "step": 12261, "train/loss_ctc": 0.6281926035881042, "train/loss_error": 0.4921231269836426, "train/loss_total": 0.5193370580673218 }, { "epoch": 3.275981832754475, "step": 12262, "train/loss_ctc": 0.753102719783783, "train/loss_error": 0.3725394904613495, "train/loss_total": 0.44865214824676514 }, { "epoch": 3.2762489981298426, "step": 12263, "train/loss_ctc": 1.867046594619751, "train/loss_error": 0.4594986140727997, "train/loss_total": 0.7410082221031189 }, { "epoch": 3.27651616350521, "step": 12264, "train/loss_ctc": 0.3418372571468353, "train/loss_error": 0.3963352143764496, "train/loss_total": 0.38543564081192017 }, { "epoch": 3.276783328880577, "step": 12265, "train/loss_ctc": 0.2523573040962219, "train/loss_error": 0.47448498010635376, "train/loss_total": 0.43005943298339844 }, { "epoch": 3.2770504942559446, "step": 12266, "train/loss_ctc": 1.1124464273452759, "train/loss_error": 0.472868412733078, "train/loss_total": 0.6007840037345886 }, { "epoch": 3.277317659631312, "step": 12267, "train/loss_ctc": 0.42105528712272644, "train/loss_error": 0.4591312110424042, "train/loss_total": 0.4515160322189331 }, { "epoch": 3.277584825006679, "step": 12268, "train/loss_ctc": 0.2665237784385681, "train/loss_error": 0.3802725672721863, "train/loss_total": 0.3575228154659271 }, { "epoch": 3.2778519903820467, "step": 12269, "train/loss_ctc": 0.94832444190979, "train/loss_error": 0.4555932581424713, "train/loss_total": 0.5541394948959351 }, { "epoch": 3.278119155757414, "grad_norm": 1.732801079750061, "learning_rate": 1.0342506011220946e-05, "loss": 0.4995, "step": 12270 }, { "epoch": 3.278119155757414, "step": 12270, "train/loss_ctc": 0.43603944778442383, "train/loss_error": 0.40962567925453186, "train/loss_total": 0.41490843892097473 }, { "epoch": 3.278386321132781, "step": 12271, "train/loss_ctc": 
0.8175010681152344, "train/loss_error": 0.4487555921077728, "train/loss_total": 0.5225046873092651 }, { "epoch": 3.2786534865081487, "step": 12272, "train/loss_ctc": 0.9905896186828613, "train/loss_error": 0.4450506269931793, "train/loss_total": 0.5541584491729736 }, { "epoch": 3.278920651883516, "step": 12273, "train/loss_ctc": 0.6537773013114929, "train/loss_error": 0.3703421652317047, "train/loss_total": 0.42702919244766235 }, { "epoch": 3.279187817258883, "step": 12274, "train/loss_ctc": 0.5007646083831787, "train/loss_error": 0.4213225543498993, "train/loss_total": 0.43721097707748413 }, { "epoch": 3.2794549826342507, "step": 12275, "train/loss_ctc": 0.7176176905632019, "train/loss_error": 0.47868773341178894, "train/loss_total": 0.5264737606048584 }, { "epoch": 3.279722148009618, "step": 12276, "train/loss_ctc": 0.26797670125961304, "train/loss_error": 0.4087983965873718, "train/loss_total": 0.380634069442749 }, { "epoch": 3.279989313384985, "step": 12277, "train/loss_ctc": 0.8626694679260254, "train/loss_error": 0.4469110369682312, "train/loss_total": 0.530062735080719 }, { "epoch": 3.2802564787603528, "step": 12278, "train/loss_ctc": 0.7306118607521057, "train/loss_error": 0.4565330147743225, "train/loss_total": 0.5113487839698792 }, { "epoch": 3.28052364413572, "step": 12279, "train/loss_ctc": 1.1434171199798584, "train/loss_error": 0.5107319355010986, "train/loss_total": 0.6372689604759216 }, { "epoch": 3.280790809511087, "grad_norm": 2.8877251148223877, "learning_rate": 1.0326476088698904e-05, "loss": 0.4942, "step": 12280 }, { "epoch": 3.280790809511087, "step": 12280, "train/loss_ctc": 0.5193144083023071, "train/loss_error": 0.41088375449180603, "train/loss_total": 0.4325698912143707 }, { "epoch": 3.281057974886455, "step": 12281, "train/loss_ctc": 0.8869198560714722, "train/loss_error": 0.36214354634284973, "train/loss_total": 0.46709883213043213 }, { "epoch": 3.281325140261822, "step": 12282, "train/loss_ctc": 0.3833658993244171, "train/loss_error": 
0.44647544622421265, "train/loss_total": 0.43385353684425354 }, { "epoch": 3.281592305637189, "step": 12283, "train/loss_ctc": 1.0027179718017578, "train/loss_error": 0.4058312773704529, "train/loss_total": 0.5252086520195007 }, { "epoch": 3.281859471012557, "step": 12284, "train/loss_ctc": 1.0038094520568848, "train/loss_error": 0.4384166896343231, "train/loss_total": 0.5514952540397644 }, { "epoch": 3.282126636387924, "step": 12285, "train/loss_ctc": 0.5153412222862244, "train/loss_error": 0.417736291885376, "train/loss_total": 0.4372572898864746 }, { "epoch": 3.2823938017632917, "step": 12286, "train/loss_ctc": 0.4558510184288025, "train/loss_error": 0.4584609866142273, "train/loss_total": 0.4579390287399292 }, { "epoch": 3.282660967138659, "step": 12287, "train/loss_ctc": 0.6156445741653442, "train/loss_error": 0.434283584356308, "train/loss_total": 0.47055578231811523 }, { "epoch": 3.282928132514026, "step": 12288, "train/loss_ctc": 0.3956773579120636, "train/loss_error": 0.3972102105617523, "train/loss_total": 0.3969036638736725 }, { "epoch": 3.2831952978893937, "step": 12289, "train/loss_ctc": 1.2566889524459839, "train/loss_error": 0.455265611410141, "train/loss_total": 0.6155502796173096 }, { "epoch": 3.283462463264761, "grad_norm": 2.3387768268585205, "learning_rate": 1.0310446166176864e-05, "loss": 0.4788, "step": 12290 }, { "epoch": 3.283462463264761, "step": 12290, "train/loss_ctc": 0.8708387017250061, "train/loss_error": 0.5026718974113464, "train/loss_total": 0.5763052701950073 }, { "epoch": 3.283729628640128, "step": 12291, "train/loss_ctc": 0.8499763011932373, "train/loss_error": 0.41799646615982056, "train/loss_total": 0.5043924450874329 }, { "epoch": 3.2839967940154957, "step": 12292, "train/loss_ctc": 0.8334853649139404, "train/loss_error": 0.39152607321739197, "train/loss_total": 0.4799179434776306 }, { "epoch": 3.284263959390863, "step": 12293, "train/loss_ctc": 0.9238119721412659, "train/loss_error": 0.422963410615921, "train/loss_total": 
0.5231331586837769 }, { "epoch": 3.28453112476623, "step": 12294, "train/loss_ctc": 1.6067922115325928, "train/loss_error": 0.42048314213752747, "train/loss_total": 0.6577450037002563 }, { "epoch": 3.2847982901415977, "step": 12295, "train/loss_ctc": 0.4742125868797302, "train/loss_error": 0.4779180586338043, "train/loss_total": 0.4771769642829895 }, { "epoch": 3.285065455516965, "step": 12296, "train/loss_ctc": 1.0604876279830933, "train/loss_error": 0.5072367191314697, "train/loss_total": 0.6178869009017944 }, { "epoch": 3.2853326208923326, "step": 12297, "train/loss_ctc": 0.5944244861602783, "train/loss_error": 0.46967801451683044, "train/loss_total": 0.49462729692459106 }, { "epoch": 3.2855997862676998, "step": 12298, "train/loss_ctc": 0.7358242273330688, "train/loss_error": 0.4025442898273468, "train/loss_total": 0.4692002832889557 }, { "epoch": 3.285866951643067, "step": 12299, "train/loss_ctc": 0.7886430025100708, "train/loss_error": 0.3986106514930725, "train/loss_total": 0.47661709785461426 }, { "epoch": 3.2861341170184346, "grad_norm": 2.2785325050354004, "learning_rate": 1.0294416243654822e-05, "loss": 0.5277, "step": 12300 }, { "epoch": 3.2861341170184346, "step": 12300, "train/loss_ctc": 0.40360331535339355, "train/loss_error": 0.4307854473590851, "train/loss_total": 0.42534902691841125 }, { "epoch": 3.286401282393802, "step": 12301, "train/loss_ctc": 0.7819452285766602, "train/loss_error": 0.4883047938346863, "train/loss_total": 0.54703289270401 }, { "epoch": 3.286668447769169, "step": 12302, "train/loss_ctc": 0.7643752098083496, "train/loss_error": 0.43449726700782776, "train/loss_total": 0.5004728436470032 }, { "epoch": 3.2869356131445366, "step": 12303, "train/loss_ctc": 0.9434854984283447, "train/loss_error": 0.45462796092033386, "train/loss_total": 0.5523995161056519 }, { "epoch": 3.287202778519904, "step": 12304, "train/loss_ctc": 0.5358157157897949, "train/loss_error": 0.46648505330085754, "train/loss_total": 0.48035120964050293 }, { "epoch": 
3.287469943895271, "step": 12305, "train/loss_ctc": 0.7553233504295349, "train/loss_error": 0.4271453320980072, "train/loss_total": 0.4927809238433838 }, { "epoch": 3.2877371092706387, "step": 12306, "train/loss_ctc": 1.00514817237854, "train/loss_error": 0.3825744688510895, "train/loss_total": 0.5070891976356506 }, { "epoch": 3.288004274646006, "step": 12307, "train/loss_ctc": 0.8503142595291138, "train/loss_error": 0.37727341055870056, "train/loss_total": 0.47188156843185425 }, { "epoch": 3.288271440021373, "step": 12308, "train/loss_ctc": 1.0629163980484009, "train/loss_error": 0.42922741174697876, "train/loss_total": 0.55596524477005 }, { "epoch": 3.2885386053967407, "step": 12309, "train/loss_ctc": 0.494087278842926, "train/loss_error": 0.4333510994911194, "train/loss_total": 0.44549834728240967 }, { "epoch": 3.288805770772108, "grad_norm": 3.044161558151245, "learning_rate": 1.027838632113278e-05, "loss": 0.4979, "step": 12310 }, { "epoch": 3.288805770772108, "step": 12310, "train/loss_ctc": 0.5647826790809631, "train/loss_error": 0.40958505868911743, "train/loss_total": 0.4406245946884155 }, { "epoch": 3.289072936147475, "step": 12311, "train/loss_ctc": 0.48451513051986694, "train/loss_error": 0.4433296322822571, "train/loss_total": 0.4515667259693146 }, { "epoch": 3.2893401015228427, "step": 12312, "train/loss_ctc": 0.5570136904716492, "train/loss_error": 0.44917356967926025, "train/loss_total": 0.4707415997982025 }, { "epoch": 3.28960726689821, "step": 12313, "train/loss_ctc": 0.4757559895515442, "train/loss_error": 0.3824630677700043, "train/loss_total": 0.40112167596817017 }, { "epoch": 3.289874432273577, "step": 12314, "train/loss_ctc": 0.20195609331130981, "train/loss_error": 0.47838205099105835, "train/loss_total": 0.4230968654155731 }, { "epoch": 3.2901415976489448, "step": 12315, "train/loss_ctc": 0.6173776388168335, "train/loss_error": 0.4209469258785248, "train/loss_total": 0.46023306250572205 }, { "epoch": 3.290408763024312, "step": 12316, 
"train/loss_ctc": 0.4183270335197449, "train/loss_error": 0.4458286762237549, "train/loss_total": 0.44032835960388184 }, { "epoch": 3.2906759283996796, "step": 12317, "train/loss_ctc": 1.0347704887390137, "train/loss_error": 0.43248122930526733, "train/loss_total": 0.5529391169548035 }, { "epoch": 3.290943093775047, "step": 12318, "train/loss_ctc": 0.7755517959594727, "train/loss_error": 0.4729249179363251, "train/loss_total": 0.5334503054618835 }, { "epoch": 3.291210259150414, "step": 12319, "train/loss_ctc": 1.0732614994049072, "train/loss_error": 0.455422580242157, "train/loss_total": 0.5789903402328491 }, { "epoch": 3.2914774245257816, "grad_norm": 1.2938438653945923, "learning_rate": 1.026235639861074e-05, "loss": 0.4753, "step": 12320 }, { "epoch": 3.2914774245257816, "step": 12320, "train/loss_ctc": 0.4623314142227173, "train/loss_error": 0.49947646260261536, "train/loss_total": 0.4920474588871002 }, { "epoch": 3.291744589901149, "step": 12321, "train/loss_ctc": 0.924041748046875, "train/loss_error": 0.40206658840179443, "train/loss_total": 0.5064616203308105 }, { "epoch": 3.292011755276516, "step": 12322, "train/loss_ctc": 0.3453148603439331, "train/loss_error": 0.36884939670562744, "train/loss_total": 0.364142507314682 }, { "epoch": 3.2922789206518837, "step": 12323, "train/loss_ctc": 0.9481872320175171, "train/loss_error": 0.45800304412841797, "train/loss_total": 0.5560399293899536 }, { "epoch": 3.292546086027251, "step": 12324, "train/loss_ctc": 0.9673036336898804, "train/loss_error": 0.549921452999115, "train/loss_total": 0.6333979368209839 }, { "epoch": 3.292813251402618, "step": 12325, "train/loss_ctc": 0.745964527130127, "train/loss_error": 0.4421144425868988, "train/loss_total": 0.5028844475746155 }, { "epoch": 3.2930804167779857, "step": 12326, "train/loss_ctc": 0.8145601153373718, "train/loss_error": 0.4410717785358429, "train/loss_total": 0.5157694816589355 }, { "epoch": 3.293347582153353, "step": 12327, "train/loss_ctc": 0.5242834687232971, 
"train/loss_error": 0.4371176064014435, "train/loss_total": 0.4545508027076721 }, { "epoch": 3.2936147475287205, "step": 12328, "train/loss_ctc": 0.2560044527053833, "train/loss_error": 0.45751065015792847, "train/loss_total": 0.4172094166278839 }, { "epoch": 3.2938819129040877, "step": 12329, "train/loss_ctc": 0.5649550557136536, "train/loss_error": 0.43655282258987427, "train/loss_total": 0.4622332751750946 }, { "epoch": 3.294149078279455, "grad_norm": 1.8378305435180664, "learning_rate": 1.02463264760887e-05, "loss": 0.4905, "step": 12330 }, { "epoch": 3.294149078279455, "step": 12330, "train/loss_ctc": 0.7987749576568604, "train/loss_error": 0.4539089798927307, "train/loss_total": 0.5228822231292725 }, { "epoch": 3.2944162436548226, "step": 12331, "train/loss_ctc": 0.4679644703865051, "train/loss_error": 0.4983067810535431, "train/loss_total": 0.4922383427619934 }, { "epoch": 3.2946834090301897, "step": 12332, "train/loss_ctc": 0.6239258050918579, "train/loss_error": 0.4305949807167053, "train/loss_total": 0.46926113963127136 }, { "epoch": 3.294950574405557, "step": 12333, "train/loss_ctc": 0.5001161694526672, "train/loss_error": 0.4875272810459137, "train/loss_total": 0.49004507064819336 }, { "epoch": 3.2952177397809246, "step": 12334, "train/loss_ctc": 0.799109935760498, "train/loss_error": 0.5159029960632324, "train/loss_total": 0.5725443959236145 }, { "epoch": 3.2954849051562918, "step": 12335, "train/loss_ctc": 0.5339066386222839, "train/loss_error": 0.47071531414985657, "train/loss_total": 0.4833535850048065 }, { "epoch": 3.295752070531659, "step": 12336, "train/loss_ctc": 0.47971656918525696, "train/loss_error": 0.5534957647323608, "train/loss_total": 0.5387399196624756 }, { "epoch": 3.2960192359070266, "step": 12337, "train/loss_ctc": 0.8404038548469543, "train/loss_error": 0.5405270457267761, "train/loss_total": 0.6005024313926697 }, { "epoch": 3.296286401282394, "step": 12338, "train/loss_ctc": 0.9144049882888794, "train/loss_error": 
0.42220455408096313, "train/loss_total": 0.5206446647644043 }, { "epoch": 3.296553566657761, "step": 12339, "train/loss_ctc": 1.1074374914169312, "train/loss_error": 0.4243842363357544, "train/loss_total": 0.5609949231147766 }, { "epoch": 3.2968207320331286, "grad_norm": 2.4962921142578125, "learning_rate": 1.0230296553566658e-05, "loss": 0.5251, "step": 12340 }, { "epoch": 3.2968207320331286, "step": 12340, "train/loss_ctc": 0.5731984376907349, "train/loss_error": 0.4473090171813965, "train/loss_total": 0.4724869132041931 }, { "epoch": 3.297087897408496, "step": 12341, "train/loss_ctc": 0.5756334066390991, "train/loss_error": 0.43850865960121155, "train/loss_total": 0.465933620929718 }, { "epoch": 3.297355062783863, "step": 12342, "train/loss_ctc": 0.640399158000946, "train/loss_error": 0.38253745436668396, "train/loss_total": 0.43410980701446533 }, { "epoch": 3.2976222281592307, "step": 12343, "train/loss_ctc": 0.8250110745429993, "train/loss_error": 0.39819610118865967, "train/loss_total": 0.4835590720176697 }, { "epoch": 3.297889393534598, "step": 12344, "train/loss_ctc": 0.4423973560333252, "train/loss_error": 0.5415434837341309, "train/loss_total": 0.5217142701148987 }, { "epoch": 3.298156558909965, "step": 12345, "train/loss_ctc": 0.928257942199707, "train/loss_error": 0.46484556794166565, "train/loss_total": 0.557528018951416 }, { "epoch": 3.2984237242853327, "step": 12346, "train/loss_ctc": 1.229705572128296, "train/loss_error": 0.44033074378967285, "train/loss_total": 0.5982057452201843 }, { "epoch": 3.2986908896607, "step": 12347, "train/loss_ctc": 0.8339048624038696, "train/loss_error": 0.44846275448799133, "train/loss_total": 0.5255511999130249 }, { "epoch": 3.298958055036067, "step": 12348, "train/loss_ctc": 0.48764485120773315, "train/loss_error": 0.48525571823120117, "train/loss_total": 0.4857335388660431 }, { "epoch": 3.2992252204114347, "step": 12349, "train/loss_ctc": 0.8060091733932495, "train/loss_error": 0.4261668622493744, "train/loss_total": 
0.5021353363990784 }, { "epoch": 3.299492385786802, "grad_norm": 1.7526530027389526, "learning_rate": 1.0214266631044618e-05, "loss": 0.5047, "step": 12350 }, { "epoch": 3.299492385786802, "step": 12350, "train/loss_ctc": 0.596397876739502, "train/loss_error": 0.4009580612182617, "train/loss_total": 0.4400460124015808 }, { "epoch": 3.2997595511621696, "step": 12351, "train/loss_ctc": 1.0204181671142578, "train/loss_error": 0.4602501094341278, "train/loss_total": 0.5722837448120117 }, { "epoch": 3.3000267165375368, "step": 12352, "train/loss_ctc": 0.31799110770225525, "train/loss_error": 0.4554518163204193, "train/loss_total": 0.427959680557251 }, { "epoch": 3.300293881912904, "step": 12353, "train/loss_ctc": 0.19205619394779205, "train/loss_error": 0.46971169114112854, "train/loss_total": 0.41418057680130005 }, { "epoch": 3.3005610472882716, "step": 12354, "train/loss_ctc": 0.5858478546142578, "train/loss_error": 0.4024965763092041, "train/loss_total": 0.4391668438911438 }, { "epoch": 3.300828212663639, "step": 12355, "train/loss_ctc": 0.7070155143737793, "train/loss_error": 0.41202273964881897, "train/loss_total": 0.47102129459381104 }, { "epoch": 3.301095378039006, "step": 12356, "train/loss_ctc": 0.7637577056884766, "train/loss_error": 0.41096314787864685, "train/loss_total": 0.4815220832824707 }, { "epoch": 3.3013625434143736, "step": 12357, "train/loss_ctc": 0.7490992546081543, "train/loss_error": 0.38688457012176514, "train/loss_total": 0.4593275189399719 }, { "epoch": 3.301629708789741, "step": 12358, "train/loss_ctc": 1.1432883739471436, "train/loss_error": 0.47090184688568115, "train/loss_total": 0.6053791642189026 }, { "epoch": 3.301896874165108, "step": 12359, "train/loss_ctc": 1.0464826822280884, "train/loss_error": 0.4294627606868744, "train/loss_total": 0.5528667569160461 }, { "epoch": 3.3021640395404757, "grad_norm": 1.4470897912979126, "learning_rate": 1.0198236708522576e-05, "loss": 0.4864, "step": 12360 }, { "epoch": 3.3021640395404757, "step": 
12360, "train/loss_ctc": 0.918389618396759, "train/loss_error": 0.4920136034488678, "train/loss_total": 0.577288806438446 }, { "epoch": 3.302431204915843, "step": 12361, "train/loss_ctc": 0.5737422704696655, "train/loss_error": 0.4022594392299652, "train/loss_total": 0.43655601143836975 }, { "epoch": 3.3026983702912105, "step": 12362, "train/loss_ctc": 0.4426620900630951, "train/loss_error": 0.4359573721885681, "train/loss_total": 0.43729832768440247 }, { "epoch": 3.3029655356665777, "step": 12363, "train/loss_ctc": 0.7133610248565674, "train/loss_error": 0.45706260204315186, "train/loss_total": 0.5083222985267639 }, { "epoch": 3.303232701041945, "step": 12364, "train/loss_ctc": 0.8210369944572449, "train/loss_error": 0.4317077398300171, "train/loss_total": 0.5095735788345337 }, { "epoch": 3.3034998664173125, "step": 12365, "train/loss_ctc": 1.1888747215270996, "train/loss_error": 0.4527866840362549, "train/loss_total": 0.6000043153762817 }, { "epoch": 3.3037670317926797, "step": 12366, "train/loss_ctc": 0.44536247849464417, "train/loss_error": 0.38926950097084045, "train/loss_total": 0.40048810839653015 }, { "epoch": 3.304034197168047, "step": 12367, "train/loss_ctc": 0.69548100233078, "train/loss_error": 0.46080297231674194, "train/loss_total": 0.5077385902404785 }, { "epoch": 3.3043013625434146, "step": 12368, "train/loss_ctc": 1.2836142778396606, "train/loss_error": 0.44449159502983093, "train/loss_total": 0.6123161315917969 }, { "epoch": 3.3045685279187818, "step": 12369, "train/loss_ctc": 0.5882748961448669, "train/loss_error": 0.4476717710494995, "train/loss_total": 0.47579240798950195 }, { "epoch": 3.304835693294149, "grad_norm": 1.6025712490081787, "learning_rate": 1.0182206786000534e-05, "loss": 0.5065, "step": 12370 }, { "epoch": 3.304835693294149, "step": 12370, "train/loss_ctc": 0.3654531240463257, "train/loss_error": 0.44000735878944397, "train/loss_total": 0.4250965118408203 }, { "epoch": 3.3051028586695166, "step": 12371, "train/loss_ctc": 
1.237499475479126, "train/loss_error": 0.432582288980484, "train/loss_total": 0.5935657620429993 }, { "epoch": 3.305370024044884, "step": 12372, "train/loss_ctc": 0.7257665395736694, "train/loss_error": 0.41277989745140076, "train/loss_total": 0.47537723183631897 }, { "epoch": 3.305637189420251, "step": 12373, "train/loss_ctc": 0.8286298513412476, "train/loss_error": 0.43848732113838196, "train/loss_total": 0.516515851020813 }, { "epoch": 3.3059043547956186, "step": 12374, "train/loss_ctc": 1.019184947013855, "train/loss_error": 0.4150223731994629, "train/loss_total": 0.5358548760414124 }, { "epoch": 3.306171520170986, "step": 12375, "train/loss_ctc": 0.7307180762290955, "train/loss_error": 0.44418224692344666, "train/loss_total": 0.5014894008636475 }, { "epoch": 3.306438685546353, "step": 12376, "train/loss_ctc": 0.6189184188842773, "train/loss_error": 0.46586108207702637, "train/loss_total": 0.4964725375175476 }, { "epoch": 3.3067058509217206, "step": 12377, "train/loss_ctc": 0.5286694765090942, "train/loss_error": 0.4266686737537384, "train/loss_total": 0.44706884026527405 }, { "epoch": 3.306973016297088, "step": 12378, "train/loss_ctc": 0.33335721492767334, "train/loss_error": 0.38626426458358765, "train/loss_total": 0.37568289041519165 }, { "epoch": 3.307240181672455, "step": 12379, "train/loss_ctc": 0.5597778558731079, "train/loss_error": 0.4912125766277313, "train/loss_total": 0.5049256086349487 }, { "epoch": 3.3075073470478227, "grad_norm": 2.9507927894592285, "learning_rate": 1.0166176863478494e-05, "loss": 0.4872, "step": 12380 }, { "epoch": 3.3075073470478227, "step": 12380, "train/loss_ctc": 0.6511774063110352, "train/loss_error": 0.4703015387058258, "train/loss_total": 0.5064767003059387 }, { "epoch": 3.30777451242319, "step": 12381, "train/loss_ctc": 1.0768743753433228, "train/loss_error": 0.43815362453460693, "train/loss_total": 0.5658977627754211 }, { "epoch": 3.308041677798557, "step": 12382, "train/loss_ctc": 1.0650626420974731, 
"train/loss_error": 0.46242478489875793, "train/loss_total": 0.5829523801803589 }, { "epoch": 3.3083088431739247, "step": 12383, "train/loss_ctc": 0.8338079452514648, "train/loss_error": 0.42543476819992065, "train/loss_total": 0.5071094036102295 }, { "epoch": 3.308576008549292, "step": 12384, "train/loss_ctc": 0.25784751772880554, "train/loss_error": 0.3865593671798706, "train/loss_total": 0.36081698536872864 }, { "epoch": 3.3088431739246595, "step": 12385, "train/loss_ctc": 0.8365668058395386, "train/loss_error": 0.41734087467193604, "train/loss_total": 0.5011860728263855 }, { "epoch": 3.3091103393000267, "step": 12386, "train/loss_ctc": 0.43875038623809814, "train/loss_error": 0.44015029072761536, "train/loss_total": 0.43987032771110535 }, { "epoch": 3.309377504675394, "step": 12387, "train/loss_ctc": 0.6549371480941772, "train/loss_error": 0.46988770365715027, "train/loss_total": 0.5068975687026978 }, { "epoch": 3.3096446700507616, "step": 12388, "train/loss_ctc": 0.6981993317604065, "train/loss_error": 0.44242197275161743, "train/loss_total": 0.4935774803161621 }, { "epoch": 3.3099118354261288, "step": 12389, "train/loss_ctc": 0.5537126064300537, "train/loss_error": 0.449494868516922, "train/loss_total": 0.4703384041786194 }, { "epoch": 3.310179000801496, "grad_norm": 2.103256940841675, "learning_rate": 1.0150146940956452e-05, "loss": 0.4935, "step": 12390 }, { "epoch": 3.310179000801496, "step": 12390, "train/loss_ctc": 1.2210280895233154, "train/loss_error": 0.4095666706562042, "train/loss_total": 0.5718590021133423 }, { "epoch": 3.3104461661768636, "step": 12391, "train/loss_ctc": 0.40848782658576965, "train/loss_error": 0.41143274307250977, "train/loss_total": 0.41084378957748413 }, { "epoch": 3.310713331552231, "step": 12392, "train/loss_ctc": 0.4828198254108429, "train/loss_error": 0.4500269889831543, "train/loss_total": 0.456585556268692 }, { "epoch": 3.310980496927598, "step": 12393, "train/loss_ctc": 0.5639111995697021, "train/loss_error": 
0.4727948307991028, "train/loss_total": 0.4910181164741516 }, { "epoch": 3.3112476623029656, "step": 12394, "train/loss_ctc": 0.37581777572631836, "train/loss_error": 0.4175049364566803, "train/loss_total": 0.4091675281524658 }, { "epoch": 3.311514827678333, "step": 12395, "train/loss_ctc": 0.7662863731384277, "train/loss_error": 0.40466544032096863, "train/loss_total": 0.47698962688446045 }, { "epoch": 3.3117819930537005, "step": 12396, "train/loss_ctc": 1.089005947113037, "train/loss_error": 0.4632369577884674, "train/loss_total": 0.5883907675743103 }, { "epoch": 3.3120491584290677, "step": 12397, "train/loss_ctc": 0.5530521869659424, "train/loss_error": 0.46441835165023804, "train/loss_total": 0.48214513063430786 }, { "epoch": 3.312316323804435, "step": 12398, "train/loss_ctc": 0.46080756187438965, "train/loss_error": 0.47077813744544983, "train/loss_total": 0.46878403425216675 }, { "epoch": 3.3125834891798025, "step": 12399, "train/loss_ctc": 0.5785281658172607, "train/loss_error": 0.4129466414451599, "train/loss_total": 0.44606295228004456 }, { "epoch": 3.3128506545551697, "grad_norm": 1.9803963899612427, "learning_rate": 1.013411701843441e-05, "loss": 0.4802, "step": 12400 }, { "epoch": 3.3128506545551697, "step": 12400, "train/loss_ctc": 0.7672614455223083, "train/loss_error": 0.4785722494125366, "train/loss_total": 0.536310076713562 }, { "epoch": 3.313117819930537, "step": 12401, "train/loss_ctc": 0.5455561876296997, "train/loss_error": 0.4540400803089142, "train/loss_total": 0.4723433256149292 }, { "epoch": 3.3133849853059045, "step": 12402, "train/loss_ctc": 1.098421335220337, "train/loss_error": 0.42114198207855225, "train/loss_total": 0.5565978288650513 }, { "epoch": 3.3136521506812717, "step": 12403, "train/loss_ctc": 1.1741448640823364, "train/loss_error": 0.4612637460231781, "train/loss_total": 0.6038399934768677 }, { "epoch": 3.313919316056639, "step": 12404, "train/loss_ctc": 0.6746995449066162, "train/loss_error": 0.40699970722198486, 
"train/loss_total": 0.46053966879844666 }, { "epoch": 3.3141864814320066, "step": 12405, "train/loss_ctc": 1.2820725440979004, "train/loss_error": 0.3959646224975586, "train/loss_total": 0.5731862187385559 }, { "epoch": 3.3144536468073738, "step": 12406, "train/loss_ctc": 0.8213114738464355, "train/loss_error": 0.4221934378147125, "train/loss_total": 0.5020170211791992 }, { "epoch": 3.314720812182741, "step": 12407, "train/loss_ctc": 0.45989370346069336, "train/loss_error": 0.49339649081230164, "train/loss_total": 0.48669594526290894 }, { "epoch": 3.3149879775581086, "step": 12408, "train/loss_ctc": 0.7562410831451416, "train/loss_error": 0.3810732364654541, "train/loss_total": 0.4561068117618561 }, { "epoch": 3.315255142933476, "step": 12409, "train/loss_ctc": 0.36924421787261963, "train/loss_error": 0.46416208148002625, "train/loss_total": 0.4451785087585449 }, { "epoch": 3.315522308308843, "grad_norm": 2.5729541778564453, "learning_rate": 1.0118087095912371e-05, "loss": 0.5093, "step": 12410 }, { "epoch": 3.315522308308843, "step": 12410, "train/loss_ctc": 0.7398232817649841, "train/loss_error": 0.47016698122024536, "train/loss_total": 0.52409827709198 }, { "epoch": 3.3157894736842106, "step": 12411, "train/loss_ctc": 0.6782236099243164, "train/loss_error": 0.466545969247818, "train/loss_total": 0.5088815093040466 }, { "epoch": 3.316056639059578, "step": 12412, "train/loss_ctc": 0.6250749230384827, "train/loss_error": 0.44510093331336975, "train/loss_total": 0.48109573125839233 }, { "epoch": 3.316323804434945, "step": 12413, "train/loss_ctc": 0.9137206077575684, "train/loss_error": 0.39523324370384216, "train/loss_total": 0.49893075227737427 }, { "epoch": 3.3165909698103127, "step": 12414, "train/loss_ctc": 0.3615066111087799, "train/loss_error": 0.45295900106430054, "train/loss_total": 0.43466854095458984 }, { "epoch": 3.31685813518568, "step": 12415, "train/loss_ctc": 0.44741106033325195, "train/loss_error": 0.45706629753112793, "train/loss_total": 
0.4551352560520172 }, { "epoch": 3.317125300561047, "step": 12416, "train/loss_ctc": 0.4711216688156128, "train/loss_error": 0.45057258009910583, "train/loss_total": 0.4546824097633362 }, { "epoch": 3.3173924659364147, "step": 12417, "train/loss_ctc": 1.0153124332427979, "train/loss_error": 0.44984865188598633, "train/loss_total": 0.5629414319992065 }, { "epoch": 3.317659631311782, "step": 12418, "train/loss_ctc": 1.653925895690918, "train/loss_error": 0.4862271249294281, "train/loss_total": 0.7197668552398682 }, { "epoch": 3.3179267966871495, "step": 12419, "train/loss_ctc": 0.6241209506988525, "train/loss_error": 0.3681676387786865, "train/loss_total": 0.4193583130836487 }, { "epoch": 3.3181939620625167, "grad_norm": 1.634676218032837, "learning_rate": 1.010205717339033e-05, "loss": 0.506, "step": 12420 }, { "epoch": 3.3181939620625167, "step": 12420, "train/loss_ctc": 1.0685265064239502, "train/loss_error": 0.48571521043777466, "train/loss_total": 0.6022775173187256 }, { "epoch": 3.318461127437884, "step": 12421, "train/loss_ctc": 0.4272019863128662, "train/loss_error": 0.39043447375297546, "train/loss_total": 0.39778798818588257 }, { "epoch": 3.3187282928132515, "step": 12422, "train/loss_ctc": 1.172238826751709, "train/loss_error": 0.48431921005249023, "train/loss_total": 0.621903121471405 }, { "epoch": 3.3189954581886187, "step": 12423, "train/loss_ctc": 0.6025250554084778, "train/loss_error": 0.48140043020248413, "train/loss_total": 0.5056253671646118 }, { "epoch": 3.319262623563986, "step": 12424, "train/loss_ctc": 0.7031655311584473, "train/loss_error": 0.4350772500038147, "train/loss_total": 0.4886949062347412 }, { "epoch": 3.3195297889393536, "step": 12425, "train/loss_ctc": 1.0262548923492432, "train/loss_error": 0.3577166497707367, "train/loss_total": 0.4914242923259735 }, { "epoch": 3.3197969543147208, "step": 12426, "train/loss_ctc": 0.5203040838241577, "train/loss_error": 0.4950263202190399, "train/loss_total": 0.5000818967819214 }, { "epoch": 
3.3200641196900884, "step": 12427, "train/loss_ctc": 0.3346797525882721, "train/loss_error": 0.44028112292289734, "train/loss_total": 0.4191608726978302 }, { "epoch": 3.3203312850654556, "step": 12428, "train/loss_ctc": 0.31252607703208923, "train/loss_error": 0.44421568512916565, "train/loss_total": 0.41787776350975037 }, { "epoch": 3.320598450440823, "step": 12429, "train/loss_ctc": 1.1983486413955688, "train/loss_error": 0.4856213331222534, "train/loss_total": 0.6281667947769165 }, { "epoch": 3.3208656158161904, "grad_norm": 2.191821813583374, "learning_rate": 1.0086027250868288e-05, "loss": 0.5073, "step": 12430 }, { "epoch": 3.3208656158161904, "step": 12430, "train/loss_ctc": 0.6128966808319092, "train/loss_error": 0.4489189684391022, "train/loss_total": 0.48171451687812805 }, { "epoch": 3.3211327811915576, "step": 12431, "train/loss_ctc": 0.7951672077178955, "train/loss_error": 0.5397070050239563, "train/loss_total": 0.59079909324646 }, { "epoch": 3.321399946566925, "step": 12432, "train/loss_ctc": 1.005859375, "train/loss_error": 0.45734620094299316, "train/loss_total": 0.5670488476753235 }, { "epoch": 3.3216671119422925, "step": 12433, "train/loss_ctc": 0.6816301345825195, "train/loss_error": 0.4538975954055786, "train/loss_total": 0.4994441270828247 }, { "epoch": 3.3219342773176597, "step": 12434, "train/loss_ctc": 0.8990704417228699, "train/loss_error": 0.413129985332489, "train/loss_total": 0.5103181004524231 }, { "epoch": 3.322201442693027, "step": 12435, "train/loss_ctc": 0.4798780381679535, "train/loss_error": 0.44025927782058716, "train/loss_total": 0.4481830298900604 }, { "epoch": 3.3224686080683945, "step": 12436, "train/loss_ctc": 0.5074273347854614, "train/loss_error": 0.4652474522590637, "train/loss_total": 0.4736834168434143 }, { "epoch": 3.3227357734437617, "step": 12437, "train/loss_ctc": 0.5583145022392273, "train/loss_error": 0.3628248870372772, "train/loss_total": 0.4019228219985962 }, { "epoch": 3.323002938819129, "step": 12438, 
"train/loss_ctc": 0.7149150371551514, "train/loss_error": 0.4250470697879791, "train/loss_total": 0.4830206632614136 }, { "epoch": 3.3232701041944965, "step": 12439, "train/loss_ctc": 0.6881678700447083, "train/loss_error": 0.5132688879966736, "train/loss_total": 0.5482487082481384 }, { "epoch": 3.3235372695698637, "grad_norm": 1.6836886405944824, "learning_rate": 1.0069997328346247e-05, "loss": 0.5004, "step": 12440 }, { "epoch": 3.3235372695698637, "step": 12440, "train/loss_ctc": 0.8398168683052063, "train/loss_error": 0.44574227929115295, "train/loss_total": 0.5245571732521057 }, { "epoch": 3.323804434945231, "step": 12441, "train/loss_ctc": 0.8661479949951172, "train/loss_error": 0.3784695565700531, "train/loss_total": 0.4760052561759949 }, { "epoch": 3.3240716003205986, "step": 12442, "train/loss_ctc": 0.2527795433998108, "train/loss_error": 0.3960263431072235, "train/loss_total": 0.36737698316574097 }, { "epoch": 3.3243387656959658, "step": 12443, "train/loss_ctc": 0.5877415537834167, "train/loss_error": 0.4618414342403412, "train/loss_total": 0.48702147603034973 }, { "epoch": 3.324605931071333, "step": 12444, "train/loss_ctc": 0.8790255784988403, "train/loss_error": 0.4220181405544281, "train/loss_total": 0.5134196281433105 }, { "epoch": 3.3248730964467006, "step": 12445, "train/loss_ctc": 0.3968132734298706, "train/loss_error": 0.3790922462940216, "train/loss_total": 0.3826364576816559 }, { "epoch": 3.325140261822068, "step": 12446, "train/loss_ctc": 1.8523304462432861, "train/loss_error": 0.43905574083328247, "train/loss_total": 0.7217106819152832 }, { "epoch": 3.325407427197435, "step": 12447, "train/loss_ctc": 0.4547090530395508, "train/loss_error": 0.48511695861816406, "train/loss_total": 0.4790353775024414 }, { "epoch": 3.3256745925728026, "step": 12448, "train/loss_ctc": 0.837216317653656, "train/loss_error": 0.44310522079467773, "train/loss_total": 0.5219274163246155 }, { "epoch": 3.32594175794817, "step": 12449, "train/loss_ctc": 
0.6423690915107727, "train/loss_error": 0.394855797290802, "train/loss_total": 0.4443584680557251 }, { "epoch": 3.3262089233235375, "grad_norm": 1.960782766342163, "learning_rate": 1.0053967405824205e-05, "loss": 0.4918, "step": 12450 }, { "epoch": 3.3262089233235375, "step": 12450, "train/loss_ctc": 0.8206840753555298, "train/loss_error": 0.48178210854530334, "train/loss_total": 0.5495625138282776 }, { "epoch": 3.3264760886989047, "step": 12451, "train/loss_ctc": 0.6781420707702637, "train/loss_error": 0.42980697751045227, "train/loss_total": 0.4794740080833435 }, { "epoch": 3.326743254074272, "step": 12452, "train/loss_ctc": 0.5440226793289185, "train/loss_error": 0.4123513698577881, "train/loss_total": 0.43868565559387207 }, { "epoch": 3.3270104194496395, "step": 12453, "train/loss_ctc": 1.1216323375701904, "train/loss_error": 0.3980017602443695, "train/loss_total": 0.5427278876304626 }, { "epoch": 3.3272775848250067, "step": 12454, "train/loss_ctc": 0.8345414400100708, "train/loss_error": 0.47378256916999817, "train/loss_total": 0.5459343194961548 }, { "epoch": 3.327544750200374, "step": 12455, "train/loss_ctc": 1.3428932428359985, "train/loss_error": 0.43840935826301575, "train/loss_total": 0.6193061470985413 }, { "epoch": 3.3278119155757415, "step": 12456, "train/loss_ctc": 0.6277535557746887, "train/loss_error": 0.4338088631629944, "train/loss_total": 0.47259780764579773 }, { "epoch": 3.3280790809511087, "step": 12457, "train/loss_ctc": 0.7649946808815002, "train/loss_error": 0.42105773091316223, "train/loss_total": 0.4898451566696167 }, { "epoch": 3.328346246326476, "step": 12458, "train/loss_ctc": 0.39557725191116333, "train/loss_error": 0.48359665274620056, "train/loss_total": 0.4659927785396576 }, { "epoch": 3.3286134117018435, "step": 12459, "train/loss_ctc": 0.66050785779953, "train/loss_error": 0.39159947633743286, "train/loss_total": 0.44538116455078125 }, { "epoch": 3.3288805770772107, "grad_norm": 1.5595438480377197, "learning_rate": 
1.0037937483302164e-05, "loss": 0.505, "step": 12460 }, { "epoch": 3.3288805770772107, "step": 12460, "train/loss_ctc": 0.48447078466415405, "train/loss_error": 0.3452572524547577, "train/loss_total": 0.3730999827384949 }, { "epoch": 3.3291477424525784, "step": 12461, "train/loss_ctc": 0.7185876369476318, "train/loss_error": 0.38519254326820374, "train/loss_total": 0.4518715739250183 }, { "epoch": 3.3294149078279456, "step": 12462, "train/loss_ctc": 0.32017838954925537, "train/loss_error": 0.46986907720565796, "train/loss_total": 0.4399309456348419 }, { "epoch": 3.3296820732033128, "step": 12463, "train/loss_ctc": 0.8905553817749023, "train/loss_error": 0.4426405429840088, "train/loss_total": 0.5322235226631165 }, { "epoch": 3.3299492385786804, "step": 12464, "train/loss_ctc": 1.510852575302124, "train/loss_error": 0.43973666429519653, "train/loss_total": 0.6539598703384399 }, { "epoch": 3.3302164039540476, "step": 12465, "train/loss_ctc": 1.1990125179290771, "train/loss_error": 0.46219906210899353, "train/loss_total": 0.6095618009567261 }, { "epoch": 3.330483569329415, "step": 12466, "train/loss_ctc": 0.7999317646026611, "train/loss_error": 0.4699021577835083, "train/loss_total": 0.5359081029891968 }, { "epoch": 3.3307507347047824, "step": 12467, "train/loss_ctc": 0.8599298596382141, "train/loss_error": 0.5498783588409424, "train/loss_total": 0.6118886470794678 }, { "epoch": 3.3310179000801496, "step": 12468, "train/loss_ctc": 1.1100059747695923, "train/loss_error": 0.4897056519985199, "train/loss_total": 0.6137657165527344 }, { "epoch": 3.331285065455517, "step": 12469, "train/loss_ctc": 1.2950329780578613, "train/loss_error": 0.4098633825778961, "train/loss_total": 0.5868973135948181 }, { "epoch": 3.3315522308308845, "grad_norm": 2.04203200340271, "learning_rate": 1.0021907560780123e-05, "loss": 0.5409, "step": 12470 }, { "epoch": 3.3315522308308845, "step": 12470, "train/loss_ctc": 1.0301129817962646, "train/loss_error": 0.48711082339286804, "train/loss_total": 
0.5957112908363342 }, { "epoch": 3.3318193962062517, "step": 12471, "train/loss_ctc": 0.5436175465583801, "train/loss_error": 0.46919602155685425, "train/loss_total": 0.48408034443855286 }, { "epoch": 3.332086561581619, "step": 12472, "train/loss_ctc": 0.6128684282302856, "train/loss_error": 0.4817664325237274, "train/loss_total": 0.507986843585968 }, { "epoch": 3.3323537269569865, "step": 12473, "train/loss_ctc": 0.34007424116134644, "train/loss_error": 0.4376208782196045, "train/loss_total": 0.41811156272888184 }, { "epoch": 3.3326208923323537, "step": 12474, "train/loss_ctc": 1.0066492557525635, "train/loss_error": 0.42318999767303467, "train/loss_total": 0.5398818254470825 }, { "epoch": 3.332888057707721, "step": 12475, "train/loss_ctc": 0.48991551995277405, "train/loss_error": 0.42126259207725525, "train/loss_total": 0.4349932074546814 }, { "epoch": 3.3331552230830885, "step": 12476, "train/loss_ctc": 0.886419415473938, "train/loss_error": 0.4326503574848175, "train/loss_total": 0.5234041810035706 }, { "epoch": 3.3334223884584557, "step": 12477, "train/loss_ctc": 0.3619644045829773, "train/loss_error": 0.44794535636901855, "train/loss_total": 0.43074917793273926 }, { "epoch": 3.333689553833823, "step": 12478, "train/loss_ctc": 1.0006051063537598, "train/loss_error": 0.5140007138252258, "train/loss_total": 0.6113216280937195 }, { "epoch": 3.3339567192091906, "step": 12479, "train/loss_ctc": 0.9263362884521484, "train/loss_error": 0.5030210018157959, "train/loss_total": 0.5876840353012085 }, { "epoch": 3.3342238845845578, "grad_norm": 1.64982008934021, "learning_rate": 1.0005877638258081e-05, "loss": 0.5134, "step": 12480 }, { "epoch": 3.3342238845845578, "step": 12480, "train/loss_ctc": 1.180893898010254, "train/loss_error": 0.46043092012405396, "train/loss_total": 0.6045235395431519 }, { "epoch": 3.334491049959925, "step": 12481, "train/loss_ctc": 0.5136857032775879, "train/loss_error": 0.40537163615226746, "train/loss_total": 0.4270344376564026 }, { "epoch": 
3.3347582153352926, "step": 12482, "train/loss_ctc": 0.8254081606864929, "train/loss_error": 0.42277470231056213, "train/loss_total": 0.5033013820648193 }, { "epoch": 3.33502538071066, "step": 12483, "train/loss_ctc": 0.5705236196517944, "train/loss_error": 0.473954975605011, "train/loss_total": 0.4932686984539032 }, { "epoch": 3.3352925460860274, "step": 12484, "train/loss_ctc": 0.8157942295074463, "train/loss_error": 0.42085379362106323, "train/loss_total": 0.4998418688774109 }, { "epoch": 3.3355597114613946, "step": 12485, "train/loss_ctc": 0.5517593026161194, "train/loss_error": 0.40858936309814453, "train/loss_total": 0.437223345041275 }, { "epoch": 3.335826876836762, "step": 12486, "train/loss_ctc": 0.40108537673950195, "train/loss_error": 0.5187056660652161, "train/loss_total": 0.4951816201210022 }, { "epoch": 3.3360940422121295, "step": 12487, "train/loss_ctc": 0.9764617085456848, "train/loss_error": 0.44772469997406006, "train/loss_total": 0.553472101688385 }, { "epoch": 3.3363612075874967, "step": 12488, "train/loss_ctc": 0.554246187210083, "train/loss_error": 0.44651374220848083, "train/loss_total": 0.4680602252483368 }, { "epoch": 3.336628372962864, "step": 12489, "train/loss_ctc": 0.4231005311012268, "train/loss_error": 0.3651462197303772, "train/loss_total": 0.3767370879650116 }, { "epoch": 3.3368955383382315, "grad_norm": 1.998507022857666, "learning_rate": 9.98984771573604e-06, "loss": 0.4859, "step": 12490 }, { "epoch": 3.3368955383382315, "step": 12490, "train/loss_ctc": 1.0706590414047241, "train/loss_error": 0.4764561653137207, "train/loss_total": 0.5952967405319214 }, { "epoch": 3.3371627037135987, "step": 12491, "train/loss_ctc": 0.6564563512802124, "train/loss_error": 0.3883000612258911, "train/loss_total": 0.4419313371181488 }, { "epoch": 3.337429869088966, "step": 12492, "train/loss_ctc": 0.6702972054481506, "train/loss_error": 0.42616182565689087, "train/loss_total": 0.4749889373779297 }, { "epoch": 3.3376970344643335, "step": 12493, 
"train/loss_ctc": 0.6622435450553894, "train/loss_error": 0.42671483755111694, "train/loss_total": 0.4738205671310425 }, { "epoch": 3.3379641998397007, "step": 12494, "train/loss_ctc": 0.5953781604766846, "train/loss_error": 0.43956202268600464, "train/loss_total": 0.47072523832321167 }, { "epoch": 3.3382313652150684, "step": 12495, "train/loss_ctc": 0.6554105877876282, "train/loss_error": 0.4947153329849243, "train/loss_total": 0.526854395866394 }, { "epoch": 3.3384985305904356, "step": 12496, "train/loss_ctc": 0.5100399255752563, "train/loss_error": 0.44719716906547546, "train/loss_total": 0.4597657322883606 }, { "epoch": 3.3387656959658027, "step": 12497, "train/loss_ctc": 0.6466816663742065, "train/loss_error": 0.4917541742324829, "train/loss_total": 0.5227397084236145 }, { "epoch": 3.3390328613411704, "step": 12498, "train/loss_ctc": 0.4448610842227936, "train/loss_error": 0.3210194706916809, "train/loss_total": 0.34578779339790344 }, { "epoch": 3.3393000267165376, "step": 12499, "train/loss_ctc": 1.171384572982788, "train/loss_error": 0.4576367735862732, "train/loss_total": 0.600386381149292 }, { "epoch": 3.339567192091905, "grad_norm": 1.5968236923217773, "learning_rate": 9.973817793214001e-06, "loss": 0.4912, "step": 12500 }, { "epoch": 3.339567192091905, "step": 12500, "train/loss_ctc": 0.6025944948196411, "train/loss_error": 0.4104277193546295, "train/loss_total": 0.4488610625267029 }, { "epoch": 3.3398343574672724, "step": 12501, "train/loss_ctc": 0.42483773827552795, "train/loss_error": 0.43489161133766174, "train/loss_total": 0.43288084864616394 }, { "epoch": 3.3401015228426396, "step": 12502, "train/loss_ctc": 0.4468626379966736, "train/loss_error": 0.4300861656665802, "train/loss_total": 0.4334414601325989 }, { "epoch": 3.340368688218007, "step": 12503, "train/loss_ctc": 0.6008074283599854, "train/loss_error": 0.44280773401260376, "train/loss_total": 0.4744076728820801 }, { "epoch": 3.3406358535933744, "step": 12504, "train/loss_ctc": 
0.37492889165878296, "train/loss_error": 0.47169846296310425, "train/loss_total": 0.45234453678131104 }, { "epoch": 3.3409030189687416, "step": 12505, "train/loss_ctc": 0.7717396020889282, "train/loss_error": 0.4204760491847992, "train/loss_total": 0.4907287657260895 }, { "epoch": 3.341170184344109, "step": 12506, "train/loss_ctc": 0.5394881963729858, "train/loss_error": 0.4783422350883484, "train/loss_total": 0.49057143926620483 }, { "epoch": 3.3414373497194765, "step": 12507, "train/loss_ctc": 0.4020400643348694, "train/loss_error": 0.46689555048942566, "train/loss_total": 0.4539244472980499 }, { "epoch": 3.3417045150948437, "step": 12508, "train/loss_ctc": 0.41481345891952515, "train/loss_error": 0.5223484039306641, "train/loss_total": 0.5008413791656494 }, { "epoch": 3.341971680470211, "step": 12509, "train/loss_ctc": 0.5353342890739441, "train/loss_error": 0.46873369812965393, "train/loss_total": 0.48205384612083435 }, { "epoch": 3.3422388458455785, "grad_norm": 3.8029303550720215, "learning_rate": 9.957787870691959e-06, "loss": 0.466, "step": 12510 }, { "epoch": 3.3422388458455785, "step": 12510, "train/loss_ctc": 0.45938241481781006, "train/loss_error": 0.3898998498916626, "train/loss_total": 0.40379637479782104 }, { "epoch": 3.3425060112209457, "step": 12511, "train/loss_ctc": 0.7296795845031738, "train/loss_error": 0.42417120933532715, "train/loss_total": 0.4852728843688965 }, { "epoch": 3.342773176596313, "step": 12512, "train/loss_ctc": 1.0189259052276611, "train/loss_error": 0.41316527128219604, "train/loss_total": 0.5343173742294312 }, { "epoch": 3.3430403419716805, "step": 12513, "train/loss_ctc": 0.5787022709846497, "train/loss_error": 0.4612376391887665, "train/loss_total": 0.4847305715084076 }, { "epoch": 3.3433075073470477, "step": 12514, "train/loss_ctc": 0.7910532355308533, "train/loss_error": 0.460225909948349, "train/loss_total": 0.5263913869857788 }, { "epoch": 3.343574672722415, "step": 12515, "train/loss_ctc": 0.6904383897781372, 
"train/loss_error": 0.5444180965423584, "train/loss_total": 0.5736221671104431 }, { "epoch": 3.3438418380977826, "step": 12516, "train/loss_ctc": 0.664435863494873, "train/loss_error": 0.4838052988052368, "train/loss_total": 0.519931435585022 }, { "epoch": 3.3441090034731498, "step": 12517, "train/loss_ctc": 0.6526394486427307, "train/loss_error": 0.4551258683204651, "train/loss_total": 0.4946286082267761 }, { "epoch": 3.3443761688485174, "step": 12518, "train/loss_ctc": 0.661676824092865, "train/loss_error": 0.4028259515762329, "train/loss_total": 0.4545961320400238 }, { "epoch": 3.3446433342238846, "step": 12519, "train/loss_ctc": 0.17477723956108093, "train/loss_error": 0.4583906829357147, "train/loss_total": 0.401667982339859 }, { "epoch": 3.344910499599252, "grad_norm": 1.0262494087219238, "learning_rate": 9.941757948169917e-06, "loss": 0.4879, "step": 12520 }, { "epoch": 3.344910499599252, "step": 12520, "train/loss_ctc": 0.7138292789459229, "train/loss_error": 0.4453940987586975, "train/loss_total": 0.4990811347961426 }, { "epoch": 3.3451776649746194, "step": 12521, "train/loss_ctc": 0.4045957326889038, "train/loss_error": 0.45557352900505066, "train/loss_total": 0.44537797570228577 }, { "epoch": 3.3454448303499866, "step": 12522, "train/loss_ctc": 0.8892731070518494, "train/loss_error": 0.4652474522590637, "train/loss_total": 0.5500525832176208 }, { "epoch": 3.345711995725354, "step": 12523, "train/loss_ctc": 1.146294116973877, "train/loss_error": 0.3846098482608795, "train/loss_total": 0.536946713924408 }, { "epoch": 3.3459791611007215, "step": 12524, "train/loss_ctc": 0.3026636838912964, "train/loss_error": 0.37560585141181946, "train/loss_total": 0.3610174357891083 }, { "epoch": 3.3462463264760887, "step": 12525, "train/loss_ctc": 0.4348639249801636, "train/loss_error": 0.44490617513656616, "train/loss_total": 0.4428977370262146 }, { "epoch": 3.346513491851456, "step": 12526, "train/loss_ctc": 0.2906932830810547, "train/loss_error": 0.3933072090148926, 
"train/loss_total": 0.37278443574905396 }, { "epoch": 3.3467806572268235, "step": 12527, "train/loss_ctc": 0.6157301068305969, "train/loss_error": 0.4874574840068817, "train/loss_total": 0.5131120085716248 }, { "epoch": 3.3470478226021907, "step": 12528, "train/loss_ctc": 0.24840496480464935, "train/loss_error": 0.3769036829471588, "train/loss_total": 0.35120391845703125 }, { "epoch": 3.3473149879775583, "step": 12529, "train/loss_ctc": 1.3038580417633057, "train/loss_error": 0.47425955533981323, "train/loss_total": 0.6401792764663696 }, { "epoch": 3.3475821533529255, "grad_norm": 2.5505378246307373, "learning_rate": 9.925728025647877e-06, "loss": 0.4713, "step": 12530 }, { "epoch": 3.3475821533529255, "step": 12530, "train/loss_ctc": 0.8226625919342041, "train/loss_error": 0.40437251329421997, "train/loss_total": 0.4880305528640747 }, { "epoch": 3.3478493187282927, "step": 12531, "train/loss_ctc": 0.35660141706466675, "train/loss_error": 0.4463168978691101, "train/loss_total": 0.4283738136291504 }, { "epoch": 3.3481164841036604, "step": 12532, "train/loss_ctc": 1.4124302864074707, "train/loss_error": 0.38672906160354614, "train/loss_total": 0.5918692946434021 }, { "epoch": 3.3483836494790276, "step": 12533, "train/loss_ctc": 0.7643488645553589, "train/loss_error": 0.40870633721351624, "train/loss_total": 0.4798348546028137 }, { "epoch": 3.3486508148543948, "step": 12534, "train/loss_ctc": 0.9334856271743774, "train/loss_error": 0.43636614084243774, "train/loss_total": 0.5357900857925415 }, { "epoch": 3.3489179802297624, "step": 12535, "train/loss_ctc": 0.7419703602790833, "train/loss_error": 0.38736191391944885, "train/loss_total": 0.45828360319137573 }, { "epoch": 3.3491851456051296, "step": 12536, "train/loss_ctc": 0.851705014705658, "train/loss_error": 0.4639183580875397, "train/loss_total": 0.5414757132530212 }, { "epoch": 3.349452310980497, "step": 12537, "train/loss_ctc": 0.17621919512748718, "train/loss_error": 0.43462371826171875, "train/loss_total": 
0.3829428255558014 }, { "epoch": 3.3497194763558644, "step": 12538, "train/loss_ctc": 0.8001313209533691, "train/loss_error": 0.4440530240535736, "train/loss_total": 0.5152686834335327 }, { "epoch": 3.3499866417312316, "step": 12539, "train/loss_ctc": 0.723939061164856, "train/loss_error": 0.6174876689910889, "train/loss_total": 0.6387779712677002 }, { "epoch": 3.350253807106599, "grad_norm": 2.823058843612671, "learning_rate": 9.909698103125835e-06, "loss": 0.5061, "step": 12540 }, { "epoch": 3.350253807106599, "step": 12540, "train/loss_ctc": 0.9074879884719849, "train/loss_error": 0.4416358470916748, "train/loss_total": 0.5348062515258789 }, { "epoch": 3.3505209724819665, "step": 12541, "train/loss_ctc": 0.5382752418518066, "train/loss_error": 0.453294575214386, "train/loss_total": 0.4702907204627991 }, { "epoch": 3.3507881378573336, "step": 12542, "train/loss_ctc": 0.5560307502746582, "train/loss_error": 0.4405097961425781, "train/loss_total": 0.46361398696899414 }, { "epoch": 3.351055303232701, "step": 12543, "train/loss_ctc": 0.36412313580513, "train/loss_error": 0.425114244222641, "train/loss_total": 0.41291603446006775 }, { "epoch": 3.3513224686080685, "step": 12544, "train/loss_ctc": 0.5716051459312439, "train/loss_error": 0.44756871461868286, "train/loss_total": 0.4723759889602661 }, { "epoch": 3.3515896339834357, "step": 12545, "train/loss_ctc": 0.5639519691467285, "train/loss_error": 0.47575643658638, "train/loss_total": 0.4933955669403076 }, { "epoch": 3.351856799358803, "step": 12546, "train/loss_ctc": 0.3877682089805603, "train/loss_error": 0.3987623155117035, "train/loss_total": 0.3965635299682617 }, { "epoch": 3.3521239647341705, "step": 12547, "train/loss_ctc": 1.656509518623352, "train/loss_error": 0.43862828612327576, "train/loss_total": 0.68220454454422 }, { "epoch": 3.3523911301095377, "step": 12548, "train/loss_ctc": 0.4000224173069, "train/loss_error": 0.4613867700099945, "train/loss_total": 0.4491139054298401 }, { "epoch": 
3.3526582954849053, "step": 12549, "train/loss_ctc": 0.3262655436992645, "train/loss_error": 0.5003479719161987, "train/loss_total": 0.46553149819374084 }, { "epoch": 3.3529254608602725, "grad_norm": 3.098905324935913, "learning_rate": 9.893668180603793e-06, "loss": 0.4841, "step": 12550 }, { "epoch": 3.3529254608602725, "step": 12550, "train/loss_ctc": 0.6205918788909912, "train/loss_error": 0.5155282616615295, "train/loss_total": 0.5365409851074219 }, { "epoch": 3.3531926262356397, "step": 12551, "train/loss_ctc": 0.567213773727417, "train/loss_error": 0.4361366033554077, "train/loss_total": 0.46235203742980957 }, { "epoch": 3.3534597916110074, "step": 12552, "train/loss_ctc": 0.8670845627784729, "train/loss_error": 0.39803460240364075, "train/loss_total": 0.4918445944786072 }, { "epoch": 3.3537269569863746, "step": 12553, "train/loss_ctc": 0.9603655338287354, "train/loss_error": 0.407944917678833, "train/loss_total": 0.5184290409088135 }, { "epoch": 3.3539941223617418, "step": 12554, "train/loss_ctc": 0.6909974813461304, "train/loss_error": 0.4148447513580322, "train/loss_total": 0.4700753092765808 }, { "epoch": 3.3542612877371094, "step": 12555, "train/loss_ctc": 1.2313697338104248, "train/loss_error": 0.48555293679237366, "train/loss_total": 0.6347163319587708 }, { "epoch": 3.3545284531124766, "step": 12556, "train/loss_ctc": 0.8272851705551147, "train/loss_error": 0.3709534704685211, "train/loss_total": 0.46221983432769775 }, { "epoch": 3.354795618487844, "step": 12557, "train/loss_ctc": 0.6004728078842163, "train/loss_error": 0.4306688606739044, "train/loss_total": 0.4646296501159668 }, { "epoch": 3.3550627838632114, "step": 12558, "train/loss_ctc": 0.4730129539966583, "train/loss_error": 0.4413232207298279, "train/loss_total": 0.4476611614227295 }, { "epoch": 3.3553299492385786, "step": 12559, "train/loss_ctc": 0.6230697631835938, "train/loss_error": 0.44592371582984924, "train/loss_total": 0.48135292530059814 }, { "epoch": 3.3555971146139463, "grad_norm": 
1.1752240657806396, "learning_rate": 9.877638258081753e-06, "loss": 0.497, "step": 12560 }, { "epoch": 3.3555971146139463, "step": 12560, "train/loss_ctc": 0.6737707853317261, "train/loss_error": 0.48203057050704956, "train/loss_total": 0.5203786492347717 }, { "epoch": 3.3558642799893135, "step": 12561, "train/loss_ctc": 0.6167168617248535, "train/loss_error": 0.3888086974620819, "train/loss_total": 0.4343903362751007 }, { "epoch": 3.3561314453646807, "step": 12562, "train/loss_ctc": 0.7458319067955017, "train/loss_error": 0.38312506675720215, "train/loss_total": 0.4556664228439331 }, { "epoch": 3.3563986107400483, "step": 12563, "train/loss_ctc": 0.6001033186912537, "train/loss_error": 0.4175870716571808, "train/loss_total": 0.45409032702445984 }, { "epoch": 3.3566657761154155, "step": 12564, "train/loss_ctc": 0.9687315225601196, "train/loss_error": 0.4558736979961395, "train/loss_total": 0.5584452748298645 }, { "epoch": 3.3569329414907827, "step": 12565, "train/loss_ctc": 1.3764052391052246, "train/loss_error": 0.40801772475242615, "train/loss_total": 0.6016952395439148 }, { "epoch": 3.3572001068661503, "step": 12566, "train/loss_ctc": 0.641891360282898, "train/loss_error": 0.48094144463539124, "train/loss_total": 0.5131314396858215 }, { "epoch": 3.3574672722415175, "step": 12567, "train/loss_ctc": 0.7327046990394592, "train/loss_error": 0.4862494170665741, "train/loss_total": 0.5355404615402222 }, { "epoch": 3.3577344376168847, "step": 12568, "train/loss_ctc": 1.563614845275879, "train/loss_error": 0.42927271127700806, "train/loss_total": 0.6561411619186401 }, { "epoch": 3.3580016029922524, "step": 12569, "train/loss_ctc": 0.5027512311935425, "train/loss_error": 0.4354690611362457, "train/loss_total": 0.4489254951477051 }, { "epoch": 3.3582687683676196, "grad_norm": 4.107820987701416, "learning_rate": 9.861608335559711e-06, "loss": 0.5178, "step": 12570 }, { "epoch": 3.3582687683676196, "step": 12570, "train/loss_ctc": 0.8910608887672424, "train/loss_error": 
0.5057950019836426, "train/loss_total": 0.5828481912612915 }, { "epoch": 3.3585359337429868, "step": 12571, "train/loss_ctc": 0.7665761709213257, "train/loss_error": 0.5250042676925659, "train/loss_total": 0.5733186602592468 }, { "epoch": 3.3588030991183544, "step": 12572, "train/loss_ctc": 0.3863983750343323, "train/loss_error": 0.4972648322582245, "train/loss_total": 0.4750915467739105 }, { "epoch": 3.3590702644937216, "step": 12573, "train/loss_ctc": 0.4018425941467285, "train/loss_error": 0.4372393488883972, "train/loss_total": 0.4301600158214569 }, { "epoch": 3.359337429869089, "step": 12574, "train/loss_ctc": 0.5124792456626892, "train/loss_error": 0.48852017521858215, "train/loss_total": 0.4933120012283325 }, { "epoch": 3.3596045952444564, "step": 12575, "train/loss_ctc": 0.3155952990055084, "train/loss_error": 0.4121854305267334, "train/loss_total": 0.39286741614341736 }, { "epoch": 3.3598717606198236, "step": 12576, "train/loss_ctc": 0.9355791807174683, "train/loss_error": 0.4287608861923218, "train/loss_total": 0.5301245450973511 }, { "epoch": 3.360138925995191, "step": 12577, "train/loss_ctc": 0.9452201724052429, "train/loss_error": 0.47864943742752075, "train/loss_total": 0.5719636082649231 }, { "epoch": 3.3604060913705585, "step": 12578, "train/loss_ctc": 0.9009571671485901, "train/loss_error": 0.49189263582229614, "train/loss_total": 0.5737055540084839 }, { "epoch": 3.3606732567459257, "step": 12579, "train/loss_ctc": 0.8652265071868896, "train/loss_error": 0.417466938495636, "train/loss_total": 0.5070188641548157 }, { "epoch": 3.360940422121293, "grad_norm": 2.124965190887451, "learning_rate": 9.84557841303767e-06, "loss": 0.513, "step": 12580 }, { "epoch": 3.360940422121293, "step": 12580, "train/loss_ctc": 0.6146109104156494, "train/loss_error": 0.4404066205024719, "train/loss_total": 0.47524747252464294 }, { "epoch": 3.3612075874966605, "step": 12581, "train/loss_ctc": 0.6400007009506226, "train/loss_error": 0.5038252472877502, "train/loss_total": 
0.5310603380203247 }, { "epoch": 3.3614747528720277, "step": 12582, "train/loss_ctc": 1.4791074991226196, "train/loss_error": 0.5108702182769775, "train/loss_total": 0.7045177221298218 }, { "epoch": 3.3617419182473953, "step": 12583, "train/loss_ctc": 0.7473845481872559, "train/loss_error": 0.45443591475486755, "train/loss_total": 0.5130256414413452 }, { "epoch": 3.3620090836227625, "step": 12584, "train/loss_ctc": 0.5608677864074707, "train/loss_error": 0.3671538531780243, "train/loss_total": 0.4058966338634491 }, { "epoch": 3.3622762489981297, "step": 12585, "train/loss_ctc": 0.6575157642364502, "train/loss_error": 0.42694103717803955, "train/loss_total": 0.47305595874786377 }, { "epoch": 3.3625434143734974, "step": 12586, "train/loss_ctc": 1.198800802230835, "train/loss_error": 0.5200507044792175, "train/loss_total": 0.6558007001876831 }, { "epoch": 3.3628105797488645, "step": 12587, "train/loss_ctc": 0.47323065996170044, "train/loss_error": 0.3833249807357788, "train/loss_total": 0.4013061225414276 }, { "epoch": 3.3630777451242317, "step": 12588, "train/loss_ctc": 0.7304648756980896, "train/loss_error": 0.47709429264068604, "train/loss_total": 0.5277684330940247 }, { "epoch": 3.3633449104995994, "step": 12589, "train/loss_ctc": 0.33274126052856445, "train/loss_error": 0.46975475549697876, "train/loss_total": 0.4423520565032959 }, { "epoch": 3.3636120758749666, "grad_norm": 1.3799299001693726, "learning_rate": 9.82954849051563e-06, "loss": 0.513, "step": 12590 }, { "epoch": 3.3636120758749666, "step": 12590, "train/loss_ctc": 0.5150805711746216, "train/loss_error": 0.33868226408958435, "train/loss_total": 0.3739619255065918 }, { "epoch": 3.3638792412503338, "step": 12591, "train/loss_ctc": 0.9282892942428589, "train/loss_error": 0.4025863707065582, "train/loss_total": 0.5077269673347473 }, { "epoch": 3.3641464066257014, "step": 12592, "train/loss_ctc": 0.4769793450832367, "train/loss_error": 0.4924688935279846, "train/loss_total": 0.4893709719181061 }, { 
"epoch": 3.3644135720010686, "step": 12593, "train/loss_ctc": 0.36997342109680176, "train/loss_error": 0.48552605509757996, "train/loss_total": 0.46241554617881775 }, { "epoch": 3.3646807373764362, "step": 12594, "train/loss_ctc": 1.3546596765518188, "train/loss_error": 0.4799419641494751, "train/loss_total": 0.6548855304718018 }, { "epoch": 3.3649479027518034, "step": 12595, "train/loss_ctc": 1.1055433750152588, "train/loss_error": 0.4662948250770569, "train/loss_total": 0.5941445827484131 }, { "epoch": 3.3652150681271706, "step": 12596, "train/loss_ctc": 1.228292465209961, "train/loss_error": 0.49628114700317383, "train/loss_total": 0.6426834464073181 }, { "epoch": 3.3654822335025383, "step": 12597, "train/loss_ctc": 0.9255362153053284, "train/loss_error": 0.46696925163269043, "train/loss_total": 0.5586826801300049 }, { "epoch": 3.3657493988779055, "step": 12598, "train/loss_ctc": 0.9155336618423462, "train/loss_error": 0.4207615256309509, "train/loss_total": 0.5197159647941589 }, { "epoch": 3.3660165642532727, "step": 12599, "train/loss_ctc": 0.808289110660553, "train/loss_error": 0.48995327949523926, "train/loss_total": 0.553620457649231 }, { "epoch": 3.3662837296286403, "grad_norm": 2.622401475906372, "learning_rate": 9.813518567993589e-06, "loss": 0.5357, "step": 12600 }, { "epoch": 3.3662837296286403, "step": 12600, "train/loss_ctc": 1.03378427028656, "train/loss_error": 0.4757530987262726, "train/loss_total": 0.5873593091964722 }, { "epoch": 3.3665508950040075, "step": 12601, "train/loss_ctc": 0.4151036739349365, "train/loss_error": 0.45373931527137756, "train/loss_total": 0.4460121989250183 }, { "epoch": 3.3668180603793747, "step": 12602, "train/loss_ctc": 1.2762324810028076, "train/loss_error": 0.47029152512550354, "train/loss_total": 0.6314797401428223 }, { "epoch": 3.3670852257547423, "step": 12603, "train/loss_ctc": 0.6392160058021545, "train/loss_error": 0.3938557207584381, "train/loss_total": 0.4429277777671814 }, { "epoch": 3.3673523911301095, 
"step": 12604, "train/loss_ctc": 0.41877710819244385, "train/loss_error": 0.3905928432941437, "train/loss_total": 0.39622971415519714 }, { "epoch": 3.3676195565054767, "step": 12605, "train/loss_ctc": 0.739913284778595, "train/loss_error": 0.42615216970443726, "train/loss_total": 0.4889043867588043 }, { "epoch": 3.3678867218808444, "step": 12606, "train/loss_ctc": 0.9899736642837524, "train/loss_error": 0.44989070296287537, "train/loss_total": 0.5579073429107666 }, { "epoch": 3.3681538872562116, "step": 12607, "train/loss_ctc": 0.2731904983520508, "train/loss_error": 0.3869076073169708, "train/loss_total": 0.36416417360305786 }, { "epoch": 3.3684210526315788, "step": 12608, "train/loss_ctc": 0.5812617540359497, "train/loss_error": 0.5074073076248169, "train/loss_total": 0.5221781730651855 }, { "epoch": 3.3686882180069464, "step": 12609, "train/loss_ctc": 0.7452356815338135, "train/loss_error": 0.4828200936317444, "train/loss_total": 0.5353032350540161 }, { "epoch": 3.3689553833823136, "grad_norm": 3.1364705562591553, "learning_rate": 9.797488645471548e-06, "loss": 0.4972, "step": 12610 }, { "epoch": 3.3689553833823136, "step": 12610, "train/loss_ctc": 0.29275190830230713, "train/loss_error": 0.4434610903263092, "train/loss_total": 0.41331925988197327 }, { "epoch": 3.369222548757681, "step": 12611, "train/loss_ctc": 0.5590653419494629, "train/loss_error": 0.49912455677986145, "train/loss_total": 0.5111126899719238 }, { "epoch": 3.3694897141330484, "step": 12612, "train/loss_ctc": 0.3575895428657532, "train/loss_error": 0.4840008020401001, "train/loss_total": 0.45871856808662415 }, { "epoch": 3.3697568795084156, "step": 12613, "train/loss_ctc": 0.8308058381080627, "train/loss_error": 0.3890172243118286, "train/loss_total": 0.47737497091293335 }, { "epoch": 3.370024044883783, "step": 12614, "train/loss_ctc": 0.6636768579483032, "train/loss_error": 0.5032960176467896, "train/loss_total": 0.5353721976280212 }, { "epoch": 3.3702912102591505, "step": 12615, 
"train/loss_ctc": 1.7495685815811157, "train/loss_error": 0.5046049952507019, "train/loss_total": 0.7535977363586426 }, { "epoch": 3.3705583756345177, "step": 12616, "train/loss_ctc": 0.6031861901283264, "train/loss_error": 0.4024745225906372, "train/loss_total": 0.44261685013771057 }, { "epoch": 3.3708255410098853, "step": 12617, "train/loss_ctc": 0.6751477718353271, "train/loss_error": 0.4231029450893402, "train/loss_total": 0.4735119044780731 }, { "epoch": 3.3710927063852525, "step": 12618, "train/loss_ctc": 1.04222571849823, "train/loss_error": 0.5034300088882446, "train/loss_total": 0.6111891865730286 }, { "epoch": 3.3713598717606197, "step": 12619, "train/loss_ctc": 0.5746623277664185, "train/loss_error": 0.4587984085083008, "train/loss_total": 0.48197120428085327 }, { "epoch": 3.3716270371359873, "grad_norm": 1.8855966329574585, "learning_rate": 9.781458722949506e-06, "loss": 0.5159, "step": 12620 }, { "epoch": 3.3716270371359873, "step": 12620, "train/loss_ctc": 1.2030490636825562, "train/loss_error": 0.45937010645866394, "train/loss_total": 0.6081058979034424 }, { "epoch": 3.3718942025113545, "step": 12621, "train/loss_ctc": 0.4638332426548004, "train/loss_error": 0.47077783942222595, "train/loss_total": 0.4693889319896698 }, { "epoch": 3.3721613678867217, "step": 12622, "train/loss_ctc": 0.7161033153533936, "train/loss_error": 0.4321417808532715, "train/loss_total": 0.48893409967422485 }, { "epoch": 3.3724285332620894, "step": 12623, "train/loss_ctc": 0.8109636306762695, "train/loss_error": 0.40206387639045715, "train/loss_total": 0.4838438332080841 }, { "epoch": 3.3726956986374566, "step": 12624, "train/loss_ctc": 0.85814368724823, "train/loss_error": 0.47942572832107544, "train/loss_total": 0.5551693439483643 }, { "epoch": 3.3729628640128237, "step": 12625, "train/loss_ctc": 0.4254632592201233, "train/loss_error": 0.4761785864830017, "train/loss_total": 0.46603554487228394 }, { "epoch": 3.3732300293881914, "step": 12626, "train/loss_ctc": 
0.5340232253074646, "train/loss_error": 0.4432150721549988, "train/loss_total": 0.46137669682502747 }, { "epoch": 3.3734971947635586, "step": 12627, "train/loss_ctc": 0.9791405200958252, "train/loss_error": 0.371880441904068, "train/loss_total": 0.49333247542381287 }, { "epoch": 3.373764360138926, "step": 12628, "train/loss_ctc": 0.6249136924743652, "train/loss_error": 0.4136705696582794, "train/loss_total": 0.45591920614242554 }, { "epoch": 3.3740315255142934, "step": 12629, "train/loss_ctc": 0.7230215072631836, "train/loss_error": 0.4543725550174713, "train/loss_total": 0.5081023573875427 }, { "epoch": 3.3742986908896606, "grad_norm": 1.9651304483413696, "learning_rate": 9.765428800427464e-06, "loss": 0.499, "step": 12630 }, { "epoch": 3.3742986908896606, "step": 12630, "train/loss_ctc": 0.8849824666976929, "train/loss_error": 0.4518902897834778, "train/loss_total": 0.5385087728500366 }, { "epoch": 3.3745658562650283, "step": 12631, "train/loss_ctc": 0.7706126570701599, "train/loss_error": 0.41653865575790405, "train/loss_total": 0.48735347390174866 }, { "epoch": 3.3748330216403954, "step": 12632, "train/loss_ctc": 0.9847192764282227, "train/loss_error": 0.45564836263656616, "train/loss_total": 0.5614625811576843 }, { "epoch": 3.3751001870157626, "step": 12633, "train/loss_ctc": 0.3849765360355377, "train/loss_error": 0.45780205726623535, "train/loss_total": 0.44323697686195374 }, { "epoch": 3.3753673523911303, "step": 12634, "train/loss_ctc": 0.7312218546867371, "train/loss_error": 0.48510003089904785, "train/loss_total": 0.5343244075775146 }, { "epoch": 3.3756345177664975, "step": 12635, "train/loss_ctc": 1.1932517290115356, "train/loss_error": 0.46586692333221436, "train/loss_total": 0.6113438606262207 }, { "epoch": 3.3759016831418647, "step": 12636, "train/loss_ctc": 0.5786000490188599, "train/loss_error": 0.4516230821609497, "train/loss_total": 0.47701847553253174 }, { "epoch": 3.3761688485172323, "step": 12637, "train/loss_ctc": 1.0745859146118164, 
"train/loss_error": 0.43614277243614197, "train/loss_total": 0.5638314485549927 }, { "epoch": 3.3764360138925995, "step": 12638, "train/loss_ctc": 1.5519359111785889, "train/loss_error": 0.483517050743103, "train/loss_total": 0.6972008347511292 }, { "epoch": 3.3767031792679667, "step": 12639, "train/loss_ctc": 0.6311187744140625, "train/loss_error": 0.4872916340827942, "train/loss_total": 0.5160570740699768 }, { "epoch": 3.3769703446433343, "grad_norm": 2.0067341327667236, "learning_rate": 9.749398877905424e-06, "loss": 0.543, "step": 12640 }, { "epoch": 3.3769703446433343, "step": 12640, "train/loss_ctc": 0.8217681050300598, "train/loss_error": 0.4800790846347809, "train/loss_total": 0.5484169125556946 }, { "epoch": 3.3772375100187015, "step": 12641, "train/loss_ctc": 1.1965479850769043, "train/loss_error": 0.4663226306438446, "train/loss_total": 0.6123676896095276 }, { "epoch": 3.3775046753940687, "step": 12642, "train/loss_ctc": 0.5489997863769531, "train/loss_error": 0.4227104187011719, "train/loss_total": 0.4479683041572571 }, { "epoch": 3.3777718407694364, "step": 12643, "train/loss_ctc": 0.5473004579544067, "train/loss_error": 0.48812344670295715, "train/loss_total": 0.4999588429927826 }, { "epoch": 3.3780390061448036, "step": 12644, "train/loss_ctc": 0.45828258991241455, "train/loss_error": 0.4196952283382416, "train/loss_total": 0.4274126887321472 }, { "epoch": 3.3783061715201708, "step": 12645, "train/loss_ctc": 0.4259454011917114, "train/loss_error": 0.44630658626556396, "train/loss_total": 0.4422343373298645 }, { "epoch": 3.3785733368955384, "step": 12646, "train/loss_ctc": 0.6434879899024963, "train/loss_error": 0.392377644777298, "train/loss_total": 0.44259971380233765 }, { "epoch": 3.3788405022709056, "step": 12647, "train/loss_ctc": 0.838654637336731, "train/loss_error": 0.44273993372917175, "train/loss_total": 0.5219228863716125 }, { "epoch": 3.379107667646273, "step": 12648, "train/loss_ctc": 1.2793173789978027, "train/loss_error": 
0.4684622585773468, "train/loss_total": 0.6306332945823669 }, { "epoch": 3.3793748330216404, "step": 12649, "train/loss_ctc": 0.8414642810821533, "train/loss_error": 0.4657098650932312, "train/loss_total": 0.5408607721328735 }, { "epoch": 3.3796419983970076, "grad_norm": 3.41510009765625, "learning_rate": 9.733368955383382e-06, "loss": 0.5114, "step": 12650 }, { "epoch": 3.3796419983970076, "step": 12650, "train/loss_ctc": 0.7532855272293091, "train/loss_error": 0.48449060320854187, "train/loss_total": 0.5382496118545532 }, { "epoch": 3.3799091637723753, "step": 12651, "train/loss_ctc": 0.5778038501739502, "train/loss_error": 0.4786280691623688, "train/loss_total": 0.4984632432460785 }, { "epoch": 3.3801763291477425, "step": 12652, "train/loss_ctc": 0.7008476853370667, "train/loss_error": 0.41002157330513, "train/loss_total": 0.46818679571151733 }, { "epoch": 3.3804434945231097, "step": 12653, "train/loss_ctc": 0.9621004462242126, "train/loss_error": 0.41965150833129883, "train/loss_total": 0.5281413197517395 }, { "epoch": 3.3807106598984773, "step": 12654, "train/loss_ctc": 0.557693362236023, "train/loss_error": 0.49353909492492676, "train/loss_total": 0.506369948387146 }, { "epoch": 3.3809778252738445, "step": 12655, "train/loss_ctc": 0.774019718170166, "train/loss_error": 0.4056299328804016, "train/loss_total": 0.4793078899383545 }, { "epoch": 3.3812449906492117, "step": 12656, "train/loss_ctc": 0.7060790061950684, "train/loss_error": 0.41612279415130615, "train/loss_total": 0.4741140305995941 }, { "epoch": 3.3815121560245793, "step": 12657, "train/loss_ctc": 0.7461004853248596, "train/loss_error": 0.42417365312576294, "train/loss_total": 0.4885590076446533 }, { "epoch": 3.3817793213999465, "step": 12658, "train/loss_ctc": 0.992304265499115, "train/loss_error": 0.4913969337940216, "train/loss_total": 0.5915784239768982 }, { "epoch": 3.382046486775314, "step": 12659, "train/loss_ctc": 0.6712580919265747, "train/loss_error": 0.5365786552429199, "train/loss_total": 
0.5635145902633667 }, { "epoch": 3.3823136521506814, "grad_norm": 3.332401752471924, "learning_rate": 9.71733903286134e-06, "loss": 0.5136, "step": 12660 }, { "epoch": 3.3823136521506814, "step": 12660, "train/loss_ctc": 1.49338960647583, "train/loss_error": 0.5026655793190002, "train/loss_total": 0.700810432434082 }, { "epoch": 3.3825808175260486, "step": 12661, "train/loss_ctc": 0.7559688091278076, "train/loss_error": 0.44932878017425537, "train/loss_total": 0.5106568336486816 }, { "epoch": 3.382847982901416, "step": 12662, "train/loss_ctc": 1.0625154972076416, "train/loss_error": 0.42077013850212097, "train/loss_total": 0.549119234085083 }, { "epoch": 3.3831151482767834, "step": 12663, "train/loss_ctc": 0.44302093982696533, "train/loss_error": 0.5146833062171936, "train/loss_total": 0.500350832939148 }, { "epoch": 3.3833823136521506, "step": 12664, "train/loss_ctc": 0.6290394067764282, "train/loss_error": 0.4935898780822754, "train/loss_total": 0.5206798315048218 }, { "epoch": 3.3836494790275182, "step": 12665, "train/loss_ctc": 0.4170185327529907, "train/loss_error": 0.47477319836616516, "train/loss_total": 0.4632222652435303 }, { "epoch": 3.3839166444028854, "step": 12666, "train/loss_ctc": 1.4502918720245361, "train/loss_error": 0.5385118126869202, "train/loss_total": 0.7208678722381592 }, { "epoch": 3.3841838097782526, "step": 12667, "train/loss_ctc": 0.7615281939506531, "train/loss_error": 0.429186075925827, "train/loss_total": 0.49565452337265015 }, { "epoch": 3.3844509751536203, "step": 12668, "train/loss_ctc": 1.024327039718628, "train/loss_error": 0.43801674246788025, "train/loss_total": 0.5552788376808167 }, { "epoch": 3.3847181405289875, "step": 12669, "train/loss_ctc": 0.8669531941413879, "train/loss_error": 0.49148738384246826, "train/loss_total": 0.5665805339813232 }, { "epoch": 3.3849853059043546, "grad_norm": 1.626339316368103, "learning_rate": 9.7013091103393e-06, "loss": 0.5583, "step": 12670 }, { "epoch": 3.3849853059043546, "step": 12670, 
"train/loss_ctc": 0.6324999928474426, "train/loss_error": 0.37476566433906555, "train/loss_total": 0.42631250619888306 }, { "epoch": 3.3852524712797223, "step": 12671, "train/loss_ctc": 0.8654330968856812, "train/loss_error": 0.48242807388305664, "train/loss_total": 0.5590291023254395 }, { "epoch": 3.3855196366550895, "step": 12672, "train/loss_ctc": 0.7319749593734741, "train/loss_error": 0.4271714389324188, "train/loss_total": 0.48813214898109436 }, { "epoch": 3.3857868020304567, "step": 12673, "train/loss_ctc": 0.7869457006454468, "train/loss_error": 0.4093174636363983, "train/loss_total": 0.4848431348800659 }, { "epoch": 3.3860539674058243, "step": 12674, "train/loss_ctc": 0.5425136685371399, "train/loss_error": 0.433681845664978, "train/loss_total": 0.4554482400417328 }, { "epoch": 3.3863211327811915, "step": 12675, "train/loss_ctc": 0.2592329978942871, "train/loss_error": 0.4157635271549225, "train/loss_total": 0.38445740938186646 }, { "epoch": 3.3865882981565587, "step": 12676, "train/loss_ctc": 1.4296725988388062, "train/loss_error": 0.47208574414253235, "train/loss_total": 0.6636031270027161 }, { "epoch": 3.3868554635319263, "step": 12677, "train/loss_ctc": 1.5905654430389404, "train/loss_error": 0.44297167658805847, "train/loss_total": 0.6724904775619507 }, { "epoch": 3.3871226289072935, "step": 12678, "train/loss_ctc": 0.5587910413742065, "train/loss_error": 0.367347776889801, "train/loss_total": 0.40563642978668213 }, { "epoch": 3.3873897942826607, "step": 12679, "train/loss_ctc": 0.350940465927124, "train/loss_error": 0.4782491624355316, "train/loss_total": 0.4527874290943146 }, { "epoch": 3.3876569596580284, "grad_norm": 1.234549641609192, "learning_rate": 9.68527918781726e-06, "loss": 0.4993, "step": 12680 }, { "epoch": 3.3876569596580284, "step": 12680, "train/loss_ctc": 0.3856378197669983, "train/loss_error": 0.535720944404602, "train/loss_total": 0.5057043433189392 }, { "epoch": 3.3879241250333956, "step": 12681, "train/loss_ctc": 
0.9540135860443115, "train/loss_error": 0.4600480794906616, "train/loss_total": 0.5588412284851074 }, { "epoch": 3.388191290408763, "step": 12682, "train/loss_ctc": 1.5873404741287231, "train/loss_error": 0.47973036766052246, "train/loss_total": 0.7012524008750916 }, { "epoch": 3.3884584557841304, "step": 12683, "train/loss_ctc": 0.36969316005706787, "train/loss_error": 0.4534026086330414, "train/loss_total": 0.4366607367992401 }, { "epoch": 3.3887256211594976, "step": 12684, "train/loss_ctc": 0.2930218577384949, "train/loss_error": 0.4954657554626465, "train/loss_total": 0.45497697591781616 }, { "epoch": 3.3889927865348652, "step": 12685, "train/loss_ctc": 1.655297875404358, "train/loss_error": 0.45732131600379944, "train/loss_total": 0.6969166398048401 }, { "epoch": 3.3892599519102324, "step": 12686, "train/loss_ctc": 0.4187622368335724, "train/loss_error": 0.397108256816864, "train/loss_total": 0.4014390707015991 }, { "epoch": 3.3895271172855996, "step": 12687, "train/loss_ctc": 0.8611359596252441, "train/loss_error": 0.5591228008270264, "train/loss_total": 0.6195254325866699 }, { "epoch": 3.3897942826609673, "step": 12688, "train/loss_ctc": 0.5267398357391357, "train/loss_error": 0.4380470812320709, "train/loss_total": 0.4557856321334839 }, { "epoch": 3.3900614480363345, "step": 12689, "train/loss_ctc": 0.7503216862678528, "train/loss_error": 0.3910515010356903, "train/loss_total": 0.46290552616119385 }, { "epoch": 3.3903286134117017, "grad_norm": 2.29803204536438, "learning_rate": 9.669249265295218e-06, "loss": 0.5294, "step": 12690 }, { "epoch": 3.3903286134117017, "step": 12690, "train/loss_ctc": 0.6557763814926147, "train/loss_error": 0.4213078022003174, "train/loss_total": 0.46820151805877686 }, { "epoch": 3.3905957787870693, "step": 12691, "train/loss_ctc": 0.5744165182113647, "train/loss_error": 0.41033735871315, "train/loss_total": 0.4431532025337219 }, { "epoch": 3.3908629441624365, "step": 12692, "train/loss_ctc": 0.34481117129325867, 
"train/loss_error": 0.42586463689804077, "train/loss_total": 0.4096539616584778 }, { "epoch": 3.391130109537804, "step": 12693, "train/loss_ctc": 0.9153462648391724, "train/loss_error": 0.42143234610557556, "train/loss_total": 0.5202151536941528 }, { "epoch": 3.3913972749131713, "step": 12694, "train/loss_ctc": 0.9187118411064148, "train/loss_error": 0.4455547630786896, "train/loss_total": 0.5401861667633057 }, { "epoch": 3.3916644402885385, "step": 12695, "train/loss_ctc": 1.315683364868164, "train/loss_error": 0.3859065771102905, "train/loss_total": 0.571861982345581 }, { "epoch": 3.391931605663906, "step": 12696, "train/loss_ctc": 0.989566445350647, "train/loss_error": 0.47181200981140137, "train/loss_total": 0.5753629207611084 }, { "epoch": 3.3921987710392734, "step": 12697, "train/loss_ctc": 0.9206957817077637, "train/loss_error": 0.42697077989578247, "train/loss_total": 0.5257158279418945 }, { "epoch": 3.3924659364146406, "step": 12698, "train/loss_ctc": 0.5391886234283447, "train/loss_error": 0.4019680917263031, "train/loss_total": 0.42941221594810486 }, { "epoch": 3.392733101790008, "step": 12699, "train/loss_ctc": 0.772948145866394, "train/loss_error": 0.34448543190956116, "train/loss_total": 0.4301779866218567 }, { "epoch": 3.3930002671653754, "grad_norm": 1.8358486890792847, "learning_rate": 9.653219342773178e-06, "loss": 0.4914, "step": 12700 }, { "epoch": 3.3930002671653754, "step": 12700, "train/loss_ctc": 0.5193637013435364, "train/loss_error": 0.4737841486930847, "train/loss_total": 0.48290008306503296 }, { "epoch": 3.3932674325407426, "step": 12701, "train/loss_ctc": 0.5692068338394165, "train/loss_error": 0.4729402959346771, "train/loss_total": 0.49219363927841187 }, { "epoch": 3.3935345979161102, "step": 12702, "train/loss_ctc": 0.2493082880973816, "train/loss_error": 0.456632137298584, "train/loss_total": 0.4151673913002014 }, { "epoch": 3.3938017632914774, "step": 12703, "train/loss_ctc": 1.0140612125396729, "train/loss_error": 
0.5046366453170776, "train/loss_total": 0.6065215468406677 }, { "epoch": 3.3940689286668446, "step": 12704, "train/loss_ctc": 0.7429625988006592, "train/loss_error": 0.507739782333374, "train/loss_total": 0.55478435754776 }, { "epoch": 3.3943360940422123, "step": 12705, "train/loss_ctc": 0.6520477533340454, "train/loss_error": 0.49479934573173523, "train/loss_total": 0.5262490510940552 }, { "epoch": 3.3946032594175795, "step": 12706, "train/loss_ctc": 1.6473950147628784, "train/loss_error": 0.4124090075492859, "train/loss_total": 0.6594061851501465 }, { "epoch": 3.3948704247929467, "step": 12707, "train/loss_ctc": 0.6516450643539429, "train/loss_error": 0.4201606810092926, "train/loss_total": 0.4664575755596161 }, { "epoch": 3.3951375901683143, "step": 12708, "train/loss_ctc": 1.311729907989502, "train/loss_error": 0.43393850326538086, "train/loss_total": 0.6094968318939209 }, { "epoch": 3.3954047555436815, "step": 12709, "train/loss_ctc": 0.4344916343688965, "train/loss_error": 0.44961950182914734, "train/loss_total": 0.4465939402580261 }, { "epoch": 3.3956719209190487, "grad_norm": 4.709073066711426, "learning_rate": 9.637189420251136e-06, "loss": 0.526, "step": 12710 }, { "epoch": 3.3956719209190487, "step": 12710, "train/loss_ctc": 0.7672574520111084, "train/loss_error": 0.4128718972206116, "train/loss_total": 0.48374903202056885 }, { "epoch": 3.3959390862944163, "step": 12711, "train/loss_ctc": 0.3041682839393616, "train/loss_error": 0.40940573811531067, "train/loss_total": 0.3883582651615143 }, { "epoch": 3.3962062516697835, "step": 12712, "train/loss_ctc": 0.5619946718215942, "train/loss_error": 0.45453280210494995, "train/loss_total": 0.47602516412734985 }, { "epoch": 3.3964734170451507, "step": 12713, "train/loss_ctc": 0.32250964641571045, "train/loss_error": 0.4499591588973999, "train/loss_total": 0.4244692623615265 }, { "epoch": 3.3967405824205184, "step": 12714, "train/loss_ctc": 0.6259188652038574, "train/loss_error": 0.43614161014556885, 
"train/loss_total": 0.4740970730781555 }, { "epoch": 3.3970077477958855, "step": 12715, "train/loss_ctc": 0.49665889143943787, "train/loss_error": 0.49795588850975037, "train/loss_total": 0.49769651889801025 }, { "epoch": 3.397274913171253, "step": 12716, "train/loss_ctc": 1.121863603591919, "train/loss_error": 0.4453032314777374, "train/loss_total": 0.5806153416633606 }, { "epoch": 3.3975420785466204, "step": 12717, "train/loss_ctc": 0.8205733299255371, "train/loss_error": 0.4313768744468689, "train/loss_total": 0.5092161893844604 }, { "epoch": 3.3978092439219876, "step": 12718, "train/loss_ctc": 0.4985044002532959, "train/loss_error": 0.4848260283470154, "train/loss_total": 0.4875617027282715 }, { "epoch": 3.398076409297355, "step": 12719, "train/loss_ctc": 0.6645990610122681, "train/loss_error": 0.42316165566444397, "train/loss_total": 0.4714491367340088 }, { "epoch": 3.3983435746727224, "grad_norm": 2.122408390045166, "learning_rate": 9.621159497729094e-06, "loss": 0.4793, "step": 12720 }, { "epoch": 3.3983435746727224, "step": 12720, "train/loss_ctc": 0.6661287546157837, "train/loss_error": 0.4529535174369812, "train/loss_total": 0.49558860063552856 }, { "epoch": 3.3986107400480896, "step": 12721, "train/loss_ctc": 0.7937447428703308, "train/loss_error": 0.4474599361419678, "train/loss_total": 0.5167168974876404 }, { "epoch": 3.3988779054234572, "step": 12722, "train/loss_ctc": 1.077187180519104, "train/loss_error": 0.4572443664073944, "train/loss_total": 0.5812329053878784 }, { "epoch": 3.3991450707988244, "step": 12723, "train/loss_ctc": 0.7605564594268799, "train/loss_error": 0.4204455316066742, "train/loss_total": 0.4884677231311798 }, { "epoch": 3.3994122361741916, "step": 12724, "train/loss_ctc": 0.7129970788955688, "train/loss_error": 0.3634188771247864, "train/loss_total": 0.4333345293998718 }, { "epoch": 3.3996794015495593, "step": 12725, "train/loss_ctc": 0.4539598226547241, "train/loss_error": 0.42890894412994385, "train/loss_total": 
0.43391913175582886 }, { "epoch": 3.3999465669249265, "step": 12726, "train/loss_ctc": 1.3186876773834229, "train/loss_error": 0.459413081407547, "train/loss_total": 0.6312680244445801 }, { "epoch": 3.400213732300294, "step": 12727, "train/loss_ctc": 0.9748973250389099, "train/loss_error": 0.49885061383247375, "train/loss_total": 0.594059944152832 }, { "epoch": 3.4004808976756613, "step": 12728, "train/loss_ctc": 0.5015156269073486, "train/loss_error": 0.3922712802886963, "train/loss_total": 0.4141201376914978 }, { "epoch": 3.4007480630510285, "step": 12729, "train/loss_ctc": 1.591469407081604, "train/loss_error": 0.375414103269577, "train/loss_total": 0.6186251640319824 }, { "epoch": 3.401015228426396, "grad_norm": 5.861729145050049, "learning_rate": 9.605129575207054e-06, "loss": 0.5207, "step": 12730 }, { "epoch": 3.401015228426396, "step": 12730, "train/loss_ctc": 1.135448694229126, "train/loss_error": 0.42854076623916626, "train/loss_total": 0.5699223875999451 }, { "epoch": 3.4012823938017633, "step": 12731, "train/loss_ctc": 1.3554253578186035, "train/loss_error": 0.38216087222099304, "train/loss_total": 0.576813817024231 }, { "epoch": 3.4015495591771305, "step": 12732, "train/loss_ctc": 1.2910844087600708, "train/loss_error": 0.4901978075504303, "train/loss_total": 0.6503751277923584 }, { "epoch": 3.401816724552498, "step": 12733, "train/loss_ctc": 0.5498173236846924, "train/loss_error": 0.37316691875457764, "train/loss_total": 0.40849700570106506 }, { "epoch": 3.4020838899278654, "step": 12734, "train/loss_ctc": 0.6865022778511047, "train/loss_error": 0.4601364731788635, "train/loss_total": 0.5054096579551697 }, { "epoch": 3.4023510553032326, "step": 12735, "train/loss_ctc": 0.658434271812439, "train/loss_error": 0.4765641391277313, "train/loss_total": 0.5129381418228149 }, { "epoch": 3.4026182206786, "step": 12736, "train/loss_ctc": 0.4324839413166046, "train/loss_error": 0.5281749963760376, "train/loss_total": 0.5090367794036865 }, { "epoch": 
3.4028853860539674, "step": 12737, "train/loss_ctc": 0.7512761354446411, "train/loss_error": 0.4328087568283081, "train/loss_total": 0.49650225043296814 }, { "epoch": 3.4031525514293346, "step": 12738, "train/loss_ctc": 1.1892707347869873, "train/loss_error": 0.5188910961151123, "train/loss_total": 0.6529670357704163 }, { "epoch": 3.4034197168047022, "step": 12739, "train/loss_ctc": 1.1268444061279297, "train/loss_error": 0.4789396822452545, "train/loss_total": 0.6085206270217896 }, { "epoch": 3.4036868821800694, "grad_norm": 3.8292884826660156, "learning_rate": 9.589099652685012e-06, "loss": 0.5491, "step": 12740 }, { "epoch": 3.4036868821800694, "step": 12740, "train/loss_ctc": 0.8124564290046692, "train/loss_error": 0.47733378410339355, "train/loss_total": 0.5443583130836487 }, { "epoch": 3.4039540475554366, "step": 12741, "train/loss_ctc": 0.5507775545120239, "train/loss_error": 0.47978195548057556, "train/loss_total": 0.49398109316825867 }, { "epoch": 3.4042212129308043, "step": 12742, "train/loss_ctc": 0.7571340203285217, "train/loss_error": 0.37246254086494446, "train/loss_total": 0.44939684867858887 }, { "epoch": 3.4044883783061715, "step": 12743, "train/loss_ctc": 1.1086835861206055, "train/loss_error": 0.5232693552970886, "train/loss_total": 0.640352189540863 }, { "epoch": 3.4047555436815387, "step": 12744, "train/loss_ctc": 0.4896456003189087, "train/loss_error": 0.418550580739975, "train/loss_total": 0.4327695965766907 }, { "epoch": 3.4050227090569063, "step": 12745, "train/loss_ctc": 0.5091418027877808, "train/loss_error": 0.43202731013298035, "train/loss_total": 0.4474502205848694 }, { "epoch": 3.4052898744322735, "step": 12746, "train/loss_ctc": 0.5234110355377197, "train/loss_error": 0.4432156980037689, "train/loss_total": 0.45925477147102356 }, { "epoch": 3.4055570398076407, "step": 12747, "train/loss_ctc": 0.8499099016189575, "train/loss_error": 0.4076043963432312, "train/loss_total": 0.49606549739837646 }, { "epoch": 3.4058242051830083, "step": 
12748, "train/loss_ctc": 0.3382602632045746, "train/loss_error": 0.43870410323143005, "train/loss_total": 0.41861534118652344 }, { "epoch": 3.4060913705583755, "step": 12749, "train/loss_ctc": 1.4164438247680664, "train/loss_error": 0.43749359250068665, "train/loss_total": 0.6332836151123047 }, { "epoch": 3.406358535933743, "grad_norm": 2.234325408935547, "learning_rate": 9.57306973016297e-06, "loss": 0.5016, "step": 12750 }, { "epoch": 3.406358535933743, "step": 12750, "train/loss_ctc": 1.016362190246582, "train/loss_error": 0.5287024974822998, "train/loss_total": 0.6262344717979431 }, { "epoch": 3.4066257013091104, "step": 12751, "train/loss_ctc": 0.6924121379852295, "train/loss_error": 0.4450196921825409, "train/loss_total": 0.49449819326400757 }, { "epoch": 3.4068928666844775, "step": 12752, "train/loss_ctc": 0.4502653181552887, "train/loss_error": 0.41960442066192627, "train/loss_total": 0.42573660612106323 }, { "epoch": 3.407160032059845, "step": 12753, "train/loss_ctc": 1.3028876781463623, "train/loss_error": 0.4635896682739258, "train/loss_total": 0.631449282169342 }, { "epoch": 3.4074271974352124, "step": 12754, "train/loss_ctc": 0.8019276261329651, "train/loss_error": 0.48976272344589233, "train/loss_total": 0.5521957278251648 }, { "epoch": 3.4076943628105796, "step": 12755, "train/loss_ctc": 0.2886754870414734, "train/loss_error": 0.4551973044872284, "train/loss_total": 0.4218929409980774 }, { "epoch": 3.407961528185947, "step": 12756, "train/loss_ctc": 0.5231665372848511, "train/loss_error": 0.4296424984931946, "train/loss_total": 0.4483473002910614 }, { "epoch": 3.4082286935613144, "step": 12757, "train/loss_ctc": 1.3360791206359863, "train/loss_error": 0.4868726432323456, "train/loss_total": 0.6567139625549316 }, { "epoch": 3.408495858936682, "step": 12758, "train/loss_ctc": 0.5753008723258972, "train/loss_error": 0.40958595275878906, "train/loss_total": 0.4427289366722107 }, { "epoch": 3.4087630243120493, "step": 12759, "train/loss_ctc": 
0.6092528104782104, "train/loss_error": 0.5033873319625854, "train/loss_total": 0.5245604515075684 }, { "epoch": 3.4090301896874164, "grad_norm": 1.2682149410247803, "learning_rate": 9.55703980764093e-06, "loss": 0.5224, "step": 12760 }, { "epoch": 3.4090301896874164, "step": 12760, "train/loss_ctc": 0.8295712471008301, "train/loss_error": 0.4806617498397827, "train/loss_total": 0.5504436492919922 }, { "epoch": 3.409297355062784, "step": 12761, "train/loss_ctc": 0.9996299743652344, "train/loss_error": 0.3634697198867798, "train/loss_total": 0.4907017946243286 }, { "epoch": 3.4095645204381513, "step": 12762, "train/loss_ctc": 0.7975380420684814, "train/loss_error": 0.40779080986976624, "train/loss_total": 0.4857402443885803 }, { "epoch": 3.4098316858135185, "step": 12763, "train/loss_ctc": 0.5059612989425659, "train/loss_error": 0.4661332964897156, "train/loss_total": 0.47409892082214355 }, { "epoch": 3.410098851188886, "step": 12764, "train/loss_ctc": 1.2485764026641846, "train/loss_error": 0.46182334423065186, "train/loss_total": 0.6191739439964294 }, { "epoch": 3.4103660165642533, "step": 12765, "train/loss_ctc": 0.8333590626716614, "train/loss_error": 0.4466956853866577, "train/loss_total": 0.5240283608436584 }, { "epoch": 3.4106331819396205, "step": 12766, "train/loss_ctc": 0.7811921834945679, "train/loss_error": 0.39976203441619873, "train/loss_total": 0.476048082113266 }, { "epoch": 3.410900347314988, "step": 12767, "train/loss_ctc": 0.7111535668373108, "train/loss_error": 0.47095921635627747, "train/loss_total": 0.5189980864524841 }, { "epoch": 3.4111675126903553, "step": 12768, "train/loss_ctc": 0.7373355627059937, "train/loss_error": 0.4080629050731659, "train/loss_total": 0.4739174246788025 }, { "epoch": 3.4114346780657225, "step": 12769, "train/loss_ctc": 0.5206870436668396, "train/loss_error": 0.4224010109901428, "train/loss_total": 0.4420582354068756 }, { "epoch": 3.41170184344109, "grad_norm": 1.4350664615631104, "learning_rate": 9.54100988511889e-06, 
"loss": 0.5055, "step": 12770 }, { "epoch": 3.41170184344109, "step": 12770, "train/loss_ctc": 0.24113647639751434, "train/loss_error": 0.4283722937107086, "train/loss_total": 0.3909251093864441 }, { "epoch": 3.4119690088164574, "step": 12771, "train/loss_ctc": 0.44692713022232056, "train/loss_error": 0.423421174287796, "train/loss_total": 0.4281223714351654 }, { "epoch": 3.4122361741918246, "step": 12772, "train/loss_ctc": 1.388009786605835, "train/loss_error": 0.43390142917633057, "train/loss_total": 0.6247230768203735 }, { "epoch": 3.412503339567192, "step": 12773, "train/loss_ctc": 1.6222548484802246, "train/loss_error": 0.43392547965049744, "train/loss_total": 0.6715914011001587 }, { "epoch": 3.4127705049425594, "step": 12774, "train/loss_ctc": 0.3218613862991333, "train/loss_error": 0.46056997776031494, "train/loss_total": 0.43282824754714966 }, { "epoch": 3.4130376703179266, "step": 12775, "train/loss_ctc": 0.7316431999206543, "train/loss_error": 0.43325158953666687, "train/loss_total": 0.49292993545532227 }, { "epoch": 3.4133048356932942, "step": 12776, "train/loss_ctc": 1.1947286128997803, "train/loss_error": 0.5003875494003296, "train/loss_total": 0.6392557621002197 }, { "epoch": 3.4135720010686614, "step": 12777, "train/loss_ctc": 0.6538877487182617, "train/loss_error": 0.4026121199131012, "train/loss_total": 0.4528672695159912 }, { "epoch": 3.4138391664440286, "step": 12778, "train/loss_ctc": 1.1615071296691895, "train/loss_error": 0.519353449344635, "train/loss_total": 0.6477841734886169 }, { "epoch": 3.4141063318193963, "step": 12779, "train/loss_ctc": 0.48154354095458984, "train/loss_error": 0.39563700556755066, "train/loss_total": 0.4128183126449585 }, { "epoch": 3.4143734971947635, "grad_norm": 1.2573076486587524, "learning_rate": 9.524979962596848e-06, "loss": 0.5194, "step": 12780 }, { "epoch": 3.4143734971947635, "step": 12780, "train/loss_ctc": 1.2664896249771118, "train/loss_error": 0.4831247627735138, "train/loss_total": 0.6397977471351624 }, 
{ "epoch": 3.414640662570131, "step": 12781, "train/loss_ctc": 0.5214044451713562, "train/loss_error": 0.39513328671455383, "train/loss_total": 0.42038753628730774 }, { "epoch": 3.4149078279454983, "step": 12782, "train/loss_ctc": 0.35342028737068176, "train/loss_error": 0.4742075204849243, "train/loss_total": 0.4500500559806824 }, { "epoch": 3.4151749933208655, "step": 12783, "train/loss_ctc": 0.8164637684822083, "train/loss_error": 0.47266635298728943, "train/loss_total": 0.5414258241653442 }, { "epoch": 3.415442158696233, "step": 12784, "train/loss_ctc": 0.6422730684280396, "train/loss_error": 0.3726140856742859, "train/loss_total": 0.4265458583831787 }, { "epoch": 3.4157093240716003, "step": 12785, "train/loss_ctc": 0.7777642011642456, "train/loss_error": 0.46700748801231384, "train/loss_total": 0.5291588306427002 }, { "epoch": 3.4159764894469675, "step": 12786, "train/loss_ctc": 0.7597436904907227, "train/loss_error": 0.3703238368034363, "train/loss_total": 0.4482077956199646 }, { "epoch": 3.416243654822335, "step": 12787, "train/loss_ctc": 0.24126169085502625, "train/loss_error": 0.4404767155647278, "train/loss_total": 0.40063372254371643 }, { "epoch": 3.4165108201977024, "step": 12788, "train/loss_ctc": 0.5142757892608643, "train/loss_error": 0.48307496309280396, "train/loss_total": 0.4893151521682739 }, { "epoch": 3.4167779855730696, "step": 12789, "train/loss_ctc": 0.588056743144989, "train/loss_error": 0.43097949028015137, "train/loss_total": 0.46239495277404785 }, { "epoch": 3.417045150948437, "grad_norm": 2.1549763679504395, "learning_rate": 9.508950040074807e-06, "loss": 0.4808, "step": 12790 }, { "epoch": 3.417045150948437, "step": 12790, "train/loss_ctc": 0.5206552743911743, "train/loss_error": 0.397333562374115, "train/loss_total": 0.42199790477752686 }, { "epoch": 3.4173123163238044, "step": 12791, "train/loss_ctc": 0.6571366190910339, "train/loss_error": 0.350433349609375, "train/loss_total": 0.41177403926849365 }, { "epoch": 3.417579481699172, 
"step": 12792, "train/loss_ctc": 0.5774202346801758, "train/loss_error": 0.4273853302001953, "train/loss_total": 0.4573923349380493 }, { "epoch": 3.4178466470745392, "step": 12793, "train/loss_ctc": 0.4435797929763794, "train/loss_error": 0.4532800316810608, "train/loss_total": 0.451339989900589 }, { "epoch": 3.4181138124499064, "step": 12794, "train/loss_ctc": 1.0890684127807617, "train/loss_error": 0.4329546391963959, "train/loss_total": 0.564177393913269 }, { "epoch": 3.418380977825274, "step": 12795, "train/loss_ctc": 0.7300043106079102, "train/loss_error": 0.3557312488555908, "train/loss_total": 0.4305858612060547 }, { "epoch": 3.4186481432006413, "step": 12796, "train/loss_ctc": 1.3774768114089966, "train/loss_error": 0.4238210916519165, "train/loss_total": 0.6145522594451904 }, { "epoch": 3.4189153085760084, "step": 12797, "train/loss_ctc": 0.2928083539009094, "train/loss_error": 0.4532851278781891, "train/loss_total": 0.4211897850036621 }, { "epoch": 3.419182473951376, "step": 12798, "train/loss_ctc": 0.4618262052536011, "train/loss_error": 0.4573354125022888, "train/loss_total": 0.4582335650920868 }, { "epoch": 3.4194496393267433, "step": 12799, "train/loss_ctc": 0.8900042176246643, "train/loss_error": 0.4403764307498932, "train/loss_total": 0.5303019881248474 }, { "epoch": 3.4197168047021105, "grad_norm": 4.994259357452393, "learning_rate": 9.492920117552765e-06, "loss": 0.4762, "step": 12800 }, { "epoch": 3.4197168047021105, "step": 12800, "train/loss_ctc": 0.586579442024231, "train/loss_error": 0.42786309123039246, "train/loss_total": 0.4596063792705536 }, { "epoch": 3.419983970077478, "step": 12801, "train/loss_ctc": 0.9517185688018799, "train/loss_error": 0.39207056164741516, "train/loss_total": 0.504000186920166 }, { "epoch": 3.4202511354528453, "step": 12802, "train/loss_ctc": 1.282631516456604, "train/loss_error": 0.4682353734970093, "train/loss_total": 0.6311146020889282 }, { "epoch": 3.4205183008282125, "step": 12803, "train/loss_ctc": 
0.6928004622459412, "train/loss_error": 0.45147353410720825, "train/loss_total": 0.4997389316558838 }, { "epoch": 3.42078546620358, "step": 12804, "train/loss_ctc": 0.4411866068840027, "train/loss_error": 0.5042043924331665, "train/loss_total": 0.4916008412837982 }, { "epoch": 3.4210526315789473, "step": 12805, "train/loss_ctc": 0.5240987539291382, "train/loss_error": 0.41273215413093567, "train/loss_total": 0.4350054860115051 }, { "epoch": 3.4213197969543145, "step": 12806, "train/loss_ctc": 0.8953270316123962, "train/loss_error": 0.5106828212738037, "train/loss_total": 0.5876116752624512 }, { "epoch": 3.421586962329682, "step": 12807, "train/loss_ctc": 0.717116117477417, "train/loss_error": 0.451308012008667, "train/loss_total": 0.504469633102417 }, { "epoch": 3.4218541277050494, "step": 12808, "train/loss_ctc": 0.487295538187027, "train/loss_error": 0.41655758023262024, "train/loss_total": 0.430705189704895 }, { "epoch": 3.4221212930804166, "step": 12809, "train/loss_ctc": 0.512073814868927, "train/loss_error": 0.48742595314979553, "train/loss_total": 0.4923555254936218 }, { "epoch": 3.422388458455784, "grad_norm": 3.485884189605713, "learning_rate": 9.476890195030724e-06, "loss": 0.5036, "step": 12810 }, { "epoch": 3.422388458455784, "step": 12810, "train/loss_ctc": 0.9505923986434937, "train/loss_error": 0.450848251581192, "train/loss_total": 0.5507971048355103 }, { "epoch": 3.4226556238311514, "step": 12811, "train/loss_ctc": 2.1936237812042236, "train/loss_error": 0.46392080187797546, "train/loss_total": 0.809861421585083 }, { "epoch": 3.4229227892065186, "step": 12812, "train/loss_ctc": 0.9488105773925781, "train/loss_error": 0.40224453806877136, "train/loss_total": 0.5115577578544617 }, { "epoch": 3.4231899545818862, "step": 12813, "train/loss_ctc": 1.0152698755264282, "train/loss_error": 0.4694659113883972, "train/loss_total": 0.5786267518997192 }, { "epoch": 3.4234571199572534, "step": 12814, "train/loss_ctc": 0.5242855548858643, "train/loss_error": 
0.4662693440914154, "train/loss_total": 0.4778726100921631 }, { "epoch": 3.423724285332621, "step": 12815, "train/loss_ctc": 0.9935922622680664, "train/loss_error": 0.5334042310714722, "train/loss_total": 0.62544184923172 }, { "epoch": 3.4239914507079883, "step": 12816, "train/loss_ctc": 0.7987152338027954, "train/loss_error": 0.46900227665901184, "train/loss_total": 0.5349448919296265 }, { "epoch": 3.4242586160833555, "step": 12817, "train/loss_ctc": 0.9261075258255005, "train/loss_error": 0.43991830945014954, "train/loss_total": 0.5371561646461487 }, { "epoch": 3.424525781458723, "step": 12818, "train/loss_ctc": 0.6506156921386719, "train/loss_error": 0.45536911487579346, "train/loss_total": 0.4944184422492981 }, { "epoch": 3.4247929468340903, "step": 12819, "train/loss_ctc": 0.25576290488243103, "train/loss_error": 0.35499802231788635, "train/loss_total": 0.3351510167121887 }, { "epoch": 3.4250601122094575, "grad_norm": 2.412426471710205, "learning_rate": 9.460860272508683e-06, "loss": 0.5456, "step": 12820 }, { "epoch": 3.4250601122094575, "step": 12820, "train/loss_ctc": 0.7990474104881287, "train/loss_error": 0.42170754075050354, "train/loss_total": 0.49717551469802856 }, { "epoch": 3.425327277584825, "step": 12821, "train/loss_ctc": 0.5603522658348083, "train/loss_error": 0.4014701247215271, "train/loss_total": 0.43324655294418335 }, { "epoch": 3.4255944429601923, "step": 12822, "train/loss_ctc": 0.9729324579238892, "train/loss_error": 0.4134827256202698, "train/loss_total": 0.5253726840019226 }, { "epoch": 3.4258616083355595, "step": 12823, "train/loss_ctc": 0.42750251293182373, "train/loss_error": 0.4192170202732086, "train/loss_total": 0.42087411880493164 }, { "epoch": 3.426128773710927, "step": 12824, "train/loss_ctc": 0.49711665511131287, "train/loss_error": 0.3995518386363983, "train/loss_total": 0.41906481981277466 }, { "epoch": 3.4263959390862944, "step": 12825, "train/loss_ctc": 0.6181710362434387, "train/loss_error": 0.45451998710632324, 
"train/loss_total": 0.4872502088546753 }, { "epoch": 3.426663104461662, "step": 12826, "train/loss_ctc": 0.9776548147201538, "train/loss_error": 0.4681141674518585, "train/loss_total": 0.5700222849845886 }, { "epoch": 3.426930269837029, "step": 12827, "train/loss_ctc": 0.5851435661315918, "train/loss_error": 0.38051825761795044, "train/loss_total": 0.42144331336021423 }, { "epoch": 3.4271974352123964, "step": 12828, "train/loss_ctc": 0.4152843952178955, "train/loss_error": 0.4407626688480377, "train/loss_total": 0.4356670379638672 }, { "epoch": 3.427464600587764, "step": 12829, "train/loss_ctc": 0.6376925706863403, "train/loss_error": 0.546614408493042, "train/loss_total": 0.5648300647735596 }, { "epoch": 3.4277317659631312, "grad_norm": 2.769812822341919, "learning_rate": 9.444830349986641e-06, "loss": 0.4775, "step": 12830 }, { "epoch": 3.4277317659631312, "step": 12830, "train/loss_ctc": 0.7195000648498535, "train/loss_error": 0.4417347013950348, "train/loss_total": 0.4972878098487854 }, { "epoch": 3.4279989313384984, "step": 12831, "train/loss_ctc": 0.659815788269043, "train/loss_error": 0.4597509801387787, "train/loss_total": 0.49976396560668945 }, { "epoch": 3.428266096713866, "step": 12832, "train/loss_ctc": 0.6303152441978455, "train/loss_error": 0.40035369992256165, "train/loss_total": 0.4463460147380829 }, { "epoch": 3.4285332620892333, "step": 12833, "train/loss_ctc": 0.458344429731369, "train/loss_error": 0.49606072902679443, "train/loss_total": 0.4885174632072449 }, { "epoch": 3.4288004274646005, "step": 12834, "train/loss_ctc": 0.7918733358383179, "train/loss_error": 0.47890397906303406, "train/loss_total": 0.5414978265762329 }, { "epoch": 3.429067592839968, "step": 12835, "train/loss_ctc": 0.36319878697395325, "train/loss_error": 0.4468737542629242, "train/loss_total": 0.4301387667655945 }, { "epoch": 3.4293347582153353, "step": 12836, "train/loss_ctc": 0.7382633686065674, "train/loss_error": 0.40774086117744446, "train/loss_total": 
0.47384536266326904 }, { "epoch": 3.4296019235907025, "step": 12837, "train/loss_ctc": 0.4718853533267975, "train/loss_error": 0.39779412746429443, "train/loss_total": 0.4126123785972595 }, { "epoch": 3.42986908896607, "step": 12838, "train/loss_ctc": 1.2383537292480469, "train/loss_error": 0.4658852815628052, "train/loss_total": 0.6203789710998535 }, { "epoch": 3.4301362543414373, "step": 12839, "train/loss_ctc": 0.5889835953712463, "train/loss_error": 0.42408889532089233, "train/loss_total": 0.4570678472518921 }, { "epoch": 3.4304034197168045, "grad_norm": 1.5762736797332764, "learning_rate": 9.4288004274646e-06, "loss": 0.4867, "step": 12840 }, { "epoch": 3.4304034197168045, "step": 12840, "train/loss_ctc": 1.9593331813812256, "train/loss_error": 0.46981972455978394, "train/loss_total": 0.7677224278450012 }, { "epoch": 3.430670585092172, "step": 12841, "train/loss_ctc": 1.0116328001022339, "train/loss_error": 0.44022950530052185, "train/loss_total": 0.5545101761817932 }, { "epoch": 3.4309377504675393, "step": 12842, "train/loss_ctc": 0.7593395709991455, "train/loss_error": 0.4757305085659027, "train/loss_total": 0.5324523448944092 }, { "epoch": 3.4312049158429065, "step": 12843, "train/loss_ctc": 0.6877014636993408, "train/loss_error": 0.5206515789031982, "train/loss_total": 0.5540615916252136 }, { "epoch": 3.431472081218274, "step": 12844, "train/loss_ctc": 1.2603459358215332, "train/loss_error": 0.3645251393318176, "train/loss_total": 0.5436893105506897 }, { "epoch": 3.4317392465936414, "step": 12845, "train/loss_ctc": 0.245869979262352, "train/loss_error": 0.4774932861328125, "train/loss_total": 0.4311686158180237 }, { "epoch": 3.4320064119690086, "step": 12846, "train/loss_ctc": 0.5096095204353333, "train/loss_error": 0.46083545684814453, "train/loss_total": 0.4705902934074402 }, { "epoch": 3.432273577344376, "step": 12847, "train/loss_ctc": 0.3610956370830536, "train/loss_error": 0.4027218222618103, "train/loss_total": 0.3943966031074524 }, { "epoch": 
3.4325407427197434, "step": 12848, "train/loss_ctc": 0.6054246425628662, "train/loss_error": 0.42763012647628784, "train/loss_total": 0.463189035654068 }, { "epoch": 3.432807908095111, "step": 12849, "train/loss_ctc": 0.6635791659355164, "train/loss_error": 0.4579050838947296, "train/loss_total": 0.499039888381958 }, { "epoch": 3.4330750734704782, "grad_norm": 3.3417091369628906, "learning_rate": 9.41277050494256e-06, "loss": 0.5211, "step": 12850 }, { "epoch": 3.4330750734704782, "step": 12850, "train/loss_ctc": 0.9251865148544312, "train/loss_error": 0.4310508966445923, "train/loss_total": 0.5298780202865601 }, { "epoch": 3.4333422388458454, "step": 12851, "train/loss_ctc": 0.3446241617202759, "train/loss_error": 0.43472009897232056, "train/loss_total": 0.41670092940330505 }, { "epoch": 3.433609404221213, "step": 12852, "train/loss_ctc": 0.609845757484436, "train/loss_error": 0.5078375935554504, "train/loss_total": 0.5282392501831055 }, { "epoch": 3.4338765695965803, "step": 12853, "train/loss_ctc": 0.5519613027572632, "train/loss_error": 0.410834401845932, "train/loss_total": 0.4390597939491272 }, { "epoch": 3.4341437349719475, "step": 12854, "train/loss_ctc": 0.22062364220619202, "train/loss_error": 0.43192198872566223, "train/loss_total": 0.38966232538223267 }, { "epoch": 3.434410900347315, "step": 12855, "train/loss_ctc": 0.89823317527771, "train/loss_error": 0.45659321546554565, "train/loss_total": 0.5449212193489075 }, { "epoch": 3.4346780657226823, "step": 12856, "train/loss_ctc": 0.5292307138442993, "train/loss_error": 0.3677806258201599, "train/loss_total": 0.4000706374645233 }, { "epoch": 3.4349452310980495, "step": 12857, "train/loss_ctc": 1.100106120109558, "train/loss_error": 0.4937744736671448, "train/loss_total": 0.6150408387184143 }, { "epoch": 3.435212396473417, "step": 12858, "train/loss_ctc": 0.671399712562561, "train/loss_error": 0.4571091830730438, "train/loss_total": 0.4999672770500183 }, { "epoch": 3.4354795618487843, "step": 12859, 
"train/loss_ctc": 0.6668692827224731, "train/loss_error": 0.423052042722702, "train/loss_total": 0.4718154966831207 }, { "epoch": 3.435746727224152, "grad_norm": 1.4825356006622314, "learning_rate": 9.396740582420519e-06, "loss": 0.4835, "step": 12860 }, { "epoch": 3.435746727224152, "step": 12860, "train/loss_ctc": 0.5865628719329834, "train/loss_error": 0.5320629477500916, "train/loss_total": 0.542962908744812 }, { "epoch": 3.436013892599519, "step": 12861, "train/loss_ctc": 0.5430645942687988, "train/loss_error": 0.4195554554462433, "train/loss_total": 0.44425728917121887 }, { "epoch": 3.4362810579748864, "step": 12862, "train/loss_ctc": 0.4287024140357971, "train/loss_error": 0.3906177878379822, "train/loss_total": 0.3982347249984741 }, { "epoch": 3.436548223350254, "step": 12863, "train/loss_ctc": 0.5797214508056641, "train/loss_error": 0.4303792119026184, "train/loss_total": 0.460247665643692 }, { "epoch": 3.436815388725621, "step": 12864, "train/loss_ctc": 0.7952622175216675, "train/loss_error": 0.4860647916793823, "train/loss_total": 0.5479043126106262 }, { "epoch": 3.4370825541009884, "step": 12865, "train/loss_ctc": 0.26842188835144043, "train/loss_error": 0.37691444158554077, "train/loss_total": 0.3552159368991852 }, { "epoch": 3.437349719476356, "step": 12866, "train/loss_ctc": 0.8880079984664917, "train/loss_error": 0.4324807822704315, "train/loss_total": 0.5235862731933594 }, { "epoch": 3.4376168848517232, "step": 12867, "train/loss_ctc": 0.9987835884094238, "train/loss_error": 0.4253791272640228, "train/loss_total": 0.5400600433349609 }, { "epoch": 3.4378840502270904, "step": 12868, "train/loss_ctc": 1.2630832195281982, "train/loss_error": 0.4491511583328247, "train/loss_total": 0.6119375824928284 }, { "epoch": 3.438151215602458, "step": 12869, "train/loss_ctc": 0.5610202550888062, "train/loss_error": 0.4959828555583954, "train/loss_total": 0.5089903473854065 }, { "epoch": 3.4384183809778253, "grad_norm": 2.649319648742676, "learning_rate": 
9.380710659898479e-06, "loss": 0.4933, "step": 12870 }, { "epoch": 3.4384183809778253, "step": 12870, "train/loss_ctc": 0.5312180519104004, "train/loss_error": 0.41330868005752563, "train/loss_total": 0.436890572309494 }, { "epoch": 3.4386855463531925, "step": 12871, "train/loss_ctc": 0.936033308506012, "train/loss_error": 0.4514213502407074, "train/loss_total": 0.5483437776565552 }, { "epoch": 3.43895271172856, "step": 12872, "train/loss_ctc": 0.321599543094635, "train/loss_error": 0.47943708300590515, "train/loss_total": 0.44786956906318665 }, { "epoch": 3.4392198771039273, "step": 12873, "train/loss_ctc": 0.5358309745788574, "train/loss_error": 0.42915329337120056, "train/loss_total": 0.4504888355731964 }, { "epoch": 3.4394870424792945, "step": 12874, "train/loss_ctc": 0.9420833587646484, "train/loss_error": 0.42930006980895996, "train/loss_total": 0.5318567156791687 }, { "epoch": 3.439754207854662, "step": 12875, "train/loss_ctc": 0.4261373281478882, "train/loss_error": 0.44993075728416443, "train/loss_total": 0.4451720714569092 }, { "epoch": 3.4400213732300293, "step": 12876, "train/loss_ctc": 0.6755895018577576, "train/loss_error": 0.5096445679664612, "train/loss_total": 0.5428335666656494 }, { "epoch": 3.4402885386053965, "step": 12877, "train/loss_ctc": 0.989090085029602, "train/loss_error": 0.43693870306015015, "train/loss_total": 0.5473690032958984 }, { "epoch": 3.440555703980764, "step": 12878, "train/loss_ctc": 0.2848004102706909, "train/loss_error": 0.4305686950683594, "train/loss_total": 0.40141505002975464 }, { "epoch": 3.4408228693561314, "step": 12879, "train/loss_ctc": 0.4919280707836151, "train/loss_error": 0.4234108626842499, "train/loss_total": 0.43711429834365845 }, { "epoch": 3.4410900347314985, "grad_norm": 2.460326671600342, "learning_rate": 9.364680737376437e-06, "loss": 0.4789, "step": 12880 }, { "epoch": 3.4410900347314985, "step": 12880, "train/loss_ctc": 0.546383261680603, "train/loss_error": 0.45109954476356506, "train/loss_total": 
0.4701562821865082 }, { "epoch": 3.441357200106866, "step": 12881, "train/loss_ctc": 0.576594352722168, "train/loss_error": 0.42341676354408264, "train/loss_total": 0.45405226945877075 }, { "epoch": 3.4416243654822334, "step": 12882, "train/loss_ctc": 0.2547871172428131, "train/loss_error": 0.42095017433166504, "train/loss_total": 0.3877175450325012 }, { "epoch": 3.441891530857601, "step": 12883, "train/loss_ctc": 0.4015517234802246, "train/loss_error": 0.42982912063598633, "train/loss_total": 0.42417365312576294 }, { "epoch": 3.442158696232968, "step": 12884, "train/loss_ctc": 0.5191109776496887, "train/loss_error": 0.3755877614021301, "train/loss_total": 0.40429240465164185 }, { "epoch": 3.4424258616083354, "step": 12885, "train/loss_ctc": 0.5709954500198364, "train/loss_error": 0.5291919708251953, "train/loss_total": 0.5375526547431946 }, { "epoch": 3.442693026983703, "step": 12886, "train/loss_ctc": 0.5510349273681641, "train/loss_error": 0.40950340032577515, "train/loss_total": 0.43780970573425293 }, { "epoch": 3.4429601923590702, "step": 12887, "train/loss_ctc": 0.40613657236099243, "train/loss_error": 0.47018638253211975, "train/loss_total": 0.4573764204978943 }, { "epoch": 3.4432273577344374, "step": 12888, "train/loss_ctc": 0.5245814919471741, "train/loss_error": 0.40466123819351196, "train/loss_total": 0.4286453127861023 }, { "epoch": 3.443494523109805, "step": 12889, "train/loss_ctc": 1.2051494121551514, "train/loss_error": 0.495716392993927, "train/loss_total": 0.6376030445098877 }, { "epoch": 3.4437616884851723, "grad_norm": 5.434113502502441, "learning_rate": 9.348650814854395e-06, "loss": 0.4639, "step": 12890 }, { "epoch": 3.4437616884851723, "step": 12890, "train/loss_ctc": 0.4640039801597595, "train/loss_error": 0.44176971912384033, "train/loss_total": 0.4462165832519531 }, { "epoch": 3.44402885386054, "step": 12891, "train/loss_ctc": 0.44594621658325195, "train/loss_error": 0.5411078929901123, "train/loss_total": 0.5220755934715271 }, { "epoch": 
3.444296019235907, "step": 12892, "train/loss_ctc": 0.5995146632194519, "train/loss_error": 0.45918548107147217, "train/loss_total": 0.487251341342926 }, { "epoch": 3.4445631846112743, "step": 12893, "train/loss_ctc": 0.7126724720001221, "train/loss_error": 0.5017781257629395, "train/loss_total": 0.543956995010376 }, { "epoch": 3.444830349986642, "step": 12894, "train/loss_ctc": 0.6989909410476685, "train/loss_error": 0.3933795988559723, "train/loss_total": 0.4545018672943115 }, { "epoch": 3.445097515362009, "step": 12895, "train/loss_ctc": 0.6130893230438232, "train/loss_error": 0.46571919322013855, "train/loss_total": 0.4951932430267334 }, { "epoch": 3.4453646807373763, "step": 12896, "train/loss_ctc": 1.1369283199310303, "train/loss_error": 0.4093971848487854, "train/loss_total": 0.5549033880233765 }, { "epoch": 3.445631846112744, "step": 12897, "train/loss_ctc": 0.8466280102729797, "train/loss_error": 0.36339259147644043, "train/loss_total": 0.4600396752357483 }, { "epoch": 3.445899011488111, "step": 12898, "train/loss_ctc": 0.8204176425933838, "train/loss_error": 0.45890384912490845, "train/loss_total": 0.5312066078186035 }, { "epoch": 3.4461661768634784, "step": 12899, "train/loss_ctc": 0.21135202050209045, "train/loss_error": 0.38882750272750854, "train/loss_total": 0.35333240032196045 }, { "epoch": 3.446433342238846, "grad_norm": 2.514958143234253, "learning_rate": 9.332620892332355e-06, "loss": 0.4849, "step": 12900 }, { "epoch": 3.446433342238846, "step": 12900, "train/loss_ctc": 0.6912757158279419, "train/loss_error": 0.3978801667690277, "train/loss_total": 0.45655930042266846 }, { "epoch": 3.446700507614213, "step": 12901, "train/loss_ctc": 0.33499157428741455, "train/loss_error": 0.4622028172016144, "train/loss_total": 0.4367606043815613 }, { "epoch": 3.4469676729895804, "step": 12902, "train/loss_ctc": 0.9150766730308533, "train/loss_error": 0.4608727693557739, "train/loss_total": 0.5517135262489319 }, { "epoch": 3.447234838364948, "step": 12903, 
"train/loss_ctc": 0.25867876410484314, "train/loss_error": 0.433441162109375, "train/loss_total": 0.39848870038986206 }, { "epoch": 3.4475020037403152, "step": 12904, "train/loss_ctc": 0.581291913986206, "train/loss_error": 0.3936344385147095, "train/loss_total": 0.4311659336090088 }, { "epoch": 3.4477691691156824, "step": 12905, "train/loss_ctc": 1.1246411800384521, "train/loss_error": 0.4835580885410309, "train/loss_total": 0.611774742603302 }, { "epoch": 3.44803633449105, "step": 12906, "train/loss_ctc": 0.44146353006362915, "train/loss_error": 0.37188810110092163, "train/loss_total": 0.3858031928539276 }, { "epoch": 3.4483034998664173, "step": 12907, "train/loss_ctc": 0.42824220657348633, "train/loss_error": 0.48229044675827026, "train/loss_total": 0.4714808166027069 }, { "epoch": 3.4485706652417845, "step": 12908, "train/loss_ctc": 0.9178223013877869, "train/loss_error": 0.42527905106544495, "train/loss_total": 0.5237877368927002 }, { "epoch": 3.448837830617152, "step": 12909, "train/loss_ctc": 0.7699439525604248, "train/loss_error": 0.3862108290195465, "train/loss_total": 0.4629574418067932 }, { "epoch": 3.4491049959925193, "grad_norm": 2.6682374477386475, "learning_rate": 9.316590969810313e-06, "loss": 0.473, "step": 12910 }, { "epoch": 3.4491049959925193, "step": 12910, "train/loss_ctc": 0.7899194955825806, "train/loss_error": 0.48177075386047363, "train/loss_total": 0.5434005260467529 }, { "epoch": 3.4493721613678865, "step": 12911, "train/loss_ctc": 0.5691331624984741, "train/loss_error": 0.4418482780456543, "train/loss_total": 0.4673052728176117 }, { "epoch": 3.449639326743254, "step": 12912, "train/loss_ctc": 0.5122123956680298, "train/loss_error": 0.373477965593338, "train/loss_total": 0.40122485160827637 }, { "epoch": 3.4499064921186213, "step": 12913, "train/loss_ctc": 0.43110379576683044, "train/loss_error": 0.40300169587135315, "train/loss_total": 0.408622145652771 }, { "epoch": 3.450173657493989, "step": 12914, "train/loss_ctc": 0.357826828956604, 
"train/loss_error": 0.4562966525554657, "train/loss_total": 0.4366026818752289 }, { "epoch": 3.450440822869356, "step": 12915, "train/loss_ctc": 1.040061593055725, "train/loss_error": 0.44777238368988037, "train/loss_total": 0.5662302374839783 }, { "epoch": 3.4507079882447234, "step": 12916, "train/loss_ctc": 1.0284045934677124, "train/loss_error": 0.45181623101234436, "train/loss_total": 0.567133903503418 }, { "epoch": 3.450975153620091, "step": 12917, "train/loss_ctc": 0.7793562412261963, "train/loss_error": 0.420935720205307, "train/loss_total": 0.4926198124885559 }, { "epoch": 3.451242318995458, "step": 12918, "train/loss_ctc": 1.2740437984466553, "train/loss_error": 0.4105639159679413, "train/loss_total": 0.5832598805427551 }, { "epoch": 3.4515094843708254, "step": 12919, "train/loss_ctc": 0.6822376251220703, "train/loss_error": 0.4505767822265625, "train/loss_total": 0.496908962726593 }, { "epoch": 3.451776649746193, "grad_norm": 3.212583541870117, "learning_rate": 9.300561047288271e-06, "loss": 0.4963, "step": 12920 }, { "epoch": 3.451776649746193, "step": 12920, "train/loss_ctc": 0.6611913442611694, "train/loss_error": 0.5278341770172119, "train/loss_total": 0.5545055866241455 }, { "epoch": 3.45204381512156, "step": 12921, "train/loss_ctc": 0.5974023342132568, "train/loss_error": 0.40781068801879883, "train/loss_total": 0.44572901725769043 }, { "epoch": 3.4523109804969274, "step": 12922, "train/loss_ctc": 0.7422879934310913, "train/loss_error": 0.5240083932876587, "train/loss_total": 0.5676643252372742 }, { "epoch": 3.452578145872295, "step": 12923, "train/loss_ctc": 0.6661050915718079, "train/loss_error": 0.4277339279651642, "train/loss_total": 0.475408136844635 }, { "epoch": 3.4528453112476623, "step": 12924, "train/loss_ctc": 0.43339985609054565, "train/loss_error": 0.5010806918144226, "train/loss_total": 0.48754453659057617 }, { "epoch": 3.45311247662303, "step": 12925, "train/loss_ctc": 0.2895525097846985, "train/loss_error": 0.39271774888038635, 
"train/loss_total": 0.37208470702171326 }, { "epoch": 3.453379641998397, "step": 12926, "train/loss_ctc": 0.7178052663803101, "train/loss_error": 0.40563371777534485, "train/loss_total": 0.46806800365448 }, { "epoch": 3.4536468073737643, "step": 12927, "train/loss_ctc": 0.9633347392082214, "train/loss_error": 0.4093637466430664, "train/loss_total": 0.5201579332351685 }, { "epoch": 3.453913972749132, "step": 12928, "train/loss_ctc": 0.7854928374290466, "train/loss_error": 0.3941764235496521, "train/loss_total": 0.472439706325531 }, { "epoch": 3.454181138124499, "step": 12929, "train/loss_ctc": 1.4315509796142578, "train/loss_error": 0.44227325916290283, "train/loss_total": 0.6401288509368896 }, { "epoch": 3.4544483034998663, "grad_norm": 4.239362716674805, "learning_rate": 9.28453112476623e-06, "loss": 0.5004, "step": 12930 }, { "epoch": 3.4544483034998663, "step": 12930, "train/loss_ctc": 0.2594207525253296, "train/loss_error": 0.34494972229003906, "train/loss_total": 0.32784393429756165 }, { "epoch": 3.454715468875234, "step": 12931, "train/loss_ctc": 1.2473102807998657, "train/loss_error": 0.4154341220855713, "train/loss_total": 0.5818093419075012 }, { "epoch": 3.454982634250601, "step": 12932, "train/loss_ctc": 1.5236306190490723, "train/loss_error": 0.44272926449775696, "train/loss_total": 0.6589095592498779 }, { "epoch": 3.4552497996259683, "step": 12933, "train/loss_ctc": 0.7720966339111328, "train/loss_error": 0.4395962953567505, "train/loss_total": 0.506096363067627 }, { "epoch": 3.455516965001336, "step": 12934, "train/loss_ctc": 0.5959892868995667, "train/loss_error": 0.4630388021469116, "train/loss_total": 0.4896289110183716 }, { "epoch": 3.455784130376703, "step": 12935, "train/loss_ctc": 1.2487175464630127, "train/loss_error": 0.4989415407180786, "train/loss_total": 0.6488967537879944 }, { "epoch": 3.4560512957520704, "step": 12936, "train/loss_ctc": 0.4780021607875824, "train/loss_error": 0.47039374709129333, "train/loss_total": 0.47191542387008667 }, 
{ "epoch": 3.456318461127438, "step": 12937, "train/loss_ctc": 0.2196972817182541, "train/loss_error": 0.4377833604812622, "train/loss_total": 0.39416617155075073 }, { "epoch": 3.456585626502805, "step": 12938, "train/loss_ctc": 1.0094952583312988, "train/loss_error": 0.43306276202201843, "train/loss_total": 0.5483492612838745 }, { "epoch": 3.4568527918781724, "step": 12939, "train/loss_ctc": 0.999254584312439, "train/loss_error": 0.5002401471138, "train/loss_total": 0.6000430583953857 }, { "epoch": 3.45711995725354, "grad_norm": 2.1458072662353516, "learning_rate": 9.268501202244189e-06, "loss": 0.5228, "step": 12940 }, { "epoch": 3.45711995725354, "step": 12940, "train/loss_ctc": 0.8297142386436462, "train/loss_error": 0.4548656642436981, "train/loss_total": 0.5298353433609009 }, { "epoch": 3.4573871226289072, "step": 12941, "train/loss_ctc": 1.3136980533599854, "train/loss_error": 0.451827734708786, "train/loss_total": 0.6242018342018127 }, { "epoch": 3.4576542880042744, "step": 12942, "train/loss_ctc": 1.015673041343689, "train/loss_error": 0.5702341794967651, "train/loss_total": 0.6593219637870789 }, { "epoch": 3.457921453379642, "step": 12943, "train/loss_ctc": 0.36168691515922546, "train/loss_error": 0.4026476740837097, "train/loss_total": 0.39445552229881287 }, { "epoch": 3.4581886187550093, "step": 12944, "train/loss_ctc": 1.0551553964614868, "train/loss_error": 0.39904099702835083, "train/loss_total": 0.5302639007568359 }, { "epoch": 3.4584557841303765, "step": 12945, "train/loss_ctc": 0.7643638253211975, "train/loss_error": 0.44313013553619385, "train/loss_total": 0.5073769092559814 }, { "epoch": 3.458722949505744, "step": 12946, "train/loss_ctc": 0.44204655289649963, "train/loss_error": 0.4571782946586609, "train/loss_total": 0.4541519582271576 }, { "epoch": 3.4589901148811113, "step": 12947, "train/loss_ctc": 0.7416408061981201, "train/loss_error": 0.4847481846809387, "train/loss_total": 0.5361267328262329 }, { "epoch": 3.459257280256479, "step": 
12948, "train/loss_ctc": 0.5849833488464355, "train/loss_error": 0.37886711955070496, "train/loss_total": 0.42009037733078003 }, { "epoch": 3.459524445631846, "step": 12949, "train/loss_ctc": 0.35957071185112, "train/loss_error": 0.43190133571624756, "train/loss_total": 0.4174351990222931 }, { "epoch": 3.4597916110072133, "grad_norm": 3.0223443508148193, "learning_rate": 9.252471279722149e-06, "loss": 0.5073, "step": 12950 }, { "epoch": 3.4597916110072133, "step": 12950, "train/loss_ctc": 0.8153026103973389, "train/loss_error": 0.34765151143074036, "train/loss_total": 0.4411817193031311 }, { "epoch": 3.460058776382581, "step": 12951, "train/loss_ctc": 0.704414963722229, "train/loss_error": 0.4234849512577057, "train/loss_total": 0.4796709716320038 }, { "epoch": 3.460325941757948, "step": 12952, "train/loss_ctc": 0.5395464897155762, "train/loss_error": 0.4455187916755676, "train/loss_total": 0.46432432532310486 }, { "epoch": 3.4605931071333154, "step": 12953, "train/loss_ctc": 0.6164923906326294, "train/loss_error": 0.40944406390190125, "train/loss_total": 0.45085376501083374 }, { "epoch": 3.460860272508683, "step": 12954, "train/loss_ctc": 0.6826726198196411, "train/loss_error": 0.42507651448249817, "train/loss_total": 0.47659575939178467 }, { "epoch": 3.46112743788405, "step": 12955, "train/loss_ctc": 1.0271451473236084, "train/loss_error": 0.47087666392326355, "train/loss_total": 0.5821303725242615 }, { "epoch": 3.4613946032594174, "step": 12956, "train/loss_ctc": 0.5474370718002319, "train/loss_error": 0.45883554220199585, "train/loss_total": 0.47655585408210754 }, { "epoch": 3.461661768634785, "step": 12957, "train/loss_ctc": 1.0238980054855347, "train/loss_error": 0.45794349908828735, "train/loss_total": 0.5711343884468079 }, { "epoch": 3.4619289340101522, "step": 12958, "train/loss_ctc": 0.1862969845533371, "train/loss_error": 0.43165507912635803, "train/loss_total": 0.38258346915245056 }, { "epoch": 3.46219609938552, "step": 12959, "train/loss_ctc": 
0.7148038148880005, "train/loss_error": 0.43417873978614807, "train/loss_total": 0.49030375480651855 }, { "epoch": 3.462463264760887, "grad_norm": 1.9033244848251343, "learning_rate": 9.236441357200108e-06, "loss": 0.4815, "step": 12960 }, { "epoch": 3.462463264760887, "step": 12960, "train/loss_ctc": 0.6223085522651672, "train/loss_error": 0.4955335259437561, "train/loss_total": 0.5208885669708252 }, { "epoch": 3.4627304301362543, "step": 12961, "train/loss_ctc": 0.6531943082809448, "train/loss_error": 0.44650089740753174, "train/loss_total": 0.48783957958221436 }, { "epoch": 3.462997595511622, "step": 12962, "train/loss_ctc": 0.9799339771270752, "train/loss_error": 0.4179423153400421, "train/loss_total": 0.5303406715393066 }, { "epoch": 3.463264760886989, "step": 12963, "train/loss_ctc": 0.7325904369354248, "train/loss_error": 0.4913191795349121, "train/loss_total": 0.5395734310150146 }, { "epoch": 3.4635319262623563, "step": 12964, "train/loss_ctc": 0.8352680802345276, "train/loss_error": 0.5380624532699585, "train/loss_total": 0.5975036025047302 }, { "epoch": 3.463799091637724, "step": 12965, "train/loss_ctc": 0.47250285744667053, "train/loss_error": 0.4096381664276123, "train/loss_total": 0.4222111105918884 }, { "epoch": 3.464066257013091, "step": 12966, "train/loss_ctc": 0.5249085426330566, "train/loss_error": 0.4108177125453949, "train/loss_total": 0.4336358904838562 }, { "epoch": 3.4643334223884583, "step": 12967, "train/loss_ctc": 1.1268088817596436, "train/loss_error": 0.43412652611732483, "train/loss_total": 0.5726630091667175 }, { "epoch": 3.464600587763826, "step": 12968, "train/loss_ctc": 0.7818748354911804, "train/loss_error": 0.46685144305229187, "train/loss_total": 0.5298561453819275 }, { "epoch": 3.464867753139193, "step": 12969, "train/loss_ctc": 1.312023401260376, "train/loss_error": 0.44041311740875244, "train/loss_total": 0.6147351861000061 }, { "epoch": 3.4651349185145603, "grad_norm": 1.0195916891098022, "learning_rate": 
9.220411434678066e-06, "loss": 0.5249, "step": 12970 }, { "epoch": 3.4651349185145603, "step": 12970, "train/loss_ctc": 0.38878121972084045, "train/loss_error": 0.4281063377857208, "train/loss_total": 0.4202413260936737 }, { "epoch": 3.465402083889928, "step": 12971, "train/loss_ctc": 0.41873860359191895, "train/loss_error": 0.43535757064819336, "train/loss_total": 0.4320337772369385 }, { "epoch": 3.465669249265295, "step": 12972, "train/loss_ctc": 0.6428091526031494, "train/loss_error": 0.4273812174797058, "train/loss_total": 0.4704667925834656 }, { "epoch": 3.4659364146406624, "step": 12973, "train/loss_ctc": 0.8733695149421692, "train/loss_error": 0.432140976190567, "train/loss_total": 0.5203866958618164 }, { "epoch": 3.46620358001603, "step": 12974, "train/loss_ctc": 0.3514934778213501, "train/loss_error": 0.4089944064617157, "train/loss_total": 0.39749422669410706 }, { "epoch": 3.466470745391397, "step": 12975, "train/loss_ctc": 0.5694060325622559, "train/loss_error": 0.43510207533836365, "train/loss_total": 0.46196287870407104 }, { "epoch": 3.4667379107667644, "step": 12976, "train/loss_ctc": 0.7407655715942383, "train/loss_error": 0.4470957815647125, "train/loss_total": 0.5058297514915466 }, { "epoch": 3.467005076142132, "step": 12977, "train/loss_ctc": 1.1948829889297485, "train/loss_error": 0.4266297221183777, "train/loss_total": 0.5802804231643677 }, { "epoch": 3.4672722415174992, "step": 12978, "train/loss_ctc": 0.4532056450843811, "train/loss_error": 0.4301483929157257, "train/loss_total": 0.43475985527038574 }, { "epoch": 3.4675394068928664, "step": 12979, "train/loss_ctc": 0.39569899439811707, "train/loss_error": 0.49563565850257874, "train/loss_total": 0.47564834356307983 }, { "epoch": 3.467806572268234, "grad_norm": 10.920069694519043, "learning_rate": 9.204381512156025e-06, "loss": 0.4699, "step": 12980 }, { "epoch": 3.467806572268234, "step": 12980, "train/loss_ctc": 0.4568331241607666, "train/loss_error": 0.4994855523109436, "train/loss_total": 
0.49095505475997925 }, { "epoch": 3.4680737376436013, "step": 12981, "train/loss_ctc": 0.8271764516830444, "train/loss_error": 0.38163474202156067, "train/loss_total": 0.4707431197166443 }, { "epoch": 3.468340903018969, "step": 12982, "train/loss_ctc": 0.9283243417739868, "train/loss_error": 0.45740944147109985, "train/loss_total": 0.5515924096107483 }, { "epoch": 3.468608068394336, "step": 12983, "train/loss_ctc": 0.5085562467575073, "train/loss_error": 0.47316282987594604, "train/loss_total": 0.4802415072917938 }, { "epoch": 3.4688752337697033, "step": 12984, "train/loss_ctc": 1.5027704238891602, "train/loss_error": 0.46069955825805664, "train/loss_total": 0.6691137552261353 }, { "epoch": 3.469142399145071, "step": 12985, "train/loss_ctc": 0.518065333366394, "train/loss_error": 0.4084908962249756, "train/loss_total": 0.43040579557418823 }, { "epoch": 3.469409564520438, "step": 12986, "train/loss_ctc": 0.7409524917602539, "train/loss_error": 0.46274298429489136, "train/loss_total": 0.5183849334716797 }, { "epoch": 3.4696767298958053, "step": 12987, "train/loss_ctc": 0.49623820185661316, "train/loss_error": 0.4199882745742798, "train/loss_total": 0.4352382719516754 }, { "epoch": 3.469943895271173, "step": 12988, "train/loss_ctc": 0.33824029564857483, "train/loss_error": 0.4901033043861389, "train/loss_total": 0.45973071455955505 }, { "epoch": 3.47021106064654, "step": 12989, "train/loss_ctc": 0.8499680161476135, "train/loss_error": 0.40883615612983704, "train/loss_total": 0.4970625340938568 }, { "epoch": 3.470478226021908, "grad_norm": 1.3923324346542358, "learning_rate": 9.188351589633984e-06, "loss": 0.5003, "step": 12990 }, { "epoch": 3.470478226021908, "step": 12990, "train/loss_ctc": 1.2150005102157593, "train/loss_error": 0.5484863519668579, "train/loss_total": 0.681789219379425 }, { "epoch": 3.470745391397275, "step": 12991, "train/loss_ctc": 0.4365667998790741, "train/loss_error": 0.46541231870651245, "train/loss_total": 0.4596432149410248 }, { "epoch": 
3.471012556772642, "step": 12992, "train/loss_ctc": 1.366375207901001, "train/loss_error": 0.4942140281200409, "train/loss_total": 0.6686462759971619 }, { "epoch": 3.47127972214801, "step": 12993, "train/loss_ctc": 0.7561535835266113, "train/loss_error": 0.4567931890487671, "train/loss_total": 0.5166652798652649 }, { "epoch": 3.471546887523377, "step": 12994, "train/loss_ctc": 1.5122840404510498, "train/loss_error": 0.43160030245780945, "train/loss_total": 0.6477370858192444 }, { "epoch": 3.4718140528987442, "step": 12995, "train/loss_ctc": 0.3043068051338196, "train/loss_error": 0.4412355422973633, "train/loss_total": 0.413849800825119 }, { "epoch": 3.472081218274112, "step": 12996, "train/loss_ctc": 0.6810282468795776, "train/loss_error": 0.48429620265960693, "train/loss_total": 0.5236425995826721 }, { "epoch": 3.472348383649479, "step": 12997, "train/loss_ctc": 1.2603487968444824, "train/loss_error": 0.36987197399139404, "train/loss_total": 0.5479673147201538 }, { "epoch": 3.4726155490248463, "step": 12998, "train/loss_ctc": 0.5379446744918823, "train/loss_error": 0.3536413311958313, "train/loss_total": 0.390502005815506 }, { "epoch": 3.472882714400214, "step": 12999, "train/loss_ctc": 1.2520051002502441, "train/loss_error": 0.3948117196559906, "train/loss_total": 0.5662504434585571 }, { "epoch": 3.473149879775581, "grad_norm": 4.095322132110596, "learning_rate": 9.172321667111942e-06, "loss": 0.5417, "step": 13000 }, { "epoch": 3.473149879775581, "step": 13000, "train/loss_ctc": 0.7502040863037109, "train/loss_error": 0.4566762149333954, "train/loss_total": 0.5153818130493164 }, { "epoch": 3.4734170451509483, "step": 13001, "train/loss_ctc": 1.1109474897384644, "train/loss_error": 0.5146587491035461, "train/loss_total": 0.6339164972305298 }, { "epoch": 3.473684210526316, "step": 13002, "train/loss_ctc": 0.6205117106437683, "train/loss_error": 0.49160662293434143, "train/loss_total": 0.5173876285552979 }, { "epoch": 3.473951375901683, "step": 13003, 
"train/loss_ctc": 0.5501774549484253, "train/loss_error": 0.40457096695899963, "train/loss_total": 0.4336922764778137 }, { "epoch": 3.4742185412770503, "step": 13004, "train/loss_ctc": 0.6740988492965698, "train/loss_error": 0.40672722458839417, "train/loss_total": 0.46020156145095825 }, { "epoch": 3.474485706652418, "step": 13005, "train/loss_ctc": 0.448458731174469, "train/loss_error": 0.4757528305053711, "train/loss_total": 0.4702940285205841 }, { "epoch": 3.474752872027785, "step": 13006, "train/loss_ctc": 0.9962136745452881, "train/loss_error": 0.4378480017185211, "train/loss_total": 0.5495211482048035 }, { "epoch": 3.4750200374031524, "step": 13007, "train/loss_ctc": 0.9671175479888916, "train/loss_error": 0.5542858839035034, "train/loss_total": 0.6368522644042969 }, { "epoch": 3.47528720277852, "step": 13008, "train/loss_ctc": 0.846940279006958, "train/loss_error": 0.45423486828804016, "train/loss_total": 0.5327759981155396 }, { "epoch": 3.475554368153887, "step": 13009, "train/loss_ctc": 0.7313473224639893, "train/loss_error": 0.38967376947402954, "train/loss_total": 0.4580084979534149 }, { "epoch": 3.4758215335292544, "grad_norm": 2.027169704437256, "learning_rate": 9.1562917445899e-06, "loss": 0.5208, "step": 13010 }, { "epoch": 3.4758215335292544, "step": 13010, "train/loss_ctc": 0.7031265497207642, "train/loss_error": 0.418947696685791, "train/loss_total": 0.47578346729278564 }, { "epoch": 3.476088698904622, "step": 13011, "train/loss_ctc": 0.6522706747055054, "train/loss_error": 0.48358362913131714, "train/loss_total": 0.5173210501670837 }, { "epoch": 3.476355864279989, "step": 13012, "train/loss_ctc": 0.8953613638877869, "train/loss_error": 0.4336884021759033, "train/loss_total": 0.5260230302810669 }, { "epoch": 3.476623029655357, "step": 13013, "train/loss_ctc": 0.5858931541442871, "train/loss_error": 0.47710898518562317, "train/loss_total": 0.49886584281921387 }, { "epoch": 3.476890195030724, "step": 13014, "train/loss_ctc": 0.3796581029891968, 
"train/loss_error": 0.3898358941078186, "train/loss_total": 0.38780033588409424 }, { "epoch": 3.4771573604060912, "step": 13015, "train/loss_ctc": 0.5004951357841492, "train/loss_error": 0.3868158459663391, "train/loss_total": 0.4095517098903656 }, { "epoch": 3.477424525781459, "step": 13016, "train/loss_ctc": 0.4695899486541748, "train/loss_error": 0.4178899824619293, "train/loss_total": 0.4282299876213074 }, { "epoch": 3.477691691156826, "step": 13017, "train/loss_ctc": 0.9687547087669373, "train/loss_error": 0.4406591057777405, "train/loss_total": 0.5462782382965088 }, { "epoch": 3.4779588565321933, "step": 13018, "train/loss_ctc": 0.9020010232925415, "train/loss_error": 0.4715227782726288, "train/loss_total": 0.5576184391975403 }, { "epoch": 3.478226021907561, "step": 13019, "train/loss_ctc": 0.5924805402755737, "train/loss_error": 0.42836228013038635, "train/loss_total": 0.4611859619617462 }, { "epoch": 3.478493187282928, "grad_norm": 2.1029837131500244, "learning_rate": 9.14026182206786e-06, "loss": 0.4809, "step": 13020 }, { "epoch": 3.478493187282928, "step": 13020, "train/loss_ctc": 0.5540274381637573, "train/loss_error": 0.437644362449646, "train/loss_total": 0.4609209895133972 }, { "epoch": 3.4787603526582953, "step": 13021, "train/loss_ctc": 0.5944048166275024, "train/loss_error": 0.4184102714061737, "train/loss_total": 0.4536091685295105 }, { "epoch": 3.479027518033663, "step": 13022, "train/loss_ctc": 0.9121660590171814, "train/loss_error": 0.4673460125923157, "train/loss_total": 0.5563100576400757 }, { "epoch": 3.47929468340903, "step": 13023, "train/loss_ctc": 0.4781002402305603, "train/loss_error": 0.3617989122867584, "train/loss_total": 0.3850591778755188 }, { "epoch": 3.479561848784398, "step": 13024, "train/loss_ctc": 0.6893653869628906, "train/loss_error": 0.44175419211387634, "train/loss_total": 0.49127644300460815 }, { "epoch": 3.479829014159765, "step": 13025, "train/loss_ctc": 0.6362709999084473, "train/loss_error": 0.45731309056282043, 
"train/loss_total": 0.4931046962738037 }, { "epoch": 3.480096179535132, "step": 13026, "train/loss_ctc": 0.4532857835292816, "train/loss_error": 0.4462865889072418, "train/loss_total": 0.44768643379211426 }, { "epoch": 3.4803633449105, "step": 13027, "train/loss_ctc": 1.1926026344299316, "train/loss_error": 0.4581107497215271, "train/loss_total": 0.605009138584137 }, { "epoch": 3.480630510285867, "step": 13028, "train/loss_ctc": 1.6500755548477173, "train/loss_error": 0.4552586078643799, "train/loss_total": 0.6942219734191895 }, { "epoch": 3.480897675661234, "step": 13029, "train/loss_ctc": 0.37271860241889954, "train/loss_error": 0.38690659403800964, "train/loss_total": 0.3840689957141876 }, { "epoch": 3.481164841036602, "grad_norm": 1.6747041940689087, "learning_rate": 9.124231899545818e-06, "loss": 0.4971, "step": 13030 }, { "epoch": 3.481164841036602, "step": 13030, "train/loss_ctc": 1.5000700950622559, "train/loss_error": 0.3989499807357788, "train/loss_total": 0.6191740036010742 }, { "epoch": 3.481432006411969, "step": 13031, "train/loss_ctc": 0.21800708770751953, "train/loss_error": 0.3677372336387634, "train/loss_total": 0.33779120445251465 }, { "epoch": 3.4816991717873362, "step": 13032, "train/loss_ctc": 0.6779415607452393, "train/loss_error": 0.46285292506217957, "train/loss_total": 0.5058706998825073 }, { "epoch": 3.481966337162704, "step": 13033, "train/loss_ctc": 0.517325758934021, "train/loss_error": 0.42393752932548523, "train/loss_total": 0.44261521100997925 }, { "epoch": 3.482233502538071, "step": 13034, "train/loss_ctc": 0.6032146215438843, "train/loss_error": 0.436893105506897, "train/loss_total": 0.4701574146747589 }, { "epoch": 3.4825006679134383, "step": 13035, "train/loss_ctc": 0.8984123468399048, "train/loss_error": 0.4731324315071106, "train/loss_total": 0.5581884384155273 }, { "epoch": 3.482767833288806, "step": 13036, "train/loss_ctc": 0.9396864175796509, "train/loss_error": 0.4593159854412079, "train/loss_total": 0.5553901195526123 }, { 
"epoch": 3.483034998664173, "step": 13037, "train/loss_ctc": 0.3823467493057251, "train/loss_error": 0.415289044380188, "train/loss_total": 0.4087006151676178 }, { "epoch": 3.4833021640395403, "step": 13038, "train/loss_ctc": 1.1377911567687988, "train/loss_error": 0.5188069343566895, "train/loss_total": 0.6426038146018982 }, { "epoch": 3.483569329414908, "step": 13039, "train/loss_ctc": 0.5985498428344727, "train/loss_error": 0.3976450264453888, "train/loss_total": 0.437826007604599 }, { "epoch": 3.483836494790275, "grad_norm": 1.4371408224105835, "learning_rate": 9.108201977023778e-06, "loss": 0.4978, "step": 13040 }, { "epoch": 3.483836494790275, "step": 13040, "train/loss_ctc": 0.27190476655960083, "train/loss_error": 0.3381609618663788, "train/loss_total": 0.3249097168445587 }, { "epoch": 3.4841036601656423, "step": 13041, "train/loss_ctc": 1.1460682153701782, "train/loss_error": 0.4532814025878906, "train/loss_total": 0.5918387770652771 }, { "epoch": 3.48437082554101, "step": 13042, "train/loss_ctc": 0.6586693525314331, "train/loss_error": 0.5176756381988525, "train/loss_total": 0.5458744168281555 }, { "epoch": 3.484637990916377, "step": 13043, "train/loss_ctc": 0.8564788103103638, "train/loss_error": 0.48501017689704895, "train/loss_total": 0.559303879737854 }, { "epoch": 3.4849051562917444, "step": 13044, "train/loss_ctc": 0.8291767835617065, "train/loss_error": 0.44449785351753235, "train/loss_total": 0.5214336514472961 }, { "epoch": 3.485172321667112, "step": 13045, "train/loss_ctc": 0.5362918972969055, "train/loss_error": 0.4187667667865753, "train/loss_total": 0.44227179884910583 }, { "epoch": 3.485439487042479, "step": 13046, "train/loss_ctc": 0.5787196159362793, "train/loss_error": 0.36279645562171936, "train/loss_total": 0.4059810936450958 }, { "epoch": 3.485706652417847, "step": 13047, "train/loss_ctc": 1.0468635559082031, "train/loss_error": 0.47949740290641785, "train/loss_total": 0.5929706692695618 }, { "epoch": 3.485973817793214, "step": 13048, 
"train/loss_ctc": 1.0977811813354492, "train/loss_error": 0.43924006819725037, "train/loss_total": 0.5709483027458191 }, { "epoch": 3.486240983168581, "step": 13049, "train/loss_ctc": 0.9555282592773438, "train/loss_error": 0.4169413447380066, "train/loss_total": 0.524658739566803 }, { "epoch": 3.486508148543949, "grad_norm": 2.089576244354248, "learning_rate": 9.092172054501738e-06, "loss": 0.508, "step": 13050 }, { "epoch": 3.486508148543949, "step": 13050, "train/loss_ctc": 0.6991918087005615, "train/loss_error": 0.46399611234664917, "train/loss_total": 0.5110352635383606 }, { "epoch": 3.486775313919316, "step": 13051, "train/loss_ctc": 1.4034343957901, "train/loss_error": 0.4688913822174072, "train/loss_total": 0.6557999849319458 }, { "epoch": 3.4870424792946833, "step": 13052, "train/loss_ctc": 0.3338356614112854, "train/loss_error": 0.42872416973114014, "train/loss_total": 0.4097464680671692 }, { "epoch": 3.487309644670051, "step": 13053, "train/loss_ctc": 0.7545539140701294, "train/loss_error": 0.38689932227134705, "train/loss_total": 0.4604302644729614 }, { "epoch": 3.487576810045418, "step": 13054, "train/loss_ctc": 0.46836036443710327, "train/loss_error": 0.4291541576385498, "train/loss_total": 0.43699538707733154 }, { "epoch": 3.4878439754207853, "step": 13055, "train/loss_ctc": 0.4906443953514099, "train/loss_error": 0.4310194253921509, "train/loss_total": 0.4429444372653961 }, { "epoch": 3.488111140796153, "step": 13056, "train/loss_ctc": 1.2486423254013062, "train/loss_error": 0.4067835807800293, "train/loss_total": 0.5751553773880005 }, { "epoch": 3.48837830617152, "step": 13057, "train/loss_ctc": 0.6221638321876526, "train/loss_error": 0.46713578701019287, "train/loss_total": 0.49814140796661377 }, { "epoch": 3.4886454715468878, "step": 13058, "train/loss_ctc": 0.43819576501846313, "train/loss_error": 0.5142190456390381, "train/loss_total": 0.49901440739631653 }, { "epoch": 3.488912636922255, "step": 13059, "train/loss_ctc": 0.9654139876365662, 
"train/loss_error": 0.47221046686172485, "train/loss_total": 0.5708511471748352 }, { "epoch": 3.489179802297622, "grad_norm": 2.375847816467285, "learning_rate": 9.076142131979696e-06, "loss": 0.506, "step": 13060 }, { "epoch": 3.489179802297622, "step": 13060, "train/loss_ctc": 0.7707442045211792, "train/loss_error": 0.482437402009964, "train/loss_total": 0.5400987863540649 }, { "epoch": 3.48944696767299, "step": 13061, "train/loss_ctc": 0.5242793560028076, "train/loss_error": 0.48788806796073914, "train/loss_total": 0.4951663315296173 }, { "epoch": 3.489714133048357, "step": 13062, "train/loss_ctc": 0.43197527527809143, "train/loss_error": 0.39694684743881226, "train/loss_total": 0.40395253896713257 }, { "epoch": 3.489981298423724, "step": 13063, "train/loss_ctc": 1.2381106615066528, "train/loss_error": 0.40716496109962463, "train/loss_total": 0.5733541250228882 }, { "epoch": 3.490248463799092, "step": 13064, "train/loss_ctc": 0.7477317452430725, "train/loss_error": 0.5427793860435486, "train/loss_total": 0.5837698578834534 }, { "epoch": 3.490515629174459, "step": 13065, "train/loss_ctc": 0.7609684467315674, "train/loss_error": 0.4677538275718689, "train/loss_total": 0.5263967514038086 }, { "epoch": 3.490782794549826, "step": 13066, "train/loss_ctc": 1.002260684967041, "train/loss_error": 0.47719329595565796, "train/loss_total": 0.5822067856788635 }, { "epoch": 3.491049959925194, "step": 13067, "train/loss_ctc": 0.9508224725723267, "train/loss_error": 0.4915556311607361, "train/loss_total": 0.5834090113639832 }, { "epoch": 3.491317125300561, "step": 13068, "train/loss_ctc": 0.41256600618362427, "train/loss_error": 0.4549621343612671, "train/loss_total": 0.44648292660713196 }, { "epoch": 3.4915842906759282, "step": 13069, "train/loss_ctc": 0.2155347764492035, "train/loss_error": 0.45591872930526733, "train/loss_total": 0.40784192085266113 }, { "epoch": 3.491851456051296, "grad_norm": 1.619402289390564, "learning_rate": 9.060112209457654e-06, "loss": 0.5143, 
"step": 13070 }, { "epoch": 3.491851456051296, "step": 13070, "train/loss_ctc": 0.4354032874107361, "train/loss_error": 0.47777730226516724, "train/loss_total": 0.4693025052547455 }, { "epoch": 3.492118621426663, "step": 13071, "train/loss_ctc": 0.796044647693634, "train/loss_error": 0.4812047481536865, "train/loss_total": 0.5441727638244629 }, { "epoch": 3.4923857868020303, "step": 13072, "train/loss_ctc": 0.7765342593193054, "train/loss_error": 0.45955610275268555, "train/loss_total": 0.5229517221450806 }, { "epoch": 3.492652952177398, "step": 13073, "train/loss_ctc": 0.7190686464309692, "train/loss_error": 0.48433640599250793, "train/loss_total": 0.531282901763916 }, { "epoch": 3.492920117552765, "step": 13074, "train/loss_ctc": 0.7181088924407959, "train/loss_error": 0.5067797303199768, "train/loss_total": 0.5490455627441406 }, { "epoch": 3.4931872829281323, "step": 13075, "train/loss_ctc": 0.6100843548774719, "train/loss_error": 0.4271059036254883, "train/loss_total": 0.46370160579681396 }, { "epoch": 3.4934544483035, "step": 13076, "train/loss_ctc": 0.4334712028503418, "train/loss_error": 0.3848070502281189, "train/loss_total": 0.39453989267349243 }, { "epoch": 3.493721613678867, "step": 13077, "train/loss_ctc": 0.7111924886703491, "train/loss_error": 0.45975977182388306, "train/loss_total": 0.5100463628768921 }, { "epoch": 3.4939887790542343, "step": 13078, "train/loss_ctc": 0.31410157680511475, "train/loss_error": 0.3744124174118042, "train/loss_total": 0.3623502552509308 }, { "epoch": 3.494255944429602, "step": 13079, "train/loss_ctc": 0.6785714626312256, "train/loss_error": 0.39546263217926025, "train/loss_total": 0.45208439230918884 }, { "epoch": 3.494523109804969, "grad_norm": 1.7573299407958984, "learning_rate": 9.044082286935614e-06, "loss": 0.4799, "step": 13080 }, { "epoch": 3.494523109804969, "step": 13080, "train/loss_ctc": 0.3565993309020996, "train/loss_error": 0.5196340084075928, "train/loss_total": 0.4870270788669586 }, { "epoch": 
3.494790275180337, "step": 13081, "train/loss_ctc": 0.8945710062980652, "train/loss_error": 0.418363094329834, "train/loss_total": 0.5136047005653381 }, { "epoch": 3.495057440555704, "step": 13082, "train/loss_ctc": 1.119469404220581, "train/loss_error": 0.4885398745536804, "train/loss_total": 0.6147258281707764 }, { "epoch": 3.495324605931071, "step": 13083, "train/loss_ctc": 1.1093764305114746, "train/loss_error": 0.5430195331573486, "train/loss_total": 0.6562909483909607 }, { "epoch": 3.495591771306439, "step": 13084, "train/loss_ctc": 0.29780036211013794, "train/loss_error": 0.38211125135421753, "train/loss_total": 0.36524906754493713 }, { "epoch": 3.495858936681806, "step": 13085, "train/loss_ctc": 0.29429686069488525, "train/loss_error": 0.44857257604599, "train/loss_total": 0.41771745681762695 }, { "epoch": 3.4961261020571732, "step": 13086, "train/loss_ctc": 0.9908434748649597, "train/loss_error": 0.40346112847328186, "train/loss_total": 0.5209375619888306 }, { "epoch": 3.496393267432541, "step": 13087, "train/loss_ctc": 0.7346615791320801, "train/loss_error": 0.4545131027698517, "train/loss_total": 0.5105428099632263 }, { "epoch": 3.496660432807908, "step": 13088, "train/loss_ctc": 0.7545823454856873, "train/loss_error": 0.49391546845436096, "train/loss_total": 0.5460488796234131 }, { "epoch": 3.4969275981832753, "step": 13089, "train/loss_ctc": 0.7692894339561462, "train/loss_error": 0.4385383129119873, "train/loss_total": 0.5046885013580322 }, { "epoch": 3.497194763558643, "grad_norm": 1.0473257303237915, "learning_rate": 9.028052364413572e-06, "loss": 0.5137, "step": 13090 }, { "epoch": 3.497194763558643, "step": 13090, "train/loss_ctc": 0.8198127746582031, "train/loss_error": 0.44590866565704346, "train/loss_total": 0.5206894874572754 }, { "epoch": 3.49746192893401, "step": 13091, "train/loss_ctc": 0.9995355010032654, "train/loss_error": 0.4363304078578949, "train/loss_total": 0.54897141456604 }, { "epoch": 3.4977290943093777, "step": 13092, 
"train/loss_ctc": 0.7318369150161743, "train/loss_error": 0.49761855602264404, "train/loss_total": 0.544462263584137 }, { "epoch": 3.497996259684745, "step": 13093, "train/loss_ctc": 0.9237347841262817, "train/loss_error": 0.4347259998321533, "train/loss_total": 0.53252774477005 }, { "epoch": 3.498263425060112, "step": 13094, "train/loss_ctc": 0.41845181584358215, "train/loss_error": 0.4421836733818054, "train/loss_total": 0.4374373257160187 }, { "epoch": 3.4985305904354798, "step": 13095, "train/loss_ctc": 0.41351407766342163, "train/loss_error": 0.4214061200618744, "train/loss_total": 0.41982772946357727 }, { "epoch": 3.498797755810847, "step": 13096, "train/loss_ctc": 0.5902059078216553, "train/loss_error": 0.40566274523735046, "train/loss_total": 0.44257140159606934 }, { "epoch": 3.499064921186214, "step": 13097, "train/loss_ctc": 0.5768975019454956, "train/loss_error": 0.47336700558662415, "train/loss_total": 0.49407312273979187 }, { "epoch": 3.499332086561582, "step": 13098, "train/loss_ctc": 0.7178730964660645, "train/loss_error": 0.46888983249664307, "train/loss_total": 0.5186865329742432 }, { "epoch": 3.499599251936949, "step": 13099, "train/loss_ctc": 0.5409089922904968, "train/loss_error": 0.5061039924621582, "train/loss_total": 0.513064980506897 }, { "epoch": 3.499866417312316, "grad_norm": 2.7959165573120117, "learning_rate": 9.01202244189153e-06, "loss": 0.4972, "step": 13100 }, { "epoch": 3.499866417312316, "step": 13100, "train/loss_ctc": 0.8189666271209717, "train/loss_error": 0.45027950406074524, "train/loss_total": 0.5240169763565063 }, { "epoch": 3.500133582687684, "step": 13101, "train/loss_ctc": 0.4588174819946289, "train/loss_error": 0.4638422727584839, "train/loss_total": 0.4628373086452484 }, { "epoch": 3.500400748063051, "step": 13102, "train/loss_ctc": 0.3059898018836975, "train/loss_error": 0.3814077079296112, "train/loss_total": 0.36632412672042847 }, { "epoch": 3.500667913438418, "step": 13103, "train/loss_ctc": 0.8261963129043579, 
"train/loss_error": 0.5543295741081238, "train/loss_total": 0.6087028980255127 }, { "epoch": 3.500935078813786, "step": 13104, "train/loss_ctc": 0.6647911667823792, "train/loss_error": 0.42513301968574524, "train/loss_total": 0.473064661026001 }, { "epoch": 3.501202244189153, "step": 13105, "train/loss_ctc": 0.5735452175140381, "train/loss_error": 0.4239736497402191, "train/loss_total": 0.4538879692554474 }, { "epoch": 3.5014694095645202, "step": 13106, "train/loss_ctc": 0.6881406903266907, "train/loss_error": 0.5486399531364441, "train/loss_total": 0.5765401124954224 }, { "epoch": 3.501736574939888, "step": 13107, "train/loss_ctc": 0.7653385400772095, "train/loss_error": 0.45570769906044006, "train/loss_total": 0.5176339149475098 }, { "epoch": 3.502003740315255, "step": 13108, "train/loss_ctc": 0.6862989664077759, "train/loss_error": 0.42386937141418457, "train/loss_total": 0.47635531425476074 }, { "epoch": 3.5022709056906223, "step": 13109, "train/loss_ctc": 0.9275006651878357, "train/loss_error": 0.48657095432281494, "train/loss_total": 0.574756920337677 }, { "epoch": 3.50253807106599, "grad_norm": 1.5212066173553467, "learning_rate": 8.99599251936949e-06, "loss": 0.5034, "step": 13110 }, { "epoch": 3.50253807106599, "step": 13110, "train/loss_ctc": 0.5292913913726807, "train/loss_error": 0.48161572217941284, "train/loss_total": 0.4911508560180664 }, { "epoch": 3.502805236441357, "step": 13111, "train/loss_ctc": 0.8599867224693298, "train/loss_error": 0.47947007417678833, "train/loss_total": 0.5555734038352966 }, { "epoch": 3.5030724018167243, "step": 13112, "train/loss_ctc": 0.49939072132110596, "train/loss_error": 0.4858649969100952, "train/loss_total": 0.4885701537132263 }, { "epoch": 3.503339567192092, "step": 13113, "train/loss_ctc": 0.3970760107040405, "train/loss_error": 0.4314349293708801, "train/loss_total": 0.42456313967704773 }, { "epoch": 3.503606732567459, "step": 13114, "train/loss_ctc": 0.6804060339927673, "train/loss_error": 0.4573915898799896, 
"train/loss_total": 0.5019944906234741 }, { "epoch": 3.5038738979428268, "step": 13115, "train/loss_ctc": 0.7030310034751892, "train/loss_error": 0.5078939199447632, "train/loss_total": 0.5469213724136353 }, { "epoch": 3.504141063318194, "step": 13116, "train/loss_ctc": 0.6428420543670654, "train/loss_error": 0.5513004064559937, "train/loss_total": 0.569608747959137 }, { "epoch": 3.504408228693561, "step": 13117, "train/loss_ctc": 0.7696943879127502, "train/loss_error": 0.43007251620292664, "train/loss_total": 0.49799686670303345 }, { "epoch": 3.504675394068929, "step": 13118, "train/loss_ctc": 0.5844862461090088, "train/loss_error": 0.3921397626399994, "train/loss_total": 0.4306090772151947 }, { "epoch": 3.504942559444296, "step": 13119, "train/loss_ctc": 0.7386263608932495, "train/loss_error": 0.456834614276886, "train/loss_total": 0.5131929516792297 }, { "epoch": 3.5052097248196636, "grad_norm": 5.905002117156982, "learning_rate": 8.979962596847448e-06, "loss": 0.502, "step": 13120 }, { "epoch": 3.5052097248196636, "step": 13120, "train/loss_ctc": 1.1929339170455933, "train/loss_error": 0.3952046036720276, "train/loss_total": 0.5547504425048828 }, { "epoch": 3.505476890195031, "step": 13121, "train/loss_ctc": 0.88437819480896, "train/loss_error": 0.3987758755683899, "train/loss_total": 0.4958963394165039 }, { "epoch": 3.505744055570398, "step": 13122, "train/loss_ctc": 0.5598118305206299, "train/loss_error": 0.4188920855522156, "train/loss_total": 0.44707605242729187 }, { "epoch": 3.5060112209457657, "step": 13123, "train/loss_ctc": 0.49439549446105957, "train/loss_error": 0.4915589988231659, "train/loss_total": 0.49212631583213806 }, { "epoch": 3.506278386321133, "step": 13124, "train/loss_ctc": 0.7543778419494629, "train/loss_error": 0.4571725130081177, "train/loss_total": 0.5166136026382446 }, { "epoch": 3.5065455516965, "step": 13125, "train/loss_ctc": 0.35010644793510437, "train/loss_error": 0.47536712884902954, "train/loss_total": 0.450314998626709 }, { 
"epoch": 3.5068127170718677, "step": 13126, "train/loss_ctc": 0.39293184876441956, "train/loss_error": 0.4425930678844452, "train/loss_total": 0.4326608180999756 }, { "epoch": 3.507079882447235, "step": 13127, "train/loss_ctc": 0.5839424133300781, "train/loss_error": 0.3976686894893646, "train/loss_total": 0.4349234402179718 }, { "epoch": 3.507347047822602, "step": 13128, "train/loss_ctc": 0.8367341756820679, "train/loss_error": 0.47562381625175476, "train/loss_total": 0.5478459000587463 }, { "epoch": 3.5076142131979697, "step": 13129, "train/loss_ctc": 0.4427363872528076, "train/loss_error": 0.43922242522239685, "train/loss_total": 0.4399252235889435 }, { "epoch": 3.507881378573337, "grad_norm": 1.5913910865783691, "learning_rate": 8.963932674325408e-06, "loss": 0.4812, "step": 13130 }, { "epoch": 3.507881378573337, "step": 13130, "train/loss_ctc": 0.4646083116531372, "train/loss_error": 0.4572969675064087, "train/loss_total": 0.45875924825668335 }, { "epoch": 3.508148543948704, "step": 13131, "train/loss_ctc": 0.38225239515304565, "train/loss_error": 0.42694926261901855, "train/loss_total": 0.418009877204895 }, { "epoch": 3.5084157093240718, "step": 13132, "train/loss_ctc": 0.27863043546676636, "train/loss_error": 0.4430329501628876, "train/loss_total": 0.4101524353027344 }, { "epoch": 3.508682874699439, "step": 13133, "train/loss_ctc": 0.6577130556106567, "train/loss_error": 0.44310709834098816, "train/loss_total": 0.4860283136367798 }, { "epoch": 3.508950040074806, "step": 13134, "train/loss_ctc": 0.6663687229156494, "train/loss_error": 0.5188412070274353, "train/loss_total": 0.548346757888794 }, { "epoch": 3.509217205450174, "step": 13135, "train/loss_ctc": 0.31142520904541016, "train/loss_error": 0.4975203275680542, "train/loss_total": 0.46030130982398987 }, { "epoch": 3.509484370825541, "step": 13136, "train/loss_ctc": 0.6324483156204224, "train/loss_error": 0.3792269229888916, "train/loss_total": 0.42987120151519775 }, { "epoch": 3.509751536200908, "step": 
13137, "train/loss_ctc": 1.027833342552185, "train/loss_error": 0.41687050461769104, "train/loss_total": 0.5390630960464478 }, { "epoch": 3.510018701576276, "step": 13138, "train/loss_ctc": 0.6365209221839905, "train/loss_error": 0.527593731880188, "train/loss_total": 0.5493791699409485 }, { "epoch": 3.510285866951643, "step": 13139, "train/loss_ctc": 1.0970582962036133, "train/loss_error": 0.43996599316596985, "train/loss_total": 0.5713844299316406 }, { "epoch": 3.51055303232701, "grad_norm": 1.8743096590042114, "learning_rate": 8.947902751803367e-06, "loss": 0.4871, "step": 13140 }, { "epoch": 3.51055303232701, "step": 13140, "train/loss_ctc": 0.5295214653015137, "train/loss_error": 0.3920644223690033, "train/loss_total": 0.4195558428764343 }, { "epoch": 3.510820197702378, "step": 13141, "train/loss_ctc": 0.5543569326400757, "train/loss_error": 0.5674440860748291, "train/loss_total": 0.5648266673088074 }, { "epoch": 3.511087363077745, "step": 13142, "train/loss_ctc": 1.1235032081604004, "train/loss_error": 0.5168381333351135, "train/loss_total": 0.6381711363792419 }, { "epoch": 3.5113545284531122, "step": 13143, "train/loss_ctc": 0.6713487505912781, "train/loss_error": 0.43171215057373047, "train/loss_total": 0.47963947057724 }, { "epoch": 3.51162169382848, "step": 13144, "train/loss_ctc": 0.5288335084915161, "train/loss_error": 0.42362654209136963, "train/loss_total": 0.4446679651737213 }, { "epoch": 3.511888859203847, "step": 13145, "train/loss_ctc": 0.60780930519104, "train/loss_error": 0.49910539388656616, "train/loss_total": 0.5208461880683899 }, { "epoch": 3.5121560245792143, "step": 13146, "train/loss_ctc": 0.27843350172042847, "train/loss_error": 0.4284535050392151, "train/loss_total": 0.39844951033592224 }, { "epoch": 3.512423189954582, "step": 13147, "train/loss_ctc": 0.6001896858215332, "train/loss_error": 0.4310271739959717, "train/loss_total": 0.4648596942424774 }, { "epoch": 3.512690355329949, "step": 13148, "train/loss_ctc": 0.522760272026062, 
"train/loss_error": 0.36603665351867676, "train/loss_total": 0.39738139510154724 }, { "epoch": 3.5129575207053167, "step": 13149, "train/loss_ctc": 0.9402378797531128, "train/loss_error": 0.451162189245224, "train/loss_total": 0.5489773154258728 }, { "epoch": 3.513224686080684, "grad_norm": 2.36940598487854, "learning_rate": 8.931872829281325e-06, "loss": 0.4877, "step": 13150 }, { "epoch": 3.513224686080684, "step": 13150, "train/loss_ctc": 0.7701988220214844, "train/loss_error": 0.4573742151260376, "train/loss_total": 0.5199391841888428 }, { "epoch": 3.513491851456051, "step": 13151, "train/loss_ctc": 0.7039374113082886, "train/loss_error": 0.37398213148117065, "train/loss_total": 0.43997320532798767 }, { "epoch": 3.513759016831419, "step": 13152, "train/loss_ctc": 0.919330894947052, "train/loss_error": 0.4417255222797394, "train/loss_total": 0.537246584892273 }, { "epoch": 3.514026182206786, "step": 13153, "train/loss_ctc": 0.8676563501358032, "train/loss_error": 0.4133927822113037, "train/loss_total": 0.5042455196380615 }, { "epoch": 3.5142933475821536, "step": 13154, "train/loss_ctc": 0.35821759700775146, "train/loss_error": 0.43000009655952454, "train/loss_total": 0.4156436026096344 }, { "epoch": 3.514560512957521, "step": 13155, "train/loss_ctc": 0.4330902695655823, "train/loss_error": 0.45950639247894287, "train/loss_total": 0.4542231857776642 }, { "epoch": 3.514827678332888, "step": 13156, "train/loss_ctc": 0.27699577808380127, "train/loss_error": 0.42516860365867615, "train/loss_total": 0.39553403854370117 }, { "epoch": 3.5150948437082556, "step": 13157, "train/loss_ctc": 0.7741240859031677, "train/loss_error": 0.5161950588226318, "train/loss_total": 0.5677808523178101 }, { "epoch": 3.515362009083623, "step": 13158, "train/loss_ctc": 0.6261649131774902, "train/loss_error": 0.3950120806694031, "train/loss_total": 0.44124263525009155 }, { "epoch": 3.51562917445899, "step": 13159, "train/loss_ctc": 0.5659757852554321, "train/loss_error": 0.3621005117893219, 
"train/loss_total": 0.4028755724430084 }, { "epoch": 3.5158963398343577, "grad_norm": 1.8319334983825684, "learning_rate": 8.915842906759285e-06, "loss": 0.4679, "step": 13160 }, { "epoch": 3.5158963398343577, "step": 13160, "train/loss_ctc": 0.49376150965690613, "train/loss_error": 0.3822311758995056, "train/loss_total": 0.40453726053237915 }, { "epoch": 3.516163505209725, "step": 13161, "train/loss_ctc": 0.524728536605835, "train/loss_error": 0.4488731026649475, "train/loss_total": 0.4640442132949829 }, { "epoch": 3.516430670585092, "step": 13162, "train/loss_ctc": 0.45940208435058594, "train/loss_error": 0.4822525084018707, "train/loss_total": 0.4776824116706848 }, { "epoch": 3.5166978359604597, "step": 13163, "train/loss_ctc": 0.523171603679657, "train/loss_error": 0.3947230279445648, "train/loss_total": 0.42041274905204773 }, { "epoch": 3.516965001335827, "step": 13164, "train/loss_ctc": 0.8325262069702148, "train/loss_error": 0.4592079222202301, "train/loss_total": 0.533871591091156 }, { "epoch": 3.517232166711194, "step": 13165, "train/loss_ctc": 0.8835763335227966, "train/loss_error": 0.3966875970363617, "train/loss_total": 0.4940653443336487 }, { "epoch": 3.5174993320865617, "step": 13166, "train/loss_ctc": 0.4528133273124695, "train/loss_error": 0.5422411561012268, "train/loss_total": 0.5243555903434753 }, { "epoch": 3.517766497461929, "step": 13167, "train/loss_ctc": 0.6490142345428467, "train/loss_error": 0.42437148094177246, "train/loss_total": 0.4693000316619873 }, { "epoch": 3.518033662837296, "step": 13168, "train/loss_ctc": 1.0506515502929688, "train/loss_error": 0.4700872600078583, "train/loss_total": 0.5862001180648804 }, { "epoch": 3.5183008282126638, "step": 13169, "train/loss_ctc": 0.5127162933349609, "train/loss_error": 0.4057135581970215, "train/loss_total": 0.4271141290664673 }, { "epoch": 3.518567993588031, "grad_norm": 2.2611992359161377, "learning_rate": 8.899812984237243e-06, "loss": 0.4802, "step": 13170 }, { "epoch": 
3.518567993588031, "step": 13170, "train/loss_ctc": 0.920894205570221, "train/loss_error": 0.490857869386673, "train/loss_total": 0.5768651366233826 }, { "epoch": 3.518835158963398, "step": 13171, "train/loss_ctc": 0.22935408353805542, "train/loss_error": 0.3789583742618561, "train/loss_total": 0.3490375280380249 }, { "epoch": 3.519102324338766, "step": 13172, "train/loss_ctc": 0.5387080907821655, "train/loss_error": 0.5063900351524353, "train/loss_total": 0.5128536224365234 }, { "epoch": 3.519369489714133, "step": 13173, "train/loss_ctc": 0.8177565336227417, "train/loss_error": 0.420159250497818, "train/loss_total": 0.49967873096466064 }, { "epoch": 3.5196366550895, "step": 13174, "train/loss_ctc": 0.8826917409896851, "train/loss_error": 0.47837525606155396, "train/loss_total": 0.5592385530471802 }, { "epoch": 3.519903820464868, "step": 13175, "train/loss_ctc": 0.8253746032714844, "train/loss_error": 0.4277164340019226, "train/loss_total": 0.5072481036186218 }, { "epoch": 3.520170985840235, "step": 13176, "train/loss_ctc": 0.6739548444747925, "train/loss_error": 0.4849938750267029, "train/loss_total": 0.5227860808372498 }, { "epoch": 3.520438151215602, "step": 13177, "train/loss_ctc": 0.32440829277038574, "train/loss_error": 0.4184751808643341, "train/loss_total": 0.3996618092060089 }, { "epoch": 3.52070531659097, "step": 13178, "train/loss_ctc": 0.5275799036026001, "train/loss_error": 0.44047173857688904, "train/loss_total": 0.45789337158203125 }, { "epoch": 3.520972481966337, "step": 13179, "train/loss_ctc": 0.732191801071167, "train/loss_error": 0.45342355966567993, "train/loss_total": 0.5091772079467773 }, { "epoch": 3.5212396473417042, "grad_norm": 2.2198984622955322, "learning_rate": 8.883783061715201e-06, "loss": 0.4894, "step": 13180 }, { "epoch": 3.5212396473417042, "step": 13180, "train/loss_ctc": 1.094504714012146, "train/loss_error": 0.4621511995792389, "train/loss_total": 0.5886219143867493 }, { "epoch": 3.521506812717072, "step": 13181, 
"train/loss_ctc": 0.6432067155838013, "train/loss_error": 0.42175135016441345, "train/loss_total": 0.4660424590110779 }, { "epoch": 3.521773978092439, "step": 13182, "train/loss_ctc": 0.49671366810798645, "train/loss_error": 0.46508902311325073, "train/loss_total": 0.4714139699935913 }, { "epoch": 3.5220411434678067, "step": 13183, "train/loss_ctc": 0.605605959892273, "train/loss_error": 0.4497654438018799, "train/loss_total": 0.4809335470199585 }, { "epoch": 3.522308308843174, "step": 13184, "train/loss_ctc": 1.2491028308868408, "train/loss_error": 0.464127779006958, "train/loss_total": 0.6211227774620056 }, { "epoch": 3.522575474218541, "step": 13185, "train/loss_ctc": 0.34031665325164795, "train/loss_error": 0.4216824173927307, "train/loss_total": 0.4054092764854431 }, { "epoch": 3.5228426395939088, "step": 13186, "train/loss_ctc": 0.600099503993988, "train/loss_error": 0.43080973625183105, "train/loss_total": 0.4646677076816559 }, { "epoch": 3.523109804969276, "step": 13187, "train/loss_ctc": 0.39587944746017456, "train/loss_error": 0.4213189482688904, "train/loss_total": 0.41623106598854065 }, { "epoch": 3.5233769703446436, "step": 13188, "train/loss_ctc": 0.7319114208221436, "train/loss_error": 0.4198729991912842, "train/loss_total": 0.4822806715965271 }, { "epoch": 3.523644135720011, "step": 13189, "train/loss_ctc": 0.7152559757232666, "train/loss_error": 0.39911240339279175, "train/loss_total": 0.4623411297798157 }, { "epoch": 3.523911301095378, "grad_norm": 3.5499346256256104, "learning_rate": 8.867753139193161e-06, "loss": 0.4859, "step": 13190 }, { "epoch": 3.523911301095378, "step": 13190, "train/loss_ctc": 0.6154146790504456, "train/loss_error": 0.4609321355819702, "train/loss_total": 0.49182865023612976 }, { "epoch": 3.5241784664707456, "step": 13191, "train/loss_ctc": 0.7258812189102173, "train/loss_error": 0.5407609343528748, "train/loss_total": 0.5777850151062012 }, { "epoch": 3.524445631846113, "step": 13192, "train/loss_ctc": 0.6269465684890747, 
"train/loss_error": 0.3194206953048706, "train/loss_total": 0.38092589378356934 }, { "epoch": 3.52471279722148, "step": 13193, "train/loss_ctc": 0.29733192920684814, "train/loss_error": 0.375459760427475, "train/loss_total": 0.3598341941833496 }, { "epoch": 3.5249799625968476, "step": 13194, "train/loss_ctc": 0.32952219247817993, "train/loss_error": 0.40742871165275574, "train/loss_total": 0.3918474018573761 }, { "epoch": 3.525247127972215, "step": 13195, "train/loss_ctc": 0.7034727931022644, "train/loss_error": 0.5096076726913452, "train/loss_total": 0.5483807325363159 }, { "epoch": 3.525514293347582, "step": 13196, "train/loss_ctc": 0.3288021385669708, "train/loss_error": 0.3940476179122925, "train/loss_total": 0.38099852204322815 }, { "epoch": 3.5257814587229497, "step": 13197, "train/loss_ctc": 1.0271936655044556, "train/loss_error": 0.45441117882728577, "train/loss_total": 0.5689677000045776 }, { "epoch": 3.526048624098317, "step": 13198, "train/loss_ctc": 0.8377407789230347, "train/loss_error": 0.4438406825065613, "train/loss_total": 0.5226207375526428 }, { "epoch": 3.526315789473684, "step": 13199, "train/loss_ctc": 0.8716758489608765, "train/loss_error": 0.4617672264575958, "train/loss_total": 0.5437489748001099 }, { "epoch": 3.5265829548490517, "grad_norm": 1.472884178161621, "learning_rate": 8.85172321667112e-06, "loss": 0.4767, "step": 13200 }, { "epoch": 3.5265829548490517, "step": 13200, "train/loss_ctc": 0.7415177822113037, "train/loss_error": 0.3913235664367676, "train/loss_total": 0.46136242151260376 }, { "epoch": 3.526850120224419, "step": 13201, "train/loss_ctc": 1.371606469154358, "train/loss_error": 0.4496324062347412, "train/loss_total": 0.6340272426605225 }, { "epoch": 3.527117285599786, "step": 13202, "train/loss_ctc": 0.5465226173400879, "train/loss_error": 0.4746725857257843, "train/loss_total": 0.48904258012771606 }, { "epoch": 3.5273844509751537, "step": 13203, "train/loss_ctc": 0.5956729650497437, "train/loss_error": 0.47469690442085266, 
"train/loss_total": 0.4988921284675598 }, { "epoch": 3.527651616350521, "step": 13204, "train/loss_ctc": 0.675382673740387, "train/loss_error": 0.43647709488868713, "train/loss_total": 0.484258234500885 }, { "epoch": 3.527918781725888, "step": 13205, "train/loss_ctc": 0.7040352821350098, "train/loss_error": 0.48555269837379456, "train/loss_total": 0.5292491912841797 }, { "epoch": 3.5281859471012558, "step": 13206, "train/loss_ctc": 0.5808796286582947, "train/loss_error": 0.49638110399246216, "train/loss_total": 0.5132808089256287 }, { "epoch": 3.528453112476623, "step": 13207, "train/loss_ctc": 0.8290510177612305, "train/loss_error": 0.4583394527435303, "train/loss_total": 0.5324817895889282 }, { "epoch": 3.52872027785199, "step": 13208, "train/loss_ctc": 1.5443081855773926, "train/loss_error": 0.4186691641807556, "train/loss_total": 0.643796980381012 }, { "epoch": 3.528987443227358, "step": 13209, "train/loss_ctc": 1.5462976694107056, "train/loss_error": 0.5061381459236145, "train/loss_total": 0.7141700983047485 }, { "epoch": 3.529254608602725, "grad_norm": 5.653702259063721, "learning_rate": 8.835693294149077e-06, "loss": 0.5501, "step": 13210 }, { "epoch": 3.529254608602725, "step": 13210, "train/loss_ctc": 0.37402504682540894, "train/loss_error": 0.37629732489585876, "train/loss_total": 0.3758428990840912 }, { "epoch": 3.529521773978092, "step": 13211, "train/loss_ctc": 0.3358234763145447, "train/loss_error": 0.3390122056007385, "train/loss_total": 0.33837446570396423 }, { "epoch": 3.52978893935346, "step": 13212, "train/loss_ctc": 0.8252263069152832, "train/loss_error": 0.45306897163391113, "train/loss_total": 0.5275004506111145 }, { "epoch": 3.530056104728827, "step": 13213, "train/loss_ctc": 0.6505863070487976, "train/loss_error": 0.43756788969039917, "train/loss_total": 0.4801715910434723 }, { "epoch": 3.5303232701041942, "step": 13214, "train/loss_ctc": 0.4835638999938965, "train/loss_error": 0.3593916893005371, "train/loss_total": 0.38422614336013794 }, { 
"epoch": 3.530590435479562, "step": 13215, "train/loss_ctc": 1.0059194564819336, "train/loss_error": 0.5146653056144714, "train/loss_total": 0.6129161715507507 }, { "epoch": 3.530857600854929, "step": 13216, "train/loss_ctc": 0.6563054919242859, "train/loss_error": 0.44798898696899414, "train/loss_total": 0.48965227603912354 }, { "epoch": 3.5311247662302967, "step": 13217, "train/loss_ctc": 0.38419145345687866, "train/loss_error": 0.4089987874031067, "train/loss_total": 0.40403732657432556 }, { "epoch": 3.531391931605664, "step": 13218, "train/loss_ctc": 0.40687885880470276, "train/loss_error": 0.4625789523124695, "train/loss_total": 0.45143893361091614 }, { "epoch": 3.5316590969810315, "step": 13219, "train/loss_ctc": 0.8476254343986511, "train/loss_error": 0.4582979679107666, "train/loss_total": 0.5361634492874146 }, { "epoch": 3.5319262623563987, "grad_norm": 1.9583309888839722, "learning_rate": 8.819663371627039e-06, "loss": 0.46, "step": 13220 }, { "epoch": 3.5319262623563987, "step": 13220, "train/loss_ctc": 0.6260426640510559, "train/loss_error": 0.45364663004875183, "train/loss_total": 0.48812586069107056 }, { "epoch": 3.532193427731766, "step": 13221, "train/loss_ctc": 0.44605663418769836, "train/loss_error": 0.4891813397407532, "train/loss_total": 0.4805564284324646 }, { "epoch": 3.5324605931071336, "step": 13222, "train/loss_ctc": 0.5677001476287842, "train/loss_error": 0.5049954652786255, "train/loss_total": 0.5175364017486572 }, { "epoch": 3.5327277584825008, "step": 13223, "train/loss_ctc": 0.9343270063400269, "train/loss_error": 0.4505540132522583, "train/loss_total": 0.547308623790741 }, { "epoch": 3.532994923857868, "step": 13224, "train/loss_ctc": 0.5267338752746582, "train/loss_error": 0.435491144657135, "train/loss_total": 0.4537397027015686 }, { "epoch": 3.5332620892332356, "step": 13225, "train/loss_ctc": 0.5282762050628662, "train/loss_error": 0.3906014859676361, "train/loss_total": 0.41813644766807556 }, { "epoch": 3.533529254608603, "step": 
13226, "train/loss_ctc": 0.9098590016365051, "train/loss_error": 0.4406162202358246, "train/loss_total": 0.5344647765159607 }, { "epoch": 3.53379641998397, "step": 13227, "train/loss_ctc": 1.1553630828857422, "train/loss_error": 0.43940120935440063, "train/loss_total": 0.5825936198234558 }, { "epoch": 3.5340635853593376, "step": 13228, "train/loss_ctc": 0.5387783646583557, "train/loss_error": 0.39452430605888367, "train/loss_total": 0.42337512969970703 }, { "epoch": 3.534330750734705, "step": 13229, "train/loss_ctc": 1.2264018058776855, "train/loss_error": 0.45736730098724365, "train/loss_total": 0.6111742258071899 }, { "epoch": 3.534597916110072, "grad_norm": 2.529848098754883, "learning_rate": 8.803633449104997e-06, "loss": 0.5057, "step": 13230 }, { "epoch": 3.534597916110072, "step": 13230, "train/loss_ctc": 0.5470000505447388, "train/loss_error": 0.48166725039482117, "train/loss_total": 0.4947338104248047 }, { "epoch": 3.5348650814854397, "step": 13231, "train/loss_ctc": 0.8073861598968506, "train/loss_error": 0.4281369745731354, "train/loss_total": 0.5039868354797363 }, { "epoch": 3.535132246860807, "step": 13232, "train/loss_ctc": 0.9468119144439697, "train/loss_error": 0.4022804796695709, "train/loss_total": 0.5111867785453796 }, { "epoch": 3.535399412236174, "step": 13233, "train/loss_ctc": 0.42187461256980896, "train/loss_error": 0.518880307674408, "train/loss_total": 0.49947917461395264 }, { "epoch": 3.5356665776115417, "step": 13234, "train/loss_ctc": 0.8528153300285339, "train/loss_error": 0.5157367587089539, "train/loss_total": 0.5831524729728699 }, { "epoch": 3.535933742986909, "step": 13235, "train/loss_ctc": 0.4658796787261963, "train/loss_error": 0.4049217104911804, "train/loss_total": 0.417113333940506 }, { "epoch": 3.536200908362276, "step": 13236, "train/loss_ctc": 0.3591950833797455, "train/loss_error": 0.3999970853328705, "train/loss_total": 0.3918367028236389 }, { "epoch": 3.5364680737376437, "step": 13237, "train/loss_ctc": 
0.6647841930389404, "train/loss_error": 0.45012715458869934, "train/loss_total": 0.49305856227874756 }, { "epoch": 3.536735239113011, "step": 13238, "train/loss_ctc": 0.5860305428504944, "train/loss_error": 0.3633624017238617, "train/loss_total": 0.4078960418701172 }, { "epoch": 3.537002404488378, "step": 13239, "train/loss_ctc": 0.9907509088516235, "train/loss_error": 0.4418097734451294, "train/loss_total": 0.5515980124473572 }, { "epoch": 3.5372695698637457, "grad_norm": 1.864382028579712, "learning_rate": 8.787603526582955e-06, "loss": 0.4854, "step": 13240 }, { "epoch": 3.5372695698637457, "step": 13240, "train/loss_ctc": 1.1417348384857178, "train/loss_error": 0.4433247447013855, "train/loss_total": 0.583006739616394 }, { "epoch": 3.537536735239113, "step": 13241, "train/loss_ctc": 0.912571907043457, "train/loss_error": 0.4003424346446991, "train/loss_total": 0.5027883648872375 }, { "epoch": 3.53780390061448, "step": 13242, "train/loss_ctc": 0.5067238807678223, "train/loss_error": 0.4493067264556885, "train/loss_total": 0.46079015731811523 }, { "epoch": 3.5380710659898478, "step": 13243, "train/loss_ctc": 1.0355310440063477, "train/loss_error": 0.4188748598098755, "train/loss_total": 0.5422061085700989 }, { "epoch": 3.538338231365215, "step": 13244, "train/loss_ctc": 1.2487976551055908, "train/loss_error": 0.421968013048172, "train/loss_total": 0.5873339772224426 }, { "epoch": 3.538605396740582, "step": 13245, "train/loss_ctc": 0.587755560874939, "train/loss_error": 0.4432375729084015, "train/loss_total": 0.47214117646217346 }, { "epoch": 3.53887256211595, "step": 13246, "train/loss_ctc": 0.6393715143203735, "train/loss_error": 0.42846307158470154, "train/loss_total": 0.4706447720527649 }, { "epoch": 3.539139727491317, "step": 13247, "train/loss_ctc": 0.7257707715034485, "train/loss_error": 0.4097386598587036, "train/loss_total": 0.47294509410858154 }, { "epoch": 3.5394068928666846, "step": 13248, "train/loss_ctc": 0.44520753622055054, "train/loss_error": 
0.5151681303977966, "train/loss_total": 0.5011759996414185 }, { "epoch": 3.539674058242052, "step": 13249, "train/loss_ctc": 0.4761810898780823, "train/loss_error": 0.4069114029407501, "train/loss_total": 0.42076534032821655 }, { "epoch": 3.539941223617419, "grad_norm": 1.1968623399734497, "learning_rate": 8.771573604060915e-06, "loss": 0.5014, "step": 13250 }, { "epoch": 3.539941223617419, "step": 13250, "train/loss_ctc": 0.4146222472190857, "train/loss_error": 0.3954181671142578, "train/loss_total": 0.3992590010166168 }, { "epoch": 3.5402083889927867, "step": 13251, "train/loss_ctc": 0.6742552518844604, "train/loss_error": 0.43665066361427307, "train/loss_total": 0.4841715693473816 }, { "epoch": 3.540475554368154, "step": 13252, "train/loss_ctc": 0.7579792737960815, "train/loss_error": 0.3920077979564667, "train/loss_total": 0.46520209312438965 }, { "epoch": 3.5407427197435215, "step": 13253, "train/loss_ctc": 0.6599183082580566, "train/loss_error": 0.46140313148498535, "train/loss_total": 0.5011061429977417 }, { "epoch": 3.5410098851188887, "step": 13254, "train/loss_ctc": 0.7111328840255737, "train/loss_error": 0.45930224657058716, "train/loss_total": 0.5096683502197266 }, { "epoch": 3.541277050494256, "step": 13255, "train/loss_ctc": 0.7295949459075928, "train/loss_error": 0.46569907665252686, "train/loss_total": 0.518478274345398 }, { "epoch": 3.5415442158696235, "step": 13256, "train/loss_ctc": 0.9074041843414307, "train/loss_error": 0.4772365093231201, "train/loss_total": 0.5632700324058533 }, { "epoch": 3.5418113812449907, "step": 13257, "train/loss_ctc": 0.8527392148971558, "train/loss_error": 0.48873060941696167, "train/loss_total": 0.5615323781967163 }, { "epoch": 3.542078546620358, "step": 13258, "train/loss_ctc": 0.7615031003952026, "train/loss_error": 0.43535661697387695, "train/loss_total": 0.5005859136581421 }, { "epoch": 3.5423457119957256, "step": 13259, "train/loss_ctc": 1.6815428733825684, "train/loss_error": 0.5403560996055603, 
"train/loss_total": 0.768593430519104 }, { "epoch": 3.5426128773710928, "grad_norm": 2.7117271423339844, "learning_rate": 8.755543681538873e-06, "loss": 0.5272, "step": 13260 }, { "epoch": 3.5426128773710928, "step": 13260, "train/loss_ctc": 0.5140088796615601, "train/loss_error": 0.35206708312034607, "train/loss_total": 0.38445544242858887 }, { "epoch": 3.54288004274646, "step": 13261, "train/loss_ctc": 0.9329652786254883, "train/loss_error": 0.5105573534965515, "train/loss_total": 0.5950389504432678 }, { "epoch": 3.5431472081218276, "step": 13262, "train/loss_ctc": 0.3031194806098938, "train/loss_error": 0.4457716643810272, "train/loss_total": 0.4172412157058716 }, { "epoch": 3.543414373497195, "step": 13263, "train/loss_ctc": 0.6069645881652832, "train/loss_error": 0.39813148975372314, "train/loss_total": 0.43989813327789307 }, { "epoch": 3.543681538872562, "step": 13264, "train/loss_ctc": 0.6737464666366577, "train/loss_error": 0.44790083169937134, "train/loss_total": 0.49306997656822205 }, { "epoch": 3.5439487042479296, "step": 13265, "train/loss_ctc": 0.909005343914032, "train/loss_error": 0.43917927145957947, "train/loss_total": 0.533144474029541 }, { "epoch": 3.544215869623297, "step": 13266, "train/loss_ctc": 0.6312872171401978, "train/loss_error": 0.472261905670166, "train/loss_total": 0.5040669441223145 }, { "epoch": 3.544483034998664, "step": 13267, "train/loss_ctc": 0.39826613664627075, "train/loss_error": 0.39783260226249695, "train/loss_total": 0.39791932702064514 }, { "epoch": 3.5447502003740317, "step": 13268, "train/loss_ctc": 1.3895570039749146, "train/loss_error": 0.4166390895843506, "train/loss_total": 0.6112226843833923 }, { "epoch": 3.545017365749399, "step": 13269, "train/loss_ctc": 0.7488133907318115, "train/loss_error": 0.35688161849975586, "train/loss_total": 0.43526798486709595 }, { "epoch": 3.545284531124766, "grad_norm": 3.5468966960906982, "learning_rate": 8.739513759016831e-06, "loss": 0.4811, "step": 13270 }, { "epoch": 
3.545284531124766, "step": 13270, "train/loss_ctc": 0.6131943464279175, "train/loss_error": 0.42904964089393616, "train/loss_total": 0.46587860584259033 }, { "epoch": 3.5455516965001337, "step": 13271, "train/loss_ctc": 0.23725494742393494, "train/loss_error": 0.4437086582183838, "train/loss_total": 0.402417927980423 }, { "epoch": 3.545818861875501, "step": 13272, "train/loss_ctc": 1.6005494594573975, "train/loss_error": 0.6654163599014282, "train/loss_total": 0.8524429798126221 }, { "epoch": 3.546086027250868, "step": 13273, "train/loss_ctc": 0.736522912979126, "train/loss_error": 0.4658423066139221, "train/loss_total": 0.519978404045105 }, { "epoch": 3.5463531926262357, "step": 13274, "train/loss_ctc": 0.5567613840103149, "train/loss_error": 0.48130345344543457, "train/loss_total": 0.4963950514793396 }, { "epoch": 3.546620358001603, "step": 13275, "train/loss_ctc": 1.3452997207641602, "train/loss_error": 0.4372255802154541, "train/loss_total": 0.6188404560089111 }, { "epoch": 3.54688752337697, "step": 13276, "train/loss_ctc": 0.5194236040115356, "train/loss_error": 0.4713258743286133, "train/loss_total": 0.4809454381465912 }, { "epoch": 3.5471546887523377, "step": 13277, "train/loss_ctc": 0.5847543478012085, "train/loss_error": 0.45111721754074097, "train/loss_total": 0.4778446555137634 }, { "epoch": 3.547421854127705, "step": 13278, "train/loss_ctc": 0.3729328513145447, "train/loss_error": 0.41046014428138733, "train/loss_total": 0.40295469760894775 }, { "epoch": 3.547689019503072, "step": 13279, "train/loss_ctc": 0.563173234462738, "train/loss_error": 0.396698534488678, "train/loss_total": 0.42999348044395447 }, { "epoch": 3.54795618487844, "grad_norm": 2.5182085037231445, "learning_rate": 8.72348383649479e-06, "loss": 0.5148, "step": 13280 }, { "epoch": 3.54795618487844, "step": 13280, "train/loss_ctc": 1.03574538230896, "train/loss_error": 0.4746401309967041, "train/loss_total": 0.5868611931800842 }, { "epoch": 3.548223350253807, "step": 13281, 
"train/loss_ctc": 0.3849160671234131, "train/loss_error": 0.5153800249099731, "train/loss_total": 0.48928722739219666 }, { "epoch": 3.5484905156291746, "step": 13282, "train/loss_ctc": 0.474690318107605, "train/loss_error": 0.41885867714881897, "train/loss_total": 0.43002501130104065 }, { "epoch": 3.548757681004542, "step": 13283, "train/loss_ctc": 0.9687978029251099, "train/loss_error": 0.4994528293609619, "train/loss_total": 0.5933218002319336 }, { "epoch": 3.549024846379909, "step": 13284, "train/loss_ctc": 1.1523323059082031, "train/loss_error": 0.4278731048107147, "train/loss_total": 0.5727649331092834 }, { "epoch": 3.5492920117552766, "step": 13285, "train/loss_ctc": 0.4610760509967804, "train/loss_error": 0.4398299753665924, "train/loss_total": 0.44407919049263 }, { "epoch": 3.549559177130644, "step": 13286, "train/loss_ctc": 0.5805387496948242, "train/loss_error": 0.40846216678619385, "train/loss_total": 0.44287750124931335 }, { "epoch": 3.5498263425060115, "step": 13287, "train/loss_ctc": 1.399172067642212, "train/loss_error": 0.43985113501548767, "train/loss_total": 0.6317152976989746 }, { "epoch": 3.5500935078813787, "step": 13288, "train/loss_ctc": 0.8211328387260437, "train/loss_error": 0.4483153223991394, "train/loss_total": 0.5228788256645203 }, { "epoch": 3.550360673256746, "step": 13289, "train/loss_ctc": 1.0075030326843262, "train/loss_error": 0.43791577219963074, "train/loss_total": 0.5518332123756409 }, { "epoch": 3.5506278386321135, "grad_norm": 2.3630259037017822, "learning_rate": 8.707453913972749e-06, "loss": 0.5266, "step": 13290 }, { "epoch": 3.5506278386321135, "step": 13290, "train/loss_ctc": 0.5180773735046387, "train/loss_error": 0.4519352316856384, "train/loss_total": 0.4651636481285095 }, { "epoch": 3.5508950040074807, "step": 13291, "train/loss_ctc": 0.8980478644371033, "train/loss_error": 0.49929356575012207, "train/loss_total": 0.5790444612503052 }, { "epoch": 3.551162169382848, "step": 13292, "train/loss_ctc": 1.1443442106246948, 
"train/loss_error": 0.42759695649147034, "train/loss_total": 0.570946455001831 }, { "epoch": 3.5514293347582155, "step": 13293, "train/loss_ctc": 0.38855403661727905, "train/loss_error": 0.42520272731781006, "train/loss_total": 0.41787299513816833 }, { "epoch": 3.5516965001335827, "step": 13294, "train/loss_ctc": 0.338655948638916, "train/loss_error": 0.3911251127719879, "train/loss_total": 0.38063129782676697 }, { "epoch": 3.55196366550895, "step": 13295, "train/loss_ctc": 0.31489402055740356, "train/loss_error": 0.338789165019989, "train/loss_total": 0.33401015400886536 }, { "epoch": 3.5522308308843176, "step": 13296, "train/loss_ctc": 0.7058554887771606, "train/loss_error": 0.3224800229072571, "train/loss_total": 0.3991551101207733 }, { "epoch": 3.5524979962596848, "step": 13297, "train/loss_ctc": 0.6830296516418457, "train/loss_error": 0.43082737922668457, "train/loss_total": 0.48126786947250366 }, { "epoch": 3.552765161635052, "step": 13298, "train/loss_ctc": 0.8817700743675232, "train/loss_error": 0.44088682532310486, "train/loss_total": 0.5290634632110596 }, { "epoch": 3.5530323270104196, "step": 13299, "train/loss_ctc": 0.6075806617736816, "train/loss_error": 0.45400020480155945, "train/loss_total": 0.4847162961959839 }, { "epoch": 3.553299492385787, "grad_norm": 2.813570976257324, "learning_rate": 8.691423991450707e-06, "loss": 0.4642, "step": 13300 }, { "epoch": 3.553299492385787, "step": 13300, "train/loss_ctc": 0.2882034182548523, "train/loss_error": 0.4723156988620758, "train/loss_total": 0.43549323081970215 }, { "epoch": 3.553566657761154, "step": 13301, "train/loss_ctc": 0.9764593839645386, "train/loss_error": 0.5144694447517395, "train/loss_total": 0.6068674325942993 }, { "epoch": 3.5538338231365216, "step": 13302, "train/loss_ctc": 0.48827582597732544, "train/loss_error": 0.3342879116535187, "train/loss_total": 0.36508551239967346 }, { "epoch": 3.554100988511889, "step": 13303, "train/loss_ctc": 0.5718587040901184, "train/loss_error": 
0.5004715919494629, "train/loss_total": 0.5147489905357361 }, { "epoch": 3.554368153887256, "step": 13304, "train/loss_ctc": 0.5030977129936218, "train/loss_error": 0.41517552733421326, "train/loss_total": 0.43275997042655945 }, { "epoch": 3.5546353192626237, "step": 13305, "train/loss_ctc": 0.695289134979248, "train/loss_error": 0.4625919461250305, "train/loss_total": 0.5091313719749451 }, { "epoch": 3.554902484637991, "step": 13306, "train/loss_ctc": 1.3668769598007202, "train/loss_error": 0.42174971103668213, "train/loss_total": 0.6107751727104187 }, { "epoch": 3.555169650013358, "step": 13307, "train/loss_ctc": 0.36272040009498596, "train/loss_error": 0.45413798093795776, "train/loss_total": 0.4358544945716858 }, { "epoch": 3.5554368153887257, "step": 13308, "train/loss_ctc": 0.482990026473999, "train/loss_error": 0.460837721824646, "train/loss_total": 0.46526819467544556 }, { "epoch": 3.555703980764093, "step": 13309, "train/loss_ctc": 0.6947497129440308, "train/loss_error": 0.4449894428253174, "train/loss_total": 0.4949415326118469 }, { "epoch": 3.55597114613946, "grad_norm": 1.7171140909194946, "learning_rate": 8.675394068928668e-06, "loss": 0.4871, "step": 13310 }, { "epoch": 3.55597114613946, "step": 13310, "train/loss_ctc": 1.2491190433502197, "train/loss_error": 0.4968428611755371, "train/loss_total": 0.6472980976104736 }, { "epoch": 3.5562383115148277, "step": 13311, "train/loss_ctc": 0.4960882067680359, "train/loss_error": 0.4512338638305664, "train/loss_total": 0.46020475029945374 }, { "epoch": 3.556505476890195, "step": 13312, "train/loss_ctc": 1.2006559371948242, "train/loss_error": 0.4901217818260193, "train/loss_total": 0.6322286128997803 }, { "epoch": 3.556772642265562, "step": 13313, "train/loss_ctc": 0.591519296169281, "train/loss_error": 0.5022361278533936, "train/loss_total": 0.520092785358429 }, { "epoch": 3.5570398076409298, "step": 13314, "train/loss_ctc": 0.43753504753112793, "train/loss_error": 0.43738028407096863, "train/loss_total": 
0.43741124868392944 }, { "epoch": 3.557306973016297, "step": 13315, "train/loss_ctc": 0.8571306467056274, "train/loss_error": 0.5595467686653137, "train/loss_total": 0.6190635561943054 }, { "epoch": 3.5575741383916646, "step": 13316, "train/loss_ctc": 1.161875605583191, "train/loss_error": 0.44396817684173584, "train/loss_total": 0.5875496864318848 }, { "epoch": 3.557841303767032, "step": 13317, "train/loss_ctc": 0.293110728263855, "train/loss_error": 0.37620657682418823, "train/loss_total": 0.3595874309539795 }, { "epoch": 3.5581084691423994, "step": 13318, "train/loss_ctc": 0.6512329578399658, "train/loss_error": 0.42341184616088867, "train/loss_total": 0.46897608041763306 }, { "epoch": 3.5583756345177666, "step": 13319, "train/loss_ctc": 0.5837562680244446, "train/loss_error": 0.4414975345134735, "train/loss_total": 0.46994927525520325 }, { "epoch": 3.558642799893134, "grad_norm": 1.1475582122802734, "learning_rate": 8.659364146406626e-06, "loss": 0.5202, "step": 13320 }, { "epoch": 3.558642799893134, "step": 13320, "train/loss_ctc": 0.5877142548561096, "train/loss_error": 0.45766276121139526, "train/loss_total": 0.4836730659008026 }, { "epoch": 3.5589099652685015, "step": 13321, "train/loss_ctc": 0.8329605460166931, "train/loss_error": 0.5499914288520813, "train/loss_total": 0.6065852642059326 }, { "epoch": 3.5591771306438686, "step": 13322, "train/loss_ctc": 0.9631609916687012, "train/loss_error": 0.41852298378944397, "train/loss_total": 0.5274505615234375 }, { "epoch": 3.559444296019236, "step": 13323, "train/loss_ctc": 0.7173154950141907, "train/loss_error": 0.4811564087867737, "train/loss_total": 0.528388261795044 }, { "epoch": 3.5597114613946035, "step": 13324, "train/loss_ctc": 0.8090531826019287, "train/loss_error": 0.4382784962654114, "train/loss_total": 0.5124334096908569 }, { "epoch": 3.5599786267699707, "step": 13325, "train/loss_ctc": 0.409476101398468, "train/loss_error": 0.43629252910614014, "train/loss_total": 0.4309292733669281 }, { "epoch": 
3.560245792145338, "step": 13326, "train/loss_ctc": 0.7084975242614746, "train/loss_error": 0.44452357292175293, "train/loss_total": 0.4973183870315552 }, { "epoch": 3.5605129575207055, "step": 13327, "train/loss_ctc": 0.9327380657196045, "train/loss_error": 0.4568440020084381, "train/loss_total": 0.5520228147506714 }, { "epoch": 3.5607801228960727, "step": 13328, "train/loss_ctc": 0.8865667581558228, "train/loss_error": 0.41638877987861633, "train/loss_total": 0.5104243755340576 }, { "epoch": 3.56104728827144, "step": 13329, "train/loss_ctc": 0.6664066910743713, "train/loss_error": 0.4381684362888336, "train/loss_total": 0.48381608724594116 }, { "epoch": 3.5613144536468075, "grad_norm": 1.7802150249481201, "learning_rate": 8.643334223884585e-06, "loss": 0.5133, "step": 13330 }, { "epoch": 3.5613144536468075, "step": 13330, "train/loss_ctc": 0.7391618490219116, "train/loss_error": 0.41431400179862976, "train/loss_total": 0.47928357124328613 }, { "epoch": 3.5615816190221747, "step": 13331, "train/loss_ctc": 0.6602696180343628, "train/loss_error": 0.39120596647262573, "train/loss_total": 0.4450187087059021 }, { "epoch": 3.561848784397542, "step": 13332, "train/loss_ctc": 0.8768008351325989, "train/loss_error": 0.43666189908981323, "train/loss_total": 0.5246896743774414 }, { "epoch": 3.5621159497729096, "step": 13333, "train/loss_ctc": 1.1906991004943848, "train/loss_error": 0.42936864495277405, "train/loss_total": 0.5816347599029541 }, { "epoch": 3.5623831151482768, "step": 13334, "train/loss_ctc": 0.5235034227371216, "train/loss_error": 0.43297436833381653, "train/loss_total": 0.45108017325401306 }, { "epoch": 3.562650280523644, "step": 13335, "train/loss_ctc": 1.2426486015319824, "train/loss_error": 0.4370654225349426, "train/loss_total": 0.5981820821762085 }, { "epoch": 3.5629174458990116, "step": 13336, "train/loss_ctc": 0.6714645028114319, "train/loss_error": 0.504885733127594, "train/loss_total": 0.5382014513015747 }, { "epoch": 3.563184611274379, "step": 
13337, "train/loss_ctc": 0.7032929062843323, "train/loss_error": 0.4276575744152069, "train/loss_total": 0.4827846586704254 }, { "epoch": 3.563451776649746, "step": 13338, "train/loss_ctc": 0.45068156719207764, "train/loss_error": 0.47981178760528564, "train/loss_total": 0.4739857614040375 }, { "epoch": 3.5637189420251136, "step": 13339, "train/loss_ctc": 0.6031292676925659, "train/loss_error": 0.42841729521751404, "train/loss_total": 0.46335968375205994 }, { "epoch": 3.563986107400481, "grad_norm": 1.8839972019195557, "learning_rate": 8.627304301362544e-06, "loss": 0.5038, "step": 13340 }, { "epoch": 3.563986107400481, "step": 13340, "train/loss_ctc": 0.5328242182731628, "train/loss_error": 0.3670375645160675, "train/loss_total": 0.40019491314888 }, { "epoch": 3.564253272775848, "step": 13341, "train/loss_ctc": 0.8459222316741943, "train/loss_error": 0.5106613039970398, "train/loss_total": 0.5777134895324707 }, { "epoch": 3.5645204381512157, "step": 13342, "train/loss_ctc": 0.5962079763412476, "train/loss_error": 0.42750242352485657, "train/loss_total": 0.46124354004859924 }, { "epoch": 3.564787603526583, "step": 13343, "train/loss_ctc": 0.572937548160553, "train/loss_error": 0.3719618320465088, "train/loss_total": 0.41215699911117554 }, { "epoch": 3.56505476890195, "step": 13344, "train/loss_ctc": 0.590178370475769, "train/loss_error": 0.467251181602478, "train/loss_total": 0.49183663725852966 }, { "epoch": 3.5653219342773177, "step": 13345, "train/loss_ctc": 1.2655704021453857, "train/loss_error": 0.4501088857650757, "train/loss_total": 0.6132012009620667 }, { "epoch": 3.565589099652685, "step": 13346, "train/loss_ctc": 0.6788080930709839, "train/loss_error": 0.3707326650619507, "train/loss_total": 0.43234774470329285 }, { "epoch": 3.5658562650280525, "step": 13347, "train/loss_ctc": 0.5868442058563232, "train/loss_error": 0.4160553812980652, "train/loss_total": 0.45021316409111023 }, { "epoch": 3.5661234304034197, "step": 13348, "train/loss_ctc": 
0.6174392700195312, "train/loss_error": 0.453762024641037, "train/loss_total": 0.4864974915981293 }, { "epoch": 3.566390595778787, "step": 13349, "train/loss_ctc": 1.396522045135498, "train/loss_error": 0.48814547061920166, "train/loss_total": 0.6698207855224609 }, { "epoch": 3.5666577611541546, "grad_norm": 3.0418171882629395, "learning_rate": 8.611274378840502e-06, "loss": 0.4995, "step": 13350 }, { "epoch": 3.5666577611541546, "step": 13350, "train/loss_ctc": 0.7031111121177673, "train/loss_error": 0.41819316148757935, "train/loss_total": 0.47517675161361694 }, { "epoch": 3.5669249265295218, "step": 13351, "train/loss_ctc": 0.6407632827758789, "train/loss_error": 0.47264984250068665, "train/loss_total": 0.506272554397583 }, { "epoch": 3.5671920919048894, "step": 13352, "train/loss_ctc": 0.5397807955741882, "train/loss_error": 0.4056529104709625, "train/loss_total": 0.43247851729393005 }, { "epoch": 3.5674592572802566, "step": 13353, "train/loss_ctc": 0.4274049997329712, "train/loss_error": 0.44484883546829224, "train/loss_total": 0.4413600564002991 }, { "epoch": 3.567726422655624, "step": 13354, "train/loss_ctc": 0.6932395696640015, "train/loss_error": 0.4230961501598358, "train/loss_total": 0.4771248400211334 }, { "epoch": 3.5679935880309914, "step": 13355, "train/loss_ctc": 0.38936206698417664, "train/loss_error": 0.5214941501617432, "train/loss_total": 0.4950677454471588 }, { "epoch": 3.5682607534063586, "step": 13356, "train/loss_ctc": 0.5842000842094421, "train/loss_error": 0.41893380880355835, "train/loss_total": 0.451987087726593 }, { "epoch": 3.568527918781726, "step": 13357, "train/loss_ctc": 0.6932228803634644, "train/loss_error": 0.4563797414302826, "train/loss_total": 0.5037484169006348 }, { "epoch": 3.5687950841570935, "step": 13358, "train/loss_ctc": 0.8769639730453491, "train/loss_error": 0.4662967622280121, "train/loss_total": 0.5484302043914795 }, { "epoch": 3.5690622495324607, "step": 13359, "train/loss_ctc": 0.4306487441062927, 
"train/loss_error": 0.39399614930152893, "train/loss_total": 0.4013266861438751 }, { "epoch": 3.569329414907828, "grad_norm": 2.073622941970825, "learning_rate": 8.59524445631846e-06, "loss": 0.4733, "step": 13360 }, { "epoch": 3.569329414907828, "step": 13360, "train/loss_ctc": 1.780263900756836, "train/loss_error": 0.3919748067855835, "train/loss_total": 0.6696326732635498 }, { "epoch": 3.5695965802831955, "step": 13361, "train/loss_ctc": 0.700704038143158, "train/loss_error": 0.43591347336769104, "train/loss_total": 0.48887157440185547 }, { "epoch": 3.5698637456585627, "step": 13362, "train/loss_ctc": 0.39284661412239075, "train/loss_error": 0.5199750065803528, "train/loss_total": 0.49454933404922485 }, { "epoch": 3.57013091103393, "step": 13363, "train/loss_ctc": 0.6583207845687866, "train/loss_error": 0.4596872925758362, "train/loss_total": 0.49941399693489075 }, { "epoch": 3.5703980764092975, "step": 13364, "train/loss_ctc": 0.6707834601402283, "train/loss_error": 0.4265345633029938, "train/loss_total": 0.47538435459136963 }, { "epoch": 3.5706652417846647, "step": 13365, "train/loss_ctc": 0.4657827615737915, "train/loss_error": 0.4482358396053314, "train/loss_total": 0.4517452120780945 }, { "epoch": 3.570932407160032, "step": 13366, "train/loss_ctc": 0.8864725828170776, "train/loss_error": 0.4326484501361847, "train/loss_total": 0.5234133005142212 }, { "epoch": 3.5711995725353995, "step": 13367, "train/loss_ctc": 0.9588735103607178, "train/loss_error": 0.42686107754707336, "train/loss_total": 0.5332635641098022 }, { "epoch": 3.5714667379107667, "step": 13368, "train/loss_ctc": 0.7193624973297119, "train/loss_error": 0.39711612462997437, "train/loss_total": 0.46156540513038635 }, { "epoch": 3.571733903286134, "step": 13369, "train/loss_ctc": 0.8093376159667969, "train/loss_error": 0.4863755404949188, "train/loss_total": 0.5509679317474365 }, { "epoch": 3.5720010686615016, "grad_norm": 2.107886791229248, "learning_rate": 8.57921453379642e-06, "loss": 0.5149, 
"step": 13370 }, { "epoch": 3.5720010686615016, "step": 13370, "train/loss_ctc": 1.070099115371704, "train/loss_error": 0.4577837288379669, "train/loss_total": 0.5802468061447144 }, { "epoch": 3.5722682340368688, "step": 13371, "train/loss_ctc": 0.9060714244842529, "train/loss_error": 0.43250036239624023, "train/loss_total": 0.5272145867347717 }, { "epoch": 3.572535399412236, "step": 13372, "train/loss_ctc": 0.5624162554740906, "train/loss_error": 0.39890605211257935, "train/loss_total": 0.431608110666275 }, { "epoch": 3.5728025647876036, "step": 13373, "train/loss_ctc": 0.2926557958126068, "train/loss_error": 0.43693244457244873, "train/loss_total": 0.4080771207809448 }, { "epoch": 3.573069730162971, "step": 13374, "train/loss_ctc": 0.42197397351264954, "train/loss_error": 0.4830789566040039, "train/loss_total": 0.47085797786712646 }, { "epoch": 3.573336895538338, "step": 13375, "train/loss_ctc": 0.7065613269805908, "train/loss_error": 0.45588982105255127, "train/loss_total": 0.5060241222381592 }, { "epoch": 3.5736040609137056, "step": 13376, "train/loss_ctc": 0.6804695129394531, "train/loss_error": 0.5276517271995544, "train/loss_total": 0.5582152605056763 }, { "epoch": 3.573871226289073, "step": 13377, "train/loss_ctc": 1.03912353515625, "train/loss_error": 0.45942753553390503, "train/loss_total": 0.575366735458374 }, { "epoch": 3.57413839166444, "step": 13378, "train/loss_ctc": 0.5425746440887451, "train/loss_error": 0.48336878418922424, "train/loss_total": 0.4952099621295929 }, { "epoch": 3.5744055570398077, "step": 13379, "train/loss_ctc": 0.6120988130569458, "train/loss_error": 0.41143110394477844, "train/loss_total": 0.4515646696090698 }, { "epoch": 3.574672722415175, "grad_norm": 7.050710678100586, "learning_rate": 8.563184611274378e-06, "loss": 0.5004, "step": 13380 }, { "epoch": 3.574672722415175, "step": 13380, "train/loss_ctc": 0.913608968257904, "train/loss_error": 0.4233143627643585, "train/loss_total": 0.5213732719421387 }, { "epoch": 
3.5749398877905425, "step": 13381, "train/loss_ctc": 0.6951794028282166, "train/loss_error": 0.3918984830379486, "train/loss_total": 0.4525546729564667 }, { "epoch": 3.5752070531659097, "step": 13382, "train/loss_ctc": 1.1627486944198608, "train/loss_error": 0.4718020558357239, "train/loss_total": 0.6099913716316223 }, { "epoch": 3.575474218541277, "step": 13383, "train/loss_ctc": 0.3990147113800049, "train/loss_error": 0.39378565549850464, "train/loss_total": 0.39483147859573364 }, { "epoch": 3.5757413839166445, "step": 13384, "train/loss_ctc": 0.49304962158203125, "train/loss_error": 0.3992120027542114, "train/loss_total": 0.41797953844070435 }, { "epoch": 3.5760085492920117, "step": 13385, "train/loss_ctc": 0.6451514363288879, "train/loss_error": 0.4441571533679962, "train/loss_total": 0.48435601592063904 }, { "epoch": 3.5762757146673794, "step": 13386, "train/loss_ctc": 1.1884338855743408, "train/loss_error": 0.4170304834842682, "train/loss_total": 0.5713111758232117 }, { "epoch": 3.5765428800427466, "step": 13387, "train/loss_ctc": 0.9885215759277344, "train/loss_error": 0.42287951707839966, "train/loss_total": 0.5360079407691956 }, { "epoch": 3.5768100454181138, "step": 13388, "train/loss_ctc": 1.393505573272705, "train/loss_error": 0.4125211834907532, "train/loss_total": 0.6087180376052856 }, { "epoch": 3.5770772107934814, "step": 13389, "train/loss_ctc": 0.6879534721374512, "train/loss_error": 0.5061433911323547, "train/loss_total": 0.5425053834915161 }, { "epoch": 3.5773443761688486, "grad_norm": 1.8564685583114624, "learning_rate": 8.547154688752336e-06, "loss": 0.514, "step": 13390 }, { "epoch": 3.5773443761688486, "step": 13390, "train/loss_ctc": 0.8621748685836792, "train/loss_error": 0.4206812381744385, "train/loss_total": 0.5089799761772156 }, { "epoch": 3.577611541544216, "step": 13391, "train/loss_ctc": 0.4111671447753906, "train/loss_error": 0.4373319745063782, "train/loss_total": 0.43209901452064514 }, { "epoch": 3.5778787069195834, "step": 
13392, "train/loss_ctc": 0.7923488616943359, "train/loss_error": 0.44559574127197266, "train/loss_total": 0.5149464011192322 }, { "epoch": 3.5781458722949506, "step": 13393, "train/loss_ctc": 0.6383990049362183, "train/loss_error": 0.4264119565486908, "train/loss_total": 0.4688093662261963 }, { "epoch": 3.578413037670318, "step": 13394, "train/loss_ctc": 0.6788524389266968, "train/loss_error": 0.4545879364013672, "train/loss_total": 0.49944084882736206 }, { "epoch": 3.5786802030456855, "step": 13395, "train/loss_ctc": 0.7306464910507202, "train/loss_error": 0.4562903642654419, "train/loss_total": 0.5111615657806396 }, { "epoch": 3.5789473684210527, "step": 13396, "train/loss_ctc": 1.3065087795257568, "train/loss_error": 0.4597892761230469, "train/loss_total": 0.6291332244873047 }, { "epoch": 3.57921453379642, "step": 13397, "train/loss_ctc": 0.30004894733428955, "train/loss_error": 0.5077953338623047, "train/loss_total": 0.4662460386753082 }, { "epoch": 3.5794816991717875, "step": 13398, "train/loss_ctc": 0.4986855387687683, "train/loss_error": 0.4085944592952728, "train/loss_total": 0.4266126751899719 }, { "epoch": 3.5797488645471547, "step": 13399, "train/loss_ctc": 0.5562841892242432, "train/loss_error": 0.47233662009239197, "train/loss_total": 0.48912614583969116 }, { "epoch": 3.580016029922522, "grad_norm": 3.4748241901397705, "learning_rate": 8.531124766230298e-06, "loss": 0.4947, "step": 13400 }, { "epoch": 3.580016029922522, "step": 13400, "train/loss_ctc": 0.40392059087753296, "train/loss_error": 0.4251459836959839, "train/loss_total": 0.4209009110927582 }, { "epoch": 3.5802831952978895, "step": 13401, "train/loss_ctc": 0.6235188841819763, "train/loss_error": 0.4951556921005249, "train/loss_total": 0.520828366279602 }, { "epoch": 3.5805503606732567, "step": 13402, "train/loss_ctc": 0.3224085569381714, "train/loss_error": 0.4348616600036621, "train/loss_total": 0.41237103939056396 }, { "epoch": 3.580817526048624, "step": 13403, "train/loss_ctc": 
0.6208118796348572, "train/loss_error": 0.3794301748275757, "train/loss_total": 0.4277065098285675 }, { "epoch": 3.5810846914239916, "step": 13404, "train/loss_ctc": 0.6441084146499634, "train/loss_error": 0.42228397727012634, "train/loss_total": 0.4666488766670227 }, { "epoch": 3.5813518567993587, "step": 13405, "train/loss_ctc": 0.41806453466415405, "train/loss_error": 0.403265118598938, "train/loss_total": 0.4062250256538391 }, { "epoch": 3.581619022174726, "step": 13406, "train/loss_ctc": 0.9134665727615356, "train/loss_error": 0.42785435914993286, "train/loss_total": 0.5249767899513245 }, { "epoch": 3.5818861875500936, "step": 13407, "train/loss_ctc": 0.5413029789924622, "train/loss_error": 0.428031325340271, "train/loss_total": 0.45068567991256714 }, { "epoch": 3.5821533529254608, "step": 13408, "train/loss_ctc": 0.5352232456207275, "train/loss_error": 0.48506930470466614, "train/loss_total": 0.49510008096694946 }, { "epoch": 3.582420518300828, "step": 13409, "train/loss_ctc": 0.5213428735733032, "train/loss_error": 0.4869576394557953, "train/loss_total": 0.4938347041606903 }, { "epoch": 3.5826876836761956, "grad_norm": 2.7562317848205566, "learning_rate": 8.515094843708256e-06, "loss": 0.4619, "step": 13410 }, { "epoch": 3.5826876836761956, "step": 13410, "train/loss_ctc": 0.9353155493736267, "train/loss_error": 0.4630144238471985, "train/loss_total": 0.557474672794342 }, { "epoch": 3.582954849051563, "step": 13411, "train/loss_ctc": 0.27354344725608826, "train/loss_error": 0.3703502118587494, "train/loss_total": 0.35098886489868164 }, { "epoch": 3.58322201442693, "step": 13412, "train/loss_ctc": 0.5015378594398499, "train/loss_error": 0.4066583514213562, "train/loss_total": 0.4256342649459839 }, { "epoch": 3.5834891798022976, "step": 13413, "train/loss_ctc": 0.35474881529808044, "train/loss_error": 0.3907572031021118, "train/loss_total": 0.38355553150177 }, { "epoch": 3.583756345177665, "step": 13414, "train/loss_ctc": 0.9269235134124756, 
"train/loss_error": 0.49255627393722534, "train/loss_total": 0.5794297456741333 }, { "epoch": 3.5840235105530325, "step": 13415, "train/loss_ctc": 0.7202786803245544, "train/loss_error": 0.43508675694465637, "train/loss_total": 0.49212515354156494 }, { "epoch": 3.5842906759283997, "step": 13416, "train/loss_ctc": 0.20032143592834473, "train/loss_error": 0.4093663990497589, "train/loss_total": 0.3675574064254761 }, { "epoch": 3.584557841303767, "step": 13417, "train/loss_ctc": 0.9108838438987732, "train/loss_error": 0.40383344888687134, "train/loss_total": 0.5052435398101807 }, { "epoch": 3.5848250066791345, "step": 13418, "train/loss_ctc": 0.4025702476501465, "train/loss_error": 0.4689081907272339, "train/loss_total": 0.45564061403274536 }, { "epoch": 3.5850921720545017, "step": 13419, "train/loss_ctc": 1.4984840154647827, "train/loss_error": 0.44780588150024414, "train/loss_total": 0.6579415202140808 }, { "epoch": 3.5853593374298693, "grad_norm": 2.1300735473632812, "learning_rate": 8.499064921186214e-06, "loss": 0.4776, "step": 13420 }, { "epoch": 3.5853593374298693, "step": 13420, "train/loss_ctc": 0.6377549171447754, "train/loss_error": 0.41137072443962097, "train/loss_total": 0.4566475749015808 }, { "epoch": 3.5856265028052365, "step": 13421, "train/loss_ctc": 0.6372895240783691, "train/loss_error": 0.45051488280296326, "train/loss_total": 0.4878697991371155 }, { "epoch": 3.5858936681806037, "step": 13422, "train/loss_ctc": 0.6007643342018127, "train/loss_error": 0.4943321943283081, "train/loss_total": 0.515618622303009 }, { "epoch": 3.5861608335559714, "step": 13423, "train/loss_ctc": 0.5724081993103027, "train/loss_error": 0.39314836263656616, "train/loss_total": 0.4290003180503845 }, { "epoch": 3.5864279989313386, "step": 13424, "train/loss_ctc": 0.4922638535499573, "train/loss_error": 0.44781041145324707, "train/loss_total": 0.4567010998725891 }, { "epoch": 3.5866951643067058, "step": 13425, "train/loss_ctc": 1.0445787906646729, "train/loss_error": 
0.42645061016082764, "train/loss_total": 0.5500762462615967 }, { "epoch": 3.5869623296820734, "step": 13426, "train/loss_ctc": 0.8039541840553284, "train/loss_error": 0.4274663031101227, "train/loss_total": 0.5027638673782349 }, { "epoch": 3.5872294950574406, "step": 13427, "train/loss_ctc": 0.6947742700576782, "train/loss_error": 0.4512024521827698, "train/loss_total": 0.49991685152053833 }, { "epoch": 3.587496660432808, "step": 13428, "train/loss_ctc": 0.9140220284461975, "train/loss_error": 0.44033166766166687, "train/loss_total": 0.5350697040557861 }, { "epoch": 3.5877638258081754, "step": 13429, "train/loss_ctc": 0.4761815071105957, "train/loss_error": 0.43984124064445496, "train/loss_total": 0.44710931181907654 }, { "epoch": 3.5880309911835426, "grad_norm": 2.5159752368927, "learning_rate": 8.483034998664174e-06, "loss": 0.4881, "step": 13430 }, { "epoch": 3.5880309911835426, "step": 13430, "train/loss_ctc": 0.4254649579524994, "train/loss_error": 0.3969578444957733, "train/loss_total": 0.40265926718711853 }, { "epoch": 3.58829815655891, "step": 13431, "train/loss_ctc": 0.6302726864814758, "train/loss_error": 0.46115419268608093, "train/loss_total": 0.4949778914451599 }, { "epoch": 3.5885653219342775, "step": 13432, "train/loss_ctc": 0.7397078275680542, "train/loss_error": 0.3306369483470917, "train/loss_total": 0.4124511480331421 }, { "epoch": 3.5888324873096447, "step": 13433, "train/loss_ctc": 0.49573004245758057, "train/loss_error": 0.4208691418170929, "train/loss_total": 0.43584132194519043 }, { "epoch": 3.589099652685012, "step": 13434, "train/loss_ctc": 0.505723237991333, "train/loss_error": 0.41275936365127563, "train/loss_total": 0.4313521385192871 }, { "epoch": 3.5893668180603795, "step": 13435, "train/loss_ctc": 1.1681246757507324, "train/loss_error": 0.4994053244590759, "train/loss_total": 0.6331492066383362 }, { "epoch": 3.5896339834357467, "step": 13436, "train/loss_ctc": 0.7877804040908813, "train/loss_error": 0.40021178126335144, 
"train/loss_total": 0.4777255058288574 }, { "epoch": 3.589901148811114, "step": 13437, "train/loss_ctc": 0.3703102469444275, "train/loss_error": 0.47335851192474365, "train/loss_total": 0.4527488648891449 }, { "epoch": 3.5901683141864815, "step": 13438, "train/loss_ctc": 0.4147287607192993, "train/loss_error": 0.4278887212276459, "train/loss_total": 0.42525675892829895 }, { "epoch": 3.5904354795618487, "step": 13439, "train/loss_ctc": 1.1267375946044922, "train/loss_error": 0.4319171905517578, "train/loss_total": 0.5708812475204468 }, { "epoch": 3.590702644937216, "grad_norm": 1.3405390977859497, "learning_rate": 8.467005076142132e-06, "loss": 0.4737, "step": 13440 }, { "epoch": 3.590702644937216, "step": 13440, "train/loss_ctc": 0.7361916899681091, "train/loss_error": 0.4458097815513611, "train/loss_total": 0.5038861632347107 }, { "epoch": 3.5909698103125836, "step": 13441, "train/loss_ctc": 0.46468257904052734, "train/loss_error": 0.42447689175605774, "train/loss_total": 0.43251803517341614 }, { "epoch": 3.5912369756879507, "step": 13442, "train/loss_ctc": 0.6823762655258179, "train/loss_error": 0.4878043532371521, "train/loss_total": 0.5267187356948853 }, { "epoch": 3.591504141063318, "step": 13443, "train/loss_ctc": 0.8989903926849365, "train/loss_error": 0.46680641174316406, "train/loss_total": 0.5532432198524475 }, { "epoch": 3.5917713064386856, "step": 13444, "train/loss_ctc": 0.4197704792022705, "train/loss_error": 0.3712126612663269, "train/loss_total": 0.3809242248535156 }, { "epoch": 3.592038471814053, "step": 13445, "train/loss_ctc": 1.0231783390045166, "train/loss_error": 0.5196413397789001, "train/loss_total": 0.6203487515449524 }, { "epoch": 3.59230563718942, "step": 13446, "train/loss_ctc": 0.4365432858467102, "train/loss_error": 0.3628121316432953, "train/loss_total": 0.3775583505630493 }, { "epoch": 3.5925728025647876, "step": 13447, "train/loss_ctc": 0.6029629707336426, "train/loss_error": 0.4392063617706299, "train/loss_total": 
0.4719576835632324 }, { "epoch": 3.592839967940155, "step": 13448, "train/loss_ctc": 0.21782395243644714, "train/loss_error": 0.40403181314468384, "train/loss_total": 0.3667902648448944 }, { "epoch": 3.5931071333155225, "step": 13449, "train/loss_ctc": 0.3183531165122986, "train/loss_error": 0.46228647232055664, "train/loss_total": 0.433499813079834 }, { "epoch": 3.5933742986908896, "grad_norm": 1.6921519041061401, "learning_rate": 8.450975153620092e-06, "loss": 0.4667, "step": 13450 }, { "epoch": 3.5933742986908896, "step": 13450, "train/loss_ctc": 0.8032132983207703, "train/loss_error": 0.41307181119918823, "train/loss_total": 0.49110013246536255 }, { "epoch": 3.5936414640662573, "step": 13451, "train/loss_ctc": 0.6591970920562744, "train/loss_error": 0.4668920338153839, "train/loss_total": 0.5053530931472778 }, { "epoch": 3.5939086294416245, "step": 13452, "train/loss_ctc": 0.9519851207733154, "train/loss_error": 0.47573739290237427, "train/loss_total": 0.5709869861602783 }, { "epoch": 3.5941757948169917, "step": 13453, "train/loss_ctc": 0.6368975043296814, "train/loss_error": 0.4797147512435913, "train/loss_total": 0.5111513137817383 }, { "epoch": 3.5944429601923593, "step": 13454, "train/loss_ctc": 0.8340088129043579, "train/loss_error": 0.4587705433368683, "train/loss_total": 0.5338181853294373 }, { "epoch": 3.5947101255677265, "step": 13455, "train/loss_ctc": 0.4295378625392914, "train/loss_error": 0.4220530688762665, "train/loss_total": 0.4235500395298004 }, { "epoch": 3.5949772909430937, "step": 13456, "train/loss_ctc": 1.0476789474487305, "train/loss_error": 0.45634275674819946, "train/loss_total": 0.5746099948883057 }, { "epoch": 3.5952444563184613, "step": 13457, "train/loss_ctc": 0.5321435928344727, "train/loss_error": 0.4445485770702362, "train/loss_total": 0.462067574262619 }, { "epoch": 3.5955116216938285, "step": 13458, "train/loss_ctc": 1.0140377283096313, "train/loss_error": 0.4660305082798004, "train/loss_total": 0.5756319761276245 }, { "epoch": 
3.5957787870691957, "step": 13459, "train/loss_ctc": 1.4731333255767822, "train/loss_error": 0.46640151739120483, "train/loss_total": 0.6677479147911072 }, { "epoch": 3.5960459524445634, "grad_norm": 1.6265138387680054, "learning_rate": 8.43494523109805e-06, "loss": 0.5316, "step": 13460 }, { "epoch": 3.5960459524445634, "step": 13460, "train/loss_ctc": 0.5478188991546631, "train/loss_error": 0.4925907850265503, "train/loss_total": 0.5036364197731018 }, { "epoch": 3.5963131178199306, "step": 13461, "train/loss_ctc": 0.9387653470039368, "train/loss_error": 0.5162164568901062, "train/loss_total": 0.6007262468338013 }, { "epoch": 3.5965802831952978, "step": 13462, "train/loss_ctc": 1.0295426845550537, "train/loss_error": 0.45227140188217163, "train/loss_total": 0.567725658416748 }, { "epoch": 3.5968474485706654, "step": 13463, "train/loss_ctc": 1.642359972000122, "train/loss_error": 0.390647828578949, "train/loss_total": 0.6409902572631836 }, { "epoch": 3.5971146139460326, "step": 13464, "train/loss_ctc": 0.960087239742279, "train/loss_error": 0.4512896239757538, "train/loss_total": 0.5530491471290588 }, { "epoch": 3.5973817793214, "step": 13465, "train/loss_ctc": 1.3715770244598389, "train/loss_error": 0.43845754861831665, "train/loss_total": 0.6250814199447632 }, { "epoch": 3.5976489446967674, "step": 13466, "train/loss_ctc": 0.2923191785812378, "train/loss_error": 0.44323524832725525, "train/loss_total": 0.4130520522594452 }, { "epoch": 3.5979161100721346, "step": 13467, "train/loss_ctc": 0.2200903743505478, "train/loss_error": 0.5107828378677368, "train/loss_total": 0.45264434814453125 }, { "epoch": 3.598183275447502, "step": 13468, "train/loss_ctc": 0.27344828844070435, "train/loss_error": 0.4460708796977997, "train/loss_total": 0.41154634952545166 }, { "epoch": 3.5984504408228695, "step": 13469, "train/loss_ctc": 1.0546106100082397, "train/loss_error": 0.4819485545158386, "train/loss_total": 0.5964809656143188 }, { "epoch": 3.5987176061982367, "grad_norm": 
1.5644862651824951, "learning_rate": 8.418915308576008e-06, "loss": 0.5365, "step": 13470 }, { "epoch": 3.5987176061982367, "step": 13470, "train/loss_ctc": 0.6577699184417725, "train/loss_error": 0.45854437351226807, "train/loss_total": 0.49838948249816895 }, { "epoch": 3.598984771573604, "step": 13471, "train/loss_ctc": 0.3547710180282593, "train/loss_error": 0.43582937121391296, "train/loss_total": 0.4196177124977112 }, { "epoch": 3.5992519369489715, "step": 13472, "train/loss_ctc": 0.3881775438785553, "train/loss_error": 0.47596147656440735, "train/loss_total": 0.4584047198295593 }, { "epoch": 3.5995191023243387, "step": 13473, "train/loss_ctc": 0.8876461386680603, "train/loss_error": 0.4713735580444336, "train/loss_total": 0.5546280741691589 }, { "epoch": 3.599786267699706, "step": 13474, "train/loss_ctc": 0.8056879043579102, "train/loss_error": 0.3999316394329071, "train/loss_total": 0.48108288645744324 }, { "epoch": 3.6000534330750735, "step": 13475, "train/loss_ctc": 0.503089189529419, "train/loss_error": 0.4263629913330078, "train/loss_total": 0.4417082667350769 }, { "epoch": 3.6003205984504407, "step": 13476, "train/loss_ctc": 1.0079967975616455, "train/loss_error": 0.46060776710510254, "train/loss_total": 0.5700855851173401 }, { "epoch": 3.600587763825808, "step": 13477, "train/loss_ctc": 0.8143702149391174, "train/loss_error": 0.4032651484012604, "train/loss_total": 0.4854861795902252 }, { "epoch": 3.6008549292011756, "step": 13478, "train/loss_ctc": 0.47772839665412903, "train/loss_error": 0.4186422824859619, "train/loss_total": 0.43045949935913086 }, { "epoch": 3.6011220945765428, "step": 13479, "train/loss_ctc": 1.2325842380523682, "train/loss_error": 0.45257675647735596, "train/loss_total": 0.6085782647132874 }, { "epoch": 3.6013892599519104, "grad_norm": 1.7129013538360596, "learning_rate": 8.402885386053968e-06, "loss": 0.4948, "step": 13480 }, { "epoch": 3.6013892599519104, "step": 13480, "train/loss_ctc": 0.8144441843032837, "train/loss_error": 
0.4314722418785095, "train/loss_total": 0.5080666542053223 }, { "epoch": 3.6016564253272776, "step": 13481, "train/loss_ctc": 0.4542604088783264, "train/loss_error": 0.44858095049858093, "train/loss_total": 0.44971683621406555 }, { "epoch": 3.601923590702645, "step": 13482, "train/loss_ctc": 0.3473913371562958, "train/loss_error": 0.4572203457355499, "train/loss_total": 0.4352545440196991 }, { "epoch": 3.6021907560780124, "step": 13483, "train/loss_ctc": 0.7802199125289917, "train/loss_error": 0.4357399046421051, "train/loss_total": 0.5046359300613403 }, { "epoch": 3.6024579214533796, "step": 13484, "train/loss_ctc": 0.46873635053634644, "train/loss_error": 0.4126424789428711, "train/loss_total": 0.4238612651824951 }, { "epoch": 3.6027250868287473, "step": 13485, "train/loss_ctc": 0.3852842450141907, "train/loss_error": 0.4920724630355835, "train/loss_total": 0.47071483731269836 }, { "epoch": 3.6029922522041145, "step": 13486, "train/loss_ctc": 0.5905197858810425, "train/loss_error": 0.405792236328125, "train/loss_total": 0.44273775815963745 }, { "epoch": 3.6032594175794816, "step": 13487, "train/loss_ctc": 0.5195375084877014, "train/loss_error": 0.4650057256221771, "train/loss_total": 0.47591209411621094 }, { "epoch": 3.6035265829548493, "step": 13488, "train/loss_ctc": 0.897890567779541, "train/loss_error": 0.4168647229671478, "train/loss_total": 0.5130698680877686 }, { "epoch": 3.6037937483302165, "step": 13489, "train/loss_ctc": 1.026881217956543, "train/loss_error": 0.3916984498500824, "train/loss_total": 0.5187349915504456 }, { "epoch": 3.6040609137055837, "grad_norm": 1.6304702758789062, "learning_rate": 8.386855463531927e-06, "loss": 0.4743, "step": 13490 }, { "epoch": 3.6040609137055837, "step": 13490, "train/loss_ctc": 0.31513074040412903, "train/loss_error": 0.43483850359916687, "train/loss_total": 0.4108969569206238 }, { "epoch": 3.6043280790809513, "step": 13491, "train/loss_ctc": 1.0541428327560425, "train/loss_error": 0.4876849949359894, 
"train/loss_total": 0.6009765863418579 }, { "epoch": 3.6045952444563185, "step": 13492, "train/loss_ctc": 0.5115479826927185, "train/loss_error": 0.3931014835834503, "train/loss_total": 0.41679078340530396 }, { "epoch": 3.6048624098316857, "step": 13493, "train/loss_ctc": 0.7673035860061646, "train/loss_error": 0.47471803426742554, "train/loss_total": 0.5332351326942444 }, { "epoch": 3.6051295752070533, "step": 13494, "train/loss_ctc": 0.5202341675758362, "train/loss_error": 0.4211510419845581, "train/loss_total": 0.4409676790237427 }, { "epoch": 3.6053967405824205, "step": 13495, "train/loss_ctc": 1.0696388483047485, "train/loss_error": 0.46492668986320496, "train/loss_total": 0.5858691334724426 }, { "epoch": 3.6056639059577877, "step": 13496, "train/loss_ctc": 1.3312079906463623, "train/loss_error": 0.46375855803489685, "train/loss_total": 0.6372484564781189 }, { "epoch": 3.6059310713331554, "step": 13497, "train/loss_ctc": 0.5825945138931274, "train/loss_error": 0.41742146015167236, "train/loss_total": 0.45045608282089233 }, { "epoch": 3.6061982367085226, "step": 13498, "train/loss_ctc": 0.885647177696228, "train/loss_error": 0.42764633893966675, "train/loss_total": 0.519246518611908 }, { "epoch": 3.6064654020838898, "step": 13499, "train/loss_ctc": 0.4790074825286865, "train/loss_error": 0.437073677778244, "train/loss_total": 0.4454604387283325 }, { "epoch": 3.6067325674592574, "grad_norm": 1.650229811668396, "learning_rate": 8.370825541009885e-06, "loss": 0.5041, "step": 13500 }, { "epoch": 3.6067325674592574, "step": 13500, "train/loss_ctc": 1.0115429162979126, "train/loss_error": 0.4687217175960541, "train/loss_total": 0.5772859454154968 }, { "epoch": 3.6069997328346246, "step": 13501, "train/loss_ctc": 0.4072498381137848, "train/loss_error": 0.4336702525615692, "train/loss_total": 0.4283861517906189 }, { "epoch": 3.607266898209992, "step": 13502, "train/loss_ctc": 0.8814929723739624, "train/loss_error": 0.4768281579017639, "train/loss_total": 
0.5577611327171326 }, { "epoch": 3.6075340635853594, "step": 13503, "train/loss_ctc": 0.5350004434585571, "train/loss_error": 0.44822433590888977, "train/loss_total": 0.4655795693397522 }, { "epoch": 3.6078012289607266, "step": 13504, "train/loss_ctc": 1.1007311344146729, "train/loss_error": 0.44293755292892456, "train/loss_total": 0.5744962692260742 }, { "epoch": 3.608068394336094, "step": 13505, "train/loss_ctc": 1.0394208431243896, "train/loss_error": 0.4800885319709778, "train/loss_total": 0.5919550061225891 }, { "epoch": 3.6083355597114615, "step": 13506, "train/loss_ctc": 0.966810941696167, "train/loss_error": 0.4632931053638458, "train/loss_total": 0.5639966726303101 }, { "epoch": 3.6086027250868287, "step": 13507, "train/loss_ctc": 0.5839271545410156, "train/loss_error": 0.4620524048805237, "train/loss_total": 0.486427366733551 }, { "epoch": 3.608869890462196, "step": 13508, "train/loss_ctc": 0.634329080581665, "train/loss_error": 0.4043335020542145, "train/loss_total": 0.4503326416015625 }, { "epoch": 3.6091370558375635, "step": 13509, "train/loss_ctc": 0.8001105189323425, "train/loss_error": 0.38291358947753906, "train/loss_total": 0.46635299921035767 }, { "epoch": 3.6094042212129307, "grad_norm": 1.7868787050247192, "learning_rate": 8.354795618487845e-06, "loss": 0.5163, "step": 13510 }, { "epoch": 3.6094042212129307, "step": 13510, "train/loss_ctc": 1.1645301580429077, "train/loss_error": 0.4552891254425049, "train/loss_total": 0.5971373319625854 }, { "epoch": 3.609671386588298, "step": 13511, "train/loss_ctc": 0.8395925164222717, "train/loss_error": 0.4212281405925751, "train/loss_total": 0.5049010515213013 }, { "epoch": 3.6099385519636655, "step": 13512, "train/loss_ctc": 0.8930777311325073, "train/loss_error": 0.4355442225933075, "train/loss_total": 0.5270509123802185 }, { "epoch": 3.6102057173390327, "step": 13513, "train/loss_ctc": 0.9964543581008911, "train/loss_error": 0.4353943467140198, "train/loss_total": 0.547606348991394 }, { "epoch": 
3.6104728827144004, "step": 13514, "train/loss_ctc": 0.3581429123878479, "train/loss_error": 0.3748382031917572, "train/loss_total": 0.3714991807937622 }, { "epoch": 3.6107400480897676, "step": 13515, "train/loss_ctc": 1.4575738906860352, "train/loss_error": 0.412897527217865, "train/loss_total": 0.6218328475952148 }, { "epoch": 3.6110072134651348, "step": 13516, "train/loss_ctc": 1.3619087934494019, "train/loss_error": 0.49505293369293213, "train/loss_total": 0.668424129486084 }, { "epoch": 3.6112743788405024, "step": 13517, "train/loss_ctc": 0.3667382299900055, "train/loss_error": 0.5119765996932983, "train/loss_total": 0.48292893171310425 }, { "epoch": 3.6115415442158696, "step": 13518, "train/loss_ctc": 0.9999924898147583, "train/loss_error": 0.3975887596607208, "train/loss_total": 0.5180695056915283 }, { "epoch": 3.6118087095912372, "step": 13519, "train/loss_ctc": 0.5120297074317932, "train/loss_error": 0.45994481444358826, "train/loss_total": 0.4703617990016937 }, { "epoch": 3.6120758749666044, "grad_norm": 3.45950984954834, "learning_rate": 8.338765695965803e-06, "loss": 0.531, "step": 13520 }, { "epoch": 3.6120758749666044, "step": 13520, "train/loss_ctc": 0.8595103025436401, "train/loss_error": 0.4144299030303955, "train/loss_total": 0.5034459829330444 }, { "epoch": 3.6123430403419716, "step": 13521, "train/loss_ctc": 0.30465853214263916, "train/loss_error": 0.3760259449481964, "train/loss_total": 0.3617524802684784 }, { "epoch": 3.6126102057173393, "step": 13522, "train/loss_ctc": 1.007502555847168, "train/loss_error": 0.4248385429382324, "train/loss_total": 0.5413713455200195 }, { "epoch": 3.6128773710927065, "step": 13523, "train/loss_ctc": 0.5655341148376465, "train/loss_error": 0.40962573885917664, "train/loss_total": 0.44080740213394165 }, { "epoch": 3.6131445364680737, "step": 13524, "train/loss_ctc": 1.1454428434371948, "train/loss_error": 0.4394527077674866, "train/loss_total": 0.5806507468223572 }, { "epoch": 3.6134117018434413, "step": 13525, 
"train/loss_ctc": 0.4876856207847595, "train/loss_error": 0.4399260878562927, "train/loss_total": 0.44947800040245056 }, { "epoch": 3.6136788672188085, "step": 13526, "train/loss_ctc": 0.3617958128452301, "train/loss_error": 0.40922823548316956, "train/loss_total": 0.3997417688369751 }, { "epoch": 3.6139460325941757, "step": 13527, "train/loss_ctc": 1.2146694660186768, "train/loss_error": 0.43899983167648315, "train/loss_total": 0.594133734703064 }, { "epoch": 3.6142131979695433, "step": 13528, "train/loss_ctc": 1.1968110799789429, "train/loss_error": 0.465116947889328, "train/loss_total": 0.6114557981491089 }, { "epoch": 3.6144803633449105, "step": 13529, "train/loss_ctc": 0.711341142654419, "train/loss_error": 0.4944341778755188, "train/loss_total": 0.5378155708312988 }, { "epoch": 3.6147475287202777, "grad_norm": 1.7178281545639038, "learning_rate": 8.322735773443761e-06, "loss": 0.5021, "step": 13530 }, { "epoch": 3.6147475287202777, "step": 13530, "train/loss_ctc": 0.5754046440124512, "train/loss_error": 0.42817413806915283, "train/loss_total": 0.457620233297348 }, { "epoch": 3.6150146940956454, "step": 13531, "train/loss_ctc": 0.6707313060760498, "train/loss_error": 0.4503036439418793, "train/loss_total": 0.4943891763687134 }, { "epoch": 3.6152818594710125, "step": 13532, "train/loss_ctc": 0.5964251756668091, "train/loss_error": 0.45567288994789124, "train/loss_total": 0.48382335901260376 }, { "epoch": 3.6155490248463797, "step": 13533, "train/loss_ctc": 0.5874133110046387, "train/loss_error": 0.5182546377182007, "train/loss_total": 0.5320863723754883 }, { "epoch": 3.6158161902217474, "step": 13534, "train/loss_ctc": 1.264005422592163, "train/loss_error": 0.43578702211380005, "train/loss_total": 0.6014307141304016 }, { "epoch": 3.6160833555971146, "step": 13535, "train/loss_ctc": 0.458379864692688, "train/loss_error": 0.453743577003479, "train/loss_total": 0.45467084646224976 }, { "epoch": 3.6163505209724818, "step": 13536, "train/loss_ctc": 
0.705198347568512, "train/loss_error": 0.47698020935058594, "train/loss_total": 0.5226238369941711 }, { "epoch": 3.6166176863478494, "step": 13537, "train/loss_ctc": 0.720054566860199, "train/loss_error": 0.4732879102230072, "train/loss_total": 0.5226412415504456 }, { "epoch": 3.6168848517232166, "step": 13538, "train/loss_ctc": 0.3456569314002991, "train/loss_error": 0.44633427262306213, "train/loss_total": 0.4261988401412964 }, { "epoch": 3.617152017098584, "step": 13539, "train/loss_ctc": 0.7220870852470398, "train/loss_error": 0.44561493396759033, "train/loss_total": 0.5009093880653381 }, { "epoch": 3.6174191824739514, "grad_norm": 1.852574348449707, "learning_rate": 8.306705850921721e-06, "loss": 0.4996, "step": 13540 }, { "epoch": 3.6174191824739514, "step": 13540, "train/loss_ctc": 1.1427472829818726, "train/loss_error": 0.4632410407066345, "train/loss_total": 0.59914231300354 }, { "epoch": 3.6176863478493186, "step": 13541, "train/loss_ctc": 0.502812385559082, "train/loss_error": 0.47136083245277405, "train/loss_total": 0.4776511490345001 }, { "epoch": 3.617953513224686, "step": 13542, "train/loss_ctc": 0.5183737874031067, "train/loss_error": 0.38346999883651733, "train/loss_total": 0.4104507863521576 }, { "epoch": 3.6182206786000535, "step": 13543, "train/loss_ctc": 0.8654357194900513, "train/loss_error": 0.4309532940387726, "train/loss_total": 0.5178498029708862 }, { "epoch": 3.6184878439754207, "step": 13544, "train/loss_ctc": 0.7208809852600098, "train/loss_error": 0.4010653793811798, "train/loss_total": 0.4650285243988037 }, { "epoch": 3.618755009350788, "step": 13545, "train/loss_ctc": 0.9773672223091125, "train/loss_error": 0.4408693313598633, "train/loss_total": 0.5481688976287842 }, { "epoch": 3.6190221747261555, "step": 13546, "train/loss_ctc": 0.4222553074359894, "train/loss_error": 0.4293663203716278, "train/loss_total": 0.4279441237449646 }, { "epoch": 3.6192893401015227, "step": 13547, "train/loss_ctc": 1.061571717262268, "train/loss_error": 
0.4434356689453125, "train/loss_total": 0.5670629143714905 }, { "epoch": 3.6195565054768903, "step": 13548, "train/loss_ctc": 1.5565690994262695, "train/loss_error": 0.4781624674797058, "train/loss_total": 0.6938438415527344 }, { "epoch": 3.6198236708522575, "step": 13549, "train/loss_ctc": 1.1929504871368408, "train/loss_error": 0.42300406098365784, "train/loss_total": 0.5769933462142944 }, { "epoch": 3.620090836227625, "grad_norm": 3.343226909637451, "learning_rate": 8.29067592839968e-06, "loss": 0.5284, "step": 13550 }, { "epoch": 3.620090836227625, "step": 13550, "train/loss_ctc": 0.301220566034317, "train/loss_error": 0.465633749961853, "train/loss_total": 0.4327511191368103 }, { "epoch": 3.6203580016029924, "step": 13551, "train/loss_ctc": 0.5376354455947876, "train/loss_error": 0.45881184935569763, "train/loss_total": 0.47457659244537354 }, { "epoch": 3.6206251669783596, "step": 13552, "train/loss_ctc": 0.5230034589767456, "train/loss_error": 0.44619008898735046, "train/loss_total": 0.46155276894569397 }, { "epoch": 3.620892332353727, "step": 13553, "train/loss_ctc": 0.5702287554740906, "train/loss_error": 0.3864951431751251, "train/loss_total": 0.42324185371398926 }, { "epoch": 3.6211594977290944, "step": 13554, "train/loss_ctc": 1.436792254447937, "train/loss_error": 0.4737798869609833, "train/loss_total": 0.666382372379303 }, { "epoch": 3.6214266631044616, "step": 13555, "train/loss_ctc": 0.5871323347091675, "train/loss_error": 0.400519996881485, "train/loss_total": 0.4378424882888794 }, { "epoch": 3.6216938284798292, "step": 13556, "train/loss_ctc": 0.5317983031272888, "train/loss_error": 0.448993980884552, "train/loss_total": 0.4655548632144928 }, { "epoch": 3.6219609938551964, "step": 13557, "train/loss_ctc": 0.7435253858566284, "train/loss_error": 0.45533376932144165, "train/loss_total": 0.5129721164703369 }, { "epoch": 3.6222281592305636, "step": 13558, "train/loss_ctc": 0.6132418513298035, "train/loss_error": 0.4600001871585846, "train/loss_total": 
0.4906485080718994 }, { "epoch": 3.6224953246059313, "step": 13559, "train/loss_ctc": 0.4950723946094513, "train/loss_error": 0.3927464187145233, "train/loss_total": 0.4132116436958313 }, { "epoch": 3.6227624899812985, "grad_norm": 1.688279628753662, "learning_rate": 8.274646005877637e-06, "loss": 0.4779, "step": 13560 }, { "epoch": 3.6227624899812985, "step": 13560, "train/loss_ctc": 0.6418787837028503, "train/loss_error": 0.4003845155239105, "train/loss_total": 0.44868338108062744 }, { "epoch": 3.6230296553566657, "step": 13561, "train/loss_ctc": 0.9604676365852356, "train/loss_error": 0.424513578414917, "train/loss_total": 0.5317044258117676 }, { "epoch": 3.6232968207320333, "step": 13562, "train/loss_ctc": 0.3941270112991333, "train/loss_error": 0.44103676080703735, "train/loss_total": 0.43165484070777893 }, { "epoch": 3.6235639861074005, "step": 13563, "train/loss_ctc": 1.100663661956787, "train/loss_error": 0.44192972779273987, "train/loss_total": 0.5736765265464783 }, { "epoch": 3.6238311514827677, "step": 13564, "train/loss_ctc": 0.4623502492904663, "train/loss_error": 0.4881289005279541, "train/loss_total": 0.48297318816185 }, { "epoch": 3.6240983168581353, "step": 13565, "train/loss_ctc": 0.9433578252792358, "train/loss_error": 0.4650696814060211, "train/loss_total": 0.5607272982597351 }, { "epoch": 3.6243654822335025, "step": 13566, "train/loss_ctc": 1.2676105499267578, "train/loss_error": 0.4934307336807251, "train/loss_total": 0.6482667326927185 }, { "epoch": 3.6246326476088697, "step": 13567, "train/loss_ctc": 0.3698282837867737, "train/loss_error": 0.421954870223999, "train/loss_total": 0.4115295708179474 }, { "epoch": 3.6248998129842374, "step": 13568, "train/loss_ctc": 0.5769484043121338, "train/loss_error": 0.44472771883010864, "train/loss_total": 0.47117185592651367 }, { "epoch": 3.6251669783596046, "step": 13569, "train/loss_ctc": 0.6231787204742432, "train/loss_error": 0.40903589129447937, "train/loss_total": 0.45186448097229004 }, { "epoch": 
3.6254341437349717, "grad_norm": 2.2587971687316895, "learning_rate": 8.258616083355597e-06, "loss": 0.5012, "step": 13570 }, { "epoch": 3.6254341437349717, "step": 13570, "train/loss_ctc": 0.717852771282196, "train/loss_error": 0.48607468605041504, "train/loss_total": 0.5324302911758423 }, { "epoch": 3.6257013091103394, "step": 13571, "train/loss_ctc": 1.1369357109069824, "train/loss_error": 0.41091886162757874, "train/loss_total": 0.5561222434043884 }, { "epoch": 3.6259684744857066, "step": 13572, "train/loss_ctc": 0.5594997406005859, "train/loss_error": 0.4019380509853363, "train/loss_total": 0.4334504008293152 }, { "epoch": 3.626235639861074, "step": 13573, "train/loss_ctc": 0.8845670819282532, "train/loss_error": 0.43584951758384705, "train/loss_total": 0.5255930423736572 }, { "epoch": 3.6265028052364414, "step": 13574, "train/loss_ctc": 0.6003263592720032, "train/loss_error": 0.45530709624290466, "train/loss_total": 0.48431095480918884 }, { "epoch": 3.6267699706118086, "step": 13575, "train/loss_ctc": 0.8821181058883667, "train/loss_error": 0.43110260367393494, "train/loss_total": 0.5213057398796082 }, { "epoch": 3.627037135987176, "step": 13576, "train/loss_ctc": 0.8716732859611511, "train/loss_error": 0.44353511929512024, "train/loss_total": 0.5291627645492554 }, { "epoch": 3.6273043013625434, "step": 13577, "train/loss_ctc": 1.0491093397140503, "train/loss_error": 0.4780530035495758, "train/loss_total": 0.5922642946243286 }, { "epoch": 3.6275714667379106, "step": 13578, "train/loss_ctc": 0.8178749084472656, "train/loss_error": 0.4467686414718628, "train/loss_total": 0.5209898948669434 }, { "epoch": 3.6278386321132783, "step": 13579, "train/loss_ctc": 0.9030686616897583, "train/loss_error": 0.47243377566337585, "train/loss_total": 0.5585607886314392 }, { "epoch": 3.6281057974886455, "grad_norm": 3.288364887237549, "learning_rate": 8.242586160833557e-06, "loss": 0.5254, "step": 13580 }, { "epoch": 3.6281057974886455, "step": 13580, "train/loss_ctc": 
0.655921995639801, "train/loss_error": 0.39994028210639954, "train/loss_total": 0.45113661885261536 }, { "epoch": 3.6283729628640127, "step": 13581, "train/loss_ctc": 0.9786407947540283, "train/loss_error": 0.4026362895965576, "train/loss_total": 0.5178372263908386 }, { "epoch": 3.6286401282393803, "step": 13582, "train/loss_ctc": 0.5327883958816528, "train/loss_error": 0.5432374477386475, "train/loss_total": 0.5411476492881775 }, { "epoch": 3.6289072936147475, "step": 13583, "train/loss_ctc": 0.9341164231300354, "train/loss_error": 0.47847607731819153, "train/loss_total": 0.5696041584014893 }, { "epoch": 3.629174458990115, "step": 13584, "train/loss_ctc": 0.926537036895752, "train/loss_error": 0.454498291015625, "train/loss_total": 0.5489060878753662 }, { "epoch": 3.6294416243654823, "step": 13585, "train/loss_ctc": 1.1280248165130615, "train/loss_error": 0.427329957485199, "train/loss_total": 0.5674689412117004 }, { "epoch": 3.6297087897408495, "step": 13586, "train/loss_ctc": 0.40806877613067627, "train/loss_error": 0.4493900239467621, "train/loss_total": 0.4411257803440094 }, { "epoch": 3.629975955116217, "step": 13587, "train/loss_ctc": 0.6293712854385376, "train/loss_error": 0.4380885064601898, "train/loss_total": 0.4763450622558594 }, { "epoch": 3.6302431204915844, "step": 13588, "train/loss_ctc": 0.5333347916603088, "train/loss_error": 0.4639766812324524, "train/loss_total": 0.4778482913970947 }, { "epoch": 3.6305102858669516, "step": 13589, "train/loss_ctc": 1.0175600051879883, "train/loss_error": 0.45534852147102356, "train/loss_total": 0.5677908062934875 }, { "epoch": 3.630777451242319, "grad_norm": 1.39591646194458, "learning_rate": 8.226556238311515e-06, "loss": 0.5159, "step": 13590 }, { "epoch": 3.630777451242319, "step": 13590, "train/loss_ctc": 0.5747581720352173, "train/loss_error": 0.46264588832855225, "train/loss_total": 0.48506835103034973 }, { "epoch": 3.6310446166176864, "step": 13591, "train/loss_ctc": 0.4570385217666626, "train/loss_error": 
0.34980449080467224, "train/loss_total": 0.37125131487846375 }, { "epoch": 3.6313117819930536, "step": 13592, "train/loss_ctc": 0.8551756739616394, "train/loss_error": 0.5221785306930542, "train/loss_total": 0.5887779593467712 }, { "epoch": 3.6315789473684212, "step": 13593, "train/loss_ctc": 0.8476472496986389, "train/loss_error": 0.4357925057411194, "train/loss_total": 0.5181634426116943 }, { "epoch": 3.6318461127437884, "step": 13594, "train/loss_ctc": 0.5561038255691528, "train/loss_error": 0.42981553077697754, "train/loss_total": 0.45507320761680603 }, { "epoch": 3.6321132781191556, "step": 13595, "train/loss_ctc": 0.43100705742836, "train/loss_error": 0.382742702960968, "train/loss_total": 0.392395555973053 }, { "epoch": 3.6323804434945233, "step": 13596, "train/loss_ctc": 0.86797034740448, "train/loss_error": 0.5073807835578918, "train/loss_total": 0.5794987082481384 }, { "epoch": 3.6326476088698905, "step": 13597, "train/loss_ctc": 0.9995777606964111, "train/loss_error": 0.48941612243652344, "train/loss_total": 0.5914484262466431 }, { "epoch": 3.6329147742452577, "step": 13598, "train/loss_ctc": 0.7021517753601074, "train/loss_error": 0.41683292388916016, "train/loss_total": 0.47389671206474304 }, { "epoch": 3.6331819396206253, "step": 13599, "train/loss_ctc": 1.6034448146820068, "train/loss_error": 0.4971553087234497, "train/loss_total": 0.718413233757019 }, { "epoch": 3.6334491049959925, "grad_norm": 2.8367021083831787, "learning_rate": 8.210526315789475e-06, "loss": 0.5174, "step": 13600 }, { "epoch": 3.6334491049959925, "step": 13600, "train/loss_ctc": 0.6111701726913452, "train/loss_error": 0.4042677879333496, "train/loss_total": 0.44564828276634216 }, { "epoch": 3.6337162703713597, "step": 13601, "train/loss_ctc": 0.8889172673225403, "train/loss_error": 0.420658677816391, "train/loss_total": 0.5143104195594788 }, { "epoch": 3.6339834357467273, "step": 13602, "train/loss_ctc": 0.49530214071273804, "train/loss_error": 0.41054779291152954, 
"train/loss_total": 0.4274986982345581 }, { "epoch": 3.6342506011220945, "step": 13603, "train/loss_ctc": 0.6317119002342224, "train/loss_error": 0.43988752365112305, "train/loss_total": 0.4782524108886719 }, { "epoch": 3.6345177664974617, "step": 13604, "train/loss_ctc": 0.9212090969085693, "train/loss_error": 0.43972447514533997, "train/loss_total": 0.5360214114189148 }, { "epoch": 3.6347849318728294, "step": 13605, "train/loss_ctc": 0.8499680757522583, "train/loss_error": 0.49411654472351074, "train/loss_total": 0.5652868747711182 }, { "epoch": 3.6350520972481966, "step": 13606, "train/loss_ctc": 0.5126555562019348, "train/loss_error": 0.41192227602005005, "train/loss_total": 0.43206894397735596 }, { "epoch": 3.6353192626235638, "step": 13607, "train/loss_ctc": 0.7024892568588257, "train/loss_error": 0.478996604681015, "train/loss_total": 0.5236951112747192 }, { "epoch": 3.6355864279989314, "step": 13608, "train/loss_ctc": 0.49311619997024536, "train/loss_error": 0.43769916892051697, "train/loss_total": 0.4487825930118561 }, { "epoch": 3.6358535933742986, "step": 13609, "train/loss_ctc": 0.34776943922042847, "train/loss_error": 0.45983102917671204, "train/loss_total": 0.43741869926452637 }, { "epoch": 3.636120758749666, "grad_norm": 2.0526673793792725, "learning_rate": 8.194496393267433e-06, "loss": 0.4809, "step": 13610 }, { "epoch": 3.636120758749666, "step": 13610, "train/loss_ctc": 0.651698112487793, "train/loss_error": 0.43413594365119934, "train/loss_total": 0.47764837741851807 }, { "epoch": 3.6363879241250334, "step": 13611, "train/loss_ctc": 0.6434072256088257, "train/loss_error": 0.43207117915153503, "train/loss_total": 0.4743384122848511 }, { "epoch": 3.6366550895004006, "step": 13612, "train/loss_ctc": 0.3124428689479828, "train/loss_error": 0.4346536695957184, "train/loss_total": 0.4102115333080292 }, { "epoch": 3.6369222548757683, "step": 13613, "train/loss_ctc": 0.6555889844894409, "train/loss_error": 0.4284026026725769, "train/loss_total": 
0.4738398790359497 }, { "epoch": 3.6371894202511355, "step": 13614, "train/loss_ctc": 0.6957807540893555, "train/loss_error": 0.4635874032974243, "train/loss_total": 0.5100260972976685 }, { "epoch": 3.6374565856265026, "step": 13615, "train/loss_ctc": 0.5765883922576904, "train/loss_error": 0.4918297529220581, "train/loss_total": 0.5087814927101135 }, { "epoch": 3.6377237510018703, "step": 13616, "train/loss_ctc": 0.5056242346763611, "train/loss_error": 0.4491810202598572, "train/loss_total": 0.46046966314315796 }, { "epoch": 3.6379909163772375, "step": 13617, "train/loss_ctc": 0.43144696950912476, "train/loss_error": 0.37100857496261597, "train/loss_total": 0.38309627771377563 }, { "epoch": 3.638258081752605, "step": 13618, "train/loss_ctc": 0.4240935444831848, "train/loss_error": 0.38589221239089966, "train/loss_total": 0.39353248476982117 }, { "epoch": 3.6385252471279723, "step": 13619, "train/loss_ctc": 0.8453598022460938, "train/loss_error": 0.4583575129508972, "train/loss_total": 0.5357579588890076 }, { "epoch": 3.6387924125033395, "grad_norm": 2.0769505500793457, "learning_rate": 8.178466470745391e-06, "loss": 0.4628, "step": 13620 }, { "epoch": 3.6387924125033395, "step": 13620, "train/loss_ctc": 1.1014456748962402, "train/loss_error": 0.4863729774951935, "train/loss_total": 0.6093875169754028 }, { "epoch": 3.639059577878707, "step": 13621, "train/loss_ctc": 0.5758365392684937, "train/loss_error": 0.47629326581954956, "train/loss_total": 0.49620193243026733 }, { "epoch": 3.6393267432540743, "step": 13622, "train/loss_ctc": 0.6040483117103577, "train/loss_error": 0.3892054259777069, "train/loss_total": 0.43217402696609497 }, { "epoch": 3.6395939086294415, "step": 13623, "train/loss_ctc": 0.6674227714538574, "train/loss_error": 0.4065268933773041, "train/loss_total": 0.4587060809135437 }, { "epoch": 3.639861074004809, "step": 13624, "train/loss_ctc": 0.5691076517105103, "train/loss_error": 0.43526244163513184, "train/loss_total": 0.4620314836502075 }, { 
"epoch": 3.6401282393801764, "step": 13625, "train/loss_ctc": 0.750902533531189, "train/loss_error": 0.4136432111263275, "train/loss_total": 0.4810950756072998 }, { "epoch": 3.6403954047555436, "step": 13626, "train/loss_ctc": 0.6062222719192505, "train/loss_error": 0.4387906491756439, "train/loss_total": 0.4722769856452942 }, { "epoch": 3.640662570130911, "step": 13627, "train/loss_ctc": 0.6681839227676392, "train/loss_error": 0.4101185202598572, "train/loss_total": 0.46173161268234253 }, { "epoch": 3.6409297355062784, "step": 13628, "train/loss_ctc": 0.657028317451477, "train/loss_error": 0.36078622937202454, "train/loss_total": 0.42003464698791504 }, { "epoch": 3.6411969008816456, "step": 13629, "train/loss_ctc": 0.23855207860469818, "train/loss_error": 0.432782381772995, "train/loss_total": 0.3939363360404968 }, { "epoch": 3.6414640662570132, "grad_norm": 1.3685402870178223, "learning_rate": 8.16243654822335e-06, "loss": 0.4688, "step": 13630 }, { "epoch": 3.6414640662570132, "step": 13630, "train/loss_ctc": 0.48792344331741333, "train/loss_error": 0.4086255133152008, "train/loss_total": 0.42448511719703674 }, { "epoch": 3.6417312316323804, "step": 13631, "train/loss_ctc": 1.0759520530700684, "train/loss_error": 0.44376903772354126, "train/loss_total": 0.5702056884765625 }, { "epoch": 3.6419983970077476, "step": 13632, "train/loss_ctc": 0.8370331525802612, "train/loss_error": 0.42198988795280457, "train/loss_total": 0.5049985647201538 }, { "epoch": 3.6422655623831153, "step": 13633, "train/loss_ctc": 1.699180245399475, "train/loss_error": 0.5257346630096436, "train/loss_total": 0.7604237794876099 }, { "epoch": 3.6425327277584825, "step": 13634, "train/loss_ctc": 0.7815511226654053, "train/loss_error": 0.4795610010623932, "train/loss_total": 0.5399590730667114 }, { "epoch": 3.6427998931338497, "step": 13635, "train/loss_ctc": 1.2984721660614014, "train/loss_error": 0.48424455523490906, "train/loss_total": 0.6470900774002075 }, { "epoch": 3.6430670585092173, 
"step": 13636, "train/loss_ctc": 0.7577403783798218, "train/loss_error": 0.4069259762763977, "train/loss_total": 0.4770888686180115 }, { "epoch": 3.6433342238845845, "step": 13637, "train/loss_ctc": 0.4332346022129059, "train/loss_error": 0.39146214723587036, "train/loss_total": 0.399816632270813 }, { "epoch": 3.6436013892599517, "step": 13638, "train/loss_ctc": 0.6449044346809387, "train/loss_error": 0.4437108635902405, "train/loss_total": 0.48394960165023804 }, { "epoch": 3.6438685546353193, "step": 13639, "train/loss_ctc": 0.8611302375793457, "train/loss_error": 0.4277273416519165, "train/loss_total": 0.5144079327583313 }, { "epoch": 3.6441357200106865, "grad_norm": 1.7635544538497925, "learning_rate": 8.146406625701309e-06, "loss": 0.5322, "step": 13640 }, { "epoch": 3.6441357200106865, "step": 13640, "train/loss_ctc": 0.7147842645645142, "train/loss_error": 0.4254401624202728, "train/loss_total": 0.4833090007305145 }, { "epoch": 3.6444028853860537, "step": 13641, "train/loss_ctc": 0.34834349155426025, "train/loss_error": 0.40992775559425354, "train/loss_total": 0.39761093258857727 }, { "epoch": 3.6446700507614214, "step": 13642, "train/loss_ctc": 0.6734346151351929, "train/loss_error": 0.4198731482028961, "train/loss_total": 0.4705854654312134 }, { "epoch": 3.6449372161367886, "step": 13643, "train/loss_ctc": 0.7175211906433105, "train/loss_error": 0.40326210856437683, "train/loss_total": 0.4661139249801636 }, { "epoch": 3.6452043815121558, "step": 13644, "train/loss_ctc": 0.488215833902359, "train/loss_error": 0.41511833667755127, "train/loss_total": 0.4297378361225128 }, { "epoch": 3.6454715468875234, "step": 13645, "train/loss_ctc": 0.43051183223724365, "train/loss_error": 0.4320741593837738, "train/loss_total": 0.4317617118358612 }, { "epoch": 3.6457387122628906, "step": 13646, "train/loss_ctc": 0.6283690929412842, "train/loss_error": 0.4008828401565552, "train/loss_total": 0.446380078792572 }, { "epoch": 3.6460058776382582, "step": 13647, 
"train/loss_ctc": 0.7700365781784058, "train/loss_error": 0.4167890250682831, "train/loss_total": 0.48743852972984314 }, { "epoch": 3.6462730430136254, "step": 13648, "train/loss_ctc": 0.6088115572929382, "train/loss_error": 0.3333028256893158, "train/loss_total": 0.38840457797050476 }, { "epoch": 3.6465402083889926, "step": 13649, "train/loss_ctc": 0.3426230549812317, "train/loss_error": 0.36362627148628235, "train/loss_total": 0.3594256639480591 }, { "epoch": 3.6468073737643603, "grad_norm": 1.3934190273284912, "learning_rate": 8.130376703179267e-06, "loss": 0.4361, "step": 13650 }, { "epoch": 3.6468073737643603, "step": 13650, "train/loss_ctc": 1.8125073909759521, "train/loss_error": 0.5292664766311646, "train/loss_total": 0.7859146595001221 }, { "epoch": 3.6470745391397275, "step": 13651, "train/loss_ctc": 0.5892379283905029, "train/loss_error": 0.42761415243148804, "train/loss_total": 0.4599389135837555 }, { "epoch": 3.647341704515095, "step": 13652, "train/loss_ctc": 0.5675877928733826, "train/loss_error": 0.4433659315109253, "train/loss_total": 0.4682103097438812 }, { "epoch": 3.6476088698904623, "step": 13653, "train/loss_ctc": 0.5588176846504211, "train/loss_error": 0.4361569583415985, "train/loss_total": 0.46068909764289856 }, { "epoch": 3.6478760352658295, "step": 13654, "train/loss_ctc": 0.157633438706398, "train/loss_error": 0.48404207825660706, "train/loss_total": 0.41876035928726196 }, { "epoch": 3.648143200641197, "step": 13655, "train/loss_ctc": 0.4446505606174469, "train/loss_error": 0.41819554567337036, "train/loss_total": 0.42348653078079224 }, { "epoch": 3.6484103660165643, "step": 13656, "train/loss_ctc": 0.5224213600158691, "train/loss_error": 0.39635059237480164, "train/loss_total": 0.4215647578239441 }, { "epoch": 3.6486775313919315, "step": 13657, "train/loss_ctc": 0.4517720341682434, "train/loss_error": 0.45950496196746826, "train/loss_total": 0.4579584002494812 }, { "epoch": 3.648944696767299, "step": 13658, "train/loss_ctc": 
1.1986966133117676, "train/loss_error": 0.5002995729446411, "train/loss_total": 0.6399790048599243 }, { "epoch": 3.6492118621426664, "step": 13659, "train/loss_ctc": 0.653921365737915, "train/loss_error": 0.49623405933380127, "train/loss_total": 0.527771532535553 }, { "epoch": 3.6494790275180335, "grad_norm": 6.613559722900391, "learning_rate": 8.114346780657227e-06, "loss": 0.5064, "step": 13660 }, { "epoch": 3.6494790275180335, "step": 13660, "train/loss_ctc": 0.8008378744125366, "train/loss_error": 0.437028169631958, "train/loss_total": 0.5097901225090027 }, { "epoch": 3.649746192893401, "step": 13661, "train/loss_ctc": 0.7518092393875122, "train/loss_error": 0.40871527791023254, "train/loss_total": 0.47733408212661743 }, { "epoch": 3.6500133582687684, "step": 13662, "train/loss_ctc": 0.8305215239524841, "train/loss_error": 0.4636027216911316, "train/loss_total": 0.5369864702224731 }, { "epoch": 3.6502805236441356, "step": 13663, "train/loss_ctc": 0.9137008786201477, "train/loss_error": 0.4805365800857544, "train/loss_total": 0.5671694278717041 }, { "epoch": 3.650547689019503, "step": 13664, "train/loss_ctc": 1.2625935077667236, "train/loss_error": 0.4567801356315613, "train/loss_total": 0.6179428100585938 }, { "epoch": 3.6508148543948704, "step": 13665, "train/loss_ctc": 0.5595338344573975, "train/loss_error": 0.409570574760437, "train/loss_total": 0.43956324458122253 }, { "epoch": 3.6510820197702376, "step": 13666, "train/loss_ctc": 1.170758605003357, "train/loss_error": 0.4076756238937378, "train/loss_total": 0.5602922439575195 }, { "epoch": 3.6513491851456052, "step": 13667, "train/loss_ctc": 0.4251461327075958, "train/loss_error": 0.4211288392543793, "train/loss_total": 0.42193228006362915 }, { "epoch": 3.6516163505209724, "step": 13668, "train/loss_ctc": 0.45751404762268066, "train/loss_error": 0.39938536286354065, "train/loss_total": 0.41101109981536865 }, { "epoch": 3.6518835158963396, "step": 13669, "train/loss_ctc": 0.6975891590118408, 
"train/loss_error": 0.42074793577194214, "train/loss_total": 0.4761161804199219 }, { "epoch": 3.6521506812717073, "grad_norm": 1.4114305973052979, "learning_rate": 8.098316858135186e-06, "loss": 0.5018, "step": 13670 }, { "epoch": 3.6521506812717073, "step": 13670, "train/loss_ctc": 0.6273544430732727, "train/loss_error": 0.4658815860748291, "train/loss_total": 0.4981761574745178 }, { "epoch": 3.6524178466470745, "step": 13671, "train/loss_ctc": 0.4382997155189514, "train/loss_error": 0.41616663336753845, "train/loss_total": 0.42059326171875 }, { "epoch": 3.6526850120224417, "step": 13672, "train/loss_ctc": 0.6728733777999878, "train/loss_error": 0.42027169466018677, "train/loss_total": 0.4707920551300049 }, { "epoch": 3.6529521773978093, "step": 13673, "train/loss_ctc": 0.5726854801177979, "train/loss_error": 0.40526723861694336, "train/loss_total": 0.43875089287757874 }, { "epoch": 3.6532193427731765, "step": 13674, "train/loss_ctc": 0.17420710623264313, "train/loss_error": 0.42020171880722046, "train/loss_total": 0.37100279331207275 }, { "epoch": 3.6534865081485437, "step": 13675, "train/loss_ctc": 0.289919912815094, "train/loss_error": 0.4713544249534607, "train/loss_total": 0.4350675344467163 }, { "epoch": 3.6537536735239113, "step": 13676, "train/loss_ctc": 0.29013776779174805, "train/loss_error": 0.45600125193595886, "train/loss_total": 0.4228285849094391 }, { "epoch": 3.6540208388992785, "step": 13677, "train/loss_ctc": 1.5997161865234375, "train/loss_error": 0.47242632508277893, "train/loss_total": 0.6978843212127686 }, { "epoch": 3.6542880042746457, "step": 13678, "train/loss_ctc": 0.6796962022781372, "train/loss_error": 0.4745519161224365, "train/loss_total": 0.5155807733535767 }, { "epoch": 3.6545551696500134, "step": 13679, "train/loss_ctc": 1.144895315170288, "train/loss_error": 0.4868481457233429, "train/loss_total": 0.6184576153755188 }, { "epoch": 3.6548223350253806, "grad_norm": 2.7671058177948, "learning_rate": 8.082286935613145e-06, "loss": 
0.4889, "step": 13680 }, { "epoch": 3.6548223350253806, "step": 13680, "train/loss_ctc": 0.42685920000076294, "train/loss_error": 0.34652334451675415, "train/loss_total": 0.3625905215740204 }, { "epoch": 3.655089500400748, "step": 13681, "train/loss_ctc": 0.6821656227111816, "train/loss_error": 0.46867796778678894, "train/loss_total": 0.5113755464553833 }, { "epoch": 3.6553566657761154, "step": 13682, "train/loss_ctc": 0.8265412449836731, "train/loss_error": 0.4214237630367279, "train/loss_total": 0.502447247505188 }, { "epoch": 3.655623831151483, "step": 13683, "train/loss_ctc": 0.4750984013080597, "train/loss_error": 0.4629625976085663, "train/loss_total": 0.46538978815078735 }, { "epoch": 3.6558909965268502, "step": 13684, "train/loss_ctc": 1.487320065498352, "train/loss_error": 0.3689069151878357, "train/loss_total": 0.5925895571708679 }, { "epoch": 3.6561581619022174, "step": 13685, "train/loss_ctc": 0.4915567934513092, "train/loss_error": 0.5099686980247498, "train/loss_total": 0.5062863230705261 }, { "epoch": 3.656425327277585, "step": 13686, "train/loss_ctc": 0.8249174356460571, "train/loss_error": 0.45417487621307373, "train/loss_total": 0.5283234119415283 }, { "epoch": 3.6566924926529523, "step": 13687, "train/loss_ctc": 0.5145149230957031, "train/loss_error": 0.3982252776622772, "train/loss_total": 0.42148321866989136 }, { "epoch": 3.6569596580283195, "step": 13688, "train/loss_ctc": 0.48518258333206177, "train/loss_error": 0.46805813908576965, "train/loss_total": 0.4714830219745636 }, { "epoch": 3.657226823403687, "step": 13689, "train/loss_ctc": 1.0936148166656494, "train/loss_error": 0.47744491696357727, "train/loss_total": 0.6006789207458496 }, { "epoch": 3.6574939887790543, "grad_norm": 1.1759445667266846, "learning_rate": 8.066257013091104e-06, "loss": 0.4963, "step": 13690 }, { "epoch": 3.6574939887790543, "step": 13690, "train/loss_ctc": 0.6747069954872131, "train/loss_error": 0.3867512047290802, "train/loss_total": 0.44434237480163574 }, { 
"epoch": 3.6577611541544215, "step": 13691, "train/loss_ctc": 0.4868239760398865, "train/loss_error": 0.42123258113861084, "train/loss_total": 0.434350848197937 }, { "epoch": 3.658028319529789, "step": 13692, "train/loss_ctc": 0.2863282263278961, "train/loss_error": 0.3179696500301361, "train/loss_total": 0.3116413652896881 }, { "epoch": 3.6582954849051563, "step": 13693, "train/loss_ctc": 0.9046281576156616, "train/loss_error": 0.4718417525291443, "train/loss_total": 0.5583990812301636 }, { "epoch": 3.6585626502805235, "step": 13694, "train/loss_ctc": 0.6163296103477478, "train/loss_error": 0.43157732486724854, "train/loss_total": 0.46852779388427734 }, { "epoch": 3.658829815655891, "step": 13695, "train/loss_ctc": 0.839219331741333, "train/loss_error": 0.43314406275749207, "train/loss_total": 0.5143591165542603 }, { "epoch": 3.6590969810312584, "step": 13696, "train/loss_ctc": 0.6999332308769226, "train/loss_error": 0.40184783935546875, "train/loss_total": 0.46146494150161743 }, { "epoch": 3.6593641464066256, "step": 13697, "train/loss_ctc": 0.7197619676589966, "train/loss_error": 0.45368692278862, "train/loss_total": 0.5069019794464111 }, { "epoch": 3.659631311781993, "step": 13698, "train/loss_ctc": 0.5713127851486206, "train/loss_error": 0.47182226181030273, "train/loss_total": 0.49172037839889526 }, { "epoch": 3.6598984771573604, "step": 13699, "train/loss_ctc": 0.846871554851532, "train/loss_error": 0.43530431389808655, "train/loss_total": 0.5176177620887756 }, { "epoch": 3.6601656425327276, "grad_norm": 2.2576446533203125, "learning_rate": 8.050227090569062e-06, "loss": 0.4709, "step": 13700 }, { "epoch": 3.6601656425327276, "step": 13700, "train/loss_ctc": 0.7885288596153259, "train/loss_error": 0.4284278452396393, "train/loss_total": 0.5004480481147766 }, { "epoch": 3.660432807908095, "step": 13701, "train/loss_ctc": 0.840028703212738, "train/loss_error": 0.4765256643295288, "train/loss_total": 0.5492262840270996 }, { "epoch": 3.6606999732834624, "step": 
13702, "train/loss_ctc": 0.6524462699890137, "train/loss_error": 0.4742498993873596, "train/loss_total": 0.5098891854286194 }, { "epoch": 3.6609671386588296, "step": 13703, "train/loss_ctc": 0.4321955144405365, "train/loss_error": 0.42407944798469543, "train/loss_total": 0.42570266127586365 }, { "epoch": 3.6612343040341973, "step": 13704, "train/loss_ctc": 0.471478134393692, "train/loss_error": 0.4107879400253296, "train/loss_total": 0.4229259788990021 }, { "epoch": 3.6615014694095644, "step": 13705, "train/loss_ctc": 0.7012848258018494, "train/loss_error": 0.4086163341999054, "train/loss_total": 0.4671500325202942 }, { "epoch": 3.6617686347849316, "step": 13706, "train/loss_ctc": 0.32998421788215637, "train/loss_error": 0.48543068766593933, "train/loss_total": 0.45434141159057617 }, { "epoch": 3.6620358001602993, "step": 13707, "train/loss_ctc": 1.1898407936096191, "train/loss_error": 0.4424681067466736, "train/loss_total": 0.5919426679611206 }, { "epoch": 3.6623029655356665, "step": 13708, "train/loss_ctc": 0.418530136346817, "train/loss_error": 0.45737648010253906, "train/loss_total": 0.4496072232723236 }, { "epoch": 3.6625701309110337, "step": 13709, "train/loss_ctc": 0.3099222779273987, "train/loss_error": 0.4428577721118927, "train/loss_total": 0.4162706732749939 }, { "epoch": 3.6628372962864013, "grad_norm": 1.644471526145935, "learning_rate": 8.03419716804702e-06, "loss": 0.4788, "step": 13710 }, { "epoch": 3.6628372962864013, "step": 13710, "train/loss_ctc": 1.0767531394958496, "train/loss_error": 0.3715578019618988, "train/loss_total": 0.512596845626831 }, { "epoch": 3.6631044616617685, "step": 13711, "train/loss_ctc": 0.6476389169692993, "train/loss_error": 0.4434322118759155, "train/loss_total": 0.4842735528945923 }, { "epoch": 3.663371627037136, "step": 13712, "train/loss_ctc": 0.831591010093689, "train/loss_error": 0.4391685724258423, "train/loss_total": 0.5176531076431274 }, { "epoch": 3.6636387924125033, "step": 13713, "train/loss_ctc": 
0.5509790182113647, "train/loss_error": 0.4281836748123169, "train/loss_total": 0.4527427554130554 }, { "epoch": 3.6639059577878705, "step": 13714, "train/loss_ctc": 0.576616644859314, "train/loss_error": 0.4546802341938019, "train/loss_total": 0.47906753420829773 }, { "epoch": 3.664173123163238, "step": 13715, "train/loss_ctc": 0.5779648423194885, "train/loss_error": 0.3909890353679657, "train/loss_total": 0.4283841848373413 }, { "epoch": 3.6644402885386054, "step": 13716, "train/loss_ctc": 0.534187912940979, "train/loss_error": 0.4297070801258087, "train/loss_total": 0.4506032466888428 }, { "epoch": 3.664707453913973, "step": 13717, "train/loss_ctc": 0.9080544710159302, "train/loss_error": 0.5252074003219604, "train/loss_total": 0.6017768383026123 }, { "epoch": 3.66497461928934, "step": 13718, "train/loss_ctc": 0.4303138554096222, "train/loss_error": 0.47564831376075745, "train/loss_total": 0.46658140420913696 }, { "epoch": 3.6652417846647074, "step": 13719, "train/loss_ctc": 0.4976961612701416, "train/loss_error": 0.4632209837436676, "train/loss_total": 0.4701160192489624 }, { "epoch": 3.665508950040075, "grad_norm": 9.407690048217773, "learning_rate": 8.01816724552498e-06, "loss": 0.4864, "step": 13720 }, { "epoch": 3.665508950040075, "step": 13720, "train/loss_ctc": 1.1849452257156372, "train/loss_error": 0.413876473903656, "train/loss_total": 0.5680902004241943 }, { "epoch": 3.6657761154154422, "step": 13721, "train/loss_ctc": 0.2330406755208969, "train/loss_error": 0.41417866945266724, "train/loss_total": 0.37795108556747437 }, { "epoch": 3.6660432807908094, "step": 13722, "train/loss_ctc": 1.7216517925262451, "train/loss_error": 0.4812590479850769, "train/loss_total": 0.7293375730514526 }, { "epoch": 3.666310446166177, "step": 13723, "train/loss_ctc": 1.429619312286377, "train/loss_error": 0.4898335635662079, "train/loss_total": 0.6777907609939575 }, { "epoch": 3.6665776115415443, "step": 13724, "train/loss_ctc": 0.5517891645431519, "train/loss_error": 
0.4144498407840729, "train/loss_total": 0.4419177174568176 }, { "epoch": 3.6668447769169115, "step": 13725, "train/loss_ctc": 0.3236604332923889, "train/loss_error": 0.39483827352523804, "train/loss_total": 0.38060271739959717 }, { "epoch": 3.667111942292279, "step": 13726, "train/loss_ctc": 0.5221703052520752, "train/loss_error": 0.5718919038772583, "train/loss_total": 0.5619475841522217 }, { "epoch": 3.6673791076676463, "step": 13727, "train/loss_ctc": 0.2813969850540161, "train/loss_error": 0.3928680717945099, "train/loss_total": 0.37057384848594666 }, { "epoch": 3.6676462730430135, "step": 13728, "train/loss_ctc": 0.8806014060974121, "train/loss_error": 0.39711475372314453, "train/loss_total": 0.49381208419799805 }, { "epoch": 3.667913438418381, "step": 13729, "train/loss_ctc": 1.0034421682357788, "train/loss_error": 0.45615556836128235, "train/loss_total": 0.5656129121780396 }, { "epoch": 3.6681806037937483, "grad_norm": null, "learning_rate": 8.002137323002938e-06, "loss": 0.5168, "step": 13730 }, { "epoch": 3.6681806037937483, "step": 13730, "train/loss_ctc": 0.9281437397003174, "train/loss_error": 0.43340903520584106, "train/loss_total": 0.5323559641838074 }, { "epoch": 3.6684477691691155, "step": 13731, "train/loss_ctc": 1.0964691638946533, "train/loss_error": 0.4964328706264496, "train/loss_total": 0.6164401769638062 }, { "epoch": 3.668714934544483, "step": 13732, "train/loss_ctc": 0.728728175163269, "train/loss_error": 0.41241350769996643, "train/loss_total": 0.47567644715309143 }, { "epoch": 3.6689820999198504, "step": 13733, "train/loss_ctc": 0.5561996102333069, "train/loss_error": 0.3753262460231781, "train/loss_total": 0.4115009307861328 }, { "epoch": 3.6692492652952176, "step": 13734, "train/loss_ctc": 0.8549482226371765, "train/loss_error": 0.476347416639328, "train/loss_total": 0.5520675778388977 }, { "epoch": 3.669516430670585, "step": 13735, "train/loss_ctc": 0.7891731858253479, "train/loss_error": 0.5022131204605103, "train/loss_total":
0.5596051216125488 }, { "epoch": 3.6697835960459524, "step": 13736, "train/loss_ctc": 0.4501953423023224, "train/loss_error": 0.44501936435699463, "train/loss_total": 0.4460545778274536 }, { "epoch": 3.6700507614213196, "step": 13737, "train/loss_ctc": 0.5632209777832031, "train/loss_error": 0.44951581954956055, "train/loss_total": 0.4722568690776825 }, { "epoch": 3.6703179267966872, "step": 13738, "train/loss_ctc": 0.41851571202278137, "train/loss_error": 0.4022914171218872, "train/loss_total": 0.4055362939834595 }, { "epoch": 3.6705850921720544, "step": 13739, "train/loss_ctc": 0.44231706857681274, "train/loss_error": 0.46994397044181824, "train/loss_total": 0.4644186198711395 }, { "epoch": 3.6708522575474216, "grad_norm": 3.0024077892303467, "learning_rate": 7.987710392733101e-06, "loss": 0.4936, "step": 13740 }, { "epoch": 3.6708522575474216, "step": 13740, "train/loss_ctc": 0.21825942397117615, "train/loss_error": 0.44990572333335876, "train/loss_total": 0.40357646346092224 }, { "epoch": 3.6711194229227893, "step": 13741, "train/loss_ctc": 0.683418869972229, "train/loss_error": 0.4626292586326599, "train/loss_total": 0.5067871809005737 }, { "epoch": 3.6713865882981565, "step": 13742, "train/loss_ctc": 0.5671526193618774, "train/loss_error": 0.45373278856277466, "train/loss_total": 0.47641676664352417 }, { "epoch": 3.6716537536735236, "step": 13743, "train/loss_ctc": 1.0531413555145264, "train/loss_error": 0.476605623960495, "train/loss_total": 0.5919127464294434 }, { "epoch": 3.6719209190488913, "step": 13744, "train/loss_ctc": 0.7885057926177979, "train/loss_error": 0.38918861746788025, "train/loss_total": 0.4690520763397217 }, { "epoch": 3.6721880844242585, "step": 13745, "train/loss_ctc": 0.4883977174758911, "train/loss_error": 0.39383190870285034, "train/loss_total": 0.41274508833885193 }, { "epoch": 3.672455249799626, "step": 13746, "train/loss_ctc": 0.5368807315826416, "train/loss_error": 0.5434278845787048, "train/loss_total": 0.5421184301376343 }, { 
"epoch": 3.6727224151749933, "step": 13747, "train/loss_ctc": 0.4306487739086151, "train/loss_error": 0.4799555242061615, "train/loss_total": 0.4700941741466522 }, { "epoch": 3.6729895805503605, "step": 13748, "train/loss_ctc": 0.6068809628486633, "train/loss_error": 0.40765127539634705, "train/loss_total": 0.4474972188472748 }, { "epoch": 3.673256745925728, "step": 13749, "train/loss_ctc": 1.1449657678604126, "train/loss_error": 0.4175204038619995, "train/loss_total": 0.56300950050354 }, { "epoch": 3.6735239113010953, "grad_norm": 2.8387527465820312, "learning_rate": 7.97168047021106e-06, "loss": 0.4883, "step": 13750 }, { "epoch": 3.6735239113010953, "step": 13750, "train/loss_ctc": 0.47080540657043457, "train/loss_error": 0.43778282403945923, "train/loss_total": 0.4443873465061188 }, { "epoch": 3.673791076676463, "step": 13751, "train/loss_ctc": 1.0938069820404053, "train/loss_error": 0.42843085527420044, "train/loss_total": 0.5615060925483704 }, { "epoch": 3.67405824205183, "step": 13752, "train/loss_ctc": 1.0288422107696533, "train/loss_error": 0.42358526587486267, "train/loss_total": 0.5446366667747498 }, { "epoch": 3.6743254074271974, "step": 13753, "train/loss_ctc": 0.8214833736419678, "train/loss_error": 0.43666449189186096, "train/loss_total": 0.5136282444000244 }, { "epoch": 3.674592572802565, "step": 13754, "train/loss_ctc": 0.3802797794342041, "train/loss_error": 0.37575241923332214, "train/loss_total": 0.3766579031944275 }, { "epoch": 3.674859738177932, "step": 13755, "train/loss_ctc": 0.8318085074424744, "train/loss_error": 0.5047727227210999, "train/loss_total": 0.5701798796653748 }, { "epoch": 3.6751269035532994, "step": 13756, "train/loss_ctc": 0.5833680629730225, "train/loss_error": 0.4716169536113739, "train/loss_total": 0.4939671754837036 }, { "epoch": 3.675394068928667, "step": 13757, "train/loss_ctc": 0.6994568109512329, "train/loss_error": 0.4366160035133362, "train/loss_total": 0.4891842007637024 }, { "epoch": 3.6756612343040342, "step": 
13758, "train/loss_ctc": 0.8044897317886353, "train/loss_error": 0.45660245418548584, "train/loss_total": 0.5261799097061157 }, { "epoch": 3.6759283996794014, "step": 13759, "train/loss_ctc": 0.6976927518844604, "train/loss_error": 0.46770837903022766, "train/loss_total": 0.5137052536010742 }, { "epoch": 3.676195565054769, "grad_norm": 4.182204723358154, "learning_rate": 7.95565054768902e-06, "loss": 0.5034, "step": 13760 }, { "epoch": 3.676195565054769, "step": 13760, "train/loss_ctc": 0.5225173234939575, "train/loss_error": 0.3906429409980774, "train/loss_total": 0.4170178174972534 }, { "epoch": 3.6764627304301363, "step": 13761, "train/loss_ctc": 0.7353938221931458, "train/loss_error": 0.4049724042415619, "train/loss_total": 0.4710566997528076 }, { "epoch": 3.6767298958055035, "step": 13762, "train/loss_ctc": 0.7473574876785278, "train/loss_error": 0.43119534850120544, "train/loss_total": 0.49442780017852783 }, { "epoch": 3.676997061180871, "step": 13763, "train/loss_ctc": 0.6008774042129517, "train/loss_error": 0.40033233165740967, "train/loss_total": 0.4404413402080536 }, { "epoch": 3.6772642265562383, "step": 13764, "train/loss_ctc": 0.912427544593811, "train/loss_error": 0.39802631735801697, "train/loss_total": 0.5009065866470337 }, { "epoch": 3.6775313919316055, "step": 13765, "train/loss_ctc": 1.4677624702453613, "train/loss_error": 0.44926217198371887, "train/loss_total": 0.6529622077941895 }, { "epoch": 3.677798557306973, "step": 13766, "train/loss_ctc": 0.6249874830245972, "train/loss_error": 0.4355408251285553, "train/loss_total": 0.47343015670776367 }, { "epoch": 3.6780657226823403, "step": 13767, "train/loss_ctc": 0.5114288926124573, "train/loss_error": 0.529076337814331, "train/loss_total": 0.5255468487739563 }, { "epoch": 3.6783328880577075, "step": 13768, "train/loss_ctc": 0.821661114692688, "train/loss_error": 0.49222618341445923, "train/loss_total": 0.558113157749176 }, { "epoch": 3.678600053433075, "step": 13769, "train/loss_ctc": 
1.1740461587905884, "train/loss_error": 0.4009585678577423, "train/loss_total": 0.5555760860443115 }, { "epoch": 3.6788672188084424, "grad_norm": 3.8003876209259033, "learning_rate": 7.939620625166979e-06, "loss": 0.5089, "step": 13770 }, { "epoch": 3.6788672188084424, "step": 13770, "train/loss_ctc": 0.6835495233535767, "train/loss_error": 0.40723899006843567, "train/loss_total": 0.4625011086463928 }, { "epoch": 3.6791343841838096, "step": 13771, "train/loss_ctc": 0.8318572044372559, "train/loss_error": 0.45972469449043274, "train/loss_total": 0.5341511964797974 }, { "epoch": 3.679401549559177, "step": 13772, "train/loss_ctc": 0.4577009081840515, "train/loss_error": 0.4801405072212219, "train/loss_total": 0.4756526052951813 }, { "epoch": 3.6796687149345444, "step": 13773, "train/loss_ctc": 0.4867829382419586, "train/loss_error": 0.5426045656204224, "train/loss_total": 0.531440258026123 }, { "epoch": 3.6799358803099116, "step": 13774, "train/loss_ctc": 1.3231728076934814, "train/loss_error": 0.47642362117767334, "train/loss_total": 0.6457734704017639 }, { "epoch": 3.6802030456852792, "step": 13775, "train/loss_ctc": 0.9514012336730957, "train/loss_error": 0.4463541805744171, "train/loss_total": 0.5473635792732239 }, { "epoch": 3.6804702110606464, "step": 13776, "train/loss_ctc": 0.6546124815940857, "train/loss_error": 0.4792669117450714, "train/loss_total": 0.5143359899520874 }, { "epoch": 3.6807373764360136, "step": 13777, "train/loss_ctc": 0.6083091497421265, "train/loss_error": 0.5232154130935669, "train/loss_total": 0.5402341485023499 }, { "epoch": 3.6810045418113813, "step": 13778, "train/loss_ctc": 0.4307515621185303, "train/loss_error": 0.42805957794189453, "train/loss_total": 0.42859798669815063 }, { "epoch": 3.6812717071867485, "step": 13779, "train/loss_ctc": 0.7328977584838867, "train/loss_error": 0.3939884603023529, "train/loss_total": 0.46177029609680176 }, { "epoch": 3.681538872562116, "grad_norm": 3.711132049560547, "learning_rate": 
7.923590702644937e-06, "loss": 0.5142, "step": 13780 }, { "epoch": 3.681538872562116, "step": 13780, "train/loss_ctc": 1.0010496377944946, "train/loss_error": 0.4778116047382355, "train/loss_total": 0.5824592113494873 }, { "epoch": 3.6818060379374833, "step": 13781, "train/loss_ctc": 0.6515669822692871, "train/loss_error": 0.4787374436855316, "train/loss_total": 0.5133033990859985 }, { "epoch": 3.682073203312851, "step": 13782, "train/loss_ctc": 0.7744753360748291, "train/loss_error": 0.42834120988845825, "train/loss_total": 0.4975680410861969 }, { "epoch": 3.682340368688218, "step": 13783, "train/loss_ctc": 1.2435890436172485, "train/loss_error": 0.5279600620269775, "train/loss_total": 0.6710858345031738 }, { "epoch": 3.6826075340635853, "step": 13784, "train/loss_ctc": 0.5270446538925171, "train/loss_error": 0.3841300308704376, "train/loss_total": 0.412712961435318 }, { "epoch": 3.682874699438953, "step": 13785, "train/loss_ctc": 0.8157112002372742, "train/loss_error": 0.4032197892665863, "train/loss_total": 0.4857180714607239 }, { "epoch": 3.68314186481432, "step": 13786, "train/loss_ctc": 0.46425768733024597, "train/loss_error": 0.3919898569583893, "train/loss_total": 0.40644344687461853 }, { "epoch": 3.6834090301896873, "step": 13787, "train/loss_ctc": 0.378268837928772, "train/loss_error": 0.39478564262390137, "train/loss_total": 0.39148229360580444 }, { "epoch": 3.683676195565055, "step": 13788, "train/loss_ctc": 0.6747050285339355, "train/loss_error": 0.41740432381629944, "train/loss_total": 0.46886447072029114 }, { "epoch": 3.683943360940422, "step": 13789, "train/loss_ctc": 0.6056351065635681, "train/loss_error": 0.42896923422813416, "train/loss_total": 0.4643024206161499 }, { "epoch": 3.6842105263157894, "grad_norm": 1.575981616973877, "learning_rate": 7.907560780122897e-06, "loss": 0.4894, "step": 13790 }, { "epoch": 3.6842105263157894, "step": 13790, "train/loss_ctc": 0.6750833988189697, "train/loss_error": 0.41886240243911743, "train/loss_total": 
0.4701066017150879 }, { "epoch": 3.684477691691157, "step": 13791, "train/loss_ctc": 0.5073355436325073, "train/loss_error": 0.4608856737613678, "train/loss_total": 0.4701756536960602 }, { "epoch": 3.684744857066524, "step": 13792, "train/loss_ctc": 0.445482075214386, "train/loss_error": 0.5123593211174011, "train/loss_total": 0.49898388981819153 }, { "epoch": 3.6850120224418914, "step": 13793, "train/loss_ctc": 0.39511120319366455, "train/loss_error": 0.45598211884498596, "train/loss_total": 0.4438079595565796 }, { "epoch": 3.685279187817259, "step": 13794, "train/loss_ctc": 0.4088108241558075, "train/loss_error": 0.3789963126182556, "train/loss_total": 0.38495922088623047 }, { "epoch": 3.6855463531926262, "step": 13795, "train/loss_ctc": 1.2623862028121948, "train/loss_error": 0.469077467918396, "train/loss_total": 0.6277392506599426 }, { "epoch": 3.6858135185679934, "step": 13796, "train/loss_ctc": 1.0085803270339966, "train/loss_error": 0.4942377507686615, "train/loss_total": 0.5971062779426575 }, { "epoch": 3.686080683943361, "step": 13797, "train/loss_ctc": 0.5599240660667419, "train/loss_error": 0.42872634530067444, "train/loss_total": 0.45496588945388794 }, { "epoch": 3.6863478493187283, "step": 13798, "train/loss_ctc": 1.0179705619812012, "train/loss_error": 0.2949289083480835, "train/loss_total": 0.4395372271537781 }, { "epoch": 3.6866150146940955, "step": 13799, "train/loss_ctc": 0.5231229066848755, "train/loss_error": 0.4400685429573059, "train/loss_total": 0.45667940378189087 }, { "epoch": 3.686882180069463, "grad_norm": 2.9590377807617188, "learning_rate": 7.891530857600855e-06, "loss": 0.4844, "step": 13800 }, { "epoch": 3.686882180069463, "step": 13800, "train/loss_ctc": 0.29598015546798706, "train/loss_error": 0.46487048268318176, "train/loss_total": 0.43109241127967834 }, { "epoch": 3.6871493454448303, "step": 13801, "train/loss_ctc": 0.7267427444458008, "train/loss_error": 0.47292235493659973, "train/loss_total": 0.523686408996582 }, { "epoch": 
3.6874165108201975, "step": 13802, "train/loss_ctc": 0.6234145164489746, "train/loss_error": 0.40124648809432983, "train/loss_total": 0.4456801116466522 }, { "epoch": 3.687683676195565, "step": 13803, "train/loss_ctc": 0.6934584975242615, "train/loss_error": 0.4711665213108063, "train/loss_total": 0.5156249403953552 }, { "epoch": 3.6879508415709323, "step": 13804, "train/loss_ctc": 0.5209994912147522, "train/loss_error": 0.5441970825195312, "train/loss_total": 0.5395575761795044 }, { "epoch": 3.6882180069462995, "step": 13805, "train/loss_ctc": 1.0630940198898315, "train/loss_error": 0.4480404853820801, "train/loss_total": 0.5710511803627014 }, { "epoch": 3.688485172321667, "step": 13806, "train/loss_ctc": 0.5440343022346497, "train/loss_error": 0.465461790561676, "train/loss_total": 0.48117631673812866 }, { "epoch": 3.6887523376970344, "step": 13807, "train/loss_ctc": 0.6212674379348755, "train/loss_error": 0.39911144971847534, "train/loss_total": 0.4435426592826843 }, { "epoch": 3.6890195030724016, "step": 13808, "train/loss_ctc": 0.555181622505188, "train/loss_error": 0.37968552112579346, "train/loss_total": 0.4147847592830658 }, { "epoch": 3.689286668447769, "step": 13809, "train/loss_ctc": 0.32550710439682007, "train/loss_error": 0.3245900869369507, "train/loss_total": 0.32477349042892456 }, { "epoch": 3.6895538338231364, "grad_norm": 4.137174606323242, "learning_rate": 7.875500935078813e-06, "loss": 0.4691, "step": 13810 }, { "epoch": 3.6895538338231364, "step": 13810, "train/loss_ctc": 1.1407387256622314, "train/loss_error": 0.446608304977417, "train/loss_total": 0.5854344367980957 }, { "epoch": 3.689820999198504, "step": 13811, "train/loss_ctc": 0.521384596824646, "train/loss_error": 0.45931848883628845, "train/loss_total": 0.4717317223548889 }, { "epoch": 3.6900881645738712, "step": 13812, "train/loss_ctc": 0.5263734459877014, "train/loss_error": 0.4795914888381958, "train/loss_total": 0.48894786834716797 }, { "epoch": 3.6903553299492384, "step": 13813, 
"train/loss_ctc": 0.7341257929801941, "train/loss_error": 0.47600701451301575, "train/loss_total": 0.5276308059692383 }, { "epoch": 3.690622495324606, "step": 13814, "train/loss_ctc": 0.9504156112670898, "train/loss_error": 0.47687771916389465, "train/loss_total": 0.5715852975845337 }, { "epoch": 3.6908896606999733, "step": 13815, "train/loss_ctc": 0.9934896230697632, "train/loss_error": 0.4317210614681244, "train/loss_total": 0.5440747737884521 }, { "epoch": 3.691156826075341, "step": 13816, "train/loss_ctc": 0.5230738520622253, "train/loss_error": 0.44830432534217834, "train/loss_total": 0.4632582366466522 }, { "epoch": 3.691423991450708, "step": 13817, "train/loss_ctc": 1.2587307691574097, "train/loss_error": 0.3992794454097748, "train/loss_total": 0.5711697340011597 }, { "epoch": 3.6916911568260753, "step": 13818, "train/loss_ctc": 1.2261594533920288, "train/loss_error": 0.49224984645843506, "train/loss_total": 0.6390317678451538 }, { "epoch": 3.691958322201443, "step": 13819, "train/loss_ctc": 0.37549111247062683, "train/loss_error": 0.49199292063713074, "train/loss_total": 0.4686925709247589 }, { "epoch": 3.69222548757681, "grad_norm": 2.8682005405426025, "learning_rate": 7.859471012556773e-06, "loss": 0.5332, "step": 13820 }, { "epoch": 3.69222548757681, "step": 13820, "train/loss_ctc": 0.5068814754486084, "train/loss_error": 0.44836875796318054, "train/loss_total": 0.46007129549980164 }, { "epoch": 3.6924926529521773, "step": 13821, "train/loss_ctc": 0.9946275949478149, "train/loss_error": 0.4624124765396118, "train/loss_total": 0.5688555240631104 }, { "epoch": 3.692759818327545, "step": 13822, "train/loss_ctc": 0.4383472800254822, "train/loss_error": 0.34010380506515503, "train/loss_total": 0.35975250601768494 }, { "epoch": 3.693026983702912, "step": 13823, "train/loss_ctc": 0.8033950328826904, "train/loss_error": 0.4810061752796173, "train/loss_total": 0.5454839468002319 }, { "epoch": 3.6932941490782794, "step": 13824, "train/loss_ctc": 
1.0794398784637451, "train/loss_error": 0.43632712960243225, "train/loss_total": 0.5649496912956238 }, { "epoch": 3.693561314453647, "step": 13825, "train/loss_ctc": 0.4872359335422516, "train/loss_error": 0.3816632628440857, "train/loss_total": 0.4027777910232544 }, { "epoch": 3.693828479829014, "step": 13826, "train/loss_ctc": 0.7137970924377441, "train/loss_error": 0.4189029037952423, "train/loss_total": 0.4778817296028137 }, { "epoch": 3.6940956452043814, "step": 13827, "train/loss_ctc": 0.5139822363853455, "train/loss_error": 0.46427565813064575, "train/loss_total": 0.4742169976234436 }, { "epoch": 3.694362810579749, "step": 13828, "train/loss_ctc": 0.9501519799232483, "train/loss_error": 0.45293933153152466, "train/loss_total": 0.5523818731307983 }, { "epoch": 3.694629975955116, "step": 13829, "train/loss_ctc": 0.349054753780365, "train/loss_error": 0.38470733165740967, "train/loss_total": 0.3775768280029297 }, { "epoch": 3.6948971413304834, "grad_norm": 2.627948522567749, "learning_rate": 7.84344109003473e-06, "loss": 0.4784, "step": 13830 }, { "epoch": 3.6948971413304834, "step": 13830, "train/loss_ctc": 0.6343981027603149, "train/loss_error": 0.48884260654449463, "train/loss_total": 0.5179536938667297 }, { "epoch": 3.695164306705851, "step": 13831, "train/loss_ctc": 0.705501139163971, "train/loss_error": 0.4087195098400116, "train/loss_total": 0.46807584166526794 }, { "epoch": 3.6954314720812182, "step": 13832, "train/loss_ctc": 0.6133681535720825, "train/loss_error": 0.43001142144203186, "train/loss_total": 0.4666827619075775 }, { "epoch": 3.6956986374565854, "step": 13833, "train/loss_ctc": 0.9246223568916321, "train/loss_error": 0.45680224895477295, "train/loss_total": 0.5503662824630737 }, { "epoch": 3.695965802831953, "step": 13834, "train/loss_ctc": 0.7830090522766113, "train/loss_error": 0.4495285451412201, "train/loss_total": 0.5162246227264404 }, { "epoch": 3.6962329682073203, "step": 13835, "train/loss_ctc": 0.8555029630661011, 
"train/loss_error": 0.4145462214946747, "train/loss_total": 0.5027375817298889 }, { "epoch": 3.6965001335826875, "step": 13836, "train/loss_ctc": 1.5057885646820068, "train/loss_error": 0.39053910970687866, "train/loss_total": 0.6135890483856201 }, { "epoch": 3.696767298958055, "step": 13837, "train/loss_ctc": 0.6839394569396973, "train/loss_error": 0.42459988594055176, "train/loss_total": 0.4764678180217743 }, { "epoch": 3.6970344643334223, "step": 13838, "train/loss_ctc": 0.4426610767841339, "train/loss_error": 0.3651813566684723, "train/loss_total": 0.38067731261253357 }, { "epoch": 3.6973016297087895, "step": 13839, "train/loss_ctc": 0.37568560242652893, "train/loss_error": 0.3631806969642639, "train/loss_total": 0.3656817078590393 }, { "epoch": 3.697568795084157, "grad_norm": 1.7492448091506958, "learning_rate": 7.82741116751269e-06, "loss": 0.4858, "step": 13840 }, { "epoch": 3.697568795084157, "step": 13840, "train/loss_ctc": 0.5676860213279724, "train/loss_error": 0.44808271527290344, "train/loss_total": 0.47200340032577515 }, { "epoch": 3.6978359604595243, "step": 13841, "train/loss_ctc": 0.5073279738426208, "train/loss_error": 0.45162954926490784, "train/loss_total": 0.4627692699432373 }, { "epoch": 3.6981031258348915, "step": 13842, "train/loss_ctc": 1.19611394405365, "train/loss_error": 0.49089545011520386, "train/loss_total": 0.631939172744751 }, { "epoch": 3.698370291210259, "step": 13843, "train/loss_ctc": 1.5815753936767578, "train/loss_error": 0.439755916595459, "train/loss_total": 0.6681197881698608 }, { "epoch": 3.6986374565856264, "step": 13844, "train/loss_ctc": 1.100629448890686, "train/loss_error": 0.4415460228919983, "train/loss_total": 0.5733627080917358 }, { "epoch": 3.698904621960994, "step": 13845, "train/loss_ctc": 0.7675737142562866, "train/loss_error": 0.450004518032074, "train/loss_total": 0.5135183334350586 }, { "epoch": 3.699171787336361, "step": 13846, "train/loss_ctc": 0.7034282684326172, "train/loss_error": 0.42594781517982483, 
"train/loss_total": 0.48144394159317017 }, { "epoch": 3.6994389527117284, "step": 13847, "train/loss_ctc": 0.6272342801094055, "train/loss_error": 0.5163328647613525, "train/loss_total": 0.53851318359375 }, { "epoch": 3.699706118087096, "step": 13848, "train/loss_ctc": 1.1329784393310547, "train/loss_error": 0.4256054162979126, "train/loss_total": 0.567080020904541 }, { "epoch": 3.6999732834624632, "step": 13849, "train/loss_ctc": 0.6030104160308838, "train/loss_error": 0.48306140303611755, "train/loss_total": 0.5070512294769287 }, { "epoch": 3.700240448837831, "grad_norm": 2.8841145038604736, "learning_rate": 7.81138124499065e-06, "loss": 0.5416, "step": 13850 }, { "epoch": 3.700240448837831, "step": 13850, "train/loss_ctc": 0.3740083873271942, "train/loss_error": 0.4896838665008545, "train/loss_total": 0.46654877066612244 }, { "epoch": 3.700507614213198, "step": 13851, "train/loss_ctc": 0.4065307378768921, "train/loss_error": 0.4820626378059387, "train/loss_total": 0.4669562876224518 }, { "epoch": 3.7007747795885653, "step": 13852, "train/loss_ctc": 0.8467113971710205, "train/loss_error": 0.47348982095718384, "train/loss_total": 0.5481341481208801 }, { "epoch": 3.701041944963933, "step": 13853, "train/loss_ctc": 0.7661365270614624, "train/loss_error": 0.42837244272232056, "train/loss_total": 0.49592524766921997 }, { "epoch": 3.7013091103393, "step": 13854, "train/loss_ctc": 0.4015381932258606, "train/loss_error": 0.4431302845478058, "train/loss_total": 0.43481186032295227 }, { "epoch": 3.7015762757146673, "step": 13855, "train/loss_ctc": 0.39772099256515503, "train/loss_error": 0.344755083322525, "train/loss_total": 0.35534828901290894 }, { "epoch": 3.701843441090035, "step": 13856, "train/loss_ctc": 0.7899322509765625, "train/loss_error": 0.4339908957481384, "train/loss_total": 0.5051791667938232 }, { "epoch": 3.702110606465402, "step": 13857, "train/loss_ctc": 0.8743117451667786, "train/loss_error": 0.4312893748283386, "train/loss_total": 0.5198938846588135 }, 
{ "epoch": 3.7023777718407693, "step": 13858, "train/loss_ctc": 1.0170238018035889, "train/loss_error": 0.48874565958976746, "train/loss_total": 0.5944012999534607 }, { "epoch": 3.702644937216137, "step": 13859, "train/loss_ctc": 0.7763246297836304, "train/loss_error": 0.39893531799316406, "train/loss_total": 0.4744132161140442 }, { "epoch": 3.702912102591504, "grad_norm": 1.998902678489685, "learning_rate": 7.795351322468608e-06, "loss": 0.4862, "step": 13860 }, { "epoch": 3.702912102591504, "step": 13860, "train/loss_ctc": 0.5228366255760193, "train/loss_error": 0.4126496911048889, "train/loss_total": 0.434687077999115 }, { "epoch": 3.7031792679668714, "step": 13861, "train/loss_ctc": 0.38422632217407227, "train/loss_error": 0.39817577600479126, "train/loss_total": 0.39538589119911194 }, { "epoch": 3.703446433342239, "step": 13862, "train/loss_ctc": 0.6604368686676025, "train/loss_error": 0.4637540876865387, "train/loss_total": 0.5030906200408936 }, { "epoch": 3.703713598717606, "step": 13863, "train/loss_ctc": 1.4179458618164062, "train/loss_error": 0.42460957169532776, "train/loss_total": 0.6232768297195435 }, { "epoch": 3.7039807640929734, "step": 13864, "train/loss_ctc": 0.697012186050415, "train/loss_error": 0.5002306699752808, "train/loss_total": 0.5395869612693787 }, { "epoch": 3.704247929468341, "step": 13865, "train/loss_ctc": 1.556283712387085, "train/loss_error": 0.4206337630748749, "train/loss_total": 0.647763729095459 }, { "epoch": 3.7045150948437082, "step": 13866, "train/loss_ctc": 1.152099847793579, "train/loss_error": 0.4876542389392853, "train/loss_total": 0.620543360710144 }, { "epoch": 3.7047822602190754, "step": 13867, "train/loss_ctc": 0.3747698962688446, "train/loss_error": 0.42681556940078735, "train/loss_total": 0.41640642285346985 }, { "epoch": 3.705049425594443, "step": 13868, "train/loss_ctc": 0.4519055187702179, "train/loss_error": 0.4830513596534729, "train/loss_total": 0.4768221974372864 }, { "epoch": 3.7053165909698103, "step": 
13869, "train/loss_ctc": 0.6225242614746094, "train/loss_error": 0.44597184658050537, "train/loss_total": 0.4812823534011841 }, { "epoch": 3.7055837563451774, "grad_norm": 1.570997714996338, "learning_rate": 7.779321399946568e-06, "loss": 0.5139, "step": 13870 }, { "epoch": 3.7055837563451774, "step": 13870, "train/loss_ctc": 0.4444364607334137, "train/loss_error": 0.41209280490875244, "train/loss_total": 0.41856154799461365 }, { "epoch": 3.705850921720545, "step": 13871, "train/loss_ctc": 0.5946032404899597, "train/loss_error": 0.4745924770832062, "train/loss_total": 0.49859464168548584 }, { "epoch": 3.7061180870959123, "step": 13872, "train/loss_ctc": 0.9039943218231201, "train/loss_error": 0.43960970640182495, "train/loss_total": 0.532486617565155 }, { "epoch": 3.7063852524712795, "step": 13873, "train/loss_ctc": 0.5990402698516846, "train/loss_error": 0.400719553232193, "train/loss_total": 0.4403837025165558 }, { "epoch": 3.706652417846647, "step": 13874, "train/loss_ctc": 0.9407380819320679, "train/loss_error": 0.47457677125930786, "train/loss_total": 0.5678090453147888 }, { "epoch": 3.7069195832220143, "step": 13875, "train/loss_ctc": 0.6685128211975098, "train/loss_error": 0.3789310157299042, "train/loss_total": 0.43684738874435425 }, { "epoch": 3.7071867485973815, "step": 13876, "train/loss_ctc": 1.1756064891815186, "train/loss_error": 0.4431154131889343, "train/loss_total": 0.5896136164665222 }, { "epoch": 3.707453913972749, "step": 13877, "train/loss_ctc": 1.095503807067871, "train/loss_error": 0.4302787780761719, "train/loss_total": 0.5633237957954407 }, { "epoch": 3.7077210793481163, "step": 13878, "train/loss_ctc": 0.6450924277305603, "train/loss_error": 0.42322075366973877, "train/loss_total": 0.46759510040283203 }, { "epoch": 3.707988244723484, "step": 13879, "train/loss_ctc": 0.6373618841171265, "train/loss_error": 0.4365684986114502, "train/loss_total": 0.4767271876335144 }, { "epoch": 3.708255410098851, "grad_norm": 2.287205219268799, 
"learning_rate": 7.763291477424526e-06, "loss": 0.4992, "step": 13880 }, { "epoch": 3.708255410098851, "step": 13880, "train/loss_ctc": 1.0305454730987549, "train/loss_error": 0.38824957609176636, "train/loss_total": 0.5167087316513062 }, { "epoch": 3.708522575474219, "step": 13881, "train/loss_ctc": 0.7151476740837097, "train/loss_error": 0.4790917932987213, "train/loss_total": 0.5263029932975769 }, { "epoch": 3.708789740849586, "step": 13882, "train/loss_ctc": 0.8547850847244263, "train/loss_error": 0.48756250739097595, "train/loss_total": 0.561007022857666 }, { "epoch": 3.709056906224953, "step": 13883, "train/loss_ctc": 1.051934003829956, "train/loss_error": 0.5301511287689209, "train/loss_total": 0.6345077157020569 }, { "epoch": 3.709324071600321, "step": 13884, "train/loss_ctc": 0.29615122079849243, "train/loss_error": 0.38554832339286804, "train/loss_total": 0.36766889691352844 }, { "epoch": 3.709591236975688, "step": 13885, "train/loss_ctc": 0.6125174760818481, "train/loss_error": 0.4318065643310547, "train/loss_total": 0.4679487347602844 }, { "epoch": 3.7098584023510552, "step": 13886, "train/loss_ctc": 0.4775453805923462, "train/loss_error": 0.42544814944267273, "train/loss_total": 0.4358676075935364 }, { "epoch": 3.710125567726423, "step": 13887, "train/loss_ctc": 0.6666409373283386, "train/loss_error": 0.49134621024131775, "train/loss_total": 0.5264051556587219 }, { "epoch": 3.71039273310179, "step": 13888, "train/loss_ctc": 1.1746920347213745, "train/loss_error": 0.36566466093063354, "train/loss_total": 0.5274701118469238 }, { "epoch": 3.7106598984771573, "step": 13889, "train/loss_ctc": 1.3462193012237549, "train/loss_error": 0.3827856481075287, "train/loss_total": 0.575472354888916 }, { "epoch": 3.710927063852525, "grad_norm": 3.922854423522949, "learning_rate": 7.747261554902484e-06, "loss": 0.5139, "step": 13890 }, { "epoch": 3.710927063852525, "step": 13890, "train/loss_ctc": 1.0180485248565674, "train/loss_error": 0.5297762751579285, 
"train/loss_total": 0.6274307370185852 }, { "epoch": 3.711194229227892, "step": 13891, "train/loss_ctc": 0.39851611852645874, "train/loss_error": 0.45492708683013916, "train/loss_total": 0.4436448812484741 }, { "epoch": 3.7114613946032593, "step": 13892, "train/loss_ctc": 0.7658673524856567, "train/loss_error": 0.4387568235397339, "train/loss_total": 0.5041789412498474 }, { "epoch": 3.711728559978627, "step": 13893, "train/loss_ctc": 0.9731886386871338, "train/loss_error": 0.4862897992134094, "train/loss_total": 0.5836696028709412 }, { "epoch": 3.711995725353994, "step": 13894, "train/loss_ctc": 0.4369214177131653, "train/loss_error": 0.3768608868122101, "train/loss_total": 0.38887301087379456 }, { "epoch": 3.7122628907293613, "step": 13895, "train/loss_ctc": 0.8056107759475708, "train/loss_error": 0.4429585039615631, "train/loss_total": 0.5154889822006226 }, { "epoch": 3.712530056104729, "step": 13896, "train/loss_ctc": 0.5671283006668091, "train/loss_error": 0.4152352809906006, "train/loss_total": 0.44561389088630676 }, { "epoch": 3.712797221480096, "step": 13897, "train/loss_ctc": 0.4134258031845093, "train/loss_error": 0.44977226853370667, "train/loss_total": 0.4425030052661896 }, { "epoch": 3.7130643868554634, "step": 13898, "train/loss_ctc": 0.6126556396484375, "train/loss_error": 0.4014235734939575, "train/loss_total": 0.44366997480392456 }, { "epoch": 3.713331552230831, "step": 13899, "train/loss_ctc": 0.860000491142273, "train/loss_error": 0.3844144344329834, "train/loss_total": 0.4795316457748413 }, { "epoch": 3.713598717606198, "grad_norm": 3.571946620941162, "learning_rate": 7.731231632380444e-06, "loss": 0.4875, "step": 13900 }, { "epoch": 3.713598717606198, "step": 13900, "train/loss_ctc": 0.6412451863288879, "train/loss_error": 0.4821093678474426, "train/loss_total": 0.5139365196228027 }, { "epoch": 3.7138658829815654, "step": 13901, "train/loss_ctc": 1.1323485374450684, "train/loss_error": 0.4411855936050415, "train/loss_total": 0.5794181823730469 
}, { "epoch": 3.714133048356933, "step": 13902, "train/loss_ctc": 0.2861908972263336, "train/loss_error": 0.44981783628463745, "train/loss_total": 0.4170924723148346 }, { "epoch": 3.7144002137323002, "step": 13903, "train/loss_ctc": 0.3981037139892578, "train/loss_error": 0.4404166340827942, "train/loss_total": 0.4319540560245514 }, { "epoch": 3.7146673791076674, "step": 13904, "train/loss_ctc": 1.3236759901046753, "train/loss_error": 0.526033878326416, "train/loss_total": 0.6855623126029968 }, { "epoch": 3.714934544483035, "step": 13905, "train/loss_ctc": 0.3981202244758606, "train/loss_error": 0.3707027733325958, "train/loss_total": 0.3761862814426422 }, { "epoch": 3.7152017098584023, "step": 13906, "train/loss_ctc": 0.3544865846633911, "train/loss_error": 0.433843195438385, "train/loss_total": 0.4179718792438507 }, { "epoch": 3.7154688752337695, "step": 13907, "train/loss_ctc": 0.5292999744415283, "train/loss_error": 0.49686700105667114, "train/loss_total": 0.5033535957336426 }, { "epoch": 3.715736040609137, "step": 13908, "train/loss_ctc": 0.8016291856765747, "train/loss_error": 0.44202813506126404, "train/loss_total": 0.513948380947113 }, { "epoch": 3.7160032059845043, "step": 13909, "train/loss_ctc": 0.7226169109344482, "train/loss_error": 0.4673846662044525, "train/loss_total": 0.5184311270713806 }, { "epoch": 3.716270371359872, "grad_norm": 1.3416271209716797, "learning_rate": 7.715201709858402e-06, "loss": 0.4958, "step": 13910 }, { "epoch": 3.716270371359872, "step": 13910, "train/loss_ctc": 0.4537685215473175, "train/loss_error": 0.42751410603523254, "train/loss_total": 0.43276500701904297 }, { "epoch": 3.716537536735239, "step": 13911, "train/loss_ctc": 0.3469715714454651, "train/loss_error": 0.47204700112342834, "train/loss_total": 0.4470319151878357 }, { "epoch": 3.7168047021106063, "step": 13912, "train/loss_ctc": 0.8632326126098633, "train/loss_error": 0.4000310003757477, "train/loss_total": 0.49267134070396423 }, { "epoch": 3.717071867485974, 
"step": 13913, "train/loss_ctc": 1.0137420892715454, "train/loss_error": 0.42785313725471497, "train/loss_total": 0.545030951499939 }, { "epoch": 3.717339032861341, "step": 13914, "train/loss_ctc": 0.6743025779724121, "train/loss_error": 0.4045734703540802, "train/loss_total": 0.45851930975914 }, { "epoch": 3.717606198236709, "step": 13915, "train/loss_ctc": 0.7549890279769897, "train/loss_error": 0.4373437166213989, "train/loss_total": 0.500872790813446 }, { "epoch": 3.717873363612076, "step": 13916, "train/loss_ctc": 0.5578325986862183, "train/loss_error": 0.5565907955169678, "train/loss_total": 0.5568391680717468 }, { "epoch": 3.718140528987443, "step": 13917, "train/loss_ctc": 0.6698756217956543, "train/loss_error": 0.4216066300868988, "train/loss_total": 0.4712604284286499 }, { "epoch": 3.718407694362811, "step": 13918, "train/loss_ctc": 0.9723436832427979, "train/loss_error": 0.4997520446777344, "train/loss_total": 0.5942703485488892 }, { "epoch": 3.718674859738178, "step": 13919, "train/loss_ctc": 0.6402314901351929, "train/loss_error": 0.5505574941635132, "train/loss_total": 0.5684922933578491 }, { "epoch": 3.718942025113545, "grad_norm": 3.243187189102173, "learning_rate": 7.69917178733636e-06, "loss": 0.5068, "step": 13920 }, { "epoch": 3.718942025113545, "step": 13920, "train/loss_ctc": 0.9450284242630005, "train/loss_error": 0.42883890867233276, "train/loss_total": 0.5320768356323242 }, { "epoch": 3.719209190488913, "step": 13921, "train/loss_ctc": 0.9443471431732178, "train/loss_error": 0.4898206889629364, "train/loss_total": 0.5807259678840637 }, { "epoch": 3.71947635586428, "step": 13922, "train/loss_ctc": 0.5807968378067017, "train/loss_error": 0.4157421588897705, "train/loss_total": 0.44875311851501465 }, { "epoch": 3.7197435212396472, "step": 13923, "train/loss_ctc": 0.23983234167099, "train/loss_error": 0.36285585165023804, "train/loss_total": 0.33825117349624634 }, { "epoch": 3.720010686615015, "step": 13924, "train/loss_ctc": 
0.5250446200370789, "train/loss_error": 0.3458622992038727, "train/loss_total": 0.3816987872123718 }, { "epoch": 3.720277851990382, "step": 13925, "train/loss_ctc": 0.8330045342445374, "train/loss_error": 0.3994200825691223, "train/loss_total": 0.4861369729042053 }, { "epoch": 3.7205450173657493, "step": 13926, "train/loss_ctc": 0.7993869781494141, "train/loss_error": 0.4286302924156189, "train/loss_total": 0.5027816295623779 }, { "epoch": 3.720812182741117, "step": 13927, "train/loss_ctc": 1.1047611236572266, "train/loss_error": 0.4627922475337982, "train/loss_total": 0.5911860466003418 }, { "epoch": 3.721079348116484, "step": 13928, "train/loss_ctc": 0.8010436296463013, "train/loss_error": 0.4053259491920471, "train/loss_total": 0.4844695031642914 }, { "epoch": 3.7213465134918513, "step": 13929, "train/loss_ctc": 0.39616310596466064, "train/loss_error": 0.38817527890205383, "train/loss_total": 0.38977286219596863 }, { "epoch": 3.721613678867219, "grad_norm": 1.6788324117660522, "learning_rate": 7.68314186481432e-06, "loss": 0.4736, "step": 13930 }, { "epoch": 3.721613678867219, "step": 13930, "train/loss_ctc": 0.392579585313797, "train/loss_error": 0.47622281312942505, "train/loss_total": 0.4594941735267639 }, { "epoch": 3.721880844242586, "step": 13931, "train/loss_ctc": 1.415010929107666, "train/loss_error": 0.4401775300502777, "train/loss_total": 0.6351442337036133 }, { "epoch": 3.7221480096179533, "step": 13932, "train/loss_ctc": 0.25658801198005676, "train/loss_error": 0.37273067235946655, "train/loss_total": 0.3495021462440491 }, { "epoch": 3.722415174993321, "step": 13933, "train/loss_ctc": 0.5243834257125854, "train/loss_error": 0.3835861384868622, "train/loss_total": 0.4117456078529358 }, { "epoch": 3.722682340368688, "step": 13934, "train/loss_ctc": 0.6302639245986938, "train/loss_error": 0.40048784017562866, "train/loss_total": 0.4464430809020996 }, { "epoch": 3.7229495057440554, "step": 13935, "train/loss_ctc": 0.9477749466896057, "train/loss_error": 
0.5475416779518127, "train/loss_total": 0.6275883316993713 }, { "epoch": 3.723216671119423, "step": 13936, "train/loss_ctc": 0.5809122920036316, "train/loss_error": 0.4285893738269806, "train/loss_total": 0.45905399322509766 }, { "epoch": 3.72348383649479, "step": 13937, "train/loss_ctc": 0.781475841999054, "train/loss_error": 0.4394189417362213, "train/loss_total": 0.5078303217887878 }, { "epoch": 3.7237510018701574, "step": 13938, "train/loss_ctc": 1.0169190168380737, "train/loss_error": 0.34276318550109863, "train/loss_total": 0.4775943458080292 }, { "epoch": 3.724018167245525, "step": 13939, "train/loss_ctc": 0.28454750776290894, "train/loss_error": 0.42525872588157654, "train/loss_total": 0.397116482257843 }, { "epoch": 3.7242853326208922, "grad_norm": 2.1776552200317383, "learning_rate": 7.66711194229228e-06, "loss": 0.4772, "step": 13940 }, { "epoch": 3.7242853326208922, "step": 13940, "train/loss_ctc": 1.0068483352661133, "train/loss_error": 0.4746561050415039, "train/loss_total": 0.5810945630073547 }, { "epoch": 3.7245524979962594, "step": 13941, "train/loss_ctc": 0.6900200843811035, "train/loss_error": 0.41831347346305847, "train/loss_total": 0.4726548194885254 }, { "epoch": 3.724819663371627, "step": 13942, "train/loss_ctc": 0.40148600935935974, "train/loss_error": 0.4913983941078186, "train/loss_total": 0.47341591119766235 }, { "epoch": 3.7250868287469943, "step": 13943, "train/loss_ctc": 0.39484697580337524, "train/loss_error": 0.3988116979598999, "train/loss_total": 0.3980187475681305 }, { "epoch": 3.725353994122362, "step": 13944, "train/loss_ctc": 0.28475749492645264, "train/loss_error": 0.39763155579566956, "train/loss_total": 0.37505674362182617 }, { "epoch": 3.725621159497729, "step": 13945, "train/loss_ctc": 0.6998519897460938, "train/loss_error": 0.46927911043167114, "train/loss_total": 0.5153936743736267 }, { "epoch": 3.7258883248730963, "step": 13946, "train/loss_ctc": 0.5350014567375183, "train/loss_error": 0.3945857584476471, 
"train/loss_total": 0.4226689040660858 }, { "epoch": 3.726155490248464, "step": 13947, "train/loss_ctc": 0.8201653361320496, "train/loss_error": 0.4588868319988251, "train/loss_total": 0.53114253282547 }, { "epoch": 3.726422655623831, "step": 13948, "train/loss_ctc": 0.8597384691238403, "train/loss_error": 0.4522828161716461, "train/loss_total": 0.5337739586830139 }, { "epoch": 3.7266898209991988, "step": 13949, "train/loss_ctc": 0.30547618865966797, "train/loss_error": 0.45408469438552856, "train/loss_total": 0.42436298727989197 }, { "epoch": 3.726956986374566, "grad_norm": 1.887783169746399, "learning_rate": 7.651082019770238e-06, "loss": 0.4728, "step": 13950 }, { "epoch": 3.726956986374566, "step": 13950, "train/loss_ctc": 0.45869994163513184, "train/loss_error": 0.42644616961479187, "train/loss_total": 0.4328969120979309 }, { "epoch": 3.727224151749933, "step": 13951, "train/loss_ctc": 0.5231698751449585, "train/loss_error": 0.460750550031662, "train/loss_total": 0.4732344448566437 }, { "epoch": 3.727491317125301, "step": 13952, "train/loss_ctc": 0.5811320543289185, "train/loss_error": 0.4232824742794037, "train/loss_total": 0.4548524022102356 }, { "epoch": 3.727758482500668, "step": 13953, "train/loss_ctc": 0.4454101324081421, "train/loss_error": 0.4938858449459076, "train/loss_total": 0.4841907024383545 }, { "epoch": 3.728025647876035, "step": 13954, "train/loss_ctc": 0.688225269317627, "train/loss_error": 0.5228792428970337, "train/loss_total": 0.5559484362602234 }, { "epoch": 3.728292813251403, "step": 13955, "train/loss_ctc": 0.531521201133728, "train/loss_error": 0.437196284532547, "train/loss_total": 0.45606130361557007 }, { "epoch": 3.72855997862677, "step": 13956, "train/loss_ctc": 0.45229730010032654, "train/loss_error": 0.4396962821483612, "train/loss_total": 0.4422164857387543 }, { "epoch": 3.728827144002137, "step": 13957, "train/loss_ctc": 0.449790358543396, "train/loss_error": 0.469752699136734, "train/loss_total": 0.4657602310180664 }, { 
"epoch": 3.729094309377505, "step": 13958, "train/loss_ctc": 0.7232991456985474, "train/loss_error": 0.512840747833252, "train/loss_total": 0.5549324154853821 }, { "epoch": 3.729361474752872, "step": 13959, "train/loss_ctc": 0.7145383358001709, "train/loss_error": 0.4556773006916046, "train/loss_total": 0.5074495077133179 }, { "epoch": 3.7296286401282392, "grad_norm": 1.630245566368103, "learning_rate": 7.635052097248198e-06, "loss": 0.4828, "step": 13960 }, { "epoch": 3.7296286401282392, "step": 13960, "train/loss_ctc": 0.6925815343856812, "train/loss_error": 0.4478149116039276, "train/loss_total": 0.4967682361602783 }, { "epoch": 3.729895805503607, "step": 13961, "train/loss_ctc": 1.012251377105713, "train/loss_error": 0.4001573920249939, "train/loss_total": 0.5225762128829956 }, { "epoch": 3.730162970878974, "step": 13962, "train/loss_ctc": 0.40094736218452454, "train/loss_error": 0.443077951669693, "train/loss_total": 0.43465182185173035 }, { "epoch": 3.7304301362543413, "step": 13963, "train/loss_ctc": 0.5377360582351685, "train/loss_error": 0.4643329679965973, "train/loss_total": 0.479013592004776 }, { "epoch": 3.730697301629709, "step": 13964, "train/loss_ctc": 0.918414294719696, "train/loss_error": 0.43673279881477356, "train/loss_total": 0.5330691337585449 }, { "epoch": 3.730964467005076, "step": 13965, "train/loss_ctc": 0.5193041563034058, "train/loss_error": 0.41098126769065857, "train/loss_total": 0.43264585733413696 }, { "epoch": 3.7312316323804433, "step": 13966, "train/loss_ctc": 1.942299246788025, "train/loss_error": 0.4390932023525238, "train/loss_total": 0.739734411239624 }, { "epoch": 3.731498797755811, "step": 13967, "train/loss_ctc": 0.37454086542129517, "train/loss_error": 0.492943674325943, "train/loss_total": 0.46926310658454895 }, { "epoch": 3.731765963131178, "step": 13968, "train/loss_ctc": 0.46967554092407227, "train/loss_error": 0.41433608531951904, "train/loss_total": 0.42540398240089417 }, { "epoch": 3.7320331285065453, "step": 13969, 
"train/loss_ctc": 0.6164106726646423, "train/loss_error": 0.4871460497379303, "train/loss_total": 0.5129989385604858 }, { "epoch": 3.732300293881913, "grad_norm": 1.6582621335983276, "learning_rate": 7.619022174726156e-06, "loss": 0.5046, "step": 13970 }, { "epoch": 3.732300293881913, "step": 13970, "train/loss_ctc": 0.4892737865447998, "train/loss_error": 0.4098348915576935, "train/loss_total": 0.4257226884365082 }, { "epoch": 3.73256745925728, "step": 13971, "train/loss_ctc": 0.6123189926147461, "train/loss_error": 0.45616111159324646, "train/loss_total": 0.48739269375801086 }, { "epoch": 3.7328346246326474, "step": 13972, "train/loss_ctc": 0.6360713243484497, "train/loss_error": 0.3957968056201935, "train/loss_total": 0.4438517093658447 }, { "epoch": 3.733101790008015, "step": 13973, "train/loss_ctc": 0.5836096405982971, "train/loss_error": 0.43926072120666504, "train/loss_total": 0.468130499124527 }, { "epoch": 3.733368955383382, "step": 13974, "train/loss_ctc": 0.19589591026306152, "train/loss_error": 0.4313688278198242, "train/loss_total": 0.3842742443084717 }, { "epoch": 3.7336361207587494, "step": 13975, "train/loss_ctc": 0.24761229753494263, "train/loss_error": 0.40307489037513733, "train/loss_total": 0.3719823658466339 }, { "epoch": 3.733903286134117, "step": 13976, "train/loss_ctc": 0.45006728172302246, "train/loss_error": 0.3816542327404022, "train/loss_total": 0.3953368663787842 }, { "epoch": 3.7341704515094842, "step": 13977, "train/loss_ctc": 0.6389084458351135, "train/loss_error": 0.4709891974925995, "train/loss_total": 0.5045730471611023 }, { "epoch": 3.734437616884852, "step": 13978, "train/loss_ctc": 0.5172595381736755, "train/loss_error": 0.41056549549102783, "train/loss_total": 0.43190431594848633 }, { "epoch": 3.734704782260219, "step": 13979, "train/loss_ctc": 0.38243505358695984, "train/loss_error": 0.5494905114173889, "train/loss_total": 0.5160794258117676 }, { "epoch": 3.7349719476355863, "grad_norm": 2.5302064418792725, "learning_rate": 
7.602992252204114e-06, "loss": 0.4429, "step": 13980 }, { "epoch": 3.7349719476355863, "step": 13980, "train/loss_ctc": 0.982401967048645, "train/loss_error": 0.44918742775917053, "train/loss_total": 0.5558303594589233 }, { "epoch": 3.735239113010954, "step": 13981, "train/loss_ctc": 0.613013505935669, "train/loss_error": 0.400150328874588, "train/loss_total": 0.44272297620773315 }, { "epoch": 3.735506278386321, "step": 13982, "train/loss_ctc": 0.43170085549354553, "train/loss_error": 0.41023877263069153, "train/loss_total": 0.4145311713218689 }, { "epoch": 3.7357734437616887, "step": 13983, "train/loss_ctc": 0.43423789739608765, "train/loss_error": 0.4109262228012085, "train/loss_total": 0.4155885577201843 }, { "epoch": 3.736040609137056, "step": 13984, "train/loss_ctc": 1.018409252166748, "train/loss_error": 0.3956499695777893, "train/loss_total": 0.5202018022537231 }, { "epoch": 3.736307774512423, "step": 13985, "train/loss_ctc": 0.6125220060348511, "train/loss_error": 0.4390718340873718, "train/loss_total": 0.4737618863582611 }, { "epoch": 3.7365749398877908, "step": 13986, "train/loss_ctc": 1.163318395614624, "train/loss_error": 0.40100082755088806, "train/loss_total": 0.5534643530845642 }, { "epoch": 3.736842105263158, "step": 13987, "train/loss_ctc": 0.6133832931518555, "train/loss_error": 0.3586958646774292, "train/loss_total": 0.4096333682537079 }, { "epoch": 3.737109270638525, "step": 13988, "train/loss_ctc": 0.7471835613250732, "train/loss_error": 0.4787486791610718, "train/loss_total": 0.5324356555938721 }, { "epoch": 3.737376436013893, "step": 13989, "train/loss_ctc": 0.24392086267471313, "train/loss_error": 0.41738414764404297, "train/loss_total": 0.38269150257110596 }, { "epoch": 3.73764360138926, "grad_norm": 1.0955626964569092, "learning_rate": 7.586962329682074e-06, "loss": 0.4701, "step": 13990 }, { "epoch": 3.73764360138926, "step": 13990, "train/loss_ctc": 0.9326621294021606, "train/loss_error": 0.43402352929115295, "train/loss_total": 
0.5337512493133545 }, { "epoch": 3.737910766764627, "step": 13991, "train/loss_ctc": 0.7718005180358887, "train/loss_error": 0.5107761025428772, "train/loss_total": 0.5629810094833374 }, { "epoch": 3.738177932139995, "step": 13992, "train/loss_ctc": 0.49209025502204895, "train/loss_error": 0.4483758509159088, "train/loss_total": 0.4571187496185303 }, { "epoch": 3.738445097515362, "step": 13993, "train/loss_ctc": 0.7056102156639099, "train/loss_error": 0.42085835337638855, "train/loss_total": 0.47780874371528625 }, { "epoch": 3.738712262890729, "step": 13994, "train/loss_ctc": 0.7324540615081787, "train/loss_error": 0.48672184348106384, "train/loss_total": 0.5358682870864868 }, { "epoch": 3.738979428266097, "step": 13995, "train/loss_ctc": 1.1045466661453247, "train/loss_error": 0.45771315693855286, "train/loss_total": 0.5870798826217651 }, { "epoch": 3.739246593641464, "step": 13996, "train/loss_ctc": 0.7352872490882874, "train/loss_error": 0.3706030249595642, "train/loss_total": 0.4435398578643799 }, { "epoch": 3.7395137590168313, "step": 13997, "train/loss_ctc": 0.8059332370758057, "train/loss_error": 0.46736809611320496, "train/loss_total": 0.535081148147583 }, { "epoch": 3.739780924392199, "step": 13998, "train/loss_ctc": 0.48370763659477234, "train/loss_error": 0.4017656743526459, "train/loss_total": 0.4181540608406067 }, { "epoch": 3.740048089767566, "step": 13999, "train/loss_ctc": 1.0551949739456177, "train/loss_error": 0.44551339745521545, "train/loss_total": 0.567449688911438 }, { "epoch": 3.7403152551429333, "grad_norm": 4.867980003356934, "learning_rate": 7.570932407160033e-06, "loss": 0.5119, "step": 14000 }, { "epoch": 3.7403152551429333, "step": 14000, "train/loss_ctc": 1.0289558172225952, "train/loss_error": 0.48568567633628845, "train/loss_total": 0.5943397283554077 }, { "epoch": 3.740582420518301, "step": 14001, "train/loss_ctc": 0.8014012575149536, "train/loss_error": 0.3806983232498169, "train/loss_total": 0.4648389220237732 }, { "epoch": 
3.740849585893668, "step": 14002, "train/loss_ctc": 0.7085614800453186, "train/loss_error": 0.4242026209831238, "train/loss_total": 0.48107439279556274 }, { "epoch": 3.7411167512690353, "step": 14003, "train/loss_ctc": 0.5286450386047363, "train/loss_error": 0.4338679611682892, "train/loss_total": 0.4528234004974365 }, { "epoch": 3.741383916644403, "step": 14004, "train/loss_ctc": 0.8354360461235046, "train/loss_error": 0.4417174160480499, "train/loss_total": 0.5204611420631409 }, { "epoch": 3.74165108201977, "step": 14005, "train/loss_ctc": 0.724223256111145, "train/loss_error": 0.4238933324813843, "train/loss_total": 0.4839593172073364 }, { "epoch": 3.7419182473951373, "step": 14006, "train/loss_ctc": 1.2290561199188232, "train/loss_error": 0.4688262343406677, "train/loss_total": 0.6208722591400146 }, { "epoch": 3.742185412770505, "step": 14007, "train/loss_ctc": 0.6202259063720703, "train/loss_error": 0.41678667068481445, "train/loss_total": 0.4574745297431946 }, { "epoch": 3.742452578145872, "step": 14008, "train/loss_ctc": 0.5148850083351135, "train/loss_error": 0.42512789368629456, "train/loss_total": 0.4430793225765228 }, { "epoch": 3.7427197435212394, "step": 14009, "train/loss_ctc": 0.20305338501930237, "train/loss_error": 0.4302333891391754, "train/loss_total": 0.3847973942756653 }, { "epoch": 3.742986908896607, "grad_norm": 13.050407409667969, "learning_rate": 7.554902484637991e-06, "loss": 0.4904, "step": 14010 }, { "epoch": 3.742986908896607, "step": 14010, "train/loss_ctc": 0.7195875644683838, "train/loss_error": 0.4300321638584137, "train/loss_total": 0.48794323205947876 }, { "epoch": 3.743254074271974, "step": 14011, "train/loss_ctc": 0.3006379008293152, "train/loss_error": 0.38050103187561035, "train/loss_total": 0.3645284175872803 }, { "epoch": 3.743521239647342, "step": 14012, "train/loss_ctc": 0.5531898736953735, "train/loss_error": 0.4217036962509155, "train/loss_total": 0.448000967502594 }, { "epoch": 3.743788405022709, "step": 14013, 
"train/loss_ctc": 0.9426109790802002, "train/loss_error": 0.3775448501110077, "train/loss_total": 0.49055808782577515 }, { "epoch": 3.7440555703980767, "step": 14014, "train/loss_ctc": 0.5344461798667908, "train/loss_error": 0.4648323953151703, "train/loss_total": 0.4787551760673523 }, { "epoch": 3.744322735773444, "step": 14015, "train/loss_ctc": 0.6422248482704163, "train/loss_error": 0.41332900524139404, "train/loss_total": 0.4591081738471985 }, { "epoch": 3.744589901148811, "step": 14016, "train/loss_ctc": 0.9280992746353149, "train/loss_error": 0.42433252930641174, "train/loss_total": 0.5250859260559082 }, { "epoch": 3.7448570665241787, "step": 14017, "train/loss_ctc": 0.35744303464889526, "train/loss_error": 0.49404194951057434, "train/loss_total": 0.46672219038009644 }, { "epoch": 3.745124231899546, "step": 14018, "train/loss_ctc": 0.46582022309303284, "train/loss_error": 0.44048675894737244, "train/loss_total": 0.4455534815788269 }, { "epoch": 3.745391397274913, "step": 14019, "train/loss_ctc": 0.6575678586959839, "train/loss_error": 0.4408695101737976, "train/loss_total": 0.48420917987823486 }, { "epoch": 3.7456585626502807, "grad_norm": 1.2694050073623657, "learning_rate": 7.5388725621159505e-06, "loss": 0.465, "step": 14020 }, { "epoch": 3.7456585626502807, "step": 14020, "train/loss_ctc": 0.9596637487411499, "train/loss_error": 0.42540618777275085, "train/loss_total": 0.5322577357292175 }, { "epoch": 3.745925728025648, "step": 14021, "train/loss_ctc": 0.5933002233505249, "train/loss_error": 0.41717958450317383, "train/loss_total": 0.452403724193573 }, { "epoch": 3.746192893401015, "step": 14022, "train/loss_ctc": 0.47405970096588135, "train/loss_error": 0.3592291474342346, "train/loss_total": 0.38219526410102844 }, { "epoch": 3.7464600587763828, "step": 14023, "train/loss_ctc": 1.5048140287399292, "train/loss_error": 0.5075846910476685, "train/loss_total": 0.7070305347442627 }, { "epoch": 3.74672722415175, "step": 14024, "train/loss_ctc": 
0.4174056649208069, "train/loss_error": 0.39982837438583374, "train/loss_total": 0.4033438265323639 }, { "epoch": 3.746994389527117, "step": 14025, "train/loss_ctc": 0.8773502707481384, "train/loss_error": 0.4603712260723114, "train/loss_total": 0.5437670350074768 }, { "epoch": 3.747261554902485, "step": 14026, "train/loss_ctc": 0.6227999925613403, "train/loss_error": 0.4372733235359192, "train/loss_total": 0.47437867522239685 }, { "epoch": 3.747528720277852, "step": 14027, "train/loss_ctc": 0.569176197052002, "train/loss_error": 0.4604674279689789, "train/loss_total": 0.4822092056274414 }, { "epoch": 3.747795885653219, "step": 14028, "train/loss_ctc": 0.9999553561210632, "train/loss_error": 0.41231343150138855, "train/loss_total": 0.5298418402671814 }, { "epoch": 3.748063051028587, "step": 14029, "train/loss_ctc": 0.7242370843887329, "train/loss_error": 0.45517757534980774, "train/loss_total": 0.5089894533157349 }, { "epoch": 3.748330216403954, "grad_norm": 2.0570433139801025, "learning_rate": 7.5228426395939086e-06, "loss": 0.5016, "step": 14030 }, { "epoch": 3.748330216403954, "step": 14030, "train/loss_ctc": 0.7877558469772339, "train/loss_error": 0.4785008430480957, "train/loss_total": 0.5403518676757812 }, { "epoch": 3.7485973817793212, "step": 14031, "train/loss_ctc": 0.750457227230072, "train/loss_error": 0.461568146944046, "train/loss_total": 0.5193459987640381 }, { "epoch": 3.748864547154689, "step": 14032, "train/loss_ctc": 0.6155818700790405, "train/loss_error": 0.4416672885417938, "train/loss_total": 0.47645020484924316 }, { "epoch": 3.749131712530056, "step": 14033, "train/loss_ctc": 0.8964144587516785, "train/loss_error": 0.48191961646080017, "train/loss_total": 0.5648185610771179 }, { "epoch": 3.7493988779054233, "step": 14034, "train/loss_ctc": 1.1723484992980957, "train/loss_error": 0.4158534109592438, "train/loss_total": 0.5671524405479431 }, { "epoch": 3.749666043280791, "step": 14035, "train/loss_ctc": 0.8668603897094727, "train/loss_error": 
0.44484248757362366, "train/loss_total": 0.5292460918426514 }, { "epoch": 3.749933208656158, "step": 14036, "train/loss_ctc": 0.6453888416290283, "train/loss_error": 0.4846230447292328, "train/loss_total": 0.5167762041091919 }, { "epoch": 3.7502003740315253, "step": 14037, "train/loss_ctc": 0.8414709568023682, "train/loss_error": 0.41541588306427, "train/loss_total": 0.5006269216537476 }, { "epoch": 3.750467539406893, "step": 14038, "train/loss_ctc": 0.3988020718097687, "train/loss_error": 0.36502814292907715, "train/loss_total": 0.37178295850753784 }, { "epoch": 3.75073470478226, "step": 14039, "train/loss_ctc": 0.2961777448654175, "train/loss_error": 0.43067750334739685, "train/loss_total": 0.403777539730072 }, { "epoch": 3.7510018701576273, "grad_norm": 2.676388740539551, "learning_rate": 7.506812717071867e-06, "loss": 0.499, "step": 14040 }, { "epoch": 3.7510018701576273, "step": 14040, "train/loss_ctc": 0.7451767921447754, "train/loss_error": 0.38818514347076416, "train/loss_total": 0.45958349108695984 }, { "epoch": 3.751269035532995, "step": 14041, "train/loss_ctc": 0.6669151782989502, "train/loss_error": 0.40097668766975403, "train/loss_total": 0.45416438579559326 }, { "epoch": 3.751536200908362, "step": 14042, "train/loss_ctc": 0.6616876125335693, "train/loss_error": 0.40745776891708374, "train/loss_total": 0.4583037495613098 }, { "epoch": 3.75180336628373, "step": 14043, "train/loss_ctc": 0.33712857961654663, "train/loss_error": 0.4104064106941223, "train/loss_total": 0.39575085043907166 }, { "epoch": 3.752070531659097, "step": 14044, "train/loss_ctc": 0.486062616109848, "train/loss_error": 0.4343119263648987, "train/loss_total": 0.44466206431388855 }, { "epoch": 3.752337697034464, "step": 14045, "train/loss_ctc": 0.6244818568229675, "train/loss_error": 0.43118953704833984, "train/loss_total": 0.46984800696372986 }, { "epoch": 3.752604862409832, "step": 14046, "train/loss_ctc": 0.439838707447052, "train/loss_error": 0.4175085723400116, "train/loss_total": 
0.42197462916374207 }, { "epoch": 3.752872027785199, "step": 14047, "train/loss_ctc": 1.1654924154281616, "train/loss_error": 0.47812357544898987, "train/loss_total": 0.6155973672866821 }, { "epoch": 3.7531391931605667, "step": 14048, "train/loss_ctc": 1.2414782047271729, "train/loss_error": 0.46497151255607605, "train/loss_total": 0.6202728748321533 }, { "epoch": 3.753406358535934, "step": 14049, "train/loss_ctc": 0.6745386123657227, "train/loss_error": 0.43665629625320435, "train/loss_total": 0.48423275351524353 }, { "epoch": 3.753673523911301, "grad_norm": 7.906583309173584, "learning_rate": 7.4907827945498264e-06, "loss": 0.4824, "step": 14050 }, { "epoch": 3.753673523911301, "step": 14050, "train/loss_ctc": 0.6983072757720947, "train/loss_error": 0.49269115924835205, "train/loss_total": 0.5338144302368164 }, { "epoch": 3.7539406892866687, "step": 14051, "train/loss_ctc": 0.37592071294784546, "train/loss_error": 0.37922203540802, "train/loss_total": 0.37856176495552063 }, { "epoch": 3.754207854662036, "step": 14052, "train/loss_ctc": 0.6252633333206177, "train/loss_error": 0.42833054065704346, "train/loss_total": 0.46771711111068726 }, { "epoch": 3.754475020037403, "step": 14053, "train/loss_ctc": 0.8169509768486023, "train/loss_error": 0.4468575119972229, "train/loss_total": 0.5208762288093567 }, { "epoch": 3.7547421854127707, "step": 14054, "train/loss_ctc": 0.5382102727890015, "train/loss_error": 0.4284266531467438, "train/loss_total": 0.45038339495658875 }, { "epoch": 3.755009350788138, "step": 14055, "train/loss_ctc": 0.9189515709877014, "train/loss_error": 0.38986581563949585, "train/loss_total": 0.495682954788208 }, { "epoch": 3.755276516163505, "step": 14056, "train/loss_ctc": 0.2774413824081421, "train/loss_error": 0.4185413420200348, "train/loss_total": 0.39032137393951416 }, { "epoch": 3.7555436815388727, "step": 14057, "train/loss_ctc": 0.5003736019134521, "train/loss_error": 0.37225064635276794, "train/loss_total": 0.39787524938583374 }, { "epoch": 
3.75581084691424, "step": 14058, "train/loss_ctc": 0.8641219139099121, "train/loss_error": 0.41205522418022156, "train/loss_total": 0.5024685859680176 }, { "epoch": 3.756078012289607, "step": 14059, "train/loss_ctc": 0.6023046970367432, "train/loss_error": 0.40988287329673767, "train/loss_total": 0.44836723804473877 }, { "epoch": 3.7563451776649748, "grad_norm": 1.7404416799545288, "learning_rate": 7.474752872027785e-06, "loss": 0.4586, "step": 14060 }, { "epoch": 3.7563451776649748, "step": 14060, "train/loss_ctc": 0.6015055179595947, "train/loss_error": 0.41054344177246094, "train/loss_total": 0.4487358629703522 }, { "epoch": 3.756612343040342, "step": 14061, "train/loss_ctc": 0.772565484046936, "train/loss_error": 0.40903642773628235, "train/loss_total": 0.481742262840271 }, { "epoch": 3.756879508415709, "step": 14062, "train/loss_ctc": 0.4883061945438385, "train/loss_error": 0.4523176848888397, "train/loss_total": 0.45951539278030396 }, { "epoch": 3.757146673791077, "step": 14063, "train/loss_ctc": 0.6865562796592712, "train/loss_error": 0.4440159499645233, "train/loss_total": 0.49252402782440186 }, { "epoch": 3.757413839166444, "step": 14064, "train/loss_ctc": 0.659507155418396, "train/loss_error": 0.41515830159187317, "train/loss_total": 0.4640280604362488 }, { "epoch": 3.757681004541811, "step": 14065, "train/loss_ctc": 0.6487129926681519, "train/loss_error": 0.46233686804771423, "train/loss_total": 0.49961209297180176 }, { "epoch": 3.757948169917179, "step": 14066, "train/loss_ctc": 1.132697343826294, "train/loss_error": 0.4603118300437927, "train/loss_total": 0.5947889685630798 }, { "epoch": 3.758215335292546, "step": 14067, "train/loss_ctc": 1.6521623134613037, "train/loss_error": 0.5155929327011108, "train/loss_total": 0.7429068088531494 }, { "epoch": 3.7584825006679132, "step": 14068, "train/loss_ctc": 0.5743345618247986, "train/loss_error": 0.47985607385635376, "train/loss_total": 0.49875178933143616 }, { "epoch": 3.758749666043281, "step": 14069, 
"train/loss_ctc": 0.6872555017471313, "train/loss_error": 0.486749529838562, "train/loss_total": 0.526850700378418 }, { "epoch": 3.759016831418648, "grad_norm": 3.151998996734619, "learning_rate": 7.458722949505744e-06, "loss": 0.5209, "step": 14070 }, { "epoch": 3.759016831418648, "step": 14070, "train/loss_ctc": 0.6527820229530334, "train/loss_error": 0.4141559898853302, "train/loss_total": 0.46188119053840637 }, { "epoch": 3.7592839967940153, "step": 14071, "train/loss_ctc": 0.4546232521533966, "train/loss_error": 0.4437689185142517, "train/loss_total": 0.4459397792816162 }, { "epoch": 3.759551162169383, "step": 14072, "train/loss_ctc": 0.9321598410606384, "train/loss_error": 0.4923398792743683, "train/loss_total": 0.5803039073944092 }, { "epoch": 3.75981832754475, "step": 14073, "train/loss_ctc": 1.0748515129089355, "train/loss_error": 0.44753870368003845, "train/loss_total": 0.5730012655258179 }, { "epoch": 3.7600854929201173, "step": 14074, "train/loss_ctc": 0.749441385269165, "train/loss_error": 0.4511028826236725, "train/loss_total": 0.5107705593109131 }, { "epoch": 3.760352658295485, "step": 14075, "train/loss_ctc": 0.6461461782455444, "train/loss_error": 0.48265931010246277, "train/loss_total": 0.5153566598892212 }, { "epoch": 3.760619823670852, "step": 14076, "train/loss_ctc": 0.7600908279418945, "train/loss_error": 0.4957643747329712, "train/loss_total": 0.5486297011375427 }, { "epoch": 3.7608869890462198, "step": 14077, "train/loss_ctc": 0.8057336807250977, "train/loss_error": 0.4682099223136902, "train/loss_total": 0.5357146859169006 }, { "epoch": 3.761154154421587, "step": 14078, "train/loss_ctc": 0.856501579284668, "train/loss_error": 0.5065414309501648, "train/loss_total": 0.5765334367752075 }, { "epoch": 3.761421319796954, "step": 14079, "train/loss_ctc": 1.2918989658355713, "train/loss_error": 0.4583764672279358, "train/loss_total": 0.625080943107605 }, { "epoch": 3.761688485172322, "grad_norm": 2.065647602081299, "learning_rate": 
7.442693026983703e-06, "loss": 0.5373, "step": 14080 }, { "epoch": 3.761688485172322, "step": 14080, "train/loss_ctc": 0.8667230010032654, "train/loss_error": 0.49350035190582275, "train/loss_total": 0.5681448578834534 }, { "epoch": 3.761955650547689, "step": 14081, "train/loss_ctc": 0.5116373300552368, "train/loss_error": 0.3686797022819519, "train/loss_total": 0.3972712457180023 }, { "epoch": 3.7622228159230566, "step": 14082, "train/loss_ctc": 0.5180405974388123, "train/loss_error": 0.4283009469509125, "train/loss_total": 0.4462488889694214 }, { "epoch": 3.762489981298424, "step": 14083, "train/loss_ctc": 0.8516861796379089, "train/loss_error": 0.4448620676994324, "train/loss_total": 0.5262268781661987 }, { "epoch": 3.762757146673791, "step": 14084, "train/loss_ctc": 0.5648409128189087, "train/loss_error": 0.4267031252384186, "train/loss_total": 0.4543306827545166 }, { "epoch": 3.7630243120491587, "step": 14085, "train/loss_ctc": 0.5686022639274597, "train/loss_error": 0.41185325384140015, "train/loss_total": 0.44320306181907654 }, { "epoch": 3.763291477424526, "step": 14086, "train/loss_ctc": 0.6987507343292236, "train/loss_error": 0.42260169982910156, "train/loss_total": 0.47783151268959045 }, { "epoch": 3.763558642799893, "step": 14087, "train/loss_ctc": 1.1201539039611816, "train/loss_error": 0.5065236687660217, "train/loss_total": 0.6292496919631958 }, { "epoch": 3.7638258081752607, "step": 14088, "train/loss_ctc": 0.5436292886734009, "train/loss_error": 0.4550531804561615, "train/loss_total": 0.4727684259414673 }, { "epoch": 3.764092973550628, "step": 14089, "train/loss_ctc": 0.46809735894203186, "train/loss_error": 0.40936875343322754, "train/loss_total": 0.4211144745349884 }, { "epoch": 3.764360138925995, "grad_norm": 2.2149417400360107, "learning_rate": 7.426663104461662e-06, "loss": 0.4836, "step": 14090 }, { "epoch": 3.764360138925995, "step": 14090, "train/loss_ctc": 0.5139176845550537, "train/loss_error": 0.44340258836746216, "train/loss_total": 
0.45750561356544495 }, { "epoch": 3.7646273043013627, "step": 14091, "train/loss_ctc": 0.7655658721923828, "train/loss_error": 0.44676673412323, "train/loss_total": 0.5105265378952026 }, { "epoch": 3.76489446967673, "step": 14092, "train/loss_ctc": 0.9001091718673706, "train/loss_error": 0.4877512753009796, "train/loss_total": 0.5702228546142578 }, { "epoch": 3.765161635052097, "step": 14093, "train/loss_ctc": 0.4683084189891815, "train/loss_error": 0.45159661769866943, "train/loss_total": 0.45493900775909424 }, { "epoch": 3.7654288004274648, "step": 14094, "train/loss_ctc": 0.4498143792152405, "train/loss_error": 0.5341724753379822, "train/loss_total": 0.5173008441925049 }, { "epoch": 3.765695965802832, "step": 14095, "train/loss_ctc": 0.7492387294769287, "train/loss_error": 0.48787015676498413, "train/loss_total": 0.5401438474655151 }, { "epoch": 3.765963131178199, "step": 14096, "train/loss_ctc": 0.40796732902526855, "train/loss_error": 0.44839566946029663, "train/loss_total": 0.440310001373291 }, { "epoch": 3.766230296553567, "step": 14097, "train/loss_ctc": 0.8146989345550537, "train/loss_error": 0.4364263117313385, "train/loss_total": 0.5120808482170105 }, { "epoch": 3.766497461928934, "step": 14098, "train/loss_ctc": 0.8203029632568359, "train/loss_error": 0.4498775601387024, "train/loss_total": 0.5239626169204712 }, { "epoch": 3.766764627304301, "step": 14099, "train/loss_ctc": 0.8758354187011719, "train/loss_error": 0.49636754393577576, "train/loss_total": 0.5722610950469971 }, { "epoch": 3.767031792679669, "grad_norm": 1.4977816343307495, "learning_rate": 7.410633181939621e-06, "loss": 0.5099, "step": 14100 }, { "epoch": 3.767031792679669, "step": 14100, "train/loss_ctc": 0.46678638458251953, "train/loss_error": 0.40443888306617737, "train/loss_total": 0.4169083833694458 }, { "epoch": 3.767298958055036, "step": 14101, "train/loss_ctc": 0.9761549234390259, "train/loss_error": 0.4317723214626312, "train/loss_total": 0.5406488180160522 }, { "epoch": 
3.767566123430403, "step": 14102, "train/loss_ctc": 1.0020275115966797, "train/loss_error": 0.4272959530353546, "train/loss_total": 0.5422422885894775 }, { "epoch": 3.767833288805771, "step": 14103, "train/loss_ctc": 1.0407404899597168, "train/loss_error": 0.4581546187400818, "train/loss_total": 0.5746718049049377 }, { "epoch": 3.768100454181138, "step": 14104, "train/loss_ctc": 0.5418840646743774, "train/loss_error": 0.47069528698921204, "train/loss_total": 0.484933078289032 }, { "epoch": 3.7683676195565052, "step": 14105, "train/loss_ctc": 0.8060557246208191, "train/loss_error": 0.43097779154777527, "train/loss_total": 0.5059933662414551 }, { "epoch": 3.768634784931873, "step": 14106, "train/loss_ctc": 0.8358132839202881, "train/loss_error": 0.3420107066631317, "train/loss_total": 0.440771222114563 }, { "epoch": 3.76890195030724, "step": 14107, "train/loss_ctc": 0.35214853286743164, "train/loss_error": 0.4260403513908386, "train/loss_total": 0.41126200556755066 }, { "epoch": 3.7691691156826073, "step": 14108, "train/loss_ctc": 0.5781972408294678, "train/loss_error": 0.47287309169769287, "train/loss_total": 0.4939379394054413 }, { "epoch": 3.769436281057975, "step": 14109, "train/loss_ctc": 0.652632474899292, "train/loss_error": 0.37030768394470215, "train/loss_total": 0.4267726540565491 }, { "epoch": 3.769703446433342, "grad_norm": 2.9789469242095947, "learning_rate": 7.39460325941758e-06, "loss": 0.4838, "step": 14110 }, { "epoch": 3.769703446433342, "step": 14110, "train/loss_ctc": 0.5327730178833008, "train/loss_error": 0.4510829448699951, "train/loss_total": 0.4674209654331207 }, { "epoch": 3.7699706118087097, "step": 14111, "train/loss_ctc": 0.5031343698501587, "train/loss_error": 0.47527992725372314, "train/loss_total": 0.48085084557533264 }, { "epoch": 3.770237777184077, "step": 14112, "train/loss_ctc": 0.18807090818881989, "train/loss_error": 0.41977444291114807, "train/loss_total": 0.37343376874923706 }, { "epoch": 3.7705049425594446, "step": 14113, 
"train/loss_ctc": 0.6260025501251221, "train/loss_error": 0.44241422414779663, "train/loss_total": 0.47913190722465515 }, { "epoch": 3.7707721079348118, "step": 14114, "train/loss_ctc": 0.8447672128677368, "train/loss_error": 0.4441310465335846, "train/loss_total": 0.5242582559585571 }, { "epoch": 3.771039273310179, "step": 14115, "train/loss_ctc": 1.057883858680725, "train/loss_error": 0.39409297704696655, "train/loss_total": 0.5268511772155762 }, { "epoch": 3.7713064386855466, "step": 14116, "train/loss_ctc": 0.5110940933227539, "train/loss_error": 0.4556860625743866, "train/loss_total": 0.46676766872406006 }, { "epoch": 3.771573604060914, "step": 14117, "train/loss_ctc": 0.2404535412788391, "train/loss_error": 0.427532434463501, "train/loss_total": 0.3901166617870331 }, { "epoch": 3.771840769436281, "step": 14118, "train/loss_ctc": 0.7559126615524292, "train/loss_error": 0.483355313539505, "train/loss_total": 0.5378668308258057 }, { "epoch": 3.7721079348116486, "step": 14119, "train/loss_ctc": 0.3044530749320984, "train/loss_error": 0.41568806767463684, "train/loss_total": 0.3934410810470581 }, { "epoch": 3.772375100187016, "grad_norm": 1.4044651985168457, "learning_rate": 7.378573336895538e-06, "loss": 0.464, "step": 14120 }, { "epoch": 3.772375100187016, "step": 14120, "train/loss_ctc": 0.746487021446228, "train/loss_error": 0.42439696192741394, "train/loss_total": 0.48881494998931885 }, { "epoch": 3.772642265562383, "step": 14121, "train/loss_ctc": 1.017540454864502, "train/loss_error": 0.45480290055274963, "train/loss_total": 0.567350447177887 }, { "epoch": 3.7729094309377507, "step": 14122, "train/loss_ctc": 0.5275754332542419, "train/loss_error": 0.4054332971572876, "train/loss_total": 0.42986172437667847 }, { "epoch": 3.773176596313118, "step": 14123, "train/loss_ctc": 1.3575081825256348, "train/loss_error": 0.4920428693294525, "train/loss_total": 0.66513592004776 }, { "epoch": 3.773443761688485, "step": 14124, "train/loss_ctc": 1.2075450420379639, 
"train/loss_error": 0.48991507291793823, "train/loss_total": 0.6334410905838013 }, { "epoch": 3.7737109270638527, "step": 14125, "train/loss_ctc": 0.5977069139480591, "train/loss_error": 0.3946835994720459, "train/loss_total": 0.4352882504463196 }, { "epoch": 3.77397809243922, "step": 14126, "train/loss_ctc": 0.31194546818733215, "train/loss_error": 0.37403029203414917, "train/loss_total": 0.36161333322525024 }, { "epoch": 3.774245257814587, "step": 14127, "train/loss_ctc": 0.7528195381164551, "train/loss_error": 0.4976511299610138, "train/loss_total": 0.54868483543396 }, { "epoch": 3.7745124231899547, "step": 14128, "train/loss_ctc": 0.7606815099716187, "train/loss_error": 0.4670034646987915, "train/loss_total": 0.5257390737533569 }, { "epoch": 3.774779588565322, "step": 14129, "train/loss_ctc": 0.9413079023361206, "train/loss_error": 0.38484320044517517, "train/loss_total": 0.4961361289024353 }, { "epoch": 3.775046753940689, "grad_norm": 2.3600711822509766, "learning_rate": 7.362543414373497e-06, "loss": 0.5152, "step": 14130 }, { "epoch": 3.775046753940689, "step": 14130, "train/loss_ctc": 0.3248821198940277, "train/loss_error": 0.44543686509132385, "train/loss_total": 0.4213259220123291 }, { "epoch": 3.7753139193160568, "step": 14131, "train/loss_ctc": 0.24501603841781616, "train/loss_error": 0.444149374961853, "train/loss_total": 0.40432271361351013 }, { "epoch": 3.775581084691424, "step": 14132, "train/loss_ctc": 1.186706304550171, "train/loss_error": 0.5095975995063782, "train/loss_total": 0.6450193524360657 }, { "epoch": 3.775848250066791, "step": 14133, "train/loss_ctc": 0.9105536937713623, "train/loss_error": 0.4576314091682434, "train/loss_total": 0.5482158660888672 }, { "epoch": 3.776115415442159, "step": 14134, "train/loss_ctc": 0.8794958591461182, "train/loss_error": 0.4182043969631195, "train/loss_total": 0.5104627013206482 }, { "epoch": 3.776382580817526, "step": 14135, "train/loss_ctc": 0.6346577405929565, "train/loss_error": 0.5007483959197998, 
"train/loss_total": 0.527530312538147 }, { "epoch": 3.776649746192893, "step": 14136, "train/loss_ctc": 0.4827647805213928, "train/loss_error": 0.41836607456207275, "train/loss_total": 0.4312458336353302 }, { "epoch": 3.776916911568261, "step": 14137, "train/loss_ctc": 0.5011106729507446, "train/loss_error": 0.43209221959114075, "train/loss_total": 0.4458959102630615 }, { "epoch": 3.777184076943628, "step": 14138, "train/loss_ctc": 0.7759983539581299, "train/loss_error": 0.5093219876289368, "train/loss_total": 0.5626572370529175 }, { "epoch": 3.777451242318995, "step": 14139, "train/loss_ctc": 0.5131790041923523, "train/loss_error": 0.46290451288223267, "train/loss_total": 0.47295942902565 }, { "epoch": 3.777718407694363, "grad_norm": 2.5216593742370605, "learning_rate": 7.346513491851457e-06, "loss": 0.497, "step": 14140 }, { "epoch": 3.777718407694363, "step": 14140, "train/loss_ctc": 0.5031296610832214, "train/loss_error": 0.4243055582046509, "train/loss_total": 0.44007039070129395 }, { "epoch": 3.77798557306973, "step": 14141, "train/loss_ctc": 0.5298923254013062, "train/loss_error": 0.4497899115085602, "train/loss_total": 0.4658103883266449 }, { "epoch": 3.7782527384450977, "step": 14142, "train/loss_ctc": 0.3108599781990051, "train/loss_error": 0.34665313363075256, "train/loss_total": 0.3394944965839386 }, { "epoch": 3.778519903820465, "step": 14143, "train/loss_ctc": 0.9325156211853027, "train/loss_error": 0.4338482916355133, "train/loss_total": 0.5335817933082581 }, { "epoch": 3.778787069195832, "step": 14144, "train/loss_ctc": 0.7253933548927307, "train/loss_error": 0.44295555353164673, "train/loss_total": 0.4994431138038635 }, { "epoch": 3.7790542345711997, "step": 14145, "train/loss_ctc": 0.6530412435531616, "train/loss_error": 0.3882911205291748, "train/loss_total": 0.44124114513397217 }, { "epoch": 3.779321399946567, "step": 14146, "train/loss_ctc": 0.652691662311554, "train/loss_error": 0.40104979276657104, "train/loss_total": 0.4513781666755676 }, { 
"epoch": 3.7795885653219345, "step": 14147, "train/loss_ctc": 0.3273787796497345, "train/loss_error": 0.4054786264896393, "train/loss_total": 0.3898586630821228 }, { "epoch": 3.7798557306973017, "step": 14148, "train/loss_ctc": 0.9032458662986755, "train/loss_error": 0.4953984022140503, "train/loss_total": 0.5769678950309753 }, { "epoch": 3.780122896072669, "step": 14149, "train/loss_ctc": 0.5007022619247437, "train/loss_error": 0.40655574202537537, "train/loss_total": 0.425385057926178 }, { "epoch": 3.7803900614480366, "grad_norm": 3.31843900680542, "learning_rate": 7.330483569329415e-06, "loss": 0.4563, "step": 14150 }, { "epoch": 3.7803900614480366, "step": 14150, "train/loss_ctc": 1.1216490268707275, "train/loss_error": 0.4246831238269806, "train/loss_total": 0.56407630443573 }, { "epoch": 3.7806572268234038, "step": 14151, "train/loss_ctc": 1.0245723724365234, "train/loss_error": 0.4708743691444397, "train/loss_total": 0.5816140174865723 }, { "epoch": 3.780924392198771, "step": 14152, "train/loss_ctc": 0.44261273741722107, "train/loss_error": 0.46934765577316284, "train/loss_total": 0.4640006721019745 }, { "epoch": 3.7811915575741386, "step": 14153, "train/loss_ctc": 0.8216989040374756, "train/loss_error": 0.517509400844574, "train/loss_total": 0.5783473253250122 }, { "epoch": 3.781458722949506, "step": 14154, "train/loss_ctc": 0.46389061212539673, "train/loss_error": 0.371774286031723, "train/loss_total": 0.3901975452899933 }, { "epoch": 3.781725888324873, "step": 14155, "train/loss_ctc": 0.42513006925582886, "train/loss_error": 0.4141676425933838, "train/loss_total": 0.41636013984680176 }, { "epoch": 3.7819930537002406, "step": 14156, "train/loss_ctc": 1.19313645362854, "train/loss_error": 0.5037293434143066, "train/loss_total": 0.6416108012199402 }, { "epoch": 3.782260219075608, "step": 14157, "train/loss_ctc": 1.285682201385498, "train/loss_error": 0.41451767086982727, "train/loss_total": 0.5887506008148193 }, { "epoch": 3.782527384450975, "step": 14158, 
"train/loss_ctc": 0.7997795343399048, "train/loss_error": 0.40182486176490784, "train/loss_total": 0.4814158082008362 }, { "epoch": 3.7827945498263427, "step": 14159, "train/loss_ctc": 0.65077805519104, "train/loss_error": 0.47143635153770447, "train/loss_total": 0.5073046684265137 }, { "epoch": 3.78306171520171, "grad_norm": 1.4857133626937866, "learning_rate": 7.314453646807374e-06, "loss": 0.5214, "step": 14160 }, { "epoch": 3.78306171520171, "step": 14160, "train/loss_ctc": 0.2811519503593445, "train/loss_error": 0.4042433500289917, "train/loss_total": 0.3796250820159912 }, { "epoch": 3.783328880577077, "step": 14161, "train/loss_ctc": 0.9884084463119507, "train/loss_error": 0.426928848028183, "train/loss_total": 0.5392247438430786 }, { "epoch": 3.7835960459524447, "step": 14162, "train/loss_ctc": 0.6204848885536194, "train/loss_error": 0.3815712332725525, "train/loss_total": 0.4293539822101593 }, { "epoch": 3.783863211327812, "step": 14163, "train/loss_ctc": 1.0475565195083618, "train/loss_error": 0.48482924699783325, "train/loss_total": 0.597374677658081 }, { "epoch": 3.784130376703179, "step": 14164, "train/loss_ctc": 0.22539404034614563, "train/loss_error": 0.3912278711795807, "train/loss_total": 0.3580611050128937 }, { "epoch": 3.7843975420785467, "step": 14165, "train/loss_ctc": 0.5725603103637695, "train/loss_error": 0.3752463161945343, "train/loss_total": 0.4147091209888458 }, { "epoch": 3.784664707453914, "step": 14166, "train/loss_ctc": 1.1498326063156128, "train/loss_error": 0.48508331179618835, "train/loss_total": 0.6180331707000732 }, { "epoch": 3.784931872829281, "step": 14167, "train/loss_ctc": 0.5712735056877136, "train/loss_error": 0.4209604561328888, "train/loss_total": 0.45102307200431824 }, { "epoch": 3.7851990382046488, "step": 14168, "train/loss_ctc": 1.2115638256072998, "train/loss_error": 0.3992580771446228, "train/loss_total": 0.5617192387580872 }, { "epoch": 3.785466203580016, "step": 14169, "train/loss_ctc": 1.0115545988082886, 
"train/loss_error": 0.44242745637893677, "train/loss_total": 0.5562528967857361 }, { "epoch": 3.785733368955383, "grad_norm": 1.9384303092956543, "learning_rate": 7.298423724285333e-06, "loss": 0.4905, "step": 14170 }, { "epoch": 3.785733368955383, "step": 14170, "train/loss_ctc": 0.5948395729064941, "train/loss_error": 0.44641274213790894, "train/loss_total": 0.47609812021255493 }, { "epoch": 3.786000534330751, "step": 14171, "train/loss_ctc": 0.8727707266807556, "train/loss_error": 0.4812753200531006, "train/loss_total": 0.5595744252204895 }, { "epoch": 3.786267699706118, "step": 14172, "train/loss_ctc": 0.74083411693573, "train/loss_error": 0.3924097418785095, "train/loss_total": 0.46209460496902466 }, { "epoch": 3.786534865081485, "step": 14173, "train/loss_ctc": 0.2919026017189026, "train/loss_error": 0.4456804394721985, "train/loss_total": 0.41492486000061035 }, { "epoch": 3.786802030456853, "step": 14174, "train/loss_ctc": 0.8196849822998047, "train/loss_error": 0.48797810077667236, "train/loss_total": 0.5543195009231567 }, { "epoch": 3.78706919583222, "step": 14175, "train/loss_ctc": 0.5805670022964478, "train/loss_error": 0.427400141954422, "train/loss_total": 0.4580335021018982 }, { "epoch": 3.7873363612075877, "step": 14176, "train/loss_ctc": 0.422805517911911, "train/loss_error": 0.40425485372543335, "train/loss_total": 0.4079650044441223 }, { "epoch": 3.787603526582955, "step": 14177, "train/loss_ctc": 1.0062367916107178, "train/loss_error": 0.44930794835090637, "train/loss_total": 0.5606937408447266 }, { "epoch": 3.787870691958322, "step": 14178, "train/loss_ctc": 0.7563247680664062, "train/loss_error": 0.46297359466552734, "train/loss_total": 0.5216438174247742 }, { "epoch": 3.7881378573336897, "step": 14179, "train/loss_ctc": 0.3655955195426941, "train/loss_error": 0.4699282944202423, "train/loss_total": 0.4490617513656616 }, { "epoch": 3.788405022709057, "grad_norm": 2.021296977996826, "learning_rate": 7.282393801763292e-06, "loss": 0.4864, "step": 
14180 }, { "epoch": 3.788405022709057, "step": 14180, "train/loss_ctc": 1.1615674495697021, "train/loss_error": 0.4651404023170471, "train/loss_total": 0.604425847530365 }, { "epoch": 3.7886721880844245, "step": 14181, "train/loss_ctc": 0.4516783058643341, "train/loss_error": 0.43156298995018005, "train/loss_total": 0.4355860650539398 }, { "epoch": 3.7889393534597917, "step": 14182, "train/loss_ctc": 0.8976529836654663, "train/loss_error": 0.4522082805633545, "train/loss_total": 0.5412972569465637 }, { "epoch": 3.789206518835159, "step": 14183, "train/loss_ctc": 0.6298428177833557, "train/loss_error": 0.4433583915233612, "train/loss_total": 0.4806552529335022 }, { "epoch": 3.7894736842105265, "step": 14184, "train/loss_ctc": 1.183004379272461, "train/loss_error": 0.5367809534072876, "train/loss_total": 0.6660256385803223 }, { "epoch": 3.7897408495858937, "step": 14185, "train/loss_ctc": 0.8789782524108887, "train/loss_error": 0.5004490613937378, "train/loss_total": 0.576154887676239 }, { "epoch": 3.790008014961261, "step": 14186, "train/loss_ctc": 0.6727355122566223, "train/loss_error": 0.45118680596351624, "train/loss_total": 0.49549657106399536 }, { "epoch": 3.7902751803366286, "step": 14187, "train/loss_ctc": 1.175951361656189, "train/loss_error": 0.44960543513298035, "train/loss_total": 0.5948746204376221 }, { "epoch": 3.7905423457119958, "step": 14188, "train/loss_ctc": 0.585198163986206, "train/loss_error": 0.3885348439216614, "train/loss_total": 0.4278675317764282 }, { "epoch": 3.790809511087363, "step": 14189, "train/loss_ctc": 0.4103502631187439, "train/loss_error": 0.4399597942829132, "train/loss_total": 0.4340378940105438 }, { "epoch": 3.7910766764627306, "grad_norm": 1.9205293655395508, "learning_rate": 7.266363879241251e-06, "loss": 0.5256, "step": 14190 }, { "epoch": 3.7910766764627306, "step": 14190, "train/loss_ctc": 0.8196075558662415, "train/loss_error": 0.434499591588974, "train/loss_total": 0.5115212202072144 }, { "epoch": 3.791343841838098, 
"step": 14191, "train/loss_ctc": 1.3902263641357422, "train/loss_error": 0.40185368061065674, "train/loss_total": 0.5995281934738159 }, { "epoch": 3.791611007213465, "step": 14192, "train/loss_ctc": 0.5995394587516785, "train/loss_error": 0.3881588876247406, "train/loss_total": 0.4304350018501282 }, { "epoch": 3.7918781725888326, "step": 14193, "train/loss_ctc": 0.9452527761459351, "train/loss_error": 0.5148248076438904, "train/loss_total": 0.6009104251861572 }, { "epoch": 3.7921453379642, "step": 14194, "train/loss_ctc": 0.688491940498352, "train/loss_error": 0.3724418878555298, "train/loss_total": 0.43565189838409424 }, { "epoch": 3.792412503339567, "step": 14195, "train/loss_ctc": 0.8323652744293213, "train/loss_error": 0.45645272731781006, "train/loss_total": 0.5316352844238281 }, { "epoch": 3.7926796687149347, "step": 14196, "train/loss_ctc": 0.5941427946090698, "train/loss_error": 0.38197043538093567, "train/loss_total": 0.42440491914749146 }, { "epoch": 3.792946834090302, "step": 14197, "train/loss_ctc": 0.5831847190856934, "train/loss_error": 0.46524640917778015, "train/loss_total": 0.48883408308029175 }, { "epoch": 3.793213999465669, "step": 14198, "train/loss_ctc": 0.8813976049423218, "train/loss_error": 0.4114968776702881, "train/loss_total": 0.5054770112037659 }, { "epoch": 3.7934811648410367, "step": 14199, "train/loss_ctc": 2.147634267807007, "train/loss_error": 0.48769620060920715, "train/loss_total": 0.8196837902069092 }, { "epoch": 3.793748330216404, "grad_norm": 4.229666709899902, "learning_rate": 7.2503339567192095e-06, "loss": 0.5348, "step": 14200 }, { "epoch": 3.793748330216404, "step": 14200, "train/loss_ctc": 0.7155227661132812, "train/loss_error": 0.37068188190460205, "train/loss_total": 0.4396500587463379 }, { "epoch": 3.794015495591771, "step": 14201, "train/loss_ctc": 0.6542074084281921, "train/loss_error": 0.5121718049049377, "train/loss_total": 0.5405789017677307 }, { "epoch": 3.7942826609671387, "step": 14202, "train/loss_ctc": 
0.98136967420578, "train/loss_error": 0.4044018089771271, "train/loss_total": 0.5197954177856445 }, { "epoch": 3.794549826342506, "step": 14203, "train/loss_ctc": 0.9769318103790283, "train/loss_error": 0.5767590403556824, "train/loss_total": 0.6567935943603516 }, { "epoch": 3.794816991717873, "step": 14204, "train/loss_ctc": 0.2931321859359741, "train/loss_error": 0.36577704548835754, "train/loss_total": 0.3512480854988098 }, { "epoch": 3.7950841570932408, "step": 14205, "train/loss_ctc": 0.547512412071228, "train/loss_error": 0.42467185854911804, "train/loss_total": 0.4492399990558624 }, { "epoch": 3.795351322468608, "step": 14206, "train/loss_ctc": 1.2350013256072998, "train/loss_error": 0.43198686838150024, "train/loss_total": 0.5925897359848022 }, { "epoch": 3.795618487843975, "step": 14207, "train/loss_ctc": 0.6108949184417725, "train/loss_error": 0.3637978732585907, "train/loss_total": 0.41321730613708496 }, { "epoch": 3.795885653219343, "step": 14208, "train/loss_ctc": 0.7302370667457581, "train/loss_error": 0.4642612338066101, "train/loss_total": 0.5174564123153687 }, { "epoch": 3.79615281859471, "step": 14209, "train/loss_ctc": 0.19405828416347504, "train/loss_error": 0.4105384051799774, "train/loss_total": 0.36724239587783813 }, { "epoch": 3.7964199839700776, "grad_norm": 1.7069957256317139, "learning_rate": 7.2343040341971685e-06, "loss": 0.4848, "step": 14210 }, { "epoch": 3.7964199839700776, "step": 14210, "train/loss_ctc": 0.7996333837509155, "train/loss_error": 0.37782758474349976, "train/loss_total": 0.4621887505054474 }, { "epoch": 3.796687149345445, "step": 14211, "train/loss_ctc": 1.0096162557601929, "train/loss_error": 0.4498822093009949, "train/loss_total": 0.5618290305137634 }, { "epoch": 3.796954314720812, "step": 14212, "train/loss_ctc": 0.206790491938591, "train/loss_error": 0.43771886825561523, "train/loss_total": 0.3915331959724426 }, { "epoch": 3.7972214800961797, "step": 14213, "train/loss_ctc": 0.2917659282684326, "train/loss_error": 
0.37529268860816956, "train/loss_total": 0.3585873544216156 }, { "epoch": 3.797488645471547, "step": 14214, "train/loss_ctc": 0.8767201900482178, "train/loss_error": 0.4416528344154358, "train/loss_total": 0.5286663174629211 }, { "epoch": 3.7977558108469145, "step": 14215, "train/loss_ctc": 1.3963453769683838, "train/loss_error": 0.5028430819511414, "train/loss_total": 0.6815435290336609 }, { "epoch": 3.7980229762222817, "step": 14216, "train/loss_ctc": 0.7497286796569824, "train/loss_error": 0.36482226848602295, "train/loss_total": 0.44180354475975037 }, { "epoch": 3.798290141597649, "step": 14217, "train/loss_ctc": 0.7091073989868164, "train/loss_error": 0.44835948944091797, "train/loss_total": 0.5005090832710266 }, { "epoch": 3.7985573069730165, "step": 14218, "train/loss_ctc": 0.6325340270996094, "train/loss_error": 0.4391153156757355, "train/loss_total": 0.47779905796051025 }, { "epoch": 3.7988244723483837, "step": 14219, "train/loss_ctc": 0.4373284578323364, "train/loss_error": 0.41947469115257263, "train/loss_total": 0.42304545640945435 }, { "epoch": 3.799091637723751, "grad_norm": 1.7272759675979614, "learning_rate": 7.2182741116751265e-06, "loss": 0.4828, "step": 14220 }, { "epoch": 3.799091637723751, "step": 14220, "train/loss_ctc": 0.6116583943367004, "train/loss_error": 0.389679878950119, "train/loss_total": 0.43407559394836426 }, { "epoch": 3.7993588030991186, "step": 14221, "train/loss_ctc": 0.5428361296653748, "train/loss_error": 0.41655614972114563, "train/loss_total": 0.4418121576309204 }, { "epoch": 3.7996259684744857, "step": 14222, "train/loss_ctc": 1.3305977582931519, "train/loss_error": 0.42398473620414734, "train/loss_total": 0.6053073406219482 }, { "epoch": 3.799893133849853, "step": 14223, "train/loss_ctc": 1.3299554586410522, "train/loss_error": 0.47341832518577576, "train/loss_total": 0.6447257995605469 }, { "epoch": 3.8001602992252206, "step": 14224, "train/loss_ctc": 1.0045289993286133, "train/loss_error": 0.4681318402290344, 
"train/loss_total": 0.575411319732666 }, { "epoch": 3.800427464600588, "step": 14225, "train/loss_ctc": 0.8891905546188354, "train/loss_error": 0.43922942876815796, "train/loss_total": 0.5292216539382935 }, { "epoch": 3.800694629975955, "step": 14226, "train/loss_ctc": 1.5119290351867676, "train/loss_error": 0.47311320900917053, "train/loss_total": 0.6808763742446899 }, { "epoch": 3.8009617953513226, "step": 14227, "train/loss_ctc": 0.5688511729240417, "train/loss_error": 0.4119918644428253, "train/loss_total": 0.443363755941391 }, { "epoch": 3.80122896072669, "step": 14228, "train/loss_ctc": 0.7155836820602417, "train/loss_error": 0.44043052196502686, "train/loss_total": 0.4954611659049988 }, { "epoch": 3.801496126102057, "step": 14229, "train/loss_ctc": 1.2300636768341064, "train/loss_error": 0.45349153876304626, "train/loss_total": 0.6088059544563293 }, { "epoch": 3.8017632914774246, "grad_norm": 1.1763848066329956, "learning_rate": 7.202244189153086e-06, "loss": 0.5459, "step": 14230 }, { "epoch": 3.8017632914774246, "step": 14230, "train/loss_ctc": 0.42222246527671814, "train/loss_error": 0.45495256781578064, "train/loss_total": 0.44840654730796814 }, { "epoch": 3.802030456852792, "step": 14231, "train/loss_ctc": 0.3721418082714081, "train/loss_error": 0.4401787519454956, "train/loss_total": 0.4265713691711426 }, { "epoch": 3.802297622228159, "step": 14232, "train/loss_ctc": 0.8249569535255432, "train/loss_error": 0.3892013132572174, "train/loss_total": 0.4763524532318115 }, { "epoch": 3.8025647876035267, "step": 14233, "train/loss_ctc": 0.9742472171783447, "train/loss_error": 0.42845359444618225, "train/loss_total": 0.5376123189926147 }, { "epoch": 3.802831952978894, "step": 14234, "train/loss_ctc": 0.8067579865455627, "train/loss_error": 0.3938668668270111, "train/loss_total": 0.4764450788497925 }, { "epoch": 3.803099118354261, "step": 14235, "train/loss_ctc": 0.6683193445205688, "train/loss_error": 0.522068202495575, "train/loss_total": 0.5513184666633606 
}, { "epoch": 3.8033662837296287, "step": 14236, "train/loss_ctc": 0.7247588038444519, "train/loss_error": 0.4496031403541565, "train/loss_total": 0.5046342611312866 }, { "epoch": 3.803633449104996, "step": 14237, "train/loss_ctc": 0.6428273916244507, "train/loss_error": 0.4637986123561859, "train/loss_total": 0.49960434436798096 }, { "epoch": 3.803900614480363, "step": 14238, "train/loss_ctc": 0.5651718974113464, "train/loss_error": 0.41977256536483765, "train/loss_total": 0.44885241985321045 }, { "epoch": 3.8041677798557307, "step": 14239, "train/loss_ctc": 0.6614115238189697, "train/loss_error": 0.46818163990974426, "train/loss_total": 0.5068275928497314 }, { "epoch": 3.804434945231098, "grad_norm": 1.6074726581573486, "learning_rate": 7.186214266631045e-06, "loss": 0.4877, "step": 14240 }, { "epoch": 3.804434945231098, "step": 14240, "train/loss_ctc": 0.7016925811767578, "train/loss_error": 0.40856465697288513, "train/loss_total": 0.4671902656555176 }, { "epoch": 3.804702110606465, "step": 14241, "train/loss_ctc": 0.29256588220596313, "train/loss_error": 0.4391336441040039, "train/loss_total": 0.4098200798034668 }, { "epoch": 3.8049692759818328, "step": 14242, "train/loss_ctc": 0.8051539659500122, "train/loss_error": 0.44295090436935425, "train/loss_total": 0.5153915286064148 }, { "epoch": 3.8052364413572, "step": 14243, "train/loss_ctc": 0.7116444110870361, "train/loss_error": 0.4513271152973175, "train/loss_total": 0.5033905506134033 }, { "epoch": 3.8055036067325676, "step": 14244, "train/loss_ctc": 0.5759720206260681, "train/loss_error": 0.4022117257118225, "train/loss_total": 0.4369637966156006 }, { "epoch": 3.805770772107935, "step": 14245, "train/loss_ctc": 0.29593342542648315, "train/loss_error": 0.4400481581687927, "train/loss_total": 0.41122522950172424 }, { "epoch": 3.8060379374833024, "step": 14246, "train/loss_ctc": 0.751177191734314, "train/loss_error": 0.421162486076355, "train/loss_total": 0.4871654510498047 }, { "epoch": 3.8063051028586696, 
"step": 14247, "train/loss_ctc": 0.8841462135314941, "train/loss_error": 0.3804425001144409, "train/loss_total": 0.481183260679245 }, { "epoch": 3.806572268234037, "step": 14248, "train/loss_ctc": 0.670931339263916, "train/loss_error": 0.36887606978416443, "train/loss_total": 0.4292871356010437 }, { "epoch": 3.8068394336094045, "step": 14249, "train/loss_ctc": 0.5467499494552612, "train/loss_error": 0.48195213079452515, "train/loss_total": 0.49491170048713684 }, { "epoch": 3.8071065989847717, "grad_norm": 1.9545658826828003, "learning_rate": 7.170184344109003e-06, "loss": 0.4637, "step": 14250 }, { "epoch": 3.8071065989847717, "step": 14250, "train/loss_ctc": 0.47139567136764526, "train/loss_error": 0.3955158293247223, "train/loss_total": 0.4106917977333069 }, { "epoch": 3.807373764360139, "step": 14251, "train/loss_ctc": 0.5247405767440796, "train/loss_error": 0.45060643553733826, "train/loss_total": 0.4654332995414734 }, { "epoch": 3.8076409297355065, "step": 14252, "train/loss_ctc": 0.5838127136230469, "train/loss_error": 0.4317285418510437, "train/loss_total": 0.4621453881263733 }, { "epoch": 3.8079080951108737, "step": 14253, "train/loss_ctc": 1.102694034576416, "train/loss_error": 0.4099191725254059, "train/loss_total": 0.548474133014679 }, { "epoch": 3.808175260486241, "step": 14254, "train/loss_ctc": 0.4097786247730255, "train/loss_error": 0.4206477403640747, "train/loss_total": 0.4184739291667938 }, { "epoch": 3.8084424258616085, "step": 14255, "train/loss_ctc": 0.9140467047691345, "train/loss_error": 0.48650071024894714, "train/loss_total": 0.5720099210739136 }, { "epoch": 3.8087095912369757, "step": 14256, "train/loss_ctc": 0.7925399541854858, "train/loss_error": 0.49101126194000244, "train/loss_total": 0.5513169765472412 }, { "epoch": 3.808976756612343, "step": 14257, "train/loss_ctc": 1.1795644760131836, "train/loss_error": 0.4687816798686981, "train/loss_total": 0.6109382510185242 }, { "epoch": 3.8092439219877106, "step": 14258, "train/loss_ctc": 
1.0270718336105347, "train/loss_error": 0.49576136469841003, "train/loss_total": 0.6020234823226929 }, { "epoch": 3.8095110873630778, "step": 14259, "train/loss_ctc": 0.3442116975784302, "train/loss_error": 0.4297791123390198, "train/loss_total": 0.41266563534736633 }, { "epoch": 3.809778252738445, "grad_norm": 3.2092232704162598, "learning_rate": 7.154154421586962e-06, "loss": 0.5054, "step": 14260 }, { "epoch": 3.809778252738445, "step": 14260, "train/loss_ctc": 0.910557210445404, "train/loss_error": 0.3901979327201843, "train/loss_total": 0.49426978826522827 }, { "epoch": 3.8100454181138126, "step": 14261, "train/loss_ctc": 0.2995336651802063, "train/loss_error": 0.4621332585811615, "train/loss_total": 0.429613322019577 }, { "epoch": 3.81031258348918, "step": 14262, "train/loss_ctc": 0.40176331996917725, "train/loss_error": 0.4912342429161072, "train/loss_total": 0.47334006428718567 }, { "epoch": 3.810579748864547, "step": 14263, "train/loss_ctc": 1.1459167003631592, "train/loss_error": 0.3861701488494873, "train/loss_total": 0.5381194353103638 }, { "epoch": 3.8108469142399146, "step": 14264, "train/loss_ctc": 0.5686050653457642, "train/loss_error": 0.4761849641799927, "train/loss_total": 0.49466899037361145 }, { "epoch": 3.811114079615282, "step": 14265, "train/loss_ctc": 0.979053258895874, "train/loss_error": 0.37533918023109436, "train/loss_total": 0.49608200788497925 }, { "epoch": 3.811381244990649, "step": 14266, "train/loss_ctc": 0.445403516292572, "train/loss_error": 0.3898904323577881, "train/loss_total": 0.4009930491447449 }, { "epoch": 3.8116484103660166, "step": 14267, "train/loss_ctc": 1.0120885372161865, "train/loss_error": 0.41784366965293884, "train/loss_total": 0.5366926789283752 }, { "epoch": 3.811915575741384, "step": 14268, "train/loss_ctc": 0.3568384647369385, "train/loss_error": 0.42315664887428284, "train/loss_total": 0.4098930358886719 }, { "epoch": 3.812182741116751, "step": 14269, "train/loss_ctc": 1.2720398902893066, "train/loss_error": 
0.460054486989975, "train/loss_total": 0.6224515438079834 }, { "epoch": 3.8124499064921187, "grad_norm": 1.5859969854354858, "learning_rate": 7.138124499064922e-06, "loss": 0.4896, "step": 14270 }, { "epoch": 3.8124499064921187, "step": 14270, "train/loss_ctc": 0.4676879048347473, "train/loss_error": 0.45014727115631104, "train/loss_total": 0.4536554217338562 }, { "epoch": 3.812717071867486, "step": 14271, "train/loss_ctc": 0.6789048910140991, "train/loss_error": 0.49112504720687866, "train/loss_total": 0.5286810398101807 }, { "epoch": 3.812984237242853, "step": 14272, "train/loss_ctc": 0.3577462434768677, "train/loss_error": 0.44940903782844543, "train/loss_total": 0.4310764670372009 }, { "epoch": 3.8132514026182207, "step": 14273, "train/loss_ctc": 0.27612459659576416, "train/loss_error": 0.44461506605148315, "train/loss_total": 0.4109169840812683 }, { "epoch": 3.813518567993588, "step": 14274, "train/loss_ctc": 0.15648414194583893, "train/loss_error": 0.3695724904537201, "train/loss_total": 0.32695481181144714 }, { "epoch": 3.8137857333689555, "step": 14275, "train/loss_ctc": 0.5896938443183899, "train/loss_error": 0.4259827435016632, "train/loss_total": 0.4587249755859375 }, { "epoch": 3.8140528987443227, "step": 14276, "train/loss_ctc": 0.43936362862586975, "train/loss_error": 0.4637928903102875, "train/loss_total": 0.4589070677757263 }, { "epoch": 3.81432006411969, "step": 14277, "train/loss_ctc": 1.5953264236450195, "train/loss_error": 0.5208712220191956, "train/loss_total": 0.7357622981071472 }, { "epoch": 3.8145872294950576, "step": 14278, "train/loss_ctc": 1.0416767597198486, "train/loss_error": 0.4848429262638092, "train/loss_total": 0.596209704875946 }, { "epoch": 3.8148543948704248, "step": 14279, "train/loss_ctc": 0.7559819221496582, "train/loss_error": 0.471670001745224, "train/loss_total": 0.5285323858261108 }, { "epoch": 3.8151215602457924, "grad_norm": 2.679194927215576, "learning_rate": 7.12209457654288e-06, "loss": 0.4929, "step": 14280 }, { 
"epoch": 3.8151215602457924, "step": 14280, "train/loss_ctc": 1.018549919128418, "train/loss_error": 0.43933728337287903, "train/loss_total": 0.5551798343658447 }, { "epoch": 3.8153887256211596, "step": 14281, "train/loss_ctc": 0.21683314442634583, "train/loss_error": 0.4352016746997833, "train/loss_total": 0.3915279805660248 }, { "epoch": 3.815655890996527, "step": 14282, "train/loss_ctc": 0.3524552285671234, "train/loss_error": 0.4283478558063507, "train/loss_total": 0.41316932439804077 }, { "epoch": 3.8159230563718944, "step": 14283, "train/loss_ctc": 0.8019242286682129, "train/loss_error": 0.4415542185306549, "train/loss_total": 0.5136282444000244 }, { "epoch": 3.8161902217472616, "step": 14284, "train/loss_ctc": 0.40819448232650757, "train/loss_error": 0.4270625114440918, "train/loss_total": 0.42328891158103943 }, { "epoch": 3.816457387122629, "step": 14285, "train/loss_ctc": 0.3378380537033081, "train/loss_error": 0.36944422125816345, "train/loss_total": 0.36312299966812134 }, { "epoch": 3.8167245524979965, "step": 14286, "train/loss_ctc": 0.5671139359474182, "train/loss_error": 0.40808412432670593, "train/loss_total": 0.4398900866508484 }, { "epoch": 3.8169917178733637, "step": 14287, "train/loss_ctc": 0.6480849385261536, "train/loss_error": 0.3924778699874878, "train/loss_total": 0.44359928369522095 }, { "epoch": 3.817258883248731, "step": 14288, "train/loss_ctc": 0.8643393516540527, "train/loss_error": 0.5056626796722412, "train/loss_total": 0.5773980021476746 }, { "epoch": 3.8175260486240985, "step": 14289, "train/loss_ctc": 0.40198880434036255, "train/loss_error": 0.4668344259262085, "train/loss_total": 0.45386528968811035 }, { "epoch": 3.8177932139994657, "grad_norm": 2.044827938079834, "learning_rate": 7.106064654020839e-06, "loss": 0.4575, "step": 14290 }, { "epoch": 3.8177932139994657, "step": 14290, "train/loss_ctc": 0.3551523983478546, "train/loss_error": 0.41786864399909973, "train/loss_total": 0.40532541275024414 }, { "epoch": 3.818060379374833, 
"step": 14291, "train/loss_ctc": 1.1540391445159912, "train/loss_error": 0.4350045919418335, "train/loss_total": 0.578811526298523 }, { "epoch": 3.8183275447502005, "step": 14292, "train/loss_ctc": 0.7181440591812134, "train/loss_error": 0.4511360228061676, "train/loss_total": 0.5045376420021057 }, { "epoch": 3.8185947101255677, "step": 14293, "train/loss_ctc": 0.930443286895752, "train/loss_error": 0.45767027139663696, "train/loss_total": 0.55222487449646 }, { "epoch": 3.818861875500935, "step": 14294, "train/loss_ctc": 0.6197630763053894, "train/loss_error": 0.4829310476779938, "train/loss_total": 0.5102974772453308 }, { "epoch": 3.8191290408763026, "step": 14295, "train/loss_ctc": 0.9004842638969421, "train/loss_error": 0.46601173281669617, "train/loss_total": 0.5529062151908875 }, { "epoch": 3.8193962062516698, "step": 14296, "train/loss_ctc": 1.3684256076812744, "train/loss_error": 0.4346856474876404, "train/loss_total": 0.6214336156845093 }, { "epoch": 3.819663371627037, "step": 14297, "train/loss_ctc": 0.7233644127845764, "train/loss_error": 0.39590150117874146, "train/loss_total": 0.4613940715789795 }, { "epoch": 3.8199305370024046, "step": 14298, "train/loss_ctc": 0.43994927406311035, "train/loss_error": 0.43469613790512085, "train/loss_total": 0.43574678897857666 }, { "epoch": 3.820197702377772, "step": 14299, "train/loss_ctc": 0.6482760906219482, "train/loss_error": 0.40306881070137024, "train/loss_total": 0.45211029052734375 }, { "epoch": 3.820464867753139, "grad_norm": 2.961627244949341, "learning_rate": 7.090034731498798e-06, "loss": 0.5075, "step": 14300 }, { "epoch": 3.820464867753139, "step": 14300, "train/loss_ctc": 0.5281158089637756, "train/loss_error": 0.44885262846946716, "train/loss_total": 0.4647052586078644 }, { "epoch": 3.8207320331285066, "step": 14301, "train/loss_ctc": 1.1322978734970093, "train/loss_error": 0.48835450410842896, "train/loss_total": 0.6171432137489319 }, { "epoch": 3.820999198503874, "step": 14302, "train/loss_ctc": 
0.9284422993659973, "train/loss_error": 0.4529839754104614, "train/loss_total": 0.5480756759643555 }, { "epoch": 3.821266363879241, "step": 14303, "train/loss_ctc": 1.254831075668335, "train/loss_error": 0.43829724192619324, "train/loss_total": 0.6016039848327637 }, { "epoch": 3.8215335292546087, "step": 14304, "train/loss_ctc": 1.1122498512268066, "train/loss_error": 0.4405268132686615, "train/loss_total": 0.5748714208602905 }, { "epoch": 3.821800694629976, "step": 14305, "train/loss_ctc": 0.7596075534820557, "train/loss_error": 0.4468914568424225, "train/loss_total": 0.509434700012207 }, { "epoch": 3.822067860005343, "step": 14306, "train/loss_ctc": 0.6515882015228271, "train/loss_error": 0.4097152650356293, "train/loss_total": 0.4580898880958557 }, { "epoch": 3.8223350253807107, "step": 14307, "train/loss_ctc": 0.668450653553009, "train/loss_error": 0.39939454197883606, "train/loss_total": 0.45320576429367065 }, { "epoch": 3.822602190756078, "step": 14308, "train/loss_ctc": 1.26654052734375, "train/loss_error": 0.47066909074783325, "train/loss_total": 0.6298433542251587 }, { "epoch": 3.8228693561314455, "step": 14309, "train/loss_ctc": 0.2976662814617157, "train/loss_error": 0.4102177619934082, "train/loss_total": 0.3877074718475342 }, { "epoch": 3.8231365215068127, "grad_norm": 3.334481716156006, "learning_rate": 7.074004808976756e-06, "loss": 0.5245, "step": 14310 }, { "epoch": 3.8231365215068127, "step": 14310, "train/loss_ctc": 0.8566510677337646, "train/loss_error": 0.46923568844795227, "train/loss_total": 0.5467187762260437 }, { "epoch": 3.82340368688218, "step": 14311, "train/loss_ctc": 0.3780381977558136, "train/loss_error": 0.35345518589019775, "train/loss_total": 0.3583717942237854 }, { "epoch": 3.8236708522575475, "step": 14312, "train/loss_ctc": 0.39365196228027344, "train/loss_error": 0.4422319233417511, "train/loss_total": 0.432515949010849 }, { "epoch": 3.8239380176329147, "step": 14313, "train/loss_ctc": 0.4585357904434204, "train/loss_error": 
0.45159056782722473, "train/loss_total": 0.4529796242713928 }, { "epoch": 3.8242051830082824, "step": 14314, "train/loss_ctc": 0.6841910481452942, "train/loss_error": 0.46954742074012756, "train/loss_total": 0.5124761462211609 }, { "epoch": 3.8244723483836496, "step": 14315, "train/loss_ctc": 0.9582284092903137, "train/loss_error": 0.4954220652580261, "train/loss_total": 0.5879833698272705 }, { "epoch": 3.8247395137590168, "step": 14316, "train/loss_ctc": 0.8289895057678223, "train/loss_error": 0.440432071685791, "train/loss_total": 0.5181435942649841 }, { "epoch": 3.8250066791343844, "step": 14317, "train/loss_ctc": 1.2227602005004883, "train/loss_error": 0.4828495383262634, "train/loss_total": 0.6308317184448242 }, { "epoch": 3.8252738445097516, "step": 14318, "train/loss_ctc": 1.0524581670761108, "train/loss_error": 0.4155725836753845, "train/loss_total": 0.5429497361183167 }, { "epoch": 3.825541009885119, "step": 14319, "train/loss_ctc": 0.48131299018859863, "train/loss_error": 0.4652653634548187, "train/loss_total": 0.4684748947620392 }, { "epoch": 3.8258081752604864, "grad_norm": 4.266377925872803, "learning_rate": 7.057974886454716e-06, "loss": 0.5051, "step": 14320 }, { "epoch": 3.8258081752604864, "step": 14320, "train/loss_ctc": 0.5116504430770874, "train/loss_error": 0.40928903222084045, "train/loss_total": 0.4297613203525543 }, { "epoch": 3.8260753406358536, "step": 14321, "train/loss_ctc": 0.7878671884536743, "train/loss_error": 0.3982682526111603, "train/loss_total": 0.476188063621521 }, { "epoch": 3.826342506011221, "step": 14322, "train/loss_ctc": 0.5550891757011414, "train/loss_error": 0.45777174830436707, "train/loss_total": 0.47723525762557983 }, { "epoch": 3.8266096713865885, "step": 14323, "train/loss_ctc": 0.5255062580108643, "train/loss_error": 0.3885522484779358, "train/loss_total": 0.41594305634498596 }, { "epoch": 3.8268768367619557, "step": 14324, "train/loss_ctc": 1.0666840076446533, "train/loss_error": 0.4525285065174103, 
"train/loss_total": 0.5753596425056458 }, { "epoch": 3.827144002137323, "step": 14325, "train/loss_ctc": 0.5726646184921265, "train/loss_error": 0.4765523374080658, "train/loss_total": 0.4957748055458069 }, { "epoch": 3.8274111675126905, "step": 14326, "train/loss_ctc": 0.5318218469619751, "train/loss_error": 0.47022759914398193, "train/loss_total": 0.48254644870758057 }, { "epoch": 3.8276783328880577, "step": 14327, "train/loss_ctc": 0.9999852180480957, "train/loss_error": 0.4611966013908386, "train/loss_total": 0.568954348564148 }, { "epoch": 3.827945498263425, "step": 14328, "train/loss_ctc": 0.5790488719940186, "train/loss_error": 0.4207540452480316, "train/loss_total": 0.45241302251815796 }, { "epoch": 3.8282126636387925, "step": 14329, "train/loss_ctc": 0.7063169479370117, "train/loss_error": 0.4639846086502075, "train/loss_total": 0.5124510526657104 }, { "epoch": 3.8284798290141597, "grad_norm": 2.0748531818389893, "learning_rate": 7.041944963932675e-06, "loss": 0.4887, "step": 14330 }, { "epoch": 3.8284798290141597, "step": 14330, "train/loss_ctc": 0.6885368824005127, "train/loss_error": 0.3823804557323456, "train/loss_total": 0.443611741065979 }, { "epoch": 3.828746994389527, "step": 14331, "train/loss_ctc": 1.099259853363037, "train/loss_error": 0.434457004070282, "train/loss_total": 0.5674176216125488 }, { "epoch": 3.8290141597648946, "step": 14332, "train/loss_ctc": 1.1675820350646973, "train/loss_error": 0.444731742143631, "train/loss_total": 0.5893018245697021 }, { "epoch": 3.8292813251402618, "step": 14333, "train/loss_ctc": 0.40760624408721924, "train/loss_error": 0.47057998180389404, "train/loss_total": 0.4579852223396301 }, { "epoch": 3.829548490515629, "step": 14334, "train/loss_ctc": 0.8574285507202148, "train/loss_error": 0.41763734817504883, "train/loss_total": 0.5055955648422241 }, { "epoch": 3.8298156558909966, "step": 14335, "train/loss_ctc": 0.7594302892684937, "train/loss_error": 0.45857805013656616, "train/loss_total": 0.5187485218048096 
}, { "epoch": 3.830082821266364, "step": 14336, "train/loss_ctc": 1.0805354118347168, "train/loss_error": 0.4230763912200928, "train/loss_total": 0.5545682311058044 }, { "epoch": 3.830349986641731, "step": 14337, "train/loss_ctc": 0.6460976004600525, "train/loss_error": 0.4924135208129883, "train/loss_total": 0.5231503248214722 }, { "epoch": 3.8306171520170986, "step": 14338, "train/loss_ctc": 0.5997927188873291, "train/loss_error": 0.38617464900016785, "train/loss_total": 0.42889827489852905 }, { "epoch": 3.830884317392466, "step": 14339, "train/loss_ctc": 0.811198353767395, "train/loss_error": 0.43740114569664, "train/loss_total": 0.51216059923172 }, { "epoch": 3.831151482767833, "grad_norm": 4.733405113220215, "learning_rate": 7.025915041410633e-06, "loss": 0.5101, "step": 14340 }, { "epoch": 3.831151482767833, "step": 14340, "train/loss_ctc": 0.6639100909233093, "train/loss_error": 0.4087831974029541, "train/loss_total": 0.4598085880279541 }, { "epoch": 3.8314186481432007, "step": 14341, "train/loss_ctc": 1.055579423904419, "train/loss_error": 0.4364767074584961, "train/loss_total": 0.5602972507476807 }, { "epoch": 3.831685813518568, "step": 14342, "train/loss_ctc": 0.9105998277664185, "train/loss_error": 0.41547703742980957, "train/loss_total": 0.5145015716552734 }, { "epoch": 3.8319529788939355, "step": 14343, "train/loss_ctc": 0.7549041509628296, "train/loss_error": 0.3538835048675537, "train/loss_total": 0.4340876340866089 }, { "epoch": 3.8322201442693027, "step": 14344, "train/loss_ctc": 1.0851225852966309, "train/loss_error": 0.47894373536109924, "train/loss_total": 0.6001794934272766 }, { "epoch": 3.8324873096446703, "step": 14345, "train/loss_ctc": 0.8631609678268433, "train/loss_error": 0.4334911108016968, "train/loss_total": 0.519425094127655 }, { "epoch": 3.8327544750200375, "step": 14346, "train/loss_ctc": 0.7537006139755249, "train/loss_error": 0.42846691608428955, "train/loss_total": 0.49351364374160767 }, { "epoch": 3.8330216403954047, "step": 
14347, "train/loss_ctc": 0.4599907398223877, "train/loss_error": 0.36892572045326233, "train/loss_total": 0.3871387541294098 }, { "epoch": 3.8332888057707724, "step": 14348, "train/loss_ctc": 1.0794780254364014, "train/loss_error": 0.461083322763443, "train/loss_total": 0.5847622752189636 }, { "epoch": 3.8335559711461396, "step": 14349, "train/loss_ctc": 0.6756273508071899, "train/loss_error": 0.3844251036643982, "train/loss_total": 0.44266557693481445 }, { "epoch": 3.8338231365215067, "grad_norm": 2.0134694576263428, "learning_rate": 7.009885118888592e-06, "loss": 0.4996, "step": 14350 }, { "epoch": 3.8338231365215067, "step": 14350, "train/loss_ctc": 0.5711145401000977, "train/loss_error": 0.38705652952194214, "train/loss_total": 0.4238681495189667 }, { "epoch": 3.8340903018968744, "step": 14351, "train/loss_ctc": 0.8841811418533325, "train/loss_error": 0.47527045011520386, "train/loss_total": 0.5570526123046875 }, { "epoch": 3.8343574672722416, "step": 14352, "train/loss_ctc": 0.5098019242286682, "train/loss_error": 0.4984407126903534, "train/loss_total": 0.5007129311561584 }, { "epoch": 3.8346246326476088, "step": 14353, "train/loss_ctc": 0.4548901319503784, "train/loss_error": 0.42027780413627625, "train/loss_total": 0.4272002875804901 }, { "epoch": 3.8348917980229764, "step": 14354, "train/loss_ctc": 0.9149094223976135, "train/loss_error": 0.4071153402328491, "train/loss_total": 0.508674144744873 }, { "epoch": 3.8351589633983436, "step": 14355, "train/loss_ctc": 0.38151609897613525, "train/loss_error": 0.45677581429481506, "train/loss_total": 0.44172388315200806 }, { "epoch": 3.835426128773711, "step": 14356, "train/loss_ctc": 0.47358351945877075, "train/loss_error": 0.5003173351287842, "train/loss_total": 0.49497056007385254 }, { "epoch": 3.8356932941490784, "step": 14357, "train/loss_ctc": 0.7103040814399719, "train/loss_error": 0.38572075963020325, "train/loss_total": 0.45063742995262146 }, { "epoch": 3.8359604595244456, "step": 14358, "train/loss_ctc": 
0.6110684871673584, "train/loss_error": 0.48142459988594055, "train/loss_total": 0.5073533654212952 }, { "epoch": 3.836227624899813, "step": 14359, "train/loss_ctc": 0.46316516399383545, "train/loss_error": 0.4272175431251526, "train/loss_total": 0.4344070851802826 }, { "epoch": 3.8364947902751805, "grad_norm": 1.6556464433670044, "learning_rate": 6.9938551963665516e-06, "loss": 0.4747, "step": 14360 }, { "epoch": 3.8364947902751805, "step": 14360, "train/loss_ctc": 0.42472153902053833, "train/loss_error": 0.4071662425994873, "train/loss_total": 0.41067731380462646 }, { "epoch": 3.8367619556505477, "step": 14361, "train/loss_ctc": 0.45414936542510986, "train/loss_error": 0.4531019926071167, "train/loss_total": 0.4533114731311798 }, { "epoch": 3.837029121025915, "step": 14362, "train/loss_ctc": 0.6980733871459961, "train/loss_error": 0.4539641737937927, "train/loss_total": 0.5027860403060913 }, { "epoch": 3.8372962864012825, "step": 14363, "train/loss_ctc": 1.0768238306045532, "train/loss_error": 0.5094736218452454, "train/loss_total": 0.6229436993598938 }, { "epoch": 3.8375634517766497, "step": 14364, "train/loss_ctc": 1.5714516639709473, "train/loss_error": 0.4454321265220642, "train/loss_total": 0.6706360578536987 }, { "epoch": 3.837830617152017, "step": 14365, "train/loss_ctc": 0.32641419768333435, "train/loss_error": 0.4279859960079193, "train/loss_total": 0.40767166018486023 }, { "epoch": 3.8380977825273845, "step": 14366, "train/loss_ctc": 0.5812979936599731, "train/loss_error": 0.417563259601593, "train/loss_total": 0.45031023025512695 }, { "epoch": 3.8383649479027517, "step": 14367, "train/loss_ctc": 1.3287197351455688, "train/loss_error": 0.4705744981765747, "train/loss_total": 0.6422035694122314 }, { "epoch": 3.838632113278119, "step": 14368, "train/loss_ctc": 0.4051370620727539, "train/loss_error": 0.45354121923446655, "train/loss_total": 0.44386041164398193 }, { "epoch": 3.8388992786534866, "step": 14369, "train/loss_ctc": 0.5938990712165833, 
"train/loss_error": 0.46878719329833984, "train/loss_total": 0.4938095808029175 }, { "epoch": 3.8391664440288538, "grad_norm": 2.187772035598755, "learning_rate": 6.9778252738445105e-06, "loss": 0.5098, "step": 14370 }, { "epoch": 3.8391664440288538, "step": 14370, "train/loss_ctc": 0.8383386135101318, "train/loss_error": 0.4688304662704468, "train/loss_total": 0.5427321195602417 }, { "epoch": 3.839433609404221, "step": 14371, "train/loss_ctc": 0.7262285351753235, "train/loss_error": 0.5113232731819153, "train/loss_total": 0.5543043613433838 }, { "epoch": 3.8397007747795886, "step": 14372, "train/loss_ctc": 0.7154018878936768, "train/loss_error": 0.4928644895553589, "train/loss_total": 0.5373719930648804 }, { "epoch": 3.839967940154956, "step": 14373, "train/loss_ctc": 0.7159349322319031, "train/loss_error": 0.4394676983356476, "train/loss_total": 0.4947611391544342 }, { "epoch": 3.8402351055303234, "step": 14374, "train/loss_ctc": 0.3898501992225647, "train/loss_error": 0.38205039501190186, "train/loss_total": 0.3836103677749634 }, { "epoch": 3.8405022709056906, "step": 14375, "train/loss_ctc": 1.1541764736175537, "train/loss_error": 0.4362795948982239, "train/loss_total": 0.5798589587211609 }, { "epoch": 3.840769436281058, "step": 14376, "train/loss_ctc": 0.3898738920688629, "train/loss_error": 0.46369031071662903, "train/loss_total": 0.44892704486846924 }, { "epoch": 3.8410366016564255, "step": 14377, "train/loss_ctc": 0.5207473635673523, "train/loss_error": 0.4313235580921173, "train/loss_total": 0.4492083191871643 }, { "epoch": 3.8413037670317927, "step": 14378, "train/loss_ctc": 1.03369140625, "train/loss_error": 0.448930025100708, "train/loss_total": 0.5658823251724243 }, { "epoch": 3.8415709324071603, "step": 14379, "train/loss_ctc": 0.5159839391708374, "train/loss_error": 0.46711215376853943, "train/loss_total": 0.4768865406513214 }, { "epoch": 3.8418380977825275, "grad_norm": 3.565070152282715, "learning_rate": 6.961795351322469e-06, "loss": 0.5034, 
"step": 14380 }, { "epoch": 3.8418380977825275, "step": 14380, "train/loss_ctc": 1.0227724313735962, "train/loss_error": 0.5066509246826172, "train/loss_total": 0.6098752021789551 }, { "epoch": 3.8421052631578947, "step": 14381, "train/loss_ctc": 1.2461209297180176, "train/loss_error": 0.49333393573760986, "train/loss_total": 0.6438913345336914 }, { "epoch": 3.8423724285332623, "step": 14382, "train/loss_ctc": 0.7455401420593262, "train/loss_error": 0.4547843635082245, "train/loss_total": 0.5129355192184448 }, { "epoch": 3.8426395939086295, "step": 14383, "train/loss_ctc": 0.6104075908660889, "train/loss_error": 0.37930747866630554, "train/loss_total": 0.42552751302719116 }, { "epoch": 3.8429067592839967, "step": 14384, "train/loss_ctc": 0.7571160197257996, "train/loss_error": 0.41948607563972473, "train/loss_total": 0.4870120882987976 }, { "epoch": 3.8431739246593644, "step": 14385, "train/loss_ctc": 1.070965051651001, "train/loss_error": 0.46744611859321594, "train/loss_total": 0.588149905204773 }, { "epoch": 3.8434410900347316, "step": 14386, "train/loss_ctc": 0.4874810576438904, "train/loss_error": 0.482550710439682, "train/loss_total": 0.4835367798805237 }, { "epoch": 3.8437082554100988, "step": 14387, "train/loss_ctc": 0.5042542219161987, "train/loss_error": 0.5329630374908447, "train/loss_total": 0.5272212624549866 }, { "epoch": 3.8439754207854664, "step": 14388, "train/loss_ctc": 0.6944148540496826, "train/loss_error": 0.4011445641517639, "train/loss_total": 0.4597986340522766 }, { "epoch": 3.8442425861608336, "step": 14389, "train/loss_ctc": 0.8680990934371948, "train/loss_error": 0.39326977729797363, "train/loss_total": 0.4882356524467468 }, { "epoch": 3.844509751536201, "grad_norm": 1.647572636604309, "learning_rate": 6.9457654288004275e-06, "loss": 0.5226, "step": 14390 }, { "epoch": 3.844509751536201, "step": 14390, "train/loss_ctc": 0.5530920028686523, "train/loss_error": 0.4796018600463867, "train/loss_total": 0.49429991841316223 }, { "epoch": 
3.8447769169115684, "step": 14391, "train/loss_ctc": 0.7861836552619934, "train/loss_error": 0.4301931858062744, "train/loss_total": 0.5013912916183472 }, { "epoch": 3.8450440822869356, "step": 14392, "train/loss_ctc": 0.9083207249641418, "train/loss_error": 0.422549307346344, "train/loss_total": 0.5197036266326904 }, { "epoch": 3.845311247662303, "step": 14393, "train/loss_ctc": 1.3701143264770508, "train/loss_error": 0.42362213134765625, "train/loss_total": 0.6129205822944641 }, { "epoch": 3.8455784130376705, "step": 14394, "train/loss_ctc": 0.34749600291252136, "train/loss_error": 0.47148698568344116, "train/loss_total": 0.44668880105018616 }, { "epoch": 3.8458455784130376, "step": 14395, "train/loss_ctc": 0.38001692295074463, "train/loss_error": 0.48158952593803406, "train/loss_total": 0.46127504110336304 }, { "epoch": 3.846112743788405, "step": 14396, "train/loss_ctc": 0.32485896348953247, "train/loss_error": 0.38894084095954895, "train/loss_total": 0.37612447142601013 }, { "epoch": 3.8463799091637725, "step": 14397, "train/loss_ctc": 2.001166820526123, "train/loss_error": 0.4490467309951782, "train/loss_total": 0.7594707608222961 }, { "epoch": 3.8466470745391397, "step": 14398, "train/loss_ctc": 0.8570224046707153, "train/loss_error": 0.4985123574733734, "train/loss_total": 0.5702143907546997 }, { "epoch": 3.846914239914507, "step": 14399, "train/loss_ctc": 1.0116698741912842, "train/loss_error": 0.4277215301990509, "train/loss_total": 0.5445111989974976 }, { "epoch": 3.8471814052898745, "grad_norm": 2.53385591506958, "learning_rate": 6.9297355062783864e-06, "loss": 0.5287, "step": 14400 }, { "epoch": 3.8471814052898745, "step": 14400, "train/loss_ctc": 1.6134817600250244, "train/loss_error": 0.44605401158332825, "train/loss_total": 0.6795395612716675 }, { "epoch": 3.8474485706652417, "step": 14401, "train/loss_ctc": 0.6538571715354919, "train/loss_error": 0.4585580825805664, "train/loss_total": 0.4976179003715515 }, { "epoch": 3.847715736040609, "step": 
14402, "train/loss_ctc": 0.5571755170822144, "train/loss_error": 0.41010811924934387, "train/loss_total": 0.4395216107368469 }, { "epoch": 3.8479829014159765, "step": 14403, "train/loss_ctc": 0.5928938388824463, "train/loss_error": 0.40949341654777527, "train/loss_total": 0.4461734890937805 }, { "epoch": 3.8482500667913437, "step": 14404, "train/loss_ctc": 0.5258041620254517, "train/loss_error": 0.4042143225669861, "train/loss_total": 0.42853230237960815 }, { "epoch": 3.848517232166711, "step": 14405, "train/loss_ctc": 0.3639545440673828, "train/loss_error": 0.3711967468261719, "train/loss_total": 0.3697483241558075 }, { "epoch": 3.8487843975420786, "step": 14406, "train/loss_ctc": 0.9780252575874329, "train/loss_error": 0.4738306999206543, "train/loss_total": 0.574669599533081 }, { "epoch": 3.8490515629174458, "step": 14407, "train/loss_ctc": 0.5191047191619873, "train/loss_error": 0.36209237575531006, "train/loss_total": 0.3934948444366455 }, { "epoch": 3.8493187282928134, "step": 14408, "train/loss_ctc": 0.7372523546218872, "train/loss_error": 0.39428675174713135, "train/loss_total": 0.46287989616394043 }, { "epoch": 3.8495858936681806, "step": 14409, "train/loss_ctc": 0.6393077373504639, "train/loss_error": 0.37549492716789246, "train/loss_total": 0.42825746536254883 }, { "epoch": 3.849853059043548, "grad_norm": 1.7620782852172852, "learning_rate": 6.913705583756345e-06, "loss": 0.472, "step": 14410 }, { "epoch": 3.849853059043548, "step": 14410, "train/loss_ctc": 0.8368969559669495, "train/loss_error": 0.45200204849243164, "train/loss_total": 0.5289810299873352 }, { "epoch": 3.8501202244189154, "step": 14411, "train/loss_ctc": 0.3882562220096588, "train/loss_error": 0.356280654668808, "train/loss_total": 0.3626757860183716 }, { "epoch": 3.8503873897942826, "step": 14412, "train/loss_ctc": 0.4239908754825592, "train/loss_error": 0.4132612645626068, "train/loss_total": 0.4154072105884552 }, { "epoch": 3.8506545551696503, "step": 14413, "train/loss_ctc": 
1.0383256673812866, "train/loss_error": 0.5114467740058899, "train/loss_total": 0.6168225407600403 }, { "epoch": 3.8509217205450175, "step": 14414, "train/loss_ctc": 0.4258437752723694, "train/loss_error": 0.45804867148399353, "train/loss_total": 0.45160770416259766 }, { "epoch": 3.8511888859203847, "step": 14415, "train/loss_ctc": 0.39386826753616333, "train/loss_error": 0.42056986689567566, "train/loss_total": 0.41522955894470215 }, { "epoch": 3.8514560512957523, "step": 14416, "train/loss_ctc": 0.5736528635025024, "train/loss_error": 0.39920803904533386, "train/loss_total": 0.4340969920158386 }, { "epoch": 3.8517232166711195, "step": 14417, "train/loss_ctc": 1.0550482273101807, "train/loss_error": 0.44224312901496887, "train/loss_total": 0.564804196357727 }, { "epoch": 3.8519903820464867, "step": 14418, "train/loss_ctc": 0.5182540416717529, "train/loss_error": 0.387209415435791, "train/loss_total": 0.41341835260391235 }, { "epoch": 3.8522575474218543, "step": 14419, "train/loss_ctc": 0.8529331088066101, "train/loss_error": 0.45450252294540405, "train/loss_total": 0.5341886281967163 }, { "epoch": 3.8525247127972215, "grad_norm": 1.7329249382019043, "learning_rate": 6.897675661234304e-06, "loss": 0.4737, "step": 14420 }, { "epoch": 3.8525247127972215, "step": 14420, "train/loss_ctc": 0.2553292512893677, "train/loss_error": 0.4014696776866913, "train/loss_total": 0.3722416162490845 }, { "epoch": 3.8527918781725887, "step": 14421, "train/loss_ctc": 0.8865691423416138, "train/loss_error": 0.37639760971069336, "train/loss_total": 0.47843194007873535 }, { "epoch": 3.8530590435479564, "step": 14422, "train/loss_ctc": 1.032737135887146, "train/loss_error": 0.41965848207473755, "train/loss_total": 0.5422742366790771 }, { "epoch": 3.8533262089233236, "step": 14423, "train/loss_ctc": 1.5573558807373047, "train/loss_error": 0.4364217519760132, "train/loss_total": 0.6606085896492004 }, { "epoch": 3.8535933742986908, "step": 14424, "train/loss_ctc": 0.6111459732055664, 
"train/loss_error": 0.42457619309425354, "train/loss_total": 0.46189016103744507 }, { "epoch": 3.8538605396740584, "step": 14425, "train/loss_ctc": 0.212934672832489, "train/loss_error": 0.43467000126838684, "train/loss_total": 0.3903229236602783 }, { "epoch": 3.8541277050494256, "step": 14426, "train/loss_ctc": 0.4701259434223175, "train/loss_error": 0.4130730628967285, "train/loss_total": 0.42448365688323975 }, { "epoch": 3.854394870424793, "step": 14427, "train/loss_ctc": 0.495784729719162, "train/loss_error": 0.36167752742767334, "train/loss_total": 0.3884989619255066 }, { "epoch": 3.8546620358001604, "step": 14428, "train/loss_ctc": 0.6238904595375061, "train/loss_error": 0.40130800008773804, "train/loss_total": 0.4458245038986206 }, { "epoch": 3.8549292011755276, "step": 14429, "train/loss_ctc": 1.49441397190094, "train/loss_error": 0.4492952525615692, "train/loss_total": 0.6583189964294434 }, { "epoch": 3.855196366550895, "grad_norm": 1.7330782413482666, "learning_rate": 6.881645738712263e-06, "loss": 0.4823, "step": 14430 }, { "epoch": 3.855196366550895, "step": 14430, "train/loss_ctc": 2.2292704582214355, "train/loss_error": 0.4544892907142639, "train/loss_total": 0.8094455003738403 }, { "epoch": 3.8554635319262625, "step": 14431, "train/loss_ctc": 0.7927219867706299, "train/loss_error": 0.4821978807449341, "train/loss_total": 0.5443027019500732 }, { "epoch": 3.8557306973016297, "step": 14432, "train/loss_ctc": 0.7205989360809326, "train/loss_error": 0.42491090297698975, "train/loss_total": 0.4840484857559204 }, { "epoch": 3.855997862676997, "step": 14433, "train/loss_ctc": 0.8349521160125732, "train/loss_error": 0.5597856044769287, "train/loss_total": 0.6148189306259155 }, { "epoch": 3.8562650280523645, "step": 14434, "train/loss_ctc": 0.910788893699646, "train/loss_error": 0.4491817057132721, "train/loss_total": 0.5415031909942627 }, { "epoch": 3.8565321934277317, "step": 14435, "train/loss_ctc": 0.7732852697372437, "train/loss_error": 
0.39382728934288025, "train/loss_total": 0.469718873500824 }, { "epoch": 3.856799358803099, "step": 14436, "train/loss_ctc": 1.0458441972732544, "train/loss_error": 0.45660045742988586, "train/loss_total": 0.5744491815567017 }, { "epoch": 3.8570665241784665, "step": 14437, "train/loss_ctc": 0.4477495849132538, "train/loss_error": 0.4101477265357971, "train/loss_total": 0.41766810417175293 }, { "epoch": 3.8573336895538337, "step": 14438, "train/loss_ctc": 0.46502864360809326, "train/loss_error": 0.4492446780204773, "train/loss_total": 0.45240145921707153 }, { "epoch": 3.857600854929201, "step": 14439, "train/loss_ctc": 0.7406959533691406, "train/loss_error": 0.44152653217315674, "train/loss_total": 0.5013604164123535 }, { "epoch": 3.8578680203045685, "grad_norm": 1.4216169118881226, "learning_rate": 6.865615816190221e-06, "loss": 0.541, "step": 14440 }, { "epoch": 3.8578680203045685, "step": 14440, "train/loss_ctc": 0.9191166162490845, "train/loss_error": 0.4370156526565552, "train/loss_total": 0.5334358811378479 }, { "epoch": 3.8581351856799357, "step": 14441, "train/loss_ctc": 0.5766902565956116, "train/loss_error": 0.4624447524547577, "train/loss_total": 0.4852938652038574 }, { "epoch": 3.8584023510553034, "step": 14442, "train/loss_ctc": 0.6100203394889832, "train/loss_error": 0.4846384823322296, "train/loss_total": 0.5097148418426514 }, { "epoch": 3.8586695164306706, "step": 14443, "train/loss_ctc": 0.672091543674469, "train/loss_error": 0.4773193597793579, "train/loss_total": 0.5162737965583801 }, { "epoch": 3.8589366818060378, "step": 14444, "train/loss_ctc": 0.3908325433731079, "train/loss_error": 0.43344756960868835, "train/loss_total": 0.4249245822429657 }, { "epoch": 3.8592038471814054, "step": 14445, "train/loss_ctc": 0.9364694952964783, "train/loss_error": 0.42064204812049866, "train/loss_total": 0.5238075256347656 }, { "epoch": 3.8594710125567726, "step": 14446, "train/loss_ctc": 0.6928843259811401, "train/loss_error": 0.44478386640548706, 
"train/loss_total": 0.4944039583206177 }, { "epoch": 3.8597381779321402, "step": 14447, "train/loss_ctc": 0.5163843631744385, "train/loss_error": 0.4650366008281708, "train/loss_total": 0.4753061532974243 }, { "epoch": 3.8600053433075074, "step": 14448, "train/loss_ctc": 1.2574734687805176, "train/loss_error": 0.4942476153373718, "train/loss_total": 0.646892786026001 }, { "epoch": 3.8602725086828746, "step": 14449, "train/loss_ctc": 0.34612077474594116, "train/loss_error": 0.4021720886230469, "train/loss_total": 0.39096182584762573 }, { "epoch": 3.8605396740582423, "grad_norm": 1.7652422189712524, "learning_rate": 6.849585893668181e-06, "loss": 0.5001, "step": 14450 }, { "epoch": 3.8605396740582423, "step": 14450, "train/loss_ctc": 0.9191073775291443, "train/loss_error": 0.42313089966773987, "train/loss_total": 0.5223262310028076 }, { "epoch": 3.8608068394336095, "step": 14451, "train/loss_ctc": 0.6565980315208435, "train/loss_error": 0.42943045496940613, "train/loss_total": 0.4748639762401581 }, { "epoch": 3.8610740048089767, "step": 14452, "train/loss_ctc": 0.5330367684364319, "train/loss_error": 0.4335218667984009, "train/loss_total": 0.4534248411655426 }, { "epoch": 3.8613411701843443, "step": 14453, "train/loss_ctc": 1.1510770320892334, "train/loss_error": 0.4554654061794281, "train/loss_total": 0.5945877432823181 }, { "epoch": 3.8616083355597115, "step": 14454, "train/loss_ctc": 1.072296380996704, "train/loss_error": 0.5105475187301636, "train/loss_total": 0.6228973269462585 }, { "epoch": 3.8618755009350787, "step": 14455, "train/loss_ctc": 0.5343242883682251, "train/loss_error": 0.4004911184310913, "train/loss_total": 0.427257776260376 }, { "epoch": 3.8621426663104463, "step": 14456, "train/loss_ctc": 0.49435126781463623, "train/loss_error": 0.40992698073387146, "train/loss_total": 0.4268118441104889 }, { "epoch": 3.8624098316858135, "step": 14457, "train/loss_ctc": 0.6062804460525513, "train/loss_error": 0.4260069727897644, "train/loss_total": 
0.46206167340278625 }, { "epoch": 3.8626769970611807, "step": 14458, "train/loss_ctc": 0.460310161113739, "train/loss_error": 0.3902170658111572, "train/loss_total": 0.40423569083213806 }, { "epoch": 3.8629441624365484, "step": 14459, "train/loss_ctc": 0.6227099299430847, "train/loss_error": 0.49095481634140015, "train/loss_total": 0.517305850982666 }, { "epoch": 3.8632113278119156, "grad_norm": 1.7263803482055664, "learning_rate": 6.83355597114614e-06, "loss": 0.4906, "step": 14460 }, { "epoch": 3.8632113278119156, "step": 14460, "train/loss_ctc": 0.3193124532699585, "train/loss_error": 0.43301674723625183, "train/loss_total": 0.4102759063243866 }, { "epoch": 3.8634784931872828, "step": 14461, "train/loss_ctc": 1.352447748184204, "train/loss_error": 0.4658109247684479, "train/loss_total": 0.6431382894515991 }, { "epoch": 3.8637456585626504, "step": 14462, "train/loss_ctc": 0.4692540168762207, "train/loss_error": 0.44228988885879517, "train/loss_total": 0.4476827383041382 }, { "epoch": 3.8640128239380176, "step": 14463, "train/loss_ctc": 0.8347301483154297, "train/loss_error": 0.4919865131378174, "train/loss_total": 0.5605352520942688 }, { "epoch": 3.864279989313385, "step": 14464, "train/loss_ctc": 0.39188626408576965, "train/loss_error": 0.37671399116516113, "train/loss_total": 0.3797484338283539 }, { "epoch": 3.8645471546887524, "step": 14465, "train/loss_ctc": 0.5345746874809265, "train/loss_error": 0.4259566068649292, "train/loss_total": 0.4476802349090576 }, { "epoch": 3.8648143200641196, "step": 14466, "train/loss_ctc": 0.6907862424850464, "train/loss_error": 0.4535113275051117, "train/loss_total": 0.5009663105010986 }, { "epoch": 3.865081485439487, "step": 14467, "train/loss_ctc": 0.42684412002563477, "train/loss_error": 0.42679113149642944, "train/loss_total": 0.42680174112319946 }, { "epoch": 3.8653486508148545, "step": 14468, "train/loss_ctc": 0.5031392574310303, "train/loss_error": 0.35528913140296936, "train/loss_total": 0.3848591446876526 }, { 
"epoch": 3.8656158161902217, "step": 14469, "train/loss_ctc": 0.34615352749824524, "train/loss_error": 0.47633880376815796, "train/loss_total": 0.45030176639556885 }, { "epoch": 3.865882981565589, "grad_norm": 2.16715145111084, "learning_rate": 6.817526048624098e-06, "loss": 0.4652, "step": 14470 }, { "epoch": 3.865882981565589, "step": 14470, "train/loss_ctc": 0.3973468244075775, "train/loss_error": 0.3586813509464264, "train/loss_total": 0.3664144277572632 }, { "epoch": 3.8661501469409565, "step": 14471, "train/loss_ctc": 0.8449026346206665, "train/loss_error": 0.4096943438053131, "train/loss_total": 0.49673599004745483 }, { "epoch": 3.8664173123163237, "step": 14472, "train/loss_ctc": 0.7375825047492981, "train/loss_error": 0.5163838267326355, "train/loss_total": 0.5606235861778259 }, { "epoch": 3.866684477691691, "step": 14473, "train/loss_ctc": 0.22248104214668274, "train/loss_error": 0.4475807845592499, "train/loss_total": 0.402560830116272 }, { "epoch": 3.8669516430670585, "step": 14474, "train/loss_ctc": 0.39996442198753357, "train/loss_error": 0.4781617522239685, "train/loss_total": 0.4625222980976105 }, { "epoch": 3.8672188084424257, "step": 14475, "train/loss_ctc": 0.6809477210044861, "train/loss_error": 0.4012080132961273, "train/loss_total": 0.4571559727191925 }, { "epoch": 3.8674859738177934, "step": 14476, "train/loss_ctc": 1.1295771598815918, "train/loss_error": 0.4812236428260803, "train/loss_total": 0.6108943223953247 }, { "epoch": 3.8677531391931605, "step": 14477, "train/loss_ctc": 0.7878942489624023, "train/loss_error": 0.44548502564430237, "train/loss_total": 0.5139669179916382 }, { "epoch": 3.868020304568528, "step": 14478, "train/loss_ctc": 0.5639399290084839, "train/loss_error": 0.5216713547706604, "train/loss_total": 0.530125081539154 }, { "epoch": 3.8682874699438954, "step": 14479, "train/loss_ctc": 0.4174489378929138, "train/loss_error": 0.4021395444869995, "train/loss_total": 0.40520143508911133 }, { "epoch": 3.8685546353192626, 
"grad_norm": 1.818850040435791, "learning_rate": 6.801496126102057e-06, "loss": 0.4806, "step": 14480 }, { "epoch": 3.8685546353192626, "step": 14480, "train/loss_ctc": 1.4518176317214966, "train/loss_error": 0.4811111092567444, "train/loss_total": 0.6752524375915527 }, { "epoch": 3.86882180069463, "step": 14481, "train/loss_ctc": 0.8502814769744873, "train/loss_error": 0.47895386815071106, "train/loss_total": 0.5532193779945374 }, { "epoch": 3.8690889660699974, "step": 14482, "train/loss_ctc": 1.0439459085464478, "train/loss_error": 0.4776115119457245, "train/loss_total": 0.590878427028656 }, { "epoch": 3.8693561314453646, "step": 14483, "train/loss_ctc": 0.5158690214157104, "train/loss_error": 0.3996734321117401, "train/loss_total": 0.4229125380516052 }, { "epoch": 3.8696232968207323, "step": 14484, "train/loss_ctc": 0.5965735912322998, "train/loss_error": 0.4525642693042755, "train/loss_total": 0.4813661575317383 }, { "epoch": 3.8698904621960994, "step": 14485, "train/loss_ctc": 0.893009603023529, "train/loss_error": 0.4530400335788727, "train/loss_total": 0.5410339832305908 }, { "epoch": 3.8701576275714666, "step": 14486, "train/loss_ctc": 0.6822952032089233, "train/loss_error": 0.4987732172012329, "train/loss_total": 0.5354776382446289 }, { "epoch": 3.8704247929468343, "step": 14487, "train/loss_ctc": 0.7222216725349426, "train/loss_error": 0.4532419741153717, "train/loss_total": 0.5070379376411438 }, { "epoch": 3.8706919583222015, "step": 14488, "train/loss_ctc": 0.6361624598503113, "train/loss_error": 0.44685786962509155, "train/loss_total": 0.48471879959106445 }, { "epoch": 3.8709591236975687, "step": 14489, "train/loss_ctc": 0.5377405881881714, "train/loss_error": 0.3406597971916199, "train/loss_total": 0.38007596135139465 }, { "epoch": 3.8712262890729363, "grad_norm": 3.5362329483032227, "learning_rate": 6.785466203580016e-06, "loss": 0.5172, "step": 14490 }, { "epoch": 3.8712262890729363, "step": 14490, "train/loss_ctc": 0.4094875156879425, 
"train/loss_error": 0.402767151594162, "train/loss_total": 0.40411123633384705 }, { "epoch": 3.8714934544483035, "step": 14491, "train/loss_ctc": 0.5484741926193237, "train/loss_error": 0.3639426529407501, "train/loss_total": 0.40084895491600037 }, { "epoch": 3.8717606198236707, "step": 14492, "train/loss_ctc": 0.9808686375617981, "train/loss_error": 0.4025593101978302, "train/loss_total": 0.5182211399078369 }, { "epoch": 3.8720277851990383, "step": 14493, "train/loss_ctc": 0.45234689116477966, "train/loss_error": 0.3564489483833313, "train/loss_total": 0.3756285607814789 }, { "epoch": 3.8722949505744055, "step": 14494, "train/loss_ctc": 1.1838792562484741, "train/loss_error": 0.475327730178833, "train/loss_total": 0.6170380711555481 }, { "epoch": 3.8725621159497727, "step": 14495, "train/loss_ctc": 0.45850858092308044, "train/loss_error": 0.44469135999679565, "train/loss_total": 0.4474548101425171 }, { "epoch": 3.8728292813251404, "step": 14496, "train/loss_ctc": 0.7986375093460083, "train/loss_error": 0.4429187476634979, "train/loss_total": 0.5140625238418579 }, { "epoch": 3.8730964467005076, "step": 14497, "train/loss_ctc": 0.7574836015701294, "train/loss_error": 0.47676047682762146, "train/loss_total": 0.532905101776123 }, { "epoch": 3.8733636120758748, "step": 14498, "train/loss_ctc": 1.1015644073486328, "train/loss_error": 0.45662710070610046, "train/loss_total": 0.5856145620346069 }, { "epoch": 3.8736307774512424, "step": 14499, "train/loss_ctc": 0.6916887760162354, "train/loss_error": 0.43944644927978516, "train/loss_total": 0.48989492654800415 }, { "epoch": 3.8738979428266096, "grad_norm": 1.9730554819107056, "learning_rate": 6.769436281057976e-06, "loss": 0.4886, "step": 14500 }, { "epoch": 3.8738979428266096, "step": 14500, "train/loss_ctc": 1.3266258239746094, "train/loss_error": 0.44736483693122864, "train/loss_total": 0.6232170462608337 }, { "epoch": 3.874165108201977, "step": 14501, "train/loss_ctc": 0.863347589969635, "train/loss_error": 
0.42332762479782104, "train/loss_total": 0.5113316178321838 }, { "epoch": 3.8744322735773444, "step": 14502, "train/loss_ctc": 0.5064713358879089, "train/loss_error": 0.5025799870491028, "train/loss_total": 0.5033582448959351 }, { "epoch": 3.8746994389527116, "step": 14503, "train/loss_ctc": 0.34017428755760193, "train/loss_error": 0.4427303373813629, "train/loss_total": 0.4222191274166107 }, { "epoch": 3.874966604328079, "step": 14504, "train/loss_ctc": 0.9349870681762695, "train/loss_error": 0.4465140998363495, "train/loss_total": 0.5442087054252625 }, { "epoch": 3.8752337697034465, "step": 14505, "train/loss_ctc": 0.6343876123428345, "train/loss_error": 0.4522853493690491, "train/loss_total": 0.4887058138847351 }, { "epoch": 3.8755009350788137, "step": 14506, "train/loss_ctc": 0.6335815787315369, "train/loss_error": 0.4139821529388428, "train/loss_total": 0.45790204405784607 }, { "epoch": 3.8757681004541813, "step": 14507, "train/loss_ctc": 0.4613915681838989, "train/loss_error": 0.45184415578842163, "train/loss_total": 0.45375365018844604 }, { "epoch": 3.8760352658295485, "step": 14508, "train/loss_ctc": 0.59510338306427, "train/loss_error": 0.4199010133743286, "train/loss_total": 0.4549414813518524 }, { "epoch": 3.8763024312049157, "step": 14509, "train/loss_ctc": 0.6006295680999756, "train/loss_error": 0.4719972312450409, "train/loss_total": 0.49772369861602783 }, { "epoch": 3.8765695965802833, "grad_norm": 1.3653082847595215, "learning_rate": 6.753406358535934e-06, "loss": 0.4957, "step": 14510 }, { "epoch": 3.8765695965802833, "step": 14510, "train/loss_ctc": 0.981066107749939, "train/loss_error": 0.45685699582099915, "train/loss_total": 0.561698853969574 }, { "epoch": 3.8768367619556505, "step": 14511, "train/loss_ctc": 0.56776362657547, "train/loss_error": 0.42852482199668884, "train/loss_total": 0.45637258887290955 }, { "epoch": 3.877103927331018, "step": 14512, "train/loss_ctc": 0.5286860466003418, "train/loss_error": 0.45437631011009216, 
"train/loss_total": 0.4692382514476776 }, { "epoch": 3.8773710927063854, "step": 14513, "train/loss_ctc": 0.43528029322624207, "train/loss_error": 0.5428390502929688, "train/loss_total": 0.5213273167610168 }, { "epoch": 3.8776382580817526, "step": 14514, "train/loss_ctc": 0.580772876739502, "train/loss_error": 0.4521598517894745, "train/loss_total": 0.4778824746608734 }, { "epoch": 3.87790542345712, "step": 14515, "train/loss_ctc": 0.8494261503219604, "train/loss_error": 0.3852812945842743, "train/loss_total": 0.47811025381088257 }, { "epoch": 3.8781725888324874, "step": 14516, "train/loss_ctc": 0.6853156089782715, "train/loss_error": 0.48454388976097107, "train/loss_total": 0.5246982574462891 }, { "epoch": 3.8784397542078546, "step": 14517, "train/loss_ctc": 0.31091296672821045, "train/loss_error": 0.4738803505897522, "train/loss_total": 0.4412868916988373 }, { "epoch": 3.8787069195832222, "step": 14518, "train/loss_ctc": 1.2726970911026, "train/loss_error": 0.49580320715904236, "train/loss_total": 0.6511819958686829 }, { "epoch": 3.8789740849585894, "step": 14519, "train/loss_ctc": 0.38453957438468933, "train/loss_error": 0.45866647362709045, "train/loss_total": 0.4438410997390747 }, { "epoch": 3.8792412503339566, "grad_norm": 1.0692886114120483, "learning_rate": 6.737376436013893e-06, "loss": 0.5026, "step": 14520 }, { "epoch": 3.8792412503339566, "step": 14520, "train/loss_ctc": 0.9207156896591187, "train/loss_error": 0.38070371747016907, "train/loss_total": 0.488706111907959 }, { "epoch": 3.8795084157093243, "step": 14521, "train/loss_ctc": 0.49135634303092957, "train/loss_error": 0.432041734457016, "train/loss_total": 0.44390466809272766 }, { "epoch": 3.8797755810846914, "step": 14522, "train/loss_ctc": 0.3881736397743225, "train/loss_error": 0.4236485958099365, "train/loss_total": 0.4165536165237427 }, { "epoch": 3.8800427464600586, "step": 14523, "train/loss_ctc": 0.957288920879364, "train/loss_error": 0.3979994058609009, "train/loss_total": 
0.5098572969436646 }, { "epoch": 3.8803099118354263, "step": 14524, "train/loss_ctc": 0.8117831945419312, "train/loss_error": 0.41462263464927673, "train/loss_total": 0.49405476450920105 }, { "epoch": 3.8805770772107935, "step": 14525, "train/loss_ctc": 1.119255542755127, "train/loss_error": 0.4331282675266266, "train/loss_total": 0.5703537464141846 }, { "epoch": 3.8808442425861607, "step": 14526, "train/loss_ctc": 0.6845604181289673, "train/loss_error": 0.42737728357315063, "train/loss_total": 0.47881394624710083 }, { "epoch": 3.8811114079615283, "step": 14527, "train/loss_ctc": 0.5554192662239075, "train/loss_error": 0.41744112968444824, "train/loss_total": 0.44503676891326904 }, { "epoch": 3.8813785733368955, "step": 14528, "train/loss_ctc": 0.7532393932342529, "train/loss_error": 0.41230687499046326, "train/loss_total": 0.4804933965206146 }, { "epoch": 3.8816457387122627, "step": 14529, "train/loss_ctc": 0.5848715901374817, "train/loss_error": 0.45434218645095825, "train/loss_total": 0.48044806718826294 }, { "epoch": 3.8819129040876303, "grad_norm": 1.5379126071929932, "learning_rate": 6.721346513491852e-06, "loss": 0.4808, "step": 14530 }, { "epoch": 3.8819129040876303, "step": 14530, "train/loss_ctc": 0.6810978055000305, "train/loss_error": 0.3754604756832123, "train/loss_total": 0.43658795952796936 }, { "epoch": 3.8821800694629975, "step": 14531, "train/loss_ctc": 0.793110728263855, "train/loss_error": 0.5435957908630371, "train/loss_total": 0.5934988260269165 }, { "epoch": 3.8824472348383647, "step": 14532, "train/loss_ctc": 0.9073754549026489, "train/loss_error": 0.37305784225463867, "train/loss_total": 0.4799213409423828 }, { "epoch": 3.8827144002137324, "step": 14533, "train/loss_ctc": 1.1171778440475464, "train/loss_error": 0.411244660615921, "train/loss_total": 0.5524312853813171 }, { "epoch": 3.8829815655890996, "step": 14534, "train/loss_ctc": 0.34713882207870483, "train/loss_error": 0.4120764434337616, "train/loss_total": 0.39908891916275024 }, { 
"epoch": 3.8832487309644668, "step": 14535, "train/loss_ctc": 0.46750733256340027, "train/loss_error": 0.4831486642360687, "train/loss_total": 0.4800204038619995 }, { "epoch": 3.8835158963398344, "step": 14536, "train/loss_ctc": 0.4575059115886688, "train/loss_error": 0.43027570843696594, "train/loss_total": 0.435721755027771 }, { "epoch": 3.8837830617152016, "step": 14537, "train/loss_ctc": 1.4500763416290283, "train/loss_error": 0.47108036279678345, "train/loss_total": 0.6668795347213745 }, { "epoch": 3.884050227090569, "step": 14538, "train/loss_ctc": 0.36759036779403687, "train/loss_error": 0.4305039048194885, "train/loss_total": 0.41792118549346924 }, { "epoch": 3.8843173924659364, "step": 14539, "train/loss_ctc": 0.5448316335678101, "train/loss_error": 0.41916826367378235, "train/loss_total": 0.44430094957351685 }, { "epoch": 3.8845845578413036, "grad_norm": 1.4745886325836182, "learning_rate": 6.705316590969811e-06, "loss": 0.4906, "step": 14540 }, { "epoch": 3.8845845578413036, "step": 14540, "train/loss_ctc": 0.7539588212966919, "train/loss_error": 0.3914470672607422, "train/loss_total": 0.46394941210746765 }, { "epoch": 3.8848517232166713, "step": 14541, "train/loss_ctc": 1.490493655204773, "train/loss_error": 0.4853381812572479, "train/loss_total": 0.6863692998886108 }, { "epoch": 3.8851188885920385, "step": 14542, "train/loss_ctc": 0.69698566198349, "train/loss_error": 0.5092527866363525, "train/loss_total": 0.54679936170578 }, { "epoch": 3.8853860539674057, "step": 14543, "train/loss_ctc": 0.4472666084766388, "train/loss_error": 0.4852105677127838, "train/loss_total": 0.47762179374694824 }, { "epoch": 3.8856532193427733, "step": 14544, "train/loss_ctc": 1.00126051902771, "train/loss_error": 0.4698511064052582, "train/loss_total": 0.5761330127716064 }, { "epoch": 3.8859203847181405, "step": 14545, "train/loss_ctc": 0.5429081916809082, "train/loss_error": 0.43403005599975586, "train/loss_total": 0.4558056890964508 }, { "epoch": 3.886187550093508, "step": 
14546, "train/loss_ctc": 0.4945964217185974, "train/loss_error": 0.5319934487342834, "train/loss_total": 0.5245140790939331 }, { "epoch": 3.8864547154688753, "step": 14547, "train/loss_ctc": 0.5399577021598816, "train/loss_error": 0.4253736436367035, "train/loss_total": 0.44829046726226807 }, { "epoch": 3.8867218808442425, "step": 14548, "train/loss_ctc": 0.42878496646881104, "train/loss_error": 0.4012158215045929, "train/loss_total": 0.40672963857650757 }, { "epoch": 3.88698904621961, "step": 14549, "train/loss_ctc": 1.0424290895462036, "train/loss_error": 0.37302452325820923, "train/loss_total": 0.5069054365158081 }, { "epoch": 3.8872562115949774, "grad_norm": 2.1860976219177246, "learning_rate": 6.6892866684477695e-06, "loss": 0.5093, "step": 14550 }, { "epoch": 3.8872562115949774, "step": 14550, "train/loss_ctc": 0.3847048580646515, "train/loss_error": 0.4311618506908417, "train/loss_total": 0.42187047004699707 }, { "epoch": 3.8875233769703446, "step": 14551, "train/loss_ctc": 0.8086907863616943, "train/loss_error": 0.4397469758987427, "train/loss_total": 0.513535737991333 }, { "epoch": 3.887790542345712, "step": 14552, "train/loss_ctc": 0.7955846786499023, "train/loss_error": 0.5279914140701294, "train/loss_total": 0.581510066986084 }, { "epoch": 3.8880577077210794, "step": 14553, "train/loss_ctc": 0.505236029624939, "train/loss_error": 0.5472358465194702, "train/loss_total": 0.538835883140564 }, { "epoch": 3.8883248730964466, "step": 14554, "train/loss_ctc": 0.7519242167472839, "train/loss_error": 0.39662426710128784, "train/loss_total": 0.467684268951416 }, { "epoch": 3.8885920384718142, "step": 14555, "train/loss_ctc": 0.5180044174194336, "train/loss_error": 0.39915943145751953, "train/loss_total": 0.42292845249176025 }, { "epoch": 3.8888592038471814, "step": 14556, "train/loss_ctc": 0.4430670738220215, "train/loss_error": 0.412597119808197, "train/loss_total": 0.41869112849235535 }, { "epoch": 3.8891263692225486, "step": 14557, "train/loss_ctc": 
0.559131383895874, "train/loss_error": 0.4338871240615845, "train/loss_total": 0.4589359760284424 }, { "epoch": 3.8893935345979163, "step": 14558, "train/loss_ctc": 0.6412633061408997, "train/loss_error": 0.4958661198616028, "train/loss_total": 0.5249455571174622 }, { "epoch": 3.8896606999732835, "step": 14559, "train/loss_ctc": 0.3766476511955261, "train/loss_error": 0.38665392994880676, "train/loss_total": 0.384652704000473 }, { "epoch": 3.8899278653486506, "grad_norm": 1.7052279710769653, "learning_rate": 6.6732567459257285e-06, "loss": 0.4734, "step": 14560 }, { "epoch": 3.8899278653486506, "step": 14560, "train/loss_ctc": 0.46140944957733154, "train/loss_error": 0.4131864607334137, "train/loss_total": 0.42283105850219727 }, { "epoch": 3.8901950307240183, "step": 14561, "train/loss_ctc": 1.5893582105636597, "train/loss_error": 0.4191790223121643, "train/loss_total": 0.6532148718833923 }, { "epoch": 3.8904621960993855, "step": 14562, "train/loss_ctc": 0.8953098058700562, "train/loss_error": 0.46768686175346375, "train/loss_total": 0.5532114505767822 }, { "epoch": 3.8907293614747527, "step": 14563, "train/loss_ctc": 0.5762646794319153, "train/loss_error": 0.45392322540283203, "train/loss_total": 0.47839152812957764 }, { "epoch": 3.8909965268501203, "step": 14564, "train/loss_ctc": 0.7519153356552124, "train/loss_error": 0.35755306482315063, "train/loss_total": 0.43642550706863403 }, { "epoch": 3.8912636922254875, "step": 14565, "train/loss_ctc": 0.6538857817649841, "train/loss_error": 0.5558159351348877, "train/loss_total": 0.5754299163818359 }, { "epoch": 3.8915308576008547, "step": 14566, "train/loss_ctc": 0.7276345491409302, "train/loss_error": 0.41857630014419556, "train/loss_total": 0.48038795590400696 }, { "epoch": 3.8917980229762223, "step": 14567, "train/loss_ctc": 0.5350993871688843, "train/loss_error": 0.4222507178783417, "train/loss_total": 0.44482046365737915 }, { "epoch": 3.8920651883515895, "step": 14568, "train/loss_ctc": 0.623073399066925, 
"train/loss_error": 0.4293234944343567, "train/loss_total": 0.4680734872817993 }, { "epoch": 3.8923323537269567, "step": 14569, "train/loss_ctc": 0.9658257365226746, "train/loss_error": 0.506127119064331, "train/loss_total": 0.5980668663978577 }, { "epoch": 3.8925995191023244, "grad_norm": 4.4436750411987305, "learning_rate": 6.6572268234036866e-06, "loss": 0.5111, "step": 14570 }, { "epoch": 3.8925995191023244, "step": 14570, "train/loss_ctc": 0.854803740978241, "train/loss_error": 0.42357853055000305, "train/loss_total": 0.5098235607147217 }, { "epoch": 3.8928666844776916, "step": 14571, "train/loss_ctc": 0.9570429921150208, "train/loss_error": 0.5054493546485901, "train/loss_total": 0.5957680940628052 }, { "epoch": 3.8931338498530588, "step": 14572, "train/loss_ctc": 1.5063743591308594, "train/loss_error": 0.44084978103637695, "train/loss_total": 0.6539546847343445 }, { "epoch": 3.8934010152284264, "step": 14573, "train/loss_ctc": 0.37219542264938354, "train/loss_error": 0.393553227186203, "train/loss_total": 0.38928166031837463 }, { "epoch": 3.8936681806037936, "step": 14574, "train/loss_ctc": 0.3339031934738159, "train/loss_error": 0.4855319559574127, "train/loss_total": 0.4552062153816223 }, { "epoch": 3.8939353459791612, "step": 14575, "train/loss_ctc": 1.1430091857910156, "train/loss_error": 0.43857941031455994, "train/loss_total": 0.579465389251709 }, { "epoch": 3.8942025113545284, "step": 14576, "train/loss_ctc": 0.5339488983154297, "train/loss_error": 0.5012317299842834, "train/loss_total": 0.5077751874923706 }, { "epoch": 3.894469676729896, "step": 14577, "train/loss_ctc": 0.8954649567604065, "train/loss_error": 0.4376591444015503, "train/loss_total": 0.5292202830314636 }, { "epoch": 3.8947368421052633, "step": 14578, "train/loss_ctc": 0.5858110189437866, "train/loss_error": 0.4311091899871826, "train/loss_total": 0.46204954385757446 }, { "epoch": 3.8950040074806305, "step": 14579, "train/loss_ctc": 0.5712560415267944, "train/loss_error": 
0.440798282623291, "train/loss_total": 0.4668898582458496 }, { "epoch": 3.895271172855998, "grad_norm": 1.73086416721344, "learning_rate": 6.6411969008816455e-06, "loss": 0.5149, "step": 14580 }, { "epoch": 3.895271172855998, "step": 14580, "train/loss_ctc": 0.775856077671051, "train/loss_error": 0.44786787033081055, "train/loss_total": 0.5134655237197876 }, { "epoch": 3.8955383382313653, "step": 14581, "train/loss_ctc": 0.2763657569885254, "train/loss_error": 0.43743184208869934, "train/loss_total": 0.40521863102912903 }, { "epoch": 3.8958055036067325, "step": 14582, "train/loss_ctc": 0.49409088492393494, "train/loss_error": 0.41688695549964905, "train/loss_total": 0.4323277473449707 }, { "epoch": 3.8960726689821, "step": 14583, "train/loss_ctc": 0.36874139308929443, "train/loss_error": 0.41617530584335327, "train/loss_total": 0.40668854117393494 }, { "epoch": 3.8963398343574673, "step": 14584, "train/loss_ctc": 0.5270001888275146, "train/loss_error": 0.46258145570755005, "train/loss_total": 0.47546523809432983 }, { "epoch": 3.8966069997328345, "step": 14585, "train/loss_ctc": 0.3931013345718384, "train/loss_error": 0.39835309982299805, "train/loss_total": 0.3973027467727661 }, { "epoch": 3.896874165108202, "step": 14586, "train/loss_ctc": 1.0511698722839355, "train/loss_error": 0.39378079771995544, "train/loss_total": 0.5252586007118225 }, { "epoch": 3.8971413304835694, "step": 14587, "train/loss_ctc": 0.6885563135147095, "train/loss_error": 0.4753589928150177, "train/loss_total": 0.517998456954956 }, { "epoch": 3.8974084958589366, "step": 14588, "train/loss_ctc": 1.1200225353240967, "train/loss_error": 0.40685275197029114, "train/loss_total": 0.5494867563247681 }, { "epoch": 3.897675661234304, "step": 14589, "train/loss_ctc": 0.3829437494277954, "train/loss_error": 0.38294893503189087, "train/loss_total": 0.3829478919506073 }, { "epoch": 3.8979428266096714, "grad_norm": 1.1489765644073486, "learning_rate": 6.625166978359605e-06, "loss": 0.4606, "step": 14590 }, 
{ "epoch": 3.8979428266096714, "step": 14590, "train/loss_ctc": 0.8573726415634155, "train/loss_error": 0.4656985402107239, "train/loss_total": 0.5440333485603333 }, { "epoch": 3.8982099919850386, "step": 14591, "train/loss_ctc": 0.4141383767127991, "train/loss_error": 0.4865533709526062, "train/loss_total": 0.4720703959465027 }, { "epoch": 3.8984771573604062, "step": 14592, "train/loss_ctc": 0.44919875264167786, "train/loss_error": 0.42646872997283936, "train/loss_total": 0.431014746427536 }, { "epoch": 3.8987443227357734, "step": 14593, "train/loss_ctc": 0.31158262491226196, "train/loss_error": 0.508320152759552, "train/loss_total": 0.46897265315055847 }, { "epoch": 3.8990114881111406, "step": 14594, "train/loss_ctc": 0.47629523277282715, "train/loss_error": 0.446799635887146, "train/loss_total": 0.4526987671852112 }, { "epoch": 3.8992786534865083, "step": 14595, "train/loss_ctc": 0.4632524251937866, "train/loss_error": 0.4382043480873108, "train/loss_total": 0.4432139992713928 }, { "epoch": 3.8995458188618755, "step": 14596, "train/loss_ctc": 0.25886270403862, "train/loss_error": 0.4375514090061188, "train/loss_total": 0.40181365609169006 }, { "epoch": 3.8998129842372427, "step": 14597, "train/loss_ctc": 0.9828593730926514, "train/loss_error": 0.3986012637615204, "train/loss_total": 0.5154528617858887 }, { "epoch": 3.9000801496126103, "step": 14598, "train/loss_ctc": 1.1582015752792358, "train/loss_error": 0.4844124913215637, "train/loss_total": 0.6191703081130981 }, { "epoch": 3.9003473149879775, "step": 14599, "train/loss_ctc": 0.8259468674659729, "train/loss_error": 0.46002474427223206, "train/loss_total": 0.5332091450691223 }, { "epoch": 3.9006144803633447, "grad_norm": 1.374982237815857, "learning_rate": 6.609137055837563e-06, "loss": 0.4882, "step": 14600 }, { "epoch": 3.9006144803633447, "step": 14600, "train/loss_ctc": 1.3524965047836304, "train/loss_error": 0.44245579838752747, "train/loss_total": 0.6244639754295349 }, { "epoch": 3.9008816457387123, 
"step": 14601, "train/loss_ctc": 0.8966425657272339, "train/loss_error": 0.4313584268093109, "train/loss_total": 0.5244152545928955 }, { "epoch": 3.9011488111140795, "step": 14602, "train/loss_ctc": 0.6784339547157288, "train/loss_error": 0.4499559998512268, "train/loss_total": 0.49565160274505615 }, { "epoch": 3.9014159764894467, "step": 14603, "train/loss_ctc": 0.8352431058883667, "train/loss_error": 0.4868670403957367, "train/loss_total": 0.5565422773361206 }, { "epoch": 3.9016831418648144, "step": 14604, "train/loss_ctc": 1.0874043703079224, "train/loss_error": 0.4913692772388458, "train/loss_total": 0.610576331615448 }, { "epoch": 3.9019503072401815, "step": 14605, "train/loss_ctc": 0.6051369905471802, "train/loss_error": 0.4476059079170227, "train/loss_total": 0.4791121482849121 }, { "epoch": 3.902217472615549, "step": 14606, "train/loss_ctc": 0.936359167098999, "train/loss_error": 0.5281122922897339, "train/loss_total": 0.6097617149353027 }, { "epoch": 3.9024846379909164, "step": 14607, "train/loss_ctc": 0.45827046036720276, "train/loss_error": 0.4957444965839386, "train/loss_total": 0.48824968934059143 }, { "epoch": 3.9027518033662836, "step": 14608, "train/loss_ctc": 0.8628870248794556, "train/loss_error": 0.41209694743156433, "train/loss_total": 0.5022549629211426 }, { "epoch": 3.903018968741651, "step": 14609, "train/loss_ctc": 1.178978443145752, "train/loss_error": 0.44210848212242126, "train/loss_total": 0.5894824862480164 }, { "epoch": 3.9032861341170184, "grad_norm": 1.9724094867706299, "learning_rate": 6.593107133315522e-06, "loss": 0.5481, "step": 14610 }, { "epoch": 3.9032861341170184, "step": 14610, "train/loss_ctc": 0.47282230854034424, "train/loss_error": 0.4412495493888855, "train/loss_total": 0.44756412506103516 }, { "epoch": 3.903553299492386, "step": 14611, "train/loss_ctc": 0.20944535732269287, "train/loss_error": 0.3817683160305023, "train/loss_total": 0.34730371832847595 }, { "epoch": 3.9038204648677532, "step": 14612, "train/loss_ctc": 
0.6219770312309265, "train/loss_error": 0.4832932949066162, "train/loss_total": 0.5110300779342651 }, { "epoch": 3.9040876302431204, "step": 14613, "train/loss_ctc": 0.43431735038757324, "train/loss_error": 0.45296645164489746, "train/loss_total": 0.4492366313934326 }, { "epoch": 3.904354795618488, "step": 14614, "train/loss_ctc": 0.46189212799072266, "train/loss_error": 0.4291074872016907, "train/loss_total": 0.43566444516181946 }, { "epoch": 3.9046219609938553, "step": 14615, "train/loss_ctc": 0.5792791247367859, "train/loss_error": 0.44519513845443726, "train/loss_total": 0.472011923789978 }, { "epoch": 3.9048891263692225, "step": 14616, "train/loss_ctc": 0.3741121292114258, "train/loss_error": 0.4099661111831665, "train/loss_total": 0.40279531478881836 }, { "epoch": 3.90515629174459, "step": 14617, "train/loss_ctc": 0.6129564642906189, "train/loss_error": 0.3621765375137329, "train/loss_total": 0.41233253479003906 }, { "epoch": 3.9054234571199573, "step": 14618, "train/loss_ctc": 0.6738356947898865, "train/loss_error": 0.4882388114929199, "train/loss_total": 0.5253582000732422 }, { "epoch": 3.9056906224953245, "step": 14619, "train/loss_ctc": 0.6948968768119812, "train/loss_error": 0.4031001925468445, "train/loss_total": 0.46145954728126526 }, { "epoch": 3.905957787870692, "grad_norm": 2.9794087409973145, "learning_rate": 6.577077210793481e-06, "loss": 0.4465, "step": 14620 }, { "epoch": 3.905957787870692, "step": 14620, "train/loss_ctc": 0.4043067991733551, "train/loss_error": 0.42619287967681885, "train/loss_total": 0.4218156635761261 }, { "epoch": 3.9062249532460593, "step": 14621, "train/loss_ctc": 0.8606735467910767, "train/loss_error": 0.4286511242389679, "train/loss_total": 0.5150555968284607 }, { "epoch": 3.9064921186214265, "step": 14622, "train/loss_ctc": 0.8046139478683472, "train/loss_error": 0.3963591158390045, "train/loss_total": 0.4780100882053375 }, { "epoch": 3.906759283996794, "step": 14623, "train/loss_ctc": 0.6221099495887756, 
"train/loss_error": 0.38808494806289673, "train/loss_total": 0.43488994240760803 }, { "epoch": 3.9070264493721614, "step": 14624, "train/loss_ctc": 0.9453845024108887, "train/loss_error": 0.43751803040504456, "train/loss_total": 0.5390913486480713 }, { "epoch": 3.9072936147475286, "step": 14625, "train/loss_ctc": 0.8898746371269226, "train/loss_error": 0.4193204641342163, "train/loss_total": 0.5134313106536865 }, { "epoch": 3.907560780122896, "step": 14626, "train/loss_ctc": 0.28505948185920715, "train/loss_error": 0.48459485173225403, "train/loss_total": 0.44468778371810913 }, { "epoch": 3.9078279454982634, "step": 14627, "train/loss_ctc": 0.7645468711853027, "train/loss_error": 0.45741963386535645, "train/loss_total": 0.5188450813293457 }, { "epoch": 3.9080951108736306, "step": 14628, "train/loss_ctc": 0.6752001643180847, "train/loss_error": 0.4779830873012543, "train/loss_total": 0.5174264907836914 }, { "epoch": 3.9083622762489982, "step": 14629, "train/loss_ctc": 0.7062118649482727, "train/loss_error": 0.5274807810783386, "train/loss_total": 0.5632269978523254 }, { "epoch": 3.9086294416243654, "grad_norm": 4.1868438720703125, "learning_rate": 6.56104728827144e-06, "loss": 0.4946, "step": 14630 }, { "epoch": 3.9086294416243654, "step": 14630, "train/loss_ctc": 0.301445335149765, "train/loss_error": 0.4327690899372101, "train/loss_total": 0.4065043330192566 }, { "epoch": 3.9088966069997326, "step": 14631, "train/loss_ctc": 0.6941429972648621, "train/loss_error": 0.4748454988002777, "train/loss_total": 0.5187050104141235 }, { "epoch": 3.9091637723751003, "step": 14632, "train/loss_ctc": 0.7654439210891724, "train/loss_error": 0.4616881012916565, "train/loss_total": 0.5224393010139465 }, { "epoch": 3.9094309377504675, "step": 14633, "train/loss_ctc": 0.727053165435791, "train/loss_error": 0.4414747953414917, "train/loss_total": 0.49859046936035156 }, { "epoch": 3.9096981031258347, "step": 14634, "train/loss_ctc": 0.48832082748413086, "train/loss_error": 
0.44792285561561584, "train/loss_total": 0.45600247383117676 }, { "epoch": 3.9099652685012023, "step": 14635, "train/loss_ctc": 1.142090082168579, "train/loss_error": 0.4877954423427582, "train/loss_total": 0.6186543703079224 }, { "epoch": 3.9102324338765695, "step": 14636, "train/loss_ctc": 0.5362812280654907, "train/loss_error": 0.4667445719242096, "train/loss_total": 0.4806519150733948 }, { "epoch": 3.9104995992519367, "step": 14637, "train/loss_ctc": 0.5978192090988159, "train/loss_error": 0.40990740060806274, "train/loss_total": 0.44748976826667786 }, { "epoch": 3.9107667646273043, "step": 14638, "train/loss_ctc": 0.9523144960403442, "train/loss_error": 0.5032364130020142, "train/loss_total": 0.5930520296096802 }, { "epoch": 3.9110339300026715, "step": 14639, "train/loss_ctc": 1.4848086833953857, "train/loss_error": 0.4569692015647888, "train/loss_total": 0.6625370979309082 }, { "epoch": 3.911301095378039, "grad_norm": 2.220599412918091, "learning_rate": 6.545017365749399e-06, "loss": 0.5205, "step": 14640 }, { "epoch": 3.911301095378039, "step": 14640, "train/loss_ctc": 0.5660933256149292, "train/loss_error": 0.4201173782348633, "train/loss_total": 0.44931256771087646 }, { "epoch": 3.9115682607534064, "step": 14641, "train/loss_ctc": 1.0126171112060547, "train/loss_error": 0.4212876856327057, "train/loss_total": 0.5395535826683044 }, { "epoch": 3.9118354261287736, "step": 14642, "train/loss_ctc": 0.7471092939376831, "train/loss_error": 0.378227174282074, "train/loss_total": 0.4520035982131958 }, { "epoch": 3.912102591504141, "step": 14643, "train/loss_ctc": 0.597543478012085, "train/loss_error": 0.46059900522232056, "train/loss_total": 0.4879879355430603 }, { "epoch": 3.9123697568795084, "step": 14644, "train/loss_ctc": 1.452735424041748, "train/loss_error": 0.4637700021266937, "train/loss_total": 0.6615630984306335 }, { "epoch": 3.912636922254876, "step": 14645, "train/loss_ctc": 0.6062524318695068, "train/loss_error": 0.3921043276786804, "train/loss_total": 
0.43493396043777466 }, { "epoch": 3.912904087630243, "step": 14646, "train/loss_ctc": 0.6611945629119873, "train/loss_error": 0.4191173017024994, "train/loss_total": 0.467532753944397 }, { "epoch": 3.9131712530056104, "step": 14647, "train/loss_ctc": 0.5301715135574341, "train/loss_error": 0.4430399537086487, "train/loss_total": 0.46046626567840576 }, { "epoch": 3.913438418380978, "step": 14648, "train/loss_ctc": 0.8705718517303467, "train/loss_error": 0.3961654007434845, "train/loss_total": 0.4910466969013214 }, { "epoch": 3.9137055837563453, "step": 14649, "train/loss_ctc": 0.27699512243270874, "train/loss_error": 0.49191778898239136, "train/loss_total": 0.44893327355384827 }, { "epoch": 3.9139727491317124, "grad_norm": 1.9335496425628662, "learning_rate": 6.528987443227358e-06, "loss": 0.4893, "step": 14650 }, { "epoch": 3.9139727491317124, "step": 14650, "train/loss_ctc": 0.8263325691223145, "train/loss_error": 0.3892233073711395, "train/loss_total": 0.47664517164230347 }, { "epoch": 3.91423991450708, "step": 14651, "train/loss_ctc": 1.0046875476837158, "train/loss_error": 0.4237653613090515, "train/loss_total": 0.5399497747421265 }, { "epoch": 3.9145070798824473, "step": 14652, "train/loss_ctc": 0.6690696477890015, "train/loss_error": 0.42757710814476013, "train/loss_total": 0.4758756160736084 }, { "epoch": 3.9147742452578145, "step": 14653, "train/loss_ctc": 0.5980744361877441, "train/loss_error": 0.40315595269203186, "train/loss_total": 0.4421396553516388 }, { "epoch": 3.915041410633182, "step": 14654, "train/loss_ctc": 1.0616415739059448, "train/loss_error": 0.4173867106437683, "train/loss_total": 0.5462377071380615 }, { "epoch": 3.9153085760085493, "step": 14655, "train/loss_ctc": 0.6722967624664307, "train/loss_error": 0.4629678428173065, "train/loss_total": 0.5048336386680603 }, { "epoch": 3.9155757413839165, "step": 14656, "train/loss_ctc": 0.4669311046600342, "train/loss_error": 0.47476452589035034, "train/loss_total": 0.4731978476047516 }, { "epoch": 
3.915842906759284, "step": 14657, "train/loss_ctc": 0.611707329750061, "train/loss_error": 0.468617707490921, "train/loss_total": 0.49723565578460693 }, { "epoch": 3.9161100721346513, "step": 14658, "train/loss_ctc": 0.4996488392353058, "train/loss_error": 0.43084049224853516, "train/loss_total": 0.44460219144821167 }, { "epoch": 3.9163772375100185, "step": 14659, "train/loss_ctc": 0.8965857028961182, "train/loss_error": 0.3402029573917389, "train/loss_total": 0.4514795243740082 }, { "epoch": 3.916644402885386, "grad_norm": 2.3481249809265137, "learning_rate": 6.512957520705317e-06, "loss": 0.4852, "step": 14660 }, { "epoch": 3.916644402885386, "step": 14660, "train/loss_ctc": 0.5640645027160645, "train/loss_error": 0.4458358883857727, "train/loss_total": 0.46948161721229553 }, { "epoch": 3.9169115682607534, "step": 14661, "train/loss_ctc": 0.44894564151763916, "train/loss_error": 0.36990535259246826, "train/loss_total": 0.3857133984565735 }, { "epoch": 3.9171787336361206, "step": 14662, "train/loss_ctc": 0.7998684644699097, "train/loss_error": 0.47943365573883057, "train/loss_total": 0.5435206294059753 }, { "epoch": 3.917445899011488, "step": 14663, "train/loss_ctc": 0.722959041595459, "train/loss_error": 0.364273339509964, "train/loss_total": 0.436010479927063 }, { "epoch": 3.9177130643868554, "step": 14664, "train/loss_ctc": 0.6086800694465637, "train/loss_error": 0.4334729313850403, "train/loss_total": 0.4685143828392029 }, { "epoch": 3.9179802297622226, "step": 14665, "train/loss_ctc": 0.5954074859619141, "train/loss_error": 0.4736126661300659, "train/loss_total": 0.49797162413597107 }, { "epoch": 3.9182473951375902, "step": 14666, "train/loss_ctc": 1.434542179107666, "train/loss_error": 0.3893685042858124, "train/loss_total": 0.5984032154083252 }, { "epoch": 3.9185145605129574, "step": 14667, "train/loss_ctc": 0.3991614878177643, "train/loss_error": 0.38034701347351074, "train/loss_total": 0.3841099143028259 }, { "epoch": 3.9187817258883246, "step": 14668, 
"train/loss_ctc": 0.4429217576980591, "train/loss_error": 0.4106738865375519, "train/loss_total": 0.4171234667301178 }, { "epoch": 3.9190488912636923, "step": 14669, "train/loss_ctc": 0.6658433675765991, "train/loss_error": 0.42389529943466187, "train/loss_total": 0.4722849130630493 }, { "epoch": 3.9193160566390595, "grad_norm": 1.8057596683502197, "learning_rate": 6.496927598183275e-06, "loss": 0.4673, "step": 14670 }, { "epoch": 3.9193160566390595, "step": 14670, "train/loss_ctc": 1.4571762084960938, "train/loss_error": 0.4774315655231476, "train/loss_total": 0.6733804941177368 }, { "epoch": 3.9195832220144267, "step": 14671, "train/loss_ctc": 0.4957861006259918, "train/loss_error": 0.41681885719299316, "train/loss_total": 0.4326122999191284 }, { "epoch": 3.9198503873897943, "step": 14672, "train/loss_ctc": 0.6949864029884338, "train/loss_error": 0.439998596906662, "train/loss_total": 0.49099618196487427 }, { "epoch": 3.9201175527651615, "step": 14673, "train/loss_ctc": 0.4289252460002899, "train/loss_error": 0.4011289179325104, "train/loss_total": 0.40668821334838867 }, { "epoch": 3.920384718140529, "step": 14674, "train/loss_ctc": 1.0465770959854126, "train/loss_error": 0.4926488995552063, "train/loss_total": 0.6034345626831055 }, { "epoch": 3.9206518835158963, "step": 14675, "train/loss_ctc": 0.6246553659439087, "train/loss_error": 0.4586505889892578, "train/loss_total": 0.4918515384197235 }, { "epoch": 3.920919048891264, "step": 14676, "train/loss_ctc": 0.46873170137405396, "train/loss_error": 0.5156418085098267, "train/loss_total": 0.5062597990036011 }, { "epoch": 3.921186214266631, "step": 14677, "train/loss_ctc": 0.44140225648880005, "train/loss_error": 0.5102580189704895, "train/loss_total": 0.4964869022369385 }, { "epoch": 3.9214533796419984, "step": 14678, "train/loss_ctc": 0.18807271122932434, "train/loss_error": 0.4295543432235718, "train/loss_total": 0.3812580406665802 }, { "epoch": 3.921720545017366, "step": 14679, "train/loss_ctc": 
0.7738860845565796, "train/loss_error": 0.4774726629257202, "train/loss_total": 0.5367553234100342 }, { "epoch": 3.921987710392733, "grad_norm": 3.031338930130005, "learning_rate": 6.480897675661235e-06, "loss": 0.502, "step": 14680 }, { "epoch": 3.921987710392733, "step": 14680, "train/loss_ctc": 0.3020973205566406, "train/loss_error": 0.3918100893497467, "train/loss_total": 0.37386754155158997 }, { "epoch": 3.9222548757681004, "step": 14681, "train/loss_ctc": 0.6257177591323853, "train/loss_error": 0.41513365507125854, "train/loss_total": 0.4572504758834839 }, { "epoch": 3.922522041143468, "step": 14682, "train/loss_ctc": 0.5570594072341919, "train/loss_error": 0.37308916449546814, "train/loss_total": 0.40988320112228394 }, { "epoch": 3.9227892065188352, "step": 14683, "train/loss_ctc": 0.9049595594406128, "train/loss_error": 0.5606757402420044, "train/loss_total": 0.629532516002655 }, { "epoch": 3.9230563718942024, "step": 14684, "train/loss_ctc": 0.8393053412437439, "train/loss_error": 0.4538338780403137, "train/loss_total": 0.5309281945228577 }, { "epoch": 3.92332353726957, "step": 14685, "train/loss_ctc": 0.8430790901184082, "train/loss_error": 0.5115370750427246, "train/loss_total": 0.5778454542160034 }, { "epoch": 3.9235907026449373, "step": 14686, "train/loss_ctc": 1.0364980697631836, "train/loss_error": 0.4059719741344452, "train/loss_total": 0.5320771932601929 }, { "epoch": 3.9238578680203045, "step": 14687, "train/loss_ctc": 0.6664459705352783, "train/loss_error": 0.39617758989334106, "train/loss_total": 0.45023125410079956 }, { "epoch": 3.924125033395672, "step": 14688, "train/loss_ctc": 0.7792212963104248, "train/loss_error": 0.4784172475337982, "train/loss_total": 0.5385780334472656 }, { "epoch": 3.9243921987710393, "step": 14689, "train/loss_ctc": 0.5228139758110046, "train/loss_error": 0.43495798110961914, "train/loss_total": 0.4525291919708252 }, { "epoch": 3.9246593641464065, "grad_norm": 1.6792240142822266, "learning_rate": 
6.464867753139194e-06, "loss": 0.4953, "step": 14690 }, { "epoch": 3.9246593641464065, "step": 14690, "train/loss_ctc": 1.1437526941299438, "train/loss_error": 0.49063441157341003, "train/loss_total": 0.6212580800056458 }, { "epoch": 3.924926529521774, "step": 14691, "train/loss_ctc": 0.9954007267951965, "train/loss_error": 0.41083332896232605, "train/loss_total": 0.5277467966079712 }, { "epoch": 3.9251936948971413, "step": 14692, "train/loss_ctc": 0.7494948506355286, "train/loss_error": 0.47666677832603455, "train/loss_total": 0.5312324166297913 }, { "epoch": 3.9254608602725085, "step": 14693, "train/loss_ctc": 0.7441068887710571, "train/loss_error": 0.46756869554519653, "train/loss_total": 0.5228763818740845 }, { "epoch": 3.925728025647876, "step": 14694, "train/loss_ctc": 0.3768117129802704, "train/loss_error": 0.4982278048992157, "train/loss_total": 0.47394460439682007 }, { "epoch": 3.9259951910232433, "step": 14695, "train/loss_ctc": 0.40142446756362915, "train/loss_error": 0.3898460865020752, "train/loss_total": 0.3921617567539215 }, { "epoch": 3.9262623563986105, "step": 14696, "train/loss_ctc": 1.065109133720398, "train/loss_error": 0.3781021535396576, "train/loss_total": 0.5155035853385925 }, { "epoch": 3.926529521773978, "step": 14697, "train/loss_ctc": 0.5699881315231323, "train/loss_error": 0.48492205142974854, "train/loss_total": 0.5019352436065674 }, { "epoch": 3.9267966871493454, "step": 14698, "train/loss_ctc": 0.31513142585754395, "train/loss_error": 0.4058060944080353, "train/loss_total": 0.38767117261886597 }, { "epoch": 3.9270638525247126, "step": 14699, "train/loss_ctc": 0.2095523476600647, "train/loss_error": 0.4533385634422302, "train/loss_total": 0.40458133816719055 }, { "epoch": 3.92733101790008, "grad_norm": 1.4876298904418945, "learning_rate": 6.448837830617152e-06, "loss": 0.4879, "step": 14700 }, { "epoch": 3.92733101790008, "step": 14700, "train/loss_ctc": 0.5908135175704956, "train/loss_error": 0.4577464163303375, "train/loss_total": 
0.48435986042022705 }, { "epoch": 3.9275981832754474, "step": 14701, "train/loss_ctc": 0.9458099007606506, "train/loss_error": 0.49402183294296265, "train/loss_total": 0.5843794345855713 }, { "epoch": 3.9278653486508146, "step": 14702, "train/loss_ctc": 1.0590307712554932, "train/loss_error": 0.49619656801223755, "train/loss_total": 0.6087633967399597 }, { "epoch": 3.9281325140261822, "step": 14703, "train/loss_ctc": 0.7944401502609253, "train/loss_error": 0.4762710928916931, "train/loss_total": 0.5399048924446106 }, { "epoch": 3.9283996794015494, "step": 14704, "train/loss_ctc": 0.6846781373023987, "train/loss_error": 0.48228076100349426, "train/loss_total": 0.522760272026062 }, { "epoch": 3.928666844776917, "step": 14705, "train/loss_ctc": 1.0497236251831055, "train/loss_error": 0.4514729678630829, "train/loss_total": 0.5711231231689453 }, { "epoch": 3.9289340101522843, "step": 14706, "train/loss_ctc": 1.0228581428527832, "train/loss_error": 0.37185317277908325, "train/loss_total": 0.5020542144775391 }, { "epoch": 3.9292011755276515, "step": 14707, "train/loss_ctc": 0.7817869186401367, "train/loss_error": 0.3372668921947479, "train/loss_total": 0.42617088556289673 }, { "epoch": 3.929468340903019, "step": 14708, "train/loss_ctc": 0.5737536549568176, "train/loss_error": 0.3808533251285553, "train/loss_total": 0.4194334149360657 }, { "epoch": 3.9297355062783863, "step": 14709, "train/loss_ctc": 0.3576236963272095, "train/loss_error": 0.4570296108722687, "train/loss_total": 0.43714842200279236 }, { "epoch": 3.930002671653754, "grad_norm": 2.3500425815582275, "learning_rate": 6.432807908095111e-06, "loss": 0.5096, "step": 14710 }, { "epoch": 3.930002671653754, "step": 14710, "train/loss_ctc": 1.532149314880371, "train/loss_error": 0.5244638323783875, "train/loss_total": 0.7260009050369263 }, { "epoch": 3.930269837029121, "step": 14711, "train/loss_ctc": 0.6622366309165955, "train/loss_error": 0.4377308785915375, "train/loss_total": 0.482632040977478 }, { "epoch": 
3.9305370024044883, "step": 14712, "train/loss_ctc": 0.30898356437683105, "train/loss_error": 0.36797428131103516, "train/loss_total": 0.3561761677265167 }, { "epoch": 3.930804167779856, "step": 14713, "train/loss_ctc": 0.4081169366836548, "train/loss_error": 0.41990926861763, "train/loss_total": 0.41755080223083496 }, { "epoch": 3.931071333155223, "step": 14714, "train/loss_ctc": 0.7269943952560425, "train/loss_error": 0.3961612582206726, "train/loss_total": 0.46232789754867554 }, { "epoch": 3.9313384985305904, "step": 14715, "train/loss_ctc": 0.4428524971008301, "train/loss_error": 0.4498012065887451, "train/loss_total": 0.4484114646911621 }, { "epoch": 3.931605663905958, "step": 14716, "train/loss_ctc": 0.8048527240753174, "train/loss_error": 0.4234831929206848, "train/loss_total": 0.4997571110725403 }, { "epoch": 3.931872829281325, "step": 14717, "train/loss_ctc": 0.37982243299484253, "train/loss_error": 0.4405941963195801, "train/loss_total": 0.4284398555755615 }, { "epoch": 3.9321399946566924, "step": 14718, "train/loss_ctc": 0.48513704538345337, "train/loss_error": 0.4819827973842621, "train/loss_total": 0.4826136529445648 }, { "epoch": 3.93240716003206, "step": 14719, "train/loss_ctc": 0.5352920293807983, "train/loss_error": 0.42969539761543274, "train/loss_total": 0.45081472396850586 }, { "epoch": 3.9326743254074272, "grad_norm": 1.8991197347640991, "learning_rate": 6.4167779855730705e-06, "loss": 0.4755, "step": 14720 }, { "epoch": 3.9326743254074272, "step": 14720, "train/loss_ctc": 0.8768863677978516, "train/loss_error": 0.5241560339927673, "train/loss_total": 0.5947021245956421 }, { "epoch": 3.9329414907827944, "step": 14721, "train/loss_ctc": 1.1498743295669556, "train/loss_error": 0.4648174047470093, "train/loss_total": 0.6018288135528564 }, { "epoch": 3.933208656158162, "step": 14722, "train/loss_ctc": 0.573121190071106, "train/loss_error": 0.4008418619632721, "train/loss_total": 0.43529772758483887 }, { "epoch": 3.9334758215335293, "step": 14723, 
"train/loss_ctc": 1.2468770742416382, "train/loss_error": 0.4200953543186188, "train/loss_total": 0.5854517221450806 }, { "epoch": 3.9337429869088965, "step": 14724, "train/loss_ctc": 0.5800444483757019, "train/loss_error": 0.463821142911911, "train/loss_total": 0.48706579208374023 }, { "epoch": 3.934010152284264, "step": 14725, "train/loss_ctc": 0.6022694706916809, "train/loss_error": 0.48943978548049927, "train/loss_total": 0.5120056867599487 }, { "epoch": 3.9342773176596313, "step": 14726, "train/loss_ctc": 0.6806248426437378, "train/loss_error": 0.435045450925827, "train/loss_total": 0.4841613471508026 }, { "epoch": 3.9345444830349985, "step": 14727, "train/loss_ctc": 0.2416927069425583, "train/loss_error": 0.44714710116386414, "train/loss_total": 0.4060562252998352 }, { "epoch": 3.934811648410366, "step": 14728, "train/loss_ctc": 0.5741680860519409, "train/loss_error": 0.4023948907852173, "train/loss_total": 0.43674954771995544 }, { "epoch": 3.9350788137857333, "step": 14729, "train/loss_ctc": 1.1571463346481323, "train/loss_error": 0.4467967450618744, "train/loss_total": 0.588866651058197 }, { "epoch": 3.9353459791611005, "grad_norm": 2.130122661590576, "learning_rate": 6.400748063051029e-06, "loss": 0.5132, "step": 14730 }, { "epoch": 3.9353459791611005, "step": 14730, "train/loss_ctc": 0.9061658382415771, "train/loss_error": 0.46443432569503784, "train/loss_total": 0.5527806282043457 }, { "epoch": 3.935613144536468, "step": 14731, "train/loss_ctc": 0.6011404395103455, "train/loss_error": 0.44618725776672363, "train/loss_total": 0.4771778881549835 }, { "epoch": 3.9358803099118354, "step": 14732, "train/loss_ctc": 0.507623553276062, "train/loss_error": 0.46219462156295776, "train/loss_total": 0.47128042578697205 }, { "epoch": 3.9361474752872025, "step": 14733, "train/loss_ctc": 0.8028390407562256, "train/loss_error": 0.41439178586006165, "train/loss_total": 0.4920812249183655 }, { "epoch": 3.93641464066257, "step": 14734, "train/loss_ctc": 0.8919566869735718, 
"train/loss_error": 0.44524258375167847, "train/loss_total": 0.5345854163169861 }, { "epoch": 3.9366818060379374, "step": 14735, "train/loss_ctc": 1.1219804286956787, "train/loss_error": 0.4458618760108948, "train/loss_total": 0.5810856223106384 }, { "epoch": 3.9369489714133046, "step": 14736, "train/loss_ctc": 0.5451762080192566, "train/loss_error": 0.4697484076023102, "train/loss_total": 0.4848339855670929 }, { "epoch": 3.937216136788672, "step": 14737, "train/loss_ctc": 0.5454087853431702, "train/loss_error": 0.509349524974823, "train/loss_total": 0.5165613889694214 }, { "epoch": 3.9374833021640394, "step": 14738, "train/loss_ctc": 0.7504328489303589, "train/loss_error": 0.4405454397201538, "train/loss_total": 0.5025229454040527 }, { "epoch": 3.937750467539407, "step": 14739, "train/loss_ctc": 0.6168751120567322, "train/loss_error": 0.469539999961853, "train/loss_total": 0.49900704622268677 }, { "epoch": 3.9380176329147742, "grad_norm": 1.5374624729156494, "learning_rate": 6.3847181405289875e-06, "loss": 0.5112, "step": 14740 }, { "epoch": 3.9380176329147742, "step": 14740, "train/loss_ctc": 1.0300483703613281, "train/loss_error": 0.34392499923706055, "train/loss_total": 0.48114967346191406 }, { "epoch": 3.9382847982901414, "step": 14741, "train/loss_ctc": 0.39469581842422485, "train/loss_error": 0.5017477869987488, "train/loss_total": 0.4803374111652374 }, { "epoch": 3.938551963665509, "step": 14742, "train/loss_ctc": 0.703258752822876, "train/loss_error": 0.5367296934127808, "train/loss_total": 0.5700355172157288 }, { "epoch": 3.9388191290408763, "step": 14743, "train/loss_ctc": 0.3319593667984009, "train/loss_error": 0.4717380106449127, "train/loss_total": 0.4437822997570038 }, { "epoch": 3.939086294416244, "step": 14744, "train/loss_ctc": 0.6113266348838806, "train/loss_error": 0.44190895557403564, "train/loss_total": 0.4757924973964691 }, { "epoch": 3.939353459791611, "step": 14745, "train/loss_ctc": 0.5613716840744019, "train/loss_error": 
0.4447937309741974, "train/loss_total": 0.4681093394756317 }, { "epoch": 3.9396206251669783, "step": 14746, "train/loss_ctc": 0.443612664937973, "train/loss_error": 0.45050185918807983, "train/loss_total": 0.4491240084171295 }, { "epoch": 3.939887790542346, "step": 14747, "train/loss_ctc": 1.291352391242981, "train/loss_error": 0.44419384002685547, "train/loss_total": 0.6136255264282227 }, { "epoch": 3.940154955917713, "step": 14748, "train/loss_ctc": 1.0708739757537842, "train/loss_error": 0.4388381540775299, "train/loss_total": 0.5652453303337097 }, { "epoch": 3.9404221212930803, "step": 14749, "train/loss_ctc": 0.4334045648574829, "train/loss_error": 0.44304895401000977, "train/loss_total": 0.44112008810043335 }, { "epoch": 3.940689286668448, "grad_norm": 1.3369683027267456, "learning_rate": 6.3686882180069465e-06, "loss": 0.4988, "step": 14750 }, { "epoch": 3.940689286668448, "step": 14750, "train/loss_ctc": 0.43840864300727844, "train/loss_error": 0.4361346364021301, "train/loss_total": 0.43658944964408875 }, { "epoch": 3.940956452043815, "step": 14751, "train/loss_ctc": 0.26890841126441956, "train/loss_error": 0.32152223587036133, "train/loss_total": 0.31099948287010193 }, { "epoch": 3.9412236174191824, "step": 14752, "train/loss_ctc": 0.29589158296585083, "train/loss_error": 0.405892014503479, "train/loss_total": 0.3838919401168823 }, { "epoch": 3.94149078279455, "step": 14753, "train/loss_ctc": 0.7865906953811646, "train/loss_error": 0.4761403501033783, "train/loss_total": 0.5382304191589355 }, { "epoch": 3.941757948169917, "step": 14754, "train/loss_ctc": 0.3738543391227722, "train/loss_error": 0.5093556642532349, "train/loss_total": 0.48225539922714233 }, { "epoch": 3.9420251135452844, "step": 14755, "train/loss_ctc": 0.3272241950035095, "train/loss_error": 0.43873584270477295, "train/loss_total": 0.41643351316452026 }, { "epoch": 3.942292278920652, "step": 14756, "train/loss_ctc": 0.5078426599502563, "train/loss_error": 0.396191269159317, 
"train/loss_total": 0.41852158308029175 }, { "epoch": 3.9425594442960192, "step": 14757, "train/loss_ctc": 0.6536060571670532, "train/loss_error": 0.42882442474365234, "train/loss_total": 0.4737807512283325 }, { "epoch": 3.9428266096713864, "step": 14758, "train/loss_ctc": 0.5444742441177368, "train/loss_error": 0.39359045028686523, "train/loss_total": 0.42376720905303955 }, { "epoch": 3.943093775046754, "step": 14759, "train/loss_ctc": 0.7799731492996216, "train/loss_error": 0.5091233849525452, "train/loss_total": 0.5632933378219604 }, { "epoch": 3.9433609404221213, "grad_norm": 3.554309844970703, "learning_rate": 6.3526582954849045e-06, "loss": 0.4448, "step": 14760 }, { "epoch": 3.9433609404221213, "step": 14760, "train/loss_ctc": 0.4887324273586273, "train/loss_error": 0.3756997883319855, "train/loss_total": 0.39830633997917175 }, { "epoch": 3.9436281057974885, "step": 14761, "train/loss_ctc": 1.4311578273773193, "train/loss_error": 0.45161598920822144, "train/loss_total": 0.647524356842041 }, { "epoch": 3.943895271172856, "step": 14762, "train/loss_ctc": 0.8919340968132019, "train/loss_error": 0.4268420338630676, "train/loss_total": 0.5198604464530945 }, { "epoch": 3.9441624365482233, "step": 14763, "train/loss_ctc": 0.5297754406929016, "train/loss_error": 0.3820888102054596, "train/loss_total": 0.4116261601448059 }, { "epoch": 3.9444296019235905, "step": 14764, "train/loss_ctc": 0.6713276505470276, "train/loss_error": 0.446304053068161, "train/loss_total": 0.4913087487220764 }, { "epoch": 3.944696767298958, "step": 14765, "train/loss_ctc": 0.3273735046386719, "train/loss_error": 0.4666261672973633, "train/loss_total": 0.4387756586074829 }, { "epoch": 3.9449639326743253, "step": 14766, "train/loss_ctc": 1.5783159732818604, "train/loss_error": 0.4972919225692749, "train/loss_total": 0.713496744632721 }, { "epoch": 3.9452310980496925, "step": 14767, "train/loss_ctc": 0.5543544888496399, "train/loss_error": 0.4577781558036804, "train/loss_total": 
0.4770934283733368 }, { "epoch": 3.94549826342506, "step": 14768, "train/loss_ctc": 0.8123931288719177, "train/loss_error": 0.4411698281764984, "train/loss_total": 0.5154144763946533 }, { "epoch": 3.9457654288004274, "step": 14769, "train/loss_ctc": 1.1241445541381836, "train/loss_error": 0.4591948688030243, "train/loss_total": 0.592184841632843 }, { "epoch": 3.9460325941757946, "grad_norm": 2.1935007572174072, "learning_rate": 6.336628372962864e-06, "loss": 0.5206, "step": 14770 }, { "epoch": 3.9460325941757946, "step": 14770, "train/loss_ctc": 1.9732398986816406, "train/loss_error": 0.46077054738998413, "train/loss_total": 0.7632644176483154 }, { "epoch": 3.946299759551162, "step": 14771, "train/loss_ctc": 0.3194602429866791, "train/loss_error": 0.47592973709106445, "train/loss_total": 0.44463586807250977 }, { "epoch": 3.9465669249265294, "step": 14772, "train/loss_ctc": 1.1434048414230347, "train/loss_error": 0.4904741644859314, "train/loss_total": 0.621060311794281 }, { "epoch": 3.946834090301897, "step": 14773, "train/loss_ctc": 0.626156210899353, "train/loss_error": 0.4439617693424225, "train/loss_total": 0.4804006814956665 }, { "epoch": 3.947101255677264, "step": 14774, "train/loss_ctc": 0.7085747718811035, "train/loss_error": 0.5015626549720764, "train/loss_total": 0.5429650545120239 }, { "epoch": 3.9473684210526314, "step": 14775, "train/loss_ctc": 1.1224685907363892, "train/loss_error": 0.47283273935317993, "train/loss_total": 0.6027598977088928 }, { "epoch": 3.947635586427999, "step": 14776, "train/loss_ctc": 0.8651182055473328, "train/loss_error": 0.5454697012901306, "train/loss_total": 0.6093994379043579 }, { "epoch": 3.9479027518033663, "step": 14777, "train/loss_ctc": 1.0776045322418213, "train/loss_error": 0.448464035987854, "train/loss_total": 0.5742921233177185 }, { "epoch": 3.948169917178734, "step": 14778, "train/loss_ctc": 0.6954782009124756, "train/loss_error": 0.42243367433547974, "train/loss_total": 0.47704261541366577 }, { "epoch": 
3.948437082554101, "step": 14779, "train/loss_ctc": 1.1383274793624878, "train/loss_error": 0.4372960329055786, "train/loss_total": 0.5775023102760315 }, { "epoch": 3.9487042479294683, "grad_norm": 1.8696836233139038, "learning_rate": 6.320598450440823e-06, "loss": 0.5693, "step": 14780 }, { "epoch": 3.9487042479294683, "step": 14780, "train/loss_ctc": 0.5979267954826355, "train/loss_error": 0.45601511001586914, "train/loss_total": 0.4843974709510803 }, { "epoch": 3.948971413304836, "step": 14781, "train/loss_ctc": 0.4773305654525757, "train/loss_error": 0.5330148339271545, "train/loss_total": 0.5218780040740967 }, { "epoch": 3.949238578680203, "step": 14782, "train/loss_ctc": 0.6683481931686401, "train/loss_error": 0.5004646182060242, "train/loss_total": 0.5340413451194763 }, { "epoch": 3.9495057440555703, "step": 14783, "train/loss_ctc": 0.41979730129241943, "train/loss_error": 0.4760422110557556, "train/loss_total": 0.46479323506355286 }, { "epoch": 3.949772909430938, "step": 14784, "train/loss_ctc": 0.9533807039260864, "train/loss_error": 0.5013666152954102, "train/loss_total": 0.5917694568634033 }, { "epoch": 3.950040074806305, "step": 14785, "train/loss_ctc": 0.9389548897743225, "train/loss_error": 0.32732871174812317, "train/loss_total": 0.4496539235115051 }, { "epoch": 3.9503072401816723, "step": 14786, "train/loss_ctc": 0.6423451900482178, "train/loss_error": 0.4452207684516907, "train/loss_total": 0.48464566469192505 }, { "epoch": 3.95057440555704, "step": 14787, "train/loss_ctc": 0.8119176626205444, "train/loss_error": 0.44263505935668945, "train/loss_total": 0.5164915919303894 }, { "epoch": 3.950841570932407, "step": 14788, "train/loss_ctc": 0.43300798535346985, "train/loss_error": 0.4437168538570404, "train/loss_total": 0.4415751099586487 }, { "epoch": 3.9511087363077744, "step": 14789, "train/loss_ctc": 0.39834827184677124, "train/loss_error": 0.46607381105422974, "train/loss_total": 0.452528715133667 }, { "epoch": 3.951375901683142, "grad_norm": 
1.5868711471557617, "learning_rate": 6.304568527918782e-06, "loss": 0.4942, "step": 14790 }, { "epoch": 3.951375901683142, "step": 14790, "train/loss_ctc": 0.7979267239570618, "train/loss_error": 0.4128778278827667, "train/loss_total": 0.4898875951766968 }, { "epoch": 3.951643067058509, "step": 14791, "train/loss_ctc": 0.6763920783996582, "train/loss_error": 0.4216364920139313, "train/loss_total": 0.4725876450538635 }, { "epoch": 3.9519102324338764, "step": 14792, "train/loss_ctc": 0.7444747686386108, "train/loss_error": 0.4805433452129364, "train/loss_total": 0.5333296060562134 }, { "epoch": 3.952177397809244, "step": 14793, "train/loss_ctc": 1.0321707725524902, "train/loss_error": 0.44082245230674744, "train/loss_total": 0.5590921640396118 }, { "epoch": 3.9524445631846112, "step": 14794, "train/loss_ctc": 1.1676995754241943, "train/loss_error": 0.4980364739894867, "train/loss_total": 0.6319690942764282 }, { "epoch": 3.9527117285599784, "step": 14795, "train/loss_ctc": 0.5941475033760071, "train/loss_error": 0.4153290390968323, "train/loss_total": 0.4510927200317383 }, { "epoch": 3.952978893935346, "step": 14796, "train/loss_ctc": 0.5865857005119324, "train/loss_error": 0.4188341796398163, "train/loss_total": 0.45238450169563293 }, { "epoch": 3.9532460593107133, "step": 14797, "train/loss_ctc": 0.5700681209564209, "train/loss_error": 0.46818673610687256, "train/loss_total": 0.48856300115585327 }, { "epoch": 3.9535132246860805, "step": 14798, "train/loss_ctc": 0.5674734711647034, "train/loss_error": 0.3758080005645752, "train/loss_total": 0.41414108872413635 }, { "epoch": 3.953780390061448, "step": 14799, "train/loss_ctc": 0.958996057510376, "train/loss_error": 0.4291793406009674, "train/loss_total": 0.5351426601409912 }, { "epoch": 3.9540475554368153, "grad_norm": 1.9589076042175293, "learning_rate": 6.28853860539674e-06, "loss": 0.5028, "step": 14800 }, { "epoch": 3.9540475554368153, "step": 14800, "train/loss_ctc": 0.8204513788223267, "train/loss_error": 
0.4266810715198517, "train/loss_total": 0.5054351091384888 }, { "epoch": 3.9543147208121825, "step": 14801, "train/loss_ctc": 0.482947438955307, "train/loss_error": 0.43026238679885864, "train/loss_total": 0.44079941511154175 }, { "epoch": 3.95458188618755, "step": 14802, "train/loss_ctc": 0.3459126949310303, "train/loss_error": 0.42541393637657166, "train/loss_total": 0.4095137119293213 }, { "epoch": 3.9548490515629173, "step": 14803, "train/loss_ctc": 0.501593828201294, "train/loss_error": 0.4637908339500427, "train/loss_total": 0.4713514447212219 }, { "epoch": 3.9551162169382845, "step": 14804, "train/loss_ctc": 0.6439535617828369, "train/loss_error": 0.40532442927360535, "train/loss_total": 0.45305025577545166 }, { "epoch": 3.955383382313652, "step": 14805, "train/loss_ctc": 0.4967695474624634, "train/loss_error": 0.42162710428237915, "train/loss_total": 0.43665561079978943 }, { "epoch": 3.9556505476890194, "step": 14806, "train/loss_ctc": 0.6975573301315308, "train/loss_error": 0.39715126156806946, "train/loss_total": 0.4572324752807617 }, { "epoch": 3.955917713064387, "step": 14807, "train/loss_ctc": 0.5056452751159668, "train/loss_error": 0.4480192959308624, "train/loss_total": 0.45954450964927673 }, { "epoch": 3.956184878439754, "step": 14808, "train/loss_ctc": 0.6116926670074463, "train/loss_error": 0.3848532736301422, "train/loss_total": 0.43022117018699646 }, { "epoch": 3.956452043815122, "step": 14809, "train/loss_ctc": 0.6108054518699646, "train/loss_error": 0.39062151312828064, "train/loss_total": 0.43465831875801086 }, { "epoch": 3.956719209190489, "grad_norm": 2.0965991020202637, "learning_rate": 6.2725086828747e-06, "loss": 0.4498, "step": 14810 }, { "epoch": 3.956719209190489, "step": 14810, "train/loss_ctc": 1.0364620685577393, "train/loss_error": 0.44275879859924316, "train/loss_total": 0.5614994764328003 }, { "epoch": 3.9569863745658562, "step": 14811, "train/loss_ctc": 0.5461000204086304, "train/loss_error": 0.3866208493709564, 
"train/loss_total": 0.41851669549942017 }, { "epoch": 3.957253539941224, "step": 14812, "train/loss_ctc": 0.7516947984695435, "train/loss_error": 0.4136553704738617, "train/loss_total": 0.48126327991485596 }, { "epoch": 3.957520705316591, "step": 14813, "train/loss_ctc": 0.47021862864494324, "train/loss_error": 0.4275500774383545, "train/loss_total": 0.4360837936401367 }, { "epoch": 3.9577878706919583, "step": 14814, "train/loss_ctc": 0.6322793960571289, "train/loss_error": 0.3883023262023926, "train/loss_total": 0.4370977282524109 }, { "epoch": 3.958055036067326, "step": 14815, "train/loss_ctc": 0.5808870792388916, "train/loss_error": 0.4565633535385132, "train/loss_total": 0.4814280867576599 }, { "epoch": 3.958322201442693, "step": 14816, "train/loss_ctc": 0.5250817537307739, "train/loss_error": 0.4887857735157013, "train/loss_total": 0.49604496359825134 }, { "epoch": 3.9585893668180603, "step": 14817, "train/loss_ctc": 0.69283127784729, "train/loss_error": 0.4451916515827179, "train/loss_total": 0.49471959471702576 }, { "epoch": 3.958856532193428, "step": 14818, "train/loss_ctc": 0.5731714963912964, "train/loss_error": 0.46296975016593933, "train/loss_total": 0.4850101172924042 }, { "epoch": 3.959123697568795, "step": 14819, "train/loss_ctc": 0.5147749185562134, "train/loss_error": 0.4733441472053528, "train/loss_total": 0.4816302955150604 }, { "epoch": 3.9593908629441623, "grad_norm": 3.1384243965148926, "learning_rate": 6.256478760352659e-06, "loss": 0.4773, "step": 14820 }, { "epoch": 3.9593908629441623, "step": 14820, "train/loss_ctc": 0.5956840515136719, "train/loss_error": 0.5186704993247986, "train/loss_total": 0.5340732336044312 }, { "epoch": 3.95965802831953, "step": 14821, "train/loss_ctc": 0.17499950528144836, "train/loss_error": 0.43911904096603394, "train/loss_total": 0.3862951397895813 }, { "epoch": 3.959925193694897, "step": 14822, "train/loss_ctc": 0.355465292930603, "train/loss_error": 0.4128870666027069, "train/loss_total": 0.40140271186828613 
}, { "epoch": 3.9601923590702643, "step": 14823, "train/loss_ctc": 0.5930695533752441, "train/loss_error": 0.43131023645401, "train/loss_total": 0.4636620879173279 }, { "epoch": 3.960459524445632, "step": 14824, "train/loss_ctc": 0.8589732050895691, "train/loss_error": 0.4586678445339203, "train/loss_total": 0.5387288928031921 }, { "epoch": 3.960726689820999, "step": 14825, "train/loss_ctc": 0.5605369806289673, "train/loss_error": 0.45586150884628296, "train/loss_total": 0.47679659724235535 }, { "epoch": 3.9609938551963664, "step": 14826, "train/loss_ctc": 0.8345965147018433, "train/loss_error": 0.42630499601364136, "train/loss_total": 0.5079632997512817 }, { "epoch": 3.961261020571734, "step": 14827, "train/loss_ctc": 0.377554714679718, "train/loss_error": 0.40226152539253235, "train/loss_total": 0.3973201811313629 }, { "epoch": 3.961528185947101, "step": 14828, "train/loss_ctc": 0.5960599780082703, "train/loss_error": 0.36802735924720764, "train/loss_total": 0.41363388299942017 }, { "epoch": 3.9617953513224684, "step": 14829, "train/loss_ctc": 0.5514956712722778, "train/loss_error": 0.44204801321029663, "train/loss_total": 0.46393755078315735 }, { "epoch": 3.962062516697836, "grad_norm": 1.5292714834213257, "learning_rate": 6.240448837830617e-06, "loss": 0.4584, "step": 14830 }, { "epoch": 3.962062516697836, "step": 14830, "train/loss_ctc": 0.5927526354789734, "train/loss_error": 0.4690851867198944, "train/loss_total": 0.4938187003135681 }, { "epoch": 3.9623296820732032, "step": 14831, "train/loss_ctc": 0.33398205041885376, "train/loss_error": 0.42800721526145935, "train/loss_total": 0.4092021882534027 }, { "epoch": 3.9625968474485704, "step": 14832, "train/loss_ctc": 0.3176140785217285, "train/loss_error": 0.3998347520828247, "train/loss_total": 0.3833906352519989 }, { "epoch": 3.962864012823938, "step": 14833, "train/loss_ctc": 0.41586995124816895, "train/loss_error": 0.43688899278640747, "train/loss_total": 0.4326851963996887 }, { "epoch": 3.9631311781993053, 
"step": 14834, "train/loss_ctc": 0.42147547006607056, "train/loss_error": 0.3901849687099457, "train/loss_total": 0.39644306898117065 }, { "epoch": 3.9633983435746725, "step": 14835, "train/loss_ctc": 1.10288405418396, "train/loss_error": 0.41950640082359314, "train/loss_total": 0.5561819076538086 }, { "epoch": 3.96366550895004, "step": 14836, "train/loss_ctc": 0.8506582975387573, "train/loss_error": 0.4018057882785797, "train/loss_total": 0.49157631397247314 }, { "epoch": 3.9639326743254073, "step": 14837, "train/loss_ctc": 0.8907173871994019, "train/loss_error": 0.4042878746986389, "train/loss_total": 0.5015738010406494 }, { "epoch": 3.964199839700775, "step": 14838, "train/loss_ctc": 0.2648923397064209, "train/loss_error": 0.4783693552017212, "train/loss_total": 0.43567395210266113 }, { "epoch": 3.964467005076142, "step": 14839, "train/loss_ctc": 0.7758008241653442, "train/loss_error": 0.5142350196838379, "train/loss_total": 0.5665481686592102 }, { "epoch": 3.9647341704515093, "grad_norm": 1.5195276737213135, "learning_rate": 6.224418915308576e-06, "loss": 0.4667, "step": 14840 }, { "epoch": 3.9647341704515093, "step": 14840, "train/loss_ctc": 0.4444811940193176, "train/loss_error": 0.527056872844696, "train/loss_total": 0.5105417370796204 }, { "epoch": 3.965001335826877, "step": 14841, "train/loss_ctc": 0.5096206665039062, "train/loss_error": 0.4458109140396118, "train/loss_total": 0.4585728645324707 }, { "epoch": 3.965268501202244, "step": 14842, "train/loss_ctc": 1.6938554048538208, "train/loss_error": 0.4529057443141937, "train/loss_total": 0.701095700263977 }, { "epoch": 3.965535666577612, "step": 14843, "train/loss_ctc": 1.5373890399932861, "train/loss_error": 0.4201167821884155, "train/loss_total": 0.6435712575912476 }, { "epoch": 3.965802831952979, "step": 14844, "train/loss_ctc": 1.1035010814666748, "train/loss_error": 0.4859626591205597, "train/loss_total": 0.6094703674316406 }, { "epoch": 3.966069997328346, "step": 14845, "train/loss_ctc": 
0.6444396376609802, "train/loss_error": 0.41281118988990784, "train/loss_total": 0.4591369032859802 }, { "epoch": 3.966337162703714, "step": 14846, "train/loss_ctc": 0.5532141923904419, "train/loss_error": 0.4673826992511749, "train/loss_total": 0.48454901576042175 }, { "epoch": 3.966604328079081, "step": 14847, "train/loss_ctc": 1.1994597911834717, "train/loss_error": 0.4623940587043762, "train/loss_total": 0.6098071932792664 }, { "epoch": 3.9668714934544482, "step": 14848, "train/loss_ctc": 0.8201554417610168, "train/loss_error": 0.38336455821990967, "train/loss_total": 0.4707227349281311 }, { "epoch": 3.967138658829816, "step": 14849, "train/loss_ctc": 1.2182164192199707, "train/loss_error": 0.39016127586364746, "train/loss_total": 0.5557723045349121 }, { "epoch": 3.967405824205183, "grad_norm": 1.3840177059173584, "learning_rate": 6.208388992786535e-06, "loss": 0.5503, "step": 14850 }, { "epoch": 3.967405824205183, "step": 14850, "train/loss_ctc": 0.788467288017273, "train/loss_error": 0.435300350189209, "train/loss_total": 0.5059337615966797 }, { "epoch": 3.9676729895805503, "step": 14851, "train/loss_ctc": 1.009986400604248, "train/loss_error": 0.45848312973976135, "train/loss_total": 0.5687837600708008 }, { "epoch": 3.967940154955918, "step": 14852, "train/loss_ctc": 0.6355379223823547, "train/loss_error": 0.42327451705932617, "train/loss_total": 0.46572721004486084 }, { "epoch": 3.968207320331285, "step": 14853, "train/loss_ctc": 1.325984239578247, "train/loss_error": 0.49213892221450806, "train/loss_total": 0.6589080095291138 }, { "epoch": 3.9684744857066523, "step": 14854, "train/loss_ctc": 1.389605164527893, "train/loss_error": 0.48246803879737854, "train/loss_total": 0.6638954877853394 }, { "epoch": 3.96874165108202, "step": 14855, "train/loss_ctc": 0.3576415479183197, "train/loss_error": 0.41251206398010254, "train/loss_total": 0.4015379846096039 }, { "epoch": 3.969008816457387, "step": 14856, "train/loss_ctc": 0.3391038477420807, "train/loss_error": 
0.454537957906723, "train/loss_total": 0.43145114183425903 }, { "epoch": 3.9692759818327543, "step": 14857, "train/loss_ctc": 0.29539167881011963, "train/loss_error": 0.42091473937034607, "train/loss_total": 0.3958101272583008 }, { "epoch": 3.969543147208122, "step": 14858, "train/loss_ctc": 0.3488050699234009, "train/loss_error": 0.4752265214920044, "train/loss_total": 0.4499422311782837 }, { "epoch": 3.969810312583489, "step": 14859, "train/loss_ctc": 0.7034685611724854, "train/loss_error": 0.3836899995803833, "train/loss_total": 0.44764572381973267 }, { "epoch": 3.9700774779588563, "grad_norm": 2.568610668182373, "learning_rate": 6.192359070264494e-06, "loss": 0.499, "step": 14860 }, { "epoch": 3.9700774779588563, "step": 14860, "train/loss_ctc": 0.835371196269989, "train/loss_error": 0.416491836309433, "train/loss_total": 0.500267744064331 }, { "epoch": 3.970344643334224, "step": 14861, "train/loss_ctc": 0.7154254913330078, "train/loss_error": 0.42187803983688354, "train/loss_total": 0.48058754205703735 }, { "epoch": 3.970611808709591, "step": 14862, "train/loss_ctc": 0.5956861972808838, "train/loss_error": 0.450179785490036, "train/loss_total": 0.47928106784820557 }, { "epoch": 3.9708789740849584, "step": 14863, "train/loss_ctc": 0.7116241455078125, "train/loss_error": 0.45515456795692444, "train/loss_total": 0.50644850730896 }, { "epoch": 3.971146139460326, "step": 14864, "train/loss_ctc": 0.9432433247566223, "train/loss_error": 0.4680749773979187, "train/loss_total": 0.5631086826324463 }, { "epoch": 3.971413304835693, "step": 14865, "train/loss_ctc": 0.6790275573730469, "train/loss_error": 0.46711209416389465, "train/loss_total": 0.509495198726654 }, { "epoch": 3.9716804702110604, "step": 14866, "train/loss_ctc": 1.7421224117279053, "train/loss_error": 0.4556407332420349, "train/loss_total": 0.7129371166229248 }, { "epoch": 3.971947635586428, "step": 14867, "train/loss_ctc": 0.5143439769744873, "train/loss_error": 0.41674619913101196, "train/loss_total": 
0.436265766620636 }, { "epoch": 3.9722148009617952, "step": 14868, "train/loss_ctc": 0.5989527702331543, "train/loss_error": 0.4457741975784302, "train/loss_total": 0.476409912109375 }, { "epoch": 3.9724819663371624, "step": 14869, "train/loss_ctc": 0.7078449726104736, "train/loss_error": 0.4097003936767578, "train/loss_total": 0.469329297542572 }, { "epoch": 3.97274913171253, "grad_norm": 1.30711030960083, "learning_rate": 6.176329147742453e-06, "loss": 0.5134, "step": 14870 }, { "epoch": 3.97274913171253, "step": 14870, "train/loss_ctc": 0.5412833094596863, "train/loss_error": 0.5206067562103271, "train/loss_total": 0.524742066860199 }, { "epoch": 3.9730162970878973, "step": 14871, "train/loss_ctc": 0.7457466125488281, "train/loss_error": 0.5340113043785095, "train/loss_total": 0.5763583779335022 }, { "epoch": 3.973283462463265, "step": 14872, "train/loss_ctc": 0.32862764596939087, "train/loss_error": 0.38197439908981323, "train/loss_total": 0.37130504846572876 }, { "epoch": 3.973550627838632, "step": 14873, "train/loss_ctc": 0.6403838396072388, "train/loss_error": 0.4881288409233093, "train/loss_total": 0.5185798406600952 }, { "epoch": 3.9738177932139993, "step": 14874, "train/loss_ctc": 0.8844519853591919, "train/loss_error": 0.4344419538974762, "train/loss_total": 0.5244439840316772 }, { "epoch": 3.974084958589367, "step": 14875, "train/loss_ctc": 0.6400150060653687, "train/loss_error": 0.46940577030181885, "train/loss_total": 0.5035276412963867 }, { "epoch": 3.974352123964734, "step": 14876, "train/loss_ctc": 0.3231387138366699, "train/loss_error": 0.44887569546699524, "train/loss_total": 0.4237282872200012 }, { "epoch": 3.974619289340102, "step": 14877, "train/loss_ctc": 0.782909631729126, "train/loss_error": 0.4971132278442383, "train/loss_total": 0.5542725324630737 }, { "epoch": 3.974886454715469, "step": 14878, "train/loss_ctc": 0.19087505340576172, "train/loss_error": 0.42616987228393555, "train/loss_total": 0.3791109323501587 }, { "epoch": 
3.975153620090836, "step": 14879, "train/loss_ctc": 1.1319464445114136, "train/loss_error": 0.39130938053131104, "train/loss_total": 0.5394368171691895 }, { "epoch": 3.975420785466204, "grad_norm": 2.2951231002807617, "learning_rate": 6.160299225220412e-06, "loss": 0.4916, "step": 14880 }, { "epoch": 3.975420785466204, "step": 14880, "train/loss_ctc": 0.49626636505126953, "train/loss_error": 0.48213034868240356, "train/loss_total": 0.4849575459957123 }, { "epoch": 3.975687950841571, "step": 14881, "train/loss_ctc": 0.8171766400337219, "train/loss_error": 0.4158439040184021, "train/loss_total": 0.4961104393005371 }, { "epoch": 3.975955116216938, "step": 14882, "train/loss_ctc": 0.5017784833908081, "train/loss_error": 0.4362598657608032, "train/loss_total": 0.4493635892868042 }, { "epoch": 3.976222281592306, "step": 14883, "train/loss_ctc": 1.1399762630462646, "train/loss_error": 0.4352499544620514, "train/loss_total": 0.576195240020752 }, { "epoch": 3.976489446967673, "step": 14884, "train/loss_ctc": 0.5059241652488708, "train/loss_error": 0.4016166925430298, "train/loss_total": 0.42247819900512695 }, { "epoch": 3.9767566123430402, "step": 14885, "train/loss_ctc": 0.39310967922210693, "train/loss_error": 0.40571463108062744, "train/loss_total": 0.4031936526298523 }, { "epoch": 3.977023777718408, "step": 14886, "train/loss_ctc": 0.15395298600196838, "train/loss_error": 0.4343055784702301, "train/loss_total": 0.3782350718975067 }, { "epoch": 3.977290943093775, "step": 14887, "train/loss_ctc": 0.6011155843734741, "train/loss_error": 0.4842616021633148, "train/loss_total": 0.5076324343681335 }, { "epoch": 3.9775581084691423, "step": 14888, "train/loss_ctc": 1.054686188697815, "train/loss_error": 0.5089238286018372, "train/loss_total": 0.6180763244628906 }, { "epoch": 3.97782527384451, "step": 14889, "train/loss_ctc": 0.6579307317733765, "train/loss_error": 0.5020967721939087, "train/loss_total": 0.5332635641098022 }, { "epoch": 3.978092439219877, "grad_norm": 
1.7656311988830566, "learning_rate": 6.14426930269837e-06, "loss": 0.487, "step": 14890 }, { "epoch": 3.978092439219877, "step": 14890, "train/loss_ctc": 0.8396487236022949, "train/loss_error": 0.41738250851631165, "train/loss_total": 0.5018357634544373 }, { "epoch": 3.9783596045952443, "step": 14891, "train/loss_ctc": 0.5391314029693604, "train/loss_error": 0.44182756543159485, "train/loss_total": 0.46128836274147034 }, { "epoch": 3.978626769970612, "step": 14892, "train/loss_ctc": 0.583072304725647, "train/loss_error": 0.5157262086868286, "train/loss_total": 0.5291954278945923 }, { "epoch": 3.978893935345979, "step": 14893, "train/loss_ctc": 0.9912711381912231, "train/loss_error": 0.467620313167572, "train/loss_total": 0.5723505020141602 }, { "epoch": 3.9791611007213463, "step": 14894, "train/loss_ctc": 0.3924592137336731, "train/loss_error": 0.4285455346107483, "train/loss_total": 0.42132827639579773 }, { "epoch": 3.979428266096714, "step": 14895, "train/loss_ctc": 0.9419762492179871, "train/loss_error": 0.35572293400764465, "train/loss_total": 0.4729735851287842 }, { "epoch": 3.979695431472081, "step": 14896, "train/loss_ctc": 0.9561101198196411, "train/loss_error": 0.45944541692733765, "train/loss_total": 0.5587783455848694 }, { "epoch": 3.9799625968474484, "step": 14897, "train/loss_ctc": 0.47915518283843994, "train/loss_error": 0.4649158716201782, "train/loss_total": 0.467763751745224 }, { "epoch": 3.980229762222816, "step": 14898, "train/loss_ctc": 0.5013946294784546, "train/loss_error": 0.38279983401298523, "train/loss_total": 0.406518816947937 }, { "epoch": 3.980496927598183, "step": 14899, "train/loss_ctc": 0.49847322702407837, "train/loss_error": 0.38952043652534485, "train/loss_total": 0.41131100058555603 }, { "epoch": 3.9807640929735504, "grad_norm": 1.7319908142089844, "learning_rate": 6.1282393801763296e-06, "loss": 0.4803, "step": 14900 }, { "epoch": 3.9807640929735504, "step": 14900, "train/loss_ctc": 0.779314398765564, "train/loss_error": 
0.4542514979839325, "train/loss_total": 0.5192641019821167 }, { "epoch": 3.981031258348918, "step": 14901, "train/loss_ctc": 0.4357941150665283, "train/loss_error": 0.32404109835624695, "train/loss_total": 0.3463917076587677 }, { "epoch": 3.981298423724285, "step": 14902, "train/loss_ctc": 0.40458017587661743, "train/loss_error": 0.440181165933609, "train/loss_total": 0.43306097388267517 }, { "epoch": 3.9815655890996524, "step": 14903, "train/loss_ctc": 1.4661428928375244, "train/loss_error": 0.4805009663105011, "train/loss_total": 0.6776293516159058 }, { "epoch": 3.98183275447502, "step": 14904, "train/loss_ctc": 0.7630304098129272, "train/loss_error": 0.4446767270565033, "train/loss_total": 0.5083474516868591 }, { "epoch": 3.9820999198503872, "step": 14905, "train/loss_ctc": 0.6372474431991577, "train/loss_error": 0.5056051015853882, "train/loss_total": 0.531933605670929 }, { "epoch": 3.982367085225755, "step": 14906, "train/loss_ctc": 0.8317476511001587, "train/loss_error": 0.4636790454387665, "train/loss_total": 0.5372927784919739 }, { "epoch": 3.982634250601122, "step": 14907, "train/loss_ctc": 0.36971616744995117, "train/loss_error": 0.3829808533191681, "train/loss_total": 0.38032791018486023 }, { "epoch": 3.9829014159764897, "step": 14908, "train/loss_ctc": 0.6023083925247192, "train/loss_error": 0.4348922073841095, "train/loss_total": 0.46837544441223145 }, { "epoch": 3.983168581351857, "step": 14909, "train/loss_ctc": 0.33367496728897095, "train/loss_error": 0.374419629573822, "train/loss_total": 0.3662707209587097 }, { "epoch": 3.983435746727224, "grad_norm": 1.9897503852844238, "learning_rate": 6.1122094576542885e-06, "loss": 0.4769, "step": 14910 }, { "epoch": 3.983435746727224, "step": 14910, "train/loss_ctc": 0.9676790833473206, "train/loss_error": 0.4546753168106079, "train/loss_total": 0.5572760701179504 }, { "epoch": 3.9837029121025918, "step": 14911, "train/loss_ctc": 0.6090637445449829, "train/loss_error": 0.405524343252182, "train/loss_total": 
0.44623222947120667 }, { "epoch": 3.983970077477959, "step": 14912, "train/loss_ctc": 1.276153326034546, "train/loss_error": 0.4257347285747528, "train/loss_total": 0.5958184599876404 }, { "epoch": 3.984237242853326, "step": 14913, "train/loss_ctc": 0.6834744215011597, "train/loss_error": 0.4363142251968384, "train/loss_total": 0.48574626445770264 }, { "epoch": 3.984504408228694, "step": 14914, "train/loss_ctc": 0.6165448427200317, "train/loss_error": 0.4725758731365204, "train/loss_total": 0.5013696551322937 }, { "epoch": 3.984771573604061, "step": 14915, "train/loss_ctc": 0.629896342754364, "train/loss_error": 0.49669116735458374, "train/loss_total": 0.5233322381973267 }, { "epoch": 3.985038738979428, "step": 14916, "train/loss_ctc": 0.6100618839263916, "train/loss_error": 0.43265199661254883, "train/loss_total": 0.46813398599624634 }, { "epoch": 3.985305904354796, "step": 14917, "train/loss_ctc": 0.9504138231277466, "train/loss_error": 0.37319186329841614, "train/loss_total": 0.4886362552642822 }, { "epoch": 3.985573069730163, "step": 14918, "train/loss_ctc": 1.320017695426941, "train/loss_error": 0.46309715509414673, "train/loss_total": 0.6344813108444214 }, { "epoch": 3.98584023510553, "step": 14919, "train/loss_ctc": 0.3307908773422241, "train/loss_error": 0.3786706030368805, "train/loss_total": 0.3690946698188782 }, { "epoch": 3.986107400480898, "grad_norm": 1.4530918598175049, "learning_rate": 6.0961795351322466e-06, "loss": 0.507, "step": 14920 }, { "epoch": 3.986107400480898, "step": 14920, "train/loss_ctc": 1.0173263549804688, "train/loss_error": 0.3833114802837372, "train/loss_total": 0.5101144313812256 }, { "epoch": 3.986374565856265, "step": 14921, "train/loss_ctc": 0.43508008122444153, "train/loss_error": 0.5104248523712158, "train/loss_total": 0.49535590410232544 }, { "epoch": 3.9866417312316322, "step": 14922, "train/loss_ctc": 0.4434136748313904, "train/loss_error": 0.4395618140697479, "train/loss_total": 0.44033220410346985 }, { "epoch": 
3.986908896607, "step": 14923, "train/loss_ctc": 1.298351526260376, "train/loss_error": 0.4799230396747589, "train/loss_total": 0.6436087489128113 }, { "epoch": 3.987176061982367, "step": 14924, "train/loss_ctc": 0.901274561882019, "train/loss_error": 0.4543173909187317, "train/loss_total": 0.543708860874176 }, { "epoch": 3.9874432273577343, "step": 14925, "train/loss_ctc": 0.23099298775196075, "train/loss_error": 0.3904167711734772, "train/loss_total": 0.35853204131126404 }, { "epoch": 3.987710392733102, "step": 14926, "train/loss_ctc": 0.9982420206069946, "train/loss_error": 0.49081361293792725, "train/loss_total": 0.5922993421554565 }, { "epoch": 3.987977558108469, "step": 14927, "train/loss_ctc": 0.820134699344635, "train/loss_error": 0.44590795040130615, "train/loss_total": 0.5207533240318298 }, { "epoch": 3.9882447234838363, "step": 14928, "train/loss_ctc": 1.1001827716827393, "train/loss_error": 0.45365285873413086, "train/loss_total": 0.5829588174819946 }, { "epoch": 3.988511888859204, "step": 14929, "train/loss_ctc": 0.47492051124572754, "train/loss_error": 0.46445876359939575, "train/loss_total": 0.46655112504959106 }, { "epoch": 3.988779054234571, "grad_norm": 1.8236331939697266, "learning_rate": 6.0801496126102055e-06, "loss": 0.5154, "step": 14930 }, { "epoch": 3.988779054234571, "step": 14930, "train/loss_ctc": 0.6131405830383301, "train/loss_error": 0.38044068217277527, "train/loss_total": 0.4269806742668152 }, { "epoch": 3.9890462196099383, "step": 14931, "train/loss_ctc": 0.597343385219574, "train/loss_error": 0.4575482904911041, "train/loss_total": 0.4855073392391205 }, { "epoch": 3.989313384985306, "step": 14932, "train/loss_ctc": 0.5875214338302612, "train/loss_error": 0.4247072637081146, "train/loss_total": 0.4572701156139374 }, { "epoch": 3.989580550360673, "step": 14933, "train/loss_ctc": 0.5750634670257568, "train/loss_error": 0.41825932264328003, "train/loss_total": 0.44962015748023987 }, { "epoch": 3.9898477157360404, "step": 14934, 
"train/loss_ctc": 0.8059858679771423, "train/loss_error": 0.44141629338264465, "train/loss_total": 0.5143302083015442 }, { "epoch": 3.990114881111408, "step": 14935, "train/loss_ctc": 0.6756905913352966, "train/loss_error": 0.35942205786705017, "train/loss_total": 0.4226757884025574 }, { "epoch": 3.990382046486775, "step": 14936, "train/loss_ctc": 0.9813187122344971, "train/loss_error": 0.49018365144729614, "train/loss_total": 0.5884106755256653 }, { "epoch": 3.990649211862143, "step": 14937, "train/loss_ctc": 0.7369621992111206, "train/loss_error": 0.47334420680999756, "train/loss_total": 0.5260677933692932 }, { "epoch": 3.99091637723751, "step": 14938, "train/loss_ctc": 0.4281027913093567, "train/loss_error": 0.4320175349712372, "train/loss_total": 0.43123459815979004 }, { "epoch": 3.991183542612877, "step": 14939, "train/loss_ctc": 0.8197498321533203, "train/loss_error": 0.45305055379867554, "train/loss_total": 0.5263904333114624 }, { "epoch": 3.991450707988245, "grad_norm": 1.5237932205200195, "learning_rate": 6.064119690088165e-06, "loss": 0.4828, "step": 14940 }, { "epoch": 3.991450707988245, "step": 14940, "train/loss_ctc": 1.136149525642395, "train/loss_error": 0.4404669404029846, "train/loss_total": 0.5796034932136536 }, { "epoch": 3.991717873363612, "step": 14941, "train/loss_ctc": 0.4123038649559021, "train/loss_error": 0.3448324203491211, "train/loss_total": 0.3583267331123352 }, { "epoch": 3.9919850387389797, "step": 14942, "train/loss_ctc": 0.6675161123275757, "train/loss_error": 0.43012264370918274, "train/loss_total": 0.4776013493537903 }, { "epoch": 3.992252204114347, "step": 14943, "train/loss_ctc": 0.8482060432434082, "train/loss_error": 0.43469923734664917, "train/loss_total": 0.5174006223678589 }, { "epoch": 3.992519369489714, "step": 14944, "train/loss_ctc": 0.29348883032798767, "train/loss_error": 0.40431100130081177, "train/loss_total": 0.38214656710624695 }, { "epoch": 3.9927865348650817, "step": 14945, "train/loss_ctc": 0.6797023415565491, 
"train/loss_error": 0.4208126366138458, "train/loss_total": 0.4725905656814575 }, { "epoch": 3.993053700240449, "step": 14946, "train/loss_ctc": 0.5703757405281067, "train/loss_error": 0.44213470816612244, "train/loss_total": 0.4677829146385193 }, { "epoch": 3.993320865615816, "step": 14947, "train/loss_ctc": 0.4273946285247803, "train/loss_error": 0.4402904212474823, "train/loss_total": 0.43771126866340637 }, { "epoch": 3.9935880309911838, "step": 14948, "train/loss_ctc": 1.3419816493988037, "train/loss_error": 0.41430991888046265, "train/loss_total": 0.5998442769050598 }, { "epoch": 3.993855196366551, "step": 14949, "train/loss_ctc": 0.6667421460151672, "train/loss_error": 0.4165363311767578, "train/loss_total": 0.4665775001049042 }, { "epoch": 3.994122361741918, "grad_norm": 1.933995008468628, "learning_rate": 6.048089767566124e-06, "loss": 0.476, "step": 14950 }, { "epoch": 3.994122361741918, "step": 14950, "train/loss_ctc": 0.5415375232696533, "train/loss_error": 0.41078710556030273, "train/loss_total": 0.43693721294403076 }, { "epoch": 3.994389527117286, "step": 14951, "train/loss_ctc": 1.1981260776519775, "train/loss_error": 0.4758560359477997, "train/loss_total": 0.6203100681304932 }, { "epoch": 3.994656692492653, "step": 14952, "train/loss_ctc": 1.1364471912384033, "train/loss_error": 0.5252032279968262, "train/loss_total": 0.6474519968032837 }, { "epoch": 3.99492385786802, "step": 14953, "train/loss_ctc": 0.5485188961029053, "train/loss_error": 0.39634251594543457, "train/loss_total": 0.42677780985832214 }, { "epoch": 3.995191023243388, "step": 14954, "train/loss_ctc": 0.5844269394874573, "train/loss_error": 0.36339420080184937, "train/loss_total": 0.4076007604598999 }, { "epoch": 3.995458188618755, "step": 14955, "train/loss_ctc": 1.1002418994903564, "train/loss_error": 0.4894046187400818, "train/loss_total": 0.6115720868110657 }, { "epoch": 3.995725353994122, "step": 14956, "train/loss_ctc": 0.9418472647666931, "train/loss_error": 0.4421965181827545, 
"train/loss_total": 0.5421266555786133 }, { "epoch": 3.99599251936949, "step": 14957, "train/loss_ctc": 0.8802375793457031, "train/loss_error": 0.4656237065792084, "train/loss_total": 0.5485464930534363 }, { "epoch": 3.996259684744857, "step": 14958, "train/loss_ctc": 0.9517735838890076, "train/loss_error": 0.42506805062294006, "train/loss_total": 0.5304091572761536 }, { "epoch": 3.9965268501202242, "step": 14959, "train/loss_ctc": 0.5834641456604004, "train/loss_error": 0.4048826992511749, "train/loss_total": 0.4405989944934845 }, { "epoch": 3.996794015495592, "grad_norm": 2.516801357269287, "learning_rate": 6.032059845044082e-06, "loss": 0.5212, "step": 14960 }, { "epoch": 3.996794015495592, "step": 14960, "train/loss_ctc": 0.6804004311561584, "train/loss_error": 0.5112688541412354, "train/loss_total": 0.5450952053070068 }, { "epoch": 3.997061180870959, "step": 14961, "train/loss_ctc": 0.3307661712169647, "train/loss_error": 0.51409512758255, "train/loss_total": 0.4774293303489685 }, { "epoch": 3.9973283462463263, "step": 14962, "train/loss_ctc": 0.4610454738140106, "train/loss_error": 0.46168091893196106, "train/loss_total": 0.4615538418292999 }, { "epoch": 3.997595511621694, "step": 14963, "train/loss_ctc": 1.3640472888946533, "train/loss_error": 0.49069178104400635, "train/loss_total": 0.6653628945350647 }, { "epoch": 3.997862676997061, "step": 14964, "train/loss_ctc": 0.7511433362960815, "train/loss_error": 0.4618013799190521, "train/loss_total": 0.519669771194458 }, { "epoch": 3.9981298423724283, "step": 14965, "train/loss_ctc": 1.6179351806640625, "train/loss_error": 0.39573192596435547, "train/loss_total": 0.6401726007461548 }, { "epoch": 3.998397007747796, "step": 14966, "train/loss_ctc": 0.8338245153427124, "train/loss_error": 0.37072524428367615, "train/loss_total": 0.46334511041641235 }, { "epoch": 3.998664173123163, "step": 14967, "train/loss_ctc": 0.37312281131744385, "train/loss_error": 0.36864230036735535, "train/loss_total": 0.36953842639923096 }, 
{ "epoch": 3.9989313384985303, "step": 14968, "train/loss_ctc": 0.3420318067073822, "train/loss_error": 0.3987122178077698, "train/loss_total": 0.38737615942955017 }, { "epoch": 3.999198503873898, "step": 14969, "train/loss_ctc": 0.5632307529449463, "train/loss_error": 0.42817801237106323, "train/loss_total": 0.4551885724067688 }, { "epoch": 3.999465669249265, "grad_norm": 1.7077381610870361, "learning_rate": 6.016029922522041e-06, "loss": 0.4985, "step": 14970 }, { "epoch": 3.999465669249265, "step": 14970, "train/loss_ctc": 0.6593422889709473, "train/loss_error": 0.4342283308506012, "train/loss_total": 0.4792511463165283 }, { "epoch": 3.999732834624633, "step": 14971, "train/loss_ctc": 0.5443034172058105, "train/loss_error": 0.43185025453567505, "train/loss_total": 0.4543408751487732 }, { "epoch": 4.0, "eval_eval/f1_0": 0.6238980293273926, "eval_eval/f1_1": 0.8253514170646667, "eval_eval/precision_0": 0.7869774699211121, "eval_eval/precision_1": 0.7529020309448242, "eval_eval/recall_0": 0.5168045163154602, "eval_eval/recall_1": 0.9132285714149475, "eval_eval/wer": 0.15928185386964847, "eval_runtime": 34.8227, "eval_samples_per_second": 13.181, "eval_steps_per_second": 13.181, "step": 14972 }, { "epoch": 4.0, "step": 14972, "train/loss_ctc": 0.5491935014724731, "train/loss_error": 0.4163609743118286, "train/loss_total": 0.4429274797439575 }, { "epoch": 4.000267165375368, "step": 14973, "train/loss_ctc": 0.7983735799789429, "train/loss_error": 0.3884310722351074, "train/loss_total": 0.47041958570480347 }, { "epoch": 4.000534330750734, "step": 14974, "train/loss_ctc": 0.794492244720459, "train/loss_error": 0.38640108704566956, "train/loss_total": 0.4680193066596985 }, { "epoch": 4.000801496126102, "step": 14975, "train/loss_ctc": 0.4313572645187378, "train/loss_error": 0.44242456555366516, "train/loss_total": 0.44021111726760864 }, { "epoch": 4.00106866150147, "step": 14976, "train/loss_ctc": 0.8383301496505737, "train/loss_error": 0.4199973940849304, 
"train/loss_total": 0.503663957118988 }, { "epoch": 4.001335826876836, "step": 14977, "train/loss_ctc": 0.46134960651397705, "train/loss_error": 0.39971327781677246, "train/loss_total": 0.4120405614376068 }, { "epoch": 4.001602992252204, "step": 14978, "train/loss_ctc": 0.518499493598938, "train/loss_error": 0.3670165240764618, "train/loss_total": 0.39731311798095703 }, { "epoch": 4.001870157627572, "step": 14979, "train/loss_ctc": 0.6769769191741943, "train/loss_error": 0.4412873685359955, "train/loss_total": 0.4884253144264221 }, { "epoch": 4.0021373230029385, "grad_norm": 2.1171982288360596, "learning_rate": 6e-06, "loss": 0.4557, "step": 14980 }, { "epoch": 4.0021373230029385, "step": 14980, "train/loss_ctc": 1.0805341005325317, "train/loss_error": 0.4357893764972687, "train/loss_total": 0.5647383332252502 }, { "epoch": 4.002404488378306, "step": 14981, "train/loss_ctc": 0.6629919409751892, "train/loss_error": 0.41168224811553955, "train/loss_total": 0.4619441628456116 }, { "epoch": 4.002671653753674, "step": 14982, "train/loss_ctc": 0.6921132206916809, "train/loss_error": 0.38774779438972473, "train/loss_total": 0.44862091541290283 }, { "epoch": 4.0029388191290405, "step": 14983, "train/loss_ctc": 0.3076910376548767, "train/loss_error": 0.4378788471221924, "train/loss_total": 0.4118413031101227 }, { "epoch": 4.003205984504408, "step": 14984, "train/loss_ctc": 0.6402045488357544, "train/loss_error": 0.4563228189945221, "train/loss_total": 0.493099182844162 }, { "epoch": 4.003473149879776, "step": 14985, "train/loss_ctc": 0.693724513053894, "train/loss_error": 0.47752735018730164, "train/loss_total": 0.5207667946815491 }, { "epoch": 4.0037403152551425, "step": 14986, "train/loss_ctc": 0.06745147705078125, "train/loss_error": 0.3718726336956024, "train/loss_total": 0.3109883964061737 }, { "epoch": 4.00400748063051, "step": 14987, "train/loss_ctc": 0.611321747303009, "train/loss_error": 0.5286999344825745, "train/loss_total": 0.5452243089675903 }, { "epoch": 
4.004274646005878, "step": 14988, "train/loss_ctc": 0.6053106784820557, "train/loss_error": 0.44032156467437744, "train/loss_total": 0.4733193814754486 }, { "epoch": 4.004541811381245, "step": 14989, "train/loss_ctc": 0.6641635894775391, "train/loss_error": 0.45481574535369873, "train/loss_total": 0.49668532609939575 }, { "epoch": 4.004808976756612, "grad_norm": 1.4116272926330566, "learning_rate": 5.983970077477959e-06, "loss": 0.4727, "step": 14990 }, { "epoch": 4.004808976756612, "step": 14990, "train/loss_ctc": 0.47703468799591064, "train/loss_error": 0.47315704822540283, "train/loss_total": 0.4739325940608978 }, { "epoch": 4.00507614213198, "step": 14991, "train/loss_ctc": 0.6515015363693237, "train/loss_error": 0.48551493883132935, "train/loss_total": 0.5187122821807861 }, { "epoch": 4.0053433075073475, "step": 14992, "train/loss_ctc": 1.5452218055725098, "train/loss_error": 0.4734518826007843, "train/loss_total": 0.6878058910369873 }, { "epoch": 4.005610472882714, "step": 14993, "train/loss_ctc": 0.4731042683124542, "train/loss_error": 0.40569090843200684, "train/loss_total": 0.41917359828948975 }, { "epoch": 4.005877638258082, "step": 14994, "train/loss_ctc": 0.7114328145980835, "train/loss_error": 0.43840911984443665, "train/loss_total": 0.493013858795166 }, { "epoch": 4.0061448036334495, "step": 14995, "train/loss_ctc": 0.733057975769043, "train/loss_error": 0.4607977271080017, "train/loss_total": 0.5152497887611389 }, { "epoch": 4.006411969008816, "step": 14996, "train/loss_ctc": 1.2557263374328613, "train/loss_error": 0.4759979844093323, "train/loss_total": 0.6319437026977539 }, { "epoch": 4.006679134384184, "step": 14997, "train/loss_ctc": 1.1450109481811523, "train/loss_error": 0.5193217992782593, "train/loss_total": 0.6444596648216248 }, { "epoch": 4.0069462997595515, "step": 14998, "train/loss_ctc": 1.0765509605407715, "train/loss_error": 0.4125872552394867, "train/loss_total": 0.5453799962997437 }, { "epoch": 4.007213465134918, "step": 14999, 
"train/loss_ctc": 0.7953646779060364, "train/loss_error": 0.4751325845718384, "train/loss_total": 0.5391789674758911 }, { "epoch": 4.007480630510286, "grad_norm": 1.9905232191085815, "learning_rate": 5.967940154955918e-06, "loss": 0.5469, "step": 15000 }, { "epoch": 4.007480630510286, "step": 15000, "train/loss_ctc": 0.8653500080108643, "train/loss_error": 0.488078236579895, "train/loss_total": 0.5635325908660889 }, { "epoch": 4.0077477958856536, "step": 15001, "train/loss_ctc": 0.7601214051246643, "train/loss_error": 0.4077647626399994, "train/loss_total": 0.4782360792160034 }, { "epoch": 4.00801496126102, "step": 15002, "train/loss_ctc": 1.5758270025253296, "train/loss_error": 0.4965382516384125, "train/loss_total": 0.7123960256576538 }, { "epoch": 4.008282126636388, "step": 15003, "train/loss_ctc": 0.4285062551498413, "train/loss_error": 0.4309626519680023, "train/loss_total": 0.43047139048576355 }, { "epoch": 4.008549292011756, "step": 15004, "train/loss_ctc": 0.5778447985649109, "train/loss_error": 0.4047960937023163, "train/loss_total": 0.4394058585166931 }, { "epoch": 4.008816457387122, "step": 15005, "train/loss_ctc": 0.566428542137146, "train/loss_error": 0.3744942247867584, "train/loss_total": 0.41288110613822937 }, { "epoch": 4.00908362276249, "step": 15006, "train/loss_ctc": 0.8144434690475464, "train/loss_error": 0.48486295342445374, "train/loss_total": 0.5507790446281433 }, { "epoch": 4.009350788137858, "step": 15007, "train/loss_ctc": 0.5041863322257996, "train/loss_error": 0.40493637323379517, "train/loss_total": 0.42478635907173157 }, { "epoch": 4.009617953513224, "step": 15008, "train/loss_ctc": 0.800165057182312, "train/loss_error": 0.4048343598842621, "train/loss_total": 0.4839005172252655 }, { "epoch": 4.009885118888592, "step": 15009, "train/loss_ctc": 0.7081218361854553, "train/loss_error": 0.39229562878608704, "train/loss_total": 0.45546090602874756 }, { "epoch": 4.01015228426396, "grad_norm": 2.4105241298675537, "learning_rate": 
5.951910232433877e-06, "loss": 0.4952, "step": 15010 }, { "epoch": 4.01015228426396, "step": 15010, "train/loss_ctc": 0.355796754360199, "train/loss_error": 0.49124306440353394, "train/loss_total": 0.46415382623672485 }, { "epoch": 4.010419449639326, "step": 15011, "train/loss_ctc": 0.782925546169281, "train/loss_error": 0.468608558177948, "train/loss_total": 0.5314719676971436 }, { "epoch": 4.010686615014694, "step": 15012, "train/loss_ctc": 1.068147897720337, "train/loss_error": 0.3985669016838074, "train/loss_total": 0.5324831008911133 }, { "epoch": 4.010953780390062, "step": 15013, "train/loss_ctc": 0.608900785446167, "train/loss_error": 0.4435321092605591, "train/loss_total": 0.4766058623790741 }, { "epoch": 4.011220945765428, "step": 15014, "train/loss_ctc": 0.5111350417137146, "train/loss_error": 0.40308713912963867, "train/loss_total": 0.4246967136859894 }, { "epoch": 4.011488111140796, "step": 15015, "train/loss_ctc": 0.7525715231895447, "train/loss_error": 0.3986210525035858, "train/loss_total": 0.469411164522171 }, { "epoch": 4.011755276516164, "step": 15016, "train/loss_ctc": 0.497135192155838, "train/loss_error": 0.4162593185901642, "train/loss_total": 0.4324344992637634 }, { "epoch": 4.0120224418915305, "step": 15017, "train/loss_ctc": 0.6631771326065063, "train/loss_error": 0.4596388339996338, "train/loss_total": 0.5003464818000793 }, { "epoch": 4.012289607266898, "step": 15018, "train/loss_ctc": 0.5979581475257874, "train/loss_error": 0.4285582900047302, "train/loss_total": 0.4624382555484772 }, { "epoch": 4.012556772642266, "step": 15019, "train/loss_ctc": 0.9538186192512512, "train/loss_error": 0.4574090838432312, "train/loss_total": 0.5566909909248352 }, { "epoch": 4.0128239380176325, "grad_norm": 2.365635395050049, "learning_rate": 5.935880309911835e-06, "loss": 0.4851, "step": 15020 }, { "epoch": 4.0128239380176325, "step": 15020, "train/loss_ctc": 0.40023890137672424, "train/loss_error": 0.5350080728530884, "train/loss_total": 
0.508054256439209 }, { "epoch": 4.013091103393, "step": 15021, "train/loss_ctc": 1.1521193981170654, "train/loss_error": 0.47390782833099365, "train/loss_total": 0.6095501780509949 }, { "epoch": 4.013358268768368, "step": 15022, "train/loss_ctc": 0.9367725253105164, "train/loss_error": 0.4669911861419678, "train/loss_total": 0.5609474182128906 }, { "epoch": 4.013625434143735, "step": 15023, "train/loss_ctc": 0.5114149451255798, "train/loss_error": 0.3709582984447479, "train/loss_total": 0.39904963970184326 }, { "epoch": 4.013892599519102, "step": 15024, "train/loss_ctc": 0.6953529119491577, "train/loss_error": 0.44155964255332947, "train/loss_total": 0.492318332195282 }, { "epoch": 4.01415976489447, "step": 15025, "train/loss_ctc": 0.39844152331352234, "train/loss_error": 0.3613049387931824, "train/loss_total": 0.3687322735786438 }, { "epoch": 4.014426930269837, "step": 15026, "train/loss_ctc": 0.4798973500728607, "train/loss_error": 0.5307486653327942, "train/loss_total": 0.5205783843994141 }, { "epoch": 4.014694095645204, "step": 15027, "train/loss_ctc": 0.5519027709960938, "train/loss_error": 0.4394926428794861, "train/loss_total": 0.4619746804237366 }, { "epoch": 4.014961261020572, "step": 15028, "train/loss_ctc": 0.9579082131385803, "train/loss_error": 0.4562690854072571, "train/loss_total": 0.5565969347953796 }, { "epoch": 4.0152284263959395, "step": 15029, "train/loss_ctc": 0.4882718324661255, "train/loss_error": 0.45805078744888306, "train/loss_total": 0.46409499645233154 }, { "epoch": 4.015495591771306, "grad_norm": 1.8063424825668335, "learning_rate": 5.919850387389795e-06, "loss": 0.4942, "step": 15030 }, { "epoch": 4.015495591771306, "step": 15030, "train/loss_ctc": 0.6203295588493347, "train/loss_error": 0.3859257400035858, "train/loss_total": 0.43280649185180664 }, { "epoch": 4.015762757146674, "step": 15031, "train/loss_ctc": 0.42127299308776855, "train/loss_error": 0.4036661684513092, "train/loss_total": 0.4071875214576721 }, { "epoch": 
4.0160299225220415, "step": 15032, "train/loss_ctc": 0.5216408371925354, "train/loss_error": 0.4565601348876953, "train/loss_total": 0.46957629919052124 }, { "epoch": 4.016297087897408, "step": 15033, "train/loss_ctc": 0.8816581964492798, "train/loss_error": 0.4085098206996918, "train/loss_total": 0.5031394958496094 }, { "epoch": 4.016564253272776, "step": 15034, "train/loss_ctc": 0.4909624755382538, "train/loss_error": 0.4586860239505768, "train/loss_total": 0.46514129638671875 }, { "epoch": 4.0168314186481435, "step": 15035, "train/loss_ctc": 0.2815445065498352, "train/loss_error": 0.40268999338150024, "train/loss_total": 0.3784608840942383 }, { "epoch": 4.01709858402351, "step": 15036, "train/loss_ctc": 0.6267786026000977, "train/loss_error": 0.45505958795547485, "train/loss_total": 0.4894033968448639 }, { "epoch": 4.017365749398878, "step": 15037, "train/loss_ctc": 0.9383587837219238, "train/loss_error": 0.4463920593261719, "train/loss_total": 0.5447854399681091 }, { "epoch": 4.017632914774246, "step": 15038, "train/loss_ctc": 0.5369278192520142, "train/loss_error": 0.3780531883239746, "train/loss_total": 0.4098281264305115 }, { "epoch": 4.017900080149612, "step": 15039, "train/loss_ctc": 0.9306840896606445, "train/loss_error": 0.3657241463661194, "train/loss_total": 0.4787161350250244 }, { "epoch": 4.01816724552498, "grad_norm": 1.3910919427871704, "learning_rate": 5.903820464867754e-06, "loss": 0.4579, "step": 15040 }, { "epoch": 4.01816724552498, "step": 15040, "train/loss_ctc": 0.26939263939857483, "train/loss_error": 0.4491226077079773, "train/loss_total": 0.41317659616470337 }, { "epoch": 4.018434410900348, "step": 15041, "train/loss_ctc": 0.5716838240623474, "train/loss_error": 0.41268110275268555, "train/loss_total": 0.44448164105415344 }, { "epoch": 4.018701576275714, "step": 15042, "train/loss_ctc": 0.523673951625824, "train/loss_error": 0.4408087134361267, "train/loss_total": 0.4573817849159241 }, { "epoch": 4.018968741651082, "step": 15043, 
"train/loss_ctc": 0.8685784935951233, "train/loss_error": 0.4156001806259155, "train/loss_total": 0.5061958432197571 }, { "epoch": 4.01923590702645, "step": 15044, "train/loss_ctc": 0.7442374229431152, "train/loss_error": 0.4675546884536743, "train/loss_total": 0.5228912830352783 }, { "epoch": 4.019503072401816, "step": 15045, "train/loss_ctc": 0.8657999634742737, "train/loss_error": 0.4932697117328644, "train/loss_total": 0.5677757859230042 }, { "epoch": 4.019770237777184, "step": 15046, "train/loss_ctc": 1.5224169492721558, "train/loss_error": 0.4259742200374603, "train/loss_total": 0.6452627778053284 }, { "epoch": 4.020037403152552, "step": 15047, "train/loss_ctc": 0.4103143811225891, "train/loss_error": 0.40678152441978455, "train/loss_total": 0.4074881076812744 }, { "epoch": 4.020304568527918, "step": 15048, "train/loss_ctc": 0.7888941168785095, "train/loss_error": 0.4064728021621704, "train/loss_total": 0.48295706510543823 }, { "epoch": 4.020571733903286, "step": 15049, "train/loss_ctc": 0.6009480953216553, "train/loss_error": 0.45445308089256287, "train/loss_total": 0.4837520718574524 }, { "epoch": 4.020838899278654, "grad_norm": 1.3905829191207886, "learning_rate": 5.887790542345712e-06, "loss": 0.4931, "step": 15050 }, { "epoch": 4.020838899278654, "step": 15050, "train/loss_ctc": 0.4036761522293091, "train/loss_error": 0.5273278951644897, "train/loss_total": 0.5025975704193115 }, { "epoch": 4.02110606465402, "step": 15051, "train/loss_ctc": 0.6588842272758484, "train/loss_error": 0.4693703353404999, "train/loss_total": 0.5072731375694275 }, { "epoch": 4.021373230029388, "step": 15052, "train/loss_ctc": 1.0399467945098877, "train/loss_error": 0.4743316173553467, "train/loss_total": 0.5874546766281128 }, { "epoch": 4.021640395404756, "step": 15053, "train/loss_ctc": 0.4396441578865051, "train/loss_error": 0.46375608444213867, "train/loss_total": 0.4589337110519409 }, { "epoch": 4.0219075607801225, "step": 15054, "train/loss_ctc": 0.6916618347167969, 
"train/loss_error": 0.4675719738006592, "train/loss_total": 0.5123899579048157 }, { "epoch": 4.02217472615549, "step": 15055, "train/loss_ctc": 0.30868470668792725, "train/loss_error": 0.40170738101005554, "train/loss_total": 0.3831028640270233 }, { "epoch": 4.022441891530858, "step": 15056, "train/loss_ctc": 0.5986131429672241, "train/loss_error": 0.46824929118156433, "train/loss_total": 0.4943220615386963 }, { "epoch": 4.022709056906225, "step": 15057, "train/loss_ctc": 0.34701403975486755, "train/loss_error": 0.5064509510993958, "train/loss_total": 0.4745635688304901 }, { "epoch": 4.022976222281592, "step": 15058, "train/loss_ctc": 0.8124574422836304, "train/loss_error": 0.44771623611450195, "train/loss_total": 0.5206644535064697 }, { "epoch": 4.02324338765696, "step": 15059, "train/loss_ctc": 0.6273756623268127, "train/loss_error": 0.4418661296367645, "train/loss_total": 0.4789680540561676 }, { "epoch": 4.023510553032327, "grad_norm": 1.483816146850586, "learning_rate": 5.871760619823671e-06, "loss": 0.492, "step": 15060 }, { "epoch": 4.023510553032327, "step": 15060, "train/loss_ctc": 0.691326379776001, "train/loss_error": 0.4012546241283417, "train/loss_total": 0.4592689871788025 }, { "epoch": 4.023777718407694, "step": 15061, "train/loss_ctc": 0.5127198696136475, "train/loss_error": 0.47981464862823486, "train/loss_total": 0.4863957166671753 }, { "epoch": 4.024044883783062, "step": 15062, "train/loss_ctc": 0.49475494027137756, "train/loss_error": 0.3654289245605469, "train/loss_total": 0.39129412174224854 }, { "epoch": 4.024312049158429, "step": 15063, "train/loss_ctc": 1.093498706817627, "train/loss_error": 0.46313580870628357, "train/loss_total": 0.5892083644866943 }, { "epoch": 4.024579214533796, "step": 15064, "train/loss_ctc": 1.0638785362243652, "train/loss_error": 0.488326758146286, "train/loss_total": 0.6034371256828308 }, { "epoch": 4.024846379909164, "step": 15065, "train/loss_ctc": 0.40172079205513, "train/loss_error": 0.4922022521495819, 
"train/loss_total": 0.47410598397254944 }, { "epoch": 4.0251135452845315, "step": 15066, "train/loss_ctc": 0.3440682590007782, "train/loss_error": 0.445401668548584, "train/loss_total": 0.4251349866390228 }, { "epoch": 4.025380710659898, "step": 15067, "train/loss_ctc": 0.42577672004699707, "train/loss_error": 0.46564769744873047, "train/loss_total": 0.45767349004745483 }, { "epoch": 4.025647876035266, "step": 15068, "train/loss_ctc": 0.4334648549556732, "train/loss_error": 0.3780961334705353, "train/loss_total": 0.3891698718070984 }, { "epoch": 4.0259150414106335, "step": 15069, "train/loss_ctc": 0.28613603115081787, "train/loss_error": 0.4219614863395691, "train/loss_total": 0.3947964012622833 }, { "epoch": 4.026182206786, "grad_norm": 2.1228325366973877, "learning_rate": 5.85573069730163e-06, "loss": 0.467, "step": 15070 }, { "epoch": 4.026182206786, "step": 15070, "train/loss_ctc": 0.46761634945869446, "train/loss_error": 0.47830164432525635, "train/loss_total": 0.4761645793914795 }, { "epoch": 4.026449372161368, "step": 15071, "train/loss_ctc": 0.799598217010498, "train/loss_error": 0.3675716817378998, "train/loss_total": 0.45397698879241943 }, { "epoch": 4.0267165375367355, "step": 15072, "train/loss_ctc": 0.535511314868927, "train/loss_error": 0.44525206089019775, "train/loss_total": 0.46330392360687256 }, { "epoch": 4.026983702912102, "step": 15073, "train/loss_ctc": 0.699042558670044, "train/loss_error": 0.46920129656791687, "train/loss_total": 0.5151695609092712 }, { "epoch": 4.02725086828747, "step": 15074, "train/loss_ctc": 0.6647375822067261, "train/loss_error": 0.4798411726951599, "train/loss_total": 0.51682049036026 }, { "epoch": 4.027518033662838, "step": 15075, "train/loss_ctc": 1.0459216833114624, "train/loss_error": 0.4847233295440674, "train/loss_total": 0.5969629883766174 }, { "epoch": 4.027785199038204, "step": 15076, "train/loss_ctc": 1.0520095825195312, "train/loss_error": 0.384336918592453, "train/loss_total": 0.5178714990615845 }, { 
"epoch": 4.028052364413572, "step": 15077, "train/loss_ctc": 0.766188383102417, "train/loss_error": 0.4757500886917114, "train/loss_total": 0.5338377356529236 }, { "epoch": 4.02831952978894, "step": 15078, "train/loss_ctc": 1.5678118467330933, "train/loss_error": 0.4300611615180969, "train/loss_total": 0.6576113104820251 }, { "epoch": 4.028586695164306, "step": 15079, "train/loss_ctc": 0.552309513092041, "train/loss_error": 0.3839864730834961, "train/loss_total": 0.41765111684799194 }, { "epoch": 4.028853860539674, "grad_norm": 1.4754512310028076, "learning_rate": 5.839700774779589e-06, "loss": 0.5149, "step": 15080 }, { "epoch": 4.028853860539674, "step": 15080, "train/loss_ctc": 0.4738618731498718, "train/loss_error": 0.4691206216812134, "train/loss_total": 0.47006887197494507 }, { "epoch": 4.029121025915042, "step": 15081, "train/loss_ctc": 0.7707366943359375, "train/loss_error": 0.38456854224205017, "train/loss_total": 0.4618021845817566 }, { "epoch": 4.029388191290408, "step": 15082, "train/loss_ctc": 0.25707995891571045, "train/loss_error": 0.4809374213218689, "train/loss_total": 0.4361659288406372 }, { "epoch": 4.029655356665776, "step": 15083, "train/loss_ctc": 0.9158535599708557, "train/loss_error": 0.4732765853404999, "train/loss_total": 0.5617920160293579 }, { "epoch": 4.029922522041144, "step": 15084, "train/loss_ctc": 0.7721767425537109, "train/loss_error": 0.34575963020324707, "train/loss_total": 0.4310430884361267 }, { "epoch": 4.03018968741651, "step": 15085, "train/loss_ctc": 0.8158794641494751, "train/loss_error": 0.46630820631980896, "train/loss_total": 0.5362224578857422 }, { "epoch": 4.030456852791878, "step": 15086, "train/loss_ctc": 0.8111972808837891, "train/loss_error": 0.4078470468521118, "train/loss_total": 0.4885171055793762 }, { "epoch": 4.030724018167246, "step": 15087, "train/loss_ctc": 0.7771728038787842, "train/loss_error": 0.41050440073013306, "train/loss_total": 0.4838380813598633 }, { "epoch": 4.030991183542612, "step": 15088, 
"train/loss_ctc": 0.6311333179473877, "train/loss_error": 0.4339846074581146, "train/loss_total": 0.4734143614768982 }, { "epoch": 4.03125834891798, "step": 15089, "train/loss_ctc": 1.1105523109436035, "train/loss_error": 0.36053094267845154, "train/loss_total": 0.5105352401733398 }, { "epoch": 4.031525514293348, "grad_norm": 2.3298566341400146, "learning_rate": 5.8236708522575475e-06, "loss": 0.4853, "step": 15090 }, { "epoch": 4.031525514293348, "step": 15090, "train/loss_ctc": 0.525306761264801, "train/loss_error": 0.38358721137046814, "train/loss_total": 0.4119311273097992 }, { "epoch": 4.031792679668715, "step": 15091, "train/loss_ctc": 0.6891998052597046, "train/loss_error": 0.3939530849456787, "train/loss_total": 0.4530024528503418 }, { "epoch": 4.032059845044082, "step": 15092, "train/loss_ctc": 0.4550154507160187, "train/loss_error": 0.41417762637138367, "train/loss_total": 0.42234519124031067 }, { "epoch": 4.03232701041945, "step": 15093, "train/loss_ctc": 0.6925405263900757, "train/loss_error": 0.40840229392051697, "train/loss_total": 0.46522995829582214 }, { "epoch": 4.032594175794817, "step": 15094, "train/loss_ctc": 0.8684265613555908, "train/loss_error": 0.44706636667251587, "train/loss_total": 0.5313384532928467 }, { "epoch": 4.032861341170184, "step": 15095, "train/loss_ctc": 0.44055822491645813, "train/loss_error": 0.4524787366390228, "train/loss_total": 0.45009464025497437 }, { "epoch": 4.033128506545552, "step": 15096, "train/loss_ctc": 0.8220757246017456, "train/loss_error": 0.4436076581478119, "train/loss_total": 0.5193012952804565 }, { "epoch": 4.033395671920919, "step": 15097, "train/loss_ctc": 0.45845144987106323, "train/loss_error": 0.4386780261993408, "train/loss_total": 0.4426327347755432 }, { "epoch": 4.033662837296286, "step": 15098, "train/loss_ctc": 0.646943211555481, "train/loss_error": 0.44592007994651794, "train/loss_total": 0.4861246943473816 }, { "epoch": 4.033930002671654, "step": 15099, "train/loss_ctc": 0.4358450174331665, 
"train/loss_error": 0.508892297744751, "train/loss_total": 0.4942828416824341 }, { "epoch": 4.034197168047021, "grad_norm": 1.9508510828018188, "learning_rate": 5.8076409297355065e-06, "loss": 0.4676, "step": 15100 }, { "epoch": 4.034197168047021, "step": 15100, "train/loss_ctc": 0.7194108963012695, "train/loss_error": 0.38012224435806274, "train/loss_total": 0.44797998666763306 }, { "epoch": 4.034464333422388, "step": 15101, "train/loss_ctc": 0.7149544954299927, "train/loss_error": 0.4478408098220825, "train/loss_total": 0.5012635588645935 }, { "epoch": 4.034731498797756, "step": 15102, "train/loss_ctc": 1.0435914993286133, "train/loss_error": 0.4107886254787445, "train/loss_total": 0.5373492240905762 }, { "epoch": 4.0349986641731235, "step": 15103, "train/loss_ctc": 0.3480888605117798, "train/loss_error": 0.39970067143440247, "train/loss_total": 0.38937830924987793 }, { "epoch": 4.03526582954849, "step": 15104, "train/loss_ctc": 0.9736067056655884, "train/loss_error": 0.39541852474212646, "train/loss_total": 0.5110561847686768 }, { "epoch": 4.035532994923858, "step": 15105, "train/loss_ctc": 1.0032000541687012, "train/loss_error": 0.4357859194278717, "train/loss_total": 0.5492687225341797 }, { "epoch": 4.0358001602992255, "step": 15106, "train/loss_ctc": 0.45453399419784546, "train/loss_error": 0.4568827450275421, "train/loss_total": 0.45641300082206726 }, { "epoch": 4.036067325674592, "step": 15107, "train/loss_ctc": 0.7323676347732544, "train/loss_error": 0.5045085549354553, "train/loss_total": 0.550080418586731 }, { "epoch": 4.03633449104996, "step": 15108, "train/loss_ctc": 1.5444890260696411, "train/loss_error": 0.440874308347702, "train/loss_total": 0.6615972518920898 }, { "epoch": 4.0366016564253275, "step": 15109, "train/loss_ctc": 0.9744765758514404, "train/loss_error": 0.4014663100242615, "train/loss_total": 0.5160683393478394 }, { "epoch": 4.036868821800694, "grad_norm": 1.636063575744629, "learning_rate": 5.791611007213465e-06, "loss": 0.512, "step": 
15110 }, { "epoch": 4.036868821800694, "step": 15110, "train/loss_ctc": 0.4075215458869934, "train/loss_error": 0.4624166488647461, "train/loss_total": 0.45143765211105347 }, { "epoch": 4.037135987176062, "step": 15111, "train/loss_ctc": 0.764838457107544, "train/loss_error": 0.4464021325111389, "train/loss_total": 0.5100893974304199 }, { "epoch": 4.03740315255143, "step": 15112, "train/loss_ctc": 1.0032408237457275, "train/loss_error": 0.436145156621933, "train/loss_total": 0.5495643019676208 }, { "epoch": 4.037670317926796, "step": 15113, "train/loss_ctc": 0.6630954146385193, "train/loss_error": 0.3528817296028137, "train/loss_total": 0.4149244725704193 }, { "epoch": 4.037937483302164, "step": 15114, "train/loss_ctc": 0.8866497278213501, "train/loss_error": 0.4673428237438202, "train/loss_total": 0.5512042045593262 }, { "epoch": 4.038204648677532, "step": 15115, "train/loss_ctc": 0.39739152789115906, "train/loss_error": 0.5129814147949219, "train/loss_total": 0.48986345529556274 }, { "epoch": 4.038471814052898, "step": 15116, "train/loss_ctc": 0.8778659105300903, "train/loss_error": 0.5014984011650085, "train/loss_total": 0.5767719149589539 }, { "epoch": 4.038738979428266, "step": 15117, "train/loss_ctc": 0.46683233976364136, "train/loss_error": 0.3585914969444275, "train/loss_total": 0.38023966550827026 }, { "epoch": 4.039006144803634, "step": 15118, "train/loss_ctc": 0.8913061618804932, "train/loss_error": 0.48710736632347107, "train/loss_total": 0.5679471492767334 }, { "epoch": 4.039273310179, "step": 15119, "train/loss_ctc": 0.5717488527297974, "train/loss_error": 0.4835224747657776, "train/loss_total": 0.5011677742004395 }, { "epoch": 4.039540475554368, "grad_norm": 1.4868652820587158, "learning_rate": 5.775581084691424e-06, "loss": 0.4993, "step": 15120 }, { "epoch": 4.039540475554368, "step": 15120, "train/loss_ctc": 0.8787336349487305, "train/loss_error": 0.49225863814353943, "train/loss_total": 0.5695536136627197 }, { "epoch": 4.039807640929736, "step": 
15121, "train/loss_ctc": 0.4834773540496826, "train/loss_error": 0.3929070234298706, "train/loss_total": 0.4110211133956909 }, { "epoch": 4.040074806305103, "step": 15122, "train/loss_ctc": 1.2291944026947021, "train/loss_error": 0.5309869050979614, "train/loss_total": 0.6706284284591675 }, { "epoch": 4.04034197168047, "step": 15123, "train/loss_ctc": 1.109525442123413, "train/loss_error": 0.4356025159358978, "train/loss_total": 0.5703871250152588 }, { "epoch": 4.040609137055838, "step": 15124, "train/loss_ctc": 0.7150365114212036, "train/loss_error": 0.41127774119377136, "train/loss_total": 0.47202950716018677 }, { "epoch": 4.040876302431205, "step": 15125, "train/loss_ctc": 0.6183843612670898, "train/loss_error": 0.42886078357696533, "train/loss_total": 0.46676549315452576 }, { "epoch": 4.041143467806572, "step": 15126, "train/loss_ctc": 0.6223537921905518, "train/loss_error": 0.41405755281448364, "train/loss_total": 0.4557168185710907 }, { "epoch": 4.04141063318194, "step": 15127, "train/loss_ctc": 0.7275373935699463, "train/loss_error": 0.4828437566757202, "train/loss_total": 0.5317825078964233 }, { "epoch": 4.041677798557307, "step": 15128, "train/loss_ctc": 1.0414505004882812, "train/loss_error": 0.4276387691497803, "train/loss_total": 0.5504010915756226 }, { "epoch": 4.041944963932674, "step": 15129, "train/loss_ctc": 0.4196503162384033, "train/loss_error": 0.45995062589645386, "train/loss_total": 0.45189058780670166 }, { "epoch": 4.042212129308042, "grad_norm": 3.5629658699035645, "learning_rate": 5.759551162169383e-06, "loss": 0.515, "step": 15130 }, { "epoch": 4.042212129308042, "step": 15130, "train/loss_ctc": 1.158691167831421, "train/loss_error": 0.4739921987056732, "train/loss_total": 0.6109319925308228 }, { "epoch": 4.042479294683409, "step": 15131, "train/loss_ctc": 0.6198771595954895, "train/loss_error": 0.4194612503051758, "train/loss_total": 0.45954442024230957 }, { "epoch": 4.042746460058776, "step": 15132, "train/loss_ctc": 0.3617817163467407, 
"train/loss_error": 0.4425908625125885, "train/loss_total": 0.42642903327941895 }, { "epoch": 4.043013625434144, "step": 15133, "train/loss_ctc": 0.585281491279602, "train/loss_error": 0.3838934004306793, "train/loss_total": 0.4241710305213928 }, { "epoch": 4.043280790809511, "step": 15134, "train/loss_ctc": 0.31134968996047974, "train/loss_error": 0.3349781930446625, "train/loss_total": 0.3302524983882904 }, { "epoch": 4.043547956184878, "step": 15135, "train/loss_ctc": 1.1419775485992432, "train/loss_error": 0.48217645287513733, "train/loss_total": 0.6141366958618164 }, { "epoch": 4.043815121560246, "step": 15136, "train/loss_ctc": 1.317501187324524, "train/loss_error": 0.4494338929653168, "train/loss_total": 0.6230473518371582 }, { "epoch": 4.0440822869356134, "step": 15137, "train/loss_ctc": 0.3475632667541504, "train/loss_error": 0.4319089651107788, "train/loss_total": 0.4150398373603821 }, { "epoch": 4.04434945231098, "step": 15138, "train/loss_ctc": 0.7156633734703064, "train/loss_error": 0.4334903359413147, "train/loss_total": 0.48992496728897095 }, { "epoch": 4.044616617686348, "step": 15139, "train/loss_ctc": 0.7886192798614502, "train/loss_error": 0.49432656168937683, "train/loss_total": 0.5531851053237915 }, { "epoch": 4.0448837830617155, "grad_norm": 4.352624893188477, "learning_rate": 5.743521239647342e-06, "loss": 0.4947, "step": 15140 }, { "epoch": 4.0448837830617155, "step": 15140, "train/loss_ctc": 0.68680340051651, "train/loss_error": 0.49961838126182556, "train/loss_total": 0.5370553731918335 }, { "epoch": 4.045150948437082, "step": 15141, "train/loss_ctc": 0.6711408495903015, "train/loss_error": 0.45381709933280945, "train/loss_total": 0.49728184938430786 }, { "epoch": 4.04541811381245, "step": 15142, "train/loss_ctc": 0.2937176823616028, "train/loss_error": 0.3546638786792755, "train/loss_total": 0.34247463941574097 }, { "epoch": 4.0456852791878175, "step": 15143, "train/loss_ctc": 1.0025702714920044, "train/loss_error": 0.45380881428718567, 
"train/loss_total": 0.5635611414909363 }, { "epoch": 4.045952444563184, "step": 15144, "train/loss_ctc": 0.9090386629104614, "train/loss_error": 0.4099813401699066, "train/loss_total": 0.5097928047180176 }, { "epoch": 4.046219609938552, "step": 15145, "train/loss_ctc": 0.6694920659065247, "train/loss_error": 0.4394262135028839, "train/loss_total": 0.4854394197463989 }, { "epoch": 4.0464867753139195, "step": 15146, "train/loss_ctc": 0.5480829477310181, "train/loss_error": 0.5004684925079346, "train/loss_total": 0.5099914073944092 }, { "epoch": 4.046753940689286, "step": 15147, "train/loss_ctc": 0.674589991569519, "train/loss_error": 0.47447431087493896, "train/loss_total": 0.5144974589347839 }, { "epoch": 4.047021106064654, "step": 15148, "train/loss_ctc": 0.36728811264038086, "train/loss_error": 0.41475149989128113, "train/loss_total": 0.40525883436203003 }, { "epoch": 4.047288271440022, "step": 15149, "train/loss_ctc": 0.8456252217292786, "train/loss_error": 0.47438666224479675, "train/loss_total": 0.54863440990448 }, { "epoch": 4.047555436815388, "grad_norm": 1.9906878471374512, "learning_rate": 5.7274913171253e-06, "loss": 0.4914, "step": 15150 }, { "epoch": 4.047555436815388, "step": 15150, "train/loss_ctc": 0.32883623242378235, "train/loss_error": 0.3050656020641327, "train/loss_total": 0.3098197281360626 }, { "epoch": 4.047822602190756, "step": 15151, "train/loss_ctc": 1.2632701396942139, "train/loss_error": 0.4101540744304657, "train/loss_total": 0.5807772874832153 }, { "epoch": 4.048089767566124, "step": 15152, "train/loss_ctc": 0.37430405616760254, "train/loss_error": 0.4156643748283386, "train/loss_total": 0.40739232301712036 }, { "epoch": 4.04835693294149, "step": 15153, "train/loss_ctc": 0.7450176477432251, "train/loss_error": 0.42303943634033203, "train/loss_total": 0.48743510246276855 }, { "epoch": 4.048624098316858, "step": 15154, "train/loss_ctc": 0.3545929789543152, "train/loss_error": 0.42026352882385254, "train/loss_total": 0.4071294069290161 }, 
{ "epoch": 4.048891263692226, "step": 15155, "train/loss_ctc": 0.7734707593917847, "train/loss_error": 0.4628754258155823, "train/loss_total": 0.5249944925308228 }, { "epoch": 4.049158429067593, "step": 15156, "train/loss_ctc": 0.5010592937469482, "train/loss_error": 0.39985352754592896, "train/loss_total": 0.42009469866752625 }, { "epoch": 4.04942559444296, "step": 15157, "train/loss_ctc": 1.608775019645691, "train/loss_error": 0.486225962638855, "train/loss_total": 0.7107357978820801 }, { "epoch": 4.049692759818328, "step": 15158, "train/loss_ctc": 0.9013804793357849, "train/loss_error": 0.41609761118888855, "train/loss_total": 0.513154149055481 }, { "epoch": 4.049959925193695, "step": 15159, "train/loss_ctc": 0.5487114191055298, "train/loss_error": 0.386862576007843, "train/loss_total": 0.4192323386669159 }, { "epoch": 4.050227090569062, "grad_norm": 2.34547758102417, "learning_rate": 5.711461394603259e-06, "loss": 0.4781, "step": 15160 }, { "epoch": 4.050227090569062, "step": 15160, "train/loss_ctc": 0.6949225664138794, "train/loss_error": 0.44093841314315796, "train/loss_total": 0.4917352795600891 }, { "epoch": 4.05049425594443, "step": 15161, "train/loss_ctc": 0.8076279163360596, "train/loss_error": 0.5307548642158508, "train/loss_total": 0.5861294865608215 }, { "epoch": 4.050761421319797, "step": 15162, "train/loss_ctc": 0.9800512194633484, "train/loss_error": 0.37183403968811035, "train/loss_total": 0.493477463722229 }, { "epoch": 4.051028586695164, "step": 15163, "train/loss_ctc": 0.4017285406589508, "train/loss_error": 0.43561846017837524, "train/loss_total": 0.4288404881954193 }, { "epoch": 4.051295752070532, "step": 15164, "train/loss_ctc": 0.44832396507263184, "train/loss_error": 0.36406949162483215, "train/loss_total": 0.3809203803539276 }, { "epoch": 4.051562917445899, "step": 15165, "train/loss_ctc": 0.6516597270965576, "train/loss_error": 0.4649611711502075, "train/loss_total": 0.5023009181022644 }, { "epoch": 4.051830082821266, "step": 15166, 
"train/loss_ctc": 0.5585331916809082, "train/loss_error": 0.44747284054756165, "train/loss_total": 0.4696849286556244 }, { "epoch": 4.052097248196634, "step": 15167, "train/loss_ctc": 0.35762694478034973, "train/loss_error": 0.4342784881591797, "train/loss_total": 0.4189482033252716 }, { "epoch": 4.052364413572001, "step": 15168, "train/loss_ctc": 0.4702172875404358, "train/loss_error": 0.4450094401836395, "train/loss_total": 0.4500510096549988 }, { "epoch": 4.052631578947368, "step": 15169, "train/loss_ctc": 0.7361441254615784, "train/loss_error": 0.4836761951446533, "train/loss_total": 0.5341697931289673 }, { "epoch": 4.052898744322736, "grad_norm": 1.1735855340957642, "learning_rate": 5.695431472081219e-06, "loss": 0.4756, "step": 15170 }, { "epoch": 4.052898744322736, "step": 15170, "train/loss_ctc": 0.6855916380882263, "train/loss_error": 0.4795089066028595, "train/loss_total": 0.520725429058075 }, { "epoch": 4.053165909698103, "step": 15171, "train/loss_ctc": 0.3118293583393097, "train/loss_error": 0.4347791373729706, "train/loss_total": 0.4101891815662384 }, { "epoch": 4.05343307507347, "step": 15172, "train/loss_ctc": 1.149262547492981, "train/loss_error": 0.5051445364952087, "train/loss_total": 0.63396817445755 }, { "epoch": 4.053700240448838, "step": 15173, "train/loss_ctc": 0.24685941636562347, "train/loss_error": 0.3826628029346466, "train/loss_total": 0.3555021286010742 }, { "epoch": 4.0539674058242054, "step": 15174, "train/loss_ctc": 0.7748663425445557, "train/loss_error": 0.40169888734817505, "train/loss_total": 0.4763323962688446 }, { "epoch": 4.054234571199572, "step": 15175, "train/loss_ctc": 0.48122701048851013, "train/loss_error": 0.4091925024986267, "train/loss_total": 0.4235994219779968 }, { "epoch": 4.05450173657494, "step": 15176, "train/loss_ctc": 1.144895315170288, "train/loss_error": 0.4138747751712799, "train/loss_total": 0.5600789189338684 }, { "epoch": 4.0547689019503075, "step": 15177, "train/loss_ctc": 0.7605323791503906, 
"train/loss_error": 0.3651149868965149, "train/loss_total": 0.44419848918914795 }, { "epoch": 4.055036067325674, "step": 15178, "train/loss_ctc": 0.3264814019203186, "train/loss_error": 0.46725529432296753, "train/loss_total": 0.4391005337238312 }, { "epoch": 4.055303232701042, "step": 15179, "train/loss_ctc": 0.39176106452941895, "train/loss_error": 0.45292773842811584, "train/loss_total": 0.4406944215297699 }, { "epoch": 4.0555703980764095, "grad_norm": 4.052783966064453, "learning_rate": 5.679401549559177e-06, "loss": 0.4704, "step": 15180 }, { "epoch": 4.0555703980764095, "step": 15180, "train/loss_ctc": 0.5243651866912842, "train/loss_error": 0.4189586937427521, "train/loss_total": 0.4400399923324585 }, { "epoch": 4.055837563451776, "step": 15181, "train/loss_ctc": 0.708791971206665, "train/loss_error": 0.5030907988548279, "train/loss_total": 0.5442310571670532 }, { "epoch": 4.056104728827144, "step": 15182, "train/loss_ctc": 0.4080086052417755, "train/loss_error": 0.3531004786491394, "train/loss_total": 0.36408209800720215 }, { "epoch": 4.0563718942025115, "step": 15183, "train/loss_ctc": 1.0258407592773438, "train/loss_error": 0.48195940256118774, "train/loss_total": 0.590735673904419 }, { "epoch": 4.056639059577878, "step": 15184, "train/loss_ctc": 1.37082839012146, "train/loss_error": 0.4415931701660156, "train/loss_total": 0.6274402141571045 }, { "epoch": 4.056906224953246, "step": 15185, "train/loss_ctc": 0.3981650173664093, "train/loss_error": 0.42285090684890747, "train/loss_total": 0.4179137349128723 }, { "epoch": 4.057173390328614, "step": 15186, "train/loss_ctc": 1.3768917322158813, "train/loss_error": 0.47093215584754944, "train/loss_total": 0.6521240472793579 }, { "epoch": 4.05744055570398, "step": 15187, "train/loss_ctc": 0.9812528491020203, "train/loss_error": 0.43813514709472656, "train/loss_total": 0.5467587113380432 }, { "epoch": 4.057707721079348, "step": 15188, "train/loss_ctc": 0.9400886297225952, "train/loss_error": 0.4140874445438385, 
"train/loss_total": 0.5192877054214478 }, { "epoch": 4.057974886454716, "step": 15189, "train/loss_ctc": 0.40656742453575134, "train/loss_error": 0.3994644284248352, "train/loss_total": 0.40088504552841187 }, { "epoch": 4.058242051830083, "grad_norm": 2.956277370452881, "learning_rate": 5.663371627037136e-06, "loss": 0.5103, "step": 15190 }, { "epoch": 4.058242051830083, "step": 15190, "train/loss_ctc": 0.4249260723590851, "train/loss_error": 0.38433265686035156, "train/loss_total": 0.39245134592056274 }, { "epoch": 4.05850921720545, "step": 15191, "train/loss_ctc": 0.72400963306427, "train/loss_error": 0.4166969954967499, "train/loss_total": 0.4781595468521118 }, { "epoch": 4.058776382580818, "step": 15192, "train/loss_ctc": 0.5977789163589478, "train/loss_error": 0.41119319200515747, "train/loss_total": 0.4485103487968445 }, { "epoch": 4.059043547956185, "step": 15193, "train/loss_ctc": 0.797020435333252, "train/loss_error": 0.46703195571899414, "train/loss_total": 0.5330296754837036 }, { "epoch": 4.059310713331552, "step": 15194, "train/loss_ctc": 0.7671255469322205, "train/loss_error": 0.47043225169181824, "train/loss_total": 0.5297709107398987 }, { "epoch": 4.05957787870692, "step": 15195, "train/loss_ctc": 0.8187354803085327, "train/loss_error": 0.4474041163921356, "train/loss_total": 0.521670401096344 }, { "epoch": 4.059845044082287, "step": 15196, "train/loss_ctc": 0.7671060562133789, "train/loss_error": 0.43577712774276733, "train/loss_total": 0.5020428895950317 }, { "epoch": 4.060112209457654, "step": 15197, "train/loss_ctc": 1.0831772089004517, "train/loss_error": 0.40064379572868347, "train/loss_total": 0.537150502204895 }, { "epoch": 4.060379374833022, "step": 15198, "train/loss_ctc": 0.6645004749298096, "train/loss_error": 0.4575364887714386, "train/loss_total": 0.49892932176589966 }, { "epoch": 4.060646540208389, "step": 15199, "train/loss_ctc": 0.49648499488830566, "train/loss_error": 0.46241557598114014, "train/loss_total": 0.46922945976257324 }, { 
"epoch": 4.060913705583756, "grad_norm": 2.8464276790618896, "learning_rate": 5.647341704515095e-06, "loss": 0.4911, "step": 15200 }, { "epoch": 4.060913705583756, "step": 15200, "train/loss_ctc": 0.3999479115009308, "train/loss_error": 0.4230497181415558, "train/loss_total": 0.4184293746948242 }, { "epoch": 4.061180870959124, "step": 15201, "train/loss_ctc": 0.3763883113861084, "train/loss_error": 0.40438488125801086, "train/loss_total": 0.3987855613231659 }, { "epoch": 4.061448036334491, "step": 15202, "train/loss_ctc": 1.1499664783477783, "train/loss_error": 0.5039125680923462, "train/loss_total": 0.6331233382225037 }, { "epoch": 4.061715201709858, "step": 15203, "train/loss_ctc": 0.9672554135322571, "train/loss_error": 0.4641454815864563, "train/loss_total": 0.5647674798965454 }, { "epoch": 4.061982367085226, "step": 15204, "train/loss_ctc": 0.7408708333969116, "train/loss_error": 0.45223167538642883, "train/loss_total": 0.5099595189094543 }, { "epoch": 4.062249532460593, "step": 15205, "train/loss_ctc": 1.3961939811706543, "train/loss_error": 0.485742449760437, "train/loss_total": 0.6678327322006226 }, { "epoch": 4.06251669783596, "step": 15206, "train/loss_ctc": 0.7811166048049927, "train/loss_error": 0.49650880694389343, "train/loss_total": 0.5534303784370422 }, { "epoch": 4.062783863211328, "step": 15207, "train/loss_ctc": 0.799834132194519, "train/loss_error": 0.41654375195503235, "train/loss_total": 0.4932018220424652 }, { "epoch": 4.063051028586695, "step": 15208, "train/loss_ctc": 0.24529939889907837, "train/loss_error": 0.4119751453399658, "train/loss_total": 0.37863999605178833 }, { "epoch": 4.063318193962062, "step": 15209, "train/loss_ctc": 0.3531806468963623, "train/loss_error": 0.40457382798194885, "train/loss_total": 0.39429518580436707 }, { "epoch": 4.06358535933743, "grad_norm": 1.941051959991455, "learning_rate": 5.631311781993054e-06, "loss": 0.5012, "step": 15210 }, { "epoch": 4.06358535933743, "step": 15210, "train/loss_ctc": 
0.9651558995246887, "train/loss_error": 0.4418577253818512, "train/loss_total": 0.5465173721313477 }, { "epoch": 4.0638525247127975, "step": 15211, "train/loss_ctc": 0.6727538704872131, "train/loss_error": 0.3996693193912506, "train/loss_total": 0.45428624749183655 }, { "epoch": 4.064119690088164, "step": 15212, "train/loss_ctc": 0.6504116058349609, "train/loss_error": 0.46417686343193054, "train/loss_total": 0.5014238357543945 }, { "epoch": 4.064386855463532, "step": 15213, "train/loss_ctc": 0.4468999207019806, "train/loss_error": 0.44949644804000854, "train/loss_total": 0.44897717237472534 }, { "epoch": 4.0646540208388995, "step": 15214, "train/loss_ctc": 0.5149585008621216, "train/loss_error": 0.5163911581039429, "train/loss_total": 0.5161046385765076 }, { "epoch": 4.064921186214266, "step": 15215, "train/loss_ctc": 0.41632843017578125, "train/loss_error": 0.37303081154823303, "train/loss_total": 0.3816903531551361 }, { "epoch": 4.065188351589634, "step": 15216, "train/loss_ctc": 0.498671293258667, "train/loss_error": 0.446887731552124, "train/loss_total": 0.4572444558143616 }, { "epoch": 4.0654555169650015, "step": 15217, "train/loss_ctc": 0.6068707704544067, "train/loss_error": 0.47117334604263306, "train/loss_total": 0.4983128309249878 }, { "epoch": 4.065722682340368, "step": 15218, "train/loss_ctc": 0.38978201150894165, "train/loss_error": 0.4293432831764221, "train/loss_total": 0.4214310348033905 }, { "epoch": 4.065989847715736, "step": 15219, "train/loss_ctc": 0.8119168877601624, "train/loss_error": 0.33509162068367004, "train/loss_total": 0.43045666813850403 }, { "epoch": 4.0662570130911035, "grad_norm": 2.4012792110443115, "learning_rate": 5.615281859471013e-06, "loss": 0.4656, "step": 15220 }, { "epoch": 4.0662570130911035, "step": 15220, "train/loss_ctc": 0.511918306350708, "train/loss_error": 0.42457115650177, "train/loss_total": 0.4420405924320221 }, { "epoch": 4.066524178466471, "step": 15221, "train/loss_ctc": 0.4581927955150604, 
"train/loss_error": 0.4105370342731476, "train/loss_total": 0.4200682044029236 }, { "epoch": 4.066791343841838, "step": 15222, "train/loss_ctc": 0.46383315324783325, "train/loss_error": 0.40329816937446594, "train/loss_total": 0.41540518403053284 }, { "epoch": 4.067058509217206, "step": 15223, "train/loss_ctc": 1.132865071296692, "train/loss_error": 0.43041545152664185, "train/loss_total": 0.5709053874015808 }, { "epoch": 4.067325674592573, "step": 15224, "train/loss_ctc": 0.9659231901168823, "train/loss_error": 0.473893940448761, "train/loss_total": 0.5722998380661011 }, { "epoch": 4.06759283996794, "step": 15225, "train/loss_ctc": 1.1248977184295654, "train/loss_error": 0.4754931330680847, "train/loss_total": 0.6053740978240967 }, { "epoch": 4.067860005343308, "step": 15226, "train/loss_ctc": 0.8402942419052124, "train/loss_error": 0.5473629236221313, "train/loss_total": 0.6059492230415344 }, { "epoch": 4.068127170718675, "step": 15227, "train/loss_ctc": 1.5388963222503662, "train/loss_error": 0.4729549288749695, "train/loss_total": 0.6861432194709778 }, { "epoch": 4.068394336094042, "step": 15228, "train/loss_ctc": 1.0107946395874023, "train/loss_error": 0.37449878454208374, "train/loss_total": 0.5017579793930054 }, { "epoch": 4.06866150146941, "step": 15229, "train/loss_ctc": 0.38583311438560486, "train/loss_error": 0.45724546909332275, "train/loss_total": 0.44296300411224365 }, { "epoch": 4.068928666844777, "grad_norm": 1.7661229372024536, "learning_rate": 5.599251936948972e-06, "loss": 0.5263, "step": 15230 }, { "epoch": 4.068928666844777, "step": 15230, "train/loss_ctc": 0.4442752003669739, "train/loss_error": 0.4397777318954468, "train/loss_total": 0.4406772255897522 }, { "epoch": 4.069195832220144, "step": 15231, "train/loss_ctc": 0.6482506990432739, "train/loss_error": 0.4138874113559723, "train/loss_total": 0.46076008677482605 }, { "epoch": 4.069462997595512, "step": 15232, "train/loss_ctc": 1.2843647003173828, "train/loss_error": 0.45442479848861694, 
"train/loss_total": 0.6204128265380859 }, { "epoch": 4.069730162970879, "step": 15233, "train/loss_ctc": 0.7369130849838257, "train/loss_error": 0.44224339723587036, "train/loss_total": 0.5011773109436035 }, { "epoch": 4.069997328346246, "step": 15234, "train/loss_ctc": 0.9389267563819885, "train/loss_error": 0.43990328907966614, "train/loss_total": 0.5397080183029175 }, { "epoch": 4.070264493721614, "step": 15235, "train/loss_ctc": 0.3210945129394531, "train/loss_error": 0.40930673480033875, "train/loss_total": 0.3916642963886261 }, { "epoch": 4.070531659096981, "step": 15236, "train/loss_ctc": 0.48502251505851746, "train/loss_error": 0.3170306384563446, "train/loss_total": 0.3506290316581726 }, { "epoch": 4.070798824472348, "step": 15237, "train/loss_ctc": 0.5590231418609619, "train/loss_error": 0.46807610988616943, "train/loss_total": 0.48626554012298584 }, { "epoch": 4.071065989847716, "step": 15238, "train/loss_ctc": 1.0246115922927856, "train/loss_error": 0.48238107562065125, "train/loss_total": 0.590827226638794 }, { "epoch": 4.071333155223083, "step": 15239, "train/loss_ctc": 0.551474928855896, "train/loss_error": 0.49622172117233276, "train/loss_total": 0.5072723627090454 }, { "epoch": 4.07160032059845, "grad_norm": 3.041691303253174, "learning_rate": 5.583222014426931e-06, "loss": 0.4889, "step": 15240 }, { "epoch": 4.07160032059845, "step": 15240, "train/loss_ctc": 0.6723999381065369, "train/loss_error": 0.42159798741340637, "train/loss_total": 0.4717583656311035 }, { "epoch": 4.071867485973818, "step": 15241, "train/loss_ctc": 1.0014926195144653, "train/loss_error": 0.48553016781806946, "train/loss_total": 0.5887226462364197 }, { "epoch": 4.072134651349185, "step": 15242, "train/loss_ctc": 0.49892458319664, "train/loss_error": 0.4691273272037506, "train/loss_total": 0.4750867784023285 }, { "epoch": 4.072401816724552, "step": 15243, "train/loss_ctc": 0.41835957765579224, "train/loss_error": 0.4506320059299469, "train/loss_total": 0.4441775381565094 }, { 
"epoch": 4.07266898209992, "step": 15244, "train/loss_ctc": 0.4571308493614197, "train/loss_error": 0.389055460691452, "train/loss_total": 0.4026705324649811 }, { "epoch": 4.072936147475287, "step": 15245, "train/loss_ctc": 0.7314291000366211, "train/loss_error": 0.4991157352924347, "train/loss_total": 0.5455784201622009 }, { "epoch": 4.073203312850654, "step": 15246, "train/loss_ctc": 0.6148909330368042, "train/loss_error": 0.3997189700603485, "train/loss_total": 0.4427533745765686 }, { "epoch": 4.073470478226022, "step": 15247, "train/loss_ctc": 1.342360258102417, "train/loss_error": 0.4684872329235077, "train/loss_total": 0.6432618498802185 }, { "epoch": 4.0737376436013895, "step": 15248, "train/loss_ctc": 0.5973109602928162, "train/loss_error": 0.45306095480918884, "train/loss_total": 0.48191097378730774 }, { "epoch": 4.074004808976756, "step": 15249, "train/loss_ctc": 0.5070512890815735, "train/loss_error": 0.370529443025589, "train/loss_total": 0.39783382415771484 }, { "epoch": 4.074271974352124, "grad_norm": 1.4514741897583008, "learning_rate": 5.567192091904889e-06, "loss": 0.4894, "step": 15250 }, { "epoch": 4.074271974352124, "step": 15250, "train/loss_ctc": 1.6728365421295166, "train/loss_error": 0.5475559830665588, "train/loss_total": 0.7726120948791504 }, { "epoch": 4.0745391397274915, "step": 15251, "train/loss_ctc": 0.705022931098938, "train/loss_error": 0.457083523273468, "train/loss_total": 0.5066714286804199 }, { "epoch": 4.074806305102858, "step": 15252, "train/loss_ctc": 0.31763577461242676, "train/loss_error": 0.4401354491710663, "train/loss_total": 0.41563552618026733 }, { "epoch": 4.075073470478226, "step": 15253, "train/loss_ctc": 0.4187217950820923, "train/loss_error": 0.4265684187412262, "train/loss_total": 0.4249991178512573 }, { "epoch": 4.0753406358535935, "step": 15254, "train/loss_ctc": 0.6291194558143616, "train/loss_error": 0.41325005888938904, "train/loss_total": 0.45642393827438354 }, { "epoch": 4.075607801228961, "step": 15255, 
"train/loss_ctc": 0.41380125284194946, "train/loss_error": 0.34017911553382874, "train/loss_total": 0.35490354895591736 }, { "epoch": 4.075874966604328, "step": 15256, "train/loss_ctc": 0.37296873331069946, "train/loss_error": 0.40747714042663574, "train/loss_total": 0.4005754590034485 }, { "epoch": 4.0761421319796955, "step": 15257, "train/loss_ctc": 0.39560753107070923, "train/loss_error": 0.4189225137233734, "train/loss_total": 0.41425952315330505 }, { "epoch": 4.076409297355063, "step": 15258, "train/loss_ctc": 0.7988471984863281, "train/loss_error": 0.4014192521572113, "train/loss_total": 0.48090484738349915 }, { "epoch": 4.07667646273043, "step": 15259, "train/loss_ctc": 0.8296549320220947, "train/loss_error": 0.49468711018562317, "train/loss_total": 0.5616806745529175 }, { "epoch": 4.076943628105798, "grad_norm": 2.7833251953125, "learning_rate": 5.5511621693828485e-06, "loss": 0.4789, "step": 15260 }, { "epoch": 4.076943628105798, "step": 15260, "train/loss_ctc": 0.5193960666656494, "train/loss_error": 0.4546900689601898, "train/loss_total": 0.4676312804222107 }, { "epoch": 4.077210793481165, "step": 15261, "train/loss_ctc": 0.9334248304367065, "train/loss_error": 0.4205445647239685, "train/loss_total": 0.523120641708374 }, { "epoch": 4.077477958856532, "step": 15262, "train/loss_ctc": 1.1073933839797974, "train/loss_error": 0.447019100189209, "train/loss_total": 0.5790939927101135 }, { "epoch": 4.0777451242319, "step": 15263, "train/loss_ctc": 1.1221116781234741, "train/loss_error": 0.4203965663909912, "train/loss_total": 0.5607396364212036 }, { "epoch": 4.078012289607267, "step": 15264, "train/loss_ctc": 0.9322077035903931, "train/loss_error": 0.4467344880104065, "train/loss_total": 0.5438291430473328 }, { "epoch": 4.078279454982634, "step": 15265, "train/loss_ctc": 0.8261827230453491, "train/loss_error": 0.37273839116096497, "train/loss_total": 0.46342724561691284 }, { "epoch": 4.078546620358002, "step": 15266, "train/loss_ctc": 0.5882283449172974, 
"train/loss_error": 0.3735378682613373, "train/loss_total": 0.41647598147392273 }, { "epoch": 4.078813785733369, "step": 15267, "train/loss_ctc": 0.5135644674301147, "train/loss_error": 0.39072275161743164, "train/loss_total": 0.41529110074043274 }, { "epoch": 4.079080951108736, "step": 15268, "train/loss_ctc": 0.4297814965248108, "train/loss_error": 0.41795405745506287, "train/loss_total": 0.4203195571899414 }, { "epoch": 4.079348116484104, "step": 15269, "train/loss_ctc": 0.499098539352417, "train/loss_error": 0.4684199094772339, "train/loss_total": 0.474555641412735 }, { "epoch": 4.079615281859471, "grad_norm": 1.7308573722839355, "learning_rate": 5.5351322468608074e-06, "loss": 0.4864, "step": 15270 }, { "epoch": 4.079615281859471, "step": 15270, "train/loss_ctc": 1.1028399467468262, "train/loss_error": 0.47612157464027405, "train/loss_total": 0.6014652252197266 }, { "epoch": 4.079882447234838, "step": 15271, "train/loss_ctc": 0.9274009466171265, "train/loss_error": 0.46461987495422363, "train/loss_total": 0.5571761131286621 }, { "epoch": 4.080149612610206, "step": 15272, "train/loss_ctc": 1.101482629776001, "train/loss_error": 0.4674229323863983, "train/loss_total": 0.5942348837852478 }, { "epoch": 4.080416777985573, "step": 15273, "train/loss_ctc": 0.3596247136592865, "train/loss_error": 0.41998615860939026, "train/loss_total": 0.4079138934612274 }, { "epoch": 4.08068394336094, "step": 15274, "train/loss_ctc": 0.56300950050354, "train/loss_error": 0.40975287556648254, "train/loss_total": 0.4404042065143585 }, { "epoch": 4.080951108736308, "step": 15275, "train/loss_ctc": 0.38328468799591064, "train/loss_error": 0.4170304536819458, "train/loss_total": 0.41028130054473877 }, { "epoch": 4.081218274111675, "step": 15276, "train/loss_ctc": 0.37291842699050903, "train/loss_error": 0.4064981937408447, "train/loss_total": 0.3997822403907776 }, { "epoch": 4.081485439487042, "step": 15277, "train/loss_ctc": 0.8678755760192871, "train/loss_error": 0.3978250324726105, 
"train/loss_total": 0.49183517694473267 }, { "epoch": 4.08175260486241, "step": 15278, "train/loss_ctc": 1.0982733964920044, "train/loss_error": 0.4684544801712036, "train/loss_total": 0.5944182872772217 }, { "epoch": 4.082019770237777, "step": 15279, "train/loss_ctc": 0.7783786058425903, "train/loss_error": 0.4294698238372803, "train/loss_total": 0.4992516040802002 }, { "epoch": 4.082286935613144, "grad_norm": 1.68159019947052, "learning_rate": 5.5191023243387655e-06, "loss": 0.4997, "step": 15280 }, { "epoch": 4.082286935613144, "step": 15280, "train/loss_ctc": 1.018310546875, "train/loss_error": 0.5136836767196655, "train/loss_total": 0.6146090626716614 }, { "epoch": 4.082554100988512, "step": 15281, "train/loss_ctc": 0.8045509457588196, "train/loss_error": 0.42824020981788635, "train/loss_total": 0.503502368927002 }, { "epoch": 4.082821266363879, "step": 15282, "train/loss_ctc": 0.8449689745903015, "train/loss_error": 0.4118559956550598, "train/loss_total": 0.49847859144210815 }, { "epoch": 4.083088431739246, "step": 15283, "train/loss_ctc": 0.5299042463302612, "train/loss_error": 0.4378836154937744, "train/loss_total": 0.4562877416610718 }, { "epoch": 4.083355597114614, "step": 15284, "train/loss_ctc": 1.4767613410949707, "train/loss_error": 0.5173455476760864, "train/loss_total": 0.7092287540435791 }, { "epoch": 4.0836227624899815, "step": 15285, "train/loss_ctc": 0.6721703410148621, "train/loss_error": 0.43412595987319946, "train/loss_total": 0.48173484206199646 }, { "epoch": 4.083889927865348, "step": 15286, "train/loss_ctc": 0.37217575311660767, "train/loss_error": 0.3741092383861542, "train/loss_total": 0.37372255325317383 }, { "epoch": 4.084157093240716, "step": 15287, "train/loss_ctc": 1.7731432914733887, "train/loss_error": 0.4018651247024536, "train/loss_total": 0.6761207580566406 }, { "epoch": 4.0844242586160835, "step": 15288, "train/loss_ctc": 1.2151648998260498, "train/loss_error": 0.43523064255714417, "train/loss_total": 0.5912175178527832 }, { 
"epoch": 4.084691423991451, "step": 15289, "train/loss_ctc": 1.2835705280303955, "train/loss_error": 0.40780961513519287, "train/loss_total": 0.5829617977142334 }, { "epoch": 4.084958589366818, "grad_norm": 1.8100332021713257, "learning_rate": 5.5030724018167244e-06, "loss": 0.5488, "step": 15290 }, { "epoch": 4.084958589366818, "step": 15290, "train/loss_ctc": 0.9202114343643188, "train/loss_error": 0.40774860978126526, "train/loss_total": 0.5102412104606628 }, { "epoch": 4.0852257547421855, "step": 15291, "train/loss_ctc": 0.500400185585022, "train/loss_error": 0.4146890938282013, "train/loss_total": 0.43183133006095886 }, { "epoch": 4.085492920117553, "step": 15292, "train/loss_ctc": 0.5003322958946228, "train/loss_error": 0.3769606947898865, "train/loss_total": 0.4016350209712982 }, { "epoch": 4.08576008549292, "step": 15293, "train/loss_ctc": 1.346112608909607, "train/loss_error": 0.4127570688724518, "train/loss_total": 0.5994281768798828 }, { "epoch": 4.0860272508682876, "step": 15294, "train/loss_ctc": 0.8637372255325317, "train/loss_error": 0.42094120383262634, "train/loss_total": 0.5095004439353943 }, { "epoch": 4.086294416243655, "step": 15295, "train/loss_ctc": 0.3319968283176422, "train/loss_error": 0.45211827754974365, "train/loss_total": 0.4280939996242523 }, { "epoch": 4.086561581619022, "step": 15296, "train/loss_ctc": 0.6474856734275818, "train/loss_error": 0.44010689854621887, "train/loss_total": 0.4815826714038849 }, { "epoch": 4.08682874699439, "step": 15297, "train/loss_ctc": 0.27765944600105286, "train/loss_error": 0.38446223735809326, "train/loss_total": 0.36310169100761414 }, { "epoch": 4.087095912369757, "step": 15298, "train/loss_ctc": 0.6271308660507202, "train/loss_error": 0.516071617603302, "train/loss_total": 0.5382834672927856 }, { "epoch": 4.087363077745124, "step": 15299, "train/loss_ctc": 0.5317875742912292, "train/loss_error": 0.4462074041366577, "train/loss_total": 0.4633234441280365 }, { "epoch": 4.087630243120492, "grad_norm": 
1.5191196203231812, "learning_rate": 5.487042479294684e-06, "loss": 0.4727, "step": 15300 }, { "epoch": 4.087630243120492, "step": 15300, "train/loss_ctc": 0.3039778769016266, "train/loss_error": 0.40896207094192505, "train/loss_total": 0.38796523213386536 }, { "epoch": 4.087897408495859, "step": 15301, "train/loss_ctc": 0.22533538937568665, "train/loss_error": 0.36402440071105957, "train/loss_total": 0.33628660440444946 }, { "epoch": 4.088164573871226, "step": 15302, "train/loss_ctc": 0.3582439422607422, "train/loss_error": 0.42777174711227417, "train/loss_total": 0.41386622190475464 }, { "epoch": 4.088431739246594, "step": 15303, "train/loss_ctc": 0.5834977626800537, "train/loss_error": 0.4547273814678192, "train/loss_total": 0.48048144578933716 }, { "epoch": 4.088698904621961, "step": 15304, "train/loss_ctc": 0.5959203243255615, "train/loss_error": 0.4641442596912384, "train/loss_total": 0.49049949645996094 }, { "epoch": 4.088966069997328, "step": 15305, "train/loss_ctc": 0.9485417008399963, "train/loss_error": 0.4740007817745209, "train/loss_total": 0.5689089894294739 }, { "epoch": 4.089233235372696, "step": 15306, "train/loss_ctc": 0.7562992572784424, "train/loss_error": 0.5197928547859192, "train/loss_total": 0.5670941472053528 }, { "epoch": 4.089500400748063, "step": 15307, "train/loss_ctc": 0.9957514405250549, "train/loss_error": 0.510788083076477, "train/loss_total": 0.6077807545661926 }, { "epoch": 4.08976756612343, "step": 15308, "train/loss_ctc": 0.5012631416320801, "train/loss_error": 0.47043243050575256, "train/loss_total": 0.4765985906124115 }, { "epoch": 4.090034731498798, "step": 15309, "train/loss_ctc": 0.23319049179553986, "train/loss_error": 0.4562029540538788, "train/loss_total": 0.4116004705429077 }, { "epoch": 4.090301896874165, "grad_norm": 1.8334683179855347, "learning_rate": 5.471012556772642e-06, "loss": 0.4741, "step": 15310 }, { "epoch": 4.090301896874165, "step": 15310, "train/loss_ctc": 1.0506048202514648, "train/loss_error": 
0.39960387349128723, "train/loss_total": 0.5298040509223938 }, { "epoch": 4.090569062249532, "step": 15311, "train/loss_ctc": 0.3523516058921814, "train/loss_error": 0.43573033809661865, "train/loss_total": 0.4190545976161957 }, { "epoch": 4.0908362276249, "step": 15312, "train/loss_ctc": 0.4285452365875244, "train/loss_error": 0.4758876860141754, "train/loss_total": 0.4664192199707031 }, { "epoch": 4.091103393000267, "step": 15313, "train/loss_ctc": 0.8788520097732544, "train/loss_error": 0.3571796417236328, "train/loss_total": 0.46151411533355713 }, { "epoch": 4.091370558375634, "step": 15314, "train/loss_ctc": 0.9518295526504517, "train/loss_error": 0.4576381742954254, "train/loss_total": 0.5564764738082886 }, { "epoch": 4.091637723751002, "step": 15315, "train/loss_ctc": 0.7163628935813904, "train/loss_error": 0.4354947805404663, "train/loss_total": 0.4916684031486511 }, { "epoch": 4.091904889126369, "step": 15316, "train/loss_ctc": 0.7276976704597473, "train/loss_error": 0.475952684879303, "train/loss_total": 0.5263016819953918 }, { "epoch": 4.092172054501736, "step": 15317, "train/loss_ctc": 0.3173694610595703, "train/loss_error": 0.4146311283111572, "train/loss_total": 0.39517879486083984 }, { "epoch": 4.092439219877104, "step": 15318, "train/loss_ctc": 1.1919382810592651, "train/loss_error": 0.4075991213321686, "train/loss_total": 0.5644669532775879 }, { "epoch": 4.092706385252471, "step": 15319, "train/loss_ctc": 1.4920096397399902, "train/loss_error": 0.5081806778907776, "train/loss_total": 0.7049464583396912 }, { "epoch": 4.092973550627839, "grad_norm": 2.5201151371002197, "learning_rate": 5.454982634250601e-06, "loss": 0.5116, "step": 15320 }, { "epoch": 4.092973550627839, "step": 15320, "train/loss_ctc": 0.43989551067352295, "train/loss_error": 0.3792533576488495, "train/loss_total": 0.39138180017471313 }, { "epoch": 4.093240716003206, "step": 15321, "train/loss_ctc": 0.4686010479927063, "train/loss_error": 0.44912251830101013, "train/loss_total": 
0.4530182480812073 }, { "epoch": 4.0935078813785735, "step": 15322, "train/loss_ctc": 1.035056471824646, "train/loss_error": 0.5117279291152954, "train/loss_total": 0.6163936257362366 }, { "epoch": 4.093775046753941, "step": 15323, "train/loss_ctc": 0.8056453466415405, "train/loss_error": 0.47603967785835266, "train/loss_total": 0.5419608354568481 }, { "epoch": 4.094042212129308, "step": 15324, "train/loss_ctc": 0.8045801520347595, "train/loss_error": 0.48849913477897644, "train/loss_total": 0.5517153739929199 }, { "epoch": 4.0943093775046755, "step": 15325, "train/loss_ctc": 0.3707085847854614, "train/loss_error": 0.43151915073394775, "train/loss_total": 0.419357031583786 }, { "epoch": 4.094576542880043, "step": 15326, "train/loss_ctc": 0.6170731782913208, "train/loss_error": 0.49233439564704895, "train/loss_total": 0.5172821283340454 }, { "epoch": 4.09484370825541, "step": 15327, "train/loss_ctc": 0.930432915687561, "train/loss_error": 0.4827914237976074, "train/loss_total": 0.572319746017456 }, { "epoch": 4.0951108736307775, "step": 15328, "train/loss_ctc": 0.5702873468399048, "train/loss_error": 0.43918371200561523, "train/loss_total": 0.4654044508934021 }, { "epoch": 4.095378039006145, "step": 15329, "train/loss_ctc": 1.4150409698486328, "train/loss_error": 0.513637125492096, "train/loss_total": 0.6939178705215454 }, { "epoch": 4.095645204381512, "grad_norm": 9.974004745483398, "learning_rate": 5.43895271172856e-06, "loss": 0.5223, "step": 15330 }, { "epoch": 4.095645204381512, "step": 15330, "train/loss_ctc": 1.1503022909164429, "train/loss_error": 0.4666365385055542, "train/loss_total": 0.6033697128295898 }, { "epoch": 4.09591236975688, "step": 15331, "train/loss_ctc": 1.2205684185028076, "train/loss_error": 0.5613667368888855, "train/loss_total": 0.6932070851325989 }, { "epoch": 4.096179535132247, "step": 15332, "train/loss_ctc": 0.5785796642303467, "train/loss_error": 0.4303480386734009, "train/loss_total": 0.459994375705719 }, { "epoch": 
4.096446700507614, "step": 15333, "train/loss_ctc": 0.3367530107498169, "train/loss_error": 0.46974778175354004, "train/loss_total": 0.44314882159233093 }, { "epoch": 4.096713865882982, "step": 15334, "train/loss_ctc": 0.5563172101974487, "train/loss_error": 0.4531159996986389, "train/loss_total": 0.47375625371932983 }, { "epoch": 4.096981031258349, "step": 15335, "train/loss_ctc": 0.4435049891471863, "train/loss_error": 0.5693646669387817, "train/loss_total": 0.5441927313804626 }, { "epoch": 4.097248196633716, "step": 15336, "train/loss_ctc": 0.45847630500793457, "train/loss_error": 0.42550569772720337, "train/loss_total": 0.4320998191833496 }, { "epoch": 4.097515362009084, "step": 15337, "train/loss_ctc": 0.9870665073394775, "train/loss_error": 0.5266074538230896, "train/loss_total": 0.6186992526054382 }, { "epoch": 4.097782527384451, "step": 15338, "train/loss_ctc": 0.42275482416152954, "train/loss_error": 0.4608668386936188, "train/loss_total": 0.4532444477081299 }, { "epoch": 4.098049692759818, "step": 15339, "train/loss_ctc": 0.25646764039993286, "train/loss_error": 0.4598187506198883, "train/loss_total": 0.4191485345363617 }, { "epoch": 4.098316858135186, "grad_norm": 1.4470617771148682, "learning_rate": 5.422922789206518e-06, "loss": 0.5141, "step": 15340 }, { "epoch": 4.098316858135186, "step": 15340, "train/loss_ctc": 0.18437129259109497, "train/loss_error": 0.34717226028442383, "train/loss_total": 0.31461209058761597 }, { "epoch": 4.098584023510553, "step": 15341, "train/loss_ctc": 1.2837467193603516, "train/loss_error": 0.49751925468444824, "train/loss_total": 0.6547647714614868 }, { "epoch": 4.09885118888592, "step": 15342, "train/loss_ctc": 0.7103228569030762, "train/loss_error": 0.4434451460838318, "train/loss_total": 0.49682068824768066 }, { "epoch": 4.099118354261288, "step": 15343, "train/loss_ctc": 0.7479091882705688, "train/loss_error": 0.4392963945865631, "train/loss_total": 0.5010189414024353 }, { "epoch": 4.099385519636655, "step": 15344, 
"train/loss_ctc": 0.45118770003318787, "train/loss_error": 0.4054463505744934, "train/loss_total": 0.4145946502685547 }, { "epoch": 4.099652685012022, "step": 15345, "train/loss_ctc": 0.5991853475570679, "train/loss_error": 0.4180451035499573, "train/loss_total": 0.45427316427230835 }, { "epoch": 4.09991985038739, "step": 15346, "train/loss_ctc": 0.6523972153663635, "train/loss_error": 0.48820799589157104, "train/loss_total": 0.5210458636283875 }, { "epoch": 4.100187015762757, "step": 15347, "train/loss_ctc": 1.1876552104949951, "train/loss_error": 0.4655614495277405, "train/loss_total": 0.6099802255630493 }, { "epoch": 4.100454181138124, "step": 15348, "train/loss_ctc": 0.5036391615867615, "train/loss_error": 0.40181827545166016, "train/loss_total": 0.42218244075775146 }, { "epoch": 4.100721346513492, "step": 15349, "train/loss_ctc": 0.5076186656951904, "train/loss_error": 0.4624156951904297, "train/loss_total": 0.47145628929138184 }, { "epoch": 4.100988511888859, "grad_norm": 2.3914341926574707, "learning_rate": 5.406892866684478e-06, "loss": 0.4861, "step": 15350 }, { "epoch": 4.100988511888859, "step": 15350, "train/loss_ctc": 0.6835232973098755, "train/loss_error": 0.435513436794281, "train/loss_total": 0.4851154088973999 }, { "epoch": 4.101255677264226, "step": 15351, "train/loss_ctc": 0.699527382850647, "train/loss_error": 0.43390989303588867, "train/loss_total": 0.4870333969593048 }, { "epoch": 4.101522842639594, "step": 15352, "train/loss_ctc": 0.2616608738899231, "train/loss_error": 0.3754633367061615, "train/loss_total": 0.3527028262615204 }, { "epoch": 4.101790008014961, "step": 15353, "train/loss_ctc": 0.8159475326538086, "train/loss_error": 0.43762487173080444, "train/loss_total": 0.5132893919944763 }, { "epoch": 4.102057173390329, "step": 15354, "train/loss_ctc": 0.6011546850204468, "train/loss_error": 0.411437451839447, "train/loss_total": 0.44938090443611145 }, { "epoch": 4.102324338765696, "step": 15355, "train/loss_ctc": 0.4769784212112427, 
"train/loss_error": 0.4904100000858307, "train/loss_total": 0.4877236783504486 }, { "epoch": 4.102591504141063, "step": 15356, "train/loss_ctc": 0.993052065372467, "train/loss_error": 0.49353042244911194, "train/loss_total": 0.593434751033783 }, { "epoch": 4.102858669516431, "step": 15357, "train/loss_ctc": 0.6123433709144592, "train/loss_error": 0.38285744190216064, "train/loss_total": 0.42875462770462036 }, { "epoch": 4.103125834891798, "step": 15358, "train/loss_ctc": 0.6471322178840637, "train/loss_error": 0.4553174376487732, "train/loss_total": 0.4936804175376892 }, { "epoch": 4.1033930002671655, "step": 15359, "train/loss_ctc": 0.8143046498298645, "train/loss_error": 0.42928552627563477, "train/loss_total": 0.5062893629074097 }, { "epoch": 4.103660165642533, "grad_norm": 1.5109848976135254, "learning_rate": 5.390862944162437e-06, "loss": 0.4797, "step": 15360 }, { "epoch": 4.103660165642533, "step": 15360, "train/loss_ctc": 0.4734196066856384, "train/loss_error": 0.3912011682987213, "train/loss_total": 0.4076448678970337 }, { "epoch": 4.1039273310179, "step": 15361, "train/loss_ctc": 1.4158130884170532, "train/loss_error": 0.49810296297073364, "train/loss_total": 0.6816450357437134 }, { "epoch": 4.1041944963932675, "step": 15362, "train/loss_ctc": 0.7204985022544861, "train/loss_error": 0.459003746509552, "train/loss_total": 0.5113027095794678 }, { "epoch": 4.104461661768635, "step": 15363, "train/loss_ctc": 0.8908287286758423, "train/loss_error": 0.48156610131263733, "train/loss_total": 0.5634186267852783 }, { "epoch": 4.104728827144002, "step": 15364, "train/loss_ctc": 0.6495051383972168, "train/loss_error": 0.485919713973999, "train/loss_total": 0.5186368227005005 }, { "epoch": 4.1049959925193695, "step": 15365, "train/loss_ctc": 0.5080760717391968, "train/loss_error": 0.33427664637565613, "train/loss_total": 0.36903655529022217 }, { "epoch": 4.105263157894737, "step": 15366, "train/loss_ctc": 0.6006219387054443, "train/loss_error": 0.41782525181770325, 
"train/loss_total": 0.45438459515571594 }, { "epoch": 4.105530323270104, "step": 15367, "train/loss_ctc": 0.6025930047035217, "train/loss_error": 0.41225603222846985, "train/loss_total": 0.4503234326839447 }, { "epoch": 4.105797488645472, "step": 15368, "train/loss_ctc": 0.17974327504634857, "train/loss_error": 0.45123913884162903, "train/loss_total": 0.3969399929046631 }, { "epoch": 4.106064654020839, "step": 15369, "train/loss_ctc": 0.7116996645927429, "train/loss_error": 0.4954826831817627, "train/loss_total": 0.5387260913848877 }, { "epoch": 4.106331819396206, "grad_norm": 1.6721582412719727, "learning_rate": 5.374833021640395e-06, "loss": 0.4892, "step": 15370 }, { "epoch": 4.106331819396206, "step": 15370, "train/loss_ctc": 0.402533620595932, "train/loss_error": 0.42734265327453613, "train/loss_total": 0.42238086462020874 }, { "epoch": 4.106598984771574, "step": 15371, "train/loss_ctc": 0.8432543277740479, "train/loss_error": 0.46003052592277527, "train/loss_total": 0.5366753339767456 }, { "epoch": 4.106866150146941, "step": 15372, "train/loss_ctc": 0.8002516031265259, "train/loss_error": 0.44537845253944397, "train/loss_total": 0.5163530707359314 }, { "epoch": 4.107133315522308, "step": 15373, "train/loss_ctc": 0.43142950534820557, "train/loss_error": 0.46978676319122314, "train/loss_total": 0.4621153473854065 }, { "epoch": 4.107400480897676, "step": 15374, "train/loss_ctc": 0.8009676933288574, "train/loss_error": 0.44078192114830017, "train/loss_total": 0.5128191113471985 }, { "epoch": 4.107667646273043, "step": 15375, "train/loss_ctc": 0.5412741303443909, "train/loss_error": 0.35003888607025146, "train/loss_total": 0.38828593492507935 }, { "epoch": 4.10793481164841, "step": 15376, "train/loss_ctc": 0.42511990666389465, "train/loss_error": 0.3964007496833801, "train/loss_total": 0.4021446108818054 }, { "epoch": 4.108201977023778, "step": 15377, "train/loss_ctc": 0.6972784996032715, "train/loss_error": 0.46649155020713806, "train/loss_total": 
0.5126489400863647 }, { "epoch": 4.108469142399145, "step": 15378, "train/loss_ctc": 0.3159148693084717, "train/loss_error": 0.3927426338195801, "train/loss_total": 0.37737709283828735 }, { "epoch": 4.108736307774512, "step": 15379, "train/loss_ctc": 0.4213862419128418, "train/loss_error": 0.36085543036460876, "train/loss_total": 0.3729615807533264 }, { "epoch": 4.10900347314988, "grad_norm": 2.12793231010437, "learning_rate": 5.358803099118354e-06, "loss": 0.4504, "step": 15380 }, { "epoch": 4.10900347314988, "step": 15380, "train/loss_ctc": 0.5825055241584778, "train/loss_error": 0.4474564492702484, "train/loss_total": 0.4744662642478943 }, { "epoch": 4.109270638525247, "step": 15381, "train/loss_ctc": 0.6653599739074707, "train/loss_error": 0.4502991735935211, "train/loss_total": 0.49331134557724 }, { "epoch": 4.109537803900614, "step": 15382, "train/loss_ctc": 0.4613091051578522, "train/loss_error": 0.36461547017097473, "train/loss_total": 0.3839541971683502 }, { "epoch": 4.109804969275982, "step": 15383, "train/loss_ctc": 1.4292550086975098, "train/loss_error": 0.4748585522174835, "train/loss_total": 0.6657378673553467 }, { "epoch": 4.110072134651349, "step": 15384, "train/loss_ctc": 0.3612973690032959, "train/loss_error": 0.4702335298061371, "train/loss_total": 0.4484463036060333 }, { "epoch": 4.110339300026716, "step": 15385, "train/loss_ctc": 0.7472878694534302, "train/loss_error": 0.45766860246658325, "train/loss_total": 0.5155924558639526 }, { "epoch": 4.110606465402084, "step": 15386, "train/loss_ctc": 0.7670149803161621, "train/loss_error": 0.4129045009613037, "train/loss_total": 0.4837266206741333 }, { "epoch": 4.110873630777451, "step": 15387, "train/loss_ctc": 0.8998645544052124, "train/loss_error": 0.4762290120124817, "train/loss_total": 0.5609561204910278 }, { "epoch": 4.111140796152819, "step": 15388, "train/loss_ctc": 0.47179585695266724, "train/loss_error": 0.3835408091545105, "train/loss_total": 0.4011918306350708 }, { "epoch": 
4.111407961528186, "step": 15389, "train/loss_ctc": 0.8448394536972046, "train/loss_error": 0.482223242521286, "train/loss_total": 0.5547465085983276 }, { "epoch": 4.111675126903553, "grad_norm": 1.4186540842056274, "learning_rate": 5.342773176596314e-06, "loss": 0.4982, "step": 15390 }, { "epoch": 4.111675126903553, "step": 15390, "train/loss_ctc": 0.7069774866104126, "train/loss_error": 0.47167032957077026, "train/loss_total": 0.5187317728996277 }, { "epoch": 4.111942292278921, "step": 15391, "train/loss_ctc": 0.25182127952575684, "train/loss_error": 0.38671910762786865, "train/loss_total": 0.3597395420074463 }, { "epoch": 4.112209457654288, "step": 15392, "train/loss_ctc": 0.2557123601436615, "train/loss_error": 0.43344271183013916, "train/loss_total": 0.3978966474533081 }, { "epoch": 4.112476623029655, "step": 15393, "train/loss_ctc": 0.5432991981506348, "train/loss_error": 0.408979594707489, "train/loss_total": 0.4358435273170471 }, { "epoch": 4.112743788405023, "step": 15394, "train/loss_ctc": 0.32442641258239746, "train/loss_error": 0.3336499333381653, "train/loss_total": 0.3318052291870117 }, { "epoch": 4.11301095378039, "step": 15395, "train/loss_ctc": 0.5978108644485474, "train/loss_error": 0.43038424849510193, "train/loss_total": 0.463869571685791 }, { "epoch": 4.1132781191557575, "step": 15396, "train/loss_ctc": 0.7598578333854675, "train/loss_error": 0.35047006607055664, "train/loss_total": 0.4323475956916809 }, { "epoch": 4.113545284531125, "step": 15397, "train/loss_ctc": 0.3579871356487274, "train/loss_error": 0.41898155212402344, "train/loss_total": 0.40678268671035767 }, { "epoch": 4.113812449906492, "step": 15398, "train/loss_ctc": 0.20364908874034882, "train/loss_error": 0.39290693402290344, "train/loss_total": 0.3550553619861603 }, { "epoch": 4.1140796152818595, "step": 15399, "train/loss_ctc": 0.8207818865776062, "train/loss_error": 0.4601357579231262, "train/loss_total": 0.5322649478912354 }, { "epoch": 4.114346780657227, "grad_norm": 
2.2958903312683105, "learning_rate": 5.326743254074273e-06, "loss": 0.4234, "step": 15400 }, { "epoch": 4.114346780657227, "step": 15400, "train/loss_ctc": 0.7465652227401733, "train/loss_error": 0.46866634488105774, "train/loss_total": 0.5242461562156677 }, { "epoch": 4.114613946032594, "step": 15401, "train/loss_ctc": 0.6680936813354492, "train/loss_error": 0.3850182592868805, "train/loss_total": 0.44163334369659424 }, { "epoch": 4.1148811114079615, "step": 15402, "train/loss_ctc": 0.46246591210365295, "train/loss_error": 0.384067177772522, "train/loss_total": 0.39974692463874817 }, { "epoch": 4.115148276783329, "step": 15403, "train/loss_ctc": 1.1242878437042236, "train/loss_error": 0.524630606174469, "train/loss_total": 0.6445620656013489 }, { "epoch": 4.115415442158696, "step": 15404, "train/loss_ctc": 0.6460976600646973, "train/loss_error": 0.4434066414833069, "train/loss_total": 0.4839448630809784 }, { "epoch": 4.115682607534064, "step": 15405, "train/loss_ctc": 0.9892832636833191, "train/loss_error": 0.42853692173957825, "train/loss_total": 0.5406861901283264 }, { "epoch": 4.115949772909431, "step": 15406, "train/loss_ctc": 1.4888296127319336, "train/loss_error": 0.4667356312274933, "train/loss_total": 0.6711544394493103 }, { "epoch": 4.116216938284798, "step": 15407, "train/loss_ctc": 0.6048891544342041, "train/loss_error": 0.4197629690170288, "train/loss_total": 0.45678824186325073 }, { "epoch": 4.116484103660166, "step": 15408, "train/loss_ctc": 0.56280916929245, "train/loss_error": 0.4236709177494049, "train/loss_total": 0.4514985680580139 }, { "epoch": 4.116751269035533, "step": 15409, "train/loss_ctc": 0.5452781915664673, "train/loss_error": 0.443880558013916, "train/loss_total": 0.46416008472442627 }, { "epoch": 4.1170184344109, "grad_norm": 1.523710012435913, "learning_rate": 5.310713331552231e-06, "loss": 0.5078, "step": 15410 }, { "epoch": 4.1170184344109, "step": 15410, "train/loss_ctc": 0.5646513104438782, "train/loss_error": 0.3571285903453827, 
"train/loss_total": 0.3986331522464752 }, { "epoch": 4.117285599786268, "step": 15411, "train/loss_ctc": 0.9741135835647583, "train/loss_error": 0.5013322830200195, "train/loss_total": 0.5958885550498962 }, { "epoch": 4.117552765161635, "step": 15412, "train/loss_ctc": 0.9063026905059814, "train/loss_error": 0.42437270283699036, "train/loss_total": 0.5207586884498596 }, { "epoch": 4.117819930537002, "step": 15413, "train/loss_ctc": 0.3169402480125427, "train/loss_error": 0.47535091638565063, "train/loss_total": 0.44366878271102905 }, { "epoch": 4.11808709591237, "step": 15414, "train/loss_ctc": 0.4621310234069824, "train/loss_error": 0.4090823829174042, "train/loss_total": 0.41969212889671326 }, { "epoch": 4.118354261287737, "step": 15415, "train/loss_ctc": 0.9055994749069214, "train/loss_error": 0.4192344844341278, "train/loss_total": 0.5165075063705444 }, { "epoch": 4.118621426663104, "step": 15416, "train/loss_ctc": 0.9808792471885681, "train/loss_error": 0.48432865738868713, "train/loss_total": 0.5836387872695923 }, { "epoch": 4.118888592038472, "step": 15417, "train/loss_ctc": 0.3869919180870056, "train/loss_error": 0.4024462103843689, "train/loss_total": 0.39935535192489624 }, { "epoch": 4.119155757413839, "step": 15418, "train/loss_ctc": 0.6391568183898926, "train/loss_error": 0.4290252923965454, "train/loss_total": 0.4710516035556793 }, { "epoch": 4.119422922789207, "step": 15419, "train/loss_ctc": 0.4289880096912384, "train/loss_error": 0.39589571952819824, "train/loss_total": 0.40251418948173523 }, { "epoch": 4.119690088164574, "grad_norm": 2.0686933994293213, "learning_rate": 5.29468340903019e-06, "loss": 0.4752, "step": 15420 }, { "epoch": 4.119690088164574, "step": 15420, "train/loss_ctc": 0.6202418804168701, "train/loss_error": 0.4384332299232483, "train/loss_total": 0.47479498386383057 }, { "epoch": 4.119957253539941, "step": 15421, "train/loss_ctc": 0.8165318965911865, "train/loss_error": 0.473594605922699, "train/loss_total": 0.5421820878982544 }, 
{ "epoch": 4.120224418915309, "step": 15422, "train/loss_ctc": 0.5159929990768433, "train/loss_error": 0.460595965385437, "train/loss_total": 0.47167539596557617 }, { "epoch": 4.120491584290676, "step": 15423, "train/loss_ctc": 0.47419607639312744, "train/loss_error": 0.43170103430747986, "train/loss_total": 0.4402000606060028 }, { "epoch": 4.120758749666043, "step": 15424, "train/loss_ctc": 0.7655717134475708, "train/loss_error": 0.4267074167728424, "train/loss_total": 0.49448028206825256 }, { "epoch": 4.121025915041411, "step": 15425, "train/loss_ctc": 0.4675692319869995, "train/loss_error": 0.4452268183231354, "train/loss_total": 0.44969531893730164 }, { "epoch": 4.121293080416778, "step": 15426, "train/loss_ctc": 0.5018906593322754, "train/loss_error": 0.4890228807926178, "train/loss_total": 0.49159643054008484 }, { "epoch": 4.121560245792145, "step": 15427, "train/loss_ctc": 0.37180179357528687, "train/loss_error": 0.4993840157985687, "train/loss_total": 0.47386759519577026 }, { "epoch": 4.121827411167513, "step": 15428, "train/loss_ctc": 0.44877785444259644, "train/loss_error": 0.47405046224594116, "train/loss_total": 0.46899592876434326 }, { "epoch": 4.12209457654288, "step": 15429, "train/loss_ctc": 0.8581583499908447, "train/loss_error": 0.4874909520149231, "train/loss_total": 0.5616244673728943 }, { "epoch": 4.1223617419182474, "grad_norm": 1.876265048980713, "learning_rate": 5.278653486508149e-06, "loss": 0.4869, "step": 15430 }, { "epoch": 4.1223617419182474, "step": 15430, "train/loss_ctc": 0.9792351126670837, "train/loss_error": 0.4272695481777191, "train/loss_total": 0.53766268491745 }, { "epoch": 4.122628907293615, "step": 15431, "train/loss_ctc": 0.2513696849346161, "train/loss_error": 0.4659225642681122, "train/loss_total": 0.42301198840141296 }, { "epoch": 4.122896072668982, "step": 15432, "train/loss_ctc": 0.5004562735557556, "train/loss_error": 0.45996835827827454, "train/loss_total": 0.46806594729423523 }, { "epoch": 4.1231632380443495, 
"step": 15433, "train/loss_ctc": 0.9289354085922241, "train/loss_error": 0.4577680230140686, "train/loss_total": 0.5520014762878418 }, { "epoch": 4.123430403419717, "step": 15434, "train/loss_ctc": 0.5326278209686279, "train/loss_error": 0.3938713073730469, "train/loss_total": 0.421622633934021 }, { "epoch": 4.123697568795084, "step": 15435, "train/loss_ctc": 0.5867432951927185, "train/loss_error": 0.44603416323661804, "train/loss_total": 0.4741760194301605 }, { "epoch": 4.1239647341704515, "step": 15436, "train/loss_ctc": 0.4269854426383972, "train/loss_error": 0.4647315740585327, "train/loss_total": 0.4571823477745056 }, { "epoch": 4.124231899545819, "step": 15437, "train/loss_ctc": 0.4187815189361572, "train/loss_error": 0.537743091583252, "train/loss_total": 0.513950765132904 }, { "epoch": 4.124499064921186, "step": 15438, "train/loss_ctc": 1.179359793663025, "train/loss_error": 0.3858507573604584, "train/loss_total": 0.5445525646209717 }, { "epoch": 4.1247662302965535, "step": 15439, "train/loss_ctc": 0.6079768538475037, "train/loss_error": 0.457075297832489, "train/loss_total": 0.48725563287734985 }, { "epoch": 4.125033395671921, "grad_norm": 3.252592086791992, "learning_rate": 5.2626235639861075e-06, "loss": 0.4879, "step": 15440 }, { "epoch": 4.125033395671921, "step": 15440, "train/loss_ctc": 0.3870246708393097, "train/loss_error": 0.42448192834854126, "train/loss_total": 0.4169904887676239 }, { "epoch": 4.125300561047288, "step": 15441, "train/loss_ctc": 0.6744478940963745, "train/loss_error": 0.3843144476413727, "train/loss_total": 0.442341148853302 }, { "epoch": 4.125567726422656, "step": 15442, "train/loss_ctc": 0.550371527671814, "train/loss_error": 0.41525736451148987, "train/loss_total": 0.44228020310401917 }, { "epoch": 4.125834891798023, "step": 15443, "train/loss_ctc": 0.38673943281173706, "train/loss_error": 0.4530471861362457, "train/loss_total": 0.4397856593132019 }, { "epoch": 4.12610205717339, "step": 15444, "train/loss_ctc": 
0.67857825756073, "train/loss_error": 0.43307578563690186, "train/loss_total": 0.4821763038635254 }, { "epoch": 4.126369222548758, "step": 15445, "train/loss_ctc": 0.565244197845459, "train/loss_error": 0.4660657048225403, "train/loss_total": 0.485901415348053 }, { "epoch": 4.126636387924125, "step": 15446, "train/loss_ctc": 0.6230602860450745, "train/loss_error": 0.3909499943256378, "train/loss_total": 0.4373720586299896 }, { "epoch": 4.126903553299492, "step": 15447, "train/loss_ctc": 0.26214349269866943, "train/loss_error": 0.4766092002391815, "train/loss_total": 0.4337160587310791 }, { "epoch": 4.12717071867486, "step": 15448, "train/loss_ctc": 0.4594579339027405, "train/loss_error": 0.5068787932395935, "train/loss_total": 0.4973946213722229 }, { "epoch": 4.127437884050227, "step": 15449, "train/loss_ctc": 0.8269538283348083, "train/loss_error": 0.45561879873275757, "train/loss_total": 0.5298858284950256 }, { "epoch": 4.127705049425594, "grad_norm": 1.6454137563705444, "learning_rate": 5.2465936414640665e-06, "loss": 0.4608, "step": 15450 }, { "epoch": 4.127705049425594, "step": 15450, "train/loss_ctc": 0.3158358335494995, "train/loss_error": 0.4795757830142975, "train/loss_total": 0.44682782888412476 }, { "epoch": 4.127972214800962, "step": 15451, "train/loss_ctc": 0.3422948718070984, "train/loss_error": 0.3893178105354309, "train/loss_total": 0.37991324067115784 }, { "epoch": 4.128239380176329, "step": 15452, "train/loss_ctc": 0.45034074783325195, "train/loss_error": 0.5449264049530029, "train/loss_total": 0.5260092616081238 }, { "epoch": 4.128506545551696, "step": 15453, "train/loss_ctc": 0.7702582478523254, "train/loss_error": 0.3653411865234375, "train/loss_total": 0.44632458686828613 }, { "epoch": 4.128773710927064, "step": 15454, "train/loss_ctc": 0.7280409932136536, "train/loss_error": 0.41228926181793213, "train/loss_total": 0.4754396080970764 }, { "epoch": 4.129040876302431, "step": 15455, "train/loss_ctc": 0.5535388588905334, "train/loss_error": 
0.3959759771823883, "train/loss_total": 0.4274885654449463 }, { "epoch": 4.129308041677799, "step": 15456, "train/loss_ctc": 1.0131144523620605, "train/loss_error": 0.4774162173271179, "train/loss_total": 0.5845558643341064 }, { "epoch": 4.129575207053166, "step": 15457, "train/loss_ctc": 1.5031118392944336, "train/loss_error": 0.531247079372406, "train/loss_total": 0.7256200313568115 }, { "epoch": 4.129842372428533, "step": 15458, "train/loss_ctc": 0.8414609432220459, "train/loss_error": 0.4817591905593872, "train/loss_total": 0.5536995530128479 }, { "epoch": 4.130109537803901, "step": 15459, "train/loss_ctc": 0.35365328192710876, "train/loss_error": 0.48929086327552795, "train/loss_total": 0.46216335892677307 }, { "epoch": 4.130376703179268, "grad_norm": 1.402665376663208, "learning_rate": 5.230563718942025e-06, "loss": 0.5028, "step": 15460 }, { "epoch": 4.130376703179268, "step": 15460, "train/loss_ctc": 0.9258580803871155, "train/loss_error": 0.5395843386650085, "train/loss_total": 0.6168391108512878 }, { "epoch": 4.130643868554635, "step": 15461, "train/loss_ctc": 0.8378764390945435, "train/loss_error": 0.4291588068008423, "train/loss_total": 0.5109023451805115 }, { "epoch": 4.130911033930003, "step": 15462, "train/loss_ctc": 0.8995979428291321, "train/loss_error": 0.40159574151039124, "train/loss_total": 0.5011962056159973 }, { "epoch": 4.13117819930537, "step": 15463, "train/loss_ctc": 0.43067118525505066, "train/loss_error": 0.4759212136268616, "train/loss_total": 0.4668712019920349 }, { "epoch": 4.131445364680737, "step": 15464, "train/loss_ctc": 0.6207138299942017, "train/loss_error": 0.4276922345161438, "train/loss_total": 0.46629655361175537 }, { "epoch": 4.131712530056105, "step": 15465, "train/loss_ctc": 0.9355669021606445, "train/loss_error": 0.4154264032840729, "train/loss_total": 0.5194545388221741 }, { "epoch": 4.131979695431472, "step": 15466, "train/loss_ctc": 0.9089101552963257, "train/loss_error": 0.46336525678634644, "train/loss_total": 
0.5524742603302002 }, { "epoch": 4.1322468608068395, "step": 15467, "train/loss_ctc": 1.4890861511230469, "train/loss_error": 0.46361544728279114, "train/loss_total": 0.6687096357345581 }, { "epoch": 4.132514026182207, "step": 15468, "train/loss_ctc": 0.733418345451355, "train/loss_error": 0.3958507180213928, "train/loss_total": 0.46336424350738525 }, { "epoch": 4.132781191557574, "step": 15469, "train/loss_ctc": 0.323546826839447, "train/loss_error": 0.44965746998786926, "train/loss_total": 0.4244353473186493 }, { "epoch": 4.1330483569329415, "grad_norm": 2.816368579864502, "learning_rate": 5.2145337964199835e-06, "loss": 0.5191, "step": 15470 }, { "epoch": 4.1330483569329415, "step": 15470, "train/loss_ctc": 0.7339124083518982, "train/loss_error": 0.41129598021507263, "train/loss_total": 0.47581928968429565 }, { "epoch": 4.133315522308309, "step": 15471, "train/loss_ctc": 0.428971529006958, "train/loss_error": 0.37617403268814087, "train/loss_total": 0.3867335319519043 }, { "epoch": 4.133582687683676, "step": 15472, "train/loss_ctc": 0.8250974416732788, "train/loss_error": 0.45536139607429504, "train/loss_total": 0.5293086171150208 }, { "epoch": 4.1338498530590435, "step": 15473, "train/loss_ctc": 0.43417978286743164, "train/loss_error": 0.4286002218723297, "train/loss_total": 0.4297161400318146 }, { "epoch": 4.134117018434411, "step": 15474, "train/loss_ctc": 0.6647719144821167, "train/loss_error": 0.48797982931137085, "train/loss_total": 0.523338258266449 }, { "epoch": 4.134384183809778, "step": 15475, "train/loss_ctc": 0.5796751976013184, "train/loss_error": 0.3599012494087219, "train/loss_total": 0.4038560390472412 }, { "epoch": 4.1346513491851455, "step": 15476, "train/loss_ctc": 0.5368881821632385, "train/loss_error": 0.422865629196167, "train/loss_total": 0.44567015767097473 }, { "epoch": 4.134918514560513, "step": 15477, "train/loss_ctc": 0.6821117401123047, "train/loss_error": 0.4328802824020386, "train/loss_total": 0.4827265739440918 }, { "epoch": 
4.13518567993588, "step": 15478, "train/loss_ctc": 1.137052059173584, "train/loss_error": 0.4122505784034729, "train/loss_total": 0.5572108626365662 }, { "epoch": 4.135452845311248, "step": 15479, "train/loss_ctc": 0.9204816222190857, "train/loss_error": 0.42326194047927856, "train/loss_total": 0.5227058529853821 }, { "epoch": 4.135720010686615, "grad_norm": 1.3257157802581787, "learning_rate": 5.198503873897943e-06, "loss": 0.4757, "step": 15480 }, { "epoch": 4.135720010686615, "step": 15480, "train/loss_ctc": 0.5641332864761353, "train/loss_error": 0.41139256954193115, "train/loss_total": 0.4419407248497009 }, { "epoch": 4.135987176061982, "step": 15481, "train/loss_ctc": 0.45465487241744995, "train/loss_error": 0.4526071548461914, "train/loss_total": 0.4530166983604431 }, { "epoch": 4.13625434143735, "step": 15482, "train/loss_ctc": 0.5299590826034546, "train/loss_error": 0.4060487151145935, "train/loss_total": 0.43083077669143677 }, { "epoch": 4.136521506812717, "step": 15483, "train/loss_ctc": 1.035534143447876, "train/loss_error": 0.41981321573257446, "train/loss_total": 0.5429574251174927 }, { "epoch": 4.136788672188084, "step": 15484, "train/loss_ctc": 0.8727273941040039, "train/loss_error": 0.5043789148330688, "train/loss_total": 0.5780486464500427 }, { "epoch": 4.137055837563452, "step": 15485, "train/loss_ctc": 0.6428390741348267, "train/loss_error": 0.42364010214805603, "train/loss_total": 0.4674799144268036 }, { "epoch": 4.137323002938819, "step": 15486, "train/loss_ctc": 0.6672553420066833, "train/loss_error": 0.35570788383483887, "train/loss_total": 0.4180173873901367 }, { "epoch": 4.137590168314187, "step": 15487, "train/loss_ctc": 0.4259379506111145, "train/loss_error": 0.43156957626342773, "train/loss_total": 0.43044325709342957 }, { "epoch": 4.137857333689554, "step": 15488, "train/loss_ctc": 0.45633190870285034, "train/loss_error": 0.44993388652801514, "train/loss_total": 0.4512135088443756 }, { "epoch": 4.138124499064921, "step": 15489, 
"train/loss_ctc": 0.673780083656311, "train/loss_error": 0.4039904475212097, "train/loss_total": 0.45794838666915894 }, { "epoch": 4.138391664440289, "grad_norm": 7.365077495574951, "learning_rate": 5.182473951375902e-06, "loss": 0.4672, "step": 15490 }, { "epoch": 4.138391664440289, "step": 15490, "train/loss_ctc": 0.3939336836338043, "train/loss_error": 0.3628252148628235, "train/loss_total": 0.3690468966960907 }, { "epoch": 4.138658829815656, "step": 15491, "train/loss_ctc": 0.8160218000411987, "train/loss_error": 0.42913487553596497, "train/loss_total": 0.5065122842788696 }, { "epoch": 4.138925995191023, "step": 15492, "train/loss_ctc": 0.6491255760192871, "train/loss_error": 0.35954973101615906, "train/loss_total": 0.4174649119377136 }, { "epoch": 4.139193160566391, "step": 15493, "train/loss_ctc": 0.995067298412323, "train/loss_error": 0.3979153335094452, "train/loss_total": 0.5173457264900208 }, { "epoch": 4.139460325941758, "step": 15494, "train/loss_ctc": 0.4772018790245056, "train/loss_error": 0.4402165114879608, "train/loss_total": 0.44761359691619873 }, { "epoch": 4.139727491317125, "step": 15495, "train/loss_ctc": 1.0853073596954346, "train/loss_error": 0.4585448205471039, "train/loss_total": 0.5838973522186279 }, { "epoch": 4.139994656692493, "step": 15496, "train/loss_ctc": 0.6233888864517212, "train/loss_error": 0.40822774171829224, "train/loss_total": 0.451259970664978 }, { "epoch": 4.14026182206786, "step": 15497, "train/loss_ctc": 1.322129249572754, "train/loss_error": 0.3848333954811096, "train/loss_total": 0.5722925662994385 }, { "epoch": 4.140528987443227, "step": 15498, "train/loss_ctc": 0.5611873865127563, "train/loss_error": 0.3844132423400879, "train/loss_total": 0.4197680950164795 }, { "epoch": 4.140796152818595, "step": 15499, "train/loss_ctc": 0.7111092805862427, "train/loss_error": 0.42572471499443054, "train/loss_total": 0.482801616191864 }, { "epoch": 4.141063318193962, "grad_norm": 2.597306251525879, "learning_rate": 
5.16644402885386e-06, "loss": 0.4768, "step": 15500 }, { "epoch": 4.141063318193962, "step": 15500, "train/loss_ctc": 1.0390205383300781, "train/loss_error": 0.4398474395275116, "train/loss_total": 0.5596820712089539 }, { "epoch": 4.141330483569329, "step": 15501, "train/loss_ctc": 0.740848183631897, "train/loss_error": 0.42522692680358887, "train/loss_total": 0.4883511960506439 }, { "epoch": 4.141597648944697, "step": 15502, "train/loss_ctc": 1.2919026613235474, "train/loss_error": 0.4605221450328827, "train/loss_total": 0.6267982721328735 }, { "epoch": 4.141864814320064, "step": 15503, "train/loss_ctc": 0.7901815176010132, "train/loss_error": 0.44003185629844666, "train/loss_total": 0.5100618004798889 }, { "epoch": 4.1421319796954315, "step": 15504, "train/loss_ctc": 0.7619032859802246, "train/loss_error": 0.4630662500858307, "train/loss_total": 0.5228336453437805 }, { "epoch": 4.142399145070799, "step": 15505, "train/loss_ctc": 1.4244495630264282, "train/loss_error": 0.49725690484046936, "train/loss_total": 0.6826954483985901 }, { "epoch": 4.142666310446166, "step": 15506, "train/loss_ctc": 1.2713063955307007, "train/loss_error": 0.45783084630966187, "train/loss_total": 0.6205259561538696 }, { "epoch": 4.1429334758215335, "step": 15507, "train/loss_ctc": 0.5477185249328613, "train/loss_error": 0.4587923586368561, "train/loss_total": 0.47657760977745056 }, { "epoch": 4.143200641196901, "step": 15508, "train/loss_ctc": 0.4838270843029022, "train/loss_error": 0.5463932752609253, "train/loss_total": 0.5338800549507141 }, { "epoch": 4.143467806572268, "step": 15509, "train/loss_ctc": 0.5456533432006836, "train/loss_error": 0.38832560181617737, "train/loss_total": 0.41979116201400757 }, { "epoch": 4.1437349719476355, "grad_norm": 3.451615333557129, "learning_rate": 5.150414106331819e-06, "loss": 0.5441, "step": 15510 }, { "epoch": 4.1437349719476355, "step": 15510, "train/loss_ctc": 0.4138423204421997, "train/loss_error": 0.4235287010669708, "train/loss_total": 
0.4215914309024811 }, { "epoch": 4.144002137323003, "step": 15511, "train/loss_ctc": 0.6636377573013306, "train/loss_error": 0.3963688313961029, "train/loss_total": 0.4498226046562195 }, { "epoch": 4.14426930269837, "step": 15512, "train/loss_ctc": 0.3644220232963562, "train/loss_error": 0.4296415448188782, "train/loss_total": 0.4165976643562317 }, { "epoch": 4.1445364680737375, "step": 15513, "train/loss_ctc": 0.7271508574485779, "train/loss_error": 0.45031097531318665, "train/loss_total": 0.5056789517402649 }, { "epoch": 4.144803633449105, "step": 15514, "train/loss_ctc": 0.6910375356674194, "train/loss_error": 0.4090917408466339, "train/loss_total": 0.4654809236526489 }, { "epoch": 4.145070798824472, "step": 15515, "train/loss_ctc": 0.6938756704330444, "train/loss_error": 0.4395976662635803, "train/loss_total": 0.4904532730579376 }, { "epoch": 4.14533796419984, "step": 15516, "train/loss_ctc": 1.1626245975494385, "train/loss_error": 0.42856884002685547, "train/loss_total": 0.5753799676895142 }, { "epoch": 4.145605129575207, "step": 15517, "train/loss_ctc": 1.5776138305664062, "train/loss_error": 0.4247361719608307, "train/loss_total": 0.6553117036819458 }, { "epoch": 4.145872294950575, "step": 15518, "train/loss_ctc": 0.26913982629776, "train/loss_error": 0.3603472113609314, "train/loss_total": 0.3421057462692261 }, { "epoch": 4.146139460325942, "step": 15519, "train/loss_ctc": 1.3453810214996338, "train/loss_error": 0.4069744348526001, "train/loss_total": 0.5946557521820068 }, { "epoch": 4.146406625701309, "grad_norm": 3.2909770011901855, "learning_rate": 5.134384183809778e-06, "loss": 0.4917, "step": 15520 }, { "epoch": 4.146406625701309, "step": 15520, "train/loss_ctc": 0.2515740990638733, "train/loss_error": 0.3825379014015198, "train/loss_total": 0.35634514689445496 }, { "epoch": 4.146673791076677, "step": 15521, "train/loss_ctc": 1.0658063888549805, "train/loss_error": 0.5340962409973145, "train/loss_total": 0.6404382586479187 }, { "epoch": 
4.146940956452044, "step": 15522, "train/loss_ctc": 0.6688327789306641, "train/loss_error": 0.3795538544654846, "train/loss_total": 0.4374096393585205 }, { "epoch": 4.147208121827411, "step": 15523, "train/loss_ctc": 0.5454355478286743, "train/loss_error": 0.4286945164203644, "train/loss_total": 0.45204272866249084 }, { "epoch": 4.147475287202779, "step": 15524, "train/loss_ctc": 0.8717599511146545, "train/loss_error": 0.41785070300102234, "train/loss_total": 0.5086325407028198 }, { "epoch": 4.147742452578146, "step": 15525, "train/loss_ctc": 0.7513874769210815, "train/loss_error": 0.405970960855484, "train/loss_total": 0.4750542640686035 }, { "epoch": 4.148009617953513, "step": 15526, "train/loss_ctc": 0.23308256268501282, "train/loss_error": 0.5139098167419434, "train/loss_total": 0.45774438977241516 }, { "epoch": 4.148276783328881, "step": 15527, "train/loss_ctc": 0.8500806093215942, "train/loss_error": 0.5158593654632568, "train/loss_total": 0.5827036499977112 }, { "epoch": 4.148543948704248, "step": 15528, "train/loss_ctc": 0.548350989818573, "train/loss_error": 0.45930153131484985, "train/loss_total": 0.47711142897605896 }, { "epoch": 4.148811114079615, "step": 15529, "train/loss_ctc": 1.164304494857788, "train/loss_error": 0.40747979283332825, "train/loss_total": 0.5588447451591492 }, { "epoch": 4.149078279454983, "grad_norm": 3.894608974456787, "learning_rate": 5.118354261287738e-06, "loss": 0.4946, "step": 15530 }, { "epoch": 4.149078279454983, "step": 15530, "train/loss_ctc": 0.661522388458252, "train/loss_error": 0.505012035369873, "train/loss_total": 0.5363141298294067 }, { "epoch": 4.14934544483035, "step": 15531, "train/loss_ctc": 0.7684991359710693, "train/loss_error": 0.5034211874008179, "train/loss_total": 0.5564367771148682 }, { "epoch": 4.149612610205717, "step": 15532, "train/loss_ctc": 0.6502649188041687, "train/loss_error": 0.4004870057106018, "train/loss_total": 0.4504425823688507 }, { "epoch": 4.149879775581085, "step": 15533, 
"train/loss_ctc": 0.40657293796539307, "train/loss_error": 0.4572279453277588, "train/loss_total": 0.44709694385528564 }, { "epoch": 4.150146940956452, "step": 15534, "train/loss_ctc": 0.32158708572387695, "train/loss_error": 0.422529399394989, "train/loss_total": 0.40234094858169556 }, { "epoch": 4.150414106331819, "step": 15535, "train/loss_ctc": 0.4567275643348694, "train/loss_error": 0.4843009114265442, "train/loss_total": 0.47878625988960266 }, { "epoch": 4.150681271707187, "step": 15536, "train/loss_ctc": 0.5221520066261292, "train/loss_error": 0.3633266091346741, "train/loss_total": 0.395091712474823 }, { "epoch": 4.150948437082554, "step": 15537, "train/loss_ctc": 1.310779333114624, "train/loss_error": 0.4226170480251312, "train/loss_total": 0.6002495288848877 }, { "epoch": 4.151215602457921, "step": 15538, "train/loss_ctc": 0.6989495158195496, "train/loss_error": 0.45541390776634216, "train/loss_total": 0.5041210651397705 }, { "epoch": 4.151482767833289, "step": 15539, "train/loss_ctc": 0.5322020649909973, "train/loss_error": 0.4900859594345093, "train/loss_total": 0.4985091984272003 }, { "epoch": 4.151749933208656, "grad_norm": 3.6062099933624268, "learning_rate": 5.102324338765696e-06, "loss": 0.4869, "step": 15540 }, { "epoch": 4.151749933208656, "step": 15540, "train/loss_ctc": 0.7080126404762268, "train/loss_error": 0.41695037484169006, "train/loss_total": 0.4751628637313843 }, { "epoch": 4.1520170985840235, "step": 15541, "train/loss_ctc": 0.371757447719574, "train/loss_error": 0.41102004051208496, "train/loss_total": 0.4031675159931183 }, { "epoch": 4.152284263959391, "step": 15542, "train/loss_ctc": 1.5931382179260254, "train/loss_error": 0.4685199558734894, "train/loss_total": 0.6934436559677124 }, { "epoch": 4.152551429334758, "step": 15543, "train/loss_ctc": 0.6843084096908569, "train/loss_error": 0.42828482389450073, "train/loss_total": 0.4794895350933075 }, { "epoch": 4.1528185947101255, "step": 15544, "train/loss_ctc": 0.36019787192344666, 
"train/loss_error": 0.3857105076313019, "train/loss_total": 0.3806079924106598 }, { "epoch": 4.153085760085493, "step": 15545, "train/loss_ctc": 0.917604923248291, "train/loss_error": 0.4788109064102173, "train/loss_total": 0.5665697455406189 }, { "epoch": 4.15335292546086, "step": 15546, "train/loss_ctc": 0.43533164262771606, "train/loss_error": 0.4103569984436035, "train/loss_total": 0.415351927280426 }, { "epoch": 4.1536200908362275, "step": 15547, "train/loss_ctc": 1.0285083055496216, "train/loss_error": 0.40211039781570435, "train/loss_total": 0.5273900032043457 }, { "epoch": 4.153887256211595, "step": 15548, "train/loss_ctc": 0.8212331533432007, "train/loss_error": 0.4785762429237366, "train/loss_total": 0.5471076369285583 }, { "epoch": 4.154154421586962, "step": 15549, "train/loss_ctc": 0.4054749608039856, "train/loss_error": 0.4349123239517212, "train/loss_total": 0.429024875164032 }, { "epoch": 4.1544215869623295, "grad_norm": 1.6603254079818726, "learning_rate": 5.086294416243655e-06, "loss": 0.4917, "step": 15550 }, { "epoch": 4.1544215869623295, "step": 15550, "train/loss_ctc": 0.3479476571083069, "train/loss_error": 0.3336740732192993, "train/loss_total": 0.3365287780761719 }, { "epoch": 4.154688752337697, "step": 15551, "train/loss_ctc": 0.6662983894348145, "train/loss_error": 0.40881484746932983, "train/loss_total": 0.46031156182289124 }, { "epoch": 4.154955917713064, "step": 15552, "train/loss_ctc": 0.943744421005249, "train/loss_error": 0.4369896948337555, "train/loss_total": 0.5383406281471252 }, { "epoch": 4.155223083088432, "step": 15553, "train/loss_ctc": 0.4725380539894104, "train/loss_error": 0.41810542345046997, "train/loss_total": 0.4289919435977936 }, { "epoch": 4.155490248463799, "step": 15554, "train/loss_ctc": 0.6206972002983093, "train/loss_error": 0.4534608721733093, "train/loss_total": 0.4869081377983093 }, { "epoch": 4.155757413839167, "step": 15555, "train/loss_ctc": 0.7804206609725952, "train/loss_error": 0.4684557020664215, 
"train/loss_total": 0.5308486819267273 }, { "epoch": 4.156024579214534, "step": 15556, "train/loss_ctc": 0.6739174127578735, "train/loss_error": 0.4489969313144684, "train/loss_total": 0.4939810633659363 }, { "epoch": 4.156291744589901, "step": 15557, "train/loss_ctc": 0.8409999012947083, "train/loss_error": 0.4044851064682007, "train/loss_total": 0.4917880892753601 }, { "epoch": 4.156558909965269, "step": 15558, "train/loss_ctc": 0.7102015018463135, "train/loss_error": 0.4868335723876953, "train/loss_total": 0.531507134437561 }, { "epoch": 4.156826075340636, "step": 15559, "train/loss_ctc": 0.48476123809814453, "train/loss_error": 0.4540916681289673, "train/loss_total": 0.4602256119251251 }, { "epoch": 4.157093240716003, "grad_norm": 1.2576946020126343, "learning_rate": 5.070264493721614e-06, "loss": 0.4759, "step": 15560 }, { "epoch": 4.157093240716003, "step": 15560, "train/loss_ctc": 0.4061387777328491, "train/loss_error": 0.41771090030670166, "train/loss_total": 0.41539648175239563 }, { "epoch": 4.157360406091371, "step": 15561, "train/loss_ctc": 0.6888172030448914, "train/loss_error": 0.49901893734931946, "train/loss_total": 0.5369786024093628 }, { "epoch": 4.157627571466738, "step": 15562, "train/loss_ctc": 0.5175023078918457, "train/loss_error": 0.5066541433334351, "train/loss_total": 0.508823812007904 }, { "epoch": 4.157894736842105, "step": 15563, "train/loss_ctc": 0.49562346935272217, "train/loss_error": 0.4108002185821533, "train/loss_total": 0.427764892578125 }, { "epoch": 4.158161902217473, "step": 15564, "train/loss_ctc": 1.0368844270706177, "train/loss_error": 0.3888912796974182, "train/loss_total": 0.5184898972511292 }, { "epoch": 4.15842906759284, "step": 15565, "train/loss_ctc": 0.23086115717887878, "train/loss_error": 0.4002564549446106, "train/loss_total": 0.36637741327285767 }, { "epoch": 4.158696232968207, "step": 15566, "train/loss_ctc": 0.6086284518241882, "train/loss_error": 0.3980569541454315, "train/loss_total": 0.4401712417602539 }, { 
"epoch": 4.158963398343575, "step": 15567, "train/loss_ctc": 0.5487953424453735, "train/loss_error": 0.40475285053253174, "train/loss_total": 0.4335613548755646 }, { "epoch": 4.159230563718942, "step": 15568, "train/loss_ctc": 2.313185214996338, "train/loss_error": 0.5087689161300659, "train/loss_total": 0.8696521520614624 }, { "epoch": 4.159497729094309, "step": 15569, "train/loss_ctc": 0.6408257484436035, "train/loss_error": 0.4792889654636383, "train/loss_total": 0.5115963220596313 }, { "epoch": 4.159764894469677, "grad_norm": 1.9091401100158691, "learning_rate": 5.054234571199573e-06, "loss": 0.5029, "step": 15570 }, { "epoch": 4.159764894469677, "step": 15570, "train/loss_ctc": 0.4941880702972412, "train/loss_error": 0.458149790763855, "train/loss_total": 0.4653574526309967 }, { "epoch": 4.160032059845044, "step": 15571, "train/loss_ctc": 0.8764479160308838, "train/loss_error": 0.4778800904750824, "train/loss_total": 0.5575936436653137 }, { "epoch": 4.160299225220411, "step": 15572, "train/loss_ctc": 0.5483500957489014, "train/loss_error": 0.4984423816204071, "train/loss_total": 0.508423924446106 }, { "epoch": 4.160566390595779, "step": 15573, "train/loss_ctc": 1.2617592811584473, "train/loss_error": 0.42888447642326355, "train/loss_total": 0.5954594612121582 }, { "epoch": 4.160833555971146, "step": 15574, "train/loss_ctc": 0.782598614692688, "train/loss_error": 0.46100616455078125, "train/loss_total": 0.5253246426582336 }, { "epoch": 4.161100721346513, "step": 15575, "train/loss_ctc": 0.9743310809135437, "train/loss_error": 0.3693486452102661, "train/loss_total": 0.4903451204299927 }, { "epoch": 4.161367886721881, "step": 15576, "train/loss_ctc": 0.7853521704673767, "train/loss_error": 0.4446021318435669, "train/loss_total": 0.5127521753311157 }, { "epoch": 4.161635052097248, "step": 15577, "train/loss_ctc": 0.4012277126312256, "train/loss_error": 0.4129014313220978, "train/loss_total": 0.4105667173862457 }, { "epoch": 4.1619022174726155, "step": 15578, 
"train/loss_ctc": 0.5175676345825195, "train/loss_error": 0.41206687688827515, "train/loss_total": 0.433167040348053 }, { "epoch": 4.162169382847983, "step": 15579, "train/loss_ctc": 1.3311070203781128, "train/loss_error": 0.42766979336738586, "train/loss_total": 0.6083572506904602 }, { "epoch": 4.16243654822335, "grad_norm": 2.4791784286499023, "learning_rate": 5.038204648677532e-06, "loss": 0.5107, "step": 15580 }, { "epoch": 4.16243654822335, "step": 15580, "train/loss_ctc": 0.37651586532592773, "train/loss_error": 0.47200825810432434, "train/loss_total": 0.45290976762771606 }, { "epoch": 4.1627037135987175, "step": 15581, "train/loss_ctc": 0.6394633054733276, "train/loss_error": 0.45430368185043335, "train/loss_total": 0.4913356304168701 }, { "epoch": 4.162970878974085, "step": 15582, "train/loss_ctc": 0.3490973711013794, "train/loss_error": 0.40660539269447327, "train/loss_total": 0.3951038122177124 }, { "epoch": 4.163238044349452, "step": 15583, "train/loss_ctc": 0.5962154865264893, "train/loss_error": 0.4117651879787445, "train/loss_total": 0.44865524768829346 }, { "epoch": 4.1635052097248195, "step": 15584, "train/loss_ctc": 0.8499664664268494, "train/loss_error": 0.43349045515060425, "train/loss_total": 0.5167856812477112 }, { "epoch": 4.163772375100187, "step": 15585, "train/loss_ctc": 0.8653647899627686, "train/loss_error": 0.47873735427856445, "train/loss_total": 0.5560628175735474 }, { "epoch": 4.164039540475555, "step": 15586, "train/loss_ctc": 0.2748609185218811, "train/loss_error": 0.3931455910205841, "train/loss_total": 0.3694886565208435 }, { "epoch": 4.1643067058509216, "step": 15587, "train/loss_ctc": 0.4342052638530731, "train/loss_error": 0.39724215865135193, "train/loss_total": 0.4046347737312317 }, { "epoch": 4.164573871226289, "step": 15588, "train/loss_ctc": 1.1798274517059326, "train/loss_error": 0.5282784104347229, "train/loss_total": 0.6585882306098938 }, { "epoch": 4.164841036601657, "step": 15589, "train/loss_ctc": 0.3992501497268677, 
"train/loss_error": 0.43736976385116577, "train/loss_total": 0.4297458529472351 }, { "epoch": 4.165108201977024, "grad_norm": 3.208181381225586, "learning_rate": 5.022174726155491e-06, "loss": 0.4723, "step": 15590 }, { "epoch": 4.165108201977024, "step": 15590, "train/loss_ctc": 0.47362908720970154, "train/loss_error": 0.4464837908744812, "train/loss_total": 0.45191285014152527 }, { "epoch": 4.165375367352391, "step": 15591, "train/loss_ctc": 0.5470961332321167, "train/loss_error": 0.42965489625930786, "train/loss_total": 0.4531431496143341 }, { "epoch": 4.165642532727759, "step": 15592, "train/loss_ctc": 0.5633503198623657, "train/loss_error": 0.41358521580696106, "train/loss_total": 0.44353824853897095 }, { "epoch": 4.165909698103126, "step": 15593, "train/loss_ctc": 0.45138898491859436, "train/loss_error": 0.4247521758079529, "train/loss_total": 0.43007954955101013 }, { "epoch": 4.166176863478493, "step": 15594, "train/loss_ctc": 0.29314959049224854, "train/loss_error": 0.39861536026000977, "train/loss_total": 0.37752223014831543 }, { "epoch": 4.166444028853861, "step": 15595, "train/loss_ctc": 0.9864617586135864, "train/loss_error": 0.4993847608566284, "train/loss_total": 0.5968002080917358 }, { "epoch": 4.166711194229228, "step": 15596, "train/loss_ctc": 0.4423249065876007, "train/loss_error": 0.42298564314842224, "train/loss_total": 0.4268535077571869 }, { "epoch": 4.166978359604595, "step": 15597, "train/loss_ctc": 0.43430253863334656, "train/loss_error": 0.40840867161750793, "train/loss_total": 0.41358745098114014 }, { "epoch": 4.167245524979963, "step": 15598, "train/loss_ctc": 0.8116899728775024, "train/loss_error": 0.47551771998405457, "train/loss_total": 0.542752206325531 }, { "epoch": 4.16751269035533, "step": 15599, "train/loss_ctc": 0.47413352131843567, "train/loss_error": 0.3788676857948303, "train/loss_total": 0.3979208469390869 }, { "epoch": 4.167779855730697, "grad_norm": 1.5091965198516846, "learning_rate": 5.006144803633449e-06, "loss": 
0.4534, "step": 15600 }, { "epoch": 4.167779855730697, "step": 15600, "train/loss_ctc": 0.6658726334571838, "train/loss_error": 0.457304984331131, "train/loss_total": 0.49901849031448364 }, { "epoch": 4.168047021106065, "step": 15601, "train/loss_ctc": 0.38307881355285645, "train/loss_error": 0.4034266173839569, "train/loss_total": 0.3993570804595947 }, { "epoch": 4.168314186481432, "step": 15602, "train/loss_ctc": 1.2317981719970703, "train/loss_error": 0.46187642216682434, "train/loss_total": 0.6158607602119446 }, { "epoch": 4.168581351856799, "step": 15603, "train/loss_ctc": 0.3830426037311554, "train/loss_error": 0.38335859775543213, "train/loss_total": 0.3832954168319702 }, { "epoch": 4.168848517232167, "step": 15604, "train/loss_ctc": 0.8849749565124512, "train/loss_error": 0.4699886441230774, "train/loss_total": 0.5529859066009521 }, { "epoch": 4.169115682607534, "step": 15605, "train/loss_ctc": 0.7774423956871033, "train/loss_error": 0.3773348331451416, "train/loss_total": 0.457356333732605 }, { "epoch": 4.169382847982901, "step": 15606, "train/loss_ctc": 0.594418466091156, "train/loss_error": 0.506881833076477, "train/loss_total": 0.5243891477584839 }, { "epoch": 4.169650013358269, "step": 15607, "train/loss_ctc": 0.9715338349342346, "train/loss_error": 0.375497967004776, "train/loss_total": 0.4947051405906677 }, { "epoch": 4.169917178733636, "step": 15608, "train/loss_ctc": 0.5163537263870239, "train/loss_error": 0.4944092631340027, "train/loss_total": 0.4987981617450714 }, { "epoch": 4.170184344109003, "step": 15609, "train/loss_ctc": 0.654960036277771, "train/loss_error": 0.43861204385757446, "train/loss_total": 0.48188167810440063 }, { "epoch": 4.170451509484371, "grad_norm": 1.2281348705291748, "learning_rate": 4.990114881111408e-06, "loss": 0.4908, "step": 15610 }, { "epoch": 4.170451509484371, "step": 15610, "train/loss_ctc": 0.6326118111610413, "train/loss_error": 0.43096408247947693, "train/loss_total": 0.4712936282157898 }, { "epoch": 
4.170718674859738, "step": 15611, "train/loss_ctc": 0.7576841115951538, "train/loss_error": 0.3675577938556671, "train/loss_total": 0.4455830752849579 }, { "epoch": 4.170985840235105, "step": 15612, "train/loss_ctc": 1.4311091899871826, "train/loss_error": 0.45637983083724976, "train/loss_total": 0.6513257026672363 }, { "epoch": 4.171253005610473, "step": 15613, "train/loss_ctc": 0.8226907253265381, "train/loss_error": 0.436890184879303, "train/loss_total": 0.514050304889679 }, { "epoch": 4.17152017098584, "step": 15614, "train/loss_ctc": 0.7647613286972046, "train/loss_error": 0.4029921591281891, "train/loss_total": 0.47534602880477905 }, { "epoch": 4.1717873363612075, "step": 15615, "train/loss_ctc": 0.29470881819725037, "train/loss_error": 0.385742723941803, "train/loss_total": 0.36753594875335693 }, { "epoch": 4.172054501736575, "step": 15616, "train/loss_ctc": 0.4424061179161072, "train/loss_error": 0.38806480169296265, "train/loss_total": 0.3989330530166626 }, { "epoch": 4.172321667111943, "step": 15617, "train/loss_ctc": 0.4887448251247406, "train/loss_error": 0.45299404859542847, "train/loss_total": 0.4601442217826843 }, { "epoch": 4.1725888324873095, "step": 15618, "train/loss_ctc": 0.8356398940086365, "train/loss_error": 0.4011082649230957, "train/loss_total": 0.4880145788192749 }, { "epoch": 4.172855997862677, "step": 15619, "train/loss_ctc": 0.8845597505569458, "train/loss_error": 0.4012903571128845, "train/loss_total": 0.4979442358016968 }, { "epoch": 4.173123163238045, "grad_norm": 1.7059767246246338, "learning_rate": 4.9740849585893674e-06, "loss": 0.477, "step": 15620 }, { "epoch": 4.173123163238045, "step": 15620, "train/loss_ctc": 0.2771507501602173, "train/loss_error": 0.41808709502220154, "train/loss_total": 0.3898998200893402 }, { "epoch": 4.1733903286134115, "step": 15621, "train/loss_ctc": 0.6397691965103149, "train/loss_error": 0.47980281710624695, "train/loss_total": 0.5117961168289185 }, { "epoch": 4.173657493988779, "step": 15622, 
"train/loss_ctc": 1.0537316799163818, "train/loss_error": 0.4623117446899414, "train/loss_total": 0.5805957317352295 }, { "epoch": 4.173924659364147, "step": 15623, "train/loss_ctc": 0.8642156720161438, "train/loss_error": 0.5077040791511536, "train/loss_total": 0.5790064334869385 }, { "epoch": 4.174191824739514, "step": 15624, "train/loss_ctc": 0.60090172290802, "train/loss_error": 0.42048677802085876, "train/loss_total": 0.45656976103782654 }, { "epoch": 4.174458990114881, "step": 15625, "train/loss_ctc": 0.6795713901519775, "train/loss_error": 0.4959944486618042, "train/loss_total": 0.5327098369598389 }, { "epoch": 4.174726155490249, "step": 15626, "train/loss_ctc": 0.5004340410232544, "train/loss_error": 0.4863949418067932, "train/loss_total": 0.4892027676105499 }, { "epoch": 4.174993320865616, "step": 15627, "train/loss_ctc": 0.7560859322547913, "train/loss_error": 0.45475083589553833, "train/loss_total": 0.5150178670883179 }, { "epoch": 4.175260486240983, "step": 15628, "train/loss_ctc": 0.5080572366714478, "train/loss_error": 0.4566340446472168, "train/loss_total": 0.4669187068939209 }, { "epoch": 4.175527651616351, "step": 15629, "train/loss_ctc": 0.24866315722465515, "train/loss_error": 0.37056249380111694, "train/loss_total": 0.34618261456489563 }, { "epoch": 4.175794816991718, "grad_norm": 1.02495539188385, "learning_rate": 4.9580550360673255e-06, "loss": 0.4868, "step": 15630 }, { "epoch": 4.175794816991718, "step": 15630, "train/loss_ctc": 0.752206027507782, "train/loss_error": 0.4207236170768738, "train/loss_total": 0.4870201349258423 }, { "epoch": 4.176061982367085, "step": 15631, "train/loss_ctc": 0.8048895597457886, "train/loss_error": 0.5024747848510742, "train/loss_total": 0.562957763671875 }, { "epoch": 4.176329147742453, "step": 15632, "train/loss_ctc": 0.3746957778930664, "train/loss_error": 0.4864969551563263, "train/loss_total": 0.4641367197036743 }, { "epoch": 4.17659631311782, "step": 15633, "train/loss_ctc": 0.3887881636619568, 
"train/loss_error": 0.4203629493713379, "train/loss_total": 0.4140479862689972 }, { "epoch": 4.176863478493187, "step": 15634, "train/loss_ctc": 0.6576902270317078, "train/loss_error": 0.42227426171302795, "train/loss_total": 0.4693574905395508 }, { "epoch": 4.177130643868555, "step": 15635, "train/loss_ctc": 1.4606633186340332, "train/loss_error": 0.5135158896446228, "train/loss_total": 0.702945351600647 }, { "epoch": 4.177397809243922, "step": 15636, "train/loss_ctc": 1.0720081329345703, "train/loss_error": 0.398461252450943, "train/loss_total": 0.5331706404685974 }, { "epoch": 4.177664974619289, "step": 15637, "train/loss_ctc": 0.801784873008728, "train/loss_error": 0.4576825201511383, "train/loss_total": 0.5265030264854431 }, { "epoch": 4.177932139994657, "step": 15638, "train/loss_ctc": 0.7241553664207458, "train/loss_error": 0.3959127962589264, "train/loss_total": 0.46156132221221924 }, { "epoch": 4.178199305370024, "step": 15639, "train/loss_ctc": 0.7294867038726807, "train/loss_error": 0.46068456768989563, "train/loss_total": 0.5144450068473816 }, { "epoch": 4.178466470745391, "grad_norm": 1.923114538192749, "learning_rate": 4.9420251135452845e-06, "loss": 0.5136, "step": 15640 }, { "epoch": 4.178466470745391, "step": 15640, "train/loss_ctc": 0.510610044002533, "train/loss_error": 0.44545698165893555, "train/loss_total": 0.4584876000881195 }, { "epoch": 4.178733636120759, "step": 15641, "train/loss_ctc": 0.6972205638885498, "train/loss_error": 0.494824081659317, "train/loss_total": 0.5353033542633057 }, { "epoch": 4.179000801496126, "step": 15642, "train/loss_ctc": 0.7316200733184814, "train/loss_error": 0.4404999315738678, "train/loss_total": 0.49872398376464844 }, { "epoch": 4.179267966871493, "step": 15643, "train/loss_ctc": 0.3546072244644165, "train/loss_error": 0.4616420269012451, "train/loss_total": 0.44023507833480835 }, { "epoch": 4.179535132246861, "step": 15644, "train/loss_ctc": 0.5124115943908691, "train/loss_error": 0.4097541570663452, 
"train/loss_total": 0.43028566241264343 }, { "epoch": 4.179802297622228, "step": 15645, "train/loss_ctc": 0.5494830012321472, "train/loss_error": 0.4673258364200592, "train/loss_total": 0.48375728726387024 }, { "epoch": 4.180069462997595, "step": 15646, "train/loss_ctc": 0.6703424453735352, "train/loss_error": 0.4902931749820709, "train/loss_total": 0.5263030529022217 }, { "epoch": 4.180336628372963, "step": 15647, "train/loss_ctc": 0.711997389793396, "train/loss_error": 0.42522385716438293, "train/loss_total": 0.4825785756111145 }, { "epoch": 4.18060379374833, "step": 15648, "train/loss_ctc": 0.7209518551826477, "train/loss_error": 0.3553115129470825, "train/loss_total": 0.42843958735466003 }, { "epoch": 4.180870959123697, "step": 15649, "train/loss_ctc": 0.49962764978408813, "train/loss_error": 0.4455973505973816, "train/loss_total": 0.4564034342765808 }, { "epoch": 4.181138124499065, "grad_norm": 1.4502156972885132, "learning_rate": 4.925995191023243e-06, "loss": 0.4741, "step": 15650 }, { "epoch": 4.181138124499065, "step": 15650, "train/loss_ctc": 1.4105925559997559, "train/loss_error": 0.4424702227115631, "train/loss_total": 0.6360946893692017 }, { "epoch": 4.181405289874432, "step": 15651, "train/loss_ctc": 0.932976245880127, "train/loss_error": 0.504802942276001, "train/loss_total": 0.5904375910758972 }, { "epoch": 4.1816724552497995, "step": 15652, "train/loss_ctc": 0.7539291977882385, "train/loss_error": 0.4511431157588959, "train/loss_total": 0.5117003321647644 }, { "epoch": 4.181939620625167, "step": 15653, "train/loss_ctc": 0.8265488147735596, "train/loss_error": 0.4266812801361084, "train/loss_total": 0.5066547989845276 }, { "epoch": 4.182206786000535, "step": 15654, "train/loss_ctc": 0.5177421569824219, "train/loss_error": 0.45618683099746704, "train/loss_total": 0.4684979021549225 }, { "epoch": 4.1824739513759015, "step": 15655, "train/loss_ctc": 0.31264856457710266, "train/loss_error": 0.4335213005542755, "train/loss_total": 0.4093467593193054 }, { 
"epoch": 4.182741116751269, "step": 15656, "train/loss_ctc": 0.3767545521259308, "train/loss_error": 0.45335257053375244, "train/loss_total": 0.43803298473358154 }, { "epoch": 4.183008282126637, "step": 15657, "train/loss_ctc": 0.5250718593597412, "train/loss_error": 0.4991820156574249, "train/loss_total": 0.5043599605560303 }, { "epoch": 4.1832754475020035, "step": 15658, "train/loss_ctc": 0.8721218109130859, "train/loss_error": 0.4085090160369873, "train/loss_total": 0.5012316107749939 }, { "epoch": 4.183542612877371, "step": 15659, "train/loss_ctc": 0.8873754143714905, "train/loss_error": 0.4812520444393158, "train/loss_total": 0.5624766945838928 }, { "epoch": 4.183809778252739, "grad_norm": 1.4644718170166016, "learning_rate": 4.909965268501202e-06, "loss": 0.5129, "step": 15660 }, { "epoch": 4.183809778252739, "step": 15660, "train/loss_ctc": 0.9452325105667114, "train/loss_error": 0.43442878127098083, "train/loss_total": 0.536589503288269 }, { "epoch": 4.184076943628106, "step": 15661, "train/loss_ctc": 0.4917198121547699, "train/loss_error": 0.4102369546890259, "train/loss_total": 0.4265335500240326 }, { "epoch": 4.184344109003473, "step": 15662, "train/loss_ctc": 0.7011702060699463, "train/loss_error": 0.43486446142196655, "train/loss_total": 0.48812562227249146 }, { "epoch": 4.184611274378841, "step": 15663, "train/loss_ctc": 0.38983801007270813, "train/loss_error": 0.44169655442237854, "train/loss_total": 0.43132486939430237 }, { "epoch": 4.184878439754208, "step": 15664, "train/loss_ctc": 0.2613433003425598, "train/loss_error": 0.4217976927757263, "train/loss_total": 0.3897068202495575 }, { "epoch": 4.185145605129575, "step": 15665, "train/loss_ctc": 0.15861962735652924, "train/loss_error": 0.48043525218963623, "train/loss_total": 0.41607213020324707 }, { "epoch": 4.185412770504943, "step": 15666, "train/loss_ctc": 0.2746158242225647, "train/loss_error": 0.3914603590965271, "train/loss_total": 0.3680914640426636 }, { "epoch": 4.18567993588031, "step": 
15667, "train/loss_ctc": 0.6140376329421997, "train/loss_error": 0.46880295872688293, "train/loss_total": 0.4978499114513397 }, { "epoch": 4.185947101255677, "step": 15668, "train/loss_ctc": 0.6992682814598083, "train/loss_error": 0.39340004324913025, "train/loss_total": 0.45457369089126587 }, { "epoch": 4.186214266631045, "step": 15669, "train/loss_ctc": 0.4319824278354645, "train/loss_error": 0.4248858094215393, "train/loss_total": 0.4263051450252533 }, { "epoch": 4.186481432006412, "grad_norm": 1.2982351779937744, "learning_rate": 4.893935345979161e-06, "loss": 0.4435, "step": 15670 }, { "epoch": 4.186481432006412, "step": 15670, "train/loss_ctc": 0.5731706619262695, "train/loss_error": 0.42715173959732056, "train/loss_total": 0.4563555121421814 }, { "epoch": 4.186748597381779, "step": 15671, "train/loss_ctc": 1.3582463264465332, "train/loss_error": 0.47816386818885803, "train/loss_total": 0.6541804075241089 }, { "epoch": 4.187015762757147, "step": 15672, "train/loss_ctc": 0.5293532609939575, "train/loss_error": 0.4121721386909485, "train/loss_total": 0.4356083869934082 }, { "epoch": 4.187282928132514, "step": 15673, "train/loss_ctc": 0.29146674275398254, "train/loss_error": 0.41447460651397705, "train/loss_total": 0.38987302780151367 }, { "epoch": 4.187550093507881, "step": 15674, "train/loss_ctc": 0.285826712846756, "train/loss_error": 0.4640461504459381, "train/loss_total": 0.42840227484703064 }, { "epoch": 4.187817258883249, "step": 15675, "train/loss_ctc": 0.6260071992874146, "train/loss_error": 0.40698570013046265, "train/loss_total": 0.4507899880409241 }, { "epoch": 4.188084424258616, "step": 15676, "train/loss_ctc": 0.9028027653694153, "train/loss_error": 0.41133585572242737, "train/loss_total": 0.5096292495727539 }, { "epoch": 4.188351589633983, "step": 15677, "train/loss_ctc": 0.8807262778282166, "train/loss_error": 0.4405022859573364, "train/loss_total": 0.5285470485687256 }, { "epoch": 4.188618755009351, "step": 15678, "train/loss_ctc": 
0.12638522684574127, "train/loss_error": 0.36804068088531494, "train/loss_total": 0.3197095990180969 }, { "epoch": 4.188885920384718, "step": 15679, "train/loss_ctc": 0.9040708541870117, "train/loss_error": 0.4660697877407074, "train/loss_total": 0.5536700487136841 }, { "epoch": 4.189153085760085, "grad_norm": 1.654496431350708, "learning_rate": 4.87790542345712e-06, "loss": 0.4727, "step": 15680 }, { "epoch": 4.189153085760085, "step": 15680, "train/loss_ctc": 0.9410953521728516, "train/loss_error": 0.4519460201263428, "train/loss_total": 0.5497758984565735 }, { "epoch": 4.189420251135453, "step": 15681, "train/loss_ctc": 0.8556430339813232, "train/loss_error": 0.4211384654045105, "train/loss_total": 0.5080394148826599 }, { "epoch": 4.18968741651082, "step": 15682, "train/loss_ctc": 0.5314472913742065, "train/loss_error": 0.4963226020336151, "train/loss_total": 0.5033475756645203 }, { "epoch": 4.189954581886187, "step": 15683, "train/loss_ctc": 1.266481876373291, "train/loss_error": 0.5168685913085938, "train/loss_total": 0.6667912602424622 }, { "epoch": 4.190221747261555, "step": 15684, "train/loss_ctc": 0.8401463031768799, "train/loss_error": 0.43992483615875244, "train/loss_total": 0.5199691653251648 }, { "epoch": 4.190488912636923, "step": 15685, "train/loss_ctc": 1.033954381942749, "train/loss_error": 0.5405874252319336, "train/loss_total": 0.6392608284950256 }, { "epoch": 4.190756078012289, "step": 15686, "train/loss_ctc": 0.5483368635177612, "train/loss_error": 0.39781543612480164, "train/loss_total": 0.42791974544525146 }, { "epoch": 4.191023243387657, "step": 15687, "train/loss_ctc": 1.4490032196044922, "train/loss_error": 0.38148874044418335, "train/loss_total": 0.5949916839599609 }, { "epoch": 4.191290408763025, "step": 15688, "train/loss_ctc": 0.3657515347003937, "train/loss_error": 0.481450617313385, "train/loss_total": 0.4583108127117157 }, { "epoch": 4.1915575741383915, "step": 15689, "train/loss_ctc": 0.8616986274719238, "train/loss_error": 
0.39251571893692017, "train/loss_total": 0.4863523244857788 }, { "epoch": 4.191824739513759, "grad_norm": 4.808332443237305, "learning_rate": 4.861875500935079e-06, "loss": 0.5355, "step": 15690 }, { "epoch": 4.191824739513759, "step": 15690, "train/loss_ctc": 1.2132532596588135, "train/loss_error": 0.5085193514823914, "train/loss_total": 0.6494661569595337 }, { "epoch": 4.192091904889127, "step": 15691, "train/loss_ctc": 0.20351985096931458, "train/loss_error": 0.3803142309188843, "train/loss_total": 0.3449553847312927 }, { "epoch": 4.1923590702644935, "step": 15692, "train/loss_ctc": 0.7741466760635376, "train/loss_error": 0.4186859726905823, "train/loss_total": 0.4897781014442444 }, { "epoch": 4.192626235639861, "step": 15693, "train/loss_ctc": 0.6388854384422302, "train/loss_error": 0.4124198257923126, "train/loss_total": 0.45771294832229614 }, { "epoch": 4.192893401015229, "step": 15694, "train/loss_ctc": 0.369183212518692, "train/loss_error": 0.4070785641670227, "train/loss_total": 0.3994995057582855 }, { "epoch": 4.1931605663905955, "step": 15695, "train/loss_ctc": 0.2758105397224426, "train/loss_error": 0.42634254693984985, "train/loss_total": 0.3962361514568329 }, { "epoch": 4.193427731765963, "step": 15696, "train/loss_ctc": 0.7490476965904236, "train/loss_error": 0.42199912667274475, "train/loss_total": 0.487408846616745 }, { "epoch": 4.193694897141331, "step": 15697, "train/loss_ctc": 0.78627610206604, "train/loss_error": 0.46255484223365784, "train/loss_total": 0.5272991061210632 }, { "epoch": 4.193962062516698, "step": 15698, "train/loss_ctc": 1.8660320043563843, "train/loss_error": 0.5381313562393188, "train/loss_total": 0.8037115335464478 }, { "epoch": 4.194229227892065, "step": 15699, "train/loss_ctc": 0.8368332982063293, "train/loss_error": 0.5033600330352783, "train/loss_total": 0.5700547099113464 }, { "epoch": 4.194496393267433, "grad_norm": 2.8416600227355957, "learning_rate": 4.845845578413037e-06, "loss": 0.5126, "step": 15700 }, { "epoch": 
4.194496393267433, "step": 15700, "train/loss_ctc": 0.8455232381820679, "train/loss_error": 0.4395085871219635, "train/loss_total": 0.5207115411758423 }, { "epoch": 4.1947635586428, "step": 15701, "train/loss_ctc": 0.3231295347213745, "train/loss_error": 0.4736613929271698, "train/loss_total": 0.4435550272464752 }, { "epoch": 4.195030724018167, "step": 15702, "train/loss_ctc": 0.60392165184021, "train/loss_error": 0.4597840905189514, "train/loss_total": 0.4886116087436676 }, { "epoch": 4.195297889393535, "step": 15703, "train/loss_ctc": 0.49997830390930176, "train/loss_error": 0.4202759265899658, "train/loss_total": 0.43621641397476196 }, { "epoch": 4.195565054768902, "step": 15704, "train/loss_ctc": 0.34552833437919617, "train/loss_error": 0.46342939138412476, "train/loss_total": 0.43984919786453247 }, { "epoch": 4.195832220144269, "step": 15705, "train/loss_ctc": 0.6218643188476562, "train/loss_error": 0.4892479181289673, "train/loss_total": 0.515771210193634 }, { "epoch": 4.196099385519637, "step": 15706, "train/loss_ctc": 0.6017065048217773, "train/loss_error": 0.44490116834640503, "train/loss_total": 0.47626224160194397 }, { "epoch": 4.196366550895004, "step": 15707, "train/loss_ctc": 0.42856836318969727, "train/loss_error": 0.44692498445510864, "train/loss_total": 0.44325366616249084 }, { "epoch": 4.196633716270371, "step": 15708, "train/loss_ctc": 0.6672405004501343, "train/loss_error": 0.4541620910167694, "train/loss_total": 0.4967777729034424 }, { "epoch": 4.196900881645739, "step": 15709, "train/loss_ctc": 0.6918076872825623, "train/loss_error": 0.37283578515052795, "train/loss_total": 0.4366301894187927 }, { "epoch": 4.197168047021106, "grad_norm": 2.2603535652160645, "learning_rate": 4.829815655890997e-06, "loss": 0.4698, "step": 15710 }, { "epoch": 4.197168047021106, "step": 15710, "train/loss_ctc": 0.8114814162254333, "train/loss_error": 0.4041267931461334, "train/loss_total": 0.48559772968292236 }, { "epoch": 4.197435212396473, "step": 15711, 
"train/loss_ctc": 0.6323877573013306, "train/loss_error": 0.41148635745048523, "train/loss_total": 0.4556666612625122 }, { "epoch": 4.197702377771841, "step": 15712, "train/loss_ctc": 0.5378834009170532, "train/loss_error": 0.4072931110858917, "train/loss_total": 0.433411180973053 }, { "epoch": 4.197969543147208, "step": 15713, "train/loss_ctc": 0.373929500579834, "train/loss_error": 0.39780640602111816, "train/loss_total": 0.3930310606956482 }, { "epoch": 4.198236708522575, "step": 15714, "train/loss_ctc": 0.3511542081832886, "train/loss_error": 0.45317885279655457, "train/loss_total": 0.4327739179134369 }, { "epoch": 4.198503873897943, "step": 15715, "train/loss_ctc": 0.7644084692001343, "train/loss_error": 0.4331800937652588, "train/loss_total": 0.4994257688522339 }, { "epoch": 4.198771039273311, "step": 15716, "train/loss_ctc": 0.7610880136489868, "train/loss_error": 0.4432556927204132, "train/loss_total": 0.5068221688270569 }, { "epoch": 4.199038204648677, "step": 15717, "train/loss_ctc": 1.4673939943313599, "train/loss_error": 0.46783819794654846, "train/loss_total": 0.6677494049072266 }, { "epoch": 4.199305370024045, "step": 15718, "train/loss_ctc": 0.5785026550292969, "train/loss_error": 0.3902035355567932, "train/loss_total": 0.42786338925361633 }, { "epoch": 4.199572535399413, "step": 15719, "train/loss_ctc": 0.9853124022483826, "train/loss_error": 0.4253462255001068, "train/loss_total": 0.537339448928833 }, { "epoch": 4.199839700774779, "grad_norm": 1.544664740562439, "learning_rate": 4.813785733368956e-06, "loss": 0.484, "step": 15720 }, { "epoch": 4.199839700774779, "step": 15720, "train/loss_ctc": 0.858526349067688, "train/loss_error": 0.42817094922065735, "train/loss_total": 0.5142420530319214 }, { "epoch": 4.200106866150147, "step": 15721, "train/loss_ctc": 1.0492569208145142, "train/loss_error": 0.4285939931869507, "train/loss_total": 0.5527266263961792 }, { "epoch": 4.200374031525515, "step": 15722, "train/loss_ctc": 0.9635434150695801, 
"train/loss_error": 0.42536768317222595, "train/loss_total": 0.5330028533935547 }, { "epoch": 4.2006411969008814, "step": 15723, "train/loss_ctc": 1.0232360363006592, "train/loss_error": 0.4438205063343048, "train/loss_total": 0.5597035884857178 }, { "epoch": 4.200908362276249, "step": 15724, "train/loss_ctc": 0.7228226661682129, "train/loss_error": 0.4066031277179718, "train/loss_total": 0.46984705328941345 }, { "epoch": 4.201175527651617, "step": 15725, "train/loss_ctc": 0.36488527059555054, "train/loss_error": 0.39223116636276245, "train/loss_total": 0.38676199316978455 }, { "epoch": 4.2014426930269835, "step": 15726, "train/loss_ctc": 0.586705207824707, "train/loss_error": 0.43437740206718445, "train/loss_total": 0.4648429751396179 }, { "epoch": 4.201709858402351, "step": 15727, "train/loss_ctc": 1.098931074142456, "train/loss_error": 0.47219574451446533, "train/loss_total": 0.5975428223609924 }, { "epoch": 4.201977023777719, "step": 15728, "train/loss_ctc": 0.800584614276886, "train/loss_error": 0.4091021418571472, "train/loss_total": 0.487398624420166 }, { "epoch": 4.2022441891530855, "step": 15729, "train/loss_ctc": 0.5963193774223328, "train/loss_error": 0.43990814685821533, "train/loss_total": 0.4711904227733612 }, { "epoch": 4.202511354528453, "grad_norm": 1.7116013765335083, "learning_rate": 4.797755810846914e-06, "loss": 0.5037, "step": 15730 }, { "epoch": 4.202511354528453, "step": 15730, "train/loss_ctc": 0.520666241645813, "train/loss_error": 0.4703104794025421, "train/loss_total": 0.48038163781166077 }, { "epoch": 4.202778519903821, "step": 15731, "train/loss_ctc": 0.6325377225875854, "train/loss_error": 0.43133121728897095, "train/loss_total": 0.47157251834869385 }, { "epoch": 4.2030456852791875, "step": 15732, "train/loss_ctc": 1.0228676795959473, "train/loss_error": 0.4231584072113037, "train/loss_total": 0.5431002378463745 }, { "epoch": 4.203312850654555, "step": 15733, "train/loss_ctc": 0.7457224130630493, "train/loss_error": 
0.4734675884246826, "train/loss_total": 0.5279185771942139 }, { "epoch": 4.203580016029923, "step": 15734, "train/loss_ctc": 0.7709822058677673, "train/loss_error": 0.4246545135974884, "train/loss_total": 0.49392005801200867 }, { "epoch": 4.20384718140529, "step": 15735, "train/loss_ctc": 0.3537345230579376, "train/loss_error": 0.399998277425766, "train/loss_total": 0.39074552059173584 }, { "epoch": 4.204114346780657, "step": 15736, "train/loss_ctc": 0.9587653279304504, "train/loss_error": 0.39391419291496277, "train/loss_total": 0.5068844556808472 }, { "epoch": 4.204381512156025, "step": 15737, "train/loss_ctc": 1.3237760066986084, "train/loss_error": 0.47151297330856323, "train/loss_total": 0.6419656276702881 }, { "epoch": 4.204648677531392, "step": 15738, "train/loss_ctc": 0.6859114170074463, "train/loss_error": 0.4129578471183777, "train/loss_total": 0.46754854917526245 }, { "epoch": 4.204915842906759, "step": 15739, "train/loss_ctc": 0.46815693378448486, "train/loss_error": 0.471465140581131, "train/loss_total": 0.47080349922180176 }, { "epoch": 4.205183008282127, "grad_norm": 2.9239745140075684, "learning_rate": 4.781725888324873e-06, "loss": 0.4995, "step": 15740 }, { "epoch": 4.205183008282127, "step": 15740, "train/loss_ctc": 1.6018264293670654, "train/loss_error": 0.488019198179245, "train/loss_total": 0.7107806205749512 }, { "epoch": 4.205450173657494, "step": 15741, "train/loss_ctc": 0.5771535038948059, "train/loss_error": 0.43411001563072205, "train/loss_total": 0.4627187252044678 }, { "epoch": 4.205717339032861, "step": 15742, "train/loss_ctc": 0.40947896242141724, "train/loss_error": 0.49308523535728455, "train/loss_total": 0.47636398673057556 }, { "epoch": 4.205984504408229, "step": 15743, "train/loss_ctc": 1.4349005222320557, "train/loss_error": 0.5053271055221558, "train/loss_total": 0.6912418007850647 }, { "epoch": 4.206251669783596, "step": 15744, "train/loss_ctc": 0.6497336626052856, "train/loss_error": 0.44229018688201904, "train/loss_total": 
0.4837788939476013 }, { "epoch": 4.206518835158963, "step": 15745, "train/loss_ctc": 1.057257890701294, "train/loss_error": 0.4331284761428833, "train/loss_total": 0.5579543709754944 }, { "epoch": 4.206786000534331, "step": 15746, "train/loss_ctc": 0.902294397354126, "train/loss_error": 0.45003873109817505, "train/loss_total": 0.5404898524284363 }, { "epoch": 4.207053165909698, "step": 15747, "train/loss_ctc": 0.6762446165084839, "train/loss_error": 0.442686527967453, "train/loss_total": 0.48939815163612366 }, { "epoch": 4.207320331285065, "step": 15748, "train/loss_ctc": 0.4302283525466919, "train/loss_error": 0.33931222558021545, "train/loss_total": 0.3574954569339752 }, { "epoch": 4.207587496660433, "step": 15749, "train/loss_ctc": 0.29063600301742554, "train/loss_error": 0.41518279910087585, "train/loss_total": 0.39027345180511475 }, { "epoch": 4.2078546620358, "grad_norm": 1.635751485824585, "learning_rate": 4.765695965802833e-06, "loss": 0.516, "step": 15750 }, { "epoch": 4.2078546620358, "step": 15750, "train/loss_ctc": 0.42903655767440796, "train/loss_error": 0.43337884545326233, "train/loss_total": 0.4325104057788849 }, { "epoch": 4.208121827411167, "step": 15751, "train/loss_ctc": 0.40634649991989136, "train/loss_error": 0.42499345541000366, "train/loss_total": 0.42126405239105225 }, { "epoch": 4.208388992786535, "step": 15752, "train/loss_ctc": 0.5846641659736633, "train/loss_error": 0.47217580676078796, "train/loss_total": 0.494673490524292 }, { "epoch": 4.208656158161903, "step": 15753, "train/loss_ctc": 0.55207359790802, "train/loss_error": 0.4109552204608917, "train/loss_total": 0.4391788840293884 }, { "epoch": 4.208923323537269, "step": 15754, "train/loss_ctc": 0.7499134540557861, "train/loss_error": 0.4549407660961151, "train/loss_total": 0.5139353275299072 }, { "epoch": 4.209190488912637, "step": 15755, "train/loss_ctc": 0.7988014817237854, "train/loss_error": 0.4450550675392151, "train/loss_total": 0.5158043503761292 }, { "epoch": 
4.209457654288005, "step": 15756, "train/loss_ctc": 0.7629194259643555, "train/loss_error": 0.42858555912971497, "train/loss_total": 0.495452344417572 }, { "epoch": 4.209724819663371, "step": 15757, "train/loss_ctc": 0.7594308853149414, "train/loss_error": 0.5525560975074768, "train/loss_total": 0.5939310789108276 }, { "epoch": 4.209991985038739, "step": 15758, "train/loss_ctc": 0.4243584871292114, "train/loss_error": 0.4198850989341736, "train/loss_total": 0.4207797944545746 }, { "epoch": 4.210259150414107, "step": 15759, "train/loss_ctc": 0.5251086950302124, "train/loss_error": 0.35426443815231323, "train/loss_total": 0.3884333074092865 }, { "epoch": 4.2105263157894735, "grad_norm": 1.8947659730911255, "learning_rate": 4.749666043280791e-06, "loss": 0.4716, "step": 15760 }, { "epoch": 4.2105263157894735, "step": 15760, "train/loss_ctc": 0.9394087195396423, "train/loss_error": 0.408939391374588, "train/loss_total": 0.5150332450866699 }, { "epoch": 4.210793481164841, "step": 15761, "train/loss_ctc": 0.8372607827186584, "train/loss_error": 0.4708063006401062, "train/loss_total": 0.5440971851348877 }, { "epoch": 4.211060646540209, "step": 15762, "train/loss_ctc": 0.36071568727493286, "train/loss_error": 0.46443483233451843, "train/loss_total": 0.4436910152435303 }, { "epoch": 4.2113278119155755, "step": 15763, "train/loss_ctc": 0.544625461101532, "train/loss_error": 0.3988494873046875, "train/loss_total": 0.4280047118663788 }, { "epoch": 4.211594977290943, "step": 15764, "train/loss_ctc": 0.5970432162284851, "train/loss_error": 0.4458378553390503, "train/loss_total": 0.47607892751693726 }, { "epoch": 4.211862142666311, "step": 15765, "train/loss_ctc": 0.30132031440734863, "train/loss_error": 0.4543860852718353, "train/loss_total": 0.423772931098938 }, { "epoch": 4.2121293080416775, "step": 15766, "train/loss_ctc": 0.48773735761642456, "train/loss_error": 0.4717714488506317, "train/loss_total": 0.4749646484851837 }, { "epoch": 4.212396473417045, "step": 15767, 
"train/loss_ctc": 0.46117696166038513, "train/loss_error": 0.4172714054584503, "train/loss_total": 0.4260525107383728 }, { "epoch": 4.212663638792413, "step": 15768, "train/loss_ctc": 1.3408313989639282, "train/loss_error": 0.43923601508140564, "train/loss_total": 0.6195551156997681 }, { "epoch": 4.2129308041677795, "step": 15769, "train/loss_ctc": 0.6802998781204224, "train/loss_error": 0.43537041544914246, "train/loss_total": 0.4843563437461853 }, { "epoch": 4.213197969543147, "grad_norm": 6.855772018432617, "learning_rate": 4.73363612075875e-06, "loss": 0.4836, "step": 15770 }, { "epoch": 4.213197969543147, "step": 15770, "train/loss_ctc": 0.6105411052703857, "train/loss_error": 0.3529011309146881, "train/loss_total": 0.4044291377067566 }, { "epoch": 4.213465134918515, "step": 15771, "train/loss_ctc": 0.694499135017395, "train/loss_error": 0.39037835597991943, "train/loss_total": 0.45120251178741455 }, { "epoch": 4.213732300293882, "step": 15772, "train/loss_ctc": 0.45484793186187744, "train/loss_error": 0.48705556988716125, "train/loss_total": 0.4806140661239624 }, { "epoch": 4.213999465669249, "step": 15773, "train/loss_ctc": 0.5408697724342346, "train/loss_error": 0.4949222505092621, "train/loss_total": 0.5041117668151855 }, { "epoch": 4.214266631044617, "step": 15774, "train/loss_ctc": 0.7075966596603394, "train/loss_error": 0.45573288202285767, "train/loss_total": 0.5061056613922119 }, { "epoch": 4.214533796419984, "step": 15775, "train/loss_ctc": 0.28615614771842957, "train/loss_error": 0.4795031249523163, "train/loss_total": 0.44083371758461 }, { "epoch": 4.214800961795351, "step": 15776, "train/loss_ctc": 0.7844012975692749, "train/loss_error": 0.4121215045452118, "train/loss_total": 0.48657748103141785 }, { "epoch": 4.215068127170719, "step": 15777, "train/loss_ctc": 0.5312483310699463, "train/loss_error": 0.43148186802864075, "train/loss_total": 0.4514351487159729 }, { "epoch": 4.215335292546086, "step": 15778, "train/loss_ctc": 0.8939816951751709, 
"train/loss_error": 0.4327395558357239, "train/loss_total": 0.5249879956245422 }, { "epoch": 4.215602457921453, "step": 15779, "train/loss_ctc": 0.7455048561096191, "train/loss_error": 0.48000186681747437, "train/loss_total": 0.5331024527549744 }, { "epoch": 4.215869623296821, "grad_norm": 1.8640214204788208, "learning_rate": 4.717606198236709e-06, "loss": 0.4783, "step": 15780 }, { "epoch": 4.215869623296821, "step": 15780, "train/loss_ctc": 0.5335561037063599, "train/loss_error": 0.4587201476097107, "train/loss_total": 0.4736873507499695 }, { "epoch": 4.216136788672188, "step": 15781, "train/loss_ctc": 0.6520861387252808, "train/loss_error": 0.3741694390773773, "train/loss_total": 0.42975279688835144 }, { "epoch": 4.216403954047555, "step": 15782, "train/loss_ctc": 0.5617345571517944, "train/loss_error": 0.43396273255348206, "train/loss_total": 0.45951712131500244 }, { "epoch": 4.216671119422923, "step": 15783, "train/loss_ctc": 0.9429464340209961, "train/loss_error": 0.5272360444068909, "train/loss_total": 0.6103781461715698 }, { "epoch": 4.216938284798291, "step": 15784, "train/loss_ctc": 0.3818339407444, "train/loss_error": 0.4249611496925354, "train/loss_total": 0.41633570194244385 }, { "epoch": 4.217205450173657, "step": 15785, "train/loss_ctc": 0.7844711542129517, "train/loss_error": 0.4692322909832001, "train/loss_total": 0.5322800874710083 }, { "epoch": 4.217472615549025, "step": 15786, "train/loss_ctc": 0.948013424873352, "train/loss_error": 0.4176872968673706, "train/loss_total": 0.5237525105476379 }, { "epoch": 4.217739780924393, "step": 15787, "train/loss_ctc": 0.3213508129119873, "train/loss_error": 0.4636826515197754, "train/loss_total": 0.4352163076400757 }, { "epoch": 4.218006946299759, "step": 15788, "train/loss_ctc": 0.6146250367164612, "train/loss_error": 0.37207555770874023, "train/loss_total": 0.4205854535102844 }, { "epoch": 4.218274111675127, "step": 15789, "train/loss_ctc": 0.8190584182739258, "train/loss_error": 0.4349106550216675, 
"train/loss_total": 0.5117402076721191 }, { "epoch": 4.218541277050495, "grad_norm": 6.088345050811768, "learning_rate": 4.701576275714667e-06, "loss": 0.4813, "step": 15790 }, { "epoch": 4.218541277050495, "step": 15790, "train/loss_ctc": 0.33598649501800537, "train/loss_error": 0.508271336555481, "train/loss_total": 0.47381436824798584 }, { "epoch": 4.218808442425861, "step": 15791, "train/loss_ctc": 0.3347665071487427, "train/loss_error": 0.42958542704582214, "train/loss_total": 0.41062164306640625 }, { "epoch": 4.219075607801229, "step": 15792, "train/loss_ctc": 1.4884471893310547, "train/loss_error": 0.4595435857772827, "train/loss_total": 0.665324330329895 }, { "epoch": 4.219342773176597, "step": 15793, "train/loss_ctc": 0.6832098960876465, "train/loss_error": 0.4285278022289276, "train/loss_total": 0.47946423292160034 }, { "epoch": 4.219609938551963, "step": 15794, "train/loss_ctc": 0.7925475835800171, "train/loss_error": 0.45497792959213257, "train/loss_total": 0.5224918723106384 }, { "epoch": 4.219877103927331, "step": 15795, "train/loss_ctc": 0.4952109754085541, "train/loss_error": 0.44469210505485535, "train/loss_total": 0.4547958970069885 }, { "epoch": 4.220144269302699, "step": 15796, "train/loss_ctc": 0.4137761890888214, "train/loss_error": 0.4340297281742096, "train/loss_total": 0.42997902631759644 }, { "epoch": 4.2204114346780655, "step": 15797, "train/loss_ctc": 0.30268803238868713, "train/loss_error": 0.4252731204032898, "train/loss_total": 0.4007561206817627 }, { "epoch": 4.220678600053433, "step": 15798, "train/loss_ctc": 1.1240158081054688, "train/loss_error": 0.3966289460659027, "train/loss_total": 0.5421063303947449 }, { "epoch": 4.220945765428801, "step": 15799, "train/loss_ctc": 0.17800167202949524, "train/loss_error": 0.3926793932914734, "train/loss_total": 0.3497438430786133 }, { "epoch": 4.2212129308041675, "grad_norm": 1.1359244585037231, "learning_rate": 4.6855463531926265e-06, "loss": 0.4729, "step": 15800 }, { "epoch": 
4.2212129308041675, "step": 15800, "train/loss_ctc": 0.6202831864356995, "train/loss_error": 0.4911581575870514, "train/loss_total": 0.516983151435852 }, { "epoch": 4.221480096179535, "step": 15801, "train/loss_ctc": 0.5325759649276733, "train/loss_error": 0.40611761808395386, "train/loss_total": 0.4314092993736267 }, { "epoch": 4.221747261554903, "step": 15802, "train/loss_ctc": 0.8920183777809143, "train/loss_error": 0.4846211075782776, "train/loss_total": 0.5661005973815918 }, { "epoch": 4.2220144269302695, "step": 15803, "train/loss_ctc": 0.3594754934310913, "train/loss_error": 0.4829806983470917, "train/loss_total": 0.45827966928482056 }, { "epoch": 4.222281592305637, "step": 15804, "train/loss_ctc": 0.47358453273773193, "train/loss_error": 0.4180324971675873, "train/loss_total": 0.42914292216300964 }, { "epoch": 4.222548757681005, "step": 15805, "train/loss_ctc": 0.6631003618240356, "train/loss_error": 0.46100154519081116, "train/loss_total": 0.501421332359314 }, { "epoch": 4.2228159230563715, "step": 15806, "train/loss_ctc": 1.0805678367614746, "train/loss_error": 0.42711883783340454, "train/loss_total": 0.5578086376190186 }, { "epoch": 4.223083088431739, "step": 15807, "train/loss_ctc": 0.4140731692314148, "train/loss_error": 0.4969313442707062, "train/loss_total": 0.4803597033023834 }, { "epoch": 4.223350253807107, "step": 15808, "train/loss_ctc": 0.9258580207824707, "train/loss_error": 0.5195660591125488, "train/loss_total": 0.6008244752883911 }, { "epoch": 4.223617419182474, "step": 15809, "train/loss_ctc": 0.1868494153022766, "train/loss_error": 0.36531004309654236, "train/loss_total": 0.3296179175376892 }, { "epoch": 4.223884584557841, "grad_norm": 4.09426212310791, "learning_rate": 4.669516430670585e-06, "loss": 0.4872, "step": 15810 }, { "epoch": 4.223884584557841, "step": 15810, "train/loss_ctc": 1.3926337957382202, "train/loss_error": 0.4489367604255676, "train/loss_total": 0.6376761794090271 }, { "epoch": 4.224151749933209, "step": 15811, 
"train/loss_ctc": 0.5184279680252075, "train/loss_error": 0.45635783672332764, "train/loss_total": 0.46877187490463257 }, { "epoch": 4.224418915308576, "step": 15812, "train/loss_ctc": 0.5937018990516663, "train/loss_error": 0.4102470278739929, "train/loss_total": 0.44693800806999207 }, { "epoch": 4.224686080683943, "step": 15813, "train/loss_ctc": 0.5150448679924011, "train/loss_error": 0.4326026141643524, "train/loss_total": 0.4490910768508911 }, { "epoch": 4.224953246059311, "step": 15814, "train/loss_ctc": 0.829874575138092, "train/loss_error": 0.43742382526397705, "train/loss_total": 0.5159139633178711 }, { "epoch": 4.225220411434678, "step": 15815, "train/loss_ctc": 0.40708765387535095, "train/loss_error": 0.378860741853714, "train/loss_total": 0.38450613617897034 }, { "epoch": 4.225487576810045, "step": 15816, "train/loss_ctc": 0.8786890506744385, "train/loss_error": 0.43855348229408264, "train/loss_total": 0.5265806317329407 }, { "epoch": 4.225754742185413, "step": 15817, "train/loss_ctc": 1.0392979383468628, "train/loss_error": 0.41663920879364014, "train/loss_total": 0.5411709547042847 }, { "epoch": 4.22602190756078, "step": 15818, "train/loss_ctc": 0.6800996661186218, "train/loss_error": 0.3975391089916229, "train/loss_total": 0.4540511965751648 }, { "epoch": 4.226289072936147, "step": 15819, "train/loss_ctc": 0.30136430263519287, "train/loss_error": 0.4516535997390747, "train/loss_total": 0.4215957522392273 }, { "epoch": 4.226556238311515, "grad_norm": 1.514388084411621, "learning_rate": 4.653486508148544e-06, "loss": 0.4846, "step": 15820 }, { "epoch": 4.226556238311515, "step": 15820, "train/loss_ctc": 1.3980032205581665, "train/loss_error": 0.44804292917251587, "train/loss_total": 0.638034999370575 }, { "epoch": 4.226823403686883, "step": 15821, "train/loss_ctc": 1.0105938911437988, "train/loss_error": 0.4343433380126953, "train/loss_total": 0.549593448638916 }, { "epoch": 4.227090569062249, "step": 15822, "train/loss_ctc": 0.421511709690094, 
"train/loss_error": 0.4252697229385376, "train/loss_total": 0.4245181083679199 }, { "epoch": 4.227357734437617, "step": 15823, "train/loss_ctc": 0.5873293280601501, "train/loss_error": 0.40187159180641174, "train/loss_total": 0.4389631748199463 }, { "epoch": 4.227624899812985, "step": 15824, "train/loss_ctc": 0.5682449340820312, "train/loss_error": 0.3861628770828247, "train/loss_total": 0.422579288482666 }, { "epoch": 4.227892065188351, "step": 15825, "train/loss_ctc": 0.3999151885509491, "train/loss_error": 0.47438961267471313, "train/loss_total": 0.4594947099685669 }, { "epoch": 4.228159230563719, "step": 15826, "train/loss_ctc": 0.6523078680038452, "train/loss_error": 0.42081305384635925, "train/loss_total": 0.4671120345592499 }, { "epoch": 4.228426395939087, "step": 15827, "train/loss_ctc": 0.5513254404067993, "train/loss_error": 0.40573254227638245, "train/loss_total": 0.43485110998153687 }, { "epoch": 4.228693561314453, "step": 15828, "train/loss_ctc": 0.439858078956604, "train/loss_error": 0.38820141553878784, "train/loss_total": 0.39853277802467346 }, { "epoch": 4.228960726689821, "step": 15829, "train/loss_ctc": 0.22504784166812897, "train/loss_error": 0.4944837987422943, "train/loss_total": 0.44059664011001587 }, { "epoch": 4.229227892065189, "grad_norm": 1.9087247848510742, "learning_rate": 4.6374565856265024e-06, "loss": 0.4674, "step": 15830 }, { "epoch": 4.229227892065189, "step": 15830, "train/loss_ctc": 0.5727846026420593, "train/loss_error": 0.4758942723274231, "train/loss_total": 0.49527233839035034 }, { "epoch": 4.229495057440555, "step": 15831, "train/loss_ctc": 0.7742261290550232, "train/loss_error": 0.3890725076198578, "train/loss_total": 0.4661032557487488 }, { "epoch": 4.229762222815923, "step": 15832, "train/loss_ctc": 0.8795746564865112, "train/loss_error": 0.45518332719802856, "train/loss_total": 0.5400615930557251 }, { "epoch": 4.230029388191291, "step": 15833, "train/loss_ctc": 0.6277411580085754, "train/loss_error": 
0.42694687843322754, "train/loss_total": 0.4671057462692261 }, { "epoch": 4.2302965535666575, "step": 15834, "train/loss_ctc": 0.44641533493995667, "train/loss_error": 0.4061307907104492, "train/loss_total": 0.4141877293586731 }, { "epoch": 4.230563718942025, "step": 15835, "train/loss_ctc": 0.389779657125473, "train/loss_error": 0.3808261752128601, "train/loss_total": 0.38261687755584717 }, { "epoch": 4.230830884317393, "step": 15836, "train/loss_ctc": 1.0645076036453247, "train/loss_error": 0.5280492305755615, "train/loss_total": 0.6353409290313721 }, { "epoch": 4.2310980496927595, "step": 15837, "train/loss_ctc": 0.5945196151733398, "train/loss_error": 0.3984120190143585, "train/loss_total": 0.43763354420661926 }, { "epoch": 4.231365215068127, "step": 15838, "train/loss_ctc": 0.8244826793670654, "train/loss_error": 0.40647679567337036, "train/loss_total": 0.4900779724121094 }, { "epoch": 4.231632380443495, "step": 15839, "train/loss_ctc": 0.3535168468952179, "train/loss_error": 0.4281223714351654, "train/loss_total": 0.4132012724876404 }, { "epoch": 4.2318995458188615, "grad_norm": 2.7615466117858887, "learning_rate": 4.621426663104462e-06, "loss": 0.4742, "step": 15840 }, { "epoch": 4.2318995458188615, "step": 15840, "train/loss_ctc": 0.5074381232261658, "train/loss_error": 0.39923417568206787, "train/loss_total": 0.4208749830722809 }, { "epoch": 4.232166711194229, "step": 15841, "train/loss_ctc": 0.6200763583183289, "train/loss_error": 0.43233734369277954, "train/loss_total": 0.4698851406574249 }, { "epoch": 4.232433876569597, "step": 15842, "train/loss_ctc": 0.8955538272857666, "train/loss_error": 0.46282094717025757, "train/loss_total": 0.5493675470352173 }, { "epoch": 4.2327010419449635, "step": 15843, "train/loss_ctc": 0.7613073587417603, "train/loss_error": 0.43132758140563965, "train/loss_total": 0.49732357263565063 }, { "epoch": 4.232968207320331, "step": 15844, "train/loss_ctc": 0.21997667849063873, "train/loss_error": 0.40966105461120605, 
"train/loss_total": 0.3717241883277893 }, { "epoch": 4.233235372695699, "step": 15845, "train/loss_ctc": 0.3241578936576843, "train/loss_error": 0.4688843786716461, "train/loss_total": 0.43993908166885376 }, { "epoch": 4.233502538071066, "step": 15846, "train/loss_ctc": 0.763437807559967, "train/loss_error": 0.49672380089759827, "train/loss_total": 0.5500665903091431 }, { "epoch": 4.233769703446433, "step": 15847, "train/loss_ctc": 0.7054488062858582, "train/loss_error": 0.4013781249523163, "train/loss_total": 0.46219226717948914 }, { "epoch": 4.234036868821801, "step": 15848, "train/loss_ctc": 0.46877145767211914, "train/loss_error": 0.41476184129714966, "train/loss_total": 0.425563782453537 }, { "epoch": 4.234304034197168, "step": 15849, "train/loss_ctc": 0.8734921813011169, "train/loss_error": 0.5121864080429077, "train/loss_total": 0.5844475626945496 }, { "epoch": 4.234571199572535, "grad_norm": 1.9635440111160278, "learning_rate": 4.605396740582421e-06, "loss": 0.4771, "step": 15850 }, { "epoch": 4.234571199572535, "step": 15850, "train/loss_ctc": 1.0815274715423584, "train/loss_error": 0.484551340341568, "train/loss_total": 0.6039465665817261 }, { "epoch": 4.234838364947903, "step": 15851, "train/loss_ctc": 0.5579970479011536, "train/loss_error": 0.48961907625198364, "train/loss_total": 0.5032946467399597 }, { "epoch": 4.2351055303232705, "step": 15852, "train/loss_ctc": 0.46928372979164124, "train/loss_error": 0.4340502917766571, "train/loss_total": 0.4410969913005829 }, { "epoch": 4.235372695698637, "step": 15853, "train/loss_ctc": 0.4234176576137543, "train/loss_error": 0.44632720947265625, "train/loss_total": 0.4417453110218048 }, { "epoch": 4.235639861074005, "step": 15854, "train/loss_ctc": 0.8928954601287842, "train/loss_error": 0.40573108196258545, "train/loss_total": 0.5031639337539673 }, { "epoch": 4.235907026449373, "step": 15855, "train/loss_ctc": 0.49940264225006104, "train/loss_error": 0.41524964570999146, "train/loss_total": 0.4320802688598633 
}, { "epoch": 4.236174191824739, "step": 15856, "train/loss_ctc": 0.8203991651535034, "train/loss_error": 0.43857571482658386, "train/loss_total": 0.5149403810501099 }, { "epoch": 4.236441357200107, "step": 15857, "train/loss_ctc": 0.4160587787628174, "train/loss_error": 0.4223644733428955, "train/loss_total": 0.4211033284664154 }, { "epoch": 4.236708522575475, "step": 15858, "train/loss_ctc": 0.3211292624473572, "train/loss_error": 0.4576409161090851, "train/loss_total": 0.430338591337204 }, { "epoch": 4.236975687950841, "step": 15859, "train/loss_ctc": 0.6564080715179443, "train/loss_error": 0.42595019936561584, "train/loss_total": 0.4720417857170105 }, { "epoch": 4.237242853326209, "grad_norm": 1.469199776649475, "learning_rate": 4.589366818060379e-06, "loss": 0.4764, "step": 15860 }, { "epoch": 4.237242853326209, "step": 15860, "train/loss_ctc": 0.7946720123291016, "train/loss_error": 0.43100208044052124, "train/loss_total": 0.5037360787391663 }, { "epoch": 4.237510018701577, "step": 15861, "train/loss_ctc": 0.4390772879123688, "train/loss_error": 0.38495856523513794, "train/loss_total": 0.39578232169151306 }, { "epoch": 4.237777184076943, "step": 15862, "train/loss_ctc": 1.3133134841918945, "train/loss_error": 0.4633350968360901, "train/loss_total": 0.6333308219909668 }, { "epoch": 4.238044349452311, "step": 15863, "train/loss_ctc": 0.3772898316383362, "train/loss_error": 0.40956801176071167, "train/loss_total": 0.40311238169670105 }, { "epoch": 4.238311514827679, "step": 15864, "train/loss_ctc": 0.4879586696624756, "train/loss_error": 0.4248632788658142, "train/loss_total": 0.4374823570251465 }, { "epoch": 4.238578680203045, "step": 15865, "train/loss_ctc": 0.6791573762893677, "train/loss_error": 0.44332918524742126, "train/loss_total": 0.49049481749534607 }, { "epoch": 4.238845845578413, "step": 15866, "train/loss_ctc": 0.5444396734237671, "train/loss_error": 0.33464664220809937, "train/loss_total": 0.3766052722930908 }, { "epoch": 4.239113010953781, "step": 
15867, "train/loss_ctc": 0.3891589641571045, "train/loss_error": 0.4379100799560547, "train/loss_total": 0.4281598627567291 }, { "epoch": 4.239380176329147, "step": 15868, "train/loss_ctc": 1.4903934001922607, "train/loss_error": 0.48402610421180725, "train/loss_total": 0.6852995753288269 }, { "epoch": 4.239647341704515, "step": 15869, "train/loss_ctc": 0.9722325801849365, "train/loss_error": 0.4345601201057434, "train/loss_total": 0.5420946478843689 }, { "epoch": 4.239914507079883, "grad_norm": 2.0469517707824707, "learning_rate": 4.574939887790542e-06, "loss": 0.4896, "step": 15870 }, { "epoch": 4.239914507079883, "step": 15870, "train/loss_ctc": 0.543961763381958, "train/loss_error": 0.4338909387588501, "train/loss_total": 0.45590510964393616 }, { "epoch": 4.2401816724552495, "step": 15871, "train/loss_ctc": 0.7360610961914062, "train/loss_error": 0.4338710606098175, "train/loss_total": 0.49430906772613525 }, { "epoch": 4.240448837830617, "step": 15872, "train/loss_ctc": 0.30919304490089417, "train/loss_error": 0.45348820090293884, "train/loss_total": 0.4246291518211365 }, { "epoch": 4.240716003205985, "step": 15873, "train/loss_ctc": 0.6889451742172241, "train/loss_error": 0.46323442459106445, "train/loss_total": 0.5083765983581543 }, { "epoch": 4.2409831685813515, "step": 15874, "train/loss_ctc": 0.4611646831035614, "train/loss_error": 0.4177349805831909, "train/loss_total": 0.4264209270477295 }, { "epoch": 4.241250333956719, "step": 15875, "train/loss_ctc": 0.4885121285915375, "train/loss_error": 0.42739152908325195, "train/loss_total": 0.4396156668663025 }, { "epoch": 4.241517499332087, "step": 15876, "train/loss_ctc": 0.5638952851295471, "train/loss_error": 0.4329553544521332, "train/loss_total": 0.45914334058761597 }, { "epoch": 4.2417846647074535, "step": 15877, "train/loss_ctc": 0.9057905673980713, "train/loss_error": 0.4254032075405121, "train/loss_total": 0.5214806795120239 }, { "epoch": 4.242051830082821, "step": 15878, "train/loss_ctc": 
0.2953459322452545, "train/loss_error": 0.42848673462867737, "train/loss_total": 0.4018585681915283 }, { "epoch": 4.242318995458189, "step": 15879, "train/loss_ctc": 1.0701091289520264, "train/loss_error": 0.42583972215652466, "train/loss_total": 0.5546935796737671 }, { "epoch": 4.2425861608335556, "grad_norm": 12.581130981445312, "learning_rate": 4.558909965268501e-06, "loss": 0.4686, "step": 15880 }, { "epoch": 4.2425861608335556, "step": 15880, "train/loss_ctc": 0.7003800868988037, "train/loss_error": 0.3952028155326843, "train/loss_total": 0.4562382698059082 }, { "epoch": 4.242853326208923, "step": 15881, "train/loss_ctc": 1.0110669136047363, "train/loss_error": 0.5193707346916199, "train/loss_total": 0.6177099943161011 }, { "epoch": 4.243120491584291, "step": 15882, "train/loss_ctc": 1.0737292766571045, "train/loss_error": 0.4433479607105255, "train/loss_total": 0.5694242119789124 }, { "epoch": 4.2433876569596585, "step": 15883, "train/loss_ctc": 1.5605881214141846, "train/loss_error": 0.41419103741645813, "train/loss_total": 0.6434704661369324 }, { "epoch": 4.243654822335025, "step": 15884, "train/loss_ctc": 0.7648887634277344, "train/loss_error": 0.42778095602989197, "train/loss_total": 0.49520254135131836 }, { "epoch": 4.243921987710393, "step": 15885, "train/loss_ctc": 0.6591180562973022, "train/loss_error": 0.4624686539173126, "train/loss_total": 0.5017985701560974 }, { "epoch": 4.2441891530857605, "step": 15886, "train/loss_ctc": 0.5542210340499878, "train/loss_error": 0.40084946155548096, "train/loss_total": 0.43152379989624023 }, { "epoch": 4.244456318461127, "step": 15887, "train/loss_ctc": 0.872299313545227, "train/loss_error": 0.44782382249832153, "train/loss_total": 0.5327188968658447 }, { "epoch": 4.244723483836495, "step": 15888, "train/loss_ctc": 0.37014853954315186, "train/loss_error": 0.40934425592422485, "train/loss_total": 0.40150511264801025 }, { "epoch": 4.2449906492118625, "step": 15889, "train/loss_ctc": 0.6687233448028564, 
"train/loss_error": 0.36808016896247864, "train/loss_total": 0.4282088279724121 }, { "epoch": 4.245257814587229, "grad_norm": 5.670733451843262, "learning_rate": 4.54288004274646e-06, "loss": 0.5078, "step": 15890 }, { "epoch": 4.245257814587229, "step": 15890, "train/loss_ctc": 0.4985600709915161, "train/loss_error": 0.42343661189079285, "train/loss_total": 0.4384613037109375 }, { "epoch": 4.245524979962597, "step": 15891, "train/loss_ctc": 0.6638230681419373, "train/loss_error": 0.4738861918449402, "train/loss_total": 0.5118736028671265 }, { "epoch": 4.245792145337965, "step": 15892, "train/loss_ctc": 0.36339259147644043, "train/loss_error": 0.4408970773220062, "train/loss_total": 0.425396203994751 }, { "epoch": 4.246059310713331, "step": 15893, "train/loss_ctc": 0.7280082702636719, "train/loss_error": 0.42511969804763794, "train/loss_total": 0.4856974184513092 }, { "epoch": 4.246326476088699, "step": 15894, "train/loss_ctc": 0.6454454064369202, "train/loss_error": 0.43455445766448975, "train/loss_total": 0.47673267126083374 }, { "epoch": 4.246593641464067, "step": 15895, "train/loss_ctc": 1.1076908111572266, "train/loss_error": 0.4435447156429291, "train/loss_total": 0.5763739347457886 }, { "epoch": 4.246860806839433, "step": 15896, "train/loss_ctc": 0.39454755187034607, "train/loss_error": 0.44668835401535034, "train/loss_total": 0.4362601935863495 }, { "epoch": 4.247127972214801, "step": 15897, "train/loss_ctc": 0.7271210551261902, "train/loss_error": 0.3860098421573639, "train/loss_total": 0.4542320966720581 }, { "epoch": 4.247395137590169, "step": 15898, "train/loss_ctc": 0.6493910551071167, "train/loss_error": 0.42401349544525146, "train/loss_total": 0.4690890312194824 }, { "epoch": 4.247662302965535, "step": 15899, "train/loss_ctc": 0.45622965693473816, "train/loss_error": 0.4844374358654022, "train/loss_total": 0.4787958860397339 }, { "epoch": 4.247929468340903, "grad_norm": 2.786550283432007, "learning_rate": 4.526850120224419e-06, "loss": 0.4753, 
"step": 15900 }, { "epoch": 4.247929468340903, "step": 15900, "train/loss_ctc": 0.8489857316017151, "train/loss_error": 0.41036128997802734, "train/loss_total": 0.49808618426322937 }, { "epoch": 4.248196633716271, "step": 15901, "train/loss_ctc": 1.5378721952438354, "train/loss_error": 0.48655322194099426, "train/loss_total": 0.6968170404434204 }, { "epoch": 4.248463799091637, "step": 15902, "train/loss_ctc": 0.6409438252449036, "train/loss_error": 0.43120312690734863, "train/loss_total": 0.4731512665748596 }, { "epoch": 4.248730964467005, "step": 15903, "train/loss_ctc": 0.49614208936691284, "train/loss_error": 0.4554455578327179, "train/loss_total": 0.46358487010002136 }, { "epoch": 4.248998129842373, "step": 15904, "train/loss_ctc": 1.013209581375122, "train/loss_error": 0.46210744976997375, "train/loss_total": 0.5723279118537903 }, { "epoch": 4.249265295217739, "step": 15905, "train/loss_ctc": 0.4156227707862854, "train/loss_error": 0.43180206418037415, "train/loss_total": 0.42856621742248535 }, { "epoch": 4.249532460593107, "step": 15906, "train/loss_ctc": 0.6619206070899963, "train/loss_error": 0.5717939734458923, "train/loss_total": 0.5898193120956421 }, { "epoch": 4.249799625968475, "step": 15907, "train/loss_ctc": 0.2807901203632355, "train/loss_error": 0.4417058825492859, "train/loss_total": 0.40952274203300476 }, { "epoch": 4.2500667913438415, "step": 15908, "train/loss_ctc": 0.8175339698791504, "train/loss_error": 0.46854540705680847, "train/loss_total": 0.5383431315422058 }, { "epoch": 4.250333956719209, "step": 15909, "train/loss_ctc": 0.6855601072311401, "train/loss_error": 0.4043775200843811, "train/loss_total": 0.46061405539512634 }, { "epoch": 4.250601122094577, "grad_norm": 6.412883758544922, "learning_rate": 4.510820197702378e-06, "loss": 0.5131, "step": 15910 }, { "epoch": 4.250601122094577, "step": 15910, "train/loss_ctc": 0.5326085090637207, "train/loss_error": 0.46408915519714355, "train/loss_total": 0.47779303789138794 }, { "epoch": 
4.2508682874699435, "step": 15911, "train/loss_ctc": 0.6345325708389282, "train/loss_error": 0.45071840286254883, "train/loss_total": 0.4874812364578247 }, { "epoch": 4.251135452845311, "step": 15912, "train/loss_ctc": 0.6480776071548462, "train/loss_error": 0.41636034846305847, "train/loss_total": 0.4627038240432739 }, { "epoch": 4.251402618220679, "step": 15913, "train/loss_ctc": 0.878090500831604, "train/loss_error": 0.4504476487636566, "train/loss_total": 0.535976231098175 }, { "epoch": 4.251669783596046, "step": 15914, "train/loss_ctc": 0.4172205924987793, "train/loss_error": 0.4746888279914856, "train/loss_total": 0.46319517493247986 }, { "epoch": 4.251936948971413, "step": 15915, "train/loss_ctc": 0.5969346761703491, "train/loss_error": 0.39647582173347473, "train/loss_total": 0.43656760454177856 }, { "epoch": 4.252204114346781, "step": 15916, "train/loss_ctc": 0.6560914516448975, "train/loss_error": 0.4085673391819, "train/loss_total": 0.4580721855163574 }, { "epoch": 4.252471279722148, "step": 15917, "train/loss_ctc": 0.3089347183704376, "train/loss_error": 0.394679993391037, "train/loss_total": 0.377530962228775 }, { "epoch": 4.252738445097515, "step": 15918, "train/loss_ctc": 0.6801280975341797, "train/loss_error": 0.4315841495990753, "train/loss_total": 0.4812929630279541 }, { "epoch": 4.253005610472883, "step": 15919, "train/loss_ctc": 0.4927108883857727, "train/loss_error": 0.37899866700172424, "train/loss_total": 0.4017411172389984 }, { "epoch": 4.2532727758482505, "grad_norm": 3.253138303756714, "learning_rate": 4.494790275180337e-06, "loss": 0.4582, "step": 15920 }, { "epoch": 4.2532727758482505, "step": 15920, "train/loss_ctc": 0.435273140668869, "train/loss_error": 0.421604722738266, "train/loss_total": 0.4243384301662445 }, { "epoch": 4.253539941223617, "step": 15921, "train/loss_ctc": 0.2952762842178345, "train/loss_error": 0.43632227182388306, "train/loss_total": 0.4081130921840668 }, { "epoch": 4.253807106598985, "step": 15922, 
"train/loss_ctc": 0.7460353374481201, "train/loss_error": 0.423745721578598, "train/loss_total": 0.48820364475250244 }, { "epoch": 4.2540742719743525, "step": 15923, "train/loss_ctc": 0.44451725482940674, "train/loss_error": 0.43639296293258667, "train/loss_total": 0.4380178451538086 }, { "epoch": 4.254341437349719, "step": 15924, "train/loss_ctc": 0.4000735282897949, "train/loss_error": 0.40663573145866394, "train/loss_total": 0.4053232967853546 }, { "epoch": 4.254608602725087, "step": 15925, "train/loss_ctc": 0.6497565507888794, "train/loss_error": 0.48229649662971497, "train/loss_total": 0.5157884955406189 }, { "epoch": 4.2548757681004545, "step": 15926, "train/loss_ctc": 0.4879376292228699, "train/loss_error": 0.4232833981513977, "train/loss_total": 0.43621423840522766 }, { "epoch": 4.255142933475821, "step": 15927, "train/loss_ctc": 0.5980516672134399, "train/loss_error": 0.4401567578315735, "train/loss_total": 0.47173574566841125 }, { "epoch": 4.255410098851189, "step": 15928, "train/loss_ctc": 0.36944007873535156, "train/loss_error": 0.37866586446762085, "train/loss_total": 0.37682074308395386 }, { "epoch": 4.255677264226557, "step": 15929, "train/loss_ctc": 0.71246337890625, "train/loss_error": 0.41323843598365784, "train/loss_total": 0.4730834364891052 }, { "epoch": 4.255944429601923, "grad_norm": 1.6720795631408691, "learning_rate": 4.478760352658296e-06, "loss": 0.4438, "step": 15930 }, { "epoch": 4.255944429601923, "step": 15930, "train/loss_ctc": 0.5242323875427246, "train/loss_error": 0.44252651929855347, "train/loss_total": 0.4588676989078522 }, { "epoch": 4.256211594977291, "step": 15931, "train/loss_ctc": 1.278915524482727, "train/loss_error": 0.5114343166351318, "train/loss_total": 0.6649305820465088 }, { "epoch": 4.256478760352659, "step": 15932, "train/loss_ctc": 1.0426112413406372, "train/loss_error": 0.3215256333351135, "train/loss_total": 0.4657427668571472 }, { "epoch": 4.256745925728025, "step": 15933, "train/loss_ctc": 1.2100781202316284, 
"train/loss_error": 0.4670543074607849, "train/loss_total": 0.6156591176986694 }, { "epoch": 4.257013091103393, "step": 15934, "train/loss_ctc": 0.691124677658081, "train/loss_error": 0.5095272660255432, "train/loss_total": 0.5458467602729797 }, { "epoch": 4.257280256478761, "step": 15935, "train/loss_ctc": 0.5356364250183105, "train/loss_error": 0.49801766872406006, "train/loss_total": 0.5055414438247681 }, { "epoch": 4.257547421854127, "step": 15936, "train/loss_ctc": 0.41501089930534363, "train/loss_error": 0.4104060232639313, "train/loss_total": 0.4113270044326782 }, { "epoch": 4.257814587229495, "step": 15937, "train/loss_ctc": 0.3399864435195923, "train/loss_error": 0.3904537856578827, "train/loss_total": 0.38036030530929565 }, { "epoch": 4.258081752604863, "step": 15938, "train/loss_ctc": 0.8547323942184448, "train/loss_error": 0.42664530873298645, "train/loss_total": 0.5122627019882202 }, { "epoch": 4.258348917980229, "step": 15939, "train/loss_ctc": 0.5943751335144043, "train/loss_error": 0.41181325912475586, "train/loss_total": 0.44832563400268555 }, { "epoch": 4.258616083355597, "grad_norm": 2.998159408569336, "learning_rate": 4.462730430136255e-06, "loss": 0.5009, "step": 15940 }, { "epoch": 4.258616083355597, "step": 15940, "train/loss_ctc": 0.44567009806632996, "train/loss_error": 0.38563108444213867, "train/loss_total": 0.3976389169692993 }, { "epoch": 4.258883248730965, "step": 15941, "train/loss_ctc": 0.9493603110313416, "train/loss_error": 0.5120323896408081, "train/loss_total": 0.5994979739189148 }, { "epoch": 4.259150414106331, "step": 15942, "train/loss_ctc": 1.112424612045288, "train/loss_error": 0.4665071964263916, "train/loss_total": 0.5956906676292419 }, { "epoch": 4.259417579481699, "step": 15943, "train/loss_ctc": 0.5882730484008789, "train/loss_error": 0.4837212562561035, "train/loss_total": 0.5046316385269165 }, { "epoch": 4.259684744857067, "step": 15944, "train/loss_ctc": 0.5969374775886536, "train/loss_error": 0.4180474579334259, 
"train/loss_total": 0.4538254737854004 }, { "epoch": 4.2599519102324335, "step": 15945, "train/loss_ctc": 0.3739795684814453, "train/loss_error": 0.4312381148338318, "train/loss_total": 0.41978639364242554 }, { "epoch": 4.260219075607801, "step": 15946, "train/loss_ctc": 0.4653146266937256, "train/loss_error": 0.3861684799194336, "train/loss_total": 0.40199771523475647 }, { "epoch": 4.260486240983169, "step": 15947, "train/loss_ctc": 0.4926772117614746, "train/loss_error": 0.4416976273059845, "train/loss_total": 0.45189356803894043 }, { "epoch": 4.2607534063585355, "step": 15948, "train/loss_ctc": 0.45074933767318726, "train/loss_error": 0.4928997755050659, "train/loss_total": 0.4844697117805481 }, { "epoch": 4.261020571733903, "step": 15949, "train/loss_ctc": 0.49321499466896057, "train/loss_error": 0.42669421434402466, "train/loss_total": 0.4399983882904053 }, { "epoch": 4.261287737109271, "grad_norm": 2.414647102355957, "learning_rate": 4.446700507614214e-06, "loss": 0.4749, "step": 15950 }, { "epoch": 4.261287737109271, "step": 15950, "train/loss_ctc": 0.420165479183197, "train/loss_error": 0.4265330135822296, "train/loss_total": 0.425259530544281 }, { "epoch": 4.261554902484638, "step": 15951, "train/loss_ctc": 0.20775699615478516, "train/loss_error": 0.4671989679336548, "train/loss_total": 0.4153105914592743 }, { "epoch": 4.261822067860005, "step": 15952, "train/loss_ctc": 0.3863943815231323, "train/loss_error": 0.450708270072937, "train/loss_total": 0.43784549832344055 }, { "epoch": 4.262089233235373, "step": 15953, "train/loss_ctc": 0.8909486532211304, "train/loss_error": 0.4176473617553711, "train/loss_total": 0.5123076438903809 }, { "epoch": 4.2623563986107404, "step": 15954, "train/loss_ctc": 0.6384533643722534, "train/loss_error": 0.5333248376846313, "train/loss_total": 0.5543505549430847 }, { "epoch": 4.262623563986107, "step": 15955, "train/loss_ctc": 0.9562522172927856, "train/loss_error": 0.4891373813152313, "train/loss_total": 0.5825603604316711 }, 
{ "epoch": 4.262890729361475, "step": 15956, "train/loss_ctc": 0.9605845212936401, "train/loss_error": 0.4901789724826813, "train/loss_total": 0.584260106086731 }, { "epoch": 4.2631578947368425, "step": 15957, "train/loss_ctc": 0.8859371542930603, "train/loss_error": 0.40386146306991577, "train/loss_total": 0.5002766251564026 }, { "epoch": 4.263425060112209, "step": 15958, "train/loss_ctc": 0.7332754135131836, "train/loss_error": 0.45680806040763855, "train/loss_total": 0.5121015310287476 }, { "epoch": 4.263692225487577, "step": 15959, "train/loss_ctc": 0.9775065183639526, "train/loss_error": 0.5007261037826538, "train/loss_total": 0.5960822105407715 }, { "epoch": 4.2639593908629445, "grad_norm": 1.9960200786590576, "learning_rate": 4.430670585092172e-06, "loss": 0.512, "step": 15960 }, { "epoch": 4.2639593908629445, "step": 15960, "train/loss_ctc": 0.5245975255966187, "train/loss_error": 0.43112650513648987, "train/loss_total": 0.44982069730758667 }, { "epoch": 4.264226556238311, "step": 15961, "train/loss_ctc": 0.8001902103424072, "train/loss_error": 0.5234543681144714, "train/loss_total": 0.5788015127182007 }, { "epoch": 4.264493721613679, "step": 15962, "train/loss_ctc": 0.9885424375534058, "train/loss_error": 0.4287916123867035, "train/loss_total": 0.5407418012619019 }, { "epoch": 4.2647608869890465, "step": 15963, "train/loss_ctc": 0.3901360332965851, "train/loss_error": 0.46234381198883057, "train/loss_total": 0.44790226221084595 }, { "epoch": 4.265028052364413, "step": 15964, "train/loss_ctc": 0.34046876430511475, "train/loss_error": 0.39184215664863586, "train/loss_total": 0.38156747817993164 }, { "epoch": 4.265295217739781, "step": 15965, "train/loss_ctc": 1.2123676538467407, "train/loss_error": 0.43820321559906006, "train/loss_total": 0.5930361151695251 }, { "epoch": 4.265562383115149, "step": 15966, "train/loss_ctc": 0.43308383226394653, "train/loss_error": 0.45250850915908813, "train/loss_total": 0.4486235976219177 }, { "epoch": 4.265829548490515, 
"step": 15967, "train/loss_ctc": 0.9224473834037781, "train/loss_error": 0.4427154064178467, "train/loss_total": 0.538661777973175 }, { "epoch": 4.266096713865883, "step": 15968, "train/loss_ctc": 0.19802486896514893, "train/loss_error": 0.38592103123664856, "train/loss_total": 0.34834179282188416 }, { "epoch": 4.266363879241251, "step": 15969, "train/loss_ctc": 1.3374807834625244, "train/loss_error": 0.4773053526878357, "train/loss_total": 0.6493404507637024 }, { "epoch": 4.266631044616617, "grad_norm": 1.7899281978607178, "learning_rate": 4.414640662570131e-06, "loss": 0.4977, "step": 15970 }, { "epoch": 4.266631044616617, "step": 15970, "train/loss_ctc": 0.3455791771411896, "train/loss_error": 0.3639639616012573, "train/loss_total": 0.36028701066970825 }, { "epoch": 4.266898209991985, "step": 15971, "train/loss_ctc": 0.6037449240684509, "train/loss_error": 0.4589802324771881, "train/loss_total": 0.4879331886768341 }, { "epoch": 4.267165375367353, "step": 15972, "train/loss_ctc": 0.5114442110061646, "train/loss_error": 0.432269811630249, "train/loss_total": 0.44810470938682556 }, { "epoch": 4.267432540742719, "step": 15973, "train/loss_ctc": 0.4618467092514038, "train/loss_error": 0.43291306495666504, "train/loss_total": 0.4386998116970062 }, { "epoch": 4.267699706118087, "step": 15974, "train/loss_ctc": 0.42608803510665894, "train/loss_error": 0.5174646377563477, "train/loss_total": 0.4991893172264099 }, { "epoch": 4.267966871493455, "step": 15975, "train/loss_ctc": 0.830464243888855, "train/loss_error": 0.43826743960380554, "train/loss_total": 0.5167068243026733 }, { "epoch": 4.268234036868821, "step": 15976, "train/loss_ctc": 0.7713735103607178, "train/loss_error": 0.4088728427886963, "train/loss_total": 0.48137298226356506 }, { "epoch": 4.268501202244189, "step": 15977, "train/loss_ctc": 0.5592923164367676, "train/loss_error": 0.44958293437957764, "train/loss_total": 0.47152480483055115 }, { "epoch": 4.268768367619557, "step": 15978, "train/loss_ctc": 
0.3697795867919922, "train/loss_error": 0.43412327766418457, "train/loss_total": 0.42125454545021057 }, { "epoch": 4.269035532994923, "step": 15979, "train/loss_ctc": 0.9263907074928284, "train/loss_error": 0.41916388273239136, "train/loss_total": 0.5206092596054077 }, { "epoch": 4.269302698370291, "grad_norm": 1.8588217496871948, "learning_rate": 4.39861074004809e-06, "loss": 0.4646, "step": 15980 }, { "epoch": 4.269302698370291, "step": 15980, "train/loss_ctc": 0.8318703174591064, "train/loss_error": 0.4276004433631897, "train/loss_total": 0.508454442024231 }, { "epoch": 4.269569863745659, "step": 15981, "train/loss_ctc": 0.508607804775238, "train/loss_error": 0.5186973810195923, "train/loss_total": 0.5166794657707214 }, { "epoch": 4.269837029121026, "step": 15982, "train/loss_ctc": 0.579334020614624, "train/loss_error": 0.424560546875, "train/loss_total": 0.4555152654647827 }, { "epoch": 4.270104194496393, "step": 15983, "train/loss_ctc": 0.5340964794158936, "train/loss_error": 0.4089241027832031, "train/loss_total": 0.43395859003067017 }, { "epoch": 4.270371359871761, "step": 15984, "train/loss_ctc": 0.9469929337501526, "train/loss_error": 0.4456944465637207, "train/loss_total": 0.5459541082382202 }, { "epoch": 4.2706385252471275, "step": 15985, "train/loss_ctc": 0.962204098701477, "train/loss_error": 0.436806857585907, "train/loss_total": 0.5418863296508789 }, { "epoch": 4.270905690622495, "step": 15986, "train/loss_ctc": 0.49190670251846313, "train/loss_error": 0.4057493507862091, "train/loss_total": 0.42298081517219543 }, { "epoch": 4.271172855997863, "step": 15987, "train/loss_ctc": 0.4672471582889557, "train/loss_error": 0.41149741411209106, "train/loss_total": 0.4226473867893219 }, { "epoch": 4.27144002137323, "step": 15988, "train/loss_ctc": 1.013914704322815, "train/loss_error": 0.4678957164287567, "train/loss_total": 0.5770995020866394 }, { "epoch": 4.271707186748597, "step": 15989, "train/loss_ctc": 0.6266918778419495, "train/loss_error": 
0.46620193123817444, "train/loss_total": 0.4982999265193939 }, { "epoch": 4.271974352123965, "grad_norm": 5.884866237640381, "learning_rate": 4.3825808175260485e-06, "loss": 0.4923, "step": 15990 }, { "epoch": 4.271974352123965, "step": 15990, "train/loss_ctc": 0.8269965052604675, "train/loss_error": 0.43963491916656494, "train/loss_total": 0.5171072483062744 }, { "epoch": 4.2722415174993325, "step": 15991, "train/loss_ctc": 0.7469305992126465, "train/loss_error": 0.4252147674560547, "train/loss_total": 0.4895579218864441 }, { "epoch": 4.272508682874699, "step": 15992, "train/loss_ctc": 0.82486891746521, "train/loss_error": 0.46101829409599304, "train/loss_total": 0.5337884426116943 }, { "epoch": 4.272775848250067, "step": 15993, "train/loss_ctc": 0.6966946125030518, "train/loss_error": 0.5108282566070557, "train/loss_total": 0.5480015277862549 }, { "epoch": 4.2730430136254345, "step": 15994, "train/loss_ctc": 0.7687682509422302, "train/loss_error": 0.49516794085502625, "train/loss_total": 0.549888014793396 }, { "epoch": 4.273310179000801, "step": 15995, "train/loss_ctc": 0.44605082273483276, "train/loss_error": 0.44740554690361023, "train/loss_total": 0.4471346139907837 }, { "epoch": 4.273577344376169, "step": 15996, "train/loss_ctc": 0.6314279437065125, "train/loss_error": 0.4490048587322235, "train/loss_total": 0.48548948764801025 }, { "epoch": 4.2738445097515365, "step": 15997, "train/loss_ctc": 0.6195304989814758, "train/loss_error": 0.48131123185157776, "train/loss_total": 0.5089550614356995 }, { "epoch": 4.274111675126903, "step": 15998, "train/loss_ctc": 0.9525729417800903, "train/loss_error": 0.43431317806243896, "train/loss_total": 0.5379651784896851 }, { "epoch": 4.274378840502271, "step": 15999, "train/loss_ctc": 1.395129919052124, "train/loss_error": 0.5076795220375061, "train/loss_total": 0.6851696372032166 }, { "epoch": 4.2746460058776385, "grad_norm": 1.920622706413269, "learning_rate": 4.3665508950040074e-06, "loss": 0.5303, "step": 16000 }, { 
"epoch": 4.2746460058776385, "step": 16000, "train/loss_ctc": 0.5598850250244141, "train/loss_error": 0.415093332529068, "train/loss_total": 0.44405168294906616 }, { "epoch": 4.274913171253005, "step": 16001, "train/loss_ctc": 0.4101565480232239, "train/loss_error": 0.45788803696632385, "train/loss_total": 0.4483417570590973 }, { "epoch": 4.275180336628373, "step": 16002, "train/loss_ctc": 0.46126747131347656, "train/loss_error": 0.4223596155643463, "train/loss_total": 0.4301412105560303 }, { "epoch": 4.275447502003741, "step": 16003, "train/loss_ctc": 0.3145679235458374, "train/loss_error": 0.4038894474506378, "train/loss_total": 0.38602516055107117 }, { "epoch": 4.275714667379107, "step": 16004, "train/loss_ctc": 1.6766648292541504, "train/loss_error": 0.5142651796340942, "train/loss_total": 0.7467451095581055 }, { "epoch": 4.275981832754475, "step": 16005, "train/loss_ctc": 0.7500330805778503, "train/loss_error": 0.3456844985485077, "train/loss_total": 0.42655423283576965 }, { "epoch": 4.276248998129843, "step": 16006, "train/loss_ctc": 0.34001922607421875, "train/loss_error": 0.42231661081314087, "train/loss_total": 0.4058571457862854 }, { "epoch": 4.276516163505209, "step": 16007, "train/loss_ctc": 0.32182061672210693, "train/loss_error": 0.4568954408168793, "train/loss_total": 0.4298804998397827 }, { "epoch": 4.276783328880577, "step": 16008, "train/loss_ctc": 0.6056512594223022, "train/loss_error": 0.48729318380355835, "train/loss_total": 0.5109648108482361 }, { "epoch": 4.277050494255945, "step": 16009, "train/loss_ctc": 0.5070253610610962, "train/loss_error": 0.41346439719200134, "train/loss_total": 0.4321766197681427 }, { "epoch": 4.277317659631311, "grad_norm": 2.2809085845947266, "learning_rate": 4.350520972481966e-06, "loss": 0.4661, "step": 16010 }, { "epoch": 4.277317659631311, "step": 16010, "train/loss_ctc": 0.6667517423629761, "train/loss_error": 0.4549081325531006, "train/loss_total": 0.49727684259414673 }, { "epoch": 4.277584825006679, "step": 
16011, "train/loss_ctc": 0.4299851357936859, "train/loss_error": 0.43085405230522156, "train/loss_total": 0.4306802749633789 }, { "epoch": 4.277851990382047, "step": 16012, "train/loss_ctc": 1.2231993675231934, "train/loss_error": 0.4877578020095825, "train/loss_total": 0.6348460912704468 }, { "epoch": 4.278119155757414, "step": 16013, "train/loss_ctc": 0.35470375418663025, "train/loss_error": 0.3977521061897278, "train/loss_total": 0.3891424536705017 }, { "epoch": 4.278386321132781, "step": 16014, "train/loss_ctc": 0.5704674124717712, "train/loss_error": 0.40723147988319397, "train/loss_total": 0.4398786723613739 }, { "epoch": 4.278653486508149, "step": 16015, "train/loss_ctc": 0.8302756547927856, "train/loss_error": 0.4430249035358429, "train/loss_total": 0.5204750299453735 }, { "epoch": 4.2789206518835154, "step": 16016, "train/loss_ctc": 0.6353930234909058, "train/loss_error": 0.4199504852294922, "train/loss_total": 0.46303898096084595 }, { "epoch": 4.279187817258883, "step": 16017, "train/loss_ctc": 0.753436803817749, "train/loss_error": 0.4393242299556732, "train/loss_total": 0.5021467208862305 }, { "epoch": 4.279454982634251, "step": 16018, "train/loss_ctc": 1.4515031576156616, "train/loss_error": 0.43696489930152893, "train/loss_total": 0.6398725509643555 }, { "epoch": 4.279722148009618, "step": 16019, "train/loss_ctc": 0.5145071744918823, "train/loss_error": 0.4207551181316376, "train/loss_total": 0.43950551748275757 }, { "epoch": 4.279989313384985, "grad_norm": 2.368394613265991, "learning_rate": 4.334491049959925e-06, "loss": 0.4957, "step": 16020 }, { "epoch": 4.279989313384985, "step": 16020, "train/loss_ctc": 0.9617564082145691, "train/loss_error": 0.3946693539619446, "train/loss_total": 0.5080868005752563 }, { "epoch": 4.280256478760353, "step": 16021, "train/loss_ctc": 0.5569607019424438, "train/loss_error": 0.4340851306915283, "train/loss_total": 0.4586602449417114 }, { "epoch": 4.28052364413572, "step": 16022, "train/loss_ctc": 0.382241427898407, 
"train/loss_error": 0.4920354187488556, "train/loss_total": 0.47007662057876587 }, { "epoch": 4.280790809511087, "step": 16023, "train/loss_ctc": 0.25169289112091064, "train/loss_error": 0.4324967861175537, "train/loss_total": 0.39633598923683167 }, { "epoch": 4.281057974886455, "step": 16024, "train/loss_ctc": 0.4989638328552246, "train/loss_error": 0.37157517671585083, "train/loss_total": 0.39705291390419006 }, { "epoch": 4.281325140261822, "step": 16025, "train/loss_ctc": 1.2138067483901978, "train/loss_error": 0.4598850607872009, "train/loss_total": 0.6106694340705872 }, { "epoch": 4.281592305637189, "step": 16026, "train/loss_ctc": 0.43464064598083496, "train/loss_error": 0.4474281072616577, "train/loss_total": 0.44487062096595764 }, { "epoch": 4.281859471012557, "step": 16027, "train/loss_ctc": 0.36390525102615356, "train/loss_error": 0.44618117809295654, "train/loss_total": 0.4297260046005249 }, { "epoch": 4.2821266363879245, "step": 16028, "train/loss_ctc": 0.7793386578559875, "train/loss_error": 0.3995409905910492, "train/loss_total": 0.47550052404403687 }, { "epoch": 4.282393801763291, "step": 16029, "train/loss_ctc": 0.396889328956604, "train/loss_error": 0.3760550320148468, "train/loss_total": 0.3802219033241272 }, { "epoch": 4.282660967138659, "grad_norm": 1.9780616760253906, "learning_rate": 4.318461127437884e-06, "loss": 0.4571, "step": 16030 }, { "epoch": 4.282660967138659, "step": 16030, "train/loss_ctc": 0.8229561448097229, "train/loss_error": 0.4040662944316864, "train/loss_total": 0.4878442883491516 }, { "epoch": 4.2829281325140265, "step": 16031, "train/loss_ctc": 0.4322877526283264, "train/loss_error": 0.4480826258659363, "train/loss_total": 0.44492366909980774 }, { "epoch": 4.283195297889393, "step": 16032, "train/loss_ctc": 0.9013859033584595, "train/loss_error": 0.4508073329925537, "train/loss_total": 0.5409230589866638 }, { "epoch": 4.283462463264761, "step": 16033, "train/loss_ctc": 0.6389062404632568, "train/loss_error": 
0.44104665517807007, "train/loss_total": 0.48061859607696533 }, { "epoch": 4.2837296286401285, "step": 16034, "train/loss_ctc": 0.8329466581344604, "train/loss_error": 0.4413353502750397, "train/loss_total": 0.5196576118469238 }, { "epoch": 4.283996794015495, "step": 16035, "train/loss_ctc": 0.34680962562561035, "train/loss_error": 0.4884064197540283, "train/loss_total": 0.4600870609283447 }, { "epoch": 4.284263959390863, "step": 16036, "train/loss_ctc": 1.0485937595367432, "train/loss_error": 0.5005646347999573, "train/loss_total": 0.6101704835891724 }, { "epoch": 4.2845311247662305, "step": 16037, "train/loss_ctc": 0.5749715566635132, "train/loss_error": 0.3364448547363281, "train/loss_total": 0.3841502070426941 }, { "epoch": 4.284798290141597, "step": 16038, "train/loss_ctc": 0.6518654823303223, "train/loss_error": 0.455104261636734, "train/loss_total": 0.49445652961730957 }, { "epoch": 4.285065455516965, "step": 16039, "train/loss_ctc": 1.846701741218567, "train/loss_error": 0.4309922456741333, "train/loss_total": 0.714134156703949 }, { "epoch": 4.285332620892333, "grad_norm": 2.3223936557769775, "learning_rate": 4.302431204915843e-06, "loss": 0.5137, "step": 16040 }, { "epoch": 4.285332620892333, "step": 16040, "train/loss_ctc": 0.8717292547225952, "train/loss_error": 0.43488967418670654, "train/loss_total": 0.5222575664520264 }, { "epoch": 4.285599786267699, "step": 16041, "train/loss_ctc": 0.746199905872345, "train/loss_error": 0.4544295370578766, "train/loss_total": 0.5127836465835571 }, { "epoch": 4.285866951643067, "step": 16042, "train/loss_ctc": 0.9561260938644409, "train/loss_error": 0.49568235874176025, "train/loss_total": 0.5877711176872253 }, { "epoch": 4.286134117018435, "step": 16043, "train/loss_ctc": 0.4572643041610718, "train/loss_error": 0.4567413926124573, "train/loss_total": 0.4568459987640381 }, { "epoch": 4.286401282393801, "step": 16044, "train/loss_ctc": 0.755046010017395, "train/loss_error": 0.4273557662963867, "train/loss_total": 
0.4928938150405884 }, { "epoch": 4.286668447769169, "step": 16045, "train/loss_ctc": 1.0883562564849854, "train/loss_error": 0.41223248839378357, "train/loss_total": 0.5474572777748108 }, { "epoch": 4.286935613144537, "step": 16046, "train/loss_ctc": 0.6677026152610779, "train/loss_error": 0.3898575007915497, "train/loss_total": 0.4454265236854553 }, { "epoch": 4.287202778519903, "step": 16047, "train/loss_ctc": 1.046420693397522, "train/loss_error": 0.4652212858200073, "train/loss_total": 0.5814611911773682 }, { "epoch": 4.287469943895271, "step": 16048, "train/loss_ctc": 0.8428167104721069, "train/loss_error": 0.46856021881103516, "train/loss_total": 0.5434115529060364 }, { "epoch": 4.287737109270639, "step": 16049, "train/loss_ctc": 0.5059165954589844, "train/loss_error": 0.4303969144821167, "train/loss_total": 0.44550085067749023 }, { "epoch": 4.288004274646006, "grad_norm": 2.220552682876587, "learning_rate": 4.286401282393801e-06, "loss": 0.5136, "step": 16050 }, { "epoch": 4.288004274646006, "step": 16050, "train/loss_ctc": 0.43385934829711914, "train/loss_error": 0.4006636440753937, "train/loss_total": 0.4073027968406677 }, { "epoch": 4.288271440021373, "step": 16051, "train/loss_ctc": 0.37984028458595276, "train/loss_error": 0.3929778039455414, "train/loss_total": 0.3903503119945526 }, { "epoch": 4.288538605396741, "step": 16052, "train/loss_ctc": 0.8085825443267822, "train/loss_error": 0.48788896203041077, "train/loss_total": 0.552027702331543 }, { "epoch": 4.288805770772108, "step": 16053, "train/loss_ctc": 0.5714466571807861, "train/loss_error": 0.42608410120010376, "train/loss_total": 0.4551566243171692 }, { "epoch": 4.289072936147475, "step": 16054, "train/loss_ctc": 0.9043881893157959, "train/loss_error": 0.47527244687080383, "train/loss_total": 0.5610955953598022 }, { "epoch": 4.289340101522843, "step": 16055, "train/loss_ctc": 0.6366492509841919, "train/loss_error": 0.43156808614730835, "train/loss_total": 0.4725843369960785 }, { "epoch": 
4.28960726689821, "step": 16056, "train/loss_ctc": 1.0547232627868652, "train/loss_error": 0.4388098120689392, "train/loss_total": 0.5619925260543823 }, { "epoch": 4.289874432273577, "step": 16057, "train/loss_ctc": 0.3414105176925659, "train/loss_error": 0.38778960704803467, "train/loss_total": 0.37851378321647644 }, { "epoch": 4.290141597648945, "step": 16058, "train/loss_ctc": 0.3461706340312958, "train/loss_error": 0.4365682899951935, "train/loss_total": 0.4184887707233429 }, { "epoch": 4.290408763024312, "step": 16059, "train/loss_ctc": 0.648804783821106, "train/loss_error": 0.43772390484809875, "train/loss_total": 0.47994011640548706 }, { "epoch": 4.290675928399679, "grad_norm": 1.5523594617843628, "learning_rate": 4.270371359871761e-06, "loss": 0.4677, "step": 16060 }, { "epoch": 4.290675928399679, "step": 16060, "train/loss_ctc": 0.7885018587112427, "train/loss_error": 0.4749760627746582, "train/loss_total": 0.5376812219619751 }, { "epoch": 4.290943093775047, "step": 16061, "train/loss_ctc": 1.062821388244629, "train/loss_error": 0.43792352080345154, "train/loss_total": 0.562903106212616 }, { "epoch": 4.291210259150414, "step": 16062, "train/loss_ctc": 0.41953274607658386, "train/loss_error": 0.4315180778503418, "train/loss_total": 0.4291210174560547 }, { "epoch": 4.291477424525781, "step": 16063, "train/loss_ctc": 0.512198805809021, "train/loss_error": 0.47012659907341003, "train/loss_total": 0.4785410463809967 }, { "epoch": 4.291744589901149, "step": 16064, "train/loss_ctc": 1.0380229949951172, "train/loss_error": 0.44205227494239807, "train/loss_total": 0.5612464547157288 }, { "epoch": 4.2920117552765165, "step": 16065, "train/loss_ctc": 0.9196829795837402, "train/loss_error": 0.47723349928855896, "train/loss_total": 0.5657234191894531 }, { "epoch": 4.292278920651883, "step": 16066, "train/loss_ctc": 0.4583721160888672, "train/loss_error": 0.45135897397994995, "train/loss_total": 0.45276159048080444 }, { "epoch": 4.292546086027251, "step": 16067, 
"train/loss_ctc": 1.0078060626983643, "train/loss_error": 0.43174391984939575, "train/loss_total": 0.5469563603401184 }, { "epoch": 4.2928132514026185, "step": 16068, "train/loss_ctc": 0.2897101044654846, "train/loss_error": 0.39139631390571594, "train/loss_total": 0.3710590898990631 }, { "epoch": 4.293080416777985, "step": 16069, "train/loss_ctc": 0.4976245164871216, "train/loss_error": 0.47026753425598145, "train/loss_total": 0.4757389426231384 }, { "epoch": 4.293347582153353, "grad_norm": 1.2039892673492432, "learning_rate": 4.25434143734972e-06, "loss": 0.4982, "step": 16070 }, { "epoch": 4.293347582153353, "step": 16070, "train/loss_ctc": 0.4519115388393402, "train/loss_error": 0.44604772329330444, "train/loss_total": 0.44722050428390503 }, { "epoch": 4.2936147475287205, "step": 16071, "train/loss_ctc": 0.8442212343215942, "train/loss_error": 0.45214715600013733, "train/loss_total": 0.5305619835853577 }, { "epoch": 4.293881912904087, "step": 16072, "train/loss_ctc": 0.5022208094596863, "train/loss_error": 0.46400630474090576, "train/loss_total": 0.4716492295265198 }, { "epoch": 4.294149078279455, "step": 16073, "train/loss_ctc": 0.49258968234062195, "train/loss_error": 0.444430410861969, "train/loss_total": 0.454062283039093 }, { "epoch": 4.2944162436548226, "step": 16074, "train/loss_ctc": 0.40521448850631714, "train/loss_error": 0.40641090273857117, "train/loss_total": 0.40617161989212036 }, { "epoch": 4.294683409030189, "step": 16075, "train/loss_ctc": 0.6678813099861145, "train/loss_error": 0.3916999399662018, "train/loss_total": 0.4469361901283264 }, { "epoch": 4.294950574405557, "step": 16076, "train/loss_ctc": 0.6294475793838501, "train/loss_error": 0.4208986759185791, "train/loss_total": 0.4626084566116333 }, { "epoch": 4.295217739780925, "step": 16077, "train/loss_ctc": 0.6605997681617737, "train/loss_error": 0.4370076656341553, "train/loss_total": 0.4817260801792145 }, { "epoch": 4.295484905156291, "step": 16078, "train/loss_ctc": 0.496498167514801, 
"train/loss_error": 0.4414049983024597, "train/loss_total": 0.452423632144928 }, { "epoch": 4.295752070531659, "step": 16079, "train/loss_ctc": 0.6545510292053223, "train/loss_error": 0.38805556297302246, "train/loss_total": 0.4413546323776245 }, { "epoch": 4.296019235907027, "grad_norm": 2.238238573074341, "learning_rate": 4.238311514827679e-06, "loss": 0.4595, "step": 16080 }, { "epoch": 4.296019235907027, "step": 16080, "train/loss_ctc": 0.6902550458908081, "train/loss_error": 0.43303197622299194, "train/loss_total": 0.48447662591934204 }, { "epoch": 4.296286401282394, "step": 16081, "train/loss_ctc": 0.6031759977340698, "train/loss_error": 0.45083868503570557, "train/loss_total": 0.48130616545677185 }, { "epoch": 4.296553566657761, "step": 16082, "train/loss_ctc": 0.2772362232208252, "train/loss_error": 0.4363142251968384, "train/loss_total": 0.4044986367225647 }, { "epoch": 4.296820732033129, "step": 16083, "train/loss_ctc": 0.6572377681732178, "train/loss_error": 0.4145578444004059, "train/loss_total": 0.4630938470363617 }, { "epoch": 4.297087897408495, "step": 16084, "train/loss_ctc": 0.3713570237159729, "train/loss_error": 0.4132949411869049, "train/loss_total": 0.40490737557411194 }, { "epoch": 4.297355062783863, "step": 16085, "train/loss_ctc": 1.0235615968704224, "train/loss_error": 0.43766331672668457, "train/loss_total": 0.5548429489135742 }, { "epoch": 4.297622228159231, "step": 16086, "train/loss_ctc": 0.6511870622634888, "train/loss_error": 0.4550018608570099, "train/loss_total": 0.4942389130592346 }, { "epoch": 4.297889393534598, "step": 16087, "train/loss_ctc": 0.4065232276916504, "train/loss_error": 0.4463198781013489, "train/loss_total": 0.4383605420589447 }, { "epoch": 4.298156558909965, "step": 16088, "train/loss_ctc": 0.6046506762504578, "train/loss_error": 0.41603052616119385, "train/loss_total": 0.45375457406044006 }, { "epoch": 4.298423724285333, "step": 16089, "train/loss_ctc": 0.7598058581352234, "train/loss_error": 0.392602801322937, 
"train/loss_total": 0.4660434126853943 }, { "epoch": 4.2986908896607, "grad_norm": 1.9751003980636597, "learning_rate": 4.222281592305637e-06, "loss": 0.4646, "step": 16090 }, { "epoch": 4.2986908896607, "step": 16090, "train/loss_ctc": 0.29884350299835205, "train/loss_error": 0.3232611119747162, "train/loss_total": 0.31837761402130127 }, { "epoch": 4.298958055036067, "step": 16091, "train/loss_ctc": 0.5232911109924316, "train/loss_error": 0.4383111596107483, "train/loss_total": 0.45530715584754944 }, { "epoch": 4.299225220411435, "step": 16092, "train/loss_ctc": 0.535622239112854, "train/loss_error": 0.3959636390209198, "train/loss_total": 0.42389535903930664 }, { "epoch": 4.299492385786802, "step": 16093, "train/loss_ctc": 0.8120225667953491, "train/loss_error": 0.43248650431632996, "train/loss_total": 0.5083937048912048 }, { "epoch": 4.299759551162169, "step": 16094, "train/loss_ctc": 0.7332333326339722, "train/loss_error": 0.46303507685661316, "train/loss_total": 0.517074704170227 }, { "epoch": 4.300026716537537, "step": 16095, "train/loss_ctc": 0.3443909287452698, "train/loss_error": 0.41131460666656494, "train/loss_total": 0.3979299068450928 }, { "epoch": 4.300293881912904, "step": 16096, "train/loss_ctc": 0.46597519516944885, "train/loss_error": 0.3910239338874817, "train/loss_total": 0.40601420402526855 }, { "epoch": 4.300561047288271, "step": 16097, "train/loss_ctc": 0.6210128664970398, "train/loss_error": 0.42351076006889343, "train/loss_total": 0.4630112051963806 }, { "epoch": 4.300828212663639, "step": 16098, "train/loss_ctc": 0.9287223815917969, "train/loss_error": 0.4498650133609772, "train/loss_total": 0.5456364750862122 }, { "epoch": 4.301095378039006, "step": 16099, "train/loss_ctc": 1.0261082649230957, "train/loss_error": 0.45836153626441956, "train/loss_total": 0.5719108581542969 }, { "epoch": 4.301362543414373, "grad_norm": 1.3228726387023926, "learning_rate": 4.206251669783596e-06, "loss": 0.4608, "step": 16100 }, { "epoch": 4.301362543414373, 
"step": 16100, "train/loss_ctc": 1.4277660846710205, "train/loss_error": 0.4256678819656372, "train/loss_total": 0.6260875463485718 }, { "epoch": 4.301629708789741, "step": 16101, "train/loss_ctc": 0.5659396648406982, "train/loss_error": 0.39628374576568604, "train/loss_total": 0.43021494150161743 }, { "epoch": 4.3018968741651085, "step": 16102, "train/loss_ctc": 1.0505621433258057, "train/loss_error": 0.48898208141326904, "train/loss_total": 0.6012980937957764 }, { "epoch": 4.302164039540475, "step": 16103, "train/loss_ctc": 1.0043960809707642, "train/loss_error": 0.44752395153045654, "train/loss_total": 0.558898389339447 }, { "epoch": 4.302431204915843, "step": 16104, "train/loss_ctc": 0.9076722860336304, "train/loss_error": 0.42562469840049744, "train/loss_total": 0.522034227848053 }, { "epoch": 4.3026983702912105, "step": 16105, "train/loss_ctc": 0.7655891180038452, "train/loss_error": 0.437764436006546, "train/loss_total": 0.5033293962478638 }, { "epoch": 4.302965535666577, "step": 16106, "train/loss_ctc": 1.4074485301971436, "train/loss_error": 0.4513891339302063, "train/loss_total": 0.6426010131835938 }, { "epoch": 4.303232701041945, "step": 16107, "train/loss_ctc": 0.3781144618988037, "train/loss_error": 0.4752524495124817, "train/loss_total": 0.4558248519897461 }, { "epoch": 4.3034998664173125, "step": 16108, "train/loss_ctc": 0.6817789077758789, "train/loss_error": 0.5195634365081787, "train/loss_total": 0.5520065426826477 }, { "epoch": 4.303767031792679, "step": 16109, "train/loss_ctc": 0.4348853528499603, "train/loss_error": 0.3507615029811859, "train/loss_total": 0.36758628487586975 }, { "epoch": 4.304034197168047, "grad_norm": 1.8878706693649292, "learning_rate": 4.190221747261556e-06, "loss": 0.526, "step": 16110 }, { "epoch": 4.304034197168047, "step": 16110, "train/loss_ctc": 0.4966769814491272, "train/loss_error": 0.42201122641563416, "train/loss_total": 0.4369443953037262 }, { "epoch": 4.3043013625434146, "step": 16111, "train/loss_ctc": 
0.7527393102645874, "train/loss_error": 0.42627277970314026, "train/loss_total": 0.49156609177589417 }, { "epoch": 4.304568527918782, "step": 16112, "train/loss_ctc": 0.5731561183929443, "train/loss_error": 0.41758161783218384, "train/loss_total": 0.4486965239048004 }, { "epoch": 4.304835693294149, "step": 16113, "train/loss_ctc": 0.5845211744308472, "train/loss_error": 0.40608078241348267, "train/loss_total": 0.4417688548564911 }, { "epoch": 4.305102858669517, "step": 16114, "train/loss_ctc": 0.4253321886062622, "train/loss_error": 0.41234782338142395, "train/loss_total": 0.41494470834732056 }, { "epoch": 4.305370024044883, "step": 16115, "train/loss_ctc": 0.5118842124938965, "train/loss_error": 0.38835087418556213, "train/loss_total": 0.4130575656890869 }, { "epoch": 4.305637189420251, "step": 16116, "train/loss_ctc": 0.5340860486030579, "train/loss_error": 0.40500468015670776, "train/loss_total": 0.4308209717273712 }, { "epoch": 4.305904354795619, "step": 16117, "train/loss_ctc": 0.35188961029052734, "train/loss_error": 0.5006512999534607, "train/loss_total": 0.47089895606040955 }, { "epoch": 4.306171520170986, "step": 16118, "train/loss_ctc": 0.7169599533081055, "train/loss_error": 0.4863535165786743, "train/loss_total": 0.5324748158454895 }, { "epoch": 4.306438685546353, "step": 16119, "train/loss_ctc": 0.5102512836456299, "train/loss_error": 0.4547227919101715, "train/loss_total": 0.46582847833633423 }, { "epoch": 4.306705850921721, "grad_norm": 1.8355224132537842, "learning_rate": 4.174191824739514e-06, "loss": 0.4547, "step": 16120 }, { "epoch": 4.306705850921721, "step": 16120, "train/loss_ctc": 0.40535157918930054, "train/loss_error": 0.4629475474357605, "train/loss_total": 0.4514283537864685 }, { "epoch": 4.306973016297088, "step": 16121, "train/loss_ctc": 0.36257457733154297, "train/loss_error": 0.4464985430240631, "train/loss_total": 0.42971375584602356 }, { "epoch": 4.307240181672455, "step": 16122, "train/loss_ctc": 0.48476412892341614, 
"train/loss_error": 0.411832720041275, "train/loss_total": 0.4264190196990967 }, { "epoch": 4.307507347047823, "step": 16123, "train/loss_ctc": 0.8996222615242004, "train/loss_error": 0.40878409147262573, "train/loss_total": 0.5069517493247986 }, { "epoch": 4.30777451242319, "step": 16124, "train/loss_ctc": 0.4720141887664795, "train/loss_error": 0.3835720419883728, "train/loss_total": 0.40126049518585205 }, { "epoch": 4.308041677798557, "step": 16125, "train/loss_ctc": 0.7718561291694641, "train/loss_error": 0.514241635799408, "train/loss_total": 0.5657645463943481 }, { "epoch": 4.308308843173925, "step": 16126, "train/loss_ctc": 1.0771697759628296, "train/loss_error": 0.4673159420490265, "train/loss_total": 0.5892866849899292 }, { "epoch": 4.308576008549292, "step": 16127, "train/loss_ctc": 0.7554323673248291, "train/loss_error": 0.38026347756385803, "train/loss_total": 0.4552972614765167 }, { "epoch": 4.308843173924659, "step": 16128, "train/loss_ctc": 0.5070303678512573, "train/loss_error": 0.39579200744628906, "train/loss_total": 0.4180396795272827 }, { "epoch": 4.309110339300027, "step": 16129, "train/loss_ctc": 0.8813711404800415, "train/loss_error": 0.41425615549087524, "train/loss_total": 0.5076791644096375 }, { "epoch": 4.309377504675394, "grad_norm": 2.395308494567871, "learning_rate": 4.158161902217473e-06, "loss": 0.4752, "step": 16130 }, { "epoch": 4.309377504675394, "step": 16130, "train/loss_ctc": 0.8310955762863159, "train/loss_error": 0.4878872334957123, "train/loss_total": 0.5565289258956909 }, { "epoch": 4.309644670050761, "step": 16131, "train/loss_ctc": 0.5402239561080933, "train/loss_error": 0.4086662828922272, "train/loss_total": 0.43497782945632935 }, { "epoch": 4.309911835426129, "step": 16132, "train/loss_ctc": 0.31828898191452026, "train/loss_error": 0.3791233003139496, "train/loss_total": 0.3669564425945282 }, { "epoch": 4.310179000801496, "step": 16133, "train/loss_ctc": 0.6734172105789185, "train/loss_error": 0.41259628534317017, 
"train/loss_total": 0.4647604823112488 }, { "epoch": 4.310446166176863, "step": 16134, "train/loss_ctc": 0.6389627456665039, "train/loss_error": 0.43244487047195435, "train/loss_total": 0.47374844551086426 }, { "epoch": 4.310713331552231, "step": 16135, "train/loss_ctc": 0.5402156710624695, "train/loss_error": 0.3788819909095764, "train/loss_total": 0.41114872694015503 }, { "epoch": 4.310980496927598, "step": 16136, "train/loss_ctc": 0.6875633597373962, "train/loss_error": 0.4307825565338135, "train/loss_total": 0.4821386933326721 }, { "epoch": 4.311247662302965, "step": 16137, "train/loss_ctc": 0.5825812816619873, "train/loss_error": 0.4407794177532196, "train/loss_total": 0.46913981437683105 }, { "epoch": 4.311514827678333, "step": 16138, "train/loss_ctc": 0.3658885657787323, "train/loss_error": 0.3515457212924957, "train/loss_total": 0.35441431403160095 }, { "epoch": 4.3117819930537005, "step": 16139, "train/loss_ctc": 0.597284734249115, "train/loss_error": 0.4465930163860321, "train/loss_total": 0.4767313599586487 }, { "epoch": 4.312049158429067, "grad_norm": 1.888403058052063, "learning_rate": 4.142131979695432e-06, "loss": 0.4491, "step": 16140 }, { "epoch": 4.312049158429067, "step": 16140, "train/loss_ctc": 0.7258272171020508, "train/loss_error": 0.4777452349662781, "train/loss_total": 0.5273616313934326 }, { "epoch": 4.312316323804435, "step": 16141, "train/loss_ctc": 1.0768070220947266, "train/loss_error": 0.464034765958786, "train/loss_total": 0.5865892171859741 }, { "epoch": 4.3125834891798025, "step": 16142, "train/loss_ctc": 0.3685551583766937, "train/loss_error": 0.44682008028030396, "train/loss_total": 0.4311670958995819 }, { "epoch": 4.312850654555169, "step": 16143, "train/loss_ctc": 0.32753288745880127, "train/loss_error": 0.4369855523109436, "train/loss_total": 0.4150950312614441 }, { "epoch": 4.313117819930537, "step": 16144, "train/loss_ctc": 0.47831135988235474, "train/loss_error": 0.4133448302745819, "train/loss_total": 0.4263381361961365 }, 
{ "epoch": 4.3133849853059045, "step": 16145, "train/loss_ctc": 0.5459296703338623, "train/loss_error": 0.41050535440444946, "train/loss_total": 0.43759021162986755 }, { "epoch": 4.313652150681271, "step": 16146, "train/loss_ctc": 0.6848328709602356, "train/loss_error": 0.4890207350254059, "train/loss_total": 0.5281831622123718 }, { "epoch": 4.313919316056639, "step": 16147, "train/loss_ctc": 0.2717173993587494, "train/loss_error": 0.4675084948539734, "train/loss_total": 0.4283502995967865 }, { "epoch": 4.314186481432007, "step": 16148, "train/loss_ctc": 1.1160855293273926, "train/loss_error": 0.4910651445388794, "train/loss_total": 0.6160692572593689 }, { "epoch": 4.314453646807374, "step": 16149, "train/loss_ctc": 0.8747773170471191, "train/loss_error": 0.4756660461425781, "train/loss_total": 0.5554882884025574 }, { "epoch": 4.314720812182741, "grad_norm": 1.6645687818527222, "learning_rate": 4.1261020571733905e-06, "loss": 0.4952, "step": 16150 }, { "epoch": 4.314720812182741, "step": 16150, "train/loss_ctc": 0.5291872620582581, "train/loss_error": 0.4842536151409149, "train/loss_total": 0.4932403564453125 }, { "epoch": 4.314987977558109, "step": 16151, "train/loss_ctc": 0.5386176109313965, "train/loss_error": 0.5051646828651428, "train/loss_total": 0.5118552446365356 }, { "epoch": 4.315255142933476, "step": 16152, "train/loss_ctc": 0.5692384243011475, "train/loss_error": 0.4064710736274719, "train/loss_total": 0.43902456760406494 }, { "epoch": 4.315522308308843, "step": 16153, "train/loss_ctc": 0.9842784404754639, "train/loss_error": 0.4489489197731018, "train/loss_total": 0.5560148358345032 }, { "epoch": 4.315789473684211, "step": 16154, "train/loss_ctc": 0.547150731086731, "train/loss_error": 0.3569074273109436, "train/loss_total": 0.394956111907959 }, { "epoch": 4.316056639059578, "step": 16155, "train/loss_ctc": 0.7255649566650391, "train/loss_error": 0.45363542437553406, "train/loss_total": 0.508021354675293 }, { "epoch": 4.316323804434945, "step": 16156, 
"train/loss_ctc": 0.4842389225959778, "train/loss_error": 0.4267962574958801, "train/loss_total": 0.43828481435775757 }, { "epoch": 4.316590969810313, "step": 16157, "train/loss_ctc": 0.8076158761978149, "train/loss_error": 0.4187392294406891, "train/loss_total": 0.49651455879211426 }, { "epoch": 4.31685813518568, "step": 16158, "train/loss_ctc": 0.9726582765579224, "train/loss_error": 0.4429657757282257, "train/loss_total": 0.548904299736023 }, { "epoch": 4.317125300561047, "step": 16159, "train/loss_ctc": 0.5072237849235535, "train/loss_error": 0.4095650315284729, "train/loss_total": 0.4290967881679535 }, { "epoch": 4.317392465936415, "grad_norm": 2.0698635578155518, "learning_rate": 4.1100721346513495e-06, "loss": 0.4816, "step": 16160 }, { "epoch": 4.317392465936415, "step": 16160, "train/loss_ctc": 0.5099486708641052, "train/loss_error": 0.4749351739883423, "train/loss_total": 0.48193788528442383 }, { "epoch": 4.317659631311782, "step": 16161, "train/loss_ctc": 0.6039232015609741, "train/loss_error": 0.4772259593009949, "train/loss_total": 0.5025653839111328 }, { "epoch": 4.317926796687149, "step": 16162, "train/loss_ctc": 0.4062628448009491, "train/loss_error": 0.48748406767845154, "train/loss_total": 0.47123983502388 }, { "epoch": 4.318193962062517, "step": 16163, "train/loss_ctc": 0.6038743257522583, "train/loss_error": 0.35478436946868896, "train/loss_total": 0.40460237860679626 }, { "epoch": 4.318461127437884, "step": 16164, "train/loss_ctc": 0.1921829879283905, "train/loss_error": 0.35729917883872986, "train/loss_total": 0.324275940656662 }, { "epoch": 4.318728292813251, "step": 16165, "train/loss_ctc": 0.7966593503952026, "train/loss_error": 0.46377766132354736, "train/loss_total": 0.5303540229797363 }, { "epoch": 4.318995458188619, "step": 16166, "train/loss_ctc": 0.8242194056510925, "train/loss_error": 0.5113429427146912, "train/loss_total": 0.5739182233810425 }, { "epoch": 4.319262623563986, "step": 16167, "train/loss_ctc": 1.3100168704986572, 
"train/loss_error": 0.45876142382621765, "train/loss_total": 0.6290125250816345 }, { "epoch": 4.319529788939353, "step": 16168, "train/loss_ctc": 1.1250710487365723, "train/loss_error": 0.41523849964141846, "train/loss_total": 0.5572050213813782 }, { "epoch": 4.319796954314721, "step": 16169, "train/loss_ctc": 0.42680105566978455, "train/loss_error": 0.48601624369621277, "train/loss_total": 0.4741731882095337 }, { "epoch": 4.320064119690088, "grad_norm": 2.6375951766967773, "learning_rate": 4.094042212129308e-06, "loss": 0.4949, "step": 16170 }, { "epoch": 4.320064119690088, "step": 16170, "train/loss_ctc": 0.4381892681121826, "train/loss_error": 0.41437578201293945, "train/loss_total": 0.41913849115371704 }, { "epoch": 4.320331285065455, "step": 16171, "train/loss_ctc": 0.6113446950912476, "train/loss_error": 0.48066845536231995, "train/loss_total": 0.5068036913871765 }, { "epoch": 4.320598450440823, "step": 16172, "train/loss_ctc": 0.8897289633750916, "train/loss_error": 0.43352431058883667, "train/loss_total": 0.5247652530670166 }, { "epoch": 4.32086561581619, "step": 16173, "train/loss_ctc": 0.6600528955459595, "train/loss_error": 0.42693379521369934, "train/loss_total": 0.47355762124061584 }, { "epoch": 4.321132781191557, "step": 16174, "train/loss_ctc": 0.888788104057312, "train/loss_error": 0.4577483534812927, "train/loss_total": 0.5439562797546387 }, { "epoch": 4.321399946566925, "step": 16175, "train/loss_ctc": 0.4467569589614868, "train/loss_error": 0.4143742024898529, "train/loss_total": 0.4208507537841797 }, { "epoch": 4.3216671119422925, "step": 16176, "train/loss_ctc": 0.6144151091575623, "train/loss_error": 0.5023909211158752, "train/loss_total": 0.5247957706451416 }, { "epoch": 4.321934277317659, "step": 16177, "train/loss_ctc": 0.7163572311401367, "train/loss_error": 0.41553518176078796, "train/loss_total": 0.47569960355758667 }, { "epoch": 4.322201442693027, "step": 16178, "train/loss_ctc": 0.6451267600059509, "train/loss_error": 
0.4359486997127533, "train/loss_total": 0.47778433561325073 }, { "epoch": 4.3224686080683945, "step": 16179, "train/loss_ctc": 0.9333580732345581, "train/loss_error": 0.4389151930809021, "train/loss_total": 0.5378037691116333 }, { "epoch": 4.322735773443762, "grad_norm": 2.5226681232452393, "learning_rate": 4.0780122896072665e-06, "loss": 0.4905, "step": 16180 }, { "epoch": 4.322735773443762, "step": 16180, "train/loss_ctc": 0.2819477617740631, "train/loss_error": 0.3853520452976227, "train/loss_total": 0.36467117071151733 }, { "epoch": 4.323002938819129, "step": 16181, "train/loss_ctc": 0.8571372628211975, "train/loss_error": 0.4005253314971924, "train/loss_total": 0.4918477237224579 }, { "epoch": 4.3232701041944965, "step": 16182, "train/loss_ctc": 0.7136874198913574, "train/loss_error": 0.45982101559638977, "train/loss_total": 0.5105943083763123 }, { "epoch": 4.323537269569863, "step": 16183, "train/loss_ctc": 1.9356409311294556, "train/loss_error": 0.47918251156806946, "train/loss_total": 0.7704741954803467 }, { "epoch": 4.323804434945231, "step": 16184, "train/loss_ctc": 0.7698961496353149, "train/loss_error": 0.44867703318595886, "train/loss_total": 0.5129208564758301 }, { "epoch": 4.324071600320599, "step": 16185, "train/loss_ctc": 0.6213198900222778, "train/loss_error": 0.38241851329803467, "train/loss_total": 0.4301987886428833 }, { "epoch": 4.324338765695966, "step": 16186, "train/loss_ctc": 0.3848731517791748, "train/loss_error": 0.4628259241580963, "train/loss_total": 0.4472353756427765 }, { "epoch": 4.324605931071333, "step": 16187, "train/loss_ctc": 0.44754481315612793, "train/loss_error": 0.43326255679130554, "train/loss_total": 0.436119019985199 }, { "epoch": 4.324873096446701, "step": 16188, "train/loss_ctc": 0.9090979099273682, "train/loss_error": 0.544739305973053, "train/loss_total": 0.6176110506057739 }, { "epoch": 4.325140261822068, "step": 16189, "train/loss_ctc": 0.3783302307128906, "train/loss_error": 0.4004606008529663, "train/loss_total": 
0.3960345387458801 }, { "epoch": 4.325407427197435, "grad_norm": 2.9335885047912598, "learning_rate": 4.061982367085225e-06, "loss": 0.4978, "step": 16190 }, { "epoch": 4.325407427197435, "step": 16190, "train/loss_ctc": 0.9683078527450562, "train/loss_error": 0.43135496973991394, "train/loss_total": 0.5387455224990845 }, { "epoch": 4.325674592572803, "step": 16191, "train/loss_ctc": 0.8609235286712646, "train/loss_error": 0.4308163821697235, "train/loss_total": 0.5168378353118896 }, { "epoch": 4.32594175794817, "step": 16192, "train/loss_ctc": 1.5046950578689575, "train/loss_error": 0.4679873287677765, "train/loss_total": 0.6753288507461548 }, { "epoch": 4.326208923323537, "step": 16193, "train/loss_ctc": 0.344345360994339, "train/loss_error": 0.4098408818244934, "train/loss_total": 0.3967418074607849 }, { "epoch": 4.326476088698905, "step": 16194, "train/loss_ctc": 1.1704601049423218, "train/loss_error": 0.45392659306526184, "train/loss_total": 0.5972332954406738 }, { "epoch": 4.326743254074272, "step": 16195, "train/loss_ctc": 0.8211783766746521, "train/loss_error": 0.47676292061805725, "train/loss_total": 0.5456460118293762 }, { "epoch": 4.327010419449639, "step": 16196, "train/loss_ctc": 0.4669777452945709, "train/loss_error": 0.44656094908714294, "train/loss_total": 0.450644314289093 }, { "epoch": 4.327277584825007, "step": 16197, "train/loss_ctc": 0.6390933394432068, "train/loss_error": 0.4588629901409149, "train/loss_total": 0.4949090778827667 }, { "epoch": 4.327544750200374, "step": 16198, "train/loss_ctc": 1.190590739250183, "train/loss_error": 0.4455774426460266, "train/loss_total": 0.5945801138877869 }, { "epoch": 4.327811915575741, "step": 16199, "train/loss_ctc": 0.6085652112960815, "train/loss_error": 0.4400743246078491, "train/loss_total": 0.47377249598503113 }, { "epoch": 4.328079080951109, "grad_norm": 1.4306284189224243, "learning_rate": 4.045952444563185e-06, "loss": 0.5284, "step": 16200 }, { "epoch": 4.328079080951109, "step": 16200, 
"train/loss_ctc": 0.47017979621887207, "train/loss_error": 0.38432058691978455, "train/loss_total": 0.4014924168586731 }, { "epoch": 4.328346246326476, "step": 16201, "train/loss_ctc": 1.4666544198989868, "train/loss_error": 0.5014074444770813, "train/loss_total": 0.6944568157196045 }, { "epoch": 4.328613411701843, "step": 16202, "train/loss_ctc": 0.2022048830986023, "train/loss_error": 0.43385031819343567, "train/loss_total": 0.38752123713493347 }, { "epoch": 4.328880577077211, "step": 16203, "train/loss_ctc": 0.7745097279548645, "train/loss_error": 0.4204089939594269, "train/loss_total": 0.4912291467189789 }, { "epoch": 4.329147742452578, "step": 16204, "train/loss_ctc": 0.48318615555763245, "train/loss_error": 0.39824801683425903, "train/loss_total": 0.41523563861846924 }, { "epoch": 4.329414907827945, "step": 16205, "train/loss_ctc": 0.39967256784439087, "train/loss_error": 0.3882090747356415, "train/loss_total": 0.3905017673969269 }, { "epoch": 4.329682073203313, "step": 16206, "train/loss_ctc": 0.6157220602035522, "train/loss_error": 0.4025270342826843, "train/loss_total": 0.44516605138778687 }, { "epoch": 4.32994923857868, "step": 16207, "train/loss_ctc": 0.3494786024093628, "train/loss_error": 0.40624889731407166, "train/loss_total": 0.3948948383331299 }, { "epoch": 4.330216403954047, "step": 16208, "train/loss_ctc": 0.9117387533187866, "train/loss_error": 0.5303398966789246, "train/loss_total": 0.606619656085968 }, { "epoch": 4.330483569329415, "step": 16209, "train/loss_ctc": 0.6183282136917114, "train/loss_error": 0.36285001039505005, "train/loss_total": 0.41394567489624023 }, { "epoch": 4.3307507347047824, "grad_norm": 2.928884983062744, "learning_rate": 4.029922522041144e-06, "loss": 0.4641, "step": 16210 }, { "epoch": 4.3307507347047824, "step": 16210, "train/loss_ctc": 0.4671154022216797, "train/loss_error": 0.4061056673526764, "train/loss_total": 0.4183076024055481 }, { "epoch": 4.331017900080149, "step": 16211, "train/loss_ctc": 0.3450244069099426, 
"train/loss_error": 0.3338596820831299, "train/loss_total": 0.33609265089035034 }, { "epoch": 4.331285065455517, "step": 16212, "train/loss_ctc": 0.9586162567138672, "train/loss_error": 0.44275543093681335, "train/loss_total": 0.5459276437759399 }, { "epoch": 4.3315522308308845, "step": 16213, "train/loss_ctc": 0.42680367827415466, "train/loss_error": 0.566096305847168, "train/loss_total": 0.5382378101348877 }, { "epoch": 4.331819396206251, "step": 16214, "train/loss_ctc": 1.7188621759414673, "train/loss_error": 0.49607494473457336, "train/loss_total": 0.7406324148178101 }, { "epoch": 4.332086561581619, "step": 16215, "train/loss_ctc": 0.3969833552837372, "train/loss_error": 0.4173140823841095, "train/loss_total": 0.4132479429244995 }, { "epoch": 4.3323537269569865, "step": 16216, "train/loss_ctc": 0.9258363246917725, "train/loss_error": 0.39922308921813965, "train/loss_total": 0.5045457482337952 }, { "epoch": 4.332620892332354, "step": 16217, "train/loss_ctc": 0.4946104884147644, "train/loss_error": 0.39392709732055664, "train/loss_total": 0.41406378149986267 }, { "epoch": 4.332888057707721, "step": 16218, "train/loss_ctc": 0.5390888452529907, "train/loss_error": 0.3993412256240845, "train/loss_total": 0.42729076743125916 }, { "epoch": 4.3331552230830885, "step": 16219, "train/loss_ctc": 0.6671383380889893, "train/loss_error": 0.418136864900589, "train/loss_total": 0.467937171459198 }, { "epoch": 4.333422388458456, "grad_norm": 1.418319821357727, "learning_rate": 4.013892599519102e-06, "loss": 0.4806, "step": 16220 }, { "epoch": 4.333422388458456, "step": 16220, "train/loss_ctc": 0.5313259363174438, "train/loss_error": 0.461866170167923, "train/loss_total": 0.4757581353187561 }, { "epoch": 4.333689553833823, "step": 16221, "train/loss_ctc": 1.4266657829284668, "train/loss_error": 0.45099037885665894, "train/loss_total": 0.6461254358291626 }, { "epoch": 4.333956719209191, "step": 16222, "train/loss_ctc": 0.815752387046814, "train/loss_error": 0.4827421009540558, 
"train/loss_total": 0.5493441820144653 }, { "epoch": 4.334223884584558, "step": 16223, "train/loss_ctc": 0.5099751949310303, "train/loss_error": 0.38211768865585327, "train/loss_total": 0.4076892137527466 }, { "epoch": 4.334491049959925, "step": 16224, "train/loss_ctc": 0.6017124652862549, "train/loss_error": 0.39060479402542114, "train/loss_total": 0.43282634019851685 }, { "epoch": 4.334758215335293, "step": 16225, "train/loss_ctc": 0.29693543910980225, "train/loss_error": 0.41120854020118713, "train/loss_total": 0.3883539140224457 }, { "epoch": 4.33502538071066, "step": 16226, "train/loss_ctc": 0.5011951327323914, "train/loss_error": 0.4238385260105133, "train/loss_total": 0.43930986523628235 }, { "epoch": 4.335292546086027, "step": 16227, "train/loss_ctc": 0.8831180930137634, "train/loss_error": 0.4866913855075836, "train/loss_total": 0.5659767389297485 }, { "epoch": 4.335559711461395, "step": 16228, "train/loss_ctc": 0.9868652820587158, "train/loss_error": 0.4200480580329895, "train/loss_total": 0.5334115028381348 }, { "epoch": 4.335826876836762, "step": 16229, "train/loss_ctc": 0.5504096746444702, "train/loss_error": 0.4193410873413086, "train/loss_total": 0.44555482268333435 }, { "epoch": 4.336094042212129, "grad_norm": 1.6395516395568848, "learning_rate": 3.997862676997061e-06, "loss": 0.4884, "step": 16230 }, { "epoch": 4.336094042212129, "step": 16230, "train/loss_ctc": 0.5494393706321716, "train/loss_error": 0.4007644057273865, "train/loss_total": 0.430499404668808 }, { "epoch": 4.336361207587497, "step": 16231, "train/loss_ctc": 1.6663804054260254, "train/loss_error": 0.45856261253356934, "train/loss_total": 0.7001261711120605 }, { "epoch": 4.336628372962864, "step": 16232, "train/loss_ctc": 0.8813008069992065, "train/loss_error": 0.45972493290901184, "train/loss_total": 0.5440400838851929 }, { "epoch": 4.336895538338231, "step": 16233, "train/loss_ctc": 0.3156861960887909, "train/loss_error": 0.4172823131084442, "train/loss_total": 0.39696308970451355 
}, { "epoch": 4.337162703713599, "step": 16234, "train/loss_ctc": 0.5604166984558105, "train/loss_error": 0.41345906257629395, "train/loss_total": 0.44285058975219727 }, { "epoch": 4.337429869088966, "step": 16235, "train/loss_ctc": 0.3788054883480072, "train/loss_error": 0.3747886121273041, "train/loss_total": 0.3755919933319092 }, { "epoch": 4.337697034464333, "step": 16236, "train/loss_ctc": 0.46644580364227295, "train/loss_error": 0.45355865359306335, "train/loss_total": 0.4561361074447632 }, { "epoch": 4.337964199839701, "step": 16237, "train/loss_ctc": 0.6845473051071167, "train/loss_error": 0.3748939037322998, "train/loss_total": 0.43682461977005005 }, { "epoch": 4.338231365215068, "step": 16238, "train/loss_ctc": 0.9629631042480469, "train/loss_error": 0.41914916038513184, "train/loss_total": 0.5279119610786438 }, { "epoch": 4.338498530590435, "step": 16239, "train/loss_ctc": 0.6078173518180847, "train/loss_error": 0.3799702823162079, "train/loss_total": 0.42553970217704773 }, { "epoch": 4.338765695965803, "grad_norm": 1.8317818641662598, "learning_rate": 3.981832754475021e-06, "loss": 0.4736, "step": 16240 }, { "epoch": 4.338765695965803, "step": 16240, "train/loss_ctc": 0.38443830609321594, "train/loss_error": 0.4179827570915222, "train/loss_total": 0.41127386689186096 }, { "epoch": 4.33903286134117, "step": 16241, "train/loss_ctc": 0.6252633333206177, "train/loss_error": 0.4855826199054718, "train/loss_total": 0.513518750667572 }, { "epoch": 4.339300026716537, "step": 16242, "train/loss_ctc": 0.6502318382263184, "train/loss_error": 0.4284140467643738, "train/loss_total": 0.4727776050567627 }, { "epoch": 4.339567192091905, "step": 16243, "train/loss_ctc": 0.9100069999694824, "train/loss_error": 0.4473865032196045, "train/loss_total": 0.539910614490509 }, { "epoch": 4.339834357467272, "step": 16244, "train/loss_ctc": 0.41424405574798584, "train/loss_error": 0.45629557967185974, "train/loss_total": 0.44788527488708496 }, { "epoch": 4.340101522842639, 
"step": 16245, "train/loss_ctc": 0.884623646736145, "train/loss_error": 0.4564816355705261, "train/loss_total": 0.5421100854873657 }, { "epoch": 4.340368688218007, "step": 16246, "train/loss_ctc": 0.48038068413734436, "train/loss_error": 0.36174049973487854, "train/loss_total": 0.3854685425758362 }, { "epoch": 4.3406358535933744, "step": 16247, "train/loss_ctc": 0.8105484247207642, "train/loss_error": 0.4356033504009247, "train/loss_total": 0.5105924010276794 }, { "epoch": 4.340903018968742, "step": 16248, "train/loss_ctc": 0.5074243545532227, "train/loss_error": 0.3981054425239563, "train/loss_total": 0.41996923089027405 }, { "epoch": 4.341170184344109, "step": 16249, "train/loss_ctc": 0.503800630569458, "train/loss_error": 0.44030067324638367, "train/loss_total": 0.45300066471099854 }, { "epoch": 4.3414373497194765, "grad_norm": 2.752094268798828, "learning_rate": 3.965802831952979e-06, "loss": 0.4697, "step": 16250 }, { "epoch": 4.3414373497194765, "step": 16250, "train/loss_ctc": 0.9344770908355713, "train/loss_error": 0.5145657062530518, "train/loss_total": 0.5985479950904846 }, { "epoch": 4.341704515094844, "step": 16251, "train/loss_ctc": 0.6391206979751587, "train/loss_error": 0.5307631492614746, "train/loss_total": 0.5524346828460693 }, { "epoch": 4.341971680470211, "step": 16252, "train/loss_ctc": 0.61275315284729, "train/loss_error": 0.49800291657447815, "train/loss_total": 0.5209529399871826 }, { "epoch": 4.3422388458455785, "step": 16253, "train/loss_ctc": 0.5798837542533875, "train/loss_error": 0.46726807951927185, "train/loss_total": 0.48979121446609497 }, { "epoch": 4.342506011220946, "step": 16254, "train/loss_ctc": 0.664792001247406, "train/loss_error": 0.4390297532081604, "train/loss_total": 0.4841821789741516 }, { "epoch": 4.342773176596313, "step": 16255, "train/loss_ctc": 1.0020453929901123, "train/loss_error": 0.4875745177268982, "train/loss_total": 0.59046870470047 }, { "epoch": 4.3430403419716805, "step": 16256, "train/loss_ctc": 
0.40124571323394775, "train/loss_error": 0.44254371523857117, "train/loss_total": 0.43428415060043335 }, { "epoch": 4.343307507347048, "step": 16257, "train/loss_ctc": 0.41551104187965393, "train/loss_error": 0.372420072555542, "train/loss_total": 0.38103824853897095 }, { "epoch": 4.343574672722415, "step": 16258, "train/loss_ctc": 0.5612213611602783, "train/loss_error": 0.4951791763305664, "train/loss_total": 0.5083876252174377 }, { "epoch": 4.343841838097783, "step": 16259, "train/loss_ctc": 1.496289610862732, "train/loss_error": 0.4194146990776062, "train/loss_total": 0.6347897052764893 }, { "epoch": 4.34410900347315, "grad_norm": 1.1545034646987915, "learning_rate": 3.949772909430938e-06, "loss": 0.5195, "step": 16260 }, { "epoch": 4.34410900347315, "step": 16260, "train/loss_ctc": 0.9007505178451538, "train/loss_error": 0.40033796429634094, "train/loss_total": 0.5004205107688904 }, { "epoch": 4.344376168848517, "step": 16261, "train/loss_ctc": 0.47938019037246704, "train/loss_error": 0.4473550021648407, "train/loss_total": 0.4537600576877594 }, { "epoch": 4.344643334223885, "step": 16262, "train/loss_ctc": 1.390574336051941, "train/loss_error": 0.44178950786590576, "train/loss_total": 0.6315464973449707 }, { "epoch": 4.344910499599252, "step": 16263, "train/loss_ctc": 0.4282093346118927, "train/loss_error": 0.3626680374145508, "train/loss_total": 0.3757762908935547 }, { "epoch": 4.345177664974619, "step": 16264, "train/loss_ctc": 0.7401193380355835, "train/loss_error": 0.5396263599395752, "train/loss_total": 0.5797249674797058 }, { "epoch": 4.345444830349987, "step": 16265, "train/loss_ctc": 0.7977148294448853, "train/loss_error": 0.48172527551651, "train/loss_total": 0.5449231863021851 }, { "epoch": 4.345711995725354, "step": 16266, "train/loss_ctc": 0.8164627552032471, "train/loss_error": 0.4190012216567993, "train/loss_total": 0.4984935522079468 }, { "epoch": 4.345979161100721, "step": 16267, "train/loss_ctc": 0.9548357725143433, "train/loss_error": 
0.4663243889808655, "train/loss_total": 0.5640266537666321 }, { "epoch": 4.346246326476089, "step": 16268, "train/loss_ctc": 0.7651224136352539, "train/loss_error": 0.4031468331813812, "train/loss_total": 0.47554194927215576 }, { "epoch": 4.346513491851456, "step": 16269, "train/loss_ctc": 0.9345309734344482, "train/loss_error": 0.5063226222991943, "train/loss_total": 0.5919643044471741 }, { "epoch": 4.346780657226823, "grad_norm": 1.6783959865570068, "learning_rate": 3.933742986908897e-06, "loss": 0.5216, "step": 16270 }, { "epoch": 4.346780657226823, "step": 16270, "train/loss_ctc": 0.5979257822036743, "train/loss_error": 0.4548294246196747, "train/loss_total": 0.48344871401786804 }, { "epoch": 4.347047822602191, "step": 16271, "train/loss_ctc": 0.6426997184753418, "train/loss_error": 0.4436658024787903, "train/loss_total": 0.4834725856781006 }, { "epoch": 4.347314987977558, "step": 16272, "train/loss_ctc": 0.3613433837890625, "train/loss_error": 0.41651785373687744, "train/loss_total": 0.4054829478263855 }, { "epoch": 4.347582153352925, "step": 16273, "train/loss_ctc": 1.330343246459961, "train/loss_error": 0.43421733379364014, "train/loss_total": 0.6134425401687622 }, { "epoch": 4.347849318728293, "step": 16274, "train/loss_ctc": 0.564633846282959, "train/loss_error": 0.39574673771858215, "train/loss_total": 0.42952418327331543 }, { "epoch": 4.34811648410366, "step": 16275, "train/loss_ctc": 0.519614577293396, "train/loss_error": 0.401239275932312, "train/loss_total": 0.4249143600463867 }, { "epoch": 4.348383649479027, "step": 16276, "train/loss_ctc": 0.38446033000946045, "train/loss_error": 0.5066178441047668, "train/loss_total": 0.48218634724617004 }, { "epoch": 4.348650814854395, "step": 16277, "train/loss_ctc": 1.0248091220855713, "train/loss_error": 0.47601765394210815, "train/loss_total": 0.5857759714126587 }, { "epoch": 4.348917980229762, "step": 16278, "train/loss_ctc": 0.8356310725212097, "train/loss_error": 0.45156627893447876, "train/loss_total": 
0.5283792614936829 }, { "epoch": 4.34918514560513, "step": 16279, "train/loss_ctc": 1.0560798645019531, "train/loss_error": 0.47711339592933655, "train/loss_total": 0.5929067134857178 }, { "epoch": 4.349452310980497, "grad_norm": 1.7873417139053345, "learning_rate": 3.917713064386855e-06, "loss": 0.503, "step": 16280 }, { "epoch": 4.349452310980497, "step": 16280, "train/loss_ctc": 1.3500268459320068, "train/loss_error": 0.4584804177284241, "train/loss_total": 0.6367896795272827 }, { "epoch": 4.349719476355864, "step": 16281, "train/loss_ctc": 1.0483874082565308, "train/loss_error": 0.537539005279541, "train/loss_total": 0.6397086977958679 }, { "epoch": 4.349986641731231, "step": 16282, "train/loss_ctc": 0.7161412835121155, "train/loss_error": 0.37818869948387146, "train/loss_total": 0.4457792341709137 }, { "epoch": 4.350253807106599, "step": 16283, "train/loss_ctc": 0.9997451305389404, "train/loss_error": 0.45614197850227356, "train/loss_total": 0.5648626089096069 }, { "epoch": 4.3505209724819665, "step": 16284, "train/loss_ctc": 0.6678832769393921, "train/loss_error": 0.39969584345817566, "train/loss_total": 0.4533333480358124 }, { "epoch": 4.350788137857334, "step": 16285, "train/loss_ctc": 1.3014891147613525, "train/loss_error": 0.43977412581443787, "train/loss_total": 0.6121171712875366 }, { "epoch": 4.351055303232701, "step": 16286, "train/loss_ctc": 1.1112949848175049, "train/loss_error": 0.5231443047523499, "train/loss_total": 0.6407744288444519 }, { "epoch": 4.3513224686080685, "step": 16287, "train/loss_ctc": 0.8025892972946167, "train/loss_error": 0.39752957224845886, "train/loss_total": 0.4785414934158325 }, { "epoch": 4.351589633983436, "step": 16288, "train/loss_ctc": 0.9766806364059448, "train/loss_error": 0.47522324323654175, "train/loss_total": 0.5755147337913513 }, { "epoch": 4.351856799358803, "step": 16289, "train/loss_ctc": 0.6569256782531738, "train/loss_error": 0.4747633635997772, "train/loss_total": 0.5111958384513855 }, { "epoch": 
4.3521239647341705, "grad_norm": 1.3674697875976562, "learning_rate": 3.901683141864815e-06, "loss": 0.5559, "step": 16290 }, { "epoch": 4.3521239647341705, "step": 16290, "train/loss_ctc": 1.1644220352172852, "train/loss_error": 0.4217533469200134, "train/loss_total": 0.5702871084213257 }, { "epoch": 4.352391130109538, "step": 16291, "train/loss_ctc": 0.23017486929893494, "train/loss_error": 0.41368287801742554, "train/loss_total": 0.376981258392334 }, { "epoch": 4.352658295484905, "step": 16292, "train/loss_ctc": 0.7364646196365356, "train/loss_error": 0.47426608204841614, "train/loss_total": 0.526705801486969 }, { "epoch": 4.3529254608602725, "step": 16293, "train/loss_ctc": 0.7690691351890564, "train/loss_error": 0.3718003034591675, "train/loss_total": 0.45125406980514526 }, { "epoch": 4.35319262623564, "step": 16294, "train/loss_ctc": 0.7254173755645752, "train/loss_error": 0.4869406819343567, "train/loss_total": 0.5346360206604004 }, { "epoch": 4.353459791611007, "step": 16295, "train/loss_ctc": 0.42397114634513855, "train/loss_error": 0.4216112494468689, "train/loss_total": 0.4220832288265228 }, { "epoch": 4.353726956986375, "step": 16296, "train/loss_ctc": 1.0367915630340576, "train/loss_error": 0.4410894811153412, "train/loss_total": 0.5602298974990845 }, { "epoch": 4.353994122361742, "step": 16297, "train/loss_ctc": 0.7736940979957581, "train/loss_error": 0.5181707739830017, "train/loss_total": 0.569275438785553 }, { "epoch": 4.354261287737109, "step": 16298, "train/loss_ctc": 0.5994256138801575, "train/loss_error": 0.40175890922546387, "train/loss_total": 0.44129225611686707 }, { "epoch": 4.354528453112477, "step": 16299, "train/loss_ctc": 0.5947726964950562, "train/loss_error": 0.41701868176460266, "train/loss_total": 0.45256948471069336 }, { "epoch": 4.354795618487844, "grad_norm": 4.120889663696289, "learning_rate": 3.885653219342774e-06, "loss": 0.4905, "step": 16300 }, { "epoch": 4.354795618487844, "step": 16300, "train/loss_ctc": 
0.7545129060745239, "train/loss_error": 0.4987233579158783, "train/loss_total": 0.5498812794685364 }, { "epoch": 4.355062783863211, "step": 16301, "train/loss_ctc": 0.8027260899543762, "train/loss_error": 0.41837427020072937, "train/loss_total": 0.4952446222305298 }, { "epoch": 4.355329949238579, "step": 16302, "train/loss_ctc": 0.8003508448600769, "train/loss_error": 0.38180914521217346, "train/loss_total": 0.46551746129989624 }, { "epoch": 4.355597114613946, "step": 16303, "train/loss_ctc": 0.9238290190696716, "train/loss_error": 0.5000824332237244, "train/loss_total": 0.5848317742347717 }, { "epoch": 4.355864279989313, "step": 16304, "train/loss_ctc": 0.4691334664821625, "train/loss_error": 0.4352210462093353, "train/loss_total": 0.4420035481452942 }, { "epoch": 4.356131445364681, "step": 16305, "train/loss_ctc": 0.86570805311203, "train/loss_error": 0.4611954391002655, "train/loss_total": 0.5420979857444763 }, { "epoch": 4.356398610740048, "step": 16306, "train/loss_ctc": 0.6783352494239807, "train/loss_error": 0.40872684121131897, "train/loss_total": 0.46264854073524475 }, { "epoch": 4.356665776115415, "step": 16307, "train/loss_ctc": 0.69249427318573, "train/loss_error": 0.4738782048225403, "train/loss_total": 0.5176014304161072 }, { "epoch": 4.356932941490783, "step": 16308, "train/loss_ctc": 0.7332791686058044, "train/loss_error": 0.477914422750473, "train/loss_total": 0.5289874076843262 }, { "epoch": 4.35720010686615, "step": 16309, "train/loss_ctc": 0.37959858775138855, "train/loss_error": 0.3644232451915741, "train/loss_total": 0.367458313703537 }, { "epoch": 4.357467272241517, "grad_norm": 2.1698124408721924, "learning_rate": 3.869623296820732e-06, "loss": 0.4956, "step": 16310 }, { "epoch": 4.357467272241517, "step": 16310, "train/loss_ctc": 1.1262857913970947, "train/loss_error": 0.4700583517551422, "train/loss_total": 0.6013038158416748 }, { "epoch": 4.357734437616885, "step": 16311, "train/loss_ctc": 0.5936148166656494, "train/loss_error": 
0.3921809494495392, "train/loss_total": 0.4324677288532257 }, { "epoch": 4.358001602992252, "step": 16312, "train/loss_ctc": 0.622104823589325, "train/loss_error": 0.40661635994911194, "train/loss_total": 0.4497140645980835 }, { "epoch": 4.358268768367619, "step": 16313, "train/loss_ctc": 1.1447150707244873, "train/loss_error": 0.4273340106010437, "train/loss_total": 0.5708101987838745 }, { "epoch": 4.358535933742987, "step": 16314, "train/loss_ctc": 0.8805446624755859, "train/loss_error": 0.39083585143089294, "train/loss_total": 0.48877763748168945 }, { "epoch": 4.358803099118354, "step": 16315, "train/loss_ctc": 0.6261684894561768, "train/loss_error": 0.46493759751319885, "train/loss_total": 0.49718379974365234 }, { "epoch": 4.359070264493722, "step": 16316, "train/loss_ctc": 0.4356127977371216, "train/loss_error": 0.365927129983902, "train/loss_total": 0.37986427545547485 }, { "epoch": 4.359337429869089, "step": 16317, "train/loss_ctc": 0.5608099699020386, "train/loss_error": 0.44214531779289246, "train/loss_total": 0.4658782482147217 }, { "epoch": 4.359604595244456, "step": 16318, "train/loss_ctc": 0.33005550503730774, "train/loss_error": 0.44669920206069946, "train/loss_total": 0.42337048053741455 }, { "epoch": 4.359871760619824, "step": 16319, "train/loss_ctc": 0.6589792966842651, "train/loss_error": 0.4858168959617615, "train/loss_total": 0.5204493999481201 }, { "epoch": 4.360138925995191, "grad_norm": 1.5438075065612793, "learning_rate": 3.853593374298691e-06, "loss": 0.483, "step": 16320 }, { "epoch": 4.360138925995191, "step": 16320, "train/loss_ctc": 1.443481206893921, "train/loss_error": 0.47281524538993835, "train/loss_total": 0.6669484376907349 }, { "epoch": 4.3604060913705585, "step": 16321, "train/loss_ctc": 0.5127469897270203, "train/loss_error": 0.4137980043888092, "train/loss_total": 0.43358781933784485 }, { "epoch": 4.360673256745926, "step": 16322, "train/loss_ctc": 0.5009876489639282, "train/loss_error": 0.5072801113128662, "train/loss_total": 
0.5060216188430786 }, { "epoch": 4.360940422121293, "step": 16323, "train/loss_ctc": 0.5857847929000854, "train/loss_error": 0.41701945662498474, "train/loss_total": 0.4507725238800049 }, { "epoch": 4.3612075874966605, "step": 16324, "train/loss_ctc": 0.33861982822418213, "train/loss_error": 0.4368246793746948, "train/loss_total": 0.41718369722366333 }, { "epoch": 4.361474752872028, "step": 16325, "train/loss_ctc": 0.29031285643577576, "train/loss_error": 0.418830543756485, "train/loss_total": 0.3931270241737366 }, { "epoch": 4.361741918247395, "step": 16326, "train/loss_ctc": 0.4558057487010956, "train/loss_error": 0.3946625888347626, "train/loss_total": 0.40689122676849365 }, { "epoch": 4.3620090836227625, "step": 16327, "train/loss_ctc": 0.4826931357383728, "train/loss_error": 0.4213033616542816, "train/loss_total": 0.43358132243156433 }, { "epoch": 4.36227624899813, "step": 16328, "train/loss_ctc": 0.7982333898544312, "train/loss_error": 0.4051923155784607, "train/loss_total": 0.4838005304336548 }, { "epoch": 4.362543414373497, "step": 16329, "train/loss_ctc": 0.5813283920288086, "train/loss_error": 0.39443105459213257, "train/loss_total": 0.43181052803993225 }, { "epoch": 4.3628105797488645, "grad_norm": 1.5862510204315186, "learning_rate": 3.8375634517766504e-06, "loss": 0.4624, "step": 16330 }, { "epoch": 4.3628105797488645, "step": 16330, "train/loss_ctc": 1.1402146816253662, "train/loss_error": 0.3944045901298523, "train/loss_total": 0.5435666441917419 }, { "epoch": 4.363077745124232, "step": 16331, "train/loss_ctc": 0.9066259264945984, "train/loss_error": 0.46037957072257996, "train/loss_total": 0.5496288537979126 }, { "epoch": 4.363344910499599, "step": 16332, "train/loss_ctc": 0.43156248331069946, "train/loss_error": 0.41629499197006226, "train/loss_total": 0.41934850811958313 }, { "epoch": 4.363612075874967, "step": 16333, "train/loss_ctc": 0.42201244831085205, "train/loss_error": 0.4211779534816742, "train/loss_total": 0.4213448762893677 }, { "epoch": 
4.363879241250334, "step": 16334, "train/loss_ctc": 0.9564650654792786, "train/loss_error": 0.37702158093452454, "train/loss_total": 0.4929102659225464 }, { "epoch": 4.364146406625701, "step": 16335, "train/loss_ctc": 0.3552994430065155, "train/loss_error": 0.38799476623535156, "train/loss_total": 0.3814556896686554 }, { "epoch": 4.364413572001069, "step": 16336, "train/loss_ctc": 0.5751473903656006, "train/loss_error": 0.41548290848731995, "train/loss_total": 0.447415828704834 }, { "epoch": 4.364680737376436, "step": 16337, "train/loss_ctc": 0.456148236989975, "train/loss_error": 0.4365358054637909, "train/loss_total": 0.4404582977294922 }, { "epoch": 4.364947902751803, "step": 16338, "train/loss_ctc": 0.34255021810531616, "train/loss_error": 0.436262845993042, "train/loss_total": 0.41752034425735474 }, { "epoch": 4.365215068127171, "step": 16339, "train/loss_ctc": 0.38057881593704224, "train/loss_error": 0.35633617639541626, "train/loss_total": 0.3611847162246704 }, { "epoch": 4.365482233502538, "grad_norm": 1.6655958890914917, "learning_rate": 3.8215335292546085e-06, "loss": 0.4475, "step": 16340 }, { "epoch": 4.365482233502538, "step": 16340, "train/loss_ctc": 1.3869048357009888, "train/loss_error": 0.5623315572738647, "train/loss_total": 0.7272462248802185 }, { "epoch": 4.365749398877905, "step": 16341, "train/loss_ctc": 0.88231360912323, "train/loss_error": 0.405214786529541, "train/loss_total": 0.5006345510482788 }, { "epoch": 4.366016564253273, "step": 16342, "train/loss_ctc": 0.46505969762802124, "train/loss_error": 0.39983969926834106, "train/loss_total": 0.4128836989402771 }, { "epoch": 4.36628372962864, "step": 16343, "train/loss_ctc": 0.4256044626235962, "train/loss_error": 0.38535118103027344, "train/loss_total": 0.3934018313884735 }, { "epoch": 4.366550895004007, "step": 16344, "train/loss_ctc": 0.7221366167068481, "train/loss_error": 0.5145505666732788, "train/loss_total": 0.5560678243637085 }, { "epoch": 4.366818060379375, "step": 16345, 
"train/loss_ctc": 1.0220825672149658, "train/loss_error": 0.47645649313926697, "train/loss_total": 0.5855817198753357 }, { "epoch": 4.367085225754742, "step": 16346, "train/loss_ctc": 0.5310871601104736, "train/loss_error": 0.42895498871803284, "train/loss_total": 0.44938144087791443 }, { "epoch": 4.36735239113011, "step": 16347, "train/loss_ctc": 0.7716257572174072, "train/loss_error": 0.3824937045574188, "train/loss_total": 0.4603201150894165 }, { "epoch": 4.367619556505477, "step": 16348, "train/loss_ctc": 1.0725675821304321, "train/loss_error": 0.49529534578323364, "train/loss_total": 0.6107497811317444 }, { "epoch": 4.367886721880844, "step": 16349, "train/loss_ctc": 0.18632730841636658, "train/loss_error": 0.41364768147468567, "train/loss_total": 0.36818361282348633 }, { "epoch": 4.368153887256212, "grad_norm": 2.899843692779541, "learning_rate": 3.8055036067325674e-06, "loss": 0.5064, "step": 16350 }, { "epoch": 4.368153887256212, "step": 16350, "train/loss_ctc": 0.38647785782814026, "train/loss_error": 0.353593111038208, "train/loss_total": 0.36017006635665894 }, { "epoch": 4.368421052631579, "step": 16351, "train/loss_ctc": 0.8341744542121887, "train/loss_error": 0.45656919479370117, "train/loss_total": 0.5320902466773987 }, { "epoch": 4.368688218006946, "step": 16352, "train/loss_ctc": 0.7595272660255432, "train/loss_error": 0.4161560833454132, "train/loss_total": 0.4848303198814392 }, { "epoch": 4.368955383382314, "step": 16353, "train/loss_ctc": 0.6385660171508789, "train/loss_error": 0.3875894844532013, "train/loss_total": 0.4377847909927368 }, { "epoch": 4.369222548757681, "step": 16354, "train/loss_ctc": 0.7985993027687073, "train/loss_error": 0.38332998752593994, "train/loss_total": 0.4663838744163513 }, { "epoch": 4.369489714133048, "step": 16355, "train/loss_ctc": 0.37321150302886963, "train/loss_error": 0.42422837018966675, "train/loss_total": 0.4140250086784363 }, { "epoch": 4.369756879508416, "step": 16356, "train/loss_ctc": 0.5179771184921265, 
"train/loss_error": 0.5646684765815735, "train/loss_total": 0.555330216884613 }, { "epoch": 4.370024044883783, "step": 16357, "train/loss_ctc": 0.5550246238708496, "train/loss_error": 0.5328449010848999, "train/loss_total": 0.5372808575630188 }, { "epoch": 4.3702912102591505, "step": 16358, "train/loss_ctc": 0.8261359930038452, "train/loss_error": 0.429951936006546, "train/loss_total": 0.5091887712478638 }, { "epoch": 4.370558375634518, "step": 16359, "train/loss_ctc": 0.5911499261856079, "train/loss_error": 0.49720364809036255, "train/loss_total": 0.5159928798675537 }, { "epoch": 4.370825541009885, "grad_norm": 2.3265609741210938, "learning_rate": 3.7894736842105264e-06, "loss": 0.4813, "step": 16360 }, { "epoch": 4.370825541009885, "step": 16360, "train/loss_ctc": 0.8512874841690063, "train/loss_error": 0.4551646113395691, "train/loss_total": 0.5343891978263855 }, { "epoch": 4.3710927063852525, "step": 16361, "train/loss_ctc": 0.46200692653656006, "train/loss_error": 0.44979947805404663, "train/loss_total": 0.4522409737110138 }, { "epoch": 4.37135987176062, "step": 16362, "train/loss_ctc": 0.3432011306285858, "train/loss_error": 0.4064118564128876, "train/loss_total": 0.3937697112560272 }, { "epoch": 4.371627037135987, "step": 16363, "train/loss_ctc": 0.9193334579467773, "train/loss_error": 0.38626566529273987, "train/loss_total": 0.4928792119026184 }, { "epoch": 4.3718942025113545, "step": 16364, "train/loss_ctc": 0.5625831484794617, "train/loss_error": 0.40825608372688293, "train/loss_total": 0.4391215145587921 }, { "epoch": 4.372161367886722, "step": 16365, "train/loss_ctc": 0.6094127297401428, "train/loss_error": 0.4965793788433075, "train/loss_total": 0.5191460251808167 }, { "epoch": 4.372428533262089, "step": 16366, "train/loss_ctc": 1.779481053352356, "train/loss_error": 0.39971020817756653, "train/loss_total": 0.6756643652915955 }, { "epoch": 4.3726956986374566, "step": 16367, "train/loss_ctc": 0.6613143682479858, "train/loss_error": 0.4869454801082611, 
"train/loss_total": 0.5218192338943481 }, { "epoch": 4.372962864012824, "step": 16368, "train/loss_ctc": 0.40039706230163574, "train/loss_error": 0.41486379504203796, "train/loss_total": 0.41197043657302856 }, { "epoch": 4.373230029388191, "step": 16369, "train/loss_ctc": 0.915596604347229, "train/loss_error": 0.4028690755367279, "train/loss_total": 0.505414605140686 }, { "epoch": 4.373497194763559, "grad_norm": 1.7629793882369995, "learning_rate": 3.7734437616884857e-06, "loss": 0.4946, "step": 16370 }, { "epoch": 4.373497194763559, "step": 16370, "train/loss_ctc": 0.7424716949462891, "train/loss_error": 0.4130205810070038, "train/loss_total": 0.47891080379486084 }, { "epoch": 4.373764360138926, "step": 16371, "train/loss_ctc": 0.711164116859436, "train/loss_error": 0.4783111810684204, "train/loss_total": 0.5248817801475525 }, { "epoch": 4.374031525514293, "step": 16372, "train/loss_ctc": 0.17779392004013062, "train/loss_error": 0.42241916060447693, "train/loss_total": 0.37349411845207214 }, { "epoch": 4.374298690889661, "step": 16373, "train/loss_ctc": 0.6337990760803223, "train/loss_error": 0.463701069355011, "train/loss_total": 0.4977206587791443 }, { "epoch": 4.374565856265028, "step": 16374, "train/loss_ctc": 0.769392192363739, "train/loss_error": 0.40348294377326965, "train/loss_total": 0.47666478157043457 }, { "epoch": 4.374833021640395, "step": 16375, "train/loss_ctc": 0.2470169961452484, "train/loss_error": 0.4213843047618866, "train/loss_total": 0.38651084899902344 }, { "epoch": 4.375100187015763, "step": 16376, "train/loss_ctc": 0.8900311589241028, "train/loss_error": 0.418922483921051, "train/loss_total": 0.5131442546844482 }, { "epoch": 4.37536735239113, "step": 16377, "train/loss_ctc": 0.26257044076919556, "train/loss_error": 0.36505579948425293, "train/loss_total": 0.3445587158203125 }, { "epoch": 4.375634517766498, "step": 16378, "train/loss_ctc": 0.5098636150360107, "train/loss_error": 0.3954678177833557, "train/loss_total": 0.41834700107574463 }, 
{ "epoch": 4.375901683141865, "step": 16379, "train/loss_ctc": 0.6197814345359802, "train/loss_error": 0.41515815258026123, "train/loss_total": 0.456082820892334 }, { "epoch": 4.376168848517232, "grad_norm": 1.9661264419555664, "learning_rate": 3.757413839166444e-06, "loss": 0.447, "step": 16380 }, { "epoch": 4.376168848517232, "step": 16380, "train/loss_ctc": 0.2549472451210022, "train/loss_error": 0.40550029277801514, "train/loss_total": 0.3753896951675415 }, { "epoch": 4.376436013892599, "step": 16381, "train/loss_ctc": 1.192697286605835, "train/loss_error": 0.44661954045295715, "train/loss_total": 0.5958350896835327 }, { "epoch": 4.376703179267967, "step": 16382, "train/loss_ctc": 0.9606161117553711, "train/loss_error": 0.40762433409690857, "train/loss_total": 0.5182226896286011 }, { "epoch": 4.376970344643334, "step": 16383, "train/loss_ctc": 1.0586488246917725, "train/loss_error": 0.5035342574119568, "train/loss_total": 0.614557147026062 }, { "epoch": 4.377237510018702, "step": 16384, "train/loss_ctc": 0.45098817348480225, "train/loss_error": 0.40605348348617554, "train/loss_total": 0.41504043340682983 }, { "epoch": 4.377504675394069, "step": 16385, "train/loss_ctc": 1.1960549354553223, "train/loss_error": 0.3750954568386078, "train/loss_total": 0.5392873287200928 }, { "epoch": 4.377771840769436, "step": 16386, "train/loss_ctc": 0.322590172290802, "train/loss_error": 0.35321393609046936, "train/loss_total": 0.3470892012119293 }, { "epoch": 4.378039006144804, "step": 16387, "train/loss_ctc": 0.7838802337646484, "train/loss_error": 0.38427281379699707, "train/loss_total": 0.46419429779052734 }, { "epoch": 4.378306171520171, "step": 16388, "train/loss_ctc": 0.4623716175556183, "train/loss_error": 0.40571942925453186, "train/loss_total": 0.4170498847961426 }, { "epoch": 4.378573336895538, "step": 16389, "train/loss_ctc": 0.2832479476928711, "train/loss_error": 0.38481056690216064, "train/loss_total": 0.3644980490207672 }, { "epoch": 4.378840502270906, 
"grad_norm": 37.92334747314453, "learning_rate": 3.741383916644403e-06, "loss": 0.4651, "step": 16390 }, { "epoch": 4.378840502270906, "step": 16390, "train/loss_ctc": 1.1499500274658203, "train/loss_error": 0.437195748090744, "train/loss_total": 0.5797466039657593 }, { "epoch": 4.379107667646273, "step": 16391, "train/loss_ctc": 0.9177314043045044, "train/loss_error": 0.4688968360424042, "train/loss_total": 0.5586637854576111 }, { "epoch": 4.37937483302164, "step": 16392, "train/loss_ctc": 1.0728495121002197, "train/loss_error": 0.4142940044403076, "train/loss_total": 0.546005129814148 }, { "epoch": 4.379641998397008, "step": 16393, "train/loss_ctc": 0.5303047895431519, "train/loss_error": 0.43846914172172546, "train/loss_total": 0.4568362832069397 }, { "epoch": 4.379909163772375, "step": 16394, "train/loss_ctc": 0.39196211099624634, "train/loss_error": 0.41635215282440186, "train/loss_total": 0.4114741384983063 }, { "epoch": 4.3801763291477425, "step": 16395, "train/loss_ctc": 0.8563632965087891, "train/loss_error": 0.49479570984840393, "train/loss_total": 0.567109227180481 }, { "epoch": 4.38044349452311, "step": 16396, "train/loss_ctc": 0.8381708264350891, "train/loss_error": 0.41535842418670654, "train/loss_total": 0.49992090463638306 }, { "epoch": 4.380710659898477, "step": 16397, "train/loss_ctc": 0.5430238842964172, "train/loss_error": 0.41986507177352905, "train/loss_total": 0.44449684023857117 }, { "epoch": 4.3809778252738445, "step": 16398, "train/loss_ctc": 0.7450158596038818, "train/loss_error": 0.5258071422576904, "train/loss_total": 0.5696488618850708 }, { "epoch": 4.381244990649212, "step": 16399, "train/loss_ctc": 0.5468729734420776, "train/loss_error": 0.4084538221359253, "train/loss_total": 0.43613767623901367 }, { "epoch": 4.381512156024579, "grad_norm": 2.342984437942505, "learning_rate": 3.7253539941223617e-06, "loss": 0.507, "step": 16400 }, { "epoch": 4.381512156024579, "step": 16400, "train/loss_ctc": 0.5393015742301941, "train/loss_error": 
0.39603808522224426, "train/loss_total": 0.4246908128261566 }, { "epoch": 4.3817793213999465, "step": 16401, "train/loss_ctc": 0.469272255897522, "train/loss_error": 0.4704326391220093, "train/loss_total": 0.4702005684375763 }, { "epoch": 4.382046486775314, "step": 16402, "train/loss_ctc": 0.9804678559303284, "train/loss_error": 0.4711168110370636, "train/loss_total": 0.5729870200157166 }, { "epoch": 4.382313652150681, "step": 16403, "train/loss_ctc": 0.8407111763954163, "train/loss_error": 0.5100772976875305, "train/loss_total": 0.5762040615081787 }, { "epoch": 4.3825808175260486, "step": 16404, "train/loss_ctc": 1.022205114364624, "train/loss_error": 0.4965342879295349, "train/loss_total": 0.6016684770584106 }, { "epoch": 4.382847982901416, "step": 16405, "train/loss_ctc": 1.1130064725875854, "train/loss_error": 0.42797359824180603, "train/loss_total": 0.564980149269104 }, { "epoch": 4.383115148276783, "step": 16406, "train/loss_ctc": 0.8482897877693176, "train/loss_error": 0.44752663373947144, "train/loss_total": 0.5276792645454407 }, { "epoch": 4.383382313652151, "step": 16407, "train/loss_ctc": 1.3935000896453857, "train/loss_error": 0.40563303232192993, "train/loss_total": 0.60320645570755 }, { "epoch": 4.383649479027518, "step": 16408, "train/loss_ctc": 0.44581538438796997, "train/loss_error": 0.38446617126464844, "train/loss_total": 0.3967360258102417 }, { "epoch": 4.383916644402885, "step": 16409, "train/loss_ctc": 0.5628924369812012, "train/loss_error": 0.4188895523548126, "train/loss_total": 0.44769012928009033 }, { "epoch": 4.384183809778253, "grad_norm": 2.540665864944458, "learning_rate": 3.709324071600321e-06, "loss": 0.5186, "step": 16410 }, { "epoch": 4.384183809778253, "step": 16410, "train/loss_ctc": 0.8114670515060425, "train/loss_error": 0.4019817113876343, "train/loss_total": 0.4838787913322449 }, { "epoch": 4.38445097515362, "step": 16411, "train/loss_ctc": 1.29337477684021, "train/loss_error": 0.4554404318332672, "train/loss_total": 
0.6230273246765137 }, { "epoch": 4.384718140528987, "step": 16412, "train/loss_ctc": 0.7344110608100891, "train/loss_error": 0.4635370969772339, "train/loss_total": 0.517711877822876 }, { "epoch": 4.384985305904355, "step": 16413, "train/loss_ctc": 1.1860902309417725, "train/loss_error": 0.4240104556083679, "train/loss_total": 0.5764263868331909 }, { "epoch": 4.385252471279722, "step": 16414, "train/loss_ctc": 0.2603203356266022, "train/loss_error": 0.43948549032211304, "train/loss_total": 0.40365245938301086 }, { "epoch": 4.38551963665509, "step": 16415, "train/loss_ctc": 0.65342116355896, "train/loss_error": 0.4409915506839752, "train/loss_total": 0.48347747325897217 }, { "epoch": 4.385786802030457, "step": 16416, "train/loss_ctc": 0.39941877126693726, "train/loss_error": 0.4484153389930725, "train/loss_total": 0.4386160373687744 }, { "epoch": 4.386053967405824, "step": 16417, "train/loss_ctc": 0.42411091923713684, "train/loss_error": 0.4038797914981842, "train/loss_total": 0.4079260230064392 }, { "epoch": 4.386321132781192, "step": 16418, "train/loss_ctc": 0.3997475206851959, "train/loss_error": 0.3861842155456543, "train/loss_total": 0.3888968825340271 }, { "epoch": 4.386588298156559, "step": 16419, "train/loss_ctc": 0.9340710043907166, "train/loss_error": 0.4819493293762207, "train/loss_total": 0.5723736882209778 }, { "epoch": 4.386855463531926, "grad_norm": 2.649306297302246, "learning_rate": 3.6932941490782795e-06, "loss": 0.4896, "step": 16420 }, { "epoch": 4.386855463531926, "step": 16420, "train/loss_ctc": 0.6082918643951416, "train/loss_error": 0.38280433416366577, "train/loss_total": 0.42790186405181885 }, { "epoch": 4.387122628907294, "step": 16421, "train/loss_ctc": 0.48810648918151855, "train/loss_error": 0.522890567779541, "train/loss_total": 0.5159337520599365 }, { "epoch": 4.387389794282661, "step": 16422, "train/loss_ctc": 0.49767136573791504, "train/loss_error": 0.4380495548248291, "train/loss_total": 0.4499739110469818 }, { "epoch": 
4.387656959658028, "step": 16423, "train/loss_ctc": 0.6373533606529236, "train/loss_error": 0.4099576473236084, "train/loss_total": 0.4554367959499359 }, { "epoch": 4.387924125033396, "step": 16424, "train/loss_ctc": 1.1128712892532349, "train/loss_error": 0.39055508375167847, "train/loss_total": 0.5350183248519897 }, { "epoch": 4.388191290408763, "step": 16425, "train/loss_ctc": 0.92240309715271, "train/loss_error": 0.47181540727615356, "train/loss_total": 0.5619329810142517 }, { "epoch": 4.38845845578413, "step": 16426, "train/loss_ctc": 0.2825407385826111, "train/loss_error": 0.4146660268306732, "train/loss_total": 0.3882409930229187 }, { "epoch": 4.388725621159498, "step": 16427, "train/loss_ctc": 0.7115405797958374, "train/loss_error": 0.3913147449493408, "train/loss_total": 0.45535990595817566 }, { "epoch": 4.388992786534865, "step": 16428, "train/loss_ctc": 0.5183518528938293, "train/loss_error": 0.49139508605003357, "train/loss_total": 0.4967864751815796 }, { "epoch": 4.389259951910232, "step": 16429, "train/loss_ctc": 0.8554040193557739, "train/loss_error": 0.4697115421295166, "train/loss_total": 0.5468500256538391 }, { "epoch": 4.3895271172856, "grad_norm": 2.1215267181396484, "learning_rate": 3.6772642265562385e-06, "loss": 0.4833, "step": 16430 }, { "epoch": 4.3895271172856, "step": 16430, "train/loss_ctc": 0.6924342513084412, "train/loss_error": 0.4261118769645691, "train/loss_total": 0.4793763756752014 }, { "epoch": 4.389794282660967, "step": 16431, "train/loss_ctc": 1.048980474472046, "train/loss_error": 0.4112009108066559, "train/loss_total": 0.5387568473815918 }, { "epoch": 4.3900614480363345, "step": 16432, "train/loss_ctc": 1.131635069847107, "train/loss_error": 0.5000656843185425, "train/loss_total": 0.6263795495033264 }, { "epoch": 4.390328613411702, "step": 16433, "train/loss_ctc": 1.2147091627120972, "train/loss_error": 0.4094947874546051, "train/loss_total": 0.5705376863479614 }, { "epoch": 4.390595778787069, "step": 16434, "train/loss_ctc": 
0.7801976203918457, "train/loss_error": 0.4629596769809723, "train/loss_total": 0.5264072418212891 }, { "epoch": 4.3908629441624365, "step": 16435, "train/loss_ctc": 0.7565146684646606, "train/loss_error": 0.41262322664260864, "train/loss_total": 0.4814015328884125 }, { "epoch": 4.391130109537804, "step": 16436, "train/loss_ctc": 0.8719761371612549, "train/loss_error": 0.47199276089668274, "train/loss_total": 0.5519894361495972 }, { "epoch": 4.391397274913171, "step": 16437, "train/loss_ctc": 0.53705894947052, "train/loss_error": 0.4803406894207001, "train/loss_total": 0.49168434739112854 }, { "epoch": 4.3916644402885385, "step": 16438, "train/loss_ctc": 0.580045223236084, "train/loss_error": 0.42527762055397034, "train/loss_total": 0.45623114705085754 }, { "epoch": 4.391931605663906, "step": 16439, "train/loss_ctc": 0.6900399923324585, "train/loss_error": 0.38687753677368164, "train/loss_total": 0.4475100338459015 }, { "epoch": 4.392198771039273, "grad_norm": 1.659362554550171, "learning_rate": 3.6612343040341974e-06, "loss": 0.517, "step": 16440 }, { "epoch": 4.392198771039273, "step": 16440, "train/loss_ctc": 0.6939431428909302, "train/loss_error": 0.36679133772850037, "train/loss_total": 0.4322217106819153 }, { "epoch": 4.392465936414641, "step": 16441, "train/loss_ctc": 0.9249794483184814, "train/loss_error": 0.40229249000549316, "train/loss_total": 0.5068298578262329 }, { "epoch": 4.392733101790008, "step": 16442, "train/loss_ctc": 0.48281732201576233, "train/loss_error": 0.3688308894634247, "train/loss_total": 0.3916281759738922 }, { "epoch": 4.393000267165375, "step": 16443, "train/loss_ctc": 0.44855737686157227, "train/loss_error": 0.4567977786064148, "train/loss_total": 0.45514971017837524 }, { "epoch": 4.393267432540743, "step": 16444, "train/loss_ctc": 0.2613721787929535, "train/loss_error": 0.3937196731567383, "train/loss_total": 0.3672501742839813 }, { "epoch": 4.39353459791611, "step": 16445, "train/loss_ctc": 1.2445285320281982, "train/loss_error": 
0.42479681968688965, "train/loss_total": 0.5887431502342224 }, { "epoch": 4.393801763291478, "step": 16446, "train/loss_ctc": 0.7421401739120483, "train/loss_error": 0.5034839510917664, "train/loss_total": 0.5512152314186096 }, { "epoch": 4.394068928666845, "step": 16447, "train/loss_ctc": 0.8610813617706299, "train/loss_error": 0.4977916181087494, "train/loss_total": 0.5704495906829834 }, { "epoch": 4.394336094042212, "step": 16448, "train/loss_ctc": 0.8408635854721069, "train/loss_error": 0.43265944719314575, "train/loss_total": 0.5143002867698669 }, { "epoch": 4.394603259417579, "step": 16449, "train/loss_ctc": 1.3465269804000854, "train/loss_error": 0.4855300486087799, "train/loss_total": 0.65772944688797 }, { "epoch": 4.394870424792947, "grad_norm": 3.405857801437378, "learning_rate": 3.645204381512156e-06, "loss": 0.5036, "step": 16450 }, { "epoch": 4.394870424792947, "step": 16450, "train/loss_ctc": 0.8451586961746216, "train/loss_error": 0.4744153916835785, "train/loss_total": 0.548564076423645 }, { "epoch": 4.395137590168314, "step": 16451, "train/loss_ctc": 1.3915107250213623, "train/loss_error": 0.40264979004859924, "train/loss_total": 0.6004220247268677 }, { "epoch": 4.395404755543682, "step": 16452, "train/loss_ctc": 0.4308856129646301, "train/loss_error": 0.41175806522369385, "train/loss_total": 0.41558361053466797 }, { "epoch": 4.395671920919049, "step": 16453, "train/loss_ctc": 0.5859552025794983, "train/loss_error": 0.43612757325172424, "train/loss_total": 0.46609312295913696 }, { "epoch": 4.395939086294416, "step": 16454, "train/loss_ctc": 1.0457658767700195, "train/loss_error": 0.4492189884185791, "train/loss_total": 0.568528413772583 }, { "epoch": 4.396206251669784, "step": 16455, "train/loss_ctc": 0.7241746187210083, "train/loss_error": 0.48356980085372925, "train/loss_total": 0.531690776348114 }, { "epoch": 4.396473417045151, "step": 16456, "train/loss_ctc": 0.8157691955566406, "train/loss_error": 0.4222182631492615, "train/loss_total": 
0.5009284615516663 }, { "epoch": 4.396740582420518, "step": 16457, "train/loss_ctc": 0.9562797546386719, "train/loss_error": 0.4739641845226288, "train/loss_total": 0.5704272985458374 }, { "epoch": 4.397007747795886, "step": 16458, "train/loss_ctc": 0.7631292939186096, "train/loss_error": 0.5144085884094238, "train/loss_total": 0.564152717590332 }, { "epoch": 4.397274913171253, "step": 16459, "train/loss_ctc": 0.26262396574020386, "train/loss_error": 0.3980150520801544, "train/loss_total": 0.3709368407726288 }, { "epoch": 4.39754207854662, "grad_norm": 5.753241062164307, "learning_rate": 3.629174458990115e-06, "loss": 0.5137, "step": 16460 }, { "epoch": 4.39754207854662, "step": 16460, "train/loss_ctc": 0.7809551358222961, "train/loss_error": 0.4172322452068329, "train/loss_total": 0.48997682332992554 }, { "epoch": 4.397809243921988, "step": 16461, "train/loss_ctc": 0.8897266387939453, "train/loss_error": 0.4081479609012604, "train/loss_total": 0.5044637322425842 }, { "epoch": 4.398076409297355, "step": 16462, "train/loss_ctc": 0.37864911556243896, "train/loss_error": 0.43678173422813416, "train/loss_total": 0.4251552224159241 }, { "epoch": 4.398343574672722, "step": 16463, "train/loss_ctc": 0.4644335210323334, "train/loss_error": 0.4013395607471466, "train/loss_total": 0.4139583706855774 }, { "epoch": 4.39861074004809, "step": 16464, "train/loss_ctc": 0.7588765025138855, "train/loss_error": 0.4123314619064331, "train/loss_total": 0.481640487909317 }, { "epoch": 4.398877905423457, "step": 16465, "train/loss_ctc": 0.5465152263641357, "train/loss_error": 0.45778727531433105, "train/loss_total": 0.4755328893661499 }, { "epoch": 4.399145070798824, "step": 16466, "train/loss_ctc": 0.6658474206924438, "train/loss_error": 0.4252106249332428, "train/loss_total": 0.4733380079269409 }, { "epoch": 4.399412236174192, "step": 16467, "train/loss_ctc": 0.4037371277809143, "train/loss_error": 0.4412704110145569, "train/loss_total": 0.4337637722492218 }, { "epoch": 
4.399679401549559, "step": 16468, "train/loss_ctc": 0.8894662857055664, "train/loss_error": 0.39276576042175293, "train/loss_total": 0.4921059012413025 }, { "epoch": 4.3999465669249265, "step": 16469, "train/loss_ctc": 0.5874542593955994, "train/loss_error": 0.5210155248641968, "train/loss_total": 0.5343032479286194 }, { "epoch": 4.400213732300294, "grad_norm": 2.204021692276001, "learning_rate": 3.6131445364680738e-06, "loss": 0.4724, "step": 16470 }, { "epoch": 4.400213732300294, "step": 16470, "train/loss_ctc": 0.8274956941604614, "train/loss_error": 0.4227995276451111, "train/loss_total": 0.5037387609481812 }, { "epoch": 4.400480897675661, "step": 16471, "train/loss_ctc": 0.6048614978790283, "train/loss_error": 0.3853362202644348, "train/loss_total": 0.4292412996292114 }, { "epoch": 4.4007480630510285, "step": 16472, "train/loss_ctc": 0.6108957529067993, "train/loss_error": 0.3959690034389496, "train/loss_total": 0.43895435333251953 }, { "epoch": 4.401015228426396, "step": 16473, "train/loss_ctc": 0.5535677671432495, "train/loss_error": 0.43601784110069275, "train/loss_total": 0.459527850151062 }, { "epoch": 4.401282393801763, "step": 16474, "train/loss_ctc": 0.5038151144981384, "train/loss_error": 0.4252128601074219, "train/loss_total": 0.44093331694602966 }, { "epoch": 4.4015495591771305, "step": 16475, "train/loss_ctc": 0.5099891424179077, "train/loss_error": 0.42478838562965393, "train/loss_total": 0.44182854890823364 }, { "epoch": 4.401816724552498, "step": 16476, "train/loss_ctc": 0.2999744415283203, "train/loss_error": 0.4145083725452423, "train/loss_total": 0.3916015923023224 }, { "epoch": 4.402083889927866, "step": 16477, "train/loss_ctc": 0.8261417746543884, "train/loss_error": 0.4117509722709656, "train/loss_total": 0.4946291446685791 }, { "epoch": 4.402351055303233, "step": 16478, "train/loss_ctc": 0.6584434509277344, "train/loss_error": 0.39791223406791687, "train/loss_total": 0.4500184655189514 }, { "epoch": 4.4026182206786, "step": 16479, 
"train/loss_ctc": 0.5544964075088501, "train/loss_error": 0.4378044605255127, "train/loss_total": 0.4611428380012512 }, { "epoch": 4.402885386053967, "grad_norm": 1.3433045148849487, "learning_rate": 3.5971146139460327e-06, "loss": 0.4512, "step": 16480 }, { "epoch": 4.402885386053967, "step": 16480, "train/loss_ctc": 0.3006284236907959, "train/loss_error": 0.41457903385162354, "train/loss_total": 0.39178889989852905 }, { "epoch": 4.403152551429335, "step": 16481, "train/loss_ctc": 1.3984147310256958, "train/loss_error": 0.49962174892425537, "train/loss_total": 0.6793803572654724 }, { "epoch": 4.403419716804702, "step": 16482, "train/loss_ctc": 0.7223150730133057, "train/loss_error": 0.4106453061103821, "train/loss_total": 0.47297924757003784 }, { "epoch": 4.40368688218007, "step": 16483, "train/loss_ctc": 0.6025465726852417, "train/loss_error": 0.4460136890411377, "train/loss_total": 0.47732028365135193 }, { "epoch": 4.403954047555437, "step": 16484, "train/loss_ctc": 1.412578821182251, "train/loss_error": 0.499115526676178, "train/loss_total": 0.6818082332611084 }, { "epoch": 4.404221212930804, "step": 16485, "train/loss_ctc": 1.0435450077056885, "train/loss_error": 0.4639504849910736, "train/loss_total": 0.5798693895339966 }, { "epoch": 4.404488378306172, "step": 16486, "train/loss_ctc": 1.0205481052398682, "train/loss_error": 0.41715627908706665, "train/loss_total": 0.537834644317627 }, { "epoch": 4.404755543681539, "step": 16487, "train/loss_ctc": 0.6854674816131592, "train/loss_error": 0.430957555770874, "train/loss_total": 0.48185956478118896 }, { "epoch": 4.405022709056906, "step": 16488, "train/loss_ctc": 0.7349816560745239, "train/loss_error": 0.4169131815433502, "train/loss_total": 0.480526864528656 }, { "epoch": 4.405289874432274, "step": 16489, "train/loss_ctc": 0.8026637434959412, "train/loss_error": 0.3828241527080536, "train/loss_total": 0.46679210662841797 }, { "epoch": 4.405557039807641, "grad_norm": 1.790418028831482, "learning_rate": 
3.5810846914239916e-06, "loss": 0.525, "step": 16490 }, { "epoch": 4.405557039807641, "step": 16490, "train/loss_ctc": 0.891379714012146, "train/loss_error": 0.469871461391449, "train/loss_total": 0.5541731119155884 }, { "epoch": 4.405824205183008, "step": 16491, "train/loss_ctc": 0.5697231292724609, "train/loss_error": 0.3823690414428711, "train/loss_total": 0.41983985900878906 }, { "epoch": 4.406091370558376, "step": 16492, "train/loss_ctc": 0.5626128911972046, "train/loss_error": 0.4162560999393463, "train/loss_total": 0.44552746415138245 }, { "epoch": 4.406358535933743, "step": 16493, "train/loss_ctc": 0.42440730333328247, "train/loss_error": 0.423968106508255, "train/loss_total": 0.42405593395233154 }, { "epoch": 4.40662570130911, "step": 16494, "train/loss_ctc": 0.5634826421737671, "train/loss_error": 0.4491754174232483, "train/loss_total": 0.47203686833381653 }, { "epoch": 4.406892866684478, "step": 16495, "train/loss_ctc": 0.9457531571388245, "train/loss_error": 0.455790251493454, "train/loss_total": 0.5537828207015991 }, { "epoch": 4.407160032059845, "step": 16496, "train/loss_ctc": 0.3172929883003235, "train/loss_error": 0.3849417567253113, "train/loss_total": 0.3714120090007782 }, { "epoch": 4.407427197435212, "step": 16497, "train/loss_ctc": 0.203020840883255, "train/loss_error": 0.407397598028183, "train/loss_total": 0.36652225255966187 }, { "epoch": 4.40769436281058, "step": 16498, "train/loss_ctc": 0.632235050201416, "train/loss_error": 0.44344720244407654, "train/loss_total": 0.4812047481536865 }, { "epoch": 4.407961528185947, "step": 16499, "train/loss_ctc": 0.3911252021789551, "train/loss_error": 0.4034322500228882, "train/loss_total": 0.40097084641456604 }, { "epoch": 4.408228693561314, "grad_norm": 1.4520033597946167, "learning_rate": 3.5650547689019505e-06, "loss": 0.449, "step": 16500 }, { "epoch": 4.408228693561314, "step": 16500, "train/loss_ctc": 0.6500231623649597, "train/loss_error": 0.4445219337940216, "train/loss_total": 
0.4856221675872803 }, { "epoch": 4.408495858936682, "step": 16501, "train/loss_ctc": 1.139236330986023, "train/loss_error": 0.44475817680358887, "train/loss_total": 0.5836538076400757 }, { "epoch": 4.408763024312049, "step": 16502, "train/loss_ctc": 0.8065220713615417, "train/loss_error": 0.4242803454399109, "train/loss_total": 0.5007287263870239 }, { "epoch": 4.4090301896874164, "step": 16503, "train/loss_ctc": 0.4442364573478699, "train/loss_error": 0.3513011336326599, "train/loss_total": 0.36988818645477295 }, { "epoch": 4.409297355062784, "step": 16504, "train/loss_ctc": 1.4905245304107666, "train/loss_error": 0.48989617824554443, "train/loss_total": 0.6900218725204468 }, { "epoch": 4.409564520438151, "step": 16505, "train/loss_ctc": 0.6840563416481018, "train/loss_error": 0.4899401366710663, "train/loss_total": 0.5287634134292603 }, { "epoch": 4.4098316858135185, "step": 16506, "train/loss_ctc": 0.253700852394104, "train/loss_error": 0.4368665814399719, "train/loss_total": 0.4002334475517273 }, { "epoch": 4.410098851188886, "step": 16507, "train/loss_ctc": 0.5901200771331787, "train/loss_error": 0.4736338257789612, "train/loss_total": 0.4969310760498047 }, { "epoch": 4.410366016564253, "step": 16508, "train/loss_ctc": 0.7222970724105835, "train/loss_error": 0.4448722302913666, "train/loss_total": 0.5003572106361389 }, { "epoch": 4.4106331819396205, "step": 16509, "train/loss_ctc": 0.3269452154636383, "train/loss_error": 0.5158593654632568, "train/loss_total": 0.4780765473842621 }, { "epoch": 4.410900347314988, "grad_norm": 2.73581600189209, "learning_rate": 3.549024846379909e-06, "loss": 0.5034, "step": 16510 }, { "epoch": 4.410900347314988, "step": 16510, "train/loss_ctc": 0.3414779305458069, "train/loss_error": 0.463915079832077, "train/loss_total": 0.4394276738166809 }, { "epoch": 4.411167512690355, "step": 16511, "train/loss_ctc": 0.7149814367294312, "train/loss_error": 0.4103959798812866, "train/loss_total": 0.4713130593299866 }, { "epoch": 
4.4114346780657225, "step": 16512, "train/loss_ctc": 0.5286604762077332, "train/loss_error": 0.37057971954345703, "train/loss_total": 0.40219587087631226 }, { "epoch": 4.41170184344109, "step": 16513, "train/loss_ctc": 0.4125673770904541, "train/loss_error": 0.4268634617328644, "train/loss_total": 0.4240042567253113 }, { "epoch": 4.411969008816458, "step": 16514, "train/loss_ctc": 1.1102936267852783, "train/loss_error": 0.42980948090553284, "train/loss_total": 0.5659063458442688 }, { "epoch": 4.412236174191825, "step": 16515, "train/loss_ctc": 0.4485695958137512, "train/loss_error": 0.38748738169670105, "train/loss_total": 0.39970383048057556 }, { "epoch": 4.412503339567192, "step": 16516, "train/loss_ctc": 1.01763916015625, "train/loss_error": 0.4193558990955353, "train/loss_total": 0.5390125513076782 }, { "epoch": 4.41277050494256, "step": 16517, "train/loss_ctc": 0.5078130960464478, "train/loss_error": 0.3833976984024048, "train/loss_total": 0.40828078985214233 }, { "epoch": 4.413037670317927, "step": 16518, "train/loss_ctc": 0.23008595407009125, "train/loss_error": 0.4097694456577301, "train/loss_total": 0.3738327622413635 }, { "epoch": 4.413304835693294, "step": 16519, "train/loss_ctc": 0.8650498986244202, "train/loss_error": 0.4085584282875061, "train/loss_total": 0.49985671043395996 }, { "epoch": 4.413572001068662, "grad_norm": 1.380750060081482, "learning_rate": 3.5329949238578684e-06, "loss": 0.4524, "step": 16520 }, { "epoch": 4.413572001068662, "step": 16520, "train/loss_ctc": 0.8898327946662903, "train/loss_error": 0.4578152000904083, "train/loss_total": 0.5442187190055847 }, { "epoch": 4.413839166444029, "step": 16521, "train/loss_ctc": 0.8735343217849731, "train/loss_error": 0.5021244287490845, "train/loss_total": 0.5764064192771912 }, { "epoch": 4.414106331819396, "step": 16522, "train/loss_ctc": 0.5684007406234741, "train/loss_error": 0.46272197365760803, "train/loss_total": 0.48385775089263916 }, { "epoch": 4.414373497194764, "step": 16523, 
"train/loss_ctc": 0.5505890846252441, "train/loss_error": 0.40460437536239624, "train/loss_total": 0.4338013231754303 }, { "epoch": 4.414640662570131, "step": 16524, "train/loss_ctc": 0.9017922878265381, "train/loss_error": 0.5107319355010986, "train/loss_total": 0.5889440178871155 }, { "epoch": 4.414907827945498, "step": 16525, "train/loss_ctc": 1.3615155220031738, "train/loss_error": 0.4572707414627075, "train/loss_total": 0.6381196975708008 }, { "epoch": 4.415174993320866, "step": 16526, "train/loss_ctc": 0.9131116271018982, "train/loss_error": 0.4282028377056122, "train/loss_total": 0.5251846313476562 }, { "epoch": 4.415442158696233, "step": 16527, "train/loss_ctc": 0.8896518349647522, "train/loss_error": 0.428215354681015, "train/loss_total": 0.5205026865005493 }, { "epoch": 4.4157093240716, "step": 16528, "train/loss_ctc": 1.1722218990325928, "train/loss_error": 0.378606915473938, "train/loss_total": 0.537329912185669 }, { "epoch": 4.415976489446968, "step": 16529, "train/loss_ctc": 0.7496148347854614, "train/loss_error": 0.45037227869033813, "train/loss_total": 0.5102207660675049 }, { "epoch": 4.416243654822335, "grad_norm": 4.355646133422852, "learning_rate": 3.516965001335827e-06, "loss": 0.5359, "step": 16530 }, { "epoch": 4.416243654822335, "step": 16530, "train/loss_ctc": 0.5407260060310364, "train/loss_error": 0.4337957799434662, "train/loss_total": 0.4551818370819092 }, { "epoch": 4.416510820197702, "step": 16531, "train/loss_ctc": 0.6255090832710266, "train/loss_error": 0.4063810408115387, "train/loss_total": 0.4502066373825073 }, { "epoch": 4.41677798557307, "step": 16532, "train/loss_ctc": 0.5512712597846985, "train/loss_error": 0.4161880314350128, "train/loss_total": 0.44320470094680786 }, { "epoch": 4.417045150948437, "step": 16533, "train/loss_ctc": 0.8521784543991089, "train/loss_error": 0.4000833332538605, "train/loss_total": 0.49050235748291016 }, { "epoch": 4.417312316323804, "step": 16534, "train/loss_ctc": 0.514110803604126, 
"train/loss_error": 0.4402823746204376, "train/loss_total": 0.4550480544567108 }, { "epoch": 4.417579481699172, "step": 16535, "train/loss_ctc": 0.32801494002342224, "train/loss_error": 0.4872756898403168, "train/loss_total": 0.4554235339164734 }, { "epoch": 4.417846647074539, "step": 16536, "train/loss_ctc": 0.9379750490188599, "train/loss_error": 0.44191497564315796, "train/loss_total": 0.5411269664764404 }, { "epoch": 4.418113812449906, "step": 16537, "train/loss_ctc": 0.36296361684799194, "train/loss_error": 0.47978097200393677, "train/loss_total": 0.4564175307750702 }, { "epoch": 4.418380977825274, "step": 16538, "train/loss_ctc": 0.48968321084976196, "train/loss_error": 0.3988909423351288, "train/loss_total": 0.4170494079589844 }, { "epoch": 4.418648143200641, "step": 16539, "train/loss_ctc": 0.46225038170814514, "train/loss_error": 0.4412766993045807, "train/loss_total": 0.4454714357852936 }, { "epoch": 4.4189153085760084, "grad_norm": 1.576145052909851, "learning_rate": 3.500935078813786e-06, "loss": 0.461, "step": 16540 }, { "epoch": 4.4189153085760084, "step": 16540, "train/loss_ctc": 0.6951189637184143, "train/loss_error": 0.4744793474674225, "train/loss_total": 0.5186072587966919 }, { "epoch": 4.419182473951376, "step": 16541, "train/loss_ctc": 0.9403520822525024, "train/loss_error": 0.4338564872741699, "train/loss_total": 0.5351556539535522 }, { "epoch": 4.419449639326743, "step": 16542, "train/loss_ctc": 0.8863309025764465, "train/loss_error": 0.45458871126174927, "train/loss_total": 0.5409371852874756 }, { "epoch": 4.4197168047021105, "step": 16543, "train/loss_ctc": 0.5382159948348999, "train/loss_error": 0.41025862097740173, "train/loss_total": 0.4358500838279724 }, { "epoch": 4.419983970077478, "step": 16544, "train/loss_ctc": 1.002973198890686, "train/loss_error": 0.4143027067184448, "train/loss_total": 0.5320368409156799 }, { "epoch": 4.420251135452846, "step": 16545, "train/loss_ctc": 0.8054476976394653, "train/loss_error": 0.4032248854637146, 
"train/loss_total": 0.4836694598197937 }, { "epoch": 4.4205183008282125, "step": 16546, "train/loss_ctc": 1.0766160488128662, "train/loss_error": 0.41540345549583435, "train/loss_total": 0.5476459860801697 }, { "epoch": 4.42078546620358, "step": 16547, "train/loss_ctc": 0.5018399357795715, "train/loss_error": 0.4260447025299072, "train/loss_total": 0.441203773021698 }, { "epoch": 4.421052631578947, "step": 16548, "train/loss_ctc": 0.6470283269882202, "train/loss_error": 0.4129280745983124, "train/loss_total": 0.45974814891815186 }, { "epoch": 4.4213197969543145, "step": 16549, "train/loss_ctc": 0.6232012510299683, "train/loss_error": 0.4350808262825012, "train/loss_total": 0.4727049171924591 }, { "epoch": 4.421586962329682, "grad_norm": 1.7025115489959717, "learning_rate": 3.4849051562917448e-06, "loss": 0.4968, "step": 16550 }, { "epoch": 4.421586962329682, "step": 16550, "train/loss_ctc": 0.8710924983024597, "train/loss_error": 0.441575288772583, "train/loss_total": 0.5274787545204163 }, { "epoch": 4.42185412770505, "step": 16551, "train/loss_ctc": 1.5795977115631104, "train/loss_error": 0.4407472312450409, "train/loss_total": 0.6685173511505127 }, { "epoch": 4.422121293080417, "step": 16552, "train/loss_ctc": 0.46737566590309143, "train/loss_error": 0.44875526428222656, "train/loss_total": 0.45247936248779297 }, { "epoch": 4.422388458455784, "step": 16553, "train/loss_ctc": 0.5662543177604675, "train/loss_error": 0.4579084515571594, "train/loss_total": 0.4795776605606079 }, { "epoch": 4.422655623831152, "step": 16554, "train/loss_ctc": 0.5790570378303528, "train/loss_error": 0.45612606406211853, "train/loss_total": 0.48071226477622986 }, { "epoch": 4.422922789206519, "step": 16555, "train/loss_ctc": 1.6199369430541992, "train/loss_error": 0.4756547212600708, "train/loss_total": 0.7045111656188965 }, { "epoch": 4.423189954581886, "step": 16556, "train/loss_ctc": 0.6893527507781982, "train/loss_error": 0.5266936421394348, "train/loss_total": 0.5592254400253296 }, 
{ "epoch": 4.423457119957254, "step": 16557, "train/loss_ctc": 0.8664847612380981, "train/loss_error": 0.4199441373348236, "train/loss_total": 0.5092523097991943 }, { "epoch": 4.423724285332621, "step": 16558, "train/loss_ctc": 0.895784854888916, "train/loss_error": 0.431203693151474, "train/loss_total": 0.5241199135780334 }, { "epoch": 4.423991450707988, "step": 16559, "train/loss_ctc": 0.6027919054031372, "train/loss_error": 0.46024882793426514, "train/loss_total": 0.488757461309433 }, { "epoch": 4.424258616083356, "grad_norm": 1.5651949644088745, "learning_rate": 3.4688752337697033e-06, "loss": 0.5395, "step": 16560 }, { "epoch": 4.424258616083356, "step": 16560, "train/loss_ctc": 1.0969667434692383, "train/loss_error": 0.4864429831504822, "train/loss_total": 0.6085477471351624 }, { "epoch": 4.424525781458723, "step": 16561, "train/loss_ctc": 0.9477505683898926, "train/loss_error": 0.4517117738723755, "train/loss_total": 0.5509195327758789 }, { "epoch": 4.42479294683409, "step": 16562, "train/loss_ctc": 0.6579211950302124, "train/loss_error": 0.4421517550945282, "train/loss_total": 0.48530566692352295 }, { "epoch": 4.425060112209458, "step": 16563, "train/loss_ctc": 0.8285980224609375, "train/loss_error": 0.4385312497615814, "train/loss_total": 0.5165446400642395 }, { "epoch": 4.425327277584825, "step": 16564, "train/loss_ctc": 0.47251683473587036, "train/loss_error": 0.4362506866455078, "train/loss_total": 0.4435039162635803 }, { "epoch": 4.425594442960192, "step": 16565, "train/loss_ctc": 0.8927035927772522, "train/loss_error": 0.4908372163772583, "train/loss_total": 0.571210503578186 }, { "epoch": 4.42586160833556, "step": 16566, "train/loss_ctc": 0.95811527967453, "train/loss_error": 0.4074552357196808, "train/loss_total": 0.5175872445106506 }, { "epoch": 4.426128773710927, "step": 16567, "train/loss_ctc": 0.6372790932655334, "train/loss_error": 0.3976878225803375, "train/loss_total": 0.4456060528755188 }, { "epoch": 4.426395939086294, "step": 16568, 
"train/loss_ctc": 0.4693824052810669, "train/loss_error": 0.41597655415534973, "train/loss_total": 0.4266577363014221 }, { "epoch": 4.426663104461662, "step": 16569, "train/loss_ctc": 1.2378182411193848, "train/loss_error": 0.44604334235191345, "train/loss_total": 0.6043983101844788 }, { "epoch": 4.426930269837029, "grad_norm": 1.8805131912231445, "learning_rate": 3.4528453112476626e-06, "loss": 0.517, "step": 16570 }, { "epoch": 4.426930269837029, "step": 16570, "train/loss_ctc": 0.836931586265564, "train/loss_error": 0.4488978683948517, "train/loss_total": 0.526504635810852 }, { "epoch": 4.427197435212396, "step": 16571, "train/loss_ctc": 1.2548197507858276, "train/loss_error": 0.4659664034843445, "train/loss_total": 0.623737096786499 }, { "epoch": 4.427464600587764, "step": 16572, "train/loss_ctc": 0.6784070730209351, "train/loss_error": 0.43326613306999207, "train/loss_total": 0.48229432106018066 }, { "epoch": 4.427731765963131, "step": 16573, "train/loss_ctc": 0.8116757869720459, "train/loss_error": 0.4652736485004425, "train/loss_total": 0.534554123878479 }, { "epoch": 4.427998931338498, "step": 16574, "train/loss_ctc": 0.35062816739082336, "train/loss_error": 0.41532647609710693, "train/loss_total": 0.4023868143558502 }, { "epoch": 4.428266096713866, "step": 16575, "train/loss_ctc": 0.7016439437866211, "train/loss_error": 0.43597015738487244, "train/loss_total": 0.4891049265861511 }, { "epoch": 4.428533262089234, "step": 16576, "train/loss_ctc": 0.8179880380630493, "train/loss_error": 0.42822957038879395, "train/loss_total": 0.5061812400817871 }, { "epoch": 4.4288004274646005, "step": 16577, "train/loss_ctc": 0.7517920136451721, "train/loss_error": 0.4089689552783966, "train/loss_total": 0.47753357887268066 }, { "epoch": 4.429067592839968, "step": 16578, "train/loss_ctc": 0.33956849575042725, "train/loss_error": 0.42024871706962585, "train/loss_total": 0.40411269664764404 }, { "epoch": 4.429334758215335, "step": 16579, "train/loss_ctc": 0.3000905513763428, 
"train/loss_error": 0.41358643770217896, "train/loss_total": 0.3908872902393341 }, { "epoch": 4.4296019235907025, "grad_norm": 1.9174374341964722, "learning_rate": 3.436815388725621e-06, "loss": 0.4837, "step": 16580 }, { "epoch": 4.4296019235907025, "step": 16580, "train/loss_ctc": 1.0346633195877075, "train/loss_error": 0.5119004249572754, "train/loss_total": 0.6164530515670776 }, { "epoch": 4.42986908896607, "step": 16581, "train/loss_ctc": 0.4221840500831604, "train/loss_error": 0.4040001630783081, "train/loss_total": 0.40763694047927856 }, { "epoch": 4.430136254341438, "step": 16582, "train/loss_ctc": 0.31742677092552185, "train/loss_error": 0.4961691200733185, "train/loss_total": 0.4604206681251526 }, { "epoch": 4.4304034197168045, "step": 16583, "train/loss_ctc": 1.2628490924835205, "train/loss_error": 0.44570687413215637, "train/loss_total": 0.6091353297233582 }, { "epoch": 4.430670585092172, "step": 16584, "train/loss_ctc": 0.9652514457702637, "train/loss_error": 0.5267490744590759, "train/loss_total": 0.6144495606422424 }, { "epoch": 4.43093775046754, "step": 16585, "train/loss_ctc": 1.451958417892456, "train/loss_error": 0.41640788316726685, "train/loss_total": 0.6235179901123047 }, { "epoch": 4.4312049158429065, "step": 16586, "train/loss_ctc": 0.9383159875869751, "train/loss_error": 0.47195860743522644, "train/loss_total": 0.565230131149292 }, { "epoch": 4.431472081218274, "step": 16587, "train/loss_ctc": 0.4052685797214508, "train/loss_error": 0.42586439847946167, "train/loss_total": 0.421745240688324 }, { "epoch": 4.431739246593642, "step": 16588, "train/loss_ctc": 0.9736108183860779, "train/loss_error": 0.46131476759910583, "train/loss_total": 0.5637739896774292 }, { "epoch": 4.432006411969009, "step": 16589, "train/loss_ctc": 1.0007102489471436, "train/loss_error": 0.406097412109375, "train/loss_total": 0.5250200033187866 }, { "epoch": 4.432273577344376, "grad_norm": 1.6952694654464722, "learning_rate": 3.42078546620358e-06, "loss": 0.5407, "step": 
16590 }, { "epoch": 4.432273577344376, "step": 16590, "train/loss_ctc": 0.6460009813308716, "train/loss_error": 0.4652292728424072, "train/loss_total": 0.5013836026191711 }, { "epoch": 4.432540742719744, "step": 16591, "train/loss_ctc": 0.7994638681411743, "train/loss_error": 0.4864156246185303, "train/loss_total": 0.549025297164917 }, { "epoch": 4.432807908095111, "step": 16592, "train/loss_ctc": 0.5809999704360962, "train/loss_error": 0.4221948981285095, "train/loss_total": 0.45395591855049133 }, { "epoch": 4.433075073470478, "step": 16593, "train/loss_ctc": 1.1446239948272705, "train/loss_error": 0.531449019908905, "train/loss_total": 0.6540840268135071 }, { "epoch": 4.433342238845846, "step": 16594, "train/loss_ctc": 0.9192144870758057, "train/loss_error": 0.4704606533050537, "train/loss_total": 0.5602114200592041 }, { "epoch": 4.433609404221213, "step": 16595, "train/loss_ctc": 0.5765752196311951, "train/loss_error": 0.3723134696483612, "train/loss_total": 0.4131658375263214 }, { "epoch": 4.43387656959658, "step": 16596, "train/loss_ctc": 0.7225186824798584, "train/loss_error": 0.4649941325187683, "train/loss_total": 0.5164990425109863 }, { "epoch": 4.434143734971948, "step": 16597, "train/loss_ctc": 0.5338589549064636, "train/loss_error": 0.3852728009223938, "train/loss_total": 0.41499003767967224 }, { "epoch": 4.434410900347315, "step": 16598, "train/loss_ctc": 0.6338925957679749, "train/loss_error": 0.4179190695285797, "train/loss_total": 0.4611138105392456 }, { "epoch": 4.434678065722682, "step": 16599, "train/loss_ctc": 0.6298775672912598, "train/loss_error": 0.4423893690109253, "train/loss_total": 0.4798870086669922 }, { "epoch": 4.43494523109805, "grad_norm": 2.2878971099853516, "learning_rate": 3.404755543681539e-06, "loss": 0.5004, "step": 16600 }, { "epoch": 4.43494523109805, "step": 16600, "train/loss_ctc": 0.48672252893447876, "train/loss_error": 0.458307147026062, "train/loss_total": 0.4639902412891388 }, { "epoch": 4.435212396473417, "step": 
16601, "train/loss_ctc": 1.073291301727295, "train/loss_error": 0.4214290678501129, "train/loss_total": 0.5518015623092651 }, { "epoch": 4.435479561848784, "step": 16602, "train/loss_ctc": 0.594315230846405, "train/loss_error": 0.4275231659412384, "train/loss_total": 0.4608815908432007 }, { "epoch": 4.435746727224152, "step": 16603, "train/loss_ctc": 0.22215262055397034, "train/loss_error": 0.39142024517059326, "train/loss_total": 0.3575667142868042 }, { "epoch": 4.436013892599519, "step": 16604, "train/loss_ctc": 0.7761234045028687, "train/loss_error": 0.3734709918498993, "train/loss_total": 0.4540014863014221 }, { "epoch": 4.436281057974886, "step": 16605, "train/loss_ctc": 0.622681200504303, "train/loss_error": 0.4121566712856293, "train/loss_total": 0.4542616009712219 }, { "epoch": 4.436548223350254, "step": 16606, "train/loss_ctc": 1.1990751028060913, "train/loss_error": 0.451911062002182, "train/loss_total": 0.6013438701629639 }, { "epoch": 4.436815388725621, "step": 16607, "train/loss_ctc": 0.8614203929901123, "train/loss_error": 0.436745822429657, "train/loss_total": 0.5216807723045349 }, { "epoch": 4.437082554100988, "step": 16608, "train/loss_ctc": 0.7995074987411499, "train/loss_error": 0.38877570629119873, "train/loss_total": 0.4709220826625824 }, { "epoch": 4.437349719476356, "step": 16609, "train/loss_ctc": 0.8588178753852844, "train/loss_error": 0.41801217198371887, "train/loss_total": 0.506173312664032 }, { "epoch": 4.437616884851723, "grad_norm": 2.612790107727051, "learning_rate": 3.388725621159498e-06, "loss": 0.4843, "step": 16610 }, { "epoch": 4.437616884851723, "step": 16610, "train/loss_ctc": 1.0651382207870483, "train/loss_error": 0.4385061264038086, "train/loss_total": 0.5638325214385986 }, { "epoch": 4.43788405022709, "step": 16611, "train/loss_ctc": 1.0547080039978027, "train/loss_error": 0.5054435729980469, "train/loss_total": 0.615296483039856 }, { "epoch": 4.438151215602458, "step": 16612, "train/loss_ctc": 0.7093830108642578, 
"train/loss_error": 0.44983020424842834, "train/loss_total": 0.5017408132553101 }, { "epoch": 4.438418380977826, "step": 16613, "train/loss_ctc": 0.5880630016326904, "train/loss_error": 0.45141103863716125, "train/loss_total": 0.47874143719673157 }, { "epoch": 4.4386855463531925, "step": 16614, "train/loss_ctc": 0.3533589243888855, "train/loss_error": 0.5074992179870605, "train/loss_total": 0.4766711890697479 }, { "epoch": 4.43895271172856, "step": 16615, "train/loss_ctc": 0.5544493198394775, "train/loss_error": 0.41802963614463806, "train/loss_total": 0.44531357288360596 }, { "epoch": 4.439219877103928, "step": 16616, "train/loss_ctc": 0.40972840785980225, "train/loss_error": 0.4186340868473053, "train/loss_total": 0.4168529510498047 }, { "epoch": 4.4394870424792945, "step": 16617, "train/loss_ctc": 0.6736834049224854, "train/loss_error": 0.4362567961215973, "train/loss_total": 0.4837421178817749 }, { "epoch": 4.439754207854662, "step": 16618, "train/loss_ctc": 0.4406256377696991, "train/loss_error": 0.42275863885879517, "train/loss_total": 0.4263320565223694 }, { "epoch": 4.44002137323003, "step": 16619, "train/loss_ctc": 0.6250163912773132, "train/loss_error": 0.4557936191558838, "train/loss_total": 0.48963817954063416 }, { "epoch": 4.4402885386053965, "grad_norm": 1.2032183408737183, "learning_rate": 3.372695698637457e-06, "loss": 0.4898, "step": 16620 }, { "epoch": 4.4402885386053965, "step": 16620, "train/loss_ctc": 0.5293098092079163, "train/loss_error": 0.4738115966320038, "train/loss_total": 0.4849112331867218 }, { "epoch": 4.440555703980764, "step": 16621, "train/loss_ctc": 0.3987103998661041, "train/loss_error": 0.4490775465965271, "train/loss_total": 0.439004123210907 }, { "epoch": 4.440822869356132, "step": 16622, "train/loss_ctc": 0.4526197910308838, "train/loss_error": 0.39937281608581543, "train/loss_total": 0.41002222895622253 }, { "epoch": 4.4410900347314985, "step": 16623, "train/loss_ctc": 0.8464895486831665, "train/loss_error": 
0.3318662643432617, "train/loss_total": 0.4347909092903137 }, { "epoch": 4.441357200106866, "step": 16624, "train/loss_ctc": 0.3089028000831604, "train/loss_error": 0.3899274170398712, "train/loss_total": 0.37372252345085144 }, { "epoch": 4.441624365482234, "step": 16625, "train/loss_ctc": 0.45696935057640076, "train/loss_error": 0.4452861547470093, "train/loss_total": 0.44762277603149414 }, { "epoch": 4.441891530857601, "step": 16626, "train/loss_ctc": 0.2686218023300171, "train/loss_error": 0.44717204570770264, "train/loss_total": 0.4114619791507721 }, { "epoch": 4.442158696232968, "step": 16627, "train/loss_ctc": 0.7303674817085266, "train/loss_error": 0.4162820875644684, "train/loss_total": 0.4790991544723511 }, { "epoch": 4.442425861608336, "step": 16628, "train/loss_ctc": 1.1070679426193237, "train/loss_error": 0.43061697483062744, "train/loss_total": 0.5659071803092957 }, { "epoch": 4.442693026983703, "step": 16629, "train/loss_ctc": 0.5695061087608337, "train/loss_error": 0.4436073303222656, "train/loss_total": 0.4687871038913727 }, { "epoch": 4.44296019235907, "grad_norm": 2.5605101585388184, "learning_rate": 3.3566657761154154e-06, "loss": 0.4515, "step": 16630 }, { "epoch": 4.44296019235907, "step": 16630, "train/loss_ctc": 1.0927131175994873, "train/loss_error": 0.483722448348999, "train/loss_total": 0.6055206060409546 }, { "epoch": 4.443227357734438, "step": 16631, "train/loss_ctc": 0.6212338209152222, "train/loss_error": 0.44022274017333984, "train/loss_total": 0.4764249622821808 }, { "epoch": 4.443494523109805, "step": 16632, "train/loss_ctc": 0.5454798936843872, "train/loss_error": 0.4353857636451721, "train/loss_total": 0.45740461349487305 }, { "epoch": 4.443761688485172, "step": 16633, "train/loss_ctc": 0.7986416220664978, "train/loss_error": 0.5624104738235474, "train/loss_total": 0.6096566915512085 }, { "epoch": 4.44402885386054, "step": 16634, "train/loss_ctc": 1.7321417331695557, "train/loss_error": 0.47448715567588806, "train/loss_total": 
0.7260180711746216 }, { "epoch": 4.444296019235907, "step": 16635, "train/loss_ctc": 0.8892438411712646, "train/loss_error": 0.4536116123199463, "train/loss_total": 0.540738046169281 }, { "epoch": 4.444563184611274, "step": 16636, "train/loss_ctc": 0.48155462741851807, "train/loss_error": 0.4509781002998352, "train/loss_total": 0.45709341764450073 }, { "epoch": 4.444830349986642, "step": 16637, "train/loss_ctc": 0.918059766292572, "train/loss_error": 0.46452924609184265, "train/loss_total": 0.5552353858947754 }, { "epoch": 4.445097515362009, "step": 16638, "train/loss_ctc": 0.6495011448860168, "train/loss_error": 0.4268384277820587, "train/loss_total": 0.47137099504470825 }, { "epoch": 4.445364680737376, "step": 16639, "train/loss_ctc": 0.5580865144729614, "train/loss_error": 0.4468792676925659, "train/loss_total": 0.46912071108818054 }, { "epoch": 4.445631846112744, "grad_norm": 1.6883466243743896, "learning_rate": 3.3406358535933743e-06, "loss": 0.5369, "step": 16640 }, { "epoch": 4.445631846112744, "step": 16640, "train/loss_ctc": 0.3199481964111328, "train/loss_error": 0.479555606842041, "train/loss_total": 0.44763413071632385 }, { "epoch": 4.445899011488111, "step": 16641, "train/loss_ctc": 0.4794348478317261, "train/loss_error": 0.37475234270095825, "train/loss_total": 0.39568886160850525 }, { "epoch": 4.446166176863478, "step": 16642, "train/loss_ctc": 0.7489318251609802, "train/loss_error": 0.39284974336624146, "train/loss_total": 0.46406614780426025 }, { "epoch": 4.446433342238846, "step": 16643, "train/loss_ctc": 0.50489342212677, "train/loss_error": 0.37430500984191895, "train/loss_total": 0.40042269229888916 }, { "epoch": 4.446700507614214, "step": 16644, "train/loss_ctc": 0.22907643020153046, "train/loss_error": 0.4553306996822357, "train/loss_total": 0.41007986664772034 }, { "epoch": 4.44696767298958, "step": 16645, "train/loss_ctc": 0.7548894882202148, "train/loss_error": 0.42663562297821045, "train/loss_total": 0.4922863841056824 }, { "epoch": 
4.447234838364948, "step": 16646, "train/loss_ctc": 1.1986547708511353, "train/loss_error": 0.47161048650741577, "train/loss_total": 0.6170193552970886 }, { "epoch": 4.447502003740315, "step": 16647, "train/loss_ctc": 0.475500226020813, "train/loss_error": 0.46757298707962036, "train/loss_total": 0.46915844082832336 }, { "epoch": 4.447769169115682, "step": 16648, "train/loss_ctc": 0.5291006565093994, "train/loss_error": 0.4716639816761017, "train/loss_total": 0.48315131664276123 }, { "epoch": 4.44803633449105, "step": 16649, "train/loss_ctc": 0.45955732464790344, "train/loss_error": 0.4354398548603058, "train/loss_total": 0.4402633607387543 }, { "epoch": 4.448303499866418, "grad_norm": 2.043203830718994, "learning_rate": 3.3246059310713332e-06, "loss": 0.462, "step": 16650 }, { "epoch": 4.448303499866418, "step": 16650, "train/loss_ctc": 0.3883715569972992, "train/loss_error": 0.45024314522743225, "train/loss_total": 0.4378688335418701 }, { "epoch": 4.4485706652417845, "step": 16651, "train/loss_ctc": 0.45529288053512573, "train/loss_error": 0.3833314776420593, "train/loss_total": 0.3977237641811371 }, { "epoch": 4.448837830617152, "step": 16652, "train/loss_ctc": 0.6050981283187866, "train/loss_error": 0.467345654964447, "train/loss_total": 0.49489617347717285 }, { "epoch": 4.44910499599252, "step": 16653, "train/loss_ctc": 0.3318134546279907, "train/loss_error": 0.46078723669052124, "train/loss_total": 0.4349924921989441 }, { "epoch": 4.4493721613678865, "step": 16654, "train/loss_ctc": 0.97063809633255, "train/loss_error": 0.41355887055397034, "train/loss_total": 0.5249747037887573 }, { "epoch": 4.449639326743254, "step": 16655, "train/loss_ctc": 0.6459349393844604, "train/loss_error": 0.3414618968963623, "train/loss_total": 0.40235650539398193 }, { "epoch": 4.449906492118622, "step": 16656, "train/loss_ctc": 0.6478978991508484, "train/loss_error": 0.41461730003356934, "train/loss_total": 0.4612734317779541 }, { "epoch": 4.4501736574939885, "step": 16657, 
"train/loss_ctc": 0.8067349195480347, "train/loss_error": 0.3826483488082886, "train/loss_total": 0.46746569871902466 }, { "epoch": 4.450440822869356, "step": 16658, "train/loss_ctc": 0.6709411144256592, "train/loss_error": 0.4442780911922455, "train/loss_total": 0.4896106719970703 }, { "epoch": 4.450707988244724, "step": 16659, "train/loss_ctc": 0.9049776792526245, "train/loss_error": 0.47257035970687866, "train/loss_total": 0.5590518116950989 }, { "epoch": 4.4509751536200906, "grad_norm": 3.1826295852661133, "learning_rate": 3.308576008549292e-06, "loss": 0.467, "step": 16660 }, { "epoch": 4.4509751536200906, "step": 16660, "train/loss_ctc": 0.73149573802948, "train/loss_error": 0.48994681239128113, "train/loss_total": 0.5382566452026367 }, { "epoch": 4.451242318995458, "step": 16661, "train/loss_ctc": 0.3897979259490967, "train/loss_error": 0.42592546343803406, "train/loss_total": 0.4186999797821045 }, { "epoch": 4.451509484370826, "step": 16662, "train/loss_ctc": 0.34028202295303345, "train/loss_error": 0.397503525018692, "train/loss_total": 0.3860592246055603 }, { "epoch": 4.451776649746193, "step": 16663, "train/loss_ctc": 0.5873239040374756, "train/loss_error": 0.4684884548187256, "train/loss_total": 0.4922555387020111 }, { "epoch": 4.45204381512156, "step": 16664, "train/loss_ctc": 0.6324344873428345, "train/loss_error": 0.4341505169868469, "train/loss_total": 0.47380730509757996 }, { "epoch": 4.452310980496928, "step": 16665, "train/loss_ctc": 0.7193261981010437, "train/loss_error": 0.35806360840797424, "train/loss_total": 0.43031615018844604 }, { "epoch": 4.452578145872295, "step": 16666, "train/loss_ctc": 0.43162134289741516, "train/loss_error": 0.4108598828315735, "train/loss_total": 0.4150121808052063 }, { "epoch": 4.452845311247662, "step": 16667, "train/loss_ctc": 0.5787851810455322, "train/loss_error": 0.41150757670402527, "train/loss_total": 0.44496312737464905 }, { "epoch": 4.45311247662303, "step": 16668, "train/loss_ctc": 0.7291523814201355, 
"train/loss_error": 0.4240485727787018, "train/loss_total": 0.4850693345069885 }, { "epoch": 4.453379641998397, "step": 16669, "train/loss_ctc": 0.7803792953491211, "train/loss_error": 0.381275475025177, "train/loss_total": 0.46109625697135925 }, { "epoch": 4.453646807373764, "grad_norm": 1.4626071453094482, "learning_rate": 3.2925460860272507e-06, "loss": 0.4546, "step": 16670 }, { "epoch": 4.453646807373764, "step": 16670, "train/loss_ctc": 0.47924062609672546, "train/loss_error": 0.422151118516922, "train/loss_total": 0.4335690140724182 }, { "epoch": 4.453913972749132, "step": 16671, "train/loss_ctc": 0.40953850746154785, "train/loss_error": 0.3595770001411438, "train/loss_total": 0.3695693016052246 }, { "epoch": 4.454181138124499, "step": 16672, "train/loss_ctc": 0.5539889931678772, "train/loss_error": 0.3790375590324402, "train/loss_total": 0.4140278398990631 }, { "epoch": 4.454448303499866, "step": 16673, "train/loss_ctc": 0.5839348435401917, "train/loss_error": 0.45355653762817383, "train/loss_total": 0.4796321988105774 }, { "epoch": 4.454715468875234, "step": 16674, "train/loss_ctc": 0.8059760332107544, "train/loss_error": 0.4487364888191223, "train/loss_total": 0.5201843976974487 }, { "epoch": 4.454982634250602, "step": 16675, "train/loss_ctc": 1.214597225189209, "train/loss_error": 0.4228869080543518, "train/loss_total": 0.5812289714813232 }, { "epoch": 4.455249799625968, "step": 16676, "train/loss_ctc": 0.3660441040992737, "train/loss_error": 0.42763185501098633, "train/loss_total": 0.41531431674957275 }, { "epoch": 4.455516965001336, "step": 16677, "train/loss_ctc": 1.4830254316329956, "train/loss_error": 0.4921550154685974, "train/loss_total": 0.6903290748596191 }, { "epoch": 4.455784130376703, "step": 16678, "train/loss_ctc": 0.5514360666275024, "train/loss_error": 0.4210079610347748, "train/loss_total": 0.4470936059951782 }, { "epoch": 4.45605129575207, "step": 16679, "train/loss_ctc": 0.17111949622631073, "train/loss_error": 0.43555620312690735, 
"train/loss_total": 0.3826688528060913 }, { "epoch": 4.456318461127438, "grad_norm": 1.2710942029953003, "learning_rate": 3.27651616350521e-06, "loss": 0.4734, "step": 16680 }, { "epoch": 4.456318461127438, "step": 16680, "train/loss_ctc": 0.8978790044784546, "train/loss_error": 0.3945029377937317, "train/loss_total": 0.4951781630516052 }, { "epoch": 4.456585626502806, "step": 16681, "train/loss_ctc": 0.3348647952079773, "train/loss_error": 0.4592567980289459, "train/loss_total": 0.43437841534614563 }, { "epoch": 4.456852791878172, "step": 16682, "train/loss_ctc": 0.6622541546821594, "train/loss_error": 0.45269647240638733, "train/loss_total": 0.4946080446243286 }, { "epoch": 4.45711995725354, "step": 16683, "train/loss_ctc": 0.4818761944770813, "train/loss_error": 0.46097439527511597, "train/loss_total": 0.465154767036438 }, { "epoch": 4.457387122628908, "step": 16684, "train/loss_ctc": 0.5729173421859741, "train/loss_error": 0.39155372977256775, "train/loss_total": 0.427826464176178 }, { "epoch": 4.457654288004274, "step": 16685, "train/loss_ctc": 0.5314388275146484, "train/loss_error": 0.39126095175743103, "train/loss_total": 0.419296532869339 }, { "epoch": 4.457921453379642, "step": 16686, "train/loss_ctc": 0.5409823656082153, "train/loss_error": 0.44132694602012634, "train/loss_total": 0.46125802397727966 }, { "epoch": 4.45818861875501, "step": 16687, "train/loss_ctc": 0.4500524699687958, "train/loss_error": 0.3639094829559326, "train/loss_total": 0.3811380863189697 }, { "epoch": 4.4584557841303765, "step": 16688, "train/loss_ctc": 1.4856321811676025, "train/loss_error": 0.41177091002464294, "train/loss_total": 0.6265431642532349 }, { "epoch": 4.458722949505744, "step": 16689, "train/loss_ctc": 1.0072187185287476, "train/loss_error": 0.3691234290599823, "train/loss_total": 0.49674248695373535 }, { "epoch": 4.458990114881112, "grad_norm": 2.707827091217041, "learning_rate": 3.2604862409831685e-06, "loss": 0.4702, "step": 16690 }, { "epoch": 4.458990114881112, 
"step": 16690, "train/loss_ctc": 0.9196081161499023, "train/loss_error": 0.46765705943107605, "train/loss_total": 0.5580472946166992 }, { "epoch": 4.4592572802564785, "step": 16691, "train/loss_ctc": 0.42463168501853943, "train/loss_error": 0.4656387269496918, "train/loss_total": 0.45743733644485474 }, { "epoch": 4.459524445631846, "step": 16692, "train/loss_ctc": 0.4435790777206421, "train/loss_error": 0.3652050495147705, "train/loss_total": 0.38087987899780273 }, { "epoch": 4.459791611007214, "step": 16693, "train/loss_ctc": 0.5549697875976562, "train/loss_error": 0.4378412067890167, "train/loss_total": 0.4612669348716736 }, { "epoch": 4.4600587763825805, "step": 16694, "train/loss_ctc": 1.3651187419891357, "train/loss_error": 0.5078614354133606, "train/loss_total": 0.6793129444122314 }, { "epoch": 4.460325941757948, "step": 16695, "train/loss_ctc": 0.57858806848526, "train/loss_error": 0.418883740901947, "train/loss_total": 0.4508246183395386 }, { "epoch": 4.460593107133316, "step": 16696, "train/loss_ctc": 0.5350260138511658, "train/loss_error": 0.37453266978263855, "train/loss_total": 0.40663135051727295 }, { "epoch": 4.4608602725086826, "step": 16697, "train/loss_ctc": 0.7102866768836975, "train/loss_error": 0.48719170689582825, "train/loss_total": 0.5318107008934021 }, { "epoch": 4.46112743788405, "step": 16698, "train/loss_ctc": 0.6599937677383423, "train/loss_error": 0.48207810521125793, "train/loss_total": 0.5176612734794617 }, { "epoch": 4.461394603259418, "step": 16699, "train/loss_ctc": 0.9758346080780029, "train/loss_error": 0.53565514087677, "train/loss_total": 0.6236910223960876 }, { "epoch": 4.461661768634785, "grad_norm": 3.3330373764038086, "learning_rate": 3.244456318461128e-06, "loss": 0.5068, "step": 16700 }, { "epoch": 4.461661768634785, "step": 16700, "train/loss_ctc": 0.44947484135627747, "train/loss_error": 0.4297637939453125, "train/loss_total": 0.43370601534843445 }, { "epoch": 4.461928934010152, "step": 16701, "train/loss_ctc": 
0.3103128969669342, "train/loss_error": 0.3988446593284607, "train/loss_total": 0.38113832473754883 }, { "epoch": 4.46219609938552, "step": 16702, "train/loss_ctc": 0.7135629057884216, "train/loss_error": 0.47120288014411926, "train/loss_total": 0.5196748971939087 }, { "epoch": 4.462463264760887, "step": 16703, "train/loss_ctc": 0.7301836013793945, "train/loss_error": 0.3836365044116974, "train/loss_total": 0.4529459476470947 }, { "epoch": 4.462730430136254, "step": 16704, "train/loss_ctc": 0.6463280320167542, "train/loss_error": 0.40493929386138916, "train/loss_total": 0.4532170593738556 }, { "epoch": 4.462997595511622, "step": 16705, "train/loss_ctc": 0.6481466293334961, "train/loss_error": 0.5082753896713257, "train/loss_total": 0.5362496376037598 }, { "epoch": 4.463264760886989, "step": 16706, "train/loss_ctc": 0.612865149974823, "train/loss_error": 0.49943438172340393, "train/loss_total": 0.5221205353736877 }, { "epoch": 4.463531926262356, "step": 16707, "train/loss_ctc": 1.206398844718933, "train/loss_error": 0.471179723739624, "train/loss_total": 0.6182235479354858 }, { "epoch": 4.463799091637724, "step": 16708, "train/loss_ctc": 0.6162998676300049, "train/loss_error": 0.4401998817920685, "train/loss_total": 0.47541987895965576 }, { "epoch": 4.464066257013091, "step": 16709, "train/loss_ctc": 0.98670494556427, "train/loss_error": 0.479452908039093, "train/loss_total": 0.5809032917022705 }, { "epoch": 4.464333422388458, "grad_norm": 2.369814872741699, "learning_rate": 3.2284263959390864e-06, "loss": 0.4974, "step": 16710 }, { "epoch": 4.464333422388458, "step": 16710, "train/loss_ctc": 0.5125019550323486, "train/loss_error": 0.334839791059494, "train/loss_total": 0.3703722357749939 }, { "epoch": 4.464600587763826, "step": 16711, "train/loss_ctc": 1.161609411239624, "train/loss_error": 0.4209313690662384, "train/loss_total": 0.5690670013427734 }, { "epoch": 4.464867753139194, "step": 16712, "train/loss_ctc": 0.7804505228996277, "train/loss_error": 
0.3983061611652374, "train/loss_total": 0.4747350513935089 }, { "epoch": 4.46513491851456, "step": 16713, "train/loss_ctc": 0.4684872031211853, "train/loss_error": 0.4156886041164398, "train/loss_total": 0.42624831199645996 }, { "epoch": 4.465402083889928, "step": 16714, "train/loss_ctc": 0.4582882225513458, "train/loss_error": 0.3854998052120209, "train/loss_total": 0.40005749464035034 }, { "epoch": 4.465669249265296, "step": 16715, "train/loss_ctc": 0.6210944056510925, "train/loss_error": 0.43216192722320557, "train/loss_total": 0.4699484407901764 }, { "epoch": 4.465936414640662, "step": 16716, "train/loss_ctc": 0.5686200857162476, "train/loss_error": 0.36192575097084045, "train/loss_total": 0.4032646417617798 }, { "epoch": 4.46620358001603, "step": 16717, "train/loss_ctc": 0.22548162937164307, "train/loss_error": 0.41764944791793823, "train/loss_total": 0.37921589612960815 }, { "epoch": 4.466470745391398, "step": 16718, "train/loss_ctc": 0.7040935754776001, "train/loss_error": 0.4731051027774811, "train/loss_total": 0.5193028450012207 }, { "epoch": 4.466737910766764, "step": 16719, "train/loss_ctc": 0.8383748531341553, "train/loss_error": 0.41813763976097107, "train/loss_total": 0.5021851062774658 }, { "epoch": 4.467005076142132, "grad_norm": 2.558846950531006, "learning_rate": 3.212396473417045e-06, "loss": 0.4514, "step": 16720 }, { "epoch": 4.467005076142132, "step": 16720, "train/loss_ctc": 0.8091953992843628, "train/loss_error": 0.4299263656139374, "train/loss_total": 0.5057801604270935 }, { "epoch": 4.4672722415175, "step": 16721, "train/loss_ctc": 0.5428149700164795, "train/loss_error": 0.3984803855419159, "train/loss_total": 0.427347332239151 }, { "epoch": 4.467539406892866, "step": 16722, "train/loss_ctc": 0.3076847493648529, "train/loss_error": 0.39758577942848206, "train/loss_total": 0.37960556149482727 }, { "epoch": 4.467806572268234, "step": 16723, "train/loss_ctc": 0.20932281017303467, "train/loss_error": 0.3863897919654846, "train/loss_total": 
0.3509764075279236 }, { "epoch": 4.468073737643602, "step": 16724, "train/loss_ctc": 0.5396265387535095, "train/loss_error": 0.3983566164970398, "train/loss_total": 0.4266105890274048 }, { "epoch": 4.4683409030189685, "step": 16725, "train/loss_ctc": 0.4286936819553375, "train/loss_error": 0.42478254437446594, "train/loss_total": 0.42556479573249817 }, { "epoch": 4.468608068394336, "step": 16726, "train/loss_ctc": 0.31472164392471313, "train/loss_error": 0.5203672051429749, "train/loss_total": 0.4792380928993225 }, { "epoch": 4.468875233769704, "step": 16727, "train/loss_ctc": 0.9549136161804199, "train/loss_error": 0.4235914349555969, "train/loss_total": 0.5298558473587036 }, { "epoch": 4.4691423991450705, "step": 16728, "train/loss_ctc": 1.1198515892028809, "train/loss_error": 0.42004287242889404, "train/loss_total": 0.5600045919418335 }, { "epoch": 4.469409564520438, "step": 16729, "train/loss_ctc": 0.8475993871688843, "train/loss_error": 0.3849238455295563, "train/loss_total": 0.4774589538574219 }, { "epoch": 4.469676729895806, "grad_norm": 1.2067806720733643, "learning_rate": 3.1963665508950042e-06, "loss": 0.4562, "step": 16730 }, { "epoch": 4.469676729895806, "step": 16730, "train/loss_ctc": 1.3899234533309937, "train/loss_error": 0.5008153915405273, "train/loss_total": 0.6786370277404785 }, { "epoch": 4.4699438952711725, "step": 16731, "train/loss_ctc": 0.33081433176994324, "train/loss_error": 0.36681342124938965, "train/loss_total": 0.3596135973930359 }, { "epoch": 4.47021106064654, "step": 16732, "train/loss_ctc": 0.43165984749794006, "train/loss_error": 0.38635873794555664, "train/loss_total": 0.3954189717769623 }, { "epoch": 4.470478226021908, "step": 16733, "train/loss_ctc": 0.7977887392044067, "train/loss_error": 0.4358689486980438, "train/loss_total": 0.5082529187202454 }, { "epoch": 4.470745391397275, "step": 16734, "train/loss_ctc": 0.3956286907196045, "train/loss_error": 0.45933854579925537, "train/loss_total": 0.44659656286239624 }, { "epoch": 
4.471012556772642, "step": 16735, "train/loss_ctc": 0.5389374494552612, "train/loss_error": 0.36451807618141174, "train/loss_total": 0.3994019627571106 }, { "epoch": 4.47127972214801, "step": 16736, "train/loss_ctc": 0.5584279894828796, "train/loss_error": 0.3923074007034302, "train/loss_total": 0.4255315363407135 }, { "epoch": 4.471546887523377, "step": 16737, "train/loss_ctc": 0.5244685411453247, "train/loss_error": 0.45717906951904297, "train/loss_total": 0.4706369638442993 }, { "epoch": 4.471814052898744, "step": 16738, "train/loss_ctc": 0.3875639736652374, "train/loss_error": 0.411309152841568, "train/loss_total": 0.40656012296676636 }, { "epoch": 4.472081218274112, "step": 16739, "train/loss_ctc": 1.247005581855774, "train/loss_error": 0.4034944176673889, "train/loss_total": 0.5721966624259949 }, { "epoch": 4.472348383649479, "grad_norm": 6.574997425079346, "learning_rate": 3.1803366283729627e-06, "loss": 0.4663, "step": 16740 }, { "epoch": 4.472348383649479, "step": 16740, "train/loss_ctc": 0.30313435196876526, "train/loss_error": 0.3646745979785919, "train/loss_total": 0.35236653685569763 }, { "epoch": 4.472615549024846, "step": 16741, "train/loss_ctc": 0.6759171485900879, "train/loss_error": 0.45198172330856323, "train/loss_total": 0.4967688322067261 }, { "epoch": 4.472882714400214, "step": 16742, "train/loss_ctc": 0.9591277837753296, "train/loss_error": 0.40135860443115234, "train/loss_total": 0.5129124522209167 }, { "epoch": 4.4731498797755815, "step": 16743, "train/loss_ctc": 1.1799554824829102, "train/loss_error": 0.45259276032447815, "train/loss_total": 0.5980653166770935 }, { "epoch": 4.473417045150948, "step": 16744, "train/loss_ctc": 0.3746233582496643, "train/loss_error": 0.5189013481140137, "train/loss_total": 0.4900457561016083 }, { "epoch": 4.473684210526316, "step": 16745, "train/loss_ctc": 0.4447518289089203, "train/loss_error": 0.4360625445842743, "train/loss_total": 0.43780040740966797 }, { "epoch": 4.473951375901683, "step": 16746, 
"train/loss_ctc": 0.6665405035018921, "train/loss_error": 0.418079674243927, "train/loss_total": 0.46777182817459106 }, { "epoch": 4.47421854127705, "step": 16747, "train/loss_ctc": 0.2482343465089798, "train/loss_error": 0.4135943055152893, "train/loss_total": 0.38052231073379517 }, { "epoch": 4.474485706652418, "step": 16748, "train/loss_ctc": 1.0634102821350098, "train/loss_error": 0.48989182710647583, "train/loss_total": 0.6045955419540405 }, { "epoch": 4.474752872027786, "step": 16749, "train/loss_ctc": 0.8695761561393738, "train/loss_error": 0.3962377607822418, "train/loss_total": 0.4909054636955261 }, { "epoch": 4.475020037403152, "grad_norm": 4.1885271072387695, "learning_rate": 3.1643067058509217e-06, "loss": 0.4832, "step": 16750 }, { "epoch": 4.475020037403152, "step": 16750, "train/loss_ctc": 0.741614818572998, "train/loss_error": 0.430733323097229, "train/loss_total": 0.49290964007377625 }, { "epoch": 4.47528720277852, "step": 16751, "train/loss_ctc": 0.509494423866272, "train/loss_error": 0.4623754918575287, "train/loss_total": 0.4717992842197418 }, { "epoch": 4.475554368153888, "step": 16752, "train/loss_ctc": 1.070620059967041, "train/loss_error": 0.4454343020915985, "train/loss_total": 0.570471465587616 }, { "epoch": 4.475821533529254, "step": 16753, "train/loss_ctc": 0.48681333661079407, "train/loss_error": 0.42067575454711914, "train/loss_total": 0.4339032769203186 }, { "epoch": 4.476088698904622, "step": 16754, "train/loss_ctc": 0.6765998005867004, "train/loss_error": 0.4090144634246826, "train/loss_total": 0.46253156661987305 }, { "epoch": 4.47635586427999, "step": 16755, "train/loss_ctc": 0.5537500381469727, "train/loss_error": 0.3961924612522125, "train/loss_total": 0.42770400643348694 }, { "epoch": 4.476623029655356, "step": 16756, "train/loss_ctc": 0.2938244938850403, "train/loss_error": 0.3972588777542114, "train/loss_total": 0.37657201290130615 }, { "epoch": 4.476890195030724, "step": 16757, "train/loss_ctc": 0.9158916473388672, 
"train/loss_error": 0.5435357689857483, "train/loss_total": 0.6180069446563721 }, { "epoch": 4.477157360406092, "step": 16758, "train/loss_ctc": 0.5828847289085388, "train/loss_error": 0.3962736427783966, "train/loss_total": 0.4335958659648895 }, { "epoch": 4.477424525781458, "step": 16759, "train/loss_ctc": 0.23521000146865845, "train/loss_error": 0.4127900302410126, "train/loss_total": 0.3772740364074707 }, { "epoch": 4.477691691156826, "grad_norm": 1.40824556350708, "learning_rate": 3.1482767833288806e-06, "loss": 0.4665, "step": 16760 }, { "epoch": 4.477691691156826, "step": 16760, "train/loss_ctc": 0.9829816818237305, "train/loss_error": 0.4473403990268707, "train/loss_total": 0.5544686913490295 }, { "epoch": 4.477958856532194, "step": 16761, "train/loss_ctc": 0.5925695896148682, "train/loss_error": 0.5232624411582947, "train/loss_total": 0.5371238589286804 }, { "epoch": 4.4782260219075605, "step": 16762, "train/loss_ctc": 0.5796784162521362, "train/loss_error": 0.4799831509590149, "train/loss_total": 0.4999222159385681 }, { "epoch": 4.478493187282928, "step": 16763, "train/loss_ctc": 0.547825276851654, "train/loss_error": 0.4797672927379608, "train/loss_total": 0.4933788776397705 }, { "epoch": 4.478760352658296, "step": 16764, "train/loss_ctc": 0.9227686524391174, "train/loss_error": 0.4257855713367462, "train/loss_total": 0.5251821875572205 }, { "epoch": 4.4790275180336625, "step": 16765, "train/loss_ctc": 0.7349215745925903, "train/loss_error": 0.4511176645755768, "train/loss_total": 0.5078784823417664 }, { "epoch": 4.47929468340903, "step": 16766, "train/loss_ctc": 0.669809103012085, "train/loss_error": 0.4057937562465668, "train/loss_total": 0.4585968255996704 }, { "epoch": 4.479561848784398, "step": 16767, "train/loss_ctc": 1.818472146987915, "train/loss_error": 0.5137109160423279, "train/loss_total": 0.7746632099151611 }, { "epoch": 4.4798290141597645, "step": 16768, "train/loss_ctc": 0.8038305044174194, "train/loss_error": 0.4927673935890198, 
"train/loss_total": 0.5549800395965576 }, { "epoch": 4.480096179535132, "step": 16769, "train/loss_ctc": 0.4636289179325104, "train/loss_error": 0.4761371314525604, "train/loss_total": 0.4736354947090149 }, { "epoch": 4.4803633449105, "grad_norm": 1.4601422548294067, "learning_rate": 3.1322468608068395e-06, "loss": 0.538, "step": 16770 }, { "epoch": 4.4803633449105, "step": 16770, "train/loss_ctc": 0.6919221878051758, "train/loss_error": 0.405349999666214, "train/loss_total": 0.4626644253730774 }, { "epoch": 4.480630510285867, "step": 16771, "train/loss_ctc": 0.5268002152442932, "train/loss_error": 0.392823725938797, "train/loss_total": 0.41961902379989624 }, { "epoch": 4.480897675661234, "step": 16772, "train/loss_ctc": 0.7651618123054504, "train/loss_error": 0.40956997871398926, "train/loss_total": 0.4806883633136749 }, { "epoch": 4.481164841036602, "step": 16773, "train/loss_ctc": 0.653121829032898, "train/loss_error": 0.4318285882472992, "train/loss_total": 0.4760872721672058 }, { "epoch": 4.481432006411969, "step": 16774, "train/loss_ctc": 0.5127113461494446, "train/loss_error": 0.392740935087204, "train/loss_total": 0.4167350232601166 }, { "epoch": 4.481699171787336, "step": 16775, "train/loss_ctc": 1.1105159521102905, "train/loss_error": 0.41732949018478394, "train/loss_total": 0.5559667944908142 }, { "epoch": 4.481966337162704, "step": 16776, "train/loss_ctc": 0.6973613500595093, "train/loss_error": 0.4593554735183716, "train/loss_total": 0.5069566965103149 }, { "epoch": 4.482233502538071, "step": 16777, "train/loss_ctc": 0.724888801574707, "train/loss_error": 0.39856401085853577, "train/loss_total": 0.463828980922699 }, { "epoch": 4.482500667913438, "step": 16778, "train/loss_ctc": 0.8956540822982788, "train/loss_error": 0.4534000754356384, "train/loss_total": 0.5418509244918823 }, { "epoch": 4.482767833288806, "step": 16779, "train/loss_ctc": 0.6084860563278198, "train/loss_error": 0.4610784649848938, "train/loss_total": 0.49055999517440796 }, { "epoch": 
4.4830349986641735, "grad_norm": 1.191773772239685, "learning_rate": 3.1162169382847985e-06, "loss": 0.4815, "step": 16780 }, { "epoch": 4.4830349986641735, "step": 16780, "train/loss_ctc": 1.067360281944275, "train/loss_error": 0.453469842672348, "train/loss_total": 0.5762479305267334 }, { "epoch": 4.48330216403954, "step": 16781, "train/loss_ctc": 0.5504859089851379, "train/loss_error": 0.389323890209198, "train/loss_total": 0.421556293964386 }, { "epoch": 4.483569329414908, "step": 16782, "train/loss_ctc": 0.5324901938438416, "train/loss_error": 0.4926338791847229, "train/loss_total": 0.5006051659584045 }, { "epoch": 4.483836494790276, "step": 16783, "train/loss_ctc": 0.3740534484386444, "train/loss_error": 0.4633338153362274, "train/loss_total": 0.44547775387763977 }, { "epoch": 4.484103660165642, "step": 16784, "train/loss_ctc": 0.8664679527282715, "train/loss_error": 0.39207762479782104, "train/loss_total": 0.4869557023048401 }, { "epoch": 4.48437082554101, "step": 16785, "train/loss_ctc": 0.6144095063209534, "train/loss_error": 0.40943774580955505, "train/loss_total": 0.4504321217536926 }, { "epoch": 4.484637990916378, "step": 16786, "train/loss_ctc": 0.366587370634079, "train/loss_error": 0.38986000418663025, "train/loss_total": 0.38520547747612 }, { "epoch": 4.484905156291744, "step": 16787, "train/loss_ctc": 0.6844860315322876, "train/loss_error": 0.4518125355243683, "train/loss_total": 0.4983472526073456 }, { "epoch": 4.485172321667112, "step": 16788, "train/loss_ctc": 0.393671452999115, "train/loss_error": 0.4595492482185364, "train/loss_total": 0.44637370109558105 }, { "epoch": 4.48543948704248, "step": 16789, "train/loss_ctc": 0.982731819152832, "train/loss_error": 0.49092307686805725, "train/loss_total": 0.5892848372459412 }, { "epoch": 4.485706652417846, "grad_norm": 2.275474786758423, "learning_rate": 3.1001870157627574e-06, "loss": 0.48, "step": 16790 }, { "epoch": 4.485706652417846, "step": 16790, "train/loss_ctc": 0.5502475500106812, 
"train/loss_error": 0.41004228591918945, "train/loss_total": 0.43808335065841675 }, { "epoch": 4.485973817793214, "step": 16791, "train/loss_ctc": 0.862409234046936, "train/loss_error": 0.4089098274707794, "train/loss_total": 0.49960970878601074 }, { "epoch": 4.486240983168582, "step": 16792, "train/loss_ctc": 0.8692131042480469, "train/loss_error": 0.4023040235042572, "train/loss_total": 0.495685875415802 }, { "epoch": 4.486508148543948, "step": 16793, "train/loss_ctc": 0.9625241160392761, "train/loss_error": 0.42540207505226135, "train/loss_total": 0.5328264832496643 }, { "epoch": 4.486775313919316, "step": 16794, "train/loss_ctc": 0.6749283075332642, "train/loss_error": 0.4292129576206207, "train/loss_total": 0.4783560633659363 }, { "epoch": 4.487042479294684, "step": 16795, "train/loss_ctc": 0.7802239656448364, "train/loss_error": 0.461758017539978, "train/loss_total": 0.5254512429237366 }, { "epoch": 4.4873096446700504, "step": 16796, "train/loss_ctc": 0.4925532341003418, "train/loss_error": 0.46666544675827026, "train/loss_total": 0.47184300422668457 }, { "epoch": 4.487576810045418, "step": 16797, "train/loss_ctc": 0.3636113107204437, "train/loss_error": 0.46637704968452454, "train/loss_total": 0.44582390785217285 }, { "epoch": 4.487843975420786, "step": 16798, "train/loss_ctc": 0.9345724582672119, "train/loss_error": 0.4550575911998749, "train/loss_total": 0.5509605407714844 }, { "epoch": 4.4881111407961525, "step": 16799, "train/loss_ctc": 0.7931045889854431, "train/loss_error": 0.3938753306865692, "train/loss_total": 0.4737212061882019 }, { "epoch": 4.48837830617152, "grad_norm": 1.6322861909866333, "learning_rate": 3.084157093240716e-06, "loss": 0.4912, "step": 16800 }, { "epoch": 4.48837830617152, "step": 16800, "train/loss_ctc": 0.7678847312927246, "train/loss_error": 0.39298009872436523, "train/loss_total": 0.46796101331710815 }, { "epoch": 4.488645471546888, "step": 16801, "train/loss_ctc": 1.269484281539917, "train/loss_error": 0.4615197777748108, 
"train/loss_total": 0.623112678527832 }, { "epoch": 4.4889126369222545, "step": 16802, "train/loss_ctc": 0.9369891285896301, "train/loss_error": 0.43291589617729187, "train/loss_total": 0.5337305665016174 }, { "epoch": 4.489179802297622, "step": 16803, "train/loss_ctc": 0.5865596532821655, "train/loss_error": 0.4384092688560486, "train/loss_total": 0.468039333820343 }, { "epoch": 4.48944696767299, "step": 16804, "train/loss_ctc": 0.3933295011520386, "train/loss_error": 0.39525753259658813, "train/loss_total": 0.39487195014953613 }, { "epoch": 4.4897141330483565, "step": 16805, "train/loss_ctc": 0.8393123149871826, "train/loss_error": 0.47169503569602966, "train/loss_total": 0.5452184677124023 }, { "epoch": 4.489981298423724, "step": 16806, "train/loss_ctc": 0.5444803833961487, "train/loss_error": 0.42092156410217285, "train/loss_total": 0.44563332200050354 }, { "epoch": 4.490248463799092, "step": 16807, "train/loss_ctc": 0.2948370575904846, "train/loss_error": 0.37435513734817505, "train/loss_total": 0.3584515154361725 }, { "epoch": 4.490515629174459, "step": 16808, "train/loss_ctc": 0.6485816836357117, "train/loss_error": 0.42280060052871704, "train/loss_total": 0.4679568111896515 }, { "epoch": 4.490782794549826, "step": 16809, "train/loss_ctc": 0.8013820648193359, "train/loss_error": 0.475628137588501, "train/loss_total": 0.5407789349555969 }, { "epoch": 4.491049959925194, "grad_norm": 1.7342947721481323, "learning_rate": 3.068127170718675e-06, "loss": 0.4846, "step": 16810 }, { "epoch": 4.491049959925194, "step": 16810, "train/loss_ctc": 0.46371695399284363, "train/loss_error": 0.45713910460472107, "train/loss_total": 0.4584546983242035 }, { "epoch": 4.4913171253005615, "step": 16811, "train/loss_ctc": 0.5641512274742126, "train/loss_error": 0.4569917321205139, "train/loss_total": 0.4784236550331116 }, { "epoch": 4.491584290675928, "step": 16812, "train/loss_ctc": 0.6526967883110046, "train/loss_error": 0.41880524158477783, "train/loss_total": 
0.46558356285095215 }, { "epoch": 4.491851456051296, "step": 16813, "train/loss_ctc": 0.9246748685836792, "train/loss_error": 0.45759299397468567, "train/loss_total": 0.5510094165802002 }, { "epoch": 4.4921186214266635, "step": 16814, "train/loss_ctc": 0.4454638957977295, "train/loss_error": 0.3981863856315613, "train/loss_total": 0.4076419174671173 }, { "epoch": 4.49238578680203, "step": 16815, "train/loss_ctc": 0.561536431312561, "train/loss_error": 0.4508630931377411, "train/loss_total": 0.4729977548122406 }, { "epoch": 4.492652952177398, "step": 16816, "train/loss_ctc": 0.5617089867591858, "train/loss_error": 0.4411271810531616, "train/loss_total": 0.46524354815483093 }, { "epoch": 4.4929201175527655, "step": 16817, "train/loss_ctc": 0.7307494878768921, "train/loss_error": 0.4076332151889801, "train/loss_total": 0.47225648164749146 }, { "epoch": 4.493187282928132, "step": 16818, "train/loss_ctc": 0.9119124412536621, "train/loss_error": 0.4443533718585968, "train/loss_total": 0.537865161895752 }, { "epoch": 4.4934544483035, "step": 16819, "train/loss_ctc": 0.6244276165962219, "train/loss_error": 0.488718718290329, "train/loss_total": 0.5158604979515076 }, { "epoch": 4.493721613678868, "grad_norm": 2.9132840633392334, "learning_rate": 3.0520972481966338e-06, "loss": 0.4825, "step": 16820 }, { "epoch": 4.493721613678868, "step": 16820, "train/loss_ctc": 1.2189196348190308, "train/loss_error": 0.44405820965766907, "train/loss_total": 0.5990304946899414 }, { "epoch": 4.493988779054234, "step": 16821, "train/loss_ctc": 0.907157301902771, "train/loss_error": 0.44715946912765503, "train/loss_total": 0.5391590595245361 }, { "epoch": 4.494255944429602, "step": 16822, "train/loss_ctc": 0.39470338821411133, "train/loss_error": 0.40195631980895996, "train/loss_total": 0.40050575137138367 }, { "epoch": 4.49452310980497, "step": 16823, "train/loss_ctc": 0.9663341045379639, "train/loss_error": 0.45190179347991943, "train/loss_total": 0.5547882914543152 }, { "epoch": 
4.494790275180336, "step": 16824, "train/loss_ctc": 0.7333315014839172, "train/loss_error": 0.4472309648990631, "train/loss_total": 0.5044510960578918 }, { "epoch": 4.495057440555704, "step": 16825, "train/loss_ctc": 0.6229003667831421, "train/loss_error": 0.42802250385284424, "train/loss_total": 0.4669981002807617 }, { "epoch": 4.495324605931072, "step": 16826, "train/loss_ctc": 1.1833038330078125, "train/loss_error": 0.49442175030708313, "train/loss_total": 0.6321981549263 }, { "epoch": 4.495591771306438, "step": 16827, "train/loss_ctc": 0.8552631735801697, "train/loss_error": 0.49827855825424194, "train/loss_total": 0.5696754455566406 }, { "epoch": 4.495858936681806, "step": 16828, "train/loss_ctc": 0.7342315912246704, "train/loss_error": 0.44176533818244934, "train/loss_total": 0.5002585649490356 }, { "epoch": 4.496126102057174, "step": 16829, "train/loss_ctc": 0.5917498469352722, "train/loss_error": 0.395072877407074, "train/loss_total": 0.4344082772731781 }, { "epoch": 4.49639326743254, "grad_norm": 1.5373033285140991, "learning_rate": 3.0360673256745923e-06, "loss": 0.5201, "step": 16830 }, { "epoch": 4.49639326743254, "step": 16830, "train/loss_ctc": 0.9416258335113525, "train/loss_error": 0.5203923583030701, "train/loss_total": 0.6046390533447266 }, { "epoch": 4.496660432807908, "step": 16831, "train/loss_ctc": 1.1864080429077148, "train/loss_error": 0.4089723229408264, "train/loss_total": 0.5644594430923462 }, { "epoch": 4.496927598183276, "step": 16832, "train/loss_ctc": 0.5167498588562012, "train/loss_error": 0.4356706440448761, "train/loss_total": 0.45188650488853455 }, { "epoch": 4.4971947635586424, "step": 16833, "train/loss_ctc": 0.7589992880821228, "train/loss_error": 0.4387153685092926, "train/loss_total": 0.5027721524238586 }, { "epoch": 4.49746192893401, "step": 16834, "train/loss_ctc": 0.2919803857803345, "train/loss_error": 0.41626277565956116, "train/loss_total": 0.3914062976837158 }, { "epoch": 4.497729094309378, "step": 16835, 
"train/loss_ctc": 1.373482584953308, "train/loss_error": 0.4908779561519623, "train/loss_total": 0.6673989295959473 }, { "epoch": 4.4979962596847445, "step": 16836, "train/loss_ctc": 1.0265268087387085, "train/loss_error": 0.4253537952899933, "train/loss_total": 0.5455883741378784 }, { "epoch": 4.498263425060112, "step": 16837, "train/loss_ctc": 0.8463457822799683, "train/loss_error": 0.45693737268447876, "train/loss_total": 0.5348190665245056 }, { "epoch": 4.49853059043548, "step": 16838, "train/loss_ctc": 0.6914680600166321, "train/loss_error": 0.529058039188385, "train/loss_total": 0.5615400671958923 }, { "epoch": 4.4987977558108465, "step": 16839, "train/loss_ctc": 1.1669528484344482, "train/loss_error": 0.46599051356315613, "train/loss_total": 0.6061829924583435 }, { "epoch": 4.499064921186214, "grad_norm": 1.975494384765625, "learning_rate": 3.0200374031525516e-06, "loss": 0.5431, "step": 16840 }, { "epoch": 4.499064921186214, "step": 16840, "train/loss_ctc": 0.8308053016662598, "train/loss_error": 0.4079631567001343, "train/loss_total": 0.49253159761428833 }, { "epoch": 4.499332086561582, "step": 16841, "train/loss_ctc": 0.3500629961490631, "train/loss_error": 0.39993807673454285, "train/loss_total": 0.3899630606174469 }, { "epoch": 4.499599251936949, "step": 16842, "train/loss_ctc": 1.3691737651824951, "train/loss_error": 0.4147828221321106, "train/loss_total": 0.6056610345840454 }, { "epoch": 4.499866417312316, "step": 16843, "train/loss_ctc": 0.9530056715011597, "train/loss_error": 0.4783581793308258, "train/loss_total": 0.5732877254486084 }, { "epoch": 4.500133582687684, "step": 16844, "train/loss_ctc": 0.7107778787612915, "train/loss_error": 0.4471173882484436, "train/loss_total": 0.49984949827194214 }, { "epoch": 4.500400748063051, "step": 16845, "train/loss_ctc": 0.46200162172317505, "train/loss_error": 0.4813011884689331, "train/loss_total": 0.47744131088256836 }, { "epoch": 4.500667913438418, "step": 16846, "train/loss_ctc": 0.49161791801452637, 
"train/loss_error": 0.4265303611755371, "train/loss_total": 0.4395478665828705 }, { "epoch": 4.500935078813786, "step": 16847, "train/loss_ctc": 0.9231338500976562, "train/loss_error": 0.4014093577861786, "train/loss_total": 0.505754292011261 }, { "epoch": 4.5012022441891535, "step": 16848, "train/loss_ctc": 0.5168454647064209, "train/loss_error": 0.5197508931159973, "train/loss_total": 0.519169807434082 }, { "epoch": 4.50146940956452, "step": 16849, "train/loss_ctc": 0.4642499089241028, "train/loss_error": 0.42980870604515076, "train/loss_total": 0.43669694662094116 }, { "epoch": 4.501736574939888, "grad_norm": 1.6067830324172974, "learning_rate": 3.00400748063051e-06, "loss": 0.494, "step": 16850 }, { "epoch": 4.501736574939888, "step": 16850, "train/loss_ctc": 0.6593374013900757, "train/loss_error": 0.4835095703601837, "train/loss_total": 0.5186751484870911 }, { "epoch": 4.5020037403152555, "step": 16851, "train/loss_ctc": 0.9732552766799927, "train/loss_error": 0.3792440891265869, "train/loss_total": 0.498046338558197 }, { "epoch": 4.502270905690622, "step": 16852, "train/loss_ctc": 0.8445628881454468, "train/loss_error": 0.4197258949279785, "train/loss_total": 0.5046932697296143 }, { "epoch": 4.50253807106599, "step": 16853, "train/loss_ctc": 0.6741937398910522, "train/loss_error": 0.4798307418823242, "train/loss_total": 0.5187033414840698 }, { "epoch": 4.5028052364413576, "step": 16854, "train/loss_ctc": 0.7783215641975403, "train/loss_error": 0.4414632022380829, "train/loss_total": 0.5088348984718323 }, { "epoch": 4.503072401816724, "step": 16855, "train/loss_ctc": 1.459671139717102, "train/loss_error": 0.483524888753891, "train/loss_total": 0.6787541508674622 }, { "epoch": 4.503339567192092, "step": 16856, "train/loss_ctc": 0.5502856969833374, "train/loss_error": 0.4787152111530304, "train/loss_total": 0.49302932620048523 }, { "epoch": 4.50360673256746, "step": 16857, "train/loss_ctc": 0.5011515617370605, "train/loss_error": 0.4331933259963989, 
"train/loss_total": 0.44678497314453125 }, { "epoch": 4.503873897942826, "step": 16858, "train/loss_ctc": 0.6230131387710571, "train/loss_error": 0.40789175033569336, "train/loss_total": 0.450916051864624 }, { "epoch": 4.504141063318194, "step": 16859, "train/loss_ctc": 0.49028804898262024, "train/loss_error": 0.37013447284698486, "train/loss_total": 0.3941652178764343 }, { "epoch": 4.504408228693562, "grad_norm": 2.513181447982788, "learning_rate": 2.9879775581084695e-06, "loss": 0.5013, "step": 16860 }, { "epoch": 4.504408228693562, "step": 16860, "train/loss_ctc": 0.5337272882461548, "train/loss_error": 0.39504897594451904, "train/loss_total": 0.42278462648391724 }, { "epoch": 4.504675394068928, "step": 16861, "train/loss_ctc": 0.37345898151397705, "train/loss_error": 0.380320280790329, "train/loss_total": 0.37894803285598755 }, { "epoch": 4.504942559444296, "step": 16862, "train/loss_ctc": 0.36720719933509827, "train/loss_error": 0.4333682656288147, "train/loss_total": 0.42013606429100037 }, { "epoch": 4.505209724819664, "step": 16863, "train/loss_ctc": 0.8488457202911377, "train/loss_error": 0.4870049059391022, "train/loss_total": 0.5593730807304382 }, { "epoch": 4.50547689019503, "step": 16864, "train/loss_ctc": 0.9072433710098267, "train/loss_error": 0.4319642186164856, "train/loss_total": 0.5270200371742249 }, { "epoch": 4.505744055570398, "step": 16865, "train/loss_ctc": 0.4218674302101135, "train/loss_error": 0.4000808298587799, "train/loss_total": 0.4044381380081177 }, { "epoch": 4.506011220945766, "step": 16866, "train/loss_ctc": 0.9730261564254761, "train/loss_error": 0.4444323480129242, "train/loss_total": 0.5501511096954346 }, { "epoch": 4.506278386321132, "step": 16867, "train/loss_ctc": 1.0115327835083008, "train/loss_error": 0.47610512375831604, "train/loss_total": 0.5831906795501709 }, { "epoch": 4.5065455516965, "step": 16868, "train/loss_ctc": 0.8680019378662109, "train/loss_error": 0.49107375741004944, "train/loss_total": 0.5664594173431396 }, 
{ "epoch": 4.506812717071868, "step": 16869, "train/loss_ctc": 0.5564032793045044, "train/loss_error": 0.4458572566509247, "train/loss_total": 0.4679664671421051 }, { "epoch": 4.5070798824472345, "grad_norm": 2.5569143295288086, "learning_rate": 2.971947635586428e-06, "loss": 0.488, "step": 16870 }, { "epoch": 4.5070798824472345, "step": 16870, "train/loss_ctc": 0.9531509876251221, "train/loss_error": 0.463079571723938, "train/loss_total": 0.5610938668251038 }, { "epoch": 4.507347047822602, "step": 16871, "train/loss_ctc": 1.4924275875091553, "train/loss_error": 0.48583123087882996, "train/loss_total": 0.6871504783630371 }, { "epoch": 4.50761421319797, "step": 16872, "train/loss_ctc": 0.6390676498413086, "train/loss_error": 0.42764464020729065, "train/loss_total": 0.4699292778968811 }, { "epoch": 4.507881378573337, "step": 16873, "train/loss_ctc": 0.7515559196472168, "train/loss_error": 0.4602510631084442, "train/loss_total": 0.5185120701789856 }, { "epoch": 4.508148543948704, "step": 16874, "train/loss_ctc": 0.4075964093208313, "train/loss_error": 0.42632851004600525, "train/loss_total": 0.42258208990097046 }, { "epoch": 4.508415709324072, "step": 16875, "train/loss_ctc": 0.2924501299858093, "train/loss_error": 0.40471479296684265, "train/loss_total": 0.38226187229156494 }, { "epoch": 4.5086828746994385, "step": 16876, "train/loss_ctc": 0.5424996018409729, "train/loss_error": 0.399795800447464, "train/loss_total": 0.42833656072616577 }, { "epoch": 4.508950040074806, "step": 16877, "train/loss_ctc": 1.0223171710968018, "train/loss_error": 0.36527326703071594, "train/loss_total": 0.4966820478439331 }, { "epoch": 4.509217205450174, "step": 16878, "train/loss_ctc": 0.7646726369857788, "train/loss_error": 0.3621605634689331, "train/loss_total": 0.4426630139350891 }, { "epoch": 4.509484370825541, "step": 16879, "train/loss_ctc": 0.6520216464996338, "train/loss_error": 0.4570333659648895, "train/loss_total": 0.4960310459136963 }, { "epoch": 4.509751536200908, 
"grad_norm": 2.9661264419555664, "learning_rate": 2.955917713064387e-06, "loss": 0.4905, "step": 16880 }, { "epoch": 4.509751536200908, "step": 16880, "train/loss_ctc": 0.7674391269683838, "train/loss_error": 0.46226370334625244, "train/loss_total": 0.5232987999916077 }, { "epoch": 4.510018701576276, "step": 16881, "train/loss_ctc": 0.8972878456115723, "train/loss_error": 0.42522889375686646, "train/loss_total": 0.5196406841278076 }, { "epoch": 4.510285866951643, "step": 16882, "train/loss_ctc": 0.9029881954193115, "train/loss_error": 0.4923478960990906, "train/loss_total": 0.5744759440422058 }, { "epoch": 4.51055303232701, "step": 16883, "train/loss_ctc": 0.4923034906387329, "train/loss_error": 0.44823145866394043, "train/loss_total": 0.45704588294029236 }, { "epoch": 4.510820197702378, "step": 16884, "train/loss_ctc": 0.5437867045402527, "train/loss_error": 0.4034966230392456, "train/loss_total": 0.4315546452999115 }, { "epoch": 4.5110873630777455, "step": 16885, "train/loss_ctc": 0.5402926206588745, "train/loss_error": 0.4698055684566498, "train/loss_total": 0.4839029908180237 }, { "epoch": 4.511354528453112, "step": 16886, "train/loss_ctc": 0.8556128740310669, "train/loss_error": 0.46141114830970764, "train/loss_total": 0.5402514934539795 }, { "epoch": 4.51162169382848, "step": 16887, "train/loss_ctc": 1.6463953256607056, "train/loss_error": 0.42536598443984985, "train/loss_total": 0.6695718765258789 }, { "epoch": 4.5118888592038475, "step": 16888, "train/loss_ctc": 0.9869356155395508, "train/loss_error": 0.506915271282196, "train/loss_total": 0.602919340133667 }, { "epoch": 4.512156024579214, "step": 16889, "train/loss_ctc": 1.011954665184021, "train/loss_error": 0.39179667830467224, "train/loss_total": 0.5158282518386841 }, { "epoch": 4.512423189954582, "grad_norm": 2.589834213256836, "learning_rate": 2.939887790542346e-06, "loss": 0.5318, "step": 16890 }, { "epoch": 4.512423189954582, "step": 16890, "train/loss_ctc": 0.21790218353271484, "train/loss_error": 
0.37642210721969604, "train/loss_total": 0.3447181284427643 }, { "epoch": 4.5126903553299496, "step": 16891, "train/loss_ctc": 1.08856201171875, "train/loss_error": 0.4949975311756134, "train/loss_total": 0.6137104034423828 }, { "epoch": 4.512957520705316, "step": 16892, "train/loss_ctc": 0.754805862903595, "train/loss_error": 0.3496115803718567, "train/loss_total": 0.4306504726409912 }, { "epoch": 4.513224686080684, "step": 16893, "train/loss_ctc": 0.5131008625030518, "train/loss_error": 0.45415541529655457, "train/loss_total": 0.4659445285797119 }, { "epoch": 4.513491851456052, "step": 16894, "train/loss_ctc": 0.6996122002601624, "train/loss_error": 0.44917532801628113, "train/loss_total": 0.4992627203464508 }, { "epoch": 4.513759016831418, "step": 16895, "train/loss_ctc": 1.1633574962615967, "train/loss_error": 0.4698963463306427, "train/loss_total": 0.6085885763168335 }, { "epoch": 4.514026182206786, "step": 16896, "train/loss_ctc": 0.7048732042312622, "train/loss_error": 0.38661399483680725, "train/loss_total": 0.4502658545970917 }, { "epoch": 4.514293347582154, "step": 16897, "train/loss_ctc": 0.8360676169395447, "train/loss_error": 0.3652980625629425, "train/loss_total": 0.45945197343826294 }, { "epoch": 4.51456051295752, "step": 16898, "train/loss_ctc": 0.4859095811843872, "train/loss_error": 0.4610212743282318, "train/loss_total": 0.46599894762039185 }, { "epoch": 4.514827678332888, "step": 16899, "train/loss_ctc": 0.43885084986686707, "train/loss_error": 0.39029809832572937, "train/loss_total": 0.4000086486339569 }, { "epoch": 4.515094843708256, "grad_norm": 2.5063085556030273, "learning_rate": 2.9238578680203048e-06, "loss": 0.4739, "step": 16900 }, { "epoch": 4.515094843708256, "step": 16900, "train/loss_ctc": 1.222093105316162, "train/loss_error": 0.3818768858909607, "train/loss_total": 0.5499201416969299 }, { "epoch": 4.515362009083622, "step": 16901, "train/loss_ctc": 1.0575790405273438, "train/loss_error": 0.3963695764541626, "train/loss_total": 
0.5286114811897278 }, { "epoch": 4.51562917445899, "step": 16902, "train/loss_ctc": 1.2602307796478271, "train/loss_error": 0.43090710043907166, "train/loss_total": 0.5967718362808228 }, { "epoch": 4.515896339834358, "step": 16903, "train/loss_ctc": 0.5917069911956787, "train/loss_error": 0.42324167490005493, "train/loss_total": 0.45693475008010864 }, { "epoch": 4.516163505209725, "step": 16904, "train/loss_ctc": 1.0365676879882812, "train/loss_error": 0.467297226190567, "train/loss_total": 0.5811513662338257 }, { "epoch": 4.516430670585092, "step": 16905, "train/loss_ctc": 0.5460671782493591, "train/loss_error": 0.37310072779655457, "train/loss_total": 0.4076940417289734 }, { "epoch": 4.51669783596046, "step": 16906, "train/loss_ctc": 0.8281990885734558, "train/loss_error": 0.450979620218277, "train/loss_total": 0.5264235138893127 }, { "epoch": 4.5169650013358265, "step": 16907, "train/loss_ctc": 1.0496200323104858, "train/loss_error": 0.46589022874832153, "train/loss_total": 0.5826362371444702 }, { "epoch": 4.517232166711194, "step": 16908, "train/loss_ctc": 0.6804366111755371, "train/loss_error": 0.4544876515865326, "train/loss_total": 0.499677449464798 }, { "epoch": 4.517499332086562, "step": 16909, "train/loss_ctc": 0.381518691778183, "train/loss_error": 0.4461865723133087, "train/loss_total": 0.4332530200481415 }, { "epoch": 4.517766497461929, "grad_norm": 1.0130360126495361, "learning_rate": 2.9078279454982637e-06, "loss": 0.5163, "step": 16910 }, { "epoch": 4.517766497461929, "step": 16910, "train/loss_ctc": 0.29878532886505127, "train/loss_error": 0.34152865409851074, "train/loss_total": 0.3329799771308899 }, { "epoch": 4.518033662837296, "step": 16911, "train/loss_ctc": 0.7847657203674316, "train/loss_error": 0.4148596525192261, "train/loss_total": 0.48884087800979614 }, { "epoch": 4.518300828212664, "step": 16912, "train/loss_ctc": 0.4022175669670105, "train/loss_error": 0.4785095453262329, "train/loss_total": 0.46325117349624634 }, { "epoch": 
4.5185679935880305, "step": 16913, "train/loss_ctc": 0.5642272233963013, "train/loss_error": 0.3662782609462738, "train/loss_total": 0.4058680534362793 }, { "epoch": 4.518835158963398, "step": 16914, "train/loss_ctc": 0.500150203704834, "train/loss_error": 0.4387841820716858, "train/loss_total": 0.4510573744773865 }, { "epoch": 4.519102324338766, "step": 16915, "train/loss_ctc": 0.6001855134963989, "train/loss_error": 0.4337846338748932, "train/loss_total": 0.46706482768058777 }, { "epoch": 4.519369489714133, "step": 16916, "train/loss_ctc": 1.2405056953430176, "train/loss_error": 0.48609909415245056, "train/loss_total": 0.636980414390564 }, { "epoch": 4.5196366550895, "step": 16917, "train/loss_ctc": 1.095376968383789, "train/loss_error": 0.38082557916641235, "train/loss_total": 0.5237358808517456 }, { "epoch": 4.519903820464868, "step": 16918, "train/loss_ctc": 0.8014015555381775, "train/loss_error": 0.4158681333065033, "train/loss_total": 0.49297481775283813 }, { "epoch": 4.5201709858402355, "step": 16919, "train/loss_ctc": 0.8332309126853943, "train/loss_error": 0.4433180093765259, "train/loss_total": 0.5213005542755127 }, { "epoch": 4.520438151215602, "grad_norm": 2.7458243370056152, "learning_rate": 2.8917980229762222e-06, "loss": 0.4784, "step": 16920 }, { "epoch": 4.520438151215602, "step": 16920, "train/loss_ctc": 0.7084509134292603, "train/loss_error": 0.3860774636268616, "train/loss_total": 0.45055216550827026 }, { "epoch": 4.52070531659097, "step": 16921, "train/loss_ctc": 1.6664142608642578, "train/loss_error": 0.5128287672996521, "train/loss_total": 0.7435458898544312 }, { "epoch": 4.5209724819663375, "step": 16922, "train/loss_ctc": 0.7893833518028259, "train/loss_error": 0.3954665958881378, "train/loss_total": 0.4742499589920044 }, { "epoch": 4.521239647341704, "step": 16923, "train/loss_ctc": 0.9546789526939392, "train/loss_error": 0.4646413326263428, "train/loss_total": 0.5626488924026489 }, { "epoch": 4.521506812717072, "step": 16924, 
"train/loss_ctc": 0.6138708591461182, "train/loss_error": 0.39706048369407654, "train/loss_total": 0.44042256474494934 }, { "epoch": 4.5217739780924395, "step": 16925, "train/loss_ctc": 1.5828063488006592, "train/loss_error": 0.43271759152412415, "train/loss_total": 0.6627353429794312 }, { "epoch": 4.522041143467806, "step": 16926, "train/loss_ctc": 0.8196099996566772, "train/loss_error": 0.3873869776725769, "train/loss_total": 0.4738315939903259 }, { "epoch": 4.522308308843174, "step": 16927, "train/loss_ctc": 0.42007124423980713, "train/loss_error": 0.3993431031703949, "train/loss_total": 0.40348875522613525 }, { "epoch": 4.522575474218542, "step": 16928, "train/loss_ctc": 0.9274165630340576, "train/loss_error": 0.4871932864189148, "train/loss_total": 0.5752379298210144 }, { "epoch": 4.522842639593908, "step": 16929, "train/loss_ctc": 0.6045472621917725, "train/loss_error": 0.4299358129501343, "train/loss_total": 0.46485811471939087 }, { "epoch": 4.523109804969276, "grad_norm": 1.664840579032898, "learning_rate": 2.875768100454181e-06, "loss": 0.5252, "step": 16930 }, { "epoch": 4.523109804969276, "step": 16930, "train/loss_ctc": 0.6511423587799072, "train/loss_error": 0.4272014796733856, "train/loss_total": 0.4719896912574768 }, { "epoch": 4.523376970344644, "step": 16931, "train/loss_ctc": 0.6400348544120789, "train/loss_error": 0.44074806571006775, "train/loss_total": 0.48060542345046997 }, { "epoch": 4.52364413572001, "step": 16932, "train/loss_ctc": 1.35770845413208, "train/loss_error": 0.43380051851272583, "train/loss_total": 0.6185821294784546 }, { "epoch": 4.523911301095378, "step": 16933, "train/loss_ctc": 0.5098334550857544, "train/loss_error": 0.40252265334129333, "train/loss_total": 0.4239848256111145 }, { "epoch": 4.524178466470746, "step": 16934, "train/loss_ctc": 0.26478707790374756, "train/loss_error": 0.37558725476264954, "train/loss_total": 0.3534272313117981 }, { "epoch": 4.524445631846112, "step": 16935, "train/loss_ctc": 0.3880612254142761, 
"train/loss_error": 0.418979674577713, "train/loss_total": 0.4127959907054901 }, { "epoch": 4.52471279722148, "step": 16936, "train/loss_ctc": 0.4535123407840729, "train/loss_error": 0.44276952743530273, "train/loss_total": 0.44491809606552124 }, { "epoch": 4.524979962596848, "step": 16937, "train/loss_ctc": 0.7309862375259399, "train/loss_error": 0.45521438121795654, "train/loss_total": 0.5103687644004822 }, { "epoch": 4.525247127972214, "step": 16938, "train/loss_ctc": 0.7924772500991821, "train/loss_error": 0.49891868233680725, "train/loss_total": 0.5576304197311401 }, { "epoch": 4.525514293347582, "step": 16939, "train/loss_ctc": 0.6234425902366638, "train/loss_error": 0.4471467137336731, "train/loss_total": 0.4824059009552002 }, { "epoch": 4.52578145872295, "grad_norm": 1.8378338813781738, "learning_rate": 2.85973817793214e-06, "loss": 0.4757, "step": 16940 }, { "epoch": 4.52578145872295, "step": 16940, "train/loss_ctc": 0.6324353814125061, "train/loss_error": 0.3840702474117279, "train/loss_total": 0.43374326825141907 }, { "epoch": 4.526048624098317, "step": 16941, "train/loss_ctc": 0.7100244760513306, "train/loss_error": 0.4382777512073517, "train/loss_total": 0.4926270842552185 }, { "epoch": 4.526315789473684, "step": 16942, "train/loss_ctc": 0.37964296340942383, "train/loss_error": 0.3985595107078552, "train/loss_total": 0.39477622509002686 }, { "epoch": 4.526582954849052, "step": 16943, "train/loss_ctc": 0.303066223859787, "train/loss_error": 0.3869578540325165, "train/loss_total": 0.37017953395843506 }, { "epoch": 4.5268501202244185, "step": 16944, "train/loss_ctc": 0.6399222612380981, "train/loss_error": 0.46758556365966797, "train/loss_total": 0.502052903175354 }, { "epoch": 4.527117285599786, "step": 16945, "train/loss_ctc": 0.30144283175468445, "train/loss_error": 0.4957916736602783, "train/loss_total": 0.45692193508148193 }, { "epoch": 4.527384450975154, "step": 16946, "train/loss_ctc": 1.7255475521087646, "train/loss_error": 0.5010778903961182, 
"train/loss_total": 0.7459717988967896 }, { "epoch": 4.527651616350521, "step": 16947, "train/loss_ctc": 0.35592353343963623, "train/loss_error": 0.42414718866348267, "train/loss_total": 0.41050249338150024 }, { "epoch": 4.527918781725888, "step": 16948, "train/loss_ctc": 0.9906038045883179, "train/loss_error": 0.4874509871006012, "train/loss_total": 0.5880815386772156 }, { "epoch": 4.528185947101256, "step": 16949, "train/loss_ctc": 0.32690632343292236, "train/loss_error": 0.4417429268360138, "train/loss_total": 0.41877561807632446 }, { "epoch": 4.528453112476623, "grad_norm": 2.879248857498169, "learning_rate": 2.843708255410099e-06, "loss": 0.4814, "step": 16950 }, { "epoch": 4.528453112476623, "step": 16950, "train/loss_ctc": 0.5884979963302612, "train/loss_error": 0.41257914900779724, "train/loss_total": 0.4477629065513611 }, { "epoch": 4.52872027785199, "step": 16951, "train/loss_ctc": 0.4539630115032196, "train/loss_error": 0.44034379720687866, "train/loss_total": 0.44306764006614685 }, { "epoch": 4.528987443227358, "step": 16952, "train/loss_ctc": 0.3513889014720917, "train/loss_error": 0.47094064950942993, "train/loss_total": 0.44703030586242676 }, { "epoch": 4.529254608602725, "step": 16953, "train/loss_ctc": 0.6198269724845886, "train/loss_error": 0.35301703214645386, "train/loss_total": 0.4063790440559387 }, { "epoch": 4.529521773978092, "step": 16954, "train/loss_ctc": 0.4753040373325348, "train/loss_error": 0.4116222560405731, "train/loss_total": 0.424358606338501 }, { "epoch": 4.52978893935346, "step": 16955, "train/loss_ctc": 0.3022466003894806, "train/loss_error": 0.3822557032108307, "train/loss_total": 0.36625391244888306 }, { "epoch": 4.5300561047288275, "step": 16956, "train/loss_ctc": 1.2741491794586182, "train/loss_error": 0.3697751760482788, "train/loss_total": 0.5506500005722046 }, { "epoch": 4.530323270104194, "step": 16957, "train/loss_ctc": 0.7148874998092651, "train/loss_error": 0.338958740234375, "train/loss_total": 0.41414451599121094 
}, { "epoch": 4.530590435479562, "step": 16958, "train/loss_ctc": 0.7124797105789185, "train/loss_error": 0.4005494713783264, "train/loss_total": 0.46293550729751587 }, { "epoch": 4.5308576008549295, "step": 16959, "train/loss_ctc": 0.3434392809867859, "train/loss_error": 0.40250515937805176, "train/loss_total": 0.39069199562072754 }, { "epoch": 4.531124766230296, "grad_norm": 1.8315600156784058, "learning_rate": 2.8276783328880575e-06, "loss": 0.4353, "step": 16960 }, { "epoch": 4.531124766230296, "step": 16960, "train/loss_ctc": 0.8631458282470703, "train/loss_error": 0.428054541349411, "train/loss_total": 0.5150728225708008 }, { "epoch": 4.531391931605664, "step": 16961, "train/loss_ctc": 0.6720316410064697, "train/loss_error": 0.4499783515930176, "train/loss_total": 0.49438902735710144 }, { "epoch": 4.5316590969810315, "step": 16962, "train/loss_ctc": 0.43493542075157166, "train/loss_error": 0.3796631097793579, "train/loss_total": 0.3907175660133362 }, { "epoch": 4.531926262356398, "step": 16963, "train/loss_ctc": 0.584992527961731, "train/loss_error": 0.4243292808532715, "train/loss_total": 0.45646196603775024 }, { "epoch": 4.532193427731766, "step": 16964, "train/loss_ctc": 0.3272457420825958, "train/loss_error": 0.4540563225746155, "train/loss_total": 0.4286942183971405 }, { "epoch": 4.532460593107134, "step": 16965, "train/loss_ctc": 0.3527390658855438, "train/loss_error": 0.42500102519989014, "train/loss_total": 0.4105486571788788 }, { "epoch": 4.5327277584825, "step": 16966, "train/loss_ctc": 1.0125658512115479, "train/loss_error": 0.4513401687145233, "train/loss_total": 0.5635853409767151 }, { "epoch": 4.532994923857868, "step": 16967, "train/loss_ctc": 0.988243579864502, "train/loss_error": 0.4881957173347473, "train/loss_total": 0.5882052779197693 }, { "epoch": 4.533262089233236, "step": 16968, "train/loss_ctc": 1.0645930767059326, "train/loss_error": 0.4656864404678345, "train/loss_total": 0.5854677557945251 }, { "epoch": 4.533529254608602, "step": 
16969, "train/loss_ctc": 0.5401307940483093, "train/loss_error": 0.40213286876678467, "train/loss_total": 0.42973244190216064 }, { "epoch": 4.53379641998397, "grad_norm": 4.981181621551514, "learning_rate": 2.811648410366017e-06, "loss": 0.4863, "step": 16970 }, { "epoch": 4.53379641998397, "step": 16970, "train/loss_ctc": 0.7316284775733948, "train/loss_error": 0.43084466457366943, "train/loss_total": 0.4910014271736145 }, { "epoch": 4.534063585359338, "step": 16971, "train/loss_ctc": 0.5332993268966675, "train/loss_error": 0.4108504354953766, "train/loss_total": 0.4353402256965637 }, { "epoch": 4.534330750734705, "step": 16972, "train/loss_ctc": 0.13484659790992737, "train/loss_error": 0.42171770334243774, "train/loss_total": 0.3643434941768646 }, { "epoch": 4.534597916110072, "step": 16973, "train/loss_ctc": 0.41203272342681885, "train/loss_error": 0.4266086518764496, "train/loss_total": 0.4236934781074524 }, { "epoch": 4.53486508148544, "step": 16974, "train/loss_ctc": 0.48235833644866943, "train/loss_error": 0.45457518100738525, "train/loss_total": 0.46013182401657104 }, { "epoch": 4.535132246860806, "step": 16975, "train/loss_ctc": 0.762519121170044, "train/loss_error": 0.41764381527900696, "train/loss_total": 0.48661887645721436 }, { "epoch": 4.535399412236174, "step": 16976, "train/loss_ctc": 0.32599231600761414, "train/loss_error": 0.4339047372341156, "train/loss_total": 0.4123222827911377 }, { "epoch": 4.535666577611542, "step": 16977, "train/loss_ctc": 0.5168972015380859, "train/loss_error": 0.45133233070373535, "train/loss_total": 0.4644452929496765 }, { "epoch": 4.535933742986909, "step": 16978, "train/loss_ctc": 0.6667214632034302, "train/loss_error": 0.4093300700187683, "train/loss_total": 0.4608083665370941 }, { "epoch": 4.536200908362276, "step": 16979, "train/loss_ctc": 1.2114741802215576, "train/loss_error": 0.5204492807388306, "train/loss_total": 0.6586542725563049 }, { "epoch": 4.536468073737644, "grad_norm": 1.8160804510116577, 
"learning_rate": 2.7956184878439754e-06, "loss": 0.4657, "step": 16980 }, { "epoch": 4.536468073737644, "step": 16980, "train/loss_ctc": 0.48669636249542236, "train/loss_error": 0.3869358003139496, "train/loss_total": 0.4068879187107086 }, { "epoch": 4.5367352391130105, "step": 16981, "train/loss_ctc": 0.5627577900886536, "train/loss_error": 0.4353133738040924, "train/loss_total": 0.460802286863327 }, { "epoch": 4.537002404488378, "step": 16982, "train/loss_ctc": 0.5867971181869507, "train/loss_error": 0.4152991473674774, "train/loss_total": 0.4495987594127655 }, { "epoch": 4.537269569863746, "step": 16983, "train/loss_ctc": 0.3392632007598877, "train/loss_error": 0.45461374521255493, "train/loss_total": 0.43154364824295044 }, { "epoch": 4.537536735239113, "step": 16984, "train/loss_ctc": 0.5089130401611328, "train/loss_error": 0.5063261389732361, "train/loss_total": 0.5068435072898865 }, { "epoch": 4.53780390061448, "step": 16985, "train/loss_ctc": 2.625782012939453, "train/loss_error": 0.4952738285064697, "train/loss_total": 0.9213755130767822 }, { "epoch": 4.538071065989848, "step": 16986, "train/loss_ctc": 0.5175759792327881, "train/loss_error": 0.5336823463439941, "train/loss_total": 0.5304610729217529 }, { "epoch": 4.538338231365215, "step": 16987, "train/loss_ctc": 0.47577032446861267, "train/loss_error": 0.5004991292953491, "train/loss_total": 0.4955533742904663 }, { "epoch": 4.538605396740582, "step": 16988, "train/loss_ctc": 0.6458300948143005, "train/loss_error": 0.3962545096874237, "train/loss_total": 0.4461696147918701 }, { "epoch": 4.53887256211595, "step": 16989, "train/loss_ctc": 0.7125800848007202, "train/loss_error": 0.44499319791793823, "train/loss_total": 0.49851056933403015 }, { "epoch": 4.5391397274913174, "grad_norm": 15.887065887451172, "learning_rate": 2.7795885653219347e-06, "loss": 0.5148, "step": 16990 }, { "epoch": 4.5391397274913174, "step": 16990, "train/loss_ctc": 1.0475941896438599, "train/loss_error": 0.4193408191204071, 
"train/loss_total": 0.5449914932250977 }, { "epoch": 4.539406892866684, "step": 16991, "train/loss_ctc": 0.5517456531524658, "train/loss_error": 0.42083895206451416, "train/loss_total": 0.4470202922821045 }, { "epoch": 4.539674058242052, "step": 16992, "train/loss_ctc": 0.7268061637878418, "train/loss_error": 0.39219340682029724, "train/loss_total": 0.45911598205566406 }, { "epoch": 4.5399412236174195, "step": 16993, "train/loss_ctc": 1.288938045501709, "train/loss_error": 0.37972408533096313, "train/loss_total": 0.5615668892860413 }, { "epoch": 4.540208388992786, "step": 16994, "train/loss_ctc": 0.6473309993743896, "train/loss_error": 0.4136759042739868, "train/loss_total": 0.46040692925453186 }, { "epoch": 4.540475554368154, "step": 16995, "train/loss_ctc": 0.3820439577102661, "train/loss_error": 0.4288721978664398, "train/loss_total": 0.41950657963752747 }, { "epoch": 4.5407427197435215, "step": 16996, "train/loss_ctc": 0.9139811992645264, "train/loss_error": 0.39156216382980347, "train/loss_total": 0.4960459768772125 }, { "epoch": 4.541009885118888, "step": 16997, "train/loss_ctc": 0.7951436042785645, "train/loss_error": 0.45090124011039734, "train/loss_total": 0.5197497010231018 }, { "epoch": 4.541277050494256, "step": 16998, "train/loss_ctc": 1.1372911930084229, "train/loss_error": 0.39065900444984436, "train/loss_total": 0.5399854183197021 }, { "epoch": 4.5415442158696235, "step": 16999, "train/loss_ctc": 1.2302923202514648, "train/loss_error": 0.417228639125824, "train/loss_total": 0.5798413753509521 }, { "epoch": 4.54181138124499, "grad_norm": 2.3020739555358887, "learning_rate": 2.7635586427998932e-06, "loss": 0.5028, "step": 17000 }, { "epoch": 4.54181138124499, "step": 17000, "train/loss_ctc": 0.5437088012695312, "train/loss_error": 0.4796282947063446, "train/loss_total": 0.49244439601898193 }, { "epoch": 4.542078546620358, "step": 17001, "train/loss_ctc": 0.30136606097221375, "train/loss_error": 0.4662409722805023, "train/loss_total": 
0.4332659840583801 }, { "epoch": 4.542345711995726, "step": 17002, "train/loss_ctc": 0.46171486377716064, "train/loss_error": 0.4419134557247162, "train/loss_total": 0.4458737373352051 }, { "epoch": 4.542612877371093, "step": 17003, "train/loss_ctc": 0.9912981390953064, "train/loss_error": 0.42794084548950195, "train/loss_total": 0.5406123399734497 }, { "epoch": 4.54288004274646, "step": 17004, "train/loss_ctc": 0.39975133538246155, "train/loss_error": 0.47408220171928406, "train/loss_total": 0.45921602845191956 }, { "epoch": 4.543147208121828, "step": 17005, "train/loss_ctc": 0.8088303804397583, "train/loss_error": 0.46864646673202515, "train/loss_total": 0.5366832613945007 }, { "epoch": 4.543414373497194, "step": 17006, "train/loss_ctc": 0.7118053436279297, "train/loss_error": 0.39540132880210876, "train/loss_total": 0.4586821496486664 }, { "epoch": 4.543681538872562, "step": 17007, "train/loss_ctc": 0.22674627602100372, "train/loss_error": 0.38742944598197937, "train/loss_total": 0.35529279708862305 }, { "epoch": 4.54394870424793, "step": 17008, "train/loss_ctc": 0.7762081623077393, "train/loss_error": 0.35974621772766113, "train/loss_total": 0.44303861260414124 }, { "epoch": 4.544215869623297, "step": 17009, "train/loss_ctc": 0.6749129295349121, "train/loss_error": 0.3625269830226898, "train/loss_total": 0.42500418424606323 }, { "epoch": 4.544483034998664, "grad_norm": 1.938545823097229, "learning_rate": 2.7475287202778517e-06, "loss": 0.459, "step": 17010 }, { "epoch": 4.544483034998664, "step": 17010, "train/loss_ctc": 0.34269753098487854, "train/loss_error": 0.3446797728538513, "train/loss_total": 0.3442833125591278 }, { "epoch": 4.544750200374032, "step": 17011, "train/loss_ctc": 0.2969670593738556, "train/loss_error": 0.4690878093242645, "train/loss_total": 0.43466365337371826 }, { "epoch": 4.545017365749398, "step": 17012, "train/loss_ctc": 0.6481963992118835, "train/loss_error": 0.3710489571094513, "train/loss_total": 0.42647844552993774 }, { "epoch": 
4.545284531124766, "step": 17013, "train/loss_ctc": 1.3703356981277466, "train/loss_error": 0.4392382502555847, "train/loss_total": 0.625457763671875 }, { "epoch": 4.545551696500134, "step": 17014, "train/loss_ctc": 0.7230316400527954, "train/loss_error": 0.3879272937774658, "train/loss_total": 0.45494818687438965 }, { "epoch": 4.545818861875501, "step": 17015, "train/loss_ctc": 0.612908124923706, "train/loss_error": 0.4284897744655609, "train/loss_total": 0.4653734564781189 }, { "epoch": 4.546086027250868, "step": 17016, "train/loss_ctc": 0.6204665899276733, "train/loss_error": 0.4103517234325409, "train/loss_total": 0.4523746967315674 }, { "epoch": 4.546353192626236, "step": 17017, "train/loss_ctc": 0.9370975494384766, "train/loss_error": 0.4104926884174347, "train/loss_total": 0.5158136487007141 }, { "epoch": 4.546620358001603, "step": 17018, "train/loss_ctc": 0.5107057094573975, "train/loss_error": 0.372372031211853, "train/loss_total": 0.40003877878189087 }, { "epoch": 4.54688752337697, "step": 17019, "train/loss_ctc": 0.5365080833435059, "train/loss_error": 0.37702274322509766, "train/loss_total": 0.4089198112487793 }, { "epoch": 4.547154688752338, "grad_norm": 1.440476655960083, "learning_rate": 2.731498797755811e-06, "loss": 0.4528, "step": 17020 }, { "epoch": 4.547154688752338, "step": 17020, "train/loss_ctc": 0.3566009998321533, "train/loss_error": 0.39563828706741333, "train/loss_total": 0.38783085346221924 }, { "epoch": 4.547421854127705, "step": 17021, "train/loss_ctc": 0.4385581314563751, "train/loss_error": 0.4217801094055176, "train/loss_total": 0.4251357316970825 }, { "epoch": 4.547689019503072, "step": 17022, "train/loss_ctc": 0.6684660911560059, "train/loss_error": 0.4013463854789734, "train/loss_total": 0.4547703266143799 }, { "epoch": 4.54795618487844, "step": 17023, "train/loss_ctc": 0.9015443325042725, "train/loss_error": 0.4059891104698181, "train/loss_total": 0.5051001310348511 }, { "epoch": 4.548223350253807, "step": 17024, 
"train/loss_ctc": 0.9807514548301697, "train/loss_error": 0.4215032160282135, "train/loss_total": 0.5333528518676758 }, { "epoch": 4.548490515629174, "step": 17025, "train/loss_ctc": 0.8410804271697998, "train/loss_error": 0.45591142773628235, "train/loss_total": 0.5329452157020569 }, { "epoch": 4.548757681004542, "step": 17026, "train/loss_ctc": 0.7943745851516724, "train/loss_error": 0.39860999584198, "train/loss_total": 0.47776293754577637 }, { "epoch": 4.5490248463799094, "step": 17027, "train/loss_ctc": 1.0563217401504517, "train/loss_error": 0.4209854006767273, "train/loss_total": 0.5480526685714722 }, { "epoch": 4.549292011755276, "step": 17028, "train/loss_ctc": 1.217354655265808, "train/loss_error": 0.45915499329566956, "train/loss_total": 0.6107949018478394 }, { "epoch": 4.549559177130644, "step": 17029, "train/loss_ctc": 0.2112206220626831, "train/loss_error": 0.3761782944202423, "train/loss_total": 0.34318676590919495 }, { "epoch": 4.5498263425060115, "grad_norm": 1.513815999031067, "learning_rate": 2.7154688752337696e-06, "loss": 0.4819, "step": 17030 }, { "epoch": 4.5498263425060115, "step": 17030, "train/loss_ctc": 0.547917366027832, "train/loss_error": 0.5049514174461365, "train/loss_total": 0.5135446190834045 }, { "epoch": 4.550093507881378, "step": 17031, "train/loss_ctc": 1.458796739578247, "train/loss_error": 0.41727638244628906, "train/loss_total": 0.6255804300308228 }, { "epoch": 4.550360673256746, "step": 17032, "train/loss_ctc": 0.5812779664993286, "train/loss_error": 0.3947755694389343, "train/loss_total": 0.43207603693008423 }, { "epoch": 4.5506278386321135, "step": 17033, "train/loss_ctc": 0.6579941511154175, "train/loss_error": 0.45907163619995117, "train/loss_total": 0.49885615706443787 }, { "epoch": 4.55089500400748, "step": 17034, "train/loss_ctc": 0.9044384956359863, "train/loss_error": 0.457295686006546, "train/loss_total": 0.546724259853363 }, { "epoch": 4.551162169382848, "step": 17035, "train/loss_ctc": 1.0961241722106934, 
"train/loss_error": 0.45258286595344543, "train/loss_total": 0.581291139125824 }, { "epoch": 4.5514293347582155, "step": 17036, "train/loss_ctc": 1.0342254638671875, "train/loss_error": 0.4401822090148926, "train/loss_total": 0.5589908361434937 }, { "epoch": 4.551696500133582, "step": 17037, "train/loss_ctc": 0.8081015348434448, "train/loss_error": 0.5360189080238342, "train/loss_total": 0.5904354453086853 }, { "epoch": 4.55196366550895, "step": 17038, "train/loss_ctc": 0.3363463282585144, "train/loss_error": 0.4099626839160919, "train/loss_total": 0.3952394127845764 }, { "epoch": 4.552230830884318, "step": 17039, "train/loss_ctc": 1.0239238739013672, "train/loss_error": 0.44841745495796204, "train/loss_total": 0.563518762588501 }, { "epoch": 4.552497996259685, "grad_norm": 1.685482144355774, "learning_rate": 2.6994389527117285e-06, "loss": 0.5306, "step": 17040 }, { "epoch": 4.552497996259685, "step": 17040, "train/loss_ctc": 0.4341846704483032, "train/loss_error": 0.45476117730140686, "train/loss_total": 0.45064589381217957 }, { "epoch": 4.552765161635052, "step": 17041, "train/loss_ctc": 0.6175863742828369, "train/loss_error": 0.4375326931476593, "train/loss_total": 0.4735434353351593 }, { "epoch": 4.55303232701042, "step": 17042, "train/loss_ctc": 0.4773971438407898, "train/loss_error": 0.37571805715560913, "train/loss_total": 0.39605388045310974 }, { "epoch": 4.553299492385786, "step": 17043, "train/loss_ctc": 0.769207239151001, "train/loss_error": 0.36340489983558655, "train/loss_total": 0.4445653557777405 }, { "epoch": 4.553566657761154, "step": 17044, "train/loss_ctc": 0.5992476940155029, "train/loss_error": 0.4338528513908386, "train/loss_total": 0.4669318199157715 }, { "epoch": 4.553833823136522, "step": 17045, "train/loss_ctc": 1.5252937078475952, "train/loss_error": 0.4131101071834564, "train/loss_total": 0.6355468034744263 }, { "epoch": 4.554100988511889, "step": 17046, "train/loss_ctc": 0.9697777032852173, "train/loss_error": 0.5426471829414368, 
"train/loss_total": 0.6280733346939087 }, { "epoch": 4.554368153887256, "step": 17047, "train/loss_ctc": 1.3727335929870605, "train/loss_error": 0.37281301617622375, "train/loss_total": 0.5727971196174622 }, { "epoch": 4.554635319262624, "step": 17048, "train/loss_ctc": 0.9894097447395325, "train/loss_error": 0.4892699718475342, "train/loss_total": 0.5892979502677917 }, { "epoch": 4.554902484637991, "step": 17049, "train/loss_ctc": 0.6287417411804199, "train/loss_error": 0.3364831209182739, "train/loss_total": 0.39493483304977417 }, { "epoch": 4.555169650013358, "grad_norm": 6.927265167236328, "learning_rate": 2.6834090301896875e-06, "loss": 0.5052, "step": 17050 }, { "epoch": 4.555169650013358, "step": 17050, "train/loss_ctc": 0.7705446481704712, "train/loss_error": 0.4714522659778595, "train/loss_total": 0.5312707424163818 }, { "epoch": 4.555436815388726, "step": 17051, "train/loss_ctc": 0.3829605281352997, "train/loss_error": 0.39690619707107544, "train/loss_total": 0.3941170871257782 }, { "epoch": 4.555703980764093, "step": 17052, "train/loss_ctc": 0.453762412071228, "train/loss_error": 0.4391186237335205, "train/loss_total": 0.4420473873615265 }, { "epoch": 4.55597114613946, "step": 17053, "train/loss_ctc": 0.6485582590103149, "train/loss_error": 0.4728529751300812, "train/loss_total": 0.5079940557479858 }, { "epoch": 4.556238311514828, "step": 17054, "train/loss_ctc": 0.6521851420402527, "train/loss_error": 0.4428078234195709, "train/loss_total": 0.4846832752227783 }, { "epoch": 4.556505476890195, "step": 17055, "train/loss_ctc": 1.2919771671295166, "train/loss_error": 0.4829065501689911, "train/loss_total": 0.6447206735610962 }, { "epoch": 4.556772642265562, "step": 17056, "train/loss_ctc": 1.0437523126602173, "train/loss_error": 0.4507651925086975, "train/loss_total": 0.5693626403808594 }, { "epoch": 4.55703980764093, "step": 17057, "train/loss_ctc": 0.32124486565589905, "train/loss_error": 0.4130278527736664, "train/loss_total": 0.3946712613105774 }, { 
"epoch": 4.557306973016297, "step": 17058, "train/loss_ctc": 0.4183565378189087, "train/loss_error": 0.3803446292877197, "train/loss_total": 0.3879470229148865 }, { "epoch": 4.557574138391664, "step": 17059, "train/loss_ctc": 0.8354923725128174, "train/loss_error": 0.4934272766113281, "train/loss_total": 0.561840295791626 }, { "epoch": 4.557841303767032, "grad_norm": 1.8392890691757202, "learning_rate": 2.6673791076676464e-06, "loss": 0.4919, "step": 17060 }, { "epoch": 4.557841303767032, "step": 17060, "train/loss_ctc": 0.5476064085960388, "train/loss_error": 0.41711974143981934, "train/loss_total": 0.44321709871292114 }, { "epoch": 4.558108469142399, "step": 17061, "train/loss_ctc": 0.5292664170265198, "train/loss_error": 0.4507785737514496, "train/loss_total": 0.4664761424064636 }, { "epoch": 4.558375634517766, "step": 17062, "train/loss_ctc": 0.5781844258308411, "train/loss_error": 0.5072336792945862, "train/loss_total": 0.5214238166809082 }, { "epoch": 4.558642799893134, "step": 17063, "train/loss_ctc": 1.3823955059051514, "train/loss_error": 0.4374482333660126, "train/loss_total": 0.6264376640319824 }, { "epoch": 4.5589099652685015, "step": 17064, "train/loss_ctc": 0.5226435661315918, "train/loss_error": 0.38848862051963806, "train/loss_total": 0.41531962156295776 }, { "epoch": 4.559177130643868, "step": 17065, "train/loss_ctc": 0.31758973002433777, "train/loss_error": 0.4451535940170288, "train/loss_total": 0.41964083909988403 }, { "epoch": 4.559444296019236, "step": 17066, "train/loss_ctc": 1.389699935913086, "train/loss_error": 0.4536342918872833, "train/loss_total": 0.6408474445343018 }, { "epoch": 4.5597114613946035, "step": 17067, "train/loss_ctc": 0.3535727858543396, "train/loss_error": 0.4230123460292816, "train/loss_total": 0.4091244339942932 }, { "epoch": 4.55997862676997, "step": 17068, "train/loss_ctc": 1.3273694515228271, "train/loss_error": 0.5043998956680298, "train/loss_total": 0.6689938306808472 }, { "epoch": 4.560245792145338, "step": 17069, 
"train/loss_ctc": 0.6641446352005005, "train/loss_error": 0.34211161732673645, "train/loss_total": 0.40651822090148926 }, { "epoch": 4.5605129575207055, "grad_norm": 2.170769691467285, "learning_rate": 2.6513491851456053e-06, "loss": 0.5018, "step": 17070 }, { "epoch": 4.5605129575207055, "step": 17070, "train/loss_ctc": 0.22873973846435547, "train/loss_error": 0.42907845973968506, "train/loss_total": 0.3890106976032257 }, { "epoch": 4.560780122896073, "step": 17071, "train/loss_ctc": 0.6127046942710876, "train/loss_error": 0.42397353053092957, "train/loss_total": 0.4617197811603546 }, { "epoch": 4.56104728827144, "step": 17072, "train/loss_ctc": 0.6277915239334106, "train/loss_error": 0.3721317648887634, "train/loss_total": 0.4232637286186218 }, { "epoch": 4.5613144536468075, "step": 17073, "train/loss_ctc": 0.6978392004966736, "train/loss_error": 0.4554712176322937, "train/loss_total": 0.5039448142051697 }, { "epoch": 4.561581619022174, "step": 17074, "train/loss_ctc": 0.4666284918785095, "train/loss_error": 0.48117998242378235, "train/loss_total": 0.47826969623565674 }, { "epoch": 4.561848784397542, "step": 17075, "train/loss_ctc": 0.6665722131729126, "train/loss_error": 0.4704432189464569, "train/loss_total": 0.5096690058708191 }, { "epoch": 4.56211594977291, "step": 17076, "train/loss_ctc": 0.7005223035812378, "train/loss_error": 0.43903470039367676, "train/loss_total": 0.4913322329521179 }, { "epoch": 4.562383115148277, "step": 17077, "train/loss_ctc": 0.9011536836624146, "train/loss_error": 0.3806077837944031, "train/loss_total": 0.4847169816493988 }, { "epoch": 4.562650280523644, "step": 17078, "train/loss_ctc": 0.7539091110229492, "train/loss_error": 0.3665982782840729, "train/loss_total": 0.44406044483184814 }, { "epoch": 4.562917445899012, "step": 17079, "train/loss_ctc": 0.5160862803459167, "train/loss_error": 0.41254574060440063, "train/loss_total": 0.4332538843154907 }, { "epoch": 4.563184611274378, "grad_norm": 1.0832535028457642, "learning_rate": 
2.6353192626235643e-06, "loss": 0.4619, "step": 17080 }, { "epoch": 4.563184611274378, "step": 17080, "train/loss_ctc": 1.2123827934265137, "train/loss_error": 0.4959478974342346, "train/loss_total": 0.6392349004745483 }, { "epoch": 4.563451776649746, "step": 17081, "train/loss_ctc": 1.2803676128387451, "train/loss_error": 0.40523186326026917, "train/loss_total": 0.5802590250968933 }, { "epoch": 4.563718942025114, "step": 17082, "train/loss_ctc": 0.8094682097434998, "train/loss_error": 0.4134010374546051, "train/loss_total": 0.4926145076751709 }, { "epoch": 4.563986107400481, "step": 17083, "train/loss_ctc": 0.7802442908287048, "train/loss_error": 0.48804396390914917, "train/loss_total": 0.5464840531349182 }, { "epoch": 4.564253272775848, "step": 17084, "train/loss_ctc": 0.5835437178611755, "train/loss_error": 0.4860846996307373, "train/loss_total": 0.505576491355896 }, { "epoch": 4.564520438151216, "step": 17085, "train/loss_ctc": 1.1749613285064697, "train/loss_error": 0.41281658411026, "train/loss_total": 0.565245509147644 }, { "epoch": 4.564787603526583, "step": 17086, "train/loss_ctc": 0.6469463109970093, "train/loss_error": 0.4262116253376007, "train/loss_total": 0.47035855054855347 }, { "epoch": 4.56505476890195, "step": 17087, "train/loss_ctc": 0.809651792049408, "train/loss_error": 0.46263131499290466, "train/loss_total": 0.5320354104042053 }, { "epoch": 4.565321934277318, "step": 17088, "train/loss_ctc": 0.971526026725769, "train/loss_error": 0.42670533061027527, "train/loss_total": 0.5356694459915161 }, { "epoch": 4.565589099652685, "step": 17089, "train/loss_ctc": 0.30316162109375, "train/loss_error": 0.49192649126052856, "train/loss_total": 0.4541735053062439 }, { "epoch": 4.565856265028052, "grad_norm": 2.0420756340026855, "learning_rate": 2.6192893401015228e-06, "loss": 0.5322, "step": 17090 }, { "epoch": 4.565856265028052, "step": 17090, "train/loss_ctc": 0.5206490755081177, "train/loss_error": 0.44895800948143005, "train/loss_total": 
0.46329623460769653 }, { "epoch": 4.56612343040342, "step": 17091, "train/loss_ctc": 0.9689717292785645, "train/loss_error": 0.4673043489456177, "train/loss_total": 0.5676378607749939 }, { "epoch": 4.566390595778787, "step": 17092, "train/loss_ctc": 0.597610592842102, "train/loss_error": 0.35939547419548035, "train/loss_total": 0.40703850984573364 }, { "epoch": 4.566657761154154, "step": 17093, "train/loss_ctc": 0.35344868898391724, "train/loss_error": 0.42560938000679016, "train/loss_total": 0.41117724776268005 }, { "epoch": 4.566924926529522, "step": 17094, "train/loss_ctc": 0.8267824649810791, "train/loss_error": 0.4347480237483978, "train/loss_total": 0.513154923915863 }, { "epoch": 4.567192091904889, "step": 17095, "train/loss_ctc": 0.5910448431968689, "train/loss_error": 0.4891296923160553, "train/loss_total": 0.509512722492218 }, { "epoch": 4.567459257280256, "step": 17096, "train/loss_ctc": 1.084768295288086, "train/loss_error": 0.4699462056159973, "train/loss_total": 0.592910647392273 }, { "epoch": 4.567726422655624, "step": 17097, "train/loss_ctc": 0.47556793689727783, "train/loss_error": 0.38288041949272156, "train/loss_total": 0.40141791105270386 }, { "epoch": 4.567993588030991, "step": 17098, "train/loss_ctc": 0.42580991983413696, "train/loss_error": 0.4278724789619446, "train/loss_total": 0.4274599850177765 }, { "epoch": 4.568260753406358, "step": 17099, "train/loss_ctc": 0.5116605758666992, "train/loss_error": 0.477768212556839, "train/loss_total": 0.4845466911792755 }, { "epoch": 4.568527918781726, "grad_norm": 1.7125623226165771, "learning_rate": 2.6032594175794817e-06, "loss": 0.4778, "step": 17100 }, { "epoch": 4.568527918781726, "step": 17100, "train/loss_ctc": 1.3997340202331543, "train/loss_error": 0.46672916412353516, "train/loss_total": 0.6533301472663879 }, { "epoch": 4.5687950841570935, "step": 17101, "train/loss_ctc": 0.7839722633361816, "train/loss_error": 0.4254378378391266, "train/loss_total": 0.4971447288990021 }, { "epoch": 
4.56906224953246, "step": 17102, "train/loss_ctc": 1.0309609174728394, "train/loss_error": 0.3765336871147156, "train/loss_total": 0.5074191093444824 }, { "epoch": 4.569329414907828, "step": 17103, "train/loss_ctc": 0.2673938572406769, "train/loss_error": 0.4063842296600342, "train/loss_total": 0.37858617305755615 }, { "epoch": 4.5695965802831955, "step": 17104, "train/loss_ctc": 0.5441678762435913, "train/loss_error": 0.37007248401641846, "train/loss_total": 0.40489158034324646 }, { "epoch": 4.569863745658562, "step": 17105, "train/loss_ctc": 0.683884859085083, "train/loss_error": 0.4944610595703125, "train/loss_total": 0.5323458313941956 }, { "epoch": 4.57013091103393, "step": 17106, "train/loss_ctc": 0.7541837096214294, "train/loss_error": 0.418501615524292, "train/loss_total": 0.4856380224227905 }, { "epoch": 4.5703980764092975, "step": 17107, "train/loss_ctc": 0.8370319604873657, "train/loss_error": 0.40175125002861023, "train/loss_total": 0.4888073801994324 }, { "epoch": 4.570665241784665, "step": 17108, "train/loss_ctc": 1.0397100448608398, "train/loss_error": 0.4770052433013916, "train/loss_total": 0.5895462036132812 }, { "epoch": 4.570932407160032, "step": 17109, "train/loss_ctc": 0.9343779683113098, "train/loss_error": 0.45903754234313965, "train/loss_total": 0.5541056394577026 }, { "epoch": 4.5711995725353995, "grad_norm": 1.82341468334198, "learning_rate": 2.5872294950574406e-06, "loss": 0.5092, "step": 17110 }, { "epoch": 4.5711995725353995, "step": 17110, "train/loss_ctc": 0.8290432691574097, "train/loss_error": 0.41848888993263245, "train/loss_total": 0.5005998015403748 }, { "epoch": 4.571466737910766, "step": 17111, "train/loss_ctc": 0.6440204381942749, "train/loss_error": 0.5059308409690857, "train/loss_total": 0.5335487723350525 }, { "epoch": 4.571733903286134, "step": 17112, "train/loss_ctc": 0.6268196105957031, "train/loss_error": 0.3995911777019501, "train/loss_total": 0.4450368881225586 }, { "epoch": 4.572001068661502, "step": 17113, 
"train/loss_ctc": 0.5421658754348755, "train/loss_error": 0.38153767585754395, "train/loss_total": 0.4136633276939392 }, { "epoch": 4.572268234036869, "step": 17114, "train/loss_ctc": 0.3397304117679596, "train/loss_error": 0.4818328022956848, "train/loss_total": 0.45341232419013977 }, { "epoch": 4.572535399412236, "step": 17115, "train/loss_ctc": 0.5027757883071899, "train/loss_error": 0.42812323570251465, "train/loss_total": 0.4430537521839142 }, { "epoch": 4.572802564787604, "step": 17116, "train/loss_ctc": 0.3176373243331909, "train/loss_error": 0.35283806920051575, "train/loss_total": 0.34579792618751526 }, { "epoch": 4.573069730162971, "step": 17117, "train/loss_ctc": 0.25208231806755066, "train/loss_error": 0.43333548307418823, "train/loss_total": 0.3970848619937897 }, { "epoch": 4.573336895538338, "step": 17118, "train/loss_ctc": 0.582156777381897, "train/loss_error": 0.440032958984375, "train/loss_total": 0.46845772862434387 }, { "epoch": 4.573604060913706, "step": 17119, "train/loss_ctc": 0.6061496138572693, "train/loss_error": 0.4591729938983917, "train/loss_total": 0.4885683059692383 }, { "epoch": 4.573871226289073, "grad_norm": 1.6004241704940796, "learning_rate": 2.571199572535399e-06, "loss": 0.4489, "step": 17120 }, { "epoch": 4.573871226289073, "step": 17120, "train/loss_ctc": 0.4893871545791626, "train/loss_error": 0.44776931405067444, "train/loss_total": 0.456092894077301 }, { "epoch": 4.57413839166444, "step": 17121, "train/loss_ctc": 0.5091552734375, "train/loss_error": 0.3793986141681671, "train/loss_total": 0.4053499400615692 }, { "epoch": 4.574405557039808, "step": 17122, "train/loss_ctc": 0.4741644263267517, "train/loss_error": 0.36995431780815125, "train/loss_total": 0.39079636335372925 }, { "epoch": 4.574672722415175, "step": 17123, "train/loss_ctc": 0.7315059900283813, "train/loss_error": 0.454995721578598, "train/loss_total": 0.5102977752685547 }, { "epoch": 4.574939887790542, "step": 17124, "train/loss_ctc": 0.5492885708808899, 
"train/loss_error": 0.3956720232963562, "train/loss_total": 0.42639532685279846 }, { "epoch": 4.57520705316591, "step": 17125, "train/loss_ctc": 0.5747283101081848, "train/loss_error": 0.4612400531768799, "train/loss_total": 0.48393774032592773 }, { "epoch": 4.575474218541277, "step": 17126, "train/loss_ctc": 1.0905033349990845, "train/loss_error": 0.4423266649246216, "train/loss_total": 0.5719619989395142 }, { "epoch": 4.575741383916644, "step": 17127, "train/loss_ctc": 0.6817591190338135, "train/loss_error": 0.44258058071136475, "train/loss_total": 0.4904162883758545 }, { "epoch": 4.576008549292012, "step": 17128, "train/loss_ctc": 0.8154937624931335, "train/loss_error": 0.47366923093795776, "train/loss_total": 0.5420341491699219 }, { "epoch": 4.576275714667379, "step": 17129, "train/loss_ctc": 0.6323240995407104, "train/loss_error": 0.4269821047782898, "train/loss_total": 0.4680505394935608 }, { "epoch": 4.576542880042746, "grad_norm": 2.1824281215667725, "learning_rate": 2.5551696500133585e-06, "loss": 0.4745, "step": 17130 }, { "epoch": 4.576542880042746, "step": 17130, "train/loss_ctc": 1.1874290704727173, "train/loss_error": 0.4312967360019684, "train/loss_total": 0.5825232267379761 }, { "epoch": 4.576810045418114, "step": 17131, "train/loss_ctc": 1.188244342803955, "train/loss_error": 0.41788819432258606, "train/loss_total": 0.5719594359397888 }, { "epoch": 4.577077210793481, "step": 17132, "train/loss_ctc": 0.4905622899532318, "train/loss_error": 0.34274977445602417, "train/loss_total": 0.3723122775554657 }, { "epoch": 4.577344376168848, "step": 17133, "train/loss_ctc": 0.4239142835140228, "train/loss_error": 0.44859421253204346, "train/loss_total": 0.4436582326889038 }, { "epoch": 4.577611541544216, "step": 17134, "train/loss_ctc": 1.1039326190948486, "train/loss_error": 0.4242268204689026, "train/loss_total": 0.5601679682731628 }, { "epoch": 4.577878706919583, "step": 17135, "train/loss_ctc": 1.0418667793273926, "train/loss_error": 0.4168508052825928, 
"train/loss_total": 0.5418540239334106 }, { "epoch": 4.57814587229495, "step": 17136, "train/loss_ctc": 1.4619314670562744, "train/loss_error": 0.3229464590549469, "train/loss_total": 0.5507434606552124 }, { "epoch": 4.578413037670318, "step": 17137, "train/loss_ctc": 0.6935739517211914, "train/loss_error": 0.4001069664955139, "train/loss_total": 0.45880037546157837 }, { "epoch": 4.5786802030456855, "step": 17138, "train/loss_ctc": 0.6439590454101562, "train/loss_error": 0.43974313139915466, "train/loss_total": 0.48058632016181946 }, { "epoch": 4.578947368421053, "step": 17139, "train/loss_ctc": 0.9785653948783875, "train/loss_error": 0.5126925110816956, "train/loss_total": 0.6058670878410339 }, { "epoch": 4.57921453379642, "grad_norm": 2.0027618408203125, "learning_rate": 2.539139727491317e-06, "loss": 0.5168, "step": 17140 }, { "epoch": 4.57921453379642, "step": 17140, "train/loss_ctc": 0.9827106595039368, "train/loss_error": 0.44132834672927856, "train/loss_total": 0.5496048331260681 }, { "epoch": 4.5794816991717875, "step": 17141, "train/loss_ctc": 0.6608185768127441, "train/loss_error": 0.47298023104667664, "train/loss_total": 0.510547935962677 }, { "epoch": 4.579748864547154, "step": 17142, "train/loss_ctc": 1.7992150783538818, "train/loss_error": 0.4872557520866394, "train/loss_total": 0.7496476173400879 }, { "epoch": 4.580016029922522, "step": 17143, "train/loss_ctc": 0.8212954998016357, "train/loss_error": 0.4692250192165375, "train/loss_total": 0.5396391153335571 }, { "epoch": 4.5802831952978895, "step": 17144, "train/loss_ctc": 0.4997878670692444, "train/loss_error": 0.3796502947807312, "train/loss_total": 0.4036778211593628 }, { "epoch": 4.580550360673257, "step": 17145, "train/loss_ctc": 0.7305443286895752, "train/loss_error": 0.450944721698761, "train/loss_total": 0.5068646669387817 }, { "epoch": 4.580817526048624, "step": 17146, "train/loss_ctc": 0.5805915594100952, "train/loss_error": 0.35965555906295776, "train/loss_total": 0.4038427770137787 }, { 
"epoch": 4.5810846914239916, "step": 17147, "train/loss_ctc": 0.6048002243041992, "train/loss_error": 0.4918558597564697, "train/loss_total": 0.5144447088241577 }, { "epoch": 4.581351856799359, "step": 17148, "train/loss_ctc": 0.45258623361587524, "train/loss_error": 0.41874462366104126, "train/loss_total": 0.42551296949386597 }, { "epoch": 4.581619022174726, "step": 17149, "train/loss_ctc": 1.0375779867172241, "train/loss_error": 0.4100170433521271, "train/loss_total": 0.5355292558670044 }, { "epoch": 4.581886187550094, "grad_norm": 1.6356792449951172, "learning_rate": 2.5231098049692763e-06, "loss": 0.5139, "step": 17150 }, { "epoch": 4.581886187550094, "step": 17150, "train/loss_ctc": 0.4291757345199585, "train/loss_error": 0.39453792572021484, "train/loss_total": 0.401465505361557 }, { "epoch": 4.582153352925461, "step": 17151, "train/loss_ctc": 0.31956377625465393, "train/loss_error": 0.45525500178337097, "train/loss_total": 0.4281167685985565 }, { "epoch": 4.582420518300828, "step": 17152, "train/loss_ctc": 0.5208783149719238, "train/loss_error": 0.40279433131217957, "train/loss_total": 0.42641112208366394 }, { "epoch": 4.582687683676196, "step": 17153, "train/loss_ctc": 0.9459379315376282, "train/loss_error": 0.41667068004608154, "train/loss_total": 0.5225241184234619 }, { "epoch": 4.582954849051563, "step": 17154, "train/loss_ctc": 0.954136073589325, "train/loss_error": 0.4425223767757416, "train/loss_total": 0.5448451042175293 }, { "epoch": 4.58322201442693, "step": 17155, "train/loss_ctc": 0.6882113814353943, "train/loss_error": 0.3817135989665985, "train/loss_total": 0.44301319122314453 }, { "epoch": 4.583489179802298, "step": 17156, "train/loss_ctc": 0.7786194086074829, "train/loss_error": 0.4734313488006592, "train/loss_total": 0.534468948841095 }, { "epoch": 4.583756345177665, "step": 17157, "train/loss_ctc": 0.5830941796302795, "train/loss_error": 0.37984365224838257, "train/loss_total": 0.4204937815666199 }, { "epoch": 4.584023510553032, "step": 
17158, "train/loss_ctc": 0.8773704171180725, "train/loss_error": 0.4380621612071991, "train/loss_total": 0.5259238481521606 }, { "epoch": 4.5842906759284, "step": 17159, "train/loss_ctc": 0.45557326078414917, "train/loss_error": 0.44730904698371887, "train/loss_total": 0.44896191358566284 }, { "epoch": 4.584557841303767, "grad_norm": 1.7773902416229248, "learning_rate": 2.507079882447235e-06, "loss": 0.4696, "step": 17160 }, { "epoch": 4.584557841303767, "step": 17160, "train/loss_ctc": 0.7743366956710815, "train/loss_error": 0.3123728930950165, "train/loss_total": 0.40476566553115845 }, { "epoch": 4.584825006679134, "step": 17161, "train/loss_ctc": 0.5642082691192627, "train/loss_error": 0.4284398853778839, "train/loss_total": 0.4555935859680176 }, { "epoch": 4.585092172054502, "step": 17162, "train/loss_ctc": 0.7342246770858765, "train/loss_error": 0.4472457468509674, "train/loss_total": 0.5046415328979492 }, { "epoch": 4.585359337429869, "step": 17163, "train/loss_ctc": 0.6562549471855164, "train/loss_error": 0.38683366775512695, "train/loss_total": 0.4407179355621338 }, { "epoch": 4.585626502805236, "step": 17164, "train/loss_ctc": 0.8159634470939636, "train/loss_error": 0.4232560098171234, "train/loss_total": 0.5017974972724915 }, { "epoch": 4.585893668180604, "step": 17165, "train/loss_ctc": 0.1714518964290619, "train/loss_error": 0.3988324999809265, "train/loss_total": 0.35335639119148254 }, { "epoch": 4.586160833555971, "step": 17166, "train/loss_ctc": 0.8360213041305542, "train/loss_error": 0.4269161522388458, "train/loss_total": 0.5087372064590454 }, { "epoch": 4.586427998931338, "step": 17167, "train/loss_ctc": 0.5401527881622314, "train/loss_error": 0.4422140121459961, "train/loss_total": 0.46180176734924316 }, { "epoch": 4.586695164306706, "step": 17168, "train/loss_ctc": 0.5902431011199951, "train/loss_error": 0.44886457920074463, "train/loss_total": 0.47714030742645264 }, { "epoch": 4.586962329682073, "step": 17169, "train/loss_ctc": 
1.6362959146499634, "train/loss_error": 0.4540407955646515, "train/loss_total": 0.690491795539856 }, { "epoch": 4.587229495057441, "grad_norm": 1.5677201747894287, "learning_rate": 2.4910499599251938e-06, "loss": 0.4799, "step": 17170 }, { "epoch": 4.587229495057441, "step": 17170, "train/loss_ctc": 0.7846962809562683, "train/loss_error": 0.4450458288192749, "train/loss_total": 0.5129759311676025 }, { "epoch": 4.587496660432808, "step": 17171, "train/loss_ctc": 0.9744700193405151, "train/loss_error": 0.4269998073577881, "train/loss_total": 0.5364938378334045 }, { "epoch": 4.587763825808175, "step": 17172, "train/loss_ctc": 0.510714054107666, "train/loss_error": 0.4636514186859131, "train/loss_total": 0.47306394577026367 }, { "epoch": 4.588030991183542, "step": 17173, "train/loss_ctc": 0.38224267959594727, "train/loss_error": 0.390746533870697, "train/loss_total": 0.389045774936676 }, { "epoch": 4.58829815655891, "step": 17174, "train/loss_ctc": 0.8791599273681641, "train/loss_error": 0.4435487687587738, "train/loss_total": 0.5306710004806519 }, { "epoch": 4.5885653219342775, "step": 17175, "train/loss_ctc": 0.9632546901702881, "train/loss_error": 0.4827684760093689, "train/loss_total": 0.5788657665252686 }, { "epoch": 4.588832487309645, "step": 17176, "train/loss_ctc": 0.4004199504852295, "train/loss_error": 0.4209652245044708, "train/loss_total": 0.41685616970062256 }, { "epoch": 4.589099652685012, "step": 17177, "train/loss_ctc": 0.4132521152496338, "train/loss_error": 0.5191026926040649, "train/loss_total": 0.4979325830936432 }, { "epoch": 4.5893668180603795, "step": 17178, "train/loss_ctc": 0.3978164494037628, "train/loss_error": 0.4210858643054962, "train/loss_total": 0.4164319932460785 }, { "epoch": 4.589633983435746, "step": 17179, "train/loss_ctc": 0.30764105916023254, "train/loss_error": 0.3857307434082031, "train/loss_total": 0.370112806558609 }, { "epoch": 4.589901148811114, "grad_norm": 2.3634023666381836, "learning_rate": 2.4750200374031527e-06, 
"loss": 0.4722, "step": 17180 }, { "epoch": 4.589901148811114, "step": 17180, "train/loss_ctc": 0.540655791759491, "train/loss_error": 0.42065849900245667, "train/loss_total": 0.44465798139572144 }, { "epoch": 4.5901683141864815, "step": 17181, "train/loss_ctc": 0.516696035861969, "train/loss_error": 0.40735629200935364, "train/loss_total": 0.42922425270080566 }, { "epoch": 4.590435479561849, "step": 17182, "train/loss_ctc": 0.46755948662757874, "train/loss_error": 0.39194774627685547, "train/loss_total": 0.4070701003074646 }, { "epoch": 4.590702644937216, "step": 17183, "train/loss_ctc": 0.5061295032501221, "train/loss_error": 0.4358005225658417, "train/loss_total": 0.44986632466316223 }, { "epoch": 4.5909698103125836, "step": 17184, "train/loss_ctc": 0.828873872756958, "train/loss_error": 0.41424548625946045, "train/loss_total": 0.49717116355895996 }, { "epoch": 4.591236975687951, "step": 17185, "train/loss_ctc": 0.42920568585395813, "train/loss_error": 0.5070130825042725, "train/loss_total": 0.491451621055603 }, { "epoch": 4.591504141063318, "step": 17186, "train/loss_ctc": 0.48138177394866943, "train/loss_error": 0.4100818634033203, "train/loss_total": 0.4243418574333191 }, { "epoch": 4.591771306438686, "step": 17187, "train/loss_ctc": 0.9115151762962341, "train/loss_error": 0.465793639421463, "train/loss_total": 0.5549379587173462 }, { "epoch": 4.592038471814053, "step": 17188, "train/loss_ctc": 1.1541342735290527, "train/loss_error": 0.3993742763996124, "train/loss_total": 0.5503262877464294 }, { "epoch": 4.59230563718942, "step": 17189, "train/loss_ctc": 0.8050060272216797, "train/loss_error": 0.4814801812171936, "train/loss_total": 0.5461853742599487 }, { "epoch": 4.592572802564788, "grad_norm": 2.2813010215759277, "learning_rate": 2.4589901148811112e-06, "loss": 0.4795, "step": 17190 }, { "epoch": 4.592572802564788, "step": 17190, "train/loss_ctc": 0.2830556035041809, "train/loss_error": 0.43578919768333435, "train/loss_total": 0.4052424728870392 }, { 
"epoch": 4.592839967940155, "step": 17191, "train/loss_ctc": 0.39639079570770264, "train/loss_error": 0.4811632037162781, "train/loss_total": 0.4642087519168854 }, { "epoch": 4.593107133315522, "step": 17192, "train/loss_ctc": 0.9692379832267761, "train/loss_error": 0.3803936839103699, "train/loss_total": 0.49816253781318665 }, { "epoch": 4.59337429869089, "step": 17193, "train/loss_ctc": 0.6767554879188538, "train/loss_error": 0.42162248492240906, "train/loss_total": 0.47264909744262695 }, { "epoch": 4.593641464066257, "step": 17194, "train/loss_ctc": 0.5287880301475525, "train/loss_error": 0.45590782165527344, "train/loss_total": 0.4704838991165161 }, { "epoch": 4.593908629441624, "step": 17195, "train/loss_ctc": 0.6078227162361145, "train/loss_error": 0.39336687326431274, "train/loss_total": 0.4362580478191376 }, { "epoch": 4.594175794816992, "step": 17196, "train/loss_ctc": 0.857172966003418, "train/loss_error": 0.463035523891449, "train/loss_total": 0.5418630242347717 }, { "epoch": 4.594442960192359, "step": 17197, "train/loss_ctc": 1.2518621683120728, "train/loss_error": 0.4928002655506134, "train/loss_total": 0.6446126699447632 }, { "epoch": 4.594710125567726, "step": 17198, "train/loss_ctc": 1.0791231393814087, "train/loss_error": 0.4052020013332367, "train/loss_total": 0.539986252784729 }, { "epoch": 4.594977290943094, "step": 17199, "train/loss_ctc": 0.432988703250885, "train/loss_error": 0.39551588892936707, "train/loss_total": 0.40301045775413513 }, { "epoch": 4.595244456318461, "grad_norm": 1.2113136053085327, "learning_rate": 2.4429601923590706e-06, "loss": 0.4876, "step": 17200 }, { "epoch": 4.595244456318461, "step": 17200, "train/loss_ctc": 0.5087419748306274, "train/loss_error": 0.4758772850036621, "train/loss_total": 0.4824502468109131 }, { "epoch": 4.595511621693828, "step": 17201, "train/loss_ctc": 0.30577921867370605, "train/loss_error": 0.4250366687774658, "train/loss_total": 0.40118518471717834 }, { "epoch": 4.595778787069196, "step": 17202, 
"train/loss_ctc": 1.123473882675171, "train/loss_error": 0.4490879476070404, "train/loss_total": 0.5839651226997375 }, { "epoch": 4.596045952444563, "step": 17203, "train/loss_ctc": 0.49878135323524475, "train/loss_error": 0.36992961168289185, "train/loss_total": 0.39569997787475586 }, { "epoch": 4.59631311781993, "step": 17204, "train/loss_ctc": 1.1955006122589111, "train/loss_error": 0.47944724559783936, "train/loss_total": 0.6226578950881958 }, { "epoch": 4.596580283195298, "step": 17205, "train/loss_ctc": 0.796600878238678, "train/loss_error": 0.46011048555374146, "train/loss_total": 0.5274085998535156 }, { "epoch": 4.596847448570665, "step": 17206, "train/loss_ctc": 0.3330730199813843, "train/loss_error": 0.44745197892189026, "train/loss_total": 0.42457619309425354 }, { "epoch": 4.597114613946033, "step": 17207, "train/loss_ctc": 0.529212474822998, "train/loss_error": 0.44102203845977783, "train/loss_total": 0.4586601257324219 }, { "epoch": 4.5973817793214, "step": 17208, "train/loss_ctc": 1.360689640045166, "train/loss_error": 0.4629925489425659, "train/loss_total": 0.6425319910049438 }, { "epoch": 4.597648944696767, "step": 17209, "train/loss_ctc": 0.4442636966705322, "train/loss_error": 0.41291651129722595, "train/loss_total": 0.41918593645095825 }, { "epoch": 4.597916110072134, "grad_norm": 2.4992144107818604, "learning_rate": 2.426930269837029e-06, "loss": 0.4958, "step": 17210 }, { "epoch": 4.597916110072134, "step": 17210, "train/loss_ctc": 0.788612425327301, "train/loss_error": 0.4695175886154175, "train/loss_total": 0.5333365797996521 }, { "epoch": 4.598183275447502, "step": 17211, "train/loss_ctc": 1.0997792482376099, "train/loss_error": 0.3877516984939575, "train/loss_total": 0.530157208442688 }, { "epoch": 4.5984504408228695, "step": 17212, "train/loss_ctc": 0.45382821559906006, "train/loss_error": 0.39817366003990173, "train/loss_total": 0.40930458903312683 }, { "epoch": 4.598717606198237, "step": 17213, "train/loss_ctc": 0.2255827784538269, 
"train/loss_error": 0.39419010281562805, "train/loss_total": 0.36046862602233887 }, { "epoch": 4.598984771573604, "step": 17214, "train/loss_ctc": 0.7089376449584961, "train/loss_error": 0.4804133474826813, "train/loss_total": 0.5261182188987732 }, { "epoch": 4.5992519369489715, "step": 17215, "train/loss_ctc": 0.751288652420044, "train/loss_error": 0.3989257514476776, "train/loss_total": 0.4693983495235443 }, { "epoch": 4.599519102324339, "step": 17216, "train/loss_ctc": 0.5304252505302429, "train/loss_error": 0.46488916873931885, "train/loss_total": 0.4779964089393616 }, { "epoch": 4.599786267699706, "step": 17217, "train/loss_ctc": 0.49054908752441406, "train/loss_error": 0.44606244564056396, "train/loss_total": 0.45495977997779846 }, { "epoch": 4.6000534330750735, "step": 17218, "train/loss_ctc": 1.0531929731369019, "train/loss_error": 0.43748238682746887, "train/loss_total": 0.5606245398521423 }, { "epoch": 4.600320598450441, "step": 17219, "train/loss_ctc": 0.7563650608062744, "train/loss_error": 0.4600678086280823, "train/loss_total": 0.5193272829055786 }, { "epoch": 4.600587763825808, "grad_norm": 4.778279781341553, "learning_rate": 2.410900347314988e-06, "loss": 0.4842, "step": 17220 }, { "epoch": 4.600587763825808, "step": 17220, "train/loss_ctc": 0.7848353981971741, "train/loss_error": 0.46643736958503723, "train/loss_total": 0.5301169753074646 }, { "epoch": 4.600854929201176, "step": 17221, "train/loss_ctc": 1.3931702375411987, "train/loss_error": 0.44903236627578735, "train/loss_total": 0.6378599405288696 }, { "epoch": 4.601122094576543, "step": 17222, "train/loss_ctc": 0.5434482097625732, "train/loss_error": 0.41395097970962524, "train/loss_total": 0.43985041975975037 }, { "epoch": 4.60138925995191, "step": 17223, "train/loss_ctc": 0.6256583333015442, "train/loss_error": 0.41637617349624634, "train/loss_total": 0.4582326114177704 }, { "epoch": 4.601656425327278, "step": 17224, "train/loss_ctc": 0.9630599617958069, "train/loss_error": 
0.4966466426849365, "train/loss_total": 0.5899293422698975 }, { "epoch": 4.601923590702645, "step": 17225, "train/loss_ctc": 0.6771668195724487, "train/loss_error": 0.40081992745399475, "train/loss_total": 0.4560893177986145 }, { "epoch": 4.602190756078012, "step": 17226, "train/loss_ctc": 0.5657763481140137, "train/loss_error": 0.46967893838882446, "train/loss_total": 0.4888984262943268 }, { "epoch": 4.60245792145338, "step": 17227, "train/loss_ctc": 0.5054512023925781, "train/loss_error": 0.4318135380744934, "train/loss_total": 0.44654110074043274 }, { "epoch": 4.602725086828747, "step": 17228, "train/loss_ctc": 0.6488469243049622, "train/loss_error": 0.4489586651325226, "train/loss_total": 0.48893633484840393 }, { "epoch": 4.602992252204114, "step": 17229, "train/loss_ctc": 0.7391053438186646, "train/loss_error": 0.3900443911552429, "train/loss_total": 0.4598565995693207 }, { "epoch": 4.603259417579482, "grad_norm": 2.0914011001586914, "learning_rate": 2.394870424792947e-06, "loss": 0.4996, "step": 17230 }, { "epoch": 4.603259417579482, "step": 17230, "train/loss_ctc": 0.6047797799110413, "train/loss_error": 0.47984522581100464, "train/loss_total": 0.5048321485519409 }, { "epoch": 4.603526582954849, "step": 17231, "train/loss_ctc": 0.7246332764625549, "train/loss_error": 0.3953399658203125, "train/loss_total": 0.461198627948761 }, { "epoch": 4.603793748330216, "step": 17232, "train/loss_ctc": 0.5304510593414307, "train/loss_error": 0.421447217464447, "train/loss_total": 0.4432480037212372 }, { "epoch": 4.604060913705584, "step": 17233, "train/loss_ctc": 0.6777830123901367, "train/loss_error": 0.392314076423645, "train/loss_total": 0.4494078755378723 }, { "epoch": 4.604328079080951, "step": 17234, "train/loss_ctc": 1.1263536214828491, "train/loss_error": 0.4551645517349243, "train/loss_total": 0.5894023776054382 }, { "epoch": 4.604595244456318, "step": 17235, "train/loss_ctc": 1.1665544509887695, "train/loss_error": 0.44454488158226013, "train/loss_total": 
0.5889468193054199 }, { "epoch": 4.604862409831686, "step": 17236, "train/loss_ctc": 1.0114283561706543, "train/loss_error": 0.4472973048686981, "train/loss_total": 0.5601235628128052 }, { "epoch": 4.605129575207053, "step": 17237, "train/loss_ctc": 0.552845299243927, "train/loss_error": 0.4911206364631653, "train/loss_total": 0.5034655332565308 }, { "epoch": 4.605396740582421, "step": 17238, "train/loss_ctc": 0.6887468695640564, "train/loss_error": 0.45698872208595276, "train/loss_total": 0.5033403635025024 }, { "epoch": 4.605663905957788, "step": 17239, "train/loss_ctc": 0.6318318843841553, "train/loss_error": 0.4567357301712036, "train/loss_total": 0.4917549788951874 }, { "epoch": 4.605931071333155, "grad_norm": 1.3837785720825195, "learning_rate": 2.378840502270906e-06, "loss": 0.5096, "step": 17240 }, { "epoch": 4.605931071333155, "step": 17240, "train/loss_ctc": 0.5682220458984375, "train/loss_error": 0.36115092039108276, "train/loss_total": 0.4025651514530182 }, { "epoch": 4.606198236708522, "step": 17241, "train/loss_ctc": 0.6585907936096191, "train/loss_error": 0.3926772475242615, "train/loss_total": 0.44585996866226196 }, { "epoch": 4.60646540208389, "step": 17242, "train/loss_ctc": 0.8310785889625549, "train/loss_error": 0.4870404303073883, "train/loss_total": 0.5558480620384216 }, { "epoch": 4.606732567459257, "step": 17243, "train/loss_ctc": 0.9008951187133789, "train/loss_error": 0.4685725271701813, "train/loss_total": 0.5550370216369629 }, { "epoch": 4.606999732834625, "step": 17244, "train/loss_ctc": 1.2850244045257568, "train/loss_error": 0.43252527713775635, "train/loss_total": 0.6030250787734985 }, { "epoch": 4.607266898209992, "step": 17245, "train/loss_ctc": 0.6469050645828247, "train/loss_error": 0.44516170024871826, "train/loss_total": 0.4855104088783264 }, { "epoch": 4.607534063585359, "step": 17246, "train/loss_ctc": 1.0715081691741943, "train/loss_error": 0.48087078332901, "train/loss_total": 0.5989982485771179 }, { "epoch": 
4.607801228960726, "step": 17247, "train/loss_ctc": 0.911227285861969, "train/loss_error": 0.4028926491737366, "train/loss_total": 0.5045595765113831 }, { "epoch": 4.608068394336094, "step": 17248, "train/loss_ctc": 0.88933265209198, "train/loss_error": 0.4786241352558136, "train/loss_total": 0.5607658624649048 }, { "epoch": 4.6083355597114615, "step": 17249, "train/loss_ctc": 0.22759485244750977, "train/loss_error": 0.410070538520813, "train/loss_total": 0.3735753893852234 }, { "epoch": 4.608602725086829, "grad_norm": 1.4621316194534302, "learning_rate": 2.3628105797488644e-06, "loss": 0.5086, "step": 17250 }, { "epoch": 4.608602725086829, "step": 17250, "train/loss_ctc": 0.8527688980102539, "train/loss_error": 0.44101855158805847, "train/loss_total": 0.5233686566352844 }, { "epoch": 4.608869890462196, "step": 17251, "train/loss_ctc": 0.5448042154312134, "train/loss_error": 0.47151732444763184, "train/loss_total": 0.48617470264434814 }, { "epoch": 4.6091370558375635, "step": 17252, "train/loss_ctc": 0.5086454153060913, "train/loss_error": 0.43550723791122437, "train/loss_total": 0.45013490319252014 }, { "epoch": 4.609404221212931, "step": 17253, "train/loss_ctc": 0.43005841970443726, "train/loss_error": 0.4112306237220764, "train/loss_total": 0.4149961769580841 }, { "epoch": 4.609671386588298, "step": 17254, "train/loss_ctc": 0.4907762408256531, "train/loss_error": 0.4407104253768921, "train/loss_total": 0.4507236182689667 }, { "epoch": 4.6099385519636655, "step": 17255, "train/loss_ctc": 0.6003589034080505, "train/loss_error": 0.39905884861946106, "train/loss_total": 0.4393188953399658 }, { "epoch": 4.610205717339033, "step": 17256, "train/loss_ctc": 0.8149782419204712, "train/loss_error": 0.41692256927490234, "train/loss_total": 0.49653369188308716 }, { "epoch": 4.6104728827144, "step": 17257, "train/loss_ctc": 0.2746005654335022, "train/loss_error": 0.36227935552597046, "train/loss_total": 0.34474360942840576 }, { "epoch": 4.610740048089768, "step": 17258, 
"train/loss_ctc": 0.544867753982544, "train/loss_error": 0.4268871545791626, "train/loss_total": 0.4504832923412323 }, { "epoch": 4.611007213465135, "step": 17259, "train/loss_ctc": 0.8439695835113525, "train/loss_error": 0.4827515184879303, "train/loss_total": 0.5549951791763306 }, { "epoch": 4.611274378840502, "grad_norm": 4.796717643737793, "learning_rate": 2.3467806572268237e-06, "loss": 0.4611, "step": 17260 }, { "epoch": 4.611274378840502, "step": 17260, "train/loss_ctc": 0.724269688129425, "train/loss_error": 0.4427020847797394, "train/loss_total": 0.4990156292915344 }, { "epoch": 4.61154154421587, "step": 17261, "train/loss_ctc": 0.5901861190795898, "train/loss_error": 0.47336044907569885, "train/loss_total": 0.49672558903694153 }, { "epoch": 4.611808709591237, "step": 17262, "train/loss_ctc": 0.37514904141426086, "train/loss_error": 0.4455534815788269, "train/loss_total": 0.4314725995063782 }, { "epoch": 4.612075874966604, "step": 17263, "train/loss_ctc": 0.3984891474246979, "train/loss_error": 0.4804437756538391, "train/loss_total": 0.46405285596847534 }, { "epoch": 4.612343040341972, "step": 17264, "train/loss_ctc": 0.8744365572929382, "train/loss_error": 0.40810927748680115, "train/loss_total": 0.5013747215270996 }, { "epoch": 4.612610205717339, "step": 17265, "train/loss_ctc": 0.8169897794723511, "train/loss_error": 0.4087124466896057, "train/loss_total": 0.4903678894042969 }, { "epoch": 4.612877371092706, "step": 17266, "train/loss_ctc": 1.6700633764266968, "train/loss_error": 0.46933484077453613, "train/loss_total": 0.7094805240631104 }, { "epoch": 4.613144536468074, "step": 17267, "train/loss_ctc": 0.6094950437545776, "train/loss_error": 0.41616615653038025, "train/loss_total": 0.45483192801475525 }, { "epoch": 4.613411701843441, "step": 17268, "train/loss_ctc": 0.19192001223564148, "train/loss_error": 0.4279828369617462, "train/loss_total": 0.3807702660560608 }, { "epoch": 4.613678867218809, "step": 17269, "train/loss_ctc": 0.7431873679161072, 
"train/loss_error": 0.3823738396167755, "train/loss_total": 0.4545365571975708 }, { "epoch": 4.613946032594176, "grad_norm": 2.7812771797180176, "learning_rate": 2.3307507347047822e-06, "loss": 0.4883, "step": 17270 }, { "epoch": 4.613946032594176, "step": 17270, "train/loss_ctc": 0.5021373629570007, "train/loss_error": 0.42422744631767273, "train/loss_total": 0.4398094415664673 }, { "epoch": 4.614213197969543, "step": 17271, "train/loss_ctc": 0.52668297290802, "train/loss_error": 0.40117597579956055, "train/loss_total": 0.42627739906311035 }, { "epoch": 4.61448036334491, "step": 17272, "train/loss_ctc": 0.4768725633621216, "train/loss_error": 0.4030855596065521, "train/loss_total": 0.4178429841995239 }, { "epoch": 4.614747528720278, "step": 17273, "train/loss_ctc": 0.5583705902099609, "train/loss_error": 0.4625273048877716, "train/loss_total": 0.4816959798336029 }, { "epoch": 4.615014694095645, "step": 17274, "train/loss_ctc": 0.5815447568893433, "train/loss_error": 0.41423499584198, "train/loss_total": 0.4476969540119171 }, { "epoch": 4.615281859471013, "step": 17275, "train/loss_ctc": 0.6725900173187256, "train/loss_error": 0.46960344910621643, "train/loss_total": 0.5102007985115051 }, { "epoch": 4.61554902484638, "step": 17276, "train/loss_ctc": 0.3122548460960388, "train/loss_error": 0.3943580687046051, "train/loss_total": 0.3779374361038208 }, { "epoch": 4.615816190221747, "step": 17277, "train/loss_ctc": 0.6060799360275269, "train/loss_error": 0.41837090253829956, "train/loss_total": 0.4559127390384674 }, { "epoch": 4.616083355597114, "step": 17278, "train/loss_ctc": 0.4746520519256592, "train/loss_error": 0.4463031589984894, "train/loss_total": 0.45197293162345886 }, { "epoch": 4.616350520972482, "step": 17279, "train/loss_ctc": 0.5943556427955627, "train/loss_error": 0.46864262223243713, "train/loss_total": 0.49378523230552673 }, { "epoch": 4.616617686347849, "grad_norm": 1.68418550491333, "learning_rate": 2.314720812182741e-06, "loss": 0.4503, "step": 
17280 }, { "epoch": 4.616617686347849, "step": 17280, "train/loss_ctc": 1.0664749145507812, "train/loss_error": 0.4672132134437561, "train/loss_total": 0.587065577507019 }, { "epoch": 4.616884851723217, "step": 17281, "train/loss_ctc": 0.6651101112365723, "train/loss_error": 0.44003450870513916, "train/loss_total": 0.48504966497421265 }, { "epoch": 4.617152017098584, "step": 17282, "train/loss_ctc": 0.7493816614151001, "train/loss_error": 0.4204440116882324, "train/loss_total": 0.48623156547546387 }, { "epoch": 4.6174191824739514, "step": 17283, "train/loss_ctc": 0.8534798622131348, "train/loss_error": 0.4204427897930145, "train/loss_total": 0.5070502161979675 }, { "epoch": 4.617686347849319, "step": 17284, "train/loss_ctc": 0.5179957747459412, "train/loss_error": 0.4273947775363922, "train/loss_total": 0.445514976978302 }, { "epoch": 4.617953513224686, "step": 17285, "train/loss_ctc": 0.6550825834274292, "train/loss_error": 0.42368072271347046, "train/loss_total": 0.46996110677719116 }, { "epoch": 4.6182206786000535, "step": 17286, "train/loss_ctc": 0.379228800535202, "train/loss_error": 0.43200206756591797, "train/loss_total": 0.42144739627838135 }, { "epoch": 4.618487843975421, "step": 17287, "train/loss_ctc": 0.4987737834453583, "train/loss_error": 0.4537086486816406, "train/loss_total": 0.46272167563438416 }, { "epoch": 4.618755009350788, "step": 17288, "train/loss_ctc": 1.0895037651062012, "train/loss_error": 0.351272851228714, "train/loss_total": 0.4989190399646759 }, { "epoch": 4.6190221747261555, "step": 17289, "train/loss_ctc": 0.39030957221984863, "train/loss_error": 0.40990230441093445, "train/loss_total": 0.40598374605178833 }, { "epoch": 4.619289340101523, "grad_norm": 2.92140531539917, "learning_rate": 2.2986908896607e-06, "loss": 0.477, "step": 17290 }, { "epoch": 4.619289340101523, "step": 17290, "train/loss_ctc": 0.5222684144973755, "train/loss_error": 0.4596151113510132, "train/loss_total": 0.47214576601982117 }, { "epoch": 4.61955650547689, 
"step": 17291, "train/loss_ctc": 0.4721451699733734, "train/loss_error": 0.4747747480869293, "train/loss_total": 0.47424885630607605 }, { "epoch": 4.6198236708522575, "step": 17292, "train/loss_ctc": 0.4449205696582794, "train/loss_error": 0.40520602464675903, "train/loss_total": 0.4131489396095276 }, { "epoch": 4.620090836227625, "step": 17293, "train/loss_ctc": 0.4659811854362488, "train/loss_error": 0.4298918843269348, "train/loss_total": 0.4371097683906555 }, { "epoch": 4.620358001602992, "step": 17294, "train/loss_ctc": 0.993653416633606, "train/loss_error": 0.47617167234420776, "train/loss_total": 0.5796680450439453 }, { "epoch": 4.62062516697836, "step": 17295, "train/loss_ctc": 0.23492999374866486, "train/loss_error": 0.3416368067264557, "train/loss_total": 0.32029545307159424 }, { "epoch": 4.620892332353727, "step": 17296, "train/loss_ctc": 1.1405051946640015, "train/loss_error": 0.4720693528652191, "train/loss_total": 0.6057565212249756 }, { "epoch": 4.621159497729094, "step": 17297, "train/loss_ctc": 0.40287476778030396, "train/loss_error": 0.46373987197875977, "train/loss_total": 0.4515668749809265 }, { "epoch": 4.621426663104462, "step": 17298, "train/loss_ctc": 1.227431058883667, "train/loss_error": 0.4604202210903168, "train/loss_total": 0.6138224005699158 }, { "epoch": 4.621693828479829, "step": 17299, "train/loss_ctc": 0.5589131116867065, "train/loss_error": 0.4347430467605591, "train/loss_total": 0.4595770835876465 }, { "epoch": 4.621960993855196, "grad_norm": 2.0891029834747314, "learning_rate": 2.2826609671386586e-06, "loss": 0.4827, "step": 17300 }, { "epoch": 4.621960993855196, "step": 17300, "train/loss_ctc": 0.5903348922729492, "train/loss_error": 0.4075627326965332, "train/loss_total": 0.4441171884536743 }, { "epoch": 4.622228159230564, "step": 17301, "train/loss_ctc": 0.7518360614776611, "train/loss_error": 0.38441675901412964, "train/loss_total": 0.45790064334869385 }, { "epoch": 4.622495324605931, "step": 17302, "train/loss_ctc": 
0.4726422131061554, "train/loss_error": 0.4250691831111908, "train/loss_total": 0.43458378314971924 }, { "epoch": 4.622762489981298, "step": 17303, "train/loss_ctc": 0.9281668663024902, "train/loss_error": 0.3937733769416809, "train/loss_total": 0.5006520748138428 }, { "epoch": 4.623029655356666, "step": 17304, "train/loss_ctc": 0.5695474147796631, "train/loss_error": 0.45033419132232666, "train/loss_total": 0.4741768538951874 }, { "epoch": 4.623296820732033, "step": 17305, "train/loss_ctc": 0.6786682605743408, "train/loss_error": 0.3941226899623871, "train/loss_total": 0.45103180408477783 }, { "epoch": 4.623563986107401, "step": 17306, "train/loss_ctc": 0.4718182682991028, "train/loss_error": 0.45365768671035767, "train/loss_total": 0.45728981494903564 }, { "epoch": 4.623831151482768, "step": 17307, "train/loss_ctc": 0.5982418656349182, "train/loss_error": 0.4804173409938812, "train/loss_total": 0.5039822459220886 }, { "epoch": 4.624098316858135, "step": 17308, "train/loss_ctc": 0.6969923973083496, "train/loss_error": 0.4679439067840576, "train/loss_total": 0.5137536525726318 }, { "epoch": 4.624365482233502, "step": 17309, "train/loss_ctc": 0.30638259649276733, "train/loss_error": 0.45224303007125854, "train/loss_total": 0.4230709671974182 }, { "epoch": 4.62463264760887, "grad_norm": 2.1039323806762695, "learning_rate": 2.266631044616618e-06, "loss": 0.4661, "step": 17310 }, { "epoch": 4.62463264760887, "step": 17310, "train/loss_ctc": 0.42865365743637085, "train/loss_error": 0.459551066160202, "train/loss_total": 0.4533715844154358 }, { "epoch": 4.624899812984237, "step": 17311, "train/loss_ctc": 2.6912407875061035, "train/loss_error": 0.5629065632820129, "train/loss_total": 0.988573431968689 }, { "epoch": 4.625166978359605, "step": 17312, "train/loss_ctc": 1.1589219570159912, "train/loss_error": 0.4399041533470154, "train/loss_total": 0.5837076902389526 }, { "epoch": 4.625434143734972, "step": 17313, "train/loss_ctc": 1.3815231323242188, "train/loss_error": 
0.4583428204059601, "train/loss_total": 0.6429789066314697 }, { "epoch": 4.625701309110339, "step": 17314, "train/loss_ctc": 0.8402687311172485, "train/loss_error": 0.3722473680973053, "train/loss_total": 0.46585163474082947 }, { "epoch": 4.625968474485707, "step": 17315, "train/loss_ctc": 1.1607847213745117, "train/loss_error": 0.42998239398002625, "train/loss_total": 0.5761428475379944 }, { "epoch": 4.626235639861074, "step": 17316, "train/loss_ctc": 0.531502902507782, "train/loss_error": 0.4052337408065796, "train/loss_total": 0.43048760294914246 }, { "epoch": 4.626502805236441, "step": 17317, "train/loss_ctc": 0.830906867980957, "train/loss_error": 0.47342777252197266, "train/loss_total": 0.5449236035346985 }, { "epoch": 4.626769970611809, "step": 17318, "train/loss_ctc": 0.8562127947807312, "train/loss_error": 0.3816983103752136, "train/loss_total": 0.4766012132167816 }, { "epoch": 4.627037135987176, "step": 17319, "train/loss_ctc": 1.0100727081298828, "train/loss_error": 0.35001930594444275, "train/loss_total": 0.4820299744606018 }, { "epoch": 4.6273043013625434, "grad_norm": 1.8276172876358032, "learning_rate": 2.2506011220945765e-06, "loss": 0.5645, "step": 17320 }, { "epoch": 4.6273043013625434, "step": 17320, "train/loss_ctc": 0.5069913864135742, "train/loss_error": 0.4147501289844513, "train/loss_total": 0.43319839239120483 }, { "epoch": 4.627571466737911, "step": 17321, "train/loss_ctc": 0.2938288748264313, "train/loss_error": 0.39601269364356995, "train/loss_total": 0.3755759298801422 }, { "epoch": 4.627838632113278, "step": 17322, "train/loss_ctc": 0.3187263011932373, "train/loss_error": 0.45571404695510864, "train/loss_total": 0.42831650376319885 }, { "epoch": 4.6281057974886455, "step": 17323, "train/loss_ctc": 1.2528427839279175, "train/loss_error": 0.4693338871002197, "train/loss_total": 0.6260356903076172 }, { "epoch": 4.628372962864013, "step": 17324, "train/loss_ctc": 1.8558825254440308, "train/loss_error": 0.4546267092227936, 
"train/loss_total": 0.73487788438797 }, { "epoch": 4.62864012823938, "step": 17325, "train/loss_ctc": 0.3207695782184601, "train/loss_error": 0.4655436873435974, "train/loss_total": 0.436588853597641 }, { "epoch": 4.6289072936147475, "step": 17326, "train/loss_ctc": 0.9497681856155396, "train/loss_error": 0.4151190221309662, "train/loss_total": 0.5220488905906677 }, { "epoch": 4.629174458990115, "step": 17327, "train/loss_ctc": 0.23399722576141357, "train/loss_error": 0.4123755097389221, "train/loss_total": 0.37669986486434937 }, { "epoch": 4.629441624365482, "step": 17328, "train/loss_ctc": 0.6310850381851196, "train/loss_error": 0.4297836720943451, "train/loss_total": 0.47004395723342896 }, { "epoch": 4.6297087897408495, "step": 17329, "train/loss_ctc": 1.016308307647705, "train/loss_error": 0.4189140200614929, "train/loss_total": 0.5383929014205933 }, { "epoch": 4.629975955116217, "grad_norm": 1.7216272354125977, "learning_rate": 2.2345711995725354e-06, "loss": 0.4942, "step": 17330 }, { "epoch": 4.629975955116217, "step": 17330, "train/loss_ctc": 0.7853755354881287, "train/loss_error": 0.40767088532447815, "train/loss_total": 0.48321181535720825 }, { "epoch": 4.630243120491584, "step": 17331, "train/loss_ctc": 0.7122998237609863, "train/loss_error": 0.5378463864326477, "train/loss_total": 0.5727370977401733 }, { "epoch": 4.630510285866952, "step": 17332, "train/loss_ctc": 0.5185409784317017, "train/loss_error": 0.4375508725643158, "train/loss_total": 0.4537489116191864 }, { "epoch": 4.630777451242319, "step": 17333, "train/loss_ctc": 0.45875731110572815, "train/loss_error": 0.3998763859272003, "train/loss_total": 0.4116525650024414 }, { "epoch": 4.631044616617686, "step": 17334, "train/loss_ctc": 0.9525034427642822, "train/loss_error": 0.4273921251296997, "train/loss_total": 0.5324143767356873 }, { "epoch": 4.631311781993054, "step": 17335, "train/loss_ctc": 0.7107499837875366, "train/loss_error": 0.3969706892967224, "train/loss_total": 0.45972657203674316 }, { 
"epoch": 4.631578947368421, "step": 17336, "train/loss_ctc": 0.3039400279521942, "train/loss_error": 0.3705250918865204, "train/loss_total": 0.3572080731391907 }, { "epoch": 4.631846112743789, "step": 17337, "train/loss_ctc": 0.9069377779960632, "train/loss_error": 0.4387090504169464, "train/loss_total": 0.5323548316955566 }, { "epoch": 4.632113278119156, "step": 17338, "train/loss_ctc": 1.0461864471435547, "train/loss_error": 0.4414641559123993, "train/loss_total": 0.5624086260795593 }, { "epoch": 4.632380443494523, "step": 17339, "train/loss_ctc": 0.5177599191665649, "train/loss_error": 0.4072161018848419, "train/loss_total": 0.4293248653411865 }, { "epoch": 4.63264760886989, "grad_norm": 1.4843920469284058, "learning_rate": 2.2185412770504943e-06, "loss": 0.4795, "step": 17340 }, { "epoch": 4.63264760886989, "step": 17340, "train/loss_ctc": 0.574549674987793, "train/loss_error": 0.4232604503631592, "train/loss_total": 0.4535183012485504 }, { "epoch": 4.632914774245258, "step": 17341, "train/loss_ctc": 0.5733857154846191, "train/loss_error": 0.38458555936813354, "train/loss_total": 0.4223455786705017 }, { "epoch": 4.633181939620625, "step": 17342, "train/loss_ctc": 0.8584536910057068, "train/loss_error": 0.4327426254749298, "train/loss_total": 0.5178848505020142 }, { "epoch": 4.633449104995993, "step": 17343, "train/loss_ctc": 0.4264218807220459, "train/loss_error": 0.3592180013656616, "train/loss_total": 0.37265878915786743 }, { "epoch": 4.63371627037136, "step": 17344, "train/loss_ctc": 1.0739860534667969, "train/loss_error": 0.4473598897457123, "train/loss_total": 0.5726851224899292 }, { "epoch": 4.633983435746727, "step": 17345, "train/loss_ctc": 0.9688258171081543, "train/loss_error": 0.4543631076812744, "train/loss_total": 0.5572556257247925 }, { "epoch": 4.634250601122094, "step": 17346, "train/loss_ctc": 0.9196560382843018, "train/loss_error": 0.4403362274169922, "train/loss_total": 0.536200225353241 }, { "epoch": 4.634517766497462, "step": 17347, 
"train/loss_ctc": 0.8755204081535339, "train/loss_error": 0.4319766163825989, "train/loss_total": 0.5206853747367859 }, { "epoch": 4.634784931872829, "step": 17348, "train/loss_ctc": 0.7681554555892944, "train/loss_error": 0.4107464551925659, "train/loss_total": 0.48222824931144714 }, { "epoch": 4.635052097248197, "step": 17349, "train/loss_ctc": 0.3750501871109009, "train/loss_error": 0.3661850094795227, "train/loss_total": 0.36795803904533386 }, { "epoch": 4.635319262623564, "grad_norm": 1.9701157808303833, "learning_rate": 2.2025113545284532e-06, "loss": 0.4803, "step": 17350 }, { "epoch": 4.635319262623564, "step": 17350, "train/loss_ctc": 0.4773792326450348, "train/loss_error": 0.434116393327713, "train/loss_total": 0.44276896119117737 }, { "epoch": 4.635586427998931, "step": 17351, "train/loss_ctc": 0.6001611351966858, "train/loss_error": 0.4222988784313202, "train/loss_total": 0.45787131786346436 }, { "epoch": 4.635853593374299, "step": 17352, "train/loss_ctc": 1.173228144645691, "train/loss_error": 0.4299304783344269, "train/loss_total": 0.5785900354385376 }, { "epoch": 4.636120758749666, "step": 17353, "train/loss_ctc": 0.6432528495788574, "train/loss_error": 0.44774898886680603, "train/loss_total": 0.4868497848510742 }, { "epoch": 4.636387924125033, "step": 17354, "train/loss_ctc": 0.4570259749889374, "train/loss_error": 0.456140398979187, "train/loss_total": 0.4563175439834595 }, { "epoch": 4.636655089500401, "step": 17355, "train/loss_ctc": 0.6630921959877014, "train/loss_error": 0.4924536347389221, "train/loss_total": 0.526581346988678 }, { "epoch": 4.636922254875768, "step": 17356, "train/loss_ctc": 0.5300507545471191, "train/loss_error": 0.41802656650543213, "train/loss_total": 0.4404314160346985 }, { "epoch": 4.6371894202511355, "step": 17357, "train/loss_ctc": 0.5584308505058289, "train/loss_error": 0.44180408120155334, "train/loss_total": 0.46512943506240845 }, { "epoch": 4.637456585626503, "step": 17358, "train/loss_ctc": 0.953061580657959, 
"train/loss_error": 0.4401971697807312, "train/loss_total": 0.5427700281143188 }, { "epoch": 4.63772375100187, "step": 17359, "train/loss_ctc": 0.7457773089408875, "train/loss_error": 0.46047443151474, "train/loss_total": 0.5175350308418274 }, { "epoch": 4.6379909163772375, "grad_norm": 1.4396759271621704, "learning_rate": 2.186481432006412e-06, "loss": 0.4915, "step": 17360 }, { "epoch": 4.6379909163772375, "step": 17360, "train/loss_ctc": 0.5484778881072998, "train/loss_error": 0.4504017233848572, "train/loss_total": 0.4700169563293457 }, { "epoch": 4.638258081752605, "step": 17361, "train/loss_ctc": 0.6647500991821289, "train/loss_error": 0.39815738797187805, "train/loss_total": 0.45147591829299927 }, { "epoch": 4.638525247127972, "step": 17362, "train/loss_ctc": 0.7913471460342407, "train/loss_error": 0.3586912751197815, "train/loss_total": 0.4452224671840668 }, { "epoch": 4.6387924125033395, "step": 17363, "train/loss_ctc": 0.24978740513324738, "train/loss_error": 0.40762725472450256, "train/loss_total": 0.37605929374694824 }, { "epoch": 4.639059577878707, "step": 17364, "train/loss_ctc": 0.28683918714523315, "train/loss_error": 0.3947780132293701, "train/loss_total": 0.3731902539730072 }, { "epoch": 4.639326743254074, "step": 17365, "train/loss_ctc": 0.7395309209823608, "train/loss_error": 0.45671600103378296, "train/loss_total": 0.5132789611816406 }, { "epoch": 4.6395939086294415, "step": 17366, "train/loss_ctc": 0.8104732036590576, "train/loss_error": 0.44783514738082886, "train/loss_total": 0.5203627347946167 }, { "epoch": 4.639861074004809, "step": 17367, "train/loss_ctc": 0.3178313970565796, "train/loss_error": 0.4566698670387268, "train/loss_total": 0.42890220880508423 }, { "epoch": 4.640128239380177, "step": 17368, "train/loss_ctc": 0.4299156665802002, "train/loss_error": 0.432269811630249, "train/loss_total": 0.4317989945411682 }, { "epoch": 4.640395404755544, "step": 17369, "train/loss_ctc": 0.2064746767282486, "train/loss_error": 
0.38965359330177307, "train/loss_total": 0.35301780700683594 }, { "epoch": 4.640662570130911, "grad_norm": 1.9550325870513916, "learning_rate": 2.170451509484371e-06, "loss": 0.4363, "step": 17370 }, { "epoch": 4.640662570130911, "step": 17370, "train/loss_ctc": 0.7638489007949829, "train/loss_error": 0.43455204367637634, "train/loss_total": 0.5004114508628845 }, { "epoch": 4.640929735506278, "step": 17371, "train/loss_ctc": 0.6888015270233154, "train/loss_error": 0.4069165885448456, "train/loss_total": 0.46329358220100403 }, { "epoch": 4.641196900881646, "step": 17372, "train/loss_ctc": 0.735488772392273, "train/loss_error": 0.35695889592170715, "train/loss_total": 0.4326648712158203 }, { "epoch": 4.641464066257013, "step": 17373, "train/loss_ctc": 0.5099419355392456, "train/loss_error": 0.43870243430137634, "train/loss_total": 0.4529503583908081 }, { "epoch": 4.641731231632381, "step": 17374, "train/loss_ctc": 0.3289068639278412, "train/loss_error": 0.3846244215965271, "train/loss_total": 0.3734809160232544 }, { "epoch": 4.641998397007748, "step": 17375, "train/loss_ctc": 0.38239586353302, "train/loss_error": 0.46015802025794983, "train/loss_total": 0.44460558891296387 }, { "epoch": 4.642265562383115, "step": 17376, "train/loss_ctc": 0.37048768997192383, "train/loss_error": 0.3579651117324829, "train/loss_total": 0.36046963930130005 }, { "epoch": 4.642532727758482, "step": 17377, "train/loss_ctc": 0.27263033390045166, "train/loss_error": 0.44241592288017273, "train/loss_total": 0.40845879912376404 }, { "epoch": 4.64279989313385, "step": 17378, "train/loss_ctc": 0.6649078130722046, "train/loss_error": 0.46723154187202454, "train/loss_total": 0.5067667961120605 }, { "epoch": 4.643067058509217, "step": 17379, "train/loss_ctc": 0.40624746680259705, "train/loss_error": 0.45511531829833984, "train/loss_total": 0.4453417658805847 }, { "epoch": 4.643334223884585, "grad_norm": 5.14310359954834, "learning_rate": 2.1544215869623296e-06, "loss": 0.4388, "step": 17380 }, { 
"epoch": 4.643334223884585, "step": 17380, "train/loss_ctc": 0.646091878414154, "train/loss_error": 0.46201831102371216, "train/loss_total": 0.4988330602645874 }, { "epoch": 4.643601389259952, "step": 17381, "train/loss_ctc": 0.6085073947906494, "train/loss_error": 0.4158172607421875, "train/loss_total": 0.45435529947280884 }, { "epoch": 4.643868554635319, "step": 17382, "train/loss_ctc": 0.4612494111061096, "train/loss_error": 0.3828972578048706, "train/loss_total": 0.39856767654418945 }, { "epoch": 4.644135720010687, "step": 17383, "train/loss_ctc": 0.7673575282096863, "train/loss_error": 0.4383300244808197, "train/loss_total": 0.5041355490684509 }, { "epoch": 4.644402885386054, "step": 17384, "train/loss_ctc": 0.4821837544441223, "train/loss_error": 0.3426709771156311, "train/loss_total": 0.3705735206604004 }, { "epoch": 4.644670050761421, "step": 17385, "train/loss_ctc": 1.2671566009521484, "train/loss_error": 0.44223538041114807, "train/loss_total": 0.6072196364402771 }, { "epoch": 4.644937216136789, "step": 17386, "train/loss_ctc": 0.833092212677002, "train/loss_error": 0.4999072253704071, "train/loss_total": 0.566544234752655 }, { "epoch": 4.645204381512156, "step": 17387, "train/loss_ctc": 0.7869438529014587, "train/loss_error": 0.45546531677246094, "train/loss_total": 0.5217610597610474 }, { "epoch": 4.645471546887523, "step": 17388, "train/loss_ctc": 1.051287055015564, "train/loss_error": 0.4469481110572815, "train/loss_total": 0.567815899848938 }, { "epoch": 4.645738712262891, "step": 17389, "train/loss_ctc": 0.6343528032302856, "train/loss_error": 0.4322412610054016, "train/loss_total": 0.4726635813713074 }, { "epoch": 4.646005877638258, "grad_norm": 2.8337290287017822, "learning_rate": 2.1383916644402885e-06, "loss": 0.4962, "step": 17390 }, { "epoch": 4.646005877638258, "step": 17390, "train/loss_ctc": 0.4277178645133972, "train/loss_error": 0.4581677317619324, "train/loss_total": 0.4520777761936188 }, { "epoch": 4.646273043013625, "step": 17391, 
"train/loss_ctc": 0.791083574295044, "train/loss_error": 0.418234258890152, "train/loss_total": 0.4928041398525238 }, { "epoch": 4.646540208388993, "step": 17392, "train/loss_ctc": 0.40058183670043945, "train/loss_error": 0.34641149640083313, "train/loss_total": 0.3572455644607544 }, { "epoch": 4.64680737376436, "step": 17393, "train/loss_ctc": 0.4879174530506134, "train/loss_error": 0.45787858963012695, "train/loss_total": 0.4638863801956177 }, { "epoch": 4.6470745391397275, "step": 17394, "train/loss_ctc": 0.7330319285392761, "train/loss_error": 0.3916076421737671, "train/loss_total": 0.45989251136779785 }, { "epoch": 4.647341704515095, "step": 17395, "train/loss_ctc": 1.7879211902618408, "train/loss_error": 0.4365721046924591, "train/loss_total": 0.7068419456481934 }, { "epoch": 4.647608869890462, "step": 17396, "train/loss_ctc": 0.8784400820732117, "train/loss_error": 0.4547451436519623, "train/loss_total": 0.5394841432571411 }, { "epoch": 4.6478760352658295, "step": 17397, "train/loss_ctc": 2.0697171688079834, "train/loss_error": 0.45155569911003113, "train/loss_total": 0.7751879692077637 }, { "epoch": 4.648143200641197, "step": 17398, "train/loss_ctc": 0.6416769027709961, "train/loss_error": 0.44208359718322754, "train/loss_total": 0.48200225830078125 }, { "epoch": 4.648410366016564, "step": 17399, "train/loss_ctc": 0.699303925037384, "train/loss_error": 0.4263670742511749, "train/loss_total": 0.48095446825027466 }, { "epoch": 4.6486775313919315, "grad_norm": 3.1615071296691895, "learning_rate": 2.1223617419182475e-06, "loss": 0.521, "step": 17400 }, { "epoch": 4.6486775313919315, "step": 17400, "train/loss_ctc": 0.6394581198692322, "train/loss_error": 0.41364577412605286, "train/loss_total": 0.4588082432746887 }, { "epoch": 4.648944696767299, "step": 17401, "train/loss_ctc": 1.2357981204986572, "train/loss_error": 0.5069746375083923, "train/loss_total": 0.6527393460273743 }, { "epoch": 4.649211862142666, "step": 17402, "train/loss_ctc": 0.979724645614624, 
"train/loss_error": 0.47132769227027893, "train/loss_total": 0.5730071067810059 }, { "epoch": 4.6494790275180335, "step": 17403, "train/loss_ctc": 0.7952479124069214, "train/loss_error": 0.5315959453582764, "train/loss_total": 0.5843263268470764 }, { "epoch": 4.649746192893401, "step": 17404, "train/loss_ctc": 1.0601086616516113, "train/loss_error": 0.45868298411369324, "train/loss_total": 0.5789681673049927 }, { "epoch": 4.650013358268769, "step": 17405, "train/loss_ctc": 0.9901064038276672, "train/loss_error": 0.3965100347995758, "train/loss_total": 0.5152292847633362 }, { "epoch": 4.650280523644136, "step": 17406, "train/loss_ctc": 1.278188943862915, "train/loss_error": 0.3967342972755432, "train/loss_total": 0.5730252265930176 }, { "epoch": 4.650547689019503, "step": 17407, "train/loss_ctc": 1.0245604515075684, "train/loss_error": 0.4613119065761566, "train/loss_total": 0.573961615562439 }, { "epoch": 4.65081485439487, "step": 17408, "train/loss_ctc": 1.0997158288955688, "train/loss_error": 0.49470025300979614, "train/loss_total": 0.6157033443450928 }, { "epoch": 4.651082019770238, "step": 17409, "train/loss_ctc": 0.7393616437911987, "train/loss_error": 0.3704054057598114, "train/loss_total": 0.4441966712474823 }, { "epoch": 4.651349185145605, "grad_norm": 1.186378002166748, "learning_rate": 2.106331819396206e-06, "loss": 0.557, "step": 17410 }, { "epoch": 4.651349185145605, "step": 17410, "train/loss_ctc": 0.6018587350845337, "train/loss_error": 0.4283146858215332, "train/loss_total": 0.46302351355552673 }, { "epoch": 4.651616350520973, "step": 17411, "train/loss_ctc": 0.45830559730529785, "train/loss_error": 0.4386993944644928, "train/loss_total": 0.4426206350326538 }, { "epoch": 4.65188351589634, "step": 17412, "train/loss_ctc": 0.38657906651496887, "train/loss_error": 0.45588454604148865, "train/loss_total": 0.44202345609664917 }, { "epoch": 4.652150681271707, "step": 17413, "train/loss_ctc": 0.426318883895874, "train/loss_error": 0.33917152881622314, 
"train/loss_total": 0.3566010296344757 }, { "epoch": 4.652417846647075, "step": 17414, "train/loss_ctc": 0.39818239212036133, "train/loss_error": 0.36920636892318726, "train/loss_total": 0.37500157952308655 }, { "epoch": 4.652685012022442, "step": 17415, "train/loss_ctc": 0.6226143836975098, "train/loss_error": 0.44558319449424744, "train/loss_total": 0.4809894561767578 }, { "epoch": 4.652952177397809, "step": 17416, "train/loss_ctc": 0.2906441390514374, "train/loss_error": 0.40835434198379517, "train/loss_total": 0.3848123252391815 }, { "epoch": 4.653219342773177, "step": 17417, "train/loss_ctc": 0.19723570346832275, "train/loss_error": 0.3659181296825409, "train/loss_total": 0.3321816325187683 }, { "epoch": 4.653486508148544, "step": 17418, "train/loss_ctc": 0.7539879083633423, "train/loss_error": 0.394672691822052, "train/loss_total": 0.466535747051239 }, { "epoch": 4.653753673523911, "step": 17419, "train/loss_ctc": 0.5871775150299072, "train/loss_error": 0.41824615001678467, "train/loss_total": 0.4520324468612671 }, { "epoch": 4.654020838899279, "grad_norm": 1.6435816287994385, "learning_rate": 2.0903018968741653e-06, "loss": 0.4196, "step": 17420 }, { "epoch": 4.654020838899279, "step": 17420, "train/loss_ctc": 0.7658910751342773, "train/loss_error": 0.32431644201278687, "train/loss_total": 0.4126313626766205 }, { "epoch": 4.654288004274646, "step": 17421, "train/loss_ctc": 0.5010069608688354, "train/loss_error": 0.4245336651802063, "train/loss_total": 0.4398283362388611 }, { "epoch": 4.654555169650013, "step": 17422, "train/loss_ctc": 0.20872610807418823, "train/loss_error": 0.4141603112220764, "train/loss_total": 0.3730734586715698 }, { "epoch": 4.654822335025381, "step": 17423, "train/loss_ctc": 0.5363348722457886, "train/loss_error": 0.3968060612678528, "train/loss_total": 0.42471182346343994 }, { "epoch": 4.655089500400748, "step": 17424, "train/loss_ctc": 0.41511300206184387, "train/loss_error": 0.46587076783180237, "train/loss_total": 
0.4557192027568817 }, { "epoch": 4.655356665776115, "step": 17425, "train/loss_ctc": 0.9078221321105957, "train/loss_error": 0.4485096037387848, "train/loss_total": 0.5403721332550049 }, { "epoch": 4.655623831151483, "step": 17426, "train/loss_ctc": 0.7164746522903442, "train/loss_error": 0.4305478036403656, "train/loss_total": 0.4877331852912903 }, { "epoch": 4.65589099652685, "step": 17427, "train/loss_ctc": 0.6128639578819275, "train/loss_error": 0.454171359539032, "train/loss_total": 0.4859098792076111 }, { "epoch": 4.656158161902217, "step": 17428, "train/loss_ctc": 0.8471089601516724, "train/loss_error": 0.36963075399398804, "train/loss_total": 0.4651263952255249 }, { "epoch": 4.656425327277585, "step": 17429, "train/loss_ctc": 0.4916587471961975, "train/loss_error": 0.41808244585990906, "train/loss_total": 0.43279770016670227 }, { "epoch": 4.656692492652952, "grad_norm": 2.6505377292633057, "learning_rate": 2.074271974352124e-06, "loss": 0.4518, "step": 17430 }, { "epoch": 4.656692492652952, "step": 17430, "train/loss_ctc": 0.7525264024734497, "train/loss_error": 0.5009655952453613, "train/loss_total": 0.551277756690979 }, { "epoch": 4.6569596580283195, "step": 17431, "train/loss_ctc": 0.4263060688972473, "train/loss_error": 0.518390953540802, "train/loss_total": 0.49997398257255554 }, { "epoch": 4.657226823403687, "step": 17432, "train/loss_ctc": 0.3958406448364258, "train/loss_error": 0.4620312750339508, "train/loss_total": 0.4487931728363037 }, { "epoch": 4.657493988779054, "step": 17433, "train/loss_ctc": 0.4333791732788086, "train/loss_error": 0.35215404629707336, "train/loss_total": 0.36839908361434937 }, { "epoch": 4.6577611541544215, "step": 17434, "train/loss_ctc": 0.8181450366973877, "train/loss_error": 0.4772963225841522, "train/loss_total": 0.5454660654067993 }, { "epoch": 4.658028319529789, "step": 17435, "train/loss_ctc": 0.437417209148407, "train/loss_error": 0.4262424111366272, "train/loss_total": 0.42847737669944763 }, { "epoch": 
4.658295484905157, "step": 17436, "train/loss_ctc": 0.34794777631759644, "train/loss_error": 0.3651318848133087, "train/loss_total": 0.3616950809955597 }, { "epoch": 4.6585626502805235, "step": 17437, "train/loss_ctc": 0.4371204376220703, "train/loss_error": 0.424898236989975, "train/loss_total": 0.4273426830768585 }, { "epoch": 4.658829815655891, "step": 17438, "train/loss_ctc": 0.34567511081695557, "train/loss_error": 0.4367172122001648, "train/loss_total": 0.4185088276863098 }, { "epoch": 4.659096981031258, "step": 17439, "train/loss_ctc": 1.0527677536010742, "train/loss_error": 0.4046842157840729, "train/loss_total": 0.5343009233474731 }, { "epoch": 4.6593641464066256, "grad_norm": 2.8311760425567627, "learning_rate": 2.058242051830083e-06, "loss": 0.4584, "step": 17440 }, { "epoch": 4.6593641464066256, "step": 17440, "train/loss_ctc": 0.6951102018356323, "train/loss_error": 0.5024312138557434, "train/loss_total": 0.5409669876098633 }, { "epoch": 4.659631311781993, "step": 17441, "train/loss_ctc": 1.172208547592163, "train/loss_error": 0.4569474756717682, "train/loss_total": 0.599999725818634 }, { "epoch": 4.659898477157361, "step": 17442, "train/loss_ctc": 0.7947103977203369, "train/loss_error": 0.4211755096912384, "train/loss_total": 0.495882511138916 }, { "epoch": 4.660165642532728, "step": 17443, "train/loss_ctc": 0.5938022136688232, "train/loss_error": 0.42429107427597046, "train/loss_total": 0.458193302154541 }, { "epoch": 4.660432807908095, "step": 17444, "train/loss_ctc": 1.5365939140319824, "train/loss_error": 0.42907634377479553, "train/loss_total": 0.6505798697471619 }, { "epoch": 4.660699973283462, "step": 17445, "train/loss_ctc": 0.7567986249923706, "train/loss_error": 0.5011376142501831, "train/loss_total": 0.5522698163986206 }, { "epoch": 4.66096713865883, "step": 17446, "train/loss_ctc": 0.44378918409347534, "train/loss_error": 0.580473780632019, "train/loss_total": 0.5531368851661682 }, { "epoch": 4.661234304034197, "step": 17447, 
"train/loss_ctc": 0.6175810098648071, "train/loss_error": 0.47563132643699646, "train/loss_total": 0.5040212869644165 }, { "epoch": 4.661501469409565, "step": 17448, "train/loss_ctc": 0.6706039309501648, "train/loss_error": 0.424267441034317, "train/loss_total": 0.4735347628593445 }, { "epoch": 4.661768634784932, "step": 17449, "train/loss_ctc": 0.6488399505615234, "train/loss_error": 0.4338909983634949, "train/loss_total": 0.4768807888031006 }, { "epoch": 4.662035800160299, "grad_norm": 1.854032278060913, "learning_rate": 2.0422121293080417e-06, "loss": 0.5305, "step": 17450 }, { "epoch": 4.662035800160299, "step": 17450, "train/loss_ctc": 1.1901051998138428, "train/loss_error": 0.4078867435455322, "train/loss_total": 0.5643304586410522 }, { "epoch": 4.662302965535667, "step": 17451, "train/loss_ctc": 1.2210915088653564, "train/loss_error": 0.4086242914199829, "train/loss_total": 0.5711177587509155 }, { "epoch": 4.662570130911034, "step": 17452, "train/loss_ctc": 0.7219865918159485, "train/loss_error": 0.44034042954444885, "train/loss_total": 0.4966696798801422 }, { "epoch": 4.662837296286401, "step": 17453, "train/loss_ctc": 0.40262505412101746, "train/loss_error": 0.44082146883010864, "train/loss_total": 0.4331821799278259 }, { "epoch": 4.663104461661769, "step": 17454, "train/loss_ctc": 0.4964783787727356, "train/loss_error": 0.3817291855812073, "train/loss_total": 0.4046790301799774 }, { "epoch": 4.663371627037136, "step": 17455, "train/loss_ctc": 0.4992273151874542, "train/loss_error": 0.4257464110851288, "train/loss_total": 0.44044259190559387 }, { "epoch": 4.663638792412503, "step": 17456, "train/loss_ctc": 0.8431128263473511, "train/loss_error": 0.45181331038475037, "train/loss_total": 0.5300732254981995 }, { "epoch": 4.663905957787871, "step": 17457, "train/loss_ctc": 0.6193662881851196, "train/loss_error": 0.500731348991394, "train/loss_total": 0.5244583487510681 }, { "epoch": 4.664173123163238, "step": 17458, "train/loss_ctc": 0.47078633308410645, 
"train/loss_error": 0.3850918412208557, "train/loss_total": 0.40223076939582825 }, { "epoch": 4.664440288538605, "step": 17459, "train/loss_ctc": 0.5026552081108093, "train/loss_error": 0.44283828139305115, "train/loss_total": 0.45480167865753174 }, { "epoch": 4.664707453913973, "grad_norm": 2.2908453941345215, "learning_rate": 2.0261822067860006e-06, "loss": 0.4822, "step": 17460 }, { "epoch": 4.664707453913973, "step": 17460, "train/loss_ctc": 0.41719168424606323, "train/loss_error": 0.38382768630981445, "train/loss_total": 0.3905004858970642 }, { "epoch": 4.66497461928934, "step": 17461, "train/loss_ctc": 0.6037150621414185, "train/loss_error": 0.43420666456222534, "train/loss_total": 0.4681083559989929 }, { "epoch": 4.665241784664707, "step": 17462, "train/loss_ctc": 1.0810043811798096, "train/loss_error": 0.48012492060661316, "train/loss_total": 0.6003007888793945 }, { "epoch": 4.665508950040075, "step": 17463, "train/loss_ctc": 0.7671404480934143, "train/loss_error": 0.44476747512817383, "train/loss_total": 0.509242057800293 }, { "epoch": 4.665776115415442, "step": 17464, "train/loss_ctc": 0.9327690601348877, "train/loss_error": 0.3661918342113495, "train/loss_total": 0.4795072674751282 }, { "epoch": 4.666043280790809, "step": 17465, "train/loss_ctc": 0.5565103888511658, "train/loss_error": 0.5325102806091309, "train/loss_total": 0.5373103022575378 }, { "epoch": 4.666310446166177, "step": 17466, "train/loss_ctc": 0.7507325410842896, "train/loss_error": 0.4210303723812103, "train/loss_total": 0.48697081208229065 }, { "epoch": 4.666577611541545, "step": 17467, "train/loss_ctc": 0.8788619637489319, "train/loss_error": 0.4502187967300415, "train/loss_total": 0.5359474420547485 }, { "epoch": 4.6668447769169115, "step": 17468, "train/loss_ctc": 0.5679417848587036, "train/loss_error": 0.3718515634536743, "train/loss_total": 0.4110696315765381 }, { "epoch": 4.667111942292279, "step": 17469, "train/loss_ctc": 0.4212360978126526, "train/loss_error": 0.4961818754673004, 
"train/loss_total": 0.4811927378177643 }, { "epoch": 4.667379107667646, "grad_norm": 2.032264471054077, "learning_rate": 2.0101522842639596e-06, "loss": 0.49, "step": 17470 }, { "epoch": 4.667379107667646, "step": 17470, "train/loss_ctc": 0.6376233100891113, "train/loss_error": 0.4698968827724457, "train/loss_total": 0.5034421682357788 }, { "epoch": 4.6676462730430135, "step": 17471, "train/loss_ctc": 0.5726327300071716, "train/loss_error": 0.42116373777389526, "train/loss_total": 0.45145753026008606 }, { "epoch": 4.667913438418381, "step": 17472, "train/loss_ctc": 0.46777668595314026, "train/loss_error": 0.39292851090431213, "train/loss_total": 0.4078981578350067 }, { "epoch": 4.668180603793749, "step": 17473, "train/loss_ctc": 0.5314812660217285, "train/loss_error": 0.3759021759033203, "train/loss_total": 0.4070180058479309 }, { "epoch": 4.6684477691691155, "step": 17474, "train/loss_ctc": 0.35481855273246765, "train/loss_error": 0.4146879017353058, "train/loss_total": 0.4027140438556671 }, { "epoch": 4.668714934544483, "step": 17475, "train/loss_ctc": 0.4774576425552368, "train/loss_error": 0.3517940044403076, "train/loss_total": 0.3769267499446869 }, { "epoch": 4.66898209991985, "step": 17476, "train/loss_ctc": 0.9869748950004578, "train/loss_error": 0.4241536557674408, "train/loss_total": 0.5367178916931152 }, { "epoch": 4.6692492652952176, "step": 17477, "train/loss_ctc": 0.31221646070480347, "train/loss_error": 0.40855276584625244, "train/loss_total": 0.38928550481796265 }, { "epoch": 4.669516430670585, "step": 17478, "train/loss_ctc": 1.2650690078735352, "train/loss_error": 0.44701752066612244, "train/loss_total": 0.6106278300285339 }, { "epoch": 4.669783596045953, "step": 17479, "train/loss_ctc": 0.3654189705848694, "train/loss_error": 0.4234621524810791, "train/loss_total": 0.41185352206230164 }, { "epoch": 4.67005076142132, "grad_norm": 3.118190288543701, "learning_rate": 1.994122361741918e-06, "loss": 0.4498, "step": 17480 }, { "epoch": 
4.67005076142132, "step": 17480, "train/loss_ctc": 0.6699365377426147, "train/loss_error": 0.41578298807144165, "train/loss_total": 0.4666137099266052 }, { "epoch": 4.670317926796687, "step": 17481, "train/loss_ctc": 1.5049594640731812, "train/loss_error": 0.3924179673194885, "train/loss_total": 0.614926278591156 }, { "epoch": 4.670585092172055, "step": 17482, "train/loss_ctc": 0.6221586465835571, "train/loss_error": 0.4559136927127838, "train/loss_total": 0.4891626834869385 }, { "epoch": 4.670852257547422, "step": 17483, "train/loss_ctc": 1.0010602474212646, "train/loss_error": 0.4635500907897949, "train/loss_total": 0.5710521340370178 }, { "epoch": 4.671119422922789, "step": 17484, "train/loss_ctc": 1.3467950820922852, "train/loss_error": 0.4779828190803528, "train/loss_total": 0.6517453193664551 }, { "epoch": 4.671386588298157, "step": 17485, "train/loss_ctc": 0.578031063079834, "train/loss_error": 0.42229411005973816, "train/loss_total": 0.4534415006637573 }, { "epoch": 4.671653753673524, "step": 17486, "train/loss_ctc": 0.9993535280227661, "train/loss_error": 0.4543907344341278, "train/loss_total": 0.5633833408355713 }, { "epoch": 4.671920919048891, "step": 17487, "train/loss_ctc": 0.8929082751274109, "train/loss_error": 0.4213646650314331, "train/loss_total": 0.5156733989715576 }, { "epoch": 4.672188084424259, "step": 17488, "train/loss_ctc": 0.6703722476959229, "train/loss_error": 0.44110897183418274, "train/loss_total": 0.48696163296699524 }, { "epoch": 4.672455249799626, "step": 17489, "train/loss_ctc": 1.1402623653411865, "train/loss_error": 0.47510233521461487, "train/loss_total": 0.6081343293190002 }, { "epoch": 4.672722415174993, "grad_norm": 1.9920099973678589, "learning_rate": 1.978092439219877e-06, "loss": 0.5421, "step": 17490 }, { "epoch": 4.672722415174993, "step": 17490, "train/loss_ctc": 0.29682856798171997, "train/loss_error": 0.4206146001815796, "train/loss_total": 0.39585739374160767 }, { "epoch": 4.672989580550361, "step": 17491, 
"train/loss_ctc": 0.4817020893096924, "train/loss_error": 0.42994433641433716, "train/loss_total": 0.44029590487480164 }, { "epoch": 4.673256745925728, "step": 17492, "train/loss_ctc": 1.1266725063323975, "train/loss_error": 0.4545222520828247, "train/loss_total": 0.5889523029327393 }, { "epoch": 4.673523911301095, "step": 17493, "train/loss_ctc": 0.9822795391082764, "train/loss_error": 0.431230366230011, "train/loss_total": 0.5414401888847351 }, { "epoch": 4.673791076676463, "step": 17494, "train/loss_ctc": 0.7620973587036133, "train/loss_error": 0.4469929337501526, "train/loss_total": 0.5100138187408447 }, { "epoch": 4.67405824205183, "step": 17495, "train/loss_ctc": 0.6214005947113037, "train/loss_error": 0.3976970314979553, "train/loss_total": 0.4424377679824829 }, { "epoch": 4.674325407427197, "step": 17496, "train/loss_ctc": 0.6696446537971497, "train/loss_error": 0.478229820728302, "train/loss_total": 0.5165128111839294 }, { "epoch": 4.674592572802565, "step": 17497, "train/loss_ctc": 1.0302221775054932, "train/loss_error": 0.4881437420845032, "train/loss_total": 0.5965594053268433 }, { "epoch": 4.674859738177932, "step": 17498, "train/loss_ctc": 0.4088935852050781, "train/loss_error": 0.4590780735015869, "train/loss_total": 0.4490411877632141 }, { "epoch": 4.675126903553299, "step": 17499, "train/loss_ctc": 0.7048178911209106, "train/loss_error": 0.37265291810035706, "train/loss_total": 0.4390859305858612 }, { "epoch": 4.675394068928667, "grad_norm": 1.7323054075241089, "learning_rate": 1.962062516697836e-06, "loss": 0.492, "step": 17500 }, { "epoch": 4.675394068928667, "step": 17500, "train/loss_ctc": 0.7928506731987, "train/loss_error": 0.4017618000507355, "train/loss_total": 0.47997957468032837 }, { "epoch": 4.675661234304034, "step": 17501, "train/loss_ctc": 0.8692706823348999, "train/loss_error": 0.38135504722595215, "train/loss_total": 0.47893819212913513 }, { "epoch": 4.675928399679401, "step": 17502, "train/loss_ctc": 0.920984148979187, 
"train/loss_error": 0.39538508653640747, "train/loss_total": 0.5005049109458923 }, { "epoch": 4.676195565054769, "step": 17503, "train/loss_ctc": 0.0894443616271019, "train/loss_error": 0.43777918815612793, "train/loss_total": 0.3681122362613678 }, { "epoch": 4.676462730430137, "step": 17504, "train/loss_ctc": 0.9148762226104736, "train/loss_error": 0.4138990342617035, "train/loss_total": 0.5140944719314575 }, { "epoch": 4.6767298958055035, "step": 17505, "train/loss_ctc": 0.7487977743148804, "train/loss_error": 0.44975221157073975, "train/loss_total": 0.50956130027771 }, { "epoch": 4.676997061180871, "step": 17506, "train/loss_ctc": 1.4868371486663818, "train/loss_error": 0.4869040548801422, "train/loss_total": 0.6868906617164612 }, { "epoch": 4.677264226556238, "step": 17507, "train/loss_ctc": 1.801972508430481, "train/loss_error": 0.4746427834033966, "train/loss_total": 0.7401087284088135 }, { "epoch": 4.6775313919316055, "step": 17508, "train/loss_ctc": 0.6773420572280884, "train/loss_error": 0.46654221415519714, "train/loss_total": 0.5087021589279175 }, { "epoch": 4.677798557306973, "step": 17509, "train/loss_ctc": 0.7791988849639893, "train/loss_error": 0.4271319806575775, "train/loss_total": 0.49754536151885986 }, { "epoch": 4.678065722682341, "grad_norm": 1.841254711151123, "learning_rate": 1.946032594175795e-06, "loss": 0.5284, "step": 17510 }, { "epoch": 4.678065722682341, "step": 17510, "train/loss_ctc": 0.4313424825668335, "train/loss_error": 0.37763357162475586, "train/loss_total": 0.38837534189224243 }, { "epoch": 4.6783328880577075, "step": 17511, "train/loss_ctc": 0.9774991273880005, "train/loss_error": 0.43448248505592346, "train/loss_total": 0.5430858135223389 }, { "epoch": 4.678600053433075, "step": 17512, "train/loss_ctc": 0.6310542225837708, "train/loss_error": 0.5093227624893188, "train/loss_total": 0.5336690545082092 }, { "epoch": 4.678867218808443, "step": 17513, "train/loss_ctc": 0.7548742294311523, "train/loss_error": 0.43398118019104004, 
"train/loss_total": 0.49815982580184937 }, { "epoch": 4.67913438418381, "step": 17514, "train/loss_ctc": 0.3934405446052551, "train/loss_error": 0.43742766976356506, "train/loss_total": 0.4286302626132965 }, { "epoch": 4.679401549559177, "step": 17515, "train/loss_ctc": 0.4844452738761902, "train/loss_error": 0.39009568095207214, "train/loss_total": 0.4089655876159668 }, { "epoch": 4.679668714934545, "step": 17516, "train/loss_ctc": 0.33096152544021606, "train/loss_error": 0.4353914260864258, "train/loss_total": 0.4145054519176483 }, { "epoch": 4.679935880309912, "step": 17517, "train/loss_ctc": 0.4457182288169861, "train/loss_error": 0.39935165643692017, "train/loss_total": 0.4086250066757202 }, { "epoch": 4.680203045685279, "step": 17518, "train/loss_ctc": 0.5762777328491211, "train/loss_error": 0.442521333694458, "train/loss_total": 0.4692726135253906 }, { "epoch": 4.680470211060647, "step": 17519, "train/loss_ctc": 0.8010127544403076, "train/loss_error": 0.43245795369148254, "train/loss_total": 0.5061689019203186 }, { "epoch": 4.680737376436014, "grad_norm": 1.829707384109497, "learning_rate": 1.930002671653754e-06, "loss": 0.4599, "step": 17520 }, { "epoch": 4.680737376436014, "step": 17520, "train/loss_ctc": 0.8432661294937134, "train/loss_error": 0.4564650356769562, "train/loss_total": 0.5338252782821655 }, { "epoch": 4.681004541811381, "step": 17521, "train/loss_ctc": 1.0136058330535889, "train/loss_error": 0.4350963830947876, "train/loss_total": 0.5507982969284058 }, { "epoch": 4.681271707186749, "step": 17522, "train/loss_ctc": 0.637784481048584, "train/loss_error": 0.46114444732666016, "train/loss_total": 0.49647247791290283 }, { "epoch": 4.681538872562116, "step": 17523, "train/loss_ctc": 0.7198889255523682, "train/loss_error": 0.41036367416381836, "train/loss_total": 0.4722687304019928 }, { "epoch": 4.681806037937483, "step": 17524, "train/loss_ctc": 0.3026498556137085, "train/loss_error": 0.4529755115509033, "train/loss_total": 0.4229103922843933 }, { 
"epoch": 4.682073203312851, "step": 17525, "train/loss_ctc": 0.9381576180458069, "train/loss_error": 0.43816784024238586, "train/loss_total": 0.538165807723999 }, { "epoch": 4.682340368688218, "step": 17526, "train/loss_ctc": 0.8152751326560974, "train/loss_error": 0.4508199095726013, "train/loss_total": 0.5237109661102295 }, { "epoch": 4.682607534063585, "step": 17527, "train/loss_ctc": 0.48482784628868103, "train/loss_error": 0.385185182094574, "train/loss_total": 0.40511372685432434 }, { "epoch": 4.682874699438953, "step": 17528, "train/loss_ctc": 0.7236083745956421, "train/loss_error": 0.4203208386898041, "train/loss_total": 0.4809783697128296 }, { "epoch": 4.68314186481432, "step": 17529, "train/loss_ctc": 0.3261626660823822, "train/loss_error": 0.3901577293872833, "train/loss_total": 0.37735873460769653 }, { "epoch": 4.683409030189687, "grad_norm": 9.129547119140625, "learning_rate": 1.9139727491317127e-06, "loss": 0.4802, "step": 17530 }, { "epoch": 4.683409030189687, "step": 17530, "train/loss_ctc": 0.4345339238643646, "train/loss_error": 0.4613897204399109, "train/loss_total": 0.4560185670852661 }, { "epoch": 4.683676195565055, "step": 17531, "train/loss_ctc": 0.37091100215911865, "train/loss_error": 0.39455586671829224, "train/loss_total": 0.3898269236087799 }, { "epoch": 4.683943360940422, "step": 17532, "train/loss_ctc": 0.7981505393981934, "train/loss_error": 0.3719075918197632, "train/loss_total": 0.4571561813354492 }, { "epoch": 4.684210526315789, "step": 17533, "train/loss_ctc": 0.7048709392547607, "train/loss_error": 0.3292982280254364, "train/loss_total": 0.40441277623176575 }, { "epoch": 4.684477691691157, "step": 17534, "train/loss_ctc": 0.7092728614807129, "train/loss_error": 0.46293529868125916, "train/loss_total": 0.512202799320221 }, { "epoch": 4.684744857066525, "step": 17535, "train/loss_ctc": 0.6008505821228027, "train/loss_error": 0.44302913546562195, "train/loss_total": 0.4745934307575226 }, { "epoch": 4.685012022441891, "step": 17536, 
"train/loss_ctc": 0.3156849145889282, "train/loss_error": 0.43175676465034485, "train/loss_total": 0.4085424244403839 }, { "epoch": 4.685279187817259, "step": 17537, "train/loss_ctc": 1.0979677438735962, "train/loss_error": 0.4794897139072418, "train/loss_total": 0.6031852960586548 }, { "epoch": 4.685546353192626, "step": 17538, "train/loss_ctc": 0.8011866807937622, "train/loss_error": 0.4122959077358246, "train/loss_total": 0.4900740683078766 }, { "epoch": 4.685813518567993, "step": 17539, "train/loss_ctc": 0.4846191704273224, "train/loss_error": 0.4357682764530182, "train/loss_total": 0.4455384612083435 }, { "epoch": 4.686080683943361, "grad_norm": 2.8183372020721436, "learning_rate": 1.8979428266096712e-06, "loss": 0.4642, "step": 17540 }, { "epoch": 4.686080683943361, "step": 17540, "train/loss_ctc": 0.5216051340103149, "train/loss_error": 0.43401849269866943, "train/loss_total": 0.45153582096099854 }, { "epoch": 4.686347849318729, "step": 17541, "train/loss_ctc": 0.770676851272583, "train/loss_error": 0.40204453468322754, "train/loss_total": 0.4757710099220276 }, { "epoch": 4.6866150146940955, "step": 17542, "train/loss_ctc": 0.44153130054473877, "train/loss_error": 0.4551789164543152, "train/loss_total": 0.45244938135147095 }, { "epoch": 4.686882180069463, "step": 17543, "train/loss_ctc": 0.618842363357544, "train/loss_error": 0.4133080840110779, "train/loss_total": 0.454414963722229 }, { "epoch": 4.68714934544483, "step": 17544, "train/loss_ctc": 0.3667181730270386, "train/loss_error": 0.4036003351211548, "train/loss_total": 0.39622390270233154 }, { "epoch": 4.6874165108201975, "step": 17545, "train/loss_ctc": 0.45216256380081177, "train/loss_error": 0.44580399990081787, "train/loss_total": 0.4470757246017456 }, { "epoch": 4.687683676195565, "step": 17546, "train/loss_ctc": 1.5045677423477173, "train/loss_error": 0.451066255569458, "train/loss_total": 0.661766529083252 }, { "epoch": 4.687950841570933, "step": 17547, "train/loss_ctc": 0.7195653319358826, 
"train/loss_error": 0.4355190396308899, "train/loss_total": 0.4923282861709595 }, { "epoch": 4.6882180069462995, "step": 17548, "train/loss_ctc": 0.733839213848114, "train/loss_error": 0.4489983022212982, "train/loss_total": 0.5059664845466614 }, { "epoch": 4.688485172321667, "step": 17549, "train/loss_ctc": 0.37027353048324585, "train/loss_error": 0.46205779910087585, "train/loss_total": 0.44370096921920776 }, { "epoch": 4.688752337697035, "grad_norm": 1.4307918548583984, "learning_rate": 1.8819129040876304e-06, "loss": 0.4781, "step": 17550 }, { "epoch": 4.688752337697035, "step": 17550, "train/loss_ctc": 0.9006777405738831, "train/loss_error": 0.4394731819629669, "train/loss_total": 0.5317140817642212 }, { "epoch": 4.689019503072402, "step": 17551, "train/loss_ctc": 0.4475521445274353, "train/loss_error": 0.4479016959667206, "train/loss_total": 0.44783180952072144 }, { "epoch": 4.689286668447769, "step": 17552, "train/loss_ctc": 1.0945132970809937, "train/loss_error": 0.4484668970108032, "train/loss_total": 0.5776761770248413 }, { "epoch": 4.689553833823137, "step": 17553, "train/loss_ctc": 0.9326138496398926, "train/loss_error": 0.4766644537448883, "train/loss_total": 0.5678543448448181 }, { "epoch": 4.689820999198504, "step": 17554, "train/loss_ctc": 0.5682556629180908, "train/loss_error": 0.48250022530555725, "train/loss_total": 0.49965131282806396 }, { "epoch": 4.690088164573871, "step": 17555, "train/loss_ctc": 1.3456391096115112, "train/loss_error": 0.45197340846061707, "train/loss_total": 0.6307065486907959 }, { "epoch": 4.690355329949239, "step": 17556, "train/loss_ctc": 1.223738431930542, "train/loss_error": 0.4340568780899048, "train/loss_total": 0.5919932126998901 }, { "epoch": 4.690622495324606, "step": 17557, "train/loss_ctc": 0.7121995091438293, "train/loss_error": 0.4955122768878937, "train/loss_total": 0.5388497114181519 }, { "epoch": 4.690889660699973, "step": 17558, "train/loss_ctc": 0.8152598142623901, "train/loss_error": 0.3457876145839691, 
"train/loss_total": 0.4396820664405823 }, { "epoch": 4.691156826075341, "step": 17559, "train/loss_ctc": 0.9827485084533691, "train/loss_error": 0.4243600368499756, "train/loss_total": 0.5360377430915833 }, { "epoch": 4.691423991450708, "grad_norm": 2.7779173851013184, "learning_rate": 1.865882981565589e-06, "loss": 0.5362, "step": 17560 }, { "epoch": 4.691423991450708, "step": 17560, "train/loss_ctc": 1.006334662437439, "train/loss_error": 0.4124814569950104, "train/loss_total": 0.5312520861625671 }, { "epoch": 4.691691156826075, "step": 17561, "train/loss_ctc": 1.0421009063720703, "train/loss_error": 0.4574589133262634, "train/loss_total": 0.5743873119354248 }, { "epoch": 4.691958322201443, "step": 17562, "train/loss_ctc": 0.44424936175346375, "train/loss_error": 0.41121023893356323, "train/loss_total": 0.4178180694580078 }, { "epoch": 4.69222548757681, "step": 17563, "train/loss_ctc": 0.6998100280761719, "train/loss_error": 0.42366358637809753, "train/loss_total": 0.47889286279678345 }, { "epoch": 4.692492652952177, "step": 17564, "train/loss_ctc": 0.31161442399024963, "train/loss_error": 0.40887537598609924, "train/loss_total": 0.3894231915473938 }, { "epoch": 4.692759818327545, "step": 17565, "train/loss_ctc": 0.8750495910644531, "train/loss_error": 0.39466771483421326, "train/loss_total": 0.49074411392211914 }, { "epoch": 4.693026983702912, "step": 17566, "train/loss_ctc": 0.486918568611145, "train/loss_error": 0.4006686508655548, "train/loss_total": 0.4179186224937439 }, { "epoch": 4.693294149078279, "step": 17567, "train/loss_ctc": 0.9311800003051758, "train/loss_error": 0.44980132579803467, "train/loss_total": 0.5460770726203918 }, { "epoch": 4.693561314453647, "step": 17568, "train/loss_ctc": 1.28520929813385, "train/loss_error": 0.5304316878318787, "train/loss_total": 0.681387186050415 }, { "epoch": 4.693828479829014, "step": 17569, "train/loss_ctc": 0.9792698621749878, "train/loss_error": 0.4019632637500763, "train/loss_total": 0.5174245834350586 }, { 
"epoch": 4.694095645204381, "grad_norm": 1.5973877906799316, "learning_rate": 1.849853059043548e-06, "loss": 0.5045, "step": 17570 }, { "epoch": 4.694095645204381, "step": 17570, "train/loss_ctc": 1.5526835918426514, "train/loss_error": 0.4087110757827759, "train/loss_total": 0.6375055909156799 }, { "epoch": 4.694362810579749, "step": 17571, "train/loss_ctc": 0.9103108644485474, "train/loss_error": 0.5187612771987915, "train/loss_total": 0.5970711708068848 }, { "epoch": 4.694629975955117, "step": 17572, "train/loss_ctc": 0.627724826335907, "train/loss_error": 0.408810019493103, "train/loss_total": 0.45259299874305725 }, { "epoch": 4.694897141330483, "step": 17573, "train/loss_ctc": 0.4700878858566284, "train/loss_error": 0.4667224884033203, "train/loss_total": 0.4673956036567688 }, { "epoch": 4.695164306705851, "step": 17574, "train/loss_ctc": 0.914039134979248, "train/loss_error": 0.4766331911087036, "train/loss_total": 0.5641143918037415 }, { "epoch": 4.695431472081218, "step": 17575, "train/loss_ctc": 1.3966120481491089, "train/loss_error": 0.44715940952301025, "train/loss_total": 0.6370499134063721 }, { "epoch": 4.6956986374565854, "step": 17576, "train/loss_ctc": 0.5057993531227112, "train/loss_error": 0.4215283989906311, "train/loss_total": 0.4383825957775116 }, { "epoch": 4.695965802831953, "step": 17577, "train/loss_ctc": 0.27555978298187256, "train/loss_error": 0.49967390298843384, "train/loss_total": 0.45485106110572815 }, { "epoch": 4.696232968207321, "step": 17578, "train/loss_ctc": 0.7378132343292236, "train/loss_error": 0.48393121361732483, "train/loss_total": 0.5347076654434204 }, { "epoch": 4.6965001335826875, "step": 17579, "train/loss_ctc": 1.1276646852493286, "train/loss_error": 0.4386817216873169, "train/loss_total": 0.5764783024787903 }, { "epoch": 4.696767298958055, "grad_norm": 2.671931028366089, "learning_rate": 1.833823136521507e-06, "loss": 0.536, "step": 17580 }, { "epoch": 4.696767298958055, "step": 17580, "train/loss_ctc": 
0.7929052114486694, "train/loss_error": 0.4437088370323181, "train/loss_total": 0.5135481357574463 }, { "epoch": 4.697034464333423, "step": 17581, "train/loss_ctc": 0.8396915197372437, "train/loss_error": 0.3720186650753021, "train/loss_total": 0.4655532240867615 }, { "epoch": 4.6973016297087895, "step": 17582, "train/loss_ctc": 0.7976081371307373, "train/loss_error": 0.4587121605873108, "train/loss_total": 0.5264913439750671 }, { "epoch": 4.697568795084157, "step": 17583, "train/loss_ctc": 1.1363166570663452, "train/loss_error": 0.48257580399513245, "train/loss_total": 0.613323986530304 }, { "epoch": 4.697835960459525, "step": 17584, "train/loss_ctc": 0.23499327898025513, "train/loss_error": 0.3889419734477997, "train/loss_total": 0.35815224051475525 }, { "epoch": 4.6981031258348915, "step": 17585, "train/loss_ctc": 0.4276233911514282, "train/loss_error": 0.4633573293685913, "train/loss_total": 0.45621055364608765 }, { "epoch": 4.698370291210259, "step": 17586, "train/loss_ctc": 1.0993571281433105, "train/loss_error": 0.41631218791007996, "train/loss_total": 0.5529211759567261 }, { "epoch": 4.698637456585627, "step": 17587, "train/loss_ctc": 0.23540808260440826, "train/loss_error": 0.4082021415233612, "train/loss_total": 0.37364333868026733 }, { "epoch": 4.698904621960994, "step": 17588, "train/loss_ctc": 1.082769751548767, "train/loss_error": 0.43184351921081543, "train/loss_total": 0.5620287656784058 }, { "epoch": 4.699171787336361, "step": 17589, "train/loss_ctc": 0.49596700072288513, "train/loss_error": 0.41810041666030884, "train/loss_total": 0.4336737394332886 }, { "epoch": 4.699438952711729, "grad_norm": 1.8507791757583618, "learning_rate": 1.8177932139994659e-06, "loss": 0.4856, "step": 17590 }, { "epoch": 4.699438952711729, "step": 17590, "train/loss_ctc": 0.3785017728805542, "train/loss_error": 0.38093996047973633, "train/loss_total": 0.38045233488082886 }, { "epoch": 4.699706118087096, "step": 17591, "train/loss_ctc": 0.5971687436103821, 
"train/loss_error": 0.48604705929756165, "train/loss_total": 0.5082713961601257 }, { "epoch": 4.699973283462463, "step": 17592, "train/loss_ctc": 0.7962865233421326, "train/loss_error": 0.4422897398471832, "train/loss_total": 0.513089120388031 }, { "epoch": 4.700240448837831, "step": 17593, "train/loss_ctc": 0.5580986738204956, "train/loss_error": 0.42691850662231445, "train/loss_total": 0.4531545639038086 }, { "epoch": 4.700507614213198, "step": 17594, "train/loss_ctc": 0.6558796167373657, "train/loss_error": 0.33179548382759094, "train/loss_total": 0.396612286567688 }, { "epoch": 4.700774779588565, "step": 17595, "train/loss_ctc": 0.5312323570251465, "train/loss_error": 0.44147077202796936, "train/loss_total": 0.45942309498786926 }, { "epoch": 4.701041944963933, "step": 17596, "train/loss_ctc": 1.0542041063308716, "train/loss_error": 0.4222007095813751, "train/loss_total": 0.5486013889312744 }, { "epoch": 4.7013091103393, "step": 17597, "train/loss_ctc": 0.4512890577316284, "train/loss_error": 0.42414551973342896, "train/loss_total": 0.42957425117492676 }, { "epoch": 4.701576275714667, "step": 17598, "train/loss_ctc": 0.9847733974456787, "train/loss_error": 0.4202268421649933, "train/loss_total": 0.5331361889839172 }, { "epoch": 4.701843441090035, "step": 17599, "train/loss_ctc": 0.964612603187561, "train/loss_error": 0.4683576226234436, "train/loss_total": 0.5676085948944092 }, { "epoch": 4.702110606465402, "grad_norm": 3.418008327484131, "learning_rate": 1.8017632914774246e-06, "loss": 0.479, "step": 17600 }, { "epoch": 4.702110606465402, "step": 17600, "train/loss_ctc": 0.6308928728103638, "train/loss_error": 0.46896785497665405, "train/loss_total": 0.501352846622467 }, { "epoch": 4.702377771840769, "step": 17601, "train/loss_ctc": 1.1100363731384277, "train/loss_error": 0.4244566559791565, "train/loss_total": 0.5615726113319397 }, { "epoch": 4.702644937216137, "step": 17602, "train/loss_ctc": 0.690351128578186, "train/loss_error": 0.5073838829994202, 
"train/loss_total": 0.5439773797988892 }, { "epoch": 4.702912102591505, "step": 17603, "train/loss_ctc": 0.8423464298248291, "train/loss_error": 0.4766668975353241, "train/loss_total": 0.549802839756012 }, { "epoch": 4.703179267966871, "step": 17604, "train/loss_ctc": 0.3860741853713989, "train/loss_error": 0.40965989232063293, "train/loss_total": 0.40494275093078613 }, { "epoch": 4.703446433342239, "step": 17605, "train/loss_ctc": 0.7698347568511963, "train/loss_error": 0.47467055916786194, "train/loss_total": 0.5337033867835999 }, { "epoch": 4.703713598717606, "step": 17606, "train/loss_ctc": 0.45062533020973206, "train/loss_error": 0.446971595287323, "train/loss_total": 0.4477023482322693 }, { "epoch": 4.703980764092973, "step": 17607, "train/loss_ctc": 0.5810686349868774, "train/loss_error": 0.4301151931285858, "train/loss_total": 0.4603058993816376 }, { "epoch": 4.704247929468341, "step": 17608, "train/loss_ctc": 0.7323018908500671, "train/loss_error": 0.42546331882476807, "train/loss_total": 0.48683103919029236 }, { "epoch": 4.704515094843709, "step": 17609, "train/loss_ctc": 0.46971356868743896, "train/loss_error": 0.38213467597961426, "train/loss_total": 0.3996504545211792 }, { "epoch": 4.704782260219075, "grad_norm": 1.8574137687683105, "learning_rate": 1.7857333689553833e-06, "loss": 0.489, "step": 17610 }, { "epoch": 4.704782260219075, "step": 17610, "train/loss_ctc": 0.3719787299633026, "train/loss_error": 0.46395406126976013, "train/loss_total": 0.44555899500846863 }, { "epoch": 4.705049425594443, "step": 17611, "train/loss_ctc": 0.6354186534881592, "train/loss_error": 0.392408549785614, "train/loss_total": 0.44101059436798096 }, { "epoch": 4.705316590969811, "step": 17612, "train/loss_ctc": 0.513681948184967, "train/loss_error": 0.4285183250904083, "train/loss_total": 0.4455510377883911 }, { "epoch": 4.7055837563451774, "step": 17613, "train/loss_ctc": 0.943757951259613, "train/loss_error": 0.4327913224697113, "train/loss_total": 0.5349846482276917 }, 
{ "epoch": 4.705850921720545, "step": 17614, "train/loss_ctc": 1.6859182119369507, "train/loss_error": 0.4485647976398468, "train/loss_total": 0.6960355043411255 }, { "epoch": 4.706118087095913, "step": 17615, "train/loss_ctc": 0.5668991804122925, "train/loss_error": 0.4767987132072449, "train/loss_total": 0.4948188066482544 }, { "epoch": 4.7063852524712795, "step": 17616, "train/loss_ctc": 1.2512365579605103, "train/loss_error": 0.46030887961387634, "train/loss_total": 0.61849445104599 }, { "epoch": 4.706652417846647, "step": 17617, "train/loss_ctc": 0.4622288942337036, "train/loss_error": 0.4562870264053345, "train/loss_total": 0.4574754238128662 }, { "epoch": 4.706919583222015, "step": 17618, "train/loss_ctc": 0.5083126425743103, "train/loss_error": 0.43202728033065796, "train/loss_total": 0.4472843408584595 }, { "epoch": 4.7071867485973815, "step": 17619, "train/loss_ctc": 0.43533021211624146, "train/loss_error": 0.4222639501094818, "train/loss_total": 0.42487722635269165 }, { "epoch": 4.707453913972749, "grad_norm": 2.223870038986206, "learning_rate": 1.7697034464333422e-06, "loss": 0.5006, "step": 17620 }, { "epoch": 4.707453913972749, "step": 17620, "train/loss_ctc": 1.4105881452560425, "train/loss_error": 0.4632303714752197, "train/loss_total": 0.6527019739151001 }, { "epoch": 4.707721079348117, "step": 17621, "train/loss_ctc": 0.5981515645980835, "train/loss_error": 0.4745309352874756, "train/loss_total": 0.49925506114959717 }, { "epoch": 4.7079882447234835, "step": 17622, "train/loss_ctc": 0.4583304822444916, "train/loss_error": 0.4643063247203827, "train/loss_total": 0.46311116218566895 }, { "epoch": 4.708255410098851, "step": 17623, "train/loss_ctc": 0.6761651039123535, "train/loss_error": 0.43980270624160767, "train/loss_total": 0.48707520961761475 }, { "epoch": 4.708522575474219, "step": 17624, "train/loss_ctc": 0.669288694858551, "train/loss_error": 0.4900398850440979, "train/loss_total": 0.5258896350860596 }, { "epoch": 4.708789740849586, "step": 
17625, "train/loss_ctc": 1.1403851509094238, "train/loss_error": 0.3943893313407898, "train/loss_total": 0.5435885190963745 }, { "epoch": 4.709056906224953, "step": 17626, "train/loss_ctc": 0.8557037711143494, "train/loss_error": 0.4679727256298065, "train/loss_total": 0.5455189347267151 }, { "epoch": 4.709324071600321, "step": 17627, "train/loss_ctc": 0.6766344904899597, "train/loss_error": 0.4191611409187317, "train/loss_total": 0.47065579891204834 }, { "epoch": 4.709591236975688, "step": 17628, "train/loss_ctc": 0.6174103021621704, "train/loss_error": 0.5034319162368774, "train/loss_total": 0.526227593421936 }, { "epoch": 4.709858402351055, "step": 17629, "train/loss_ctc": 1.2461159229278564, "train/loss_error": 0.36014577746391296, "train/loss_total": 0.5373398065567017 }, { "epoch": 4.710125567726423, "grad_norm": 2.7953617572784424, "learning_rate": 1.7536735239113012e-06, "loss": 0.5251, "step": 17630 }, { "epoch": 4.710125567726423, "step": 17630, "train/loss_ctc": 0.25665098428726196, "train/loss_error": 0.3747856020927429, "train/loss_total": 0.3511587083339691 }, { "epoch": 4.71039273310179, "step": 17631, "train/loss_ctc": 0.46248143911361694, "train/loss_error": 0.3779684007167816, "train/loss_total": 0.3948709964752197 }, { "epoch": 4.710659898477157, "step": 17632, "train/loss_ctc": 1.0147770643234253, "train/loss_error": 0.42067205905914307, "train/loss_total": 0.5394930839538574 }, { "epoch": 4.710927063852525, "step": 17633, "train/loss_ctc": 1.0290534496307373, "train/loss_error": 0.46235042810440063, "train/loss_total": 0.5756910443305969 }, { "epoch": 4.7111942292278925, "step": 17634, "train/loss_ctc": 0.7777689695358276, "train/loss_error": 0.40197598934173584, "train/loss_total": 0.4771345853805542 }, { "epoch": 4.711461394603259, "step": 17635, "train/loss_ctc": 0.36388182640075684, "train/loss_error": 0.41243329644203186, "train/loss_total": 0.4027230143547058 }, { "epoch": 4.711728559978627, "step": 17636, "train/loss_ctc": 
0.7254327535629272, "train/loss_error": 0.47402647137641907, "train/loss_total": 0.5243077278137207 }, { "epoch": 4.711995725353994, "step": 17637, "train/loss_ctc": 0.44052064418792725, "train/loss_error": 0.4586629867553711, "train/loss_total": 0.4550345242023468 }, { "epoch": 4.712262890729361, "step": 17638, "train/loss_ctc": 0.8120671510696411, "train/loss_error": 0.42491450905799866, "train/loss_total": 0.5023450255393982 }, { "epoch": 4.712530056104729, "step": 17639, "train/loss_ctc": 0.4817378520965576, "train/loss_error": 0.43571504950523376, "train/loss_total": 0.444919615983963 }, { "epoch": 4.712797221480097, "grad_norm": 2.0244719982147217, "learning_rate": 1.73764360138926e-06, "loss": 0.4668, "step": 17640 }, { "epoch": 4.712797221480097, "step": 17640, "train/loss_ctc": 1.153944969177246, "train/loss_error": 0.4907708466053009, "train/loss_total": 0.6234056949615479 }, { "epoch": 4.713064386855463, "step": 17641, "train/loss_ctc": 0.28253355622291565, "train/loss_error": 0.37601524591445923, "train/loss_total": 0.3573189377784729 }, { "epoch": 4.713331552230831, "step": 17642, "train/loss_ctc": 0.3354346454143524, "train/loss_error": 0.4318186044692993, "train/loss_total": 0.41254183650016785 }, { "epoch": 4.713598717606198, "step": 17643, "train/loss_ctc": 1.027035117149353, "train/loss_error": 0.44408920407295227, "train/loss_total": 0.5606783628463745 }, { "epoch": 4.713865882981565, "step": 17644, "train/loss_ctc": 0.3789313733577728, "train/loss_error": 0.35932767391204834, "train/loss_total": 0.36324840784072876 }, { "epoch": 4.714133048356933, "step": 17645, "train/loss_ctc": 1.4858224391937256, "train/loss_error": 0.41230836510658264, "train/loss_total": 0.6270111799240112 }, { "epoch": 4.714400213732301, "step": 17646, "train/loss_ctc": 0.37264472246170044, "train/loss_error": 0.38577964901924133, "train/loss_total": 0.38315266370773315 }, { "epoch": 4.714667379107667, "step": 17647, "train/loss_ctc": 0.2730261981487274, 
"train/loss_error": 0.4212605953216553, "train/loss_total": 0.3916137218475342 }, { "epoch": 4.714934544483035, "step": 17648, "train/loss_ctc": 0.42144063115119934, "train/loss_error": 0.4284295439720154, "train/loss_total": 0.4270317554473877 }, { "epoch": 4.715201709858403, "step": 17649, "train/loss_ctc": 0.881172776222229, "train/loss_error": 0.4601345658302307, "train/loss_total": 0.5443422198295593 }, { "epoch": 4.7154688752337695, "grad_norm": 1.3821250200271606, "learning_rate": 1.7216136788672188e-06, "loss": 0.469, "step": 17650 }, { "epoch": 4.7154688752337695, "step": 17650, "train/loss_ctc": 0.41291704773902893, "train/loss_error": 0.38649386167526245, "train/loss_total": 0.39177852869033813 }, { "epoch": 4.715736040609137, "step": 17651, "train/loss_ctc": 0.8054871559143066, "train/loss_error": 0.4570373594760895, "train/loss_total": 0.5267273187637329 }, { "epoch": 4.716003205984505, "step": 17652, "train/loss_ctc": 0.7689116597175598, "train/loss_error": 0.44241371750831604, "train/loss_total": 0.5077133178710938 }, { "epoch": 4.7162703713598715, "step": 17653, "train/loss_ctc": 0.2153625190258026, "train/loss_error": 0.42355990409851074, "train/loss_total": 0.3819204270839691 }, { "epoch": 4.716537536735239, "step": 17654, "train/loss_ctc": 1.5860064029693604, "train/loss_error": 0.38209766149520874, "train/loss_total": 0.6228793859481812 }, { "epoch": 4.716804702110607, "step": 17655, "train/loss_ctc": 0.5379481911659241, "train/loss_error": 0.45010295510292053, "train/loss_total": 0.4676719903945923 }, { "epoch": 4.7170718674859735, "step": 17656, "train/loss_ctc": 0.5243347883224487, "train/loss_error": 0.4109848141670227, "train/loss_total": 0.4336548149585724 }, { "epoch": 4.717339032861341, "step": 17657, "train/loss_ctc": 0.5403859615325928, "train/loss_error": 0.5126259326934814, "train/loss_total": 0.5181779265403748 }, { "epoch": 4.717606198236709, "step": 17658, "train/loss_ctc": 0.8222615718841553, "train/loss_error": 
0.3875539302825928, "train/loss_total": 0.47449547052383423 }, { "epoch": 4.7178733636120755, "step": 17659, "train/loss_ctc": 0.43928956985473633, "train/loss_error": 0.4726162254810333, "train/loss_total": 0.4659509062767029 }, { "epoch": 4.718140528987443, "grad_norm": 1.5187925100326538, "learning_rate": 1.7055837563451778e-06, "loss": 0.4791, "step": 17660 }, { "epoch": 4.718140528987443, "step": 17660, "train/loss_ctc": 0.6178382635116577, "train/loss_error": 0.395221471786499, "train/loss_total": 0.43974483013153076 }, { "epoch": 4.718407694362811, "step": 17661, "train/loss_ctc": 1.0619423389434814, "train/loss_error": 0.4173470437526703, "train/loss_total": 0.5462661385536194 }, { "epoch": 4.718674859738178, "step": 17662, "train/loss_ctc": 0.753392219543457, "train/loss_error": 0.4437386393547058, "train/loss_total": 0.505669355392456 }, { "epoch": 4.718942025113545, "step": 17663, "train/loss_ctc": 0.45797914266586304, "train/loss_error": 0.38968178629875183, "train/loss_total": 0.40334126353263855 }, { "epoch": 4.719209190488913, "step": 17664, "train/loss_ctc": 0.4308086037635803, "train/loss_error": 0.41900166869163513, "train/loss_total": 0.42136308550834656 }, { "epoch": 4.71947635586428, "step": 17665, "train/loss_ctc": 0.42117074131965637, "train/loss_error": 0.48175081610679626, "train/loss_total": 0.4696348011493683 }, { "epoch": 4.719743521239647, "step": 17666, "train/loss_ctc": 0.6278850436210632, "train/loss_error": 0.4633541405200958, "train/loss_total": 0.4962603449821472 }, { "epoch": 4.720010686615015, "step": 17667, "train/loss_ctc": 1.0315221548080444, "train/loss_error": 0.45283043384552, "train/loss_total": 0.5685688257217407 }, { "epoch": 4.720277851990382, "step": 17668, "train/loss_ctc": 1.2419095039367676, "train/loss_error": 0.40369048714637756, "train/loss_total": 0.5713343024253845 }, { "epoch": 4.720545017365749, "step": 17669, "train/loss_ctc": 0.48673295974731445, "train/loss_error": 0.45929065346717834, "train/loss_total": 
0.4647791385650635 }, { "epoch": 4.720812182741117, "grad_norm": 1.8897953033447266, "learning_rate": 1.6895538338231367e-06, "loss": 0.4887, "step": 17670 }, { "epoch": 4.720812182741117, "step": 17670, "train/loss_ctc": 0.7762461304664612, "train/loss_error": 0.3867993950843811, "train/loss_total": 0.4646887183189392 }, { "epoch": 4.7210793481164846, "step": 17671, "train/loss_ctc": 0.47249072790145874, "train/loss_error": 0.35832861065864563, "train/loss_total": 0.38116106390953064 }, { "epoch": 4.721346513491851, "step": 17672, "train/loss_ctc": 0.33195310831069946, "train/loss_error": 0.4249734878540039, "train/loss_total": 0.4063694179058075 }, { "epoch": 4.721613678867219, "step": 17673, "train/loss_ctc": 0.49527013301849365, "train/loss_error": 0.4123396575450897, "train/loss_total": 0.4289257824420929 }, { "epoch": 4.721880844242586, "step": 17674, "train/loss_ctc": 1.0884343385696411, "train/loss_error": 0.48414701223373413, "train/loss_total": 0.6050044894218445 }, { "epoch": 4.722148009617953, "step": 17675, "train/loss_ctc": 1.3063496351242065, "train/loss_error": 0.40346336364746094, "train/loss_total": 0.584040641784668 }, { "epoch": 4.722415174993321, "step": 17676, "train/loss_ctc": 0.6841908693313599, "train/loss_error": 0.5054931640625, "train/loss_total": 0.541232705116272 }, { "epoch": 4.722682340368689, "step": 17677, "train/loss_ctc": 0.47958672046661377, "train/loss_error": 0.39404726028442383, "train/loss_total": 0.41115516424179077 }, { "epoch": 4.722949505744055, "step": 17678, "train/loss_ctc": 1.047542929649353, "train/loss_error": 0.4298466145992279, "train/loss_total": 0.553385853767395 }, { "epoch": 4.723216671119423, "step": 17679, "train/loss_ctc": 1.6356260776519775, "train/loss_error": 0.46784019470214844, "train/loss_total": 0.7013974189758301 }, { "epoch": 4.723483836494791, "grad_norm": 1.6416497230529785, "learning_rate": 1.6735239113010954e-06, "loss": 0.5077, "step": 17680 }, { "epoch": 4.723483836494791, "step": 17680, 
"train/loss_ctc": 0.4439648389816284, "train/loss_error": 0.37526071071624756, "train/loss_total": 0.3890015482902527 }, { "epoch": 4.723751001870157, "step": 17681, "train/loss_ctc": 0.8000903129577637, "train/loss_error": 0.47569701075553894, "train/loss_total": 0.5405756831169128 }, { "epoch": 4.724018167245525, "step": 17682, "train/loss_ctc": 0.6468236446380615, "train/loss_error": 0.45420387387275696, "train/loss_total": 0.4927278459072113 }, { "epoch": 4.724285332620893, "step": 17683, "train/loss_ctc": 0.7057650089263916, "train/loss_error": 0.39568039774894714, "train/loss_total": 0.457697331905365 }, { "epoch": 4.724552497996259, "step": 17684, "train/loss_ctc": 0.718573808670044, "train/loss_error": 0.4331737160682678, "train/loss_total": 0.490253746509552 }, { "epoch": 4.724819663371627, "step": 17685, "train/loss_ctc": 0.9288424253463745, "train/loss_error": 0.4855863153934479, "train/loss_total": 0.574237585067749 }, { "epoch": 4.725086828746995, "step": 17686, "train/loss_ctc": 0.29056912660598755, "train/loss_error": 0.38812774419784546, "train/loss_total": 0.3686160147190094 }, { "epoch": 4.7253539941223615, "step": 17687, "train/loss_ctc": 0.7071095108985901, "train/loss_error": 0.4855884909629822, "train/loss_total": 0.5298926830291748 }, { "epoch": 4.725621159497729, "step": 17688, "train/loss_ctc": 0.6236278414726257, "train/loss_error": 0.367983877658844, "train/loss_total": 0.4191126823425293 }, { "epoch": 4.725888324873097, "step": 17689, "train/loss_ctc": 0.24752171337604523, "train/loss_error": 0.43622782826423645, "train/loss_total": 0.3984866142272949 }, { "epoch": 4.7261554902484635, "grad_norm": 1.1991909742355347, "learning_rate": 1.6574939887790543e-06, "loss": 0.4661, "step": 17690 }, { "epoch": 4.7261554902484635, "step": 17690, "train/loss_ctc": 0.8754009008407593, "train/loss_error": 0.3649641275405884, "train/loss_total": 0.4670514762401581 }, { "epoch": 4.726422655623831, "step": 17691, "train/loss_ctc": 0.7054803371429443, 
"train/loss_error": 0.460305780172348, "train/loss_total": 0.5093407034873962 }, { "epoch": 4.726689820999199, "step": 17692, "train/loss_ctc": 0.5257769227027893, "train/loss_error": 0.3754255771636963, "train/loss_total": 0.40549585223197937 }, { "epoch": 4.7269569863745655, "step": 17693, "train/loss_ctc": 0.6924022436141968, "train/loss_error": 0.4239931106567383, "train/loss_total": 0.4776749610900879 }, { "epoch": 4.727224151749933, "step": 17694, "train/loss_ctc": 0.2590826153755188, "train/loss_error": 0.40283939242362976, "train/loss_total": 0.3740880489349365 }, { "epoch": 4.727491317125301, "step": 17695, "train/loss_ctc": 0.5146657228469849, "train/loss_error": 0.3911045491695404, "train/loss_total": 0.4158167839050293 }, { "epoch": 4.7277584825006675, "step": 17696, "train/loss_ctc": 0.7936409711837769, "train/loss_error": 0.42540934681892395, "train/loss_total": 0.4990556836128235 }, { "epoch": 4.728025647876035, "step": 17697, "train/loss_ctc": 0.6903977394104004, "train/loss_error": 0.48628005385398865, "train/loss_total": 0.5271036028862 }, { "epoch": 4.728292813251403, "step": 17698, "train/loss_ctc": 0.8462070822715759, "train/loss_error": 0.4674067795276642, "train/loss_total": 0.5431668162345886 }, { "epoch": 4.72855997862677, "step": 17699, "train/loss_ctc": 1.2062320709228516, "train/loss_error": 0.4668954908847809, "train/loss_total": 0.6147628426551819 }, { "epoch": 4.728827144002137, "grad_norm": 2.3584296703338623, "learning_rate": 1.6414640662570133e-06, "loss": 0.4834, "step": 17700 }, { "epoch": 4.728827144002137, "step": 17700, "train/loss_ctc": 0.43946024775505066, "train/loss_error": 0.39889615774154663, "train/loss_total": 0.40700897574424744 }, { "epoch": 4.729094309377505, "step": 17701, "train/loss_ctc": 1.5904332399368286, "train/loss_error": 0.4252221882343292, "train/loss_total": 0.6582643985748291 }, { "epoch": 4.7293614747528725, "step": 17702, "train/loss_ctc": 0.8394449353218079, "train/loss_error": 0.4115180969238281, 
"train/loss_total": 0.4971034526824951 }, { "epoch": 4.729628640128239, "step": 17703, "train/loss_ctc": 1.374416708946228, "train/loss_error": 0.47433993220329285, "train/loss_total": 0.6543552875518799 }, { "epoch": 4.729895805503607, "step": 17704, "train/loss_ctc": 1.1189286708831787, "train/loss_error": 0.3676909804344177, "train/loss_total": 0.5179385542869568 }, { "epoch": 4.730162970878974, "step": 17705, "train/loss_ctc": 0.39388641715049744, "train/loss_error": 0.42282527685165405, "train/loss_total": 0.4170375168323517 }, { "epoch": 4.730430136254341, "step": 17706, "train/loss_ctc": 0.41115182638168335, "train/loss_error": 0.4518372416496277, "train/loss_total": 0.4437001645565033 }, { "epoch": 4.730697301629709, "step": 17707, "train/loss_ctc": 0.7033181190490723, "train/loss_error": 0.3779078423976898, "train/loss_total": 0.44298991560935974 }, { "epoch": 4.730964467005077, "step": 17708, "train/loss_ctc": 0.49052998423576355, "train/loss_error": 0.49018776416778564, "train/loss_total": 0.4902562201023102 }, { "epoch": 4.731231632380443, "step": 17709, "train/loss_ctc": 0.561120867729187, "train/loss_error": 0.4099893569946289, "train/loss_total": 0.4402156472206116 }, { "epoch": 4.731498797755811, "grad_norm": 1.5342706441879272, "learning_rate": 1.625434143734972e-06, "loss": 0.4969, "step": 17710 }, { "epoch": 4.731498797755811, "step": 17710, "train/loss_ctc": 0.3708629310131073, "train/loss_error": 0.428825706243515, "train/loss_total": 0.4172331690788269 }, { "epoch": 4.731765963131179, "step": 17711, "train/loss_ctc": 0.5750110149383545, "train/loss_error": 0.4374709129333496, "train/loss_total": 0.464978963136673 }, { "epoch": 4.732033128506545, "step": 17712, "train/loss_ctc": 0.5150871276855469, "train/loss_error": 0.4318418800830841, "train/loss_total": 0.4484909176826477 }, { "epoch": 4.732300293881913, "step": 17713, "train/loss_ctc": 1.0772218704223633, "train/loss_error": 0.43323656916618347, "train/loss_total": 0.5620336532592773 }, { 
"epoch": 4.732567459257281, "step": 17714, "train/loss_ctc": 0.34774261713027954, "train/loss_error": 0.3973950445652008, "train/loss_total": 0.3874645531177521 }, { "epoch": 4.732834624632647, "step": 17715, "train/loss_ctc": 0.45916038751602173, "train/loss_error": 0.425631582736969, "train/loss_total": 0.43233734369277954 }, { "epoch": 4.733101790008015, "step": 17716, "train/loss_ctc": 1.4213393926620483, "train/loss_error": 0.42940080165863037, "train/loss_total": 0.6277885437011719 }, { "epoch": 4.733368955383383, "step": 17717, "train/loss_ctc": 0.4148143529891968, "train/loss_error": 0.3618886172771454, "train/loss_total": 0.3724737763404846 }, { "epoch": 4.733636120758749, "step": 17718, "train/loss_ctc": 0.567467212677002, "train/loss_error": 0.4846017360687256, "train/loss_total": 0.501174807548523 }, { "epoch": 4.733903286134117, "step": 17719, "train/loss_ctc": 0.5737584233283997, "train/loss_error": 0.4409468173980713, "train/loss_total": 0.4675091505050659 }, { "epoch": 4.734170451509485, "grad_norm": 2.310244560241699, "learning_rate": 1.6094042212129307e-06, "loss": 0.4681, "step": 17720 }, { "epoch": 4.734170451509485, "step": 17720, "train/loss_ctc": 0.6127243638038635, "train/loss_error": 0.38639041781425476, "train/loss_total": 0.43165722489356995 }, { "epoch": 4.734437616884851, "step": 17721, "train/loss_ctc": 0.5655031204223633, "train/loss_error": 0.45104101300239563, "train/loss_total": 0.4739334285259247 }, { "epoch": 4.734704782260219, "step": 17722, "train/loss_ctc": 0.31714367866516113, "train/loss_error": 0.48175951838493347, "train/loss_total": 0.4488363564014435 }, { "epoch": 4.734971947635587, "step": 17723, "train/loss_ctc": 0.22633971273899078, "train/loss_error": 0.4445110261440277, "train/loss_total": 0.4008767604827881 }, { "epoch": 4.7352391130109535, "step": 17724, "train/loss_ctc": 1.4781033992767334, "train/loss_error": 0.4279700517654419, "train/loss_total": 0.6379967331886292 }, { "epoch": 4.735506278386321, "step": 
17725, "train/loss_ctc": 0.6273428201675415, "train/loss_error": 0.40253156423568726, "train/loss_total": 0.44749385118484497 }, { "epoch": 4.735773443761689, "step": 17726, "train/loss_ctc": 0.5260661840438843, "train/loss_error": 0.3995511531829834, "train/loss_total": 0.4248541593551636 }, { "epoch": 4.7360406091370555, "step": 17727, "train/loss_ctc": 0.6351934671401978, "train/loss_error": 0.45292243361473083, "train/loss_total": 0.48937666416168213 }, { "epoch": 4.736307774512423, "step": 17728, "train/loss_ctc": 1.090494155883789, "train/loss_error": 0.4546155631542206, "train/loss_total": 0.5817912817001343 }, { "epoch": 4.736574939887791, "step": 17729, "train/loss_ctc": 0.7031108140945435, "train/loss_error": 0.3835202753543854, "train/loss_total": 0.44743838906288147 }, { "epoch": 4.7368421052631575, "grad_norm": 1.4991281032562256, "learning_rate": 1.5933742986908896e-06, "loss": 0.4784, "step": 17730 }, { "epoch": 4.7368421052631575, "step": 17730, "train/loss_ctc": 0.7638018131256104, "train/loss_error": 0.42416268587112427, "train/loss_total": 0.49209052324295044 }, { "epoch": 4.737109270638525, "step": 17731, "train/loss_ctc": 0.8121135830879211, "train/loss_error": 0.42496317625045776, "train/loss_total": 0.5023932456970215 }, { "epoch": 4.737376436013893, "step": 17732, "train/loss_ctc": 0.8587198257446289, "train/loss_error": 0.4297596514225006, "train/loss_total": 0.5155516862869263 }, { "epoch": 4.73764360138926, "step": 17733, "train/loss_ctc": 0.6470697522163391, "train/loss_error": 0.4151356518268585, "train/loss_total": 0.4615224599838257 }, { "epoch": 4.737910766764627, "step": 17734, "train/loss_ctc": 0.4095943570137024, "train/loss_error": 0.3687534034252167, "train/loss_total": 0.3769215941429138 }, { "epoch": 4.738177932139995, "step": 17735, "train/loss_ctc": 0.7956935167312622, "train/loss_error": 0.4313563108444214, "train/loss_total": 0.5042237639427185 }, { "epoch": 4.738445097515362, "step": 17736, "train/loss_ctc": 
0.407956600189209, "train/loss_error": 0.40746116638183594, "train/loss_total": 0.4075602889060974 }, { "epoch": 4.738712262890729, "step": 17737, "train/loss_ctc": 0.6936292052268982, "train/loss_error": 0.4692339599132538, "train/loss_total": 0.5141130089759827 }, { "epoch": 4.738979428266097, "step": 17738, "train/loss_ctc": 0.3413449823856354, "train/loss_error": 0.43931859731674194, "train/loss_total": 0.41972386837005615 }, { "epoch": 4.7392465936414645, "step": 17739, "train/loss_ctc": 0.8209909200668335, "train/loss_error": 0.38217344880104065, "train/loss_total": 0.46993696689605713 }, { "epoch": 4.739513759016831, "grad_norm": 2.0473718643188477, "learning_rate": 1.5773443761688486e-06, "loss": 0.4664, "step": 17740 }, { "epoch": 4.739513759016831, "step": 17740, "train/loss_ctc": 1.174548864364624, "train/loss_error": 0.383505254983902, "train/loss_total": 0.5417139530181885 }, { "epoch": 4.739780924392199, "step": 17741, "train/loss_ctc": 0.9228609800338745, "train/loss_error": 0.4691894054412842, "train/loss_total": 0.5599237084388733 }, { "epoch": 4.740048089767566, "step": 17742, "train/loss_ctc": 0.5943851470947266, "train/loss_error": 0.38654786348342896, "train/loss_total": 0.4281153082847595 }, { "epoch": 4.740315255142933, "step": 17743, "train/loss_ctc": 0.8079203367233276, "train/loss_error": 0.4225051701068878, "train/loss_total": 0.4995881915092468 }, { "epoch": 4.740582420518301, "step": 17744, "train/loss_ctc": 0.5382150411605835, "train/loss_error": 0.4856953024864197, "train/loss_total": 0.49619925022125244 }, { "epoch": 4.740849585893669, "step": 17745, "train/loss_ctc": 0.5404148101806641, "train/loss_error": 0.4017714262008667, "train/loss_total": 0.42950010299682617 }, { "epoch": 4.741116751269035, "step": 17746, "train/loss_ctc": 0.5215222835540771, "train/loss_error": 0.3868752717971802, "train/loss_total": 0.41380468010902405 }, { "epoch": 4.741383916644403, "step": 17747, "train/loss_ctc": 0.8041651844978333, "train/loss_error": 
0.45952415466308594, "train/loss_total": 0.5284523963928223 }, { "epoch": 4.741651082019771, "step": 17748, "train/loss_ctc": 0.28249073028564453, "train/loss_error": 0.449496865272522, "train/loss_total": 0.41609564423561096 }, { "epoch": 4.741918247395137, "step": 17749, "train/loss_ctc": 0.7418426275253296, "train/loss_error": 0.4645332396030426, "train/loss_total": 0.5199950933456421 }, { "epoch": 4.742185412770505, "grad_norm": 1.3203178644180298, "learning_rate": 1.5613144536468075e-06, "loss": 0.4833, "step": 17750 }, { "epoch": 4.742185412770505, "step": 17750, "train/loss_ctc": 0.5019143223762512, "train/loss_error": 0.41574206948280334, "train/loss_total": 0.43297651410102844 }, { "epoch": 4.742452578145873, "step": 17751, "train/loss_ctc": 1.0748845338821411, "train/loss_error": 0.4120107591152191, "train/loss_total": 0.5445855259895325 }, { "epoch": 4.742719743521239, "step": 17752, "train/loss_ctc": 1.2784212827682495, "train/loss_error": 0.4808482527732849, "train/loss_total": 0.6403628587722778 }, { "epoch": 4.742986908896607, "step": 17753, "train/loss_ctc": 0.2746110260486603, "train/loss_error": 0.4418752193450928, "train/loss_total": 0.4084223806858063 }, { "epoch": 4.743254074271975, "step": 17754, "train/loss_ctc": 0.3946611285209656, "train/loss_error": 0.49644455313682556, "train/loss_total": 0.47608786821365356 }, { "epoch": 4.743521239647341, "step": 17755, "train/loss_ctc": 0.3322264552116394, "train/loss_error": 0.39454904198646545, "train/loss_total": 0.38208451867103577 }, { "epoch": 4.743788405022709, "step": 17756, "train/loss_ctc": 0.37023067474365234, "train/loss_error": 0.44602271914482117, "train/loss_total": 0.4308643043041229 }, { "epoch": 4.744055570398077, "step": 17757, "train/loss_ctc": 0.35405904054641724, "train/loss_error": 0.4455502927303314, "train/loss_total": 0.42725205421447754 }, { "epoch": 4.744322735773443, "step": 17758, "train/loss_ctc": 0.8668526411056519, "train/loss_error": 0.3993076682090759, 
"train/loss_total": 0.492816686630249 }, { "epoch": 4.744589901148811, "step": 17759, "train/loss_ctc": 0.46113520860671997, "train/loss_error": 0.42406216263771057, "train/loss_total": 0.43147677183151245 }, { "epoch": 4.744857066524179, "grad_norm": 1.4684191942214966, "learning_rate": 1.5452845311247662e-06, "loss": 0.4667, "step": 17760 }, { "epoch": 4.744857066524179, "step": 17760, "train/loss_ctc": 0.6799799799919128, "train/loss_error": 0.47873586416244507, "train/loss_total": 0.5189846754074097 }, { "epoch": 4.7451242318995455, "step": 17761, "train/loss_ctc": 1.1116704940795898, "train/loss_error": 0.48361736536026, "train/loss_total": 0.6092280149459839 }, { "epoch": 4.745391397274913, "step": 17762, "train/loss_ctc": 0.44680652022361755, "train/loss_error": 0.4366687536239624, "train/loss_total": 0.43869632482528687 }, { "epoch": 4.745658562650281, "step": 17763, "train/loss_ctc": 0.342972069978714, "train/loss_error": 0.4396652281284332, "train/loss_total": 0.4203266203403473 }, { "epoch": 4.7459257280256475, "step": 17764, "train/loss_ctc": 0.45061150193214417, "train/loss_error": 0.4110466241836548, "train/loss_total": 0.4189596176147461 }, { "epoch": 4.746192893401015, "step": 17765, "train/loss_ctc": 0.674595832824707, "train/loss_error": 0.41571542620658875, "train/loss_total": 0.4674915075302124 }, { "epoch": 4.746460058776383, "step": 17766, "train/loss_ctc": 0.6705356240272522, "train/loss_error": 0.4136260151863098, "train/loss_total": 0.4650079607963562 }, { "epoch": 4.7467272241517495, "step": 17767, "train/loss_ctc": 0.4712100028991699, "train/loss_error": 0.4962628483772278, "train/loss_total": 0.4912523031234741 }, { "epoch": 4.746994389527117, "step": 17768, "train/loss_ctc": 0.43998605012893677, "train/loss_error": 0.46854516863822937, "train/loss_total": 0.46283334493637085 }, { "epoch": 4.747261554902485, "step": 17769, "train/loss_ctc": 0.43327632546424866, "train/loss_error": 0.45681822299957275, "train/loss_total": 
0.45210984349250793 }, { "epoch": 4.747528720277852, "grad_norm": 1.0470134019851685, "learning_rate": 1.5292546086027251e-06, "loss": 0.4745, "step": 17770 }, { "epoch": 4.747528720277852, "step": 17770, "train/loss_ctc": 0.30519622564315796, "train/loss_error": 0.327163428068161, "train/loss_total": 0.32276999950408936 }, { "epoch": 4.747795885653219, "step": 17771, "train/loss_ctc": 0.6543145179748535, "train/loss_error": 0.4706934988498688, "train/loss_total": 0.5074177384376526 }, { "epoch": 4.748063051028587, "step": 17772, "train/loss_ctc": 0.762136697769165, "train/loss_error": 0.49080947041511536, "train/loss_total": 0.5450749397277832 }, { "epoch": 4.748330216403954, "step": 17773, "train/loss_ctc": 0.4193063974380493, "train/loss_error": 0.3881218433380127, "train/loss_total": 0.3943587839603424 }, { "epoch": 4.748597381779321, "step": 17774, "train/loss_ctc": 0.7014079689979553, "train/loss_error": 0.4059360921382904, "train/loss_total": 0.4650304913520813 }, { "epoch": 4.748864547154689, "step": 17775, "train/loss_ctc": 1.1910959482192993, "train/loss_error": 0.4224078357219696, "train/loss_total": 0.5761454701423645 }, { "epoch": 4.7491317125300565, "step": 17776, "train/loss_ctc": 0.6064728498458862, "train/loss_error": 0.49025046825408936, "train/loss_total": 0.5134949684143066 }, { "epoch": 4.749398877905423, "step": 17777, "train/loss_ctc": 0.35879355669021606, "train/loss_error": 0.44305649399757385, "train/loss_total": 0.4262039065361023 }, { "epoch": 4.749666043280791, "step": 17778, "train/loss_ctc": 0.7843032479286194, "train/loss_error": 0.4206046462059021, "train/loss_total": 0.49334436655044556 }, { "epoch": 4.7499332086561585, "step": 17779, "train/loss_ctc": 0.886459469795227, "train/loss_error": 0.44157031178474426, "train/loss_total": 0.5305481553077698 }, { "epoch": 4.750200374031525, "grad_norm": 1.6692419052124023, "learning_rate": 1.513224686080684e-06, "loss": 0.4774, "step": 17780 }, { "epoch": 4.750200374031525, "step": 17780, 
"train/loss_ctc": 0.5644139051437378, "train/loss_error": 0.40896689891815186, "train/loss_total": 0.44005632400512695 }, { "epoch": 4.750467539406893, "step": 17781, "train/loss_ctc": 0.45058539509773254, "train/loss_error": 0.4363279640674591, "train/loss_total": 0.4391794800758362 }, { "epoch": 4.750734704782261, "step": 17782, "train/loss_ctc": 1.1195731163024902, "train/loss_error": 0.40344202518463135, "train/loss_total": 0.546668291091919 }, { "epoch": 4.751001870157627, "step": 17783, "train/loss_ctc": 0.41379261016845703, "train/loss_error": 0.48104628920555115, "train/loss_total": 0.46759554743766785 }, { "epoch": 4.751269035532995, "step": 17784, "train/loss_ctc": 0.5587559938430786, "train/loss_error": 0.3792206645011902, "train/loss_total": 0.4151277244091034 }, { "epoch": 4.751536200908363, "step": 17785, "train/loss_ctc": 0.5723856687545776, "train/loss_error": 0.49256646633148193, "train/loss_total": 0.50853031873703 }, { "epoch": 4.751803366283729, "step": 17786, "train/loss_ctc": 0.4602128267288208, "train/loss_error": 0.3963148891925812, "train/loss_total": 0.4090944826602936 }, { "epoch": 4.752070531659097, "step": 17787, "train/loss_ctc": 0.46446794271469116, "train/loss_error": 0.37681618332862854, "train/loss_total": 0.39434656500816345 }, { "epoch": 4.752337697034465, "step": 17788, "train/loss_ctc": 0.4493514597415924, "train/loss_error": 0.4593730866909027, "train/loss_total": 0.45736879110336304 }, { "epoch": 4.752604862409831, "step": 17789, "train/loss_ctc": 1.0949329137802124, "train/loss_error": 0.4372405409812927, "train/loss_total": 0.5687790513038635 }, { "epoch": 4.752872027785199, "grad_norm": 1.587087631225586, "learning_rate": 1.497194763558643e-06, "loss": 0.4647, "step": 17790 }, { "epoch": 4.752872027785199, "step": 17790, "train/loss_ctc": 0.5968064665794373, "train/loss_error": 0.48159024119377136, "train/loss_total": 0.5046334862709045 }, { "epoch": 4.753139193160567, "step": 17791, "train/loss_ctc": 0.42690491676330566, 
"train/loss_error": 0.4072552025318146, "train/loss_total": 0.4111851453781128 }, { "epoch": 4.753406358535933, "step": 17792, "train/loss_ctc": 0.9495923519134521, "train/loss_error": 0.47968578338623047, "train/loss_total": 0.5736671090126038 }, { "epoch": 4.753673523911301, "step": 17793, "train/loss_ctc": 0.49365732073783875, "train/loss_error": 0.38197964429855347, "train/loss_total": 0.40431517362594604 }, { "epoch": 4.753940689286669, "step": 17794, "train/loss_ctc": 1.034099817276001, "train/loss_error": 0.401304692029953, "train/loss_total": 0.5278637409210205 }, { "epoch": 4.754207854662035, "step": 17795, "train/loss_ctc": 0.559070348739624, "train/loss_error": 0.37721699476242065, "train/loss_total": 0.41358768939971924 }, { "epoch": 4.754475020037403, "step": 17796, "train/loss_ctc": 1.1665925979614258, "train/loss_error": 0.4894235134124756, "train/loss_total": 0.6248573660850525 }, { "epoch": 4.754742185412771, "step": 17797, "train/loss_ctc": 0.6401221752166748, "train/loss_error": 0.4074536859989166, "train/loss_total": 0.4539874196052551 }, { "epoch": 4.7550093507881375, "step": 17798, "train/loss_ctc": 1.1461035013198853, "train/loss_error": 0.3705432713031769, "train/loss_total": 0.5256553292274475 }, { "epoch": 4.755276516163505, "step": 17799, "train/loss_ctc": 0.43237441778182983, "train/loss_error": 0.32633477449417114, "train/loss_total": 0.34754273295402527 }, { "epoch": 4.755543681538873, "grad_norm": 2.1323697566986084, "learning_rate": 1.4811648410366015e-06, "loss": 0.4787, "step": 17800 }, { "epoch": 4.755543681538873, "step": 17800, "train/loss_ctc": 1.6372389793395996, "train/loss_error": 0.4431770145893097, "train/loss_total": 0.6819894313812256 }, { "epoch": 4.75581084691424, "step": 17801, "train/loss_ctc": 0.3961502015590668, "train/loss_error": 0.4434525668621063, "train/loss_total": 0.43399208784103394 }, { "epoch": 4.756078012289607, "step": 17802, "train/loss_ctc": 0.34277570247650146, "train/loss_error": 
0.35195475816726685, "train/loss_total": 0.3501189649105072 }, { "epoch": 4.756345177664975, "step": 17803, "train/loss_ctc": 0.8338534832000732, "train/loss_error": 0.5127968192100525, "train/loss_total": 0.5770081281661987 }, { "epoch": 4.7566123430403415, "step": 17804, "train/loss_ctc": 1.0386004447937012, "train/loss_error": 0.5343742966651917, "train/loss_total": 0.6352195143699646 }, { "epoch": 4.756879508415709, "step": 17805, "train/loss_ctc": 0.9042220711708069, "train/loss_error": 0.43460604548454285, "train/loss_total": 0.5285292267799377 }, { "epoch": 4.757146673791077, "step": 17806, "train/loss_ctc": 0.4565974473953247, "train/loss_error": 0.44879135489463806, "train/loss_total": 0.45035257935523987 }, { "epoch": 4.7574138391664444, "step": 17807, "train/loss_ctc": 0.6073497533798218, "train/loss_error": 0.36399930715560913, "train/loss_total": 0.4126693904399872 }, { "epoch": 4.757681004541811, "step": 17808, "train/loss_ctc": 0.8824732303619385, "train/loss_error": 0.4015353322029114, "train/loss_total": 0.49772292375564575 }, { "epoch": 4.757948169917179, "step": 17809, "train/loss_ctc": 0.7886373996734619, "train/loss_error": 0.44003018736839294, "train/loss_total": 0.5097516775131226 }, { "epoch": 4.758215335292546, "grad_norm": 1.7930641174316406, "learning_rate": 1.4651349185145604e-06, "loss": 0.5077, "step": 17810 }, { "epoch": 4.758215335292546, "step": 17810, "train/loss_ctc": 0.7437920570373535, "train/loss_error": 0.42715537548065186, "train/loss_total": 0.49048271775245667 }, { "epoch": 4.758482500667913, "step": 17811, "train/loss_ctc": 1.077087640762329, "train/loss_error": 0.46409305930137634, "train/loss_total": 0.5866919755935669 }, { "epoch": 4.758749666043281, "step": 17812, "train/loss_ctc": 1.4844238758087158, "train/loss_error": 0.46170103549957275, "train/loss_total": 0.6662455797195435 }, { "epoch": 4.7590168314186485, "step": 17813, "train/loss_ctc": 0.8991585373878479, "train/loss_error": 0.45339417457580566, 
"train/loss_total": 0.5425470471382141 }, { "epoch": 4.759283996794015, "step": 17814, "train/loss_ctc": 0.5726519823074341, "train/loss_error": 0.41074085235595703, "train/loss_total": 0.44312310218811035 }, { "epoch": 4.759551162169383, "step": 17815, "train/loss_ctc": 0.9260194301605225, "train/loss_error": 0.40755409002304077, "train/loss_total": 0.5112471580505371 }, { "epoch": 4.7598183275447505, "step": 17816, "train/loss_ctc": 0.6665377020835876, "train/loss_error": 0.4863983690738678, "train/loss_total": 0.5224262475967407 }, { "epoch": 4.760085492920117, "step": 17817, "train/loss_ctc": 0.42948514223098755, "train/loss_error": 0.4067678153514862, "train/loss_total": 0.4113112986087799 }, { "epoch": 4.760352658295485, "step": 17818, "train/loss_ctc": 1.7605243921279907, "train/loss_error": 0.5262922048568726, "train/loss_total": 0.7731386423110962 }, { "epoch": 4.760619823670853, "step": 17819, "train/loss_ctc": 0.64337557554245, "train/loss_error": 0.5139471292495728, "train/loss_total": 0.5398328304290771 }, { "epoch": 4.760886989046219, "grad_norm": 4.241901397705078, "learning_rate": 1.4491049959925194e-06, "loss": 0.5487, "step": 17820 }, { "epoch": 4.760886989046219, "step": 17820, "train/loss_ctc": 0.6520276069641113, "train/loss_error": 0.38567692041397095, "train/loss_total": 0.43894708156585693 }, { "epoch": 4.761154154421587, "step": 17821, "train/loss_ctc": 0.47227948904037476, "train/loss_error": 0.36508846282958984, "train/loss_total": 0.3865266740322113 }, { "epoch": 4.761421319796955, "step": 17822, "train/loss_ctc": 0.2413521707057953, "train/loss_error": 0.3630565404891968, "train/loss_total": 0.33871567249298096 }, { "epoch": 4.761688485172321, "step": 17823, "train/loss_ctc": 0.36763638257980347, "train/loss_error": 0.40375328063964844, "train/loss_total": 0.3965299129486084 }, { "epoch": 4.761955650547689, "step": 17824, "train/loss_ctc": 0.567075252532959, "train/loss_error": 0.4188726842403412, "train/loss_total": 0.4485132098197937 
}, { "epoch": 4.762222815923057, "step": 17825, "train/loss_ctc": 0.8882749080657959, "train/loss_error": 0.4391450583934784, "train/loss_total": 0.5289710760116577 }, { "epoch": 4.762489981298423, "step": 17826, "train/loss_ctc": 0.33106881380081177, "train/loss_error": 0.4320056140422821, "train/loss_total": 0.411818265914917 }, { "epoch": 4.762757146673791, "step": 17827, "train/loss_ctc": 0.9150159358978271, "train/loss_error": 0.4465019702911377, "train/loss_total": 0.5402047634124756 }, { "epoch": 4.763024312049159, "step": 17828, "train/loss_ctc": 0.7321233153343201, "train/loss_error": 0.44330376386642456, "train/loss_total": 0.5010676980018616 }, { "epoch": 4.763291477424525, "step": 17829, "train/loss_ctc": 0.7489200830459595, "train/loss_error": 0.3961367607116699, "train/loss_total": 0.4666934013366699 }, { "epoch": 4.763558642799893, "grad_norm": 3.359762191772461, "learning_rate": 1.4330750734704783e-06, "loss": 0.4458, "step": 17830 }, { "epoch": 4.763558642799893, "step": 17830, "train/loss_ctc": 0.5553474426269531, "train/loss_error": 0.5105484127998352, "train/loss_total": 0.5195082426071167 }, { "epoch": 4.763825808175261, "step": 17831, "train/loss_ctc": 0.6247087717056274, "train/loss_error": 0.45203647017478943, "train/loss_total": 0.48657095432281494 }, { "epoch": 4.764092973550628, "step": 17832, "train/loss_ctc": 0.7104620933532715, "train/loss_error": 0.38268229365348816, "train/loss_total": 0.4482382535934448 }, { "epoch": 4.764360138925995, "step": 17833, "train/loss_ctc": 0.4965507984161377, "train/loss_error": 0.45372721552848816, "train/loss_total": 0.4622919261455536 }, { "epoch": 4.764627304301363, "step": 17834, "train/loss_ctc": 0.6276025772094727, "train/loss_error": 0.36501631140708923, "train/loss_total": 0.4175335764884949 }, { "epoch": 4.7648944696767295, "step": 17835, "train/loss_ctc": 0.6351682543754578, "train/loss_error": 0.4469599723815918, "train/loss_total": 0.4846016466617584 }, { "epoch": 4.765161635052097, "step": 
17836, "train/loss_ctc": 0.731653094291687, "train/loss_error": 0.377625048160553, "train/loss_total": 0.4484306573867798 }, { "epoch": 4.765428800427465, "step": 17837, "train/loss_ctc": 0.447774201631546, "train/loss_error": 0.4400081932544708, "train/loss_total": 0.44156140089035034 }, { "epoch": 4.765695965802832, "step": 17838, "train/loss_ctc": 1.0737850666046143, "train/loss_error": 0.43101736903190613, "train/loss_total": 0.5595709085464478 }, { "epoch": 4.765963131178199, "step": 17839, "train/loss_ctc": 1.1777195930480957, "train/loss_error": 0.434822678565979, "train/loss_total": 0.5834020972251892 }, { "epoch": 4.766230296553567, "grad_norm": 1.7513877153396606, "learning_rate": 1.417045150948437e-06, "loss": 0.4852, "step": 17840 }, { "epoch": 4.766230296553567, "step": 17840, "train/loss_ctc": 0.8380266427993774, "train/loss_error": 0.459576278924942, "train/loss_total": 0.5352663397789001 }, { "epoch": 4.7664974619289335, "step": 17841, "train/loss_ctc": 1.1258312463760376, "train/loss_error": 0.448638916015625, "train/loss_total": 0.5840773582458496 }, { "epoch": 4.766764627304301, "step": 17842, "train/loss_ctc": 0.3401084542274475, "train/loss_error": 0.47020938992500305, "train/loss_total": 0.444189190864563 }, { "epoch": 4.767031792679669, "step": 17843, "train/loss_ctc": 0.592936635017395, "train/loss_error": 0.37969771027565, "train/loss_total": 0.42234551906585693 }, { "epoch": 4.7672989580550365, "step": 17844, "train/loss_ctc": 1.211423635482788, "train/loss_error": 0.4467412829399109, "train/loss_total": 0.5996777415275574 }, { "epoch": 4.767566123430403, "step": 17845, "train/loss_ctc": 0.6960753202438354, "train/loss_error": 0.4263966679573059, "train/loss_total": 0.4803324341773987 }, { "epoch": 4.767833288805771, "step": 17846, "train/loss_ctc": 0.6091349720954895, "train/loss_error": 0.33500373363494873, "train/loss_total": 0.38982999324798584 }, { "epoch": 4.7681004541811385, "step": 17847, "train/loss_ctc": 0.4588238000869751, 
"train/loss_error": 0.46925342082977295, "train/loss_total": 0.4671674966812134 }, { "epoch": 4.768367619556505, "step": 17848, "train/loss_ctc": 1.036370038986206, "train/loss_error": 0.43233680725097656, "train/loss_total": 0.5531434416770935 }, { "epoch": 4.768634784931873, "step": 17849, "train/loss_ctc": 0.4049358367919922, "train/loss_error": 0.3759494125843048, "train/loss_total": 0.38174670934677124 }, { "epoch": 4.7689019503072405, "grad_norm": 1.8728073835372925, "learning_rate": 1.401015228426396e-06, "loss": 0.4858, "step": 17850 }, { "epoch": 4.7689019503072405, "step": 17850, "train/loss_ctc": 0.48835641145706177, "train/loss_error": 0.44686341285705566, "train/loss_total": 0.45516204833984375 }, { "epoch": 4.769169115682607, "step": 17851, "train/loss_ctc": 0.5185417532920837, "train/loss_error": 0.42297616600990295, "train/loss_total": 0.4420892894268036 }, { "epoch": 4.769436281057975, "step": 17852, "train/loss_ctc": 0.6173199415206909, "train/loss_error": 0.4126943349838257, "train/loss_total": 0.45361945033073425 }, { "epoch": 4.7697034464333425, "step": 17853, "train/loss_ctc": 0.3833634853363037, "train/loss_error": 0.43743690848350525, "train/loss_total": 0.4266222417354584 }, { "epoch": 4.769970611808709, "step": 17854, "train/loss_ctc": 0.6092774868011475, "train/loss_error": 0.45142215490341187, "train/loss_total": 0.4829932153224945 }, { "epoch": 4.770237777184077, "step": 17855, "train/loss_ctc": 0.5669255256652832, "train/loss_error": 0.40578576922416687, "train/loss_total": 0.4380137324333191 }, { "epoch": 4.770504942559445, "step": 17856, "train/loss_ctc": 0.4231296479701996, "train/loss_error": 0.4453081488609314, "train/loss_total": 0.440872460603714 }, { "epoch": 4.770772107934811, "step": 17857, "train/loss_ctc": 1.0197218656539917, "train/loss_error": 0.5120089054107666, "train/loss_total": 0.6135514974594116 }, { "epoch": 4.771039273310179, "step": 17858, "train/loss_ctc": 0.7229692339897156, "train/loss_error": 
0.4011329114437103, "train/loss_total": 0.4655001759529114 }, { "epoch": 4.771306438685547, "step": 17859, "train/loss_ctc": 0.6194677948951721, "train/loss_error": 0.46432197093963623, "train/loss_total": 0.4953511357307434 }, { "epoch": 4.771573604060913, "grad_norm": 1.7616395950317383, "learning_rate": 1.3849853059043549e-06, "loss": 0.4714, "step": 17860 }, { "epoch": 4.771573604060913, "step": 17860, "train/loss_ctc": 0.2986510097980499, "train/loss_error": 0.4147481322288513, "train/loss_total": 0.3915287256240845 }, { "epoch": 4.771840769436281, "step": 17861, "train/loss_ctc": 0.7945349216461182, "train/loss_error": 0.4587832987308502, "train/loss_total": 0.5259336233139038 }, { "epoch": 4.772107934811649, "step": 17862, "train/loss_ctc": 0.9624794721603394, "train/loss_error": 0.44238173961639404, "train/loss_total": 0.54640132188797 }, { "epoch": 4.772375100187015, "step": 17863, "train/loss_ctc": 0.3667225241661072, "train/loss_error": 0.404523104429245, "train/loss_total": 0.3969630002975464 }, { "epoch": 4.772642265562383, "step": 17864, "train/loss_ctc": 0.6761116981506348, "train/loss_error": 0.4130685329437256, "train/loss_total": 0.4656771719455719 }, { "epoch": 4.772909430937751, "step": 17865, "train/loss_ctc": 0.9611238241195679, "train/loss_error": 0.4831446707248688, "train/loss_total": 0.5787404775619507 }, { "epoch": 4.773176596313117, "step": 17866, "train/loss_ctc": 0.6731772422790527, "train/loss_error": 0.4869537353515625, "train/loss_total": 0.5241984128952026 }, { "epoch": 4.773443761688485, "step": 17867, "train/loss_ctc": 0.47345227003097534, "train/loss_error": 0.46069344878196716, "train/loss_total": 0.4632452130317688 }, { "epoch": 4.773710927063853, "step": 17868, "train/loss_ctc": 0.9310614466667175, "train/loss_error": 0.4369451701641083, "train/loss_total": 0.535768449306488 }, { "epoch": 4.77397809243922, "step": 17869, "train/loss_ctc": 0.7854105830192566, "train/loss_error": 0.3983118534088135, "train/loss_total": 
0.47573161125183105 }, { "epoch": 4.774245257814587, "grad_norm": 1.882328748703003, "learning_rate": 1.3689553833823138e-06, "loss": 0.4904, "step": 17870 }, { "epoch": 4.774245257814587, "step": 17870, "train/loss_ctc": 0.8909207582473755, "train/loss_error": 0.4593343436717987, "train/loss_total": 0.5456516742706299 }, { "epoch": 4.774512423189955, "step": 17871, "train/loss_ctc": 0.4327217936515808, "train/loss_error": 0.45355015993118286, "train/loss_total": 0.44938451051712036 }, { "epoch": 4.7747795885653215, "step": 17872, "train/loss_ctc": 0.5590107440948486, "train/loss_error": 0.3874121308326721, "train/loss_total": 0.4217318594455719 }, { "epoch": 4.775046753940689, "step": 17873, "train/loss_ctc": 0.4850349724292755, "train/loss_error": 0.39168012142181396, "train/loss_total": 0.41035109758377075 }, { "epoch": 4.775313919316057, "step": 17874, "train/loss_ctc": 0.853243350982666, "train/loss_error": 0.4487070143222809, "train/loss_total": 0.529614269733429 }, { "epoch": 4.775581084691424, "step": 17875, "train/loss_ctc": 0.42505571246147156, "train/loss_error": 0.44764819741249084, "train/loss_total": 0.4431297183036804 }, { "epoch": 4.775848250066791, "step": 17876, "train/loss_ctc": 0.4780992269515991, "train/loss_error": 0.4502772390842438, "train/loss_total": 0.45584166049957275 }, { "epoch": 4.776115415442159, "step": 17877, "train/loss_ctc": 0.2940419316291809, "train/loss_error": 0.34877124428749084, "train/loss_total": 0.33782538771629333 }, { "epoch": 4.776382580817526, "step": 17878, "train/loss_ctc": 0.7393748164176941, "train/loss_error": 0.45552417635917664, "train/loss_total": 0.5122942924499512 }, { "epoch": 4.776649746192893, "step": 17879, "train/loss_ctc": 0.4235820174217224, "train/loss_error": 0.4183180332183838, "train/loss_total": 0.4193708300590515 }, { "epoch": 4.776916911568261, "grad_norm": 1.50437593460083, "learning_rate": 1.3529254608602725e-06, "loss": 0.4525, "step": 17880 }, { "epoch": 4.776916911568261, "step": 17880, 
"train/loss_ctc": 0.7774572968482971, "train/loss_error": 0.4503079950809479, "train/loss_total": 0.5157378315925598 }, { "epoch": 4.7771840769436285, "step": 17881, "train/loss_ctc": 1.1013600826263428, "train/loss_error": 0.4915509521961212, "train/loss_total": 0.6135128140449524 }, { "epoch": 4.777451242318995, "step": 17882, "train/loss_ctc": 1.3444362878799438, "train/loss_error": 0.39530935883522034, "train/loss_total": 0.585134744644165 }, { "epoch": 4.777718407694363, "step": 17883, "train/loss_ctc": 1.2551448345184326, "train/loss_error": 0.4192669987678528, "train/loss_total": 0.5864425897598267 }, { "epoch": 4.7779855730697305, "step": 17884, "train/loss_ctc": 0.36315035820007324, "train/loss_error": 0.4343557357788086, "train/loss_total": 0.420114666223526 }, { "epoch": 4.778252738445097, "step": 17885, "train/loss_ctc": 0.7635008096694946, "train/loss_error": 0.4831055700778961, "train/loss_total": 0.5391846299171448 }, { "epoch": 4.778519903820465, "step": 17886, "train/loss_ctc": 0.2606302499771118, "train/loss_error": 0.409556120634079, "train/loss_total": 0.379770964384079 }, { "epoch": 4.7787870691958325, "step": 17887, "train/loss_ctc": 1.5386830568313599, "train/loss_error": 0.4088752567768097, "train/loss_total": 0.6348367929458618 }, { "epoch": 4.779054234571199, "step": 17888, "train/loss_ctc": 0.5685544610023499, "train/loss_error": 0.4509810507297516, "train/loss_total": 0.4744957685470581 }, { "epoch": 4.779321399946567, "step": 17889, "train/loss_ctc": 0.48683416843414307, "train/loss_error": 0.4658282995223999, "train/loss_total": 0.47002947330474854 }, { "epoch": 4.7795885653219345, "grad_norm": 1.6983680725097656, "learning_rate": 1.3368955383382312e-06, "loss": 0.5219, "step": 17890 }, { "epoch": 4.7795885653219345, "step": 17890, "train/loss_ctc": 1.2066295146942139, "train/loss_error": 0.4800207018852234, "train/loss_total": 0.6253424882888794 }, { "epoch": 4.779855730697301, "step": 17891, "train/loss_ctc": 0.6042020320892334, 
"train/loss_error": 0.45108067989349365, "train/loss_total": 0.4817049503326416 }, { "epoch": 4.780122896072669, "step": 17892, "train/loss_ctc": 1.3371233940124512, "train/loss_error": 0.40755391120910645, "train/loss_total": 0.5934678316116333 }, { "epoch": 4.780390061448037, "step": 17893, "train/loss_ctc": 0.601660430431366, "train/loss_error": 0.3908274173736572, "train/loss_total": 0.4329940378665924 }, { "epoch": 4.780657226823403, "step": 17894, "train/loss_ctc": 0.4755876660346985, "train/loss_error": 0.4029262065887451, "train/loss_total": 0.41745850443840027 }, { "epoch": 4.780924392198771, "step": 17895, "train/loss_ctc": 1.1559882164001465, "train/loss_error": 0.45933327078819275, "train/loss_total": 0.5986642837524414 }, { "epoch": 4.781191557574139, "step": 17896, "train/loss_ctc": 0.748695969581604, "train/loss_error": 0.4447937607765198, "train/loss_total": 0.5055742263793945 }, { "epoch": 4.781458722949505, "step": 17897, "train/loss_ctc": 0.3750830888748169, "train/loss_error": 0.42031610012054443, "train/loss_total": 0.41126951575279236 }, { "epoch": 4.781725888324873, "step": 17898, "train/loss_ctc": 0.20440179109573364, "train/loss_error": 0.4863937199115753, "train/loss_total": 0.4299953281879425 }, { "epoch": 4.781993053700241, "step": 17899, "train/loss_ctc": 0.9360346794128418, "train/loss_error": 0.47882336378097534, "train/loss_total": 0.5702656507492065 }, { "epoch": 4.782260219075608, "grad_norm": 2.934645175933838, "learning_rate": 1.3208656158161902e-06, "loss": 0.5067, "step": 17900 }, { "epoch": 4.782260219075608, "step": 17900, "train/loss_ctc": 0.3434651792049408, "train/loss_error": 0.4678524136543274, "train/loss_total": 0.4429749846458435 }, { "epoch": 4.782527384450975, "step": 17901, "train/loss_ctc": 2.1873064041137695, "train/loss_error": 0.4874305725097656, "train/loss_total": 0.8274057507514954 }, { "epoch": 4.782794549826343, "step": 17902, "train/loss_ctc": 1.3062580823898315, "train/loss_error": 0.47500839829444885, 
"train/loss_total": 0.6412583589553833 }, { "epoch": 4.783061715201709, "step": 17903, "train/loss_ctc": 0.6013012528419495, "train/loss_error": 0.41527923941612244, "train/loss_total": 0.4524836540222168 }, { "epoch": 4.783328880577077, "step": 17904, "train/loss_ctc": 1.4156298637390137, "train/loss_error": 0.439166784286499, "train/loss_total": 0.634459376335144 }, { "epoch": 4.783596045952445, "step": 17905, "train/loss_ctc": 1.021550178527832, "train/loss_error": 0.3807860314846039, "train/loss_total": 0.5089388489723206 }, { "epoch": 4.783863211327812, "step": 17906, "train/loss_ctc": 0.3869553208351135, "train/loss_error": 0.3959449827671051, "train/loss_total": 0.39414703845977783 }, { "epoch": 4.784130376703179, "step": 17907, "train/loss_ctc": 0.38265883922576904, "train/loss_error": 0.4533679783344269, "train/loss_total": 0.4392261505126953 }, { "epoch": 4.784397542078547, "step": 17908, "train/loss_ctc": 0.7858142852783203, "train/loss_error": 0.4108118414878845, "train/loss_total": 0.48581236600875854 }, { "epoch": 4.7846647074539135, "step": 17909, "train/loss_ctc": 0.25342482328414917, "train/loss_error": 0.3819788098335266, "train/loss_total": 0.3562680184841156 }, { "epoch": 4.784931872829281, "grad_norm": 1.1182390451431274, "learning_rate": 1.304835693294149e-06, "loss": 0.5183, "step": 17910 }, { "epoch": 4.784931872829281, "step": 17910, "train/loss_ctc": 0.902603805065155, "train/loss_error": 0.4635288417339325, "train/loss_total": 0.5513438582420349 }, { "epoch": 4.785199038204649, "step": 17911, "train/loss_ctc": 0.5872678756713867, "train/loss_error": 0.375771164894104, "train/loss_total": 0.418070524930954 }, { "epoch": 4.785466203580016, "step": 17912, "train/loss_ctc": 1.0177992582321167, "train/loss_error": 0.4390036165714264, "train/loss_total": 0.5547627806663513 }, { "epoch": 4.785733368955383, "step": 17913, "train/loss_ctc": 0.43168026208877563, "train/loss_error": 0.388421893119812, "train/loss_total": 0.39707356691360474 }, { 
"epoch": 4.786000534330751, "step": 17914, "train/loss_ctc": 0.850364089012146, "train/loss_error": 0.4397534430027008, "train/loss_total": 0.5218756198883057 }, { "epoch": 4.786267699706118, "step": 17915, "train/loss_ctc": 0.3398662805557251, "train/loss_error": 0.38832104206085205, "train/loss_total": 0.3786301016807556 }, { "epoch": 4.786534865081485, "step": 17916, "train/loss_ctc": 0.37795302271842957, "train/loss_error": 0.45193326473236084, "train/loss_total": 0.4371372163295746 }, { "epoch": 4.786802030456853, "step": 17917, "train/loss_ctc": 0.6027260422706604, "train/loss_error": 0.4583999216556549, "train/loss_total": 0.48726513981819153 }, { "epoch": 4.7870691958322205, "step": 17918, "train/loss_ctc": 0.5383256077766418, "train/loss_error": 0.4896535873413086, "train/loss_total": 0.4993880093097687 }, { "epoch": 4.787336361207587, "step": 17919, "train/loss_ctc": 0.6479157209396362, "train/loss_error": 0.4618346095085144, "train/loss_total": 0.4990508556365967 }, { "epoch": 4.787603526582955, "grad_norm": 2.589179277420044, "learning_rate": 1.288805770772108e-06, "loss": 0.4745, "step": 17920 }, { "epoch": 4.787603526582955, "step": 17920, "train/loss_ctc": 0.4428715407848358, "train/loss_error": 0.4156911373138428, "train/loss_total": 0.42112722992897034 }, { "epoch": 4.7878706919583225, "step": 17921, "train/loss_ctc": 0.8986090421676636, "train/loss_error": 0.4382533133029938, "train/loss_total": 0.5303244590759277 }, { "epoch": 4.788137857333689, "step": 17922, "train/loss_ctc": 0.4932406544685364, "train/loss_error": 0.3849125802516937, "train/loss_total": 0.4065782129764557 }, { "epoch": 4.788405022709057, "step": 17923, "train/loss_ctc": 0.7521152496337891, "train/loss_error": 0.39119753241539, "train/loss_total": 0.4633810818195343 }, { "epoch": 4.7886721880844245, "step": 17924, "train/loss_ctc": 0.7275123000144958, "train/loss_error": 0.39642325043678284, "train/loss_total": 0.46264106035232544 }, { "epoch": 4.788939353459791, "step": 17925, 
"train/loss_ctc": 0.6897815465927124, "train/loss_error": 0.3724445104598999, "train/loss_total": 0.4359118938446045 }, { "epoch": 4.789206518835159, "step": 17926, "train/loss_ctc": 0.31333616375923157, "train/loss_error": 0.48697429895401, "train/loss_total": 0.45224666595458984 }, { "epoch": 4.7894736842105265, "step": 17927, "train/loss_ctc": 0.2985658049583435, "train/loss_error": 0.43705374002456665, "train/loss_total": 0.40935614705085754 }, { "epoch": 4.789740849585893, "step": 17928, "train/loss_ctc": 0.5039925575256348, "train/loss_error": 0.3874512314796448, "train/loss_total": 0.41075950860977173 }, { "epoch": 4.790008014961261, "step": 17929, "train/loss_ctc": 0.9988065958023071, "train/loss_error": 0.40158700942993164, "train/loss_total": 0.5210309028625488 }, { "epoch": 4.790275180336629, "grad_norm": 2.927647113800049, "learning_rate": 1.2727758482500668e-06, "loss": 0.4513, "step": 17930 }, { "epoch": 4.790275180336629, "step": 17930, "train/loss_ctc": 0.4224208891391754, "train/loss_error": 0.4782790243625641, "train/loss_total": 0.4671074151992798 }, { "epoch": 4.790542345711996, "step": 17931, "train/loss_ctc": 0.7944916486740112, "train/loss_error": 0.4278859496116638, "train/loss_total": 0.5012071132659912 }, { "epoch": 4.790809511087363, "step": 17932, "train/loss_ctc": 0.6414053440093994, "train/loss_error": 0.40204983949661255, "train/loss_total": 0.4499209523200989 }, { "epoch": 4.791076676462731, "step": 17933, "train/loss_ctc": 0.7819476127624512, "train/loss_error": 0.4539763927459717, "train/loss_total": 0.5195706486701965 }, { "epoch": 4.791343841838097, "step": 17934, "train/loss_ctc": 0.4576777517795563, "train/loss_error": 0.4372240900993347, "train/loss_total": 0.44131481647491455 }, { "epoch": 4.791611007213465, "step": 17935, "train/loss_ctc": 0.5542089343070984, "train/loss_error": 0.4481743574142456, "train/loss_total": 0.46938127279281616 }, { "epoch": 4.791878172588833, "step": 17936, "train/loss_ctc": 1.1606591939926147, 
"train/loss_error": 0.4433367848396301, "train/loss_total": 0.586801290512085 }, { "epoch": 4.7921453379642, "step": 17937, "train/loss_ctc": 0.40553730726242065, "train/loss_error": 0.46742451190948486, "train/loss_total": 0.455047070980072 }, { "epoch": 4.792412503339567, "step": 17938, "train/loss_ctc": 0.8094421625137329, "train/loss_error": 0.4525297284126282, "train/loss_total": 0.523912250995636 }, { "epoch": 4.792679668714935, "step": 17939, "train/loss_ctc": 0.7859855890274048, "train/loss_error": 0.45084550976753235, "train/loss_total": 0.5178735256195068 }, { "epoch": 4.792946834090301, "grad_norm": 2.8674445152282715, "learning_rate": 1.2567459257280257e-06, "loss": 0.4932, "step": 17940 }, { "epoch": 4.792946834090301, "step": 17940, "train/loss_ctc": 1.3596453666687012, "train/loss_error": 0.4191977083683014, "train/loss_total": 0.6072872877120972 }, { "epoch": 4.793213999465669, "step": 17941, "train/loss_ctc": 1.3405098915100098, "train/loss_error": 0.4936198890209198, "train/loss_total": 0.6629979014396667 }, { "epoch": 4.793481164841037, "step": 17942, "train/loss_ctc": 0.5701404213905334, "train/loss_error": 0.3611379563808441, "train/loss_total": 0.40293845534324646 }, { "epoch": 4.793748330216404, "step": 17943, "train/loss_ctc": 0.731587827205658, "train/loss_error": 0.4885427951812744, "train/loss_total": 0.5371518135070801 }, { "epoch": 4.794015495591771, "step": 17944, "train/loss_ctc": 0.43638551235198975, "train/loss_error": 0.378030389547348, "train/loss_total": 0.3897014260292053 }, { "epoch": 4.794282660967139, "step": 17945, "train/loss_ctc": 0.3927392065525055, "train/loss_error": 0.43718841671943665, "train/loss_total": 0.42829856276512146 }, { "epoch": 4.794549826342506, "step": 17946, "train/loss_ctc": 0.6125897169113159, "train/loss_error": 0.4079158306121826, "train/loss_total": 0.4488506019115448 }, { "epoch": 4.794816991717873, "step": 17947, "train/loss_ctc": 1.2171881198883057, "train/loss_error": 0.4290323257446289, 
"train/loss_total": 0.5866634845733643 }, { "epoch": 4.795084157093241, "step": 17948, "train/loss_ctc": 0.8939347863197327, "train/loss_error": 0.4329845905303955, "train/loss_total": 0.525174617767334 }, { "epoch": 4.795351322468608, "step": 17949, "train/loss_ctc": 0.6119652390480042, "train/loss_error": 0.44149500131607056, "train/loss_total": 0.4755890369415283 }, { "epoch": 4.795618487843975, "grad_norm": 2.106440782546997, "learning_rate": 1.2407160032059846e-06, "loss": 0.5065, "step": 17950 }, { "epoch": 4.795618487843975, "step": 17950, "train/loss_ctc": 0.6566072702407837, "train/loss_error": 0.4536110460758209, "train/loss_total": 0.49421030282974243 }, { "epoch": 4.795885653219343, "step": 17951, "train/loss_ctc": 0.5258400440216064, "train/loss_error": 0.41037705540657043, "train/loss_total": 0.43346965312957764 }, { "epoch": 4.79615281859471, "step": 17952, "train/loss_ctc": 0.8639580011367798, "train/loss_error": 0.4655342102050781, "train/loss_total": 0.5452189445495605 }, { "epoch": 4.796419983970077, "step": 17953, "train/loss_ctc": 0.44351130723953247, "train/loss_error": 0.3542468249797821, "train/loss_total": 0.37209972739219666 }, { "epoch": 4.796687149345445, "step": 17954, "train/loss_ctc": 0.5976388454437256, "train/loss_error": 0.460261732339859, "train/loss_total": 0.48773717880249023 }, { "epoch": 4.7969543147208125, "step": 17955, "train/loss_ctc": 0.46980607509613037, "train/loss_error": 0.42455893754959106, "train/loss_total": 0.43360835313796997 }, { "epoch": 4.797221480096179, "step": 17956, "train/loss_ctc": 1.042350172996521, "train/loss_error": 0.447708398103714, "train/loss_total": 0.5666367411613464 }, { "epoch": 4.797488645471547, "step": 17957, "train/loss_ctc": 0.917474627494812, "train/loss_error": 0.4676745533943176, "train/loss_total": 0.5576345920562744 }, { "epoch": 4.7977558108469145, "step": 17958, "train/loss_ctc": 1.1802351474761963, "train/loss_error": 0.4190349578857422, "train/loss_total": 0.571274995803833 }, { 
"epoch": 4.798022976222281, "step": 17959, "train/loss_ctc": 0.6639081239700317, "train/loss_error": 0.3968373239040375, "train/loss_total": 0.4502514898777008 }, { "epoch": 4.798290141597649, "grad_norm": 1.5370290279388428, "learning_rate": 1.2246860806839435e-06, "loss": 0.4912, "step": 17960 }, { "epoch": 4.798290141597649, "step": 17960, "train/loss_ctc": 1.0368256568908691, "train/loss_error": 0.45313534140586853, "train/loss_total": 0.5698733925819397 }, { "epoch": 4.7985573069730165, "step": 17961, "train/loss_ctc": 0.705889105796814, "train/loss_error": 0.4344640076160431, "train/loss_total": 0.48874902725219727 }, { "epoch": 4.798824472348383, "step": 17962, "train/loss_ctc": 0.612443208694458, "train/loss_error": 0.3845752775669098, "train/loss_total": 0.4301488697528839 }, { "epoch": 4.799091637723751, "step": 17963, "train/loss_ctc": 0.7636896371841431, "train/loss_error": 0.38320013880729675, "train/loss_total": 0.4592980742454529 }, { "epoch": 4.7993588030991186, "step": 17964, "train/loss_ctc": 0.7524105310440063, "train/loss_error": 0.46630173921585083, "train/loss_total": 0.5235235095024109 }, { "epoch": 4.799625968474485, "step": 17965, "train/loss_ctc": 0.5309344530105591, "train/loss_error": 0.3387247622013092, "train/loss_total": 0.3771667182445526 }, { "epoch": 4.799893133849853, "step": 17966, "train/loss_ctc": 0.4417799711227417, "train/loss_error": 0.4715944230556488, "train/loss_total": 0.46563154458999634 }, { "epoch": 4.800160299225221, "step": 17967, "train/loss_ctc": 0.5593454837799072, "train/loss_error": 0.4200862944126129, "train/loss_total": 0.44793814420700073 }, { "epoch": 4.800427464600588, "step": 17968, "train/loss_ctc": 0.593272864818573, "train/loss_error": 0.5151121020317078, "train/loss_total": 0.5307442545890808 }, { "epoch": 4.800694629975955, "step": 17969, "train/loss_ctc": 0.36410757899284363, "train/loss_error": 0.5112338066101074, "train/loss_total": 0.4818085730075836 }, { "epoch": 4.800961795351323, "grad_norm": 
1.8221518993377686, "learning_rate": 1.2086561581619023e-06, "loss": 0.4775, "step": 17970 }, { "epoch": 4.800961795351323, "step": 17970, "train/loss_ctc": 0.5978918075561523, "train/loss_error": 0.45862051844596863, "train/loss_total": 0.48647478222846985 }, { "epoch": 4.801228960726689, "step": 17971, "train/loss_ctc": 0.6451952457427979, "train/loss_error": 0.4233480989933014, "train/loss_total": 0.4677175283432007 }, { "epoch": 4.801496126102057, "step": 17972, "train/loss_ctc": 0.9219377636909485, "train/loss_error": 0.5223790407180786, "train/loss_total": 0.6022908091545105 }, { "epoch": 4.801763291477425, "step": 17973, "train/loss_ctc": 0.46235403418540955, "train/loss_error": 0.3991027772426605, "train/loss_total": 0.4117530584335327 }, { "epoch": 4.802030456852792, "step": 17974, "train/loss_ctc": 0.5287991166114807, "train/loss_error": 0.45074570178985596, "train/loss_total": 0.46635639667510986 }, { "epoch": 4.802297622228159, "step": 17975, "train/loss_ctc": 1.015518307685852, "train/loss_error": 0.4222695529460907, "train/loss_total": 0.540919303894043 }, { "epoch": 4.802564787603527, "step": 17976, "train/loss_ctc": 0.7316847443580627, "train/loss_error": 0.40677475929260254, "train/loss_total": 0.4717567563056946 }, { "epoch": 4.802831952978894, "step": 17977, "train/loss_ctc": 0.8230159282684326, "train/loss_error": 0.5576456785202026, "train/loss_total": 0.6107197403907776 }, { "epoch": 4.803099118354261, "step": 17978, "train/loss_ctc": 0.4828030467033386, "train/loss_error": 0.4398156702518463, "train/loss_total": 0.4484131336212158 }, { "epoch": 4.803366283729629, "step": 17979, "train/loss_ctc": 0.23254282772541046, "train/loss_error": 0.37345319986343384, "train/loss_total": 0.34527111053466797 }, { "epoch": 4.803633449104996, "grad_norm": 1.230653166770935, "learning_rate": 1.1926262356398612e-06, "loss": 0.4852, "step": 17980 }, { "epoch": 4.803633449104996, "step": 17980, "train/loss_ctc": 0.43449074029922485, "train/loss_error": 
0.41339489817619324, "train/loss_total": 0.41761407256126404 }, { "epoch": 4.803900614480363, "step": 17981, "train/loss_ctc": 1.288603663444519, "train/loss_error": 0.4550016522407532, "train/loss_total": 0.6217221021652222 }, { "epoch": 4.804167779855731, "step": 17982, "train/loss_ctc": 0.6117146015167236, "train/loss_error": 0.4314895570278168, "train/loss_total": 0.4675345718860626 }, { "epoch": 4.804434945231098, "step": 17983, "train/loss_ctc": 0.6031283140182495, "train/loss_error": 0.39090630412101746, "train/loss_total": 0.43335071206092834 }, { "epoch": 4.804702110606465, "step": 17984, "train/loss_ctc": 0.6543954610824585, "train/loss_error": 0.39400625228881836, "train/loss_total": 0.44608408212661743 }, { "epoch": 4.804969275981833, "step": 17985, "train/loss_ctc": 0.49758610129356384, "train/loss_error": 0.41802161931991577, "train/loss_total": 0.4339345395565033 }, { "epoch": 4.8052364413572, "step": 17986, "train/loss_ctc": 0.38522711396217346, "train/loss_error": 0.3621543347835541, "train/loss_total": 0.36676889657974243 }, { "epoch": 4.805503606732567, "step": 17987, "train/loss_ctc": 0.5311294794082642, "train/loss_error": 0.40729010105133057, "train/loss_total": 0.4320580065250397 }, { "epoch": 4.805770772107935, "step": 17988, "train/loss_ctc": 0.6971666812896729, "train/loss_error": 0.4146103262901306, "train/loss_total": 0.471121609210968 }, { "epoch": 4.806037937483302, "step": 17989, "train/loss_ctc": 1.145848274230957, "train/loss_error": 0.45188719034194946, "train/loss_total": 0.590679407119751 }, { "epoch": 4.806305102858669, "grad_norm": 1.3010269403457642, "learning_rate": 1.17659631311782e-06, "loss": 0.4681, "step": 17990 }, { "epoch": 4.806305102858669, "step": 17990, "train/loss_ctc": 0.22714585065841675, "train/loss_error": 0.4348805546760559, "train/loss_total": 0.3933336138725281 }, { "epoch": 4.806572268234037, "step": 17991, "train/loss_ctc": 0.8523625731468201, "train/loss_error": 0.48395073413848877, "train/loss_total": 
0.557633101940155 }, { "epoch": 4.8068394336094045, "step": 17992, "train/loss_ctc": 0.4351356029510498, "train/loss_error": 0.4283953309059143, "train/loss_total": 0.4297434091567993 }, { "epoch": 4.807106598984771, "step": 17993, "train/loss_ctc": 0.9464124441146851, "train/loss_error": 0.5278052687644958, "train/loss_total": 0.6115267276763916 }, { "epoch": 4.807373764360139, "step": 17994, "train/loss_ctc": 0.6084566712379456, "train/loss_error": 0.4522766172885895, "train/loss_total": 0.48351263999938965 }, { "epoch": 4.8076409297355065, "step": 17995, "train/loss_ctc": 0.9424090385437012, "train/loss_error": 0.4726148843765259, "train/loss_total": 0.5665737390518188 }, { "epoch": 4.807908095110873, "step": 17996, "train/loss_ctc": 0.49087828397750854, "train/loss_error": 0.39964622259140015, "train/loss_total": 0.4178926348686218 }, { "epoch": 4.808175260486241, "step": 17997, "train/loss_ctc": 0.8580548763275146, "train/loss_error": 0.46700403094291687, "train/loss_total": 0.5452141761779785 }, { "epoch": 4.8084424258616085, "step": 17998, "train/loss_ctc": 0.5705268383026123, "train/loss_error": 0.4922540485858917, "train/loss_total": 0.5079085826873779 }, { "epoch": 4.808709591236976, "step": 17999, "train/loss_ctc": 0.4208890497684479, "train/loss_error": 0.30481991171836853, "train/loss_total": 0.3280337452888489 }, { "epoch": 4.808976756612343, "grad_norm": 2.1525347232818604, "learning_rate": 1.1605663905957788e-06, "loss": 0.4841, "step": 18000 }, { "epoch": 4.808976756612343, "step": 18000, "train/loss_ctc": 0.8218168020248413, "train/loss_error": 0.5394428372383118, "train/loss_total": 0.5959176421165466 }, { "epoch": 4.809243921987711, "step": 18001, "train/loss_ctc": 0.5483487248420715, "train/loss_error": 0.4412526488304138, "train/loss_total": 0.4626718759536743 }, { "epoch": 4.809511087363077, "step": 18002, "train/loss_ctc": 0.8866704702377319, "train/loss_error": 0.4461979866027832, "train/loss_total": 0.534292459487915 }, { "epoch": 
4.809778252738445, "step": 18003, "train/loss_ctc": 0.646774172782898, "train/loss_error": 0.45538705587387085, "train/loss_total": 0.4936644732952118 }, { "epoch": 4.810045418113813, "step": 18004, "train/loss_ctc": 0.6627479791641235, "train/loss_error": 0.3859901428222656, "train/loss_total": 0.44134169816970825 }, { "epoch": 4.81031258348918, "step": 18005, "train/loss_ctc": 1.0951963663101196, "train/loss_error": 0.45741164684295654, "train/loss_total": 0.584968626499176 }, { "epoch": 4.810579748864547, "step": 18006, "train/loss_ctc": 0.5489899516105652, "train/loss_error": 0.442021906375885, "train/loss_total": 0.4634155035018921 }, { "epoch": 4.810846914239915, "step": 18007, "train/loss_ctc": 0.6237604022026062, "train/loss_error": 0.466413676738739, "train/loss_total": 0.49788302183151245 }, { "epoch": 4.811114079615281, "step": 18008, "train/loss_ctc": 0.46116191148757935, "train/loss_error": 0.38811224699020386, "train/loss_total": 0.40272217988967896 }, { "epoch": 4.811381244990649, "step": 18009, "train/loss_ctc": 0.9529362916946411, "train/loss_error": 0.4630804657936096, "train/loss_total": 0.5610516667366028 }, { "epoch": 4.811648410366017, "grad_norm": 1.3913418054580688, "learning_rate": 1.1445364680737376e-06, "loss": 0.5038, "step": 18010 }, { "epoch": 4.811648410366017, "step": 18010, "train/loss_ctc": 0.7137866616249084, "train/loss_error": 0.4008491635322571, "train/loss_total": 0.46343666315078735 }, { "epoch": 4.811915575741384, "step": 18011, "train/loss_ctc": 0.1923312246799469, "train/loss_error": 0.44894638657569885, "train/loss_total": 0.39762336015701294 }, { "epoch": 4.812182741116751, "step": 18012, "train/loss_ctc": 0.6181150674819946, "train/loss_error": 0.4156830310821533, "train/loss_total": 0.456169456243515 }, { "epoch": 4.812449906492119, "step": 18013, "train/loss_ctc": 0.4834859371185303, "train/loss_error": 0.37157726287841797, "train/loss_total": 0.3939589858055115 }, { "epoch": 4.812717071867486, "step": 18014, 
"train/loss_ctc": 0.9598456621170044, "train/loss_error": 0.460456907749176, "train/loss_total": 0.5603346824645996 }, { "epoch": 4.812984237242853, "step": 18015, "train/loss_ctc": 0.4708560109138489, "train/loss_error": 0.402392715215683, "train/loss_total": 0.4160853624343872 }, { "epoch": 4.813251402618221, "step": 18016, "train/loss_ctc": 0.8789380788803101, "train/loss_error": 0.46551382541656494, "train/loss_total": 0.5481986999511719 }, { "epoch": 4.813518567993588, "step": 18017, "train/loss_ctc": 0.7174457907676697, "train/loss_error": 0.42622435092926025, "train/loss_total": 0.48446863889694214 }, { "epoch": 4.813785733368955, "step": 18018, "train/loss_ctc": 0.4670693874359131, "train/loss_error": 0.47769325971603394, "train/loss_total": 0.4755685031414032 }, { "epoch": 4.814052898744323, "step": 18019, "train/loss_ctc": 1.399422526359558, "train/loss_error": 0.4699494540691376, "train/loss_total": 0.6558440923690796 }, { "epoch": 4.81432006411969, "grad_norm": 2.277458667755127, "learning_rate": 1.1285065455516965e-06, "loss": 0.4852, "step": 18020 }, { "epoch": 4.81432006411969, "step": 18020, "train/loss_ctc": 1.0598984956741333, "train/loss_error": 0.38824424147605896, "train/loss_total": 0.5225750803947449 }, { "epoch": 4.814587229495057, "step": 18021, "train/loss_ctc": 0.6000195741653442, "train/loss_error": 0.45552000403404236, "train/loss_total": 0.48441991209983826 }, { "epoch": 4.814854394870425, "step": 18022, "train/loss_ctc": 1.07257878780365, "train/loss_error": 0.461448073387146, "train/loss_total": 0.5836742520332336 }, { "epoch": 4.815121560245792, "step": 18023, "train/loss_ctc": 0.5331679582595825, "train/loss_error": 0.4725797772407532, "train/loss_total": 0.4846974313259125 }, { "epoch": 4.815388725621159, "step": 18024, "train/loss_ctc": 0.9665170907974243, "train/loss_error": 0.43109169602394104, "train/loss_total": 0.5381767749786377 }, { "epoch": 4.815655890996527, "step": 18025, "train/loss_ctc": 0.5581978559494019, 
"train/loss_error": 0.42801016569137573, "train/loss_total": 0.4540477395057678 }, { "epoch": 4.815923056371894, "step": 18026, "train/loss_ctc": 0.68490070104599, "train/loss_error": 0.4001888930797577, "train/loss_total": 0.4571312665939331 }, { "epoch": 4.816190221747261, "step": 18027, "train/loss_ctc": 0.6095929741859436, "train/loss_error": 0.46639594435691833, "train/loss_total": 0.4950353503227234 }, { "epoch": 4.816457387122629, "step": 18028, "train/loss_ctc": 0.33096227049827576, "train/loss_error": 0.34451574087142944, "train/loss_total": 0.34180504083633423 }, { "epoch": 4.8167245524979965, "step": 18029, "train/loss_ctc": 1.0257487297058105, "train/loss_error": 0.4280364513397217, "train/loss_total": 0.5475789308547974 }, { "epoch": 4.816991717873364, "grad_norm": 3.0076324939727783, "learning_rate": 1.1124766230296554e-06, "loss": 0.4909, "step": 18030 }, { "epoch": 4.816991717873364, "step": 18030, "train/loss_ctc": 0.49455177783966064, "train/loss_error": 0.5284854769706726, "train/loss_total": 0.5216987133026123 }, { "epoch": 4.817258883248731, "step": 18031, "train/loss_ctc": 0.5854388475418091, "train/loss_error": 0.4730057716369629, "train/loss_total": 0.4954923987388611 }, { "epoch": 4.8175260486240985, "step": 18032, "train/loss_ctc": 0.6982865333557129, "train/loss_error": 0.41931262612342834, "train/loss_total": 0.47510743141174316 }, { "epoch": 4.817793213999465, "step": 18033, "train/loss_ctc": 0.7968103885650635, "train/loss_error": 0.3981354236602783, "train/loss_total": 0.4778704345226288 }, { "epoch": 4.818060379374833, "step": 18034, "train/loss_ctc": 0.33331364393234253, "train/loss_error": 0.47405850887298584, "train/loss_total": 0.4459095299243927 }, { "epoch": 4.8183275447502005, "step": 18035, "train/loss_ctc": 0.4136737883090973, "train/loss_error": 0.4498891532421112, "train/loss_total": 0.4426460862159729 }, { "epoch": 4.818594710125568, "step": 18036, "train/loss_ctc": 0.4200325012207031, "train/loss_error": 
0.4942103624343872, "train/loss_total": 0.47937482595443726 }, { "epoch": 4.818861875500935, "step": 18037, "train/loss_ctc": 1.4615205526351929, "train/loss_error": 0.49664685130119324, "train/loss_total": 0.68962162733078 }, { "epoch": 4.819129040876303, "step": 18038, "train/loss_ctc": 0.5208050012588501, "train/loss_error": 0.47660598158836365, "train/loss_total": 0.4854457974433899 }, { "epoch": 4.819396206251669, "step": 18039, "train/loss_ctc": 0.6050524115562439, "train/loss_error": 0.44516992568969727, "train/loss_total": 0.4771464169025421 }, { "epoch": 4.819663371627037, "grad_norm": 2.4461019039154053, "learning_rate": 1.0964467005076143e-06, "loss": 0.499, "step": 18040 }, { "epoch": 4.819663371627037, "step": 18040, "train/loss_ctc": 0.45865845680236816, "train/loss_error": 0.3897349238395691, "train/loss_total": 0.4035196304321289 }, { "epoch": 4.819930537002405, "step": 18041, "train/loss_ctc": 0.5416836738586426, "train/loss_error": 0.43337687849998474, "train/loss_total": 0.45503824949264526 }, { "epoch": 4.820197702377772, "step": 18042, "train/loss_ctc": 0.7538717985153198, "train/loss_error": 0.4161330461502075, "train/loss_total": 0.4836808145046234 }, { "epoch": 4.820464867753139, "step": 18043, "train/loss_ctc": 1.191009759902954, "train/loss_error": 0.39587464928627014, "train/loss_total": 0.554901659488678 }, { "epoch": 4.820732033128507, "step": 18044, "train/loss_ctc": 0.8037886619567871, "train/loss_error": 0.4305901527404785, "train/loss_total": 0.5052298903465271 }, { "epoch": 4.820999198503874, "step": 18045, "train/loss_ctc": 0.801609992980957, "train/loss_error": 0.48582491278648376, "train/loss_total": 0.5489819049835205 }, { "epoch": 4.821266363879241, "step": 18046, "train/loss_ctc": 0.6109875440597534, "train/loss_error": 0.3725121319293976, "train/loss_total": 0.4202072322368622 }, { "epoch": 4.821533529254609, "step": 18047, "train/loss_ctc": 1.0013599395751953, "train/loss_error": 0.48590418696403503, "train/loss_total": 
0.5889953374862671 }, { "epoch": 4.821800694629976, "step": 18048, "train/loss_ctc": 0.4139474034309387, "train/loss_error": 0.42795905470848083, "train/loss_total": 0.42515674233436584 }, { "epoch": 4.822067860005343, "step": 18049, "train/loss_ctc": 0.17087116837501526, "train/loss_error": 0.43498629331588745, "train/loss_total": 0.38216328620910645 }, { "epoch": 4.822335025380711, "grad_norm": 1.563861608505249, "learning_rate": 1.080416777985573e-06, "loss": 0.4768, "step": 18050 }, { "epoch": 4.822335025380711, "step": 18050, "train/loss_ctc": 0.35417717695236206, "train/loss_error": 0.35074567794799805, "train/loss_total": 0.3514319956302643 }, { "epoch": 4.822602190756078, "step": 18051, "train/loss_ctc": 0.30713891983032227, "train/loss_error": 0.36476200819015503, "train/loss_total": 0.3532373905181885 }, { "epoch": 4.822869356131445, "step": 18052, "train/loss_ctc": 0.5144767165184021, "train/loss_error": 0.4125155508518219, "train/loss_total": 0.4329077899456024 }, { "epoch": 4.823136521506813, "step": 18053, "train/loss_ctc": 0.24601447582244873, "train/loss_error": 0.39805665612220764, "train/loss_total": 0.3676482141017914 }, { "epoch": 4.82340368688218, "step": 18054, "train/loss_ctc": 0.8258658647537231, "train/loss_error": 0.49692273139953613, "train/loss_total": 0.5627113580703735 }, { "epoch": 4.823670852257547, "step": 18055, "train/loss_ctc": 0.8287380933761597, "train/loss_error": 0.4496263563632965, "train/loss_total": 0.525448739528656 }, { "epoch": 4.823938017632915, "step": 18056, "train/loss_ctc": 0.47290676832199097, "train/loss_error": 0.4632055461406708, "train/loss_total": 0.4651457965373993 }, { "epoch": 4.824205183008282, "step": 18057, "train/loss_ctc": 0.6680493354797363, "train/loss_error": 0.45225977897644043, "train/loss_total": 0.4954177141189575 }, { "epoch": 4.824472348383649, "step": 18058, "train/loss_ctc": 0.5068225264549255, "train/loss_error": 0.4350266754627228, "train/loss_total": 0.4493858814239502 }, { "epoch": 
4.824739513759017, "step": 18059, "train/loss_ctc": 0.49780529737472534, "train/loss_error": 0.47930777072906494, "train/loss_total": 0.4830072820186615 }, { "epoch": 4.825006679134384, "grad_norm": 2.957578420639038, "learning_rate": 1.064386855463532e-06, "loss": 0.4486, "step": 18060 }, { "epoch": 4.825006679134384, "step": 18060, "train/loss_ctc": 0.23101527988910675, "train/loss_error": 0.3841402530670166, "train/loss_total": 0.35351526737213135 }, { "epoch": 4.825273844509751, "step": 18061, "train/loss_ctc": 0.9913153648376465, "train/loss_error": 0.4363393783569336, "train/loss_total": 0.5473345518112183 }, { "epoch": 4.825541009885119, "step": 18062, "train/loss_ctc": 0.8285722732543945, "train/loss_error": 0.509146511554718, "train/loss_total": 0.5730316638946533 }, { "epoch": 4.825808175260486, "step": 18063, "train/loss_ctc": 0.6658003330230713, "train/loss_error": 0.41724422574043274, "train/loss_total": 0.4669554829597473 }, { "epoch": 4.826075340635853, "step": 18064, "train/loss_ctc": 1.5107778310775757, "train/loss_error": 0.5142271518707275, "train/loss_total": 0.713537335395813 }, { "epoch": 4.826342506011221, "step": 18065, "train/loss_ctc": 0.6427382826805115, "train/loss_error": 0.43348821997642517, "train/loss_total": 0.4753382205963135 }, { "epoch": 4.8266096713865885, "step": 18066, "train/loss_ctc": 0.6502116322517395, "train/loss_error": 0.46912682056427, "train/loss_total": 0.5053437948226929 }, { "epoch": 4.826876836761956, "step": 18067, "train/loss_ctc": 0.245322585105896, "train/loss_error": 0.3774554133415222, "train/loss_total": 0.3510288596153259 }, { "epoch": 4.827144002137323, "step": 18068, "train/loss_ctc": 0.5837273001670837, "train/loss_error": 0.42870578169822693, "train/loss_total": 0.45971009135246277 }, { "epoch": 4.8274111675126905, "step": 18069, "train/loss_ctc": 1.0260586738586426, "train/loss_error": 0.406632661819458, "train/loss_total": 0.5305178761482239 }, { "epoch": 4.827678332888057, "grad_norm": 
4.036640644073486, "learning_rate": 1.048356932941491e-06, "loss": 0.4976, "step": 18070 }, { "epoch": 4.827678332888057, "step": 18070, "train/loss_ctc": 0.932134747505188, "train/loss_error": 0.45889273285865784, "train/loss_total": 0.5535411238670349 }, { "epoch": 4.827945498263425, "step": 18071, "train/loss_ctc": 1.0705749988555908, "train/loss_error": 0.4331783652305603, "train/loss_total": 0.5606576800346375 }, { "epoch": 4.8282126636387925, "step": 18072, "train/loss_ctc": 0.49238473176956177, "train/loss_error": 0.4414674937725067, "train/loss_total": 0.4516509473323822 }, { "epoch": 4.82847982901416, "step": 18073, "train/loss_ctc": 1.1900070905685425, "train/loss_error": 0.43053480982780457, "train/loss_total": 0.5824292898178101 }, { "epoch": 4.828746994389527, "step": 18074, "train/loss_ctc": 0.7677332162857056, "train/loss_error": 0.3881378173828125, "train/loss_total": 0.46405690908432007 }, { "epoch": 4.829014159764895, "step": 18075, "train/loss_ctc": 0.6424677968025208, "train/loss_error": 0.46326589584350586, "train/loss_total": 0.4991062879562378 }, { "epoch": 4.829281325140262, "step": 18076, "train/loss_ctc": 0.6925147175788879, "train/loss_error": 0.4413549304008484, "train/loss_total": 0.4915868639945984 }, { "epoch": 4.829548490515629, "step": 18077, "train/loss_ctc": 0.5666014552116394, "train/loss_error": 0.44651955366134644, "train/loss_total": 0.47053593397140503 }, { "epoch": 4.829815655890997, "step": 18078, "train/loss_ctc": 0.817286491394043, "train/loss_error": 0.43951651453971863, "train/loss_total": 0.5150705575942993 }, { "epoch": 4.830082821266364, "step": 18079, "train/loss_ctc": 0.9748518466949463, "train/loss_error": 0.4487849473953247, "train/loss_total": 0.5539983510971069 }, { "epoch": 4.830349986641731, "grad_norm": 1.3471280336380005, "learning_rate": 1.0323270104194496e-06, "loss": 0.5143, "step": 18080 }, { "epoch": 4.830349986641731, "step": 18080, "train/loss_ctc": 0.7988733649253845, "train/loss_error": 
0.48437076807022095, "train/loss_total": 0.5472713112831116 }, { "epoch": 4.830617152017099, "step": 18081, "train/loss_ctc": 0.5463530421257019, "train/loss_error": 0.46789851784706116, "train/loss_total": 0.48358941078186035 }, { "epoch": 4.830884317392466, "step": 18082, "train/loss_ctc": 0.47674450278282166, "train/loss_error": 0.2780880033969879, "train/loss_total": 0.3178192973136902 }, { "epoch": 4.831151482767833, "step": 18083, "train/loss_ctc": 1.0808026790618896, "train/loss_error": 0.3982691168785095, "train/loss_total": 0.5347758531570435 }, { "epoch": 4.831418648143201, "step": 18084, "train/loss_ctc": 0.8793982267379761, "train/loss_error": 0.41607701778411865, "train/loss_total": 0.5087412595748901 }, { "epoch": 4.831685813518568, "step": 18085, "train/loss_ctc": 0.6723450422286987, "train/loss_error": 0.4468366503715515, "train/loss_total": 0.49193835258483887 }, { "epoch": 4.831952978893935, "step": 18086, "train/loss_ctc": 1.071502447128296, "train/loss_error": 0.4389323592185974, "train/loss_total": 0.5654463768005371 }, { "epoch": 4.832220144269303, "step": 18087, "train/loss_ctc": 0.8412153720855713, "train/loss_error": 0.5200888514518738, "train/loss_total": 0.5843141674995422 }, { "epoch": 4.83248730964467, "step": 18088, "train/loss_ctc": 0.4421629011631012, "train/loss_error": 0.40965452790260315, "train/loss_total": 0.41615620255470276 }, { "epoch": 4.832754475020037, "step": 18089, "train/loss_ctc": 0.7893904447555542, "train/loss_error": 0.3991202414035797, "train/loss_total": 0.4771742820739746 }, { "epoch": 4.833021640395405, "grad_norm": 1.8688865900039673, "learning_rate": 1.0162970878974084e-06, "loss": 0.4927, "step": 18090 }, { "epoch": 4.833021640395405, "step": 18090, "train/loss_ctc": 0.29020988941192627, "train/loss_error": 0.46448901295661926, "train/loss_total": 0.4296332001686096 }, { "epoch": 4.833288805770772, "step": 18091, "train/loss_ctc": 0.5667427778244019, "train/loss_error": 0.4277563691139221, "train/loss_total": 
0.45555365085601807 }, { "epoch": 4.833555971146139, "step": 18092, "train/loss_ctc": 0.3661409616470337, "train/loss_error": 0.41483575105667114, "train/loss_total": 0.4050968289375305 }, { "epoch": 4.833823136521507, "step": 18093, "train/loss_ctc": 0.4595188498497009, "train/loss_error": 0.3817926049232483, "train/loss_total": 0.3973378539085388 }, { "epoch": 4.834090301896874, "step": 18094, "train/loss_ctc": 0.5022384524345398, "train/loss_error": 0.43097999691963196, "train/loss_total": 0.44523167610168457 }, { "epoch": 4.834357467272241, "step": 18095, "train/loss_ctc": 1.4110487699508667, "train/loss_error": 0.4410006105899811, "train/loss_total": 0.6350102424621582 }, { "epoch": 4.834624632647609, "step": 18096, "train/loss_ctc": 0.5726690292358398, "train/loss_error": 0.45623818039894104, "train/loss_total": 0.4795243740081787 }, { "epoch": 4.834891798022976, "step": 18097, "train/loss_ctc": 1.0279620885849, "train/loss_error": 0.4406709671020508, "train/loss_total": 0.5581291913986206 }, { "epoch": 4.835158963398344, "step": 18098, "train/loss_ctc": 1.317735195159912, "train/loss_error": 0.46853047609329224, "train/loss_total": 0.6383714079856873 }, { "epoch": 4.835426128773711, "step": 18099, "train/loss_ctc": 0.654671311378479, "train/loss_error": 0.4698544144630432, "train/loss_total": 0.5068178176879883 }, { "epoch": 4.8356932941490784, "grad_norm": 2.177523136138916, "learning_rate": 1.0002671653753673e-06, "loss": 0.4951, "step": 18100 }, { "epoch": 4.8356932941490784, "step": 18100, "train/loss_ctc": 0.6701433658599854, "train/loss_error": 0.3883320391178131, "train/loss_total": 0.44469431042671204 }, { "epoch": 4.835960459524445, "step": 18101, "train/loss_ctc": 0.4960319399833679, "train/loss_error": 0.43192392587661743, "train/loss_total": 0.4447455406188965 }, { "epoch": 4.836227624899813, "step": 18102, "train/loss_ctc": 0.543265700340271, "train/loss_error": 0.3996463119983673, "train/loss_total": 0.4283701777458191 }, { "epoch": 
4.8364947902751805, "step": 18103, "train/loss_ctc": 0.6968216896057129, "train/loss_error": 0.44487646222114563, "train/loss_total": 0.49526554346084595 }, { "epoch": 4.836761955650548, "step": 18104, "train/loss_ctc": 0.9711220860481262, "train/loss_error": 0.44086727499961853, "train/loss_total": 0.5469182729721069 }, { "epoch": 4.837029121025915, "step": 18105, "train/loss_ctc": 0.45290178060531616, "train/loss_error": 0.3395549952983856, "train/loss_total": 0.3622243404388428 }, { "epoch": 4.8372962864012825, "step": 18106, "train/loss_ctc": 1.2604135274887085, "train/loss_error": 0.4598884582519531, "train/loss_total": 0.6199934482574463 }, { "epoch": 4.837563451776649, "step": 18107, "train/loss_ctc": 0.9380630850791931, "train/loss_error": 0.38965821266174316, "train/loss_total": 0.49933919310569763 }, { "epoch": 4.837830617152017, "step": 18108, "train/loss_ctc": 0.683509111404419, "train/loss_error": 0.44124215841293335, "train/loss_total": 0.48969554901123047 }, { "epoch": 4.8380977825273845, "step": 18109, "train/loss_ctc": 0.5230444669723511, "train/loss_error": 0.4877575933933258, "train/loss_total": 0.4948149621486664 }, { "epoch": 4.838364947902752, "grad_norm": 1.8111920356750488, "learning_rate": 9.842372428533262e-07, "loss": 0.4826, "step": 18110 }, { "epoch": 4.838364947902752, "step": 18110, "train/loss_ctc": 0.6953246593475342, "train/loss_error": 0.43498173356056213, "train/loss_total": 0.487050324678421 }, { "epoch": 4.838632113278119, "step": 18111, "train/loss_ctc": 1.0950663089752197, "train/loss_error": 0.43996912240982056, "train/loss_total": 0.5709885358810425 }, { "epoch": 4.838899278653487, "step": 18112, "train/loss_ctc": 0.49612846970558167, "train/loss_error": 0.6063470244407654, "train/loss_total": 0.5843033194541931 }, { "epoch": 4.839166444028854, "step": 18113, "train/loss_ctc": 0.33549344539642334, "train/loss_error": 0.398905873298645, "train/loss_total": 0.38622337579727173 }, { "epoch": 4.839433609404221, "step": 18114, 
"train/loss_ctc": 0.8031707406044006, "train/loss_error": 0.45155608654022217, "train/loss_total": 0.5218790173530579 }, { "epoch": 4.839700774779589, "step": 18115, "train/loss_ctc": 0.31200680136680603, "train/loss_error": 0.4967483878135681, "train/loss_total": 0.4598000645637512 }, { "epoch": 4.839967940154956, "step": 18116, "train/loss_ctc": 0.4825791120529175, "train/loss_error": 0.42376261949539185, "train/loss_total": 0.43552592396736145 }, { "epoch": 4.840235105530323, "step": 18117, "train/loss_ctc": 1.059960126876831, "train/loss_error": 0.4648658335208893, "train/loss_total": 0.5838847160339355 }, { "epoch": 4.840502270905691, "step": 18118, "train/loss_ctc": 0.5064513683319092, "train/loss_error": 0.44914641976356506, "train/loss_total": 0.4606074392795563 }, { "epoch": 4.840769436281058, "step": 18119, "train/loss_ctc": 0.8307793736457825, "train/loss_error": 0.4486341178417206, "train/loss_total": 0.525063157081604 }, { "epoch": 4.841036601656425, "grad_norm": 1.3776198625564575, "learning_rate": 9.682073203312852e-07, "loss": 0.5015, "step": 18120 }, { "epoch": 4.841036601656425, "step": 18120, "train/loss_ctc": 0.4395383298397064, "train/loss_error": 0.3972529172897339, "train/loss_total": 0.40571001172065735 }, { "epoch": 4.841303767031793, "step": 18121, "train/loss_ctc": 0.7765889167785645, "train/loss_error": 0.399707168340683, "train/loss_total": 0.47508352994918823 }, { "epoch": 4.84157093240716, "step": 18122, "train/loss_ctc": 0.6743857264518738, "train/loss_error": 0.4229726195335388, "train/loss_total": 0.4732552468776703 }, { "epoch": 4.841838097782527, "step": 18123, "train/loss_ctc": 0.6737813949584961, "train/loss_error": 0.4798800051212311, "train/loss_total": 0.518660306930542 }, { "epoch": 4.842105263157895, "step": 18124, "train/loss_ctc": 0.3351418375968933, "train/loss_error": 0.3841670751571655, "train/loss_total": 0.374362051486969 }, { "epoch": 4.842372428533262, "step": 18125, "train/loss_ctc": 0.7108122110366821, 
"train/loss_error": 0.45540106296539307, "train/loss_total": 0.5064833164215088 }, { "epoch": 4.842639593908629, "step": 18126, "train/loss_ctc": 0.3340984880924225, "train/loss_error": 0.44992855191230774, "train/loss_total": 0.42676255106925964 }, { "epoch": 4.842906759283997, "step": 18127, "train/loss_ctc": 0.3969138264656067, "train/loss_error": 0.42093056440353394, "train/loss_total": 0.4161272346973419 }, { "epoch": 4.843173924659364, "step": 18128, "train/loss_ctc": 0.6289159655570984, "train/loss_error": 0.42321643233299255, "train/loss_total": 0.46435636281967163 }, { "epoch": 4.843441090034731, "step": 18129, "train/loss_ctc": 0.7191376686096191, "train/loss_error": 0.4909745156764984, "train/loss_total": 0.5366071462631226 }, { "epoch": 4.843708255410099, "grad_norm": 2.169334888458252, "learning_rate": 9.521773978092439e-07, "loss": 0.4597, "step": 18130 }, { "epoch": 4.843708255410099, "step": 18130, "train/loss_ctc": 0.6761542558670044, "train/loss_error": 0.4005262851715088, "train/loss_total": 0.4556518793106079 }, { "epoch": 4.843975420785466, "step": 18131, "train/loss_ctc": 0.5727618932723999, "train/loss_error": 0.4508005678653717, "train/loss_total": 0.4751928448677063 }, { "epoch": 4.844242586160833, "step": 18132, "train/loss_ctc": 1.0605382919311523, "train/loss_error": 0.4953731298446655, "train/loss_total": 0.6084061861038208 }, { "epoch": 4.844509751536201, "step": 18133, "train/loss_ctc": 0.9386721849441528, "train/loss_error": 0.49368882179260254, "train/loss_total": 0.5826855301856995 }, { "epoch": 4.844776916911568, "step": 18134, "train/loss_ctc": 0.43179693818092346, "train/loss_error": 0.42241933941841125, "train/loss_total": 0.4242948591709137 }, { "epoch": 4.845044082286936, "step": 18135, "train/loss_ctc": 1.0456379652023315, "train/loss_error": 0.4721190929412842, "train/loss_total": 0.5868228673934937 }, { "epoch": 4.845311247662303, "step": 18136, "train/loss_ctc": 0.6517291069030762, "train/loss_error": 0.4683443605899811, 
"train/loss_total": 0.505021333694458 }, { "epoch": 4.8455784130376705, "step": 18137, "train/loss_ctc": 0.4687550365924835, "train/loss_error": 0.35875463485717773, "train/loss_total": 0.3807547390460968 }, { "epoch": 4.845845578413037, "step": 18138, "train/loss_ctc": 1.3638405799865723, "train/loss_error": 0.468233197927475, "train/loss_total": 0.6473546624183655 }, { "epoch": 4.846112743788405, "step": 18139, "train/loss_ctc": 0.7792988419532776, "train/loss_error": 0.404250830411911, "train/loss_total": 0.4792604446411133 }, { "epoch": 4.8463799091637725, "grad_norm": 1.1325085163116455, "learning_rate": 9.361474752872028e-07, "loss": 0.5145, "step": 18140 }, { "epoch": 4.8463799091637725, "step": 18140, "train/loss_ctc": 0.43103909492492676, "train/loss_error": 0.35410207509994507, "train/loss_total": 0.36948949098587036 }, { "epoch": 4.84664707453914, "step": 18141, "train/loss_ctc": 0.4394712746143341, "train/loss_error": 0.443323016166687, "train/loss_total": 0.44255268573760986 }, { "epoch": 4.846914239914507, "step": 18142, "train/loss_ctc": 0.978377640247345, "train/loss_error": 0.4256700873374939, "train/loss_total": 0.5362116098403931 }, { "epoch": 4.8471814052898745, "step": 18143, "train/loss_ctc": 0.6519812345504761, "train/loss_error": 0.412466824054718, "train/loss_total": 0.46036970615386963 }, { "epoch": 4.847448570665242, "step": 18144, "train/loss_ctc": 0.5342280864715576, "train/loss_error": 0.5262492895126343, "train/loss_total": 0.527845025062561 }, { "epoch": 4.847715736040609, "step": 18145, "train/loss_ctc": 0.7545168399810791, "train/loss_error": 0.395292192697525, "train/loss_total": 0.4671371281147003 }, { "epoch": 4.8479829014159765, "step": 18146, "train/loss_ctc": 0.529199481010437, "train/loss_error": 0.396523118019104, "train/loss_total": 0.423058420419693 }, { "epoch": 4.848250066791344, "step": 18147, "train/loss_ctc": 0.5269147157669067, "train/loss_error": 0.48956936597824097, "train/loss_total": 0.49703845381736755 }, { 
"epoch": 4.848517232166711, "step": 18148, "train/loss_ctc": 1.8466452360153198, "train/loss_error": 0.4323865473270416, "train/loss_total": 0.7152383327484131 }, { "epoch": 4.848784397542079, "step": 18149, "train/loss_ctc": 0.7967298030853271, "train/loss_error": 0.43734705448150635, "train/loss_total": 0.5092236399650574 }, { "epoch": 4.849051562917446, "grad_norm": 1.6985136270523071, "learning_rate": 9.201175527651616e-07, "loss": 0.4948, "step": 18150 }, { "epoch": 4.849051562917446, "step": 18150, "train/loss_ctc": 0.7087802886962891, "train/loss_error": 0.41860949993133545, "train/loss_total": 0.4766436517238617 }, { "epoch": 4.849318728292813, "step": 18151, "train/loss_ctc": 0.8073579668998718, "train/loss_error": 0.40571263432502747, "train/loss_total": 0.48604172468185425 }, { "epoch": 4.849585893668181, "step": 18152, "train/loss_ctc": 0.4590321183204651, "train/loss_error": 0.46411770582199097, "train/loss_total": 0.4631006121635437 }, { "epoch": 4.849853059043548, "step": 18153, "train/loss_ctc": 1.1695103645324707, "train/loss_error": 0.4529497027397156, "train/loss_total": 0.5962618589401245 }, { "epoch": 4.850120224418915, "step": 18154, "train/loss_ctc": 0.8265840411186218, "train/loss_error": 0.48670271039009094, "train/loss_total": 0.5546789765357971 }, { "epoch": 4.850387389794283, "step": 18155, "train/loss_ctc": 0.41550707817077637, "train/loss_error": 0.5234084129333496, "train/loss_total": 0.501828134059906 }, { "epoch": 4.85065455516965, "step": 18156, "train/loss_ctc": 0.5188960433006287, "train/loss_error": 0.4285169839859009, "train/loss_total": 0.4465928077697754 }, { "epoch": 4.850921720545017, "step": 18157, "train/loss_ctc": 0.32745659351348877, "train/loss_error": 0.42814260721206665, "train/loss_total": 0.40800541639328003 }, { "epoch": 4.851188885920385, "step": 18158, "train/loss_ctc": 0.7704727649688721, "train/loss_error": 0.45345398783683777, "train/loss_total": 0.5168577432632446 }, { "epoch": 4.851456051295752, "step": 
18159, "train/loss_ctc": 0.6551952362060547, "train/loss_error": 0.4354614317417145, "train/loss_total": 0.4794082045555115 }, { "epoch": 4.851723216671119, "grad_norm": 3.0569846630096436, "learning_rate": 9.040876302431204e-07, "loss": 0.4929, "step": 18160 }, { "epoch": 4.851723216671119, "step": 18160, "train/loss_ctc": 1.0513113737106323, "train/loss_error": 0.43353432416915894, "train/loss_total": 0.5570897459983826 }, { "epoch": 4.851990382046487, "step": 18161, "train/loss_ctc": 0.4202935993671417, "train/loss_error": 0.37361255288124084, "train/loss_total": 0.38294878602027893 }, { "epoch": 4.852257547421854, "step": 18162, "train/loss_ctc": 0.6563506722450256, "train/loss_error": 0.46903085708618164, "train/loss_total": 0.5064948201179504 }, { "epoch": 4.852524712797221, "step": 18163, "train/loss_ctc": 0.6468141674995422, "train/loss_error": 0.4271925091743469, "train/loss_total": 0.471116840839386 }, { "epoch": 4.852791878172589, "step": 18164, "train/loss_ctc": 0.5893957018852234, "train/loss_error": 0.4991498589515686, "train/loss_total": 0.5171990394592285 }, { "epoch": 4.853059043547956, "step": 18165, "train/loss_ctc": 0.9148423075675964, "train/loss_error": 0.3996739685535431, "train/loss_total": 0.5027076601982117 }, { "epoch": 4.853326208923324, "step": 18166, "train/loss_ctc": 1.0885930061340332, "train/loss_error": 0.4539405107498169, "train/loss_total": 0.5808709859848022 }, { "epoch": 4.853593374298691, "step": 18167, "train/loss_ctc": 0.8109753727912903, "train/loss_error": 0.4362955093383789, "train/loss_total": 0.5112314820289612 }, { "epoch": 4.853860539674058, "step": 18168, "train/loss_ctc": 0.2919993996620178, "train/loss_error": 0.4134849011898041, "train/loss_total": 0.3891878128051758 }, { "epoch": 4.854127705049425, "step": 18169, "train/loss_ctc": 0.643362820148468, "train/loss_error": 0.4118453860282898, "train/loss_total": 0.45814889669418335 }, { "epoch": 4.854394870424793, "grad_norm": 1.6673839092254639, "learning_rate": 
8.880577077210794e-07, "loss": 0.4877, "step": 18170 }, { "epoch": 4.854394870424793, "step": 18170, "train/loss_ctc": 0.6246020793914795, "train/loss_error": 0.445512980222702, "train/loss_total": 0.4813308119773865 }, { "epoch": 4.85466203580016, "step": 18171, "train/loss_ctc": 0.8078285455703735, "train/loss_error": 0.4351276159286499, "train/loss_total": 0.5096678137779236 }, { "epoch": 4.854929201175528, "step": 18172, "train/loss_ctc": 0.7525403499603271, "train/loss_error": 0.3335411250591278, "train/loss_total": 0.4173409938812256 }, { "epoch": 4.855196366550895, "step": 18173, "train/loss_ctc": 0.5971955060958862, "train/loss_error": 0.42870479822158813, "train/loss_total": 0.46240293979644775 }, { "epoch": 4.8554635319262625, "step": 18174, "train/loss_ctc": 1.5972681045532227, "train/loss_error": 0.45566004514694214, "train/loss_total": 0.6839816570281982 }, { "epoch": 4.85573069730163, "step": 18175, "train/loss_ctc": 0.6076653003692627, "train/loss_error": 0.42279529571533203, "train/loss_total": 0.4597693085670471 }, { "epoch": 4.855997862676997, "step": 18176, "train/loss_ctc": 1.746463418006897, "train/loss_error": 0.46216559410095215, "train/loss_total": 0.7190251350402832 }, { "epoch": 4.8562650280523645, "step": 18177, "train/loss_ctc": 0.30841487646102905, "train/loss_error": 0.3774927258491516, "train/loss_total": 0.36367714405059814 }, { "epoch": 4.856532193427732, "step": 18178, "train/loss_ctc": 1.3929810523986816, "train/loss_error": 0.4976017475128174, "train/loss_total": 0.6766775846481323 }, { "epoch": 4.856799358803099, "step": 18179, "train/loss_ctc": 0.7062941789627075, "train/loss_error": 0.5146491527557373, "train/loss_total": 0.5529781579971313 }, { "epoch": 4.8570665241784665, "grad_norm": 2.773897409439087, "learning_rate": 8.720277851990382e-07, "loss": 0.5327, "step": 18180 }, { "epoch": 4.8570665241784665, "step": 18180, "train/loss_ctc": 0.5996494293212891, "train/loss_error": 0.37130501866340637, "train/loss_total": 
0.41697388887405396 }, { "epoch": 4.857333689553834, "step": 18181, "train/loss_ctc": 0.9352578520774841, "train/loss_error": 0.4681467115879059, "train/loss_total": 0.5615689754486084 }, { "epoch": 4.857600854929201, "step": 18182, "train/loss_ctc": 0.7058855295181274, "train/loss_error": 0.43974441289901733, "train/loss_total": 0.49297261238098145 }, { "epoch": 4.8578680203045685, "step": 18183, "train/loss_ctc": 0.5543000102043152, "train/loss_error": 0.4356035888195038, "train/loss_total": 0.459342896938324 }, { "epoch": 4.858135185679936, "step": 18184, "train/loss_ctc": 0.9814574718475342, "train/loss_error": 0.4804416298866272, "train/loss_total": 0.5806447863578796 }, { "epoch": 4.858402351055303, "step": 18185, "train/loss_ctc": 0.21150387823581696, "train/loss_error": 0.504011332988739, "train/loss_total": 0.4455098509788513 }, { "epoch": 4.858669516430671, "step": 18186, "train/loss_ctc": 0.9821926355361938, "train/loss_error": 0.4921013116836548, "train/loss_total": 0.5901196002960205 }, { "epoch": 4.858936681806038, "step": 18187, "train/loss_ctc": 0.5136003494262695, "train/loss_error": 0.4788277745246887, "train/loss_total": 0.48578229546546936 }, { "epoch": 4.859203847181405, "step": 18188, "train/loss_ctc": 0.8860820531845093, "train/loss_error": 0.4890297055244446, "train/loss_total": 0.5684401988983154 }, { "epoch": 4.859471012556773, "step": 18189, "train/loss_ctc": 0.5605535507202148, "train/loss_error": 0.41384291648864746, "train/loss_total": 0.443185031414032 }, { "epoch": 4.85973817793214, "grad_norm": 1.3778042793273926, "learning_rate": 8.559978626769971e-07, "loss": 0.5045, "step": 18190 }, { "epoch": 4.85973817793214, "step": 18190, "train/loss_ctc": 0.456265389919281, "train/loss_error": 0.3988563120365143, "train/loss_total": 0.4103381335735321 }, { "epoch": 4.860005343307507, "step": 18191, "train/loss_ctc": 0.5352916121482849, "train/loss_error": 0.441813588142395, "train/loss_total": 0.46050918102264404 }, { "epoch": 
4.860272508682875, "step": 18192, "train/loss_ctc": 0.5127308368682861, "train/loss_error": 0.45034947991371155, "train/loss_total": 0.4628257751464844 }, { "epoch": 4.860539674058242, "step": 18193, "train/loss_ctc": 0.7699671983718872, "train/loss_error": 0.4809231460094452, "train/loss_total": 0.5387319922447205 }, { "epoch": 4.860806839433609, "step": 18194, "train/loss_ctc": 0.868575394153595, "train/loss_error": 0.4227619469165802, "train/loss_total": 0.5119246244430542 }, { "epoch": 4.861074004808977, "step": 18195, "train/loss_ctc": 0.7683935165405273, "train/loss_error": 0.35178083181381226, "train/loss_total": 0.4351033568382263 }, { "epoch": 4.861341170184344, "step": 18196, "train/loss_ctc": 0.6894537210464478, "train/loss_error": 0.406011164188385, "train/loss_total": 0.46269965171813965 }, { "epoch": 4.861608335559712, "step": 18197, "train/loss_ctc": 0.8646311163902283, "train/loss_error": 0.5042940974235535, "train/loss_total": 0.5763615369796753 }, { "epoch": 4.861875500935079, "step": 18198, "train/loss_ctc": 0.6028494834899902, "train/loss_error": 0.39786311984062195, "train/loss_total": 0.4388604164123535 }, { "epoch": 4.862142666310446, "step": 18199, "train/loss_ctc": 0.9579205513000488, "train/loss_error": 0.44942715764045715, "train/loss_total": 0.5511258840560913 }, { "epoch": 4.862409831685813, "grad_norm": 3.6045048236846924, "learning_rate": 8.399679401549559e-07, "loss": 0.4848, "step": 18200 }, { "epoch": 4.862409831685813, "step": 18200, "train/loss_ctc": 1.9065245389938354, "train/loss_error": 0.4598003029823303, "train/loss_total": 0.7491451501846313 }, { "epoch": 4.862676997061181, "step": 18201, "train/loss_ctc": 0.3881158232688904, "train/loss_error": 0.48391833901405334, "train/loss_total": 0.4647578299045563 }, { "epoch": 4.862944162436548, "step": 18202, "train/loss_ctc": 0.7061389088630676, "train/loss_error": 0.3597019612789154, "train/loss_total": 0.42898935079574585 }, { "epoch": 4.863211327811916, "step": 18203, 
"train/loss_ctc": 0.9270521998405457, "train/loss_error": 0.4225340485572815, "train/loss_total": 0.5234376788139343 }, { "epoch": 4.863478493187283, "step": 18204, "train/loss_ctc": 1.2154682874679565, "train/loss_error": 0.4732016324996948, "train/loss_total": 0.6216549873352051 }, { "epoch": 4.86374565856265, "step": 18205, "train/loss_ctc": 0.7658183574676514, "train/loss_error": 0.42841827869415283, "train/loss_total": 0.4958983063697815 }, { "epoch": 4.864012823938017, "step": 18206, "train/loss_ctc": 0.9683375358581543, "train/loss_error": 0.42305809259414673, "train/loss_total": 0.5321139693260193 }, { "epoch": 4.864279989313385, "step": 18207, "train/loss_ctc": 0.4450204372406006, "train/loss_error": 0.42161303758621216, "train/loss_total": 0.4262945353984833 }, { "epoch": 4.864547154688752, "step": 18208, "train/loss_ctc": 0.2651122808456421, "train/loss_error": 0.3883461356163025, "train/loss_total": 0.363699346780777 }, { "epoch": 4.86481432006412, "step": 18209, "train/loss_ctc": 0.44132930040359497, "train/loss_error": 0.46726924180984497, "train/loss_total": 0.46208125352859497 }, { "epoch": 4.865081485439487, "grad_norm": 2.2685015201568604, "learning_rate": 8.239380176329148e-07, "loss": 0.5068, "step": 18210 }, { "epoch": 4.865081485439487, "step": 18210, "train/loss_ctc": 0.7079217433929443, "train/loss_error": 0.4569130837917328, "train/loss_total": 0.507114827632904 }, { "epoch": 4.8653486508148545, "step": 18211, "train/loss_ctc": 1.0586048364639282, "train/loss_error": 0.4107773005962372, "train/loss_total": 0.5403428077697754 }, { "epoch": 4.865615816190222, "step": 18212, "train/loss_ctc": 0.5611665844917297, "train/loss_error": 0.3412121832370758, "train/loss_total": 0.3852030634880066 }, { "epoch": 4.865882981565589, "step": 18213, "train/loss_ctc": 0.5060156583786011, "train/loss_error": 0.48126745223999023, "train/loss_total": 0.48621711134910583 }, { "epoch": 4.8661501469409565, "step": 18214, "train/loss_ctc": 0.931311845779419, 
"train/loss_error": 0.4059267044067383, "train/loss_total": 0.5110037326812744 }, { "epoch": 4.866417312316324, "step": 18215, "train/loss_ctc": 0.5183939933776855, "train/loss_error": 0.3958560824394226, "train/loss_total": 0.4203636646270752 }, { "epoch": 4.866684477691691, "step": 18216, "train/loss_ctc": 0.23058751225471497, "train/loss_error": 0.36258262395858765, "train/loss_total": 0.3361836075782776 }, { "epoch": 4.8669516430670585, "step": 18217, "train/loss_ctc": 0.6351643204689026, "train/loss_error": 0.4924503564834595, "train/loss_total": 0.520993173122406 }, { "epoch": 4.867218808442426, "step": 18218, "train/loss_ctc": 0.9705345630645752, "train/loss_error": 0.42244473099708557, "train/loss_total": 0.5320627093315125 }, { "epoch": 4.867485973817793, "step": 18219, "train/loss_ctc": 0.6308141946792603, "train/loss_error": 0.4031485617160797, "train/loss_total": 0.44868171215057373 }, { "epoch": 4.8677531391931605, "grad_norm": 1.4136481285095215, "learning_rate": 8.079080951108736e-07, "loss": 0.4688, "step": 18220 }, { "epoch": 4.8677531391931605, "step": 18220, "train/loss_ctc": 0.7561922073364258, "train/loss_error": 0.4528045654296875, "train/loss_total": 0.5134820938110352 }, { "epoch": 4.868020304568528, "step": 18221, "train/loss_ctc": 0.6570397615432739, "train/loss_error": 0.46819615364074707, "train/loss_total": 0.5059648752212524 }, { "epoch": 4.868287469943895, "step": 18222, "train/loss_ctc": 0.5402410626411438, "train/loss_error": 0.519913375377655, "train/loss_total": 0.5239789485931396 }, { "epoch": 4.868554635319263, "step": 18223, "train/loss_ctc": 0.8850052356719971, "train/loss_error": 0.4948975443840027, "train/loss_total": 0.5729190707206726 }, { "epoch": 4.86882180069463, "step": 18224, "train/loss_ctc": 0.4528111219406128, "train/loss_error": 0.3933829367160797, "train/loss_total": 0.4052685797214508 }, { "epoch": 4.869088966069997, "step": 18225, "train/loss_ctc": 0.7076583504676819, "train/loss_error": 0.4125109910964966, 
"train/loss_total": 0.4715404808521271 }, { "epoch": 4.869356131445365, "step": 18226, "train/loss_ctc": 0.6023399829864502, "train/loss_error": 0.4233056604862213, "train/loss_total": 0.4591125249862671 }, { "epoch": 4.869623296820732, "step": 18227, "train/loss_ctc": 0.47623446583747864, "train/loss_error": 0.4400652050971985, "train/loss_total": 0.447299063205719 }, { "epoch": 4.869890462196099, "step": 18228, "train/loss_ctc": 1.0762836933135986, "train/loss_error": 0.45552366971969604, "train/loss_total": 0.5796756744384766 }, { "epoch": 4.870157627571467, "step": 18229, "train/loss_ctc": 0.6264915466308594, "train/loss_error": 0.46057456731796265, "train/loss_total": 0.493757963180542 }, { "epoch": 4.870424792946834, "grad_norm": 1.5653575658798218, "learning_rate": 7.918781725888325e-07, "loss": 0.4973, "step": 18230 }, { "epoch": 4.870424792946834, "step": 18230, "train/loss_ctc": 0.7090364694595337, "train/loss_error": 0.4304892420768738, "train/loss_total": 0.4861987233161926 }, { "epoch": 4.870691958322201, "step": 18231, "train/loss_ctc": 0.8605886101722717, "train/loss_error": 0.4956572353839874, "train/loss_total": 0.5686435103416443 }, { "epoch": 4.870959123697569, "step": 18232, "train/loss_ctc": 0.40220215916633606, "train/loss_error": 0.3879298269748688, "train/loss_total": 0.39078429341316223 }, { "epoch": 4.871226289072936, "step": 18233, "train/loss_ctc": 0.8404274582862854, "train/loss_error": 0.39343276619911194, "train/loss_total": 0.4828317165374756 }, { "epoch": 4.871493454448304, "step": 18234, "train/loss_ctc": 0.880061149597168, "train/loss_error": 0.423669695854187, "train/loss_total": 0.5149480104446411 }, { "epoch": 4.871760619823671, "step": 18235, "train/loss_ctc": 0.5400236248970032, "train/loss_error": 0.4896739423274994, "train/loss_total": 0.49974387884140015 }, { "epoch": 4.872027785199038, "step": 18236, "train/loss_ctc": 0.7522048950195312, "train/loss_error": 0.423806369304657, "train/loss_total": 0.48948609828948975 }, { 
"epoch": 4.872294950574405, "step": 18237, "train/loss_ctc": 1.0204626321792603, "train/loss_error": 0.4531031847000122, "train/loss_total": 0.5665750503540039 }, { "epoch": 4.872562115949773, "step": 18238, "train/loss_ctc": 0.9302765130996704, "train/loss_error": 0.4413997232913971, "train/loss_total": 0.5391750931739807 }, { "epoch": 4.87282928132514, "step": 18239, "train/loss_ctc": 0.705564022064209, "train/loss_error": 0.40001532435417175, "train/loss_total": 0.46112507581710815 }, { "epoch": 4.873096446700508, "grad_norm": 3.8083949089050293, "learning_rate": 7.758482500667915e-07, "loss": 0.5, "step": 18240 }, { "epoch": 4.873096446700508, "step": 18240, "train/loss_ctc": 0.5109077095985413, "train/loss_error": 0.47052496671676636, "train/loss_total": 0.4786015450954437 }, { "epoch": 4.873363612075875, "step": 18241, "train/loss_ctc": 0.6922498941421509, "train/loss_error": 0.40244805812835693, "train/loss_total": 0.46040844917297363 }, { "epoch": 4.873630777451242, "step": 18242, "train/loss_ctc": 0.5973522067070007, "train/loss_error": 0.4142701029777527, "train/loss_total": 0.4508865475654602 }, { "epoch": 4.87389794282661, "step": 18243, "train/loss_ctc": 0.7616490721702576, "train/loss_error": 0.4559413194656372, "train/loss_total": 0.5170828700065613 }, { "epoch": 4.874165108201977, "step": 18244, "train/loss_ctc": 0.6536118984222412, "train/loss_error": 0.40860632061958313, "train/loss_total": 0.4576074481010437 }, { "epoch": 4.874432273577344, "step": 18245, "train/loss_ctc": 0.3706827163696289, "train/loss_error": 0.4737548828125, "train/loss_total": 0.4531404674053192 }, { "epoch": 4.874699438952712, "step": 18246, "train/loss_ctc": 0.6619952917098999, "train/loss_error": 0.4298757016658783, "train/loss_total": 0.4762996435165405 }, { "epoch": 4.874966604328079, "step": 18247, "train/loss_ctc": 0.7229994535446167, "train/loss_error": 0.3697206676006317, "train/loss_total": 0.4403764605522156 }, { "epoch": 4.8752337697034465, "step": 18248, 
"train/loss_ctc": 0.7522684931755066, "train/loss_error": 0.37464195489883423, "train/loss_total": 0.45016729831695557 }, { "epoch": 4.875500935078814, "step": 18249, "train/loss_ctc": 0.5704130530357361, "train/loss_error": 0.4358305037021637, "train/loss_total": 0.4627470076084137 }, { "epoch": 4.875768100454181, "grad_norm": 1.2913731336593628, "learning_rate": 7.598183275447502e-07, "loss": 0.4647, "step": 18250 }, { "epoch": 4.875768100454181, "step": 18250, "train/loss_ctc": 0.9527022838592529, "train/loss_error": 0.4188346564769745, "train/loss_total": 0.5256081819534302 }, { "epoch": 4.8760352658295485, "step": 18251, "train/loss_ctc": 0.8473072052001953, "train/loss_error": 0.4955596327781677, "train/loss_total": 0.5659091472625732 }, { "epoch": 4.876302431204916, "step": 18252, "train/loss_ctc": 1.1271064281463623, "train/loss_error": 0.39879080653190613, "train/loss_total": 0.5444539189338684 }, { "epoch": 4.876569596580283, "step": 18253, "train/loss_ctc": 0.7109228372573853, "train/loss_error": 0.47990328073501587, "train/loss_total": 0.5261071920394897 }, { "epoch": 4.8768367619556505, "step": 18254, "train/loss_ctc": 0.5804023742675781, "train/loss_error": 0.41372498869895935, "train/loss_total": 0.4470604658126831 }, { "epoch": 4.877103927331018, "step": 18255, "train/loss_ctc": 1.28402578830719, "train/loss_error": 0.445853590965271, "train/loss_total": 0.6134880185127258 }, { "epoch": 4.877371092706385, "step": 18256, "train/loss_ctc": 1.1810659170150757, "train/loss_error": 0.4756849408149719, "train/loss_total": 0.6167611479759216 }, { "epoch": 4.8776382580817526, "step": 18257, "train/loss_ctc": 0.7763001918792725, "train/loss_error": 0.48734819889068604, "train/loss_total": 0.5451385974884033 }, { "epoch": 4.87790542345712, "step": 18258, "train/loss_ctc": 1.054206132888794, "train/loss_error": 0.4794984459877014, "train/loss_total": 0.5944399833679199 }, { "epoch": 4.878172588832487, "step": 18259, "train/loss_ctc": 0.7881633043289185, 
"train/loss_error": 0.4179346263408661, "train/loss_total": 0.4919803738594055 }, { "epoch": 4.878439754207855, "grad_norm": 2.2020516395568848, "learning_rate": 7.437884050227091e-07, "loss": 0.5471, "step": 18260 }, { "epoch": 4.878439754207855, "step": 18260, "train/loss_ctc": 0.818933367729187, "train/loss_error": 0.39407235383987427, "train/loss_total": 0.4790445566177368 }, { "epoch": 4.878706919583222, "step": 18261, "train/loss_ctc": 0.585692822933197, "train/loss_error": 0.39961618185043335, "train/loss_total": 0.4368315041065216 }, { "epoch": 4.878974084958589, "step": 18262, "train/loss_ctc": 0.48165085911750793, "train/loss_error": 0.4252800941467285, "train/loss_total": 0.4365542531013489 }, { "epoch": 4.879241250333957, "step": 18263, "train/loss_ctc": 0.41376322507858276, "train/loss_error": 0.3645080626010895, "train/loss_total": 0.3743591010570526 }, { "epoch": 4.879508415709324, "step": 18264, "train/loss_ctc": 0.4755038022994995, "train/loss_error": 0.4041260778903961, "train/loss_total": 0.4184016287326813 }, { "epoch": 4.879775581084692, "step": 18265, "train/loss_ctc": 1.6783866882324219, "train/loss_error": 0.4600541591644287, "train/loss_total": 0.7037206888198853 }, { "epoch": 4.880042746460059, "step": 18266, "train/loss_ctc": 0.884332001209259, "train/loss_error": 0.43761369585990906, "train/loss_total": 0.5269573330879211 }, { "epoch": 4.880309911835426, "step": 18267, "train/loss_ctc": 0.2824719548225403, "train/loss_error": 0.44920292496681213, "train/loss_total": 0.4158567190170288 }, { "epoch": 4.880577077210793, "step": 18268, "train/loss_ctc": 0.9264627695083618, "train/loss_error": 0.4153357148170471, "train/loss_total": 0.517561137676239 }, { "epoch": 4.880844242586161, "step": 18269, "train/loss_ctc": 0.7225697040557861, "train/loss_error": 0.4186587333679199, "train/loss_total": 0.47944092750549316 }, { "epoch": 4.881111407961528, "grad_norm": 1.761707067489624, "learning_rate": 7.277584825006679e-07, "loss": 0.4789, "step": 
18270 }, { "epoch": 4.881111407961528, "step": 18270, "train/loss_ctc": 0.23564371466636658, "train/loss_error": 0.4591282904148102, "train/loss_total": 0.4144313633441925 }, { "epoch": 4.881378573336896, "step": 18271, "train/loss_ctc": 0.5124279856681824, "train/loss_error": 0.35318461060523987, "train/loss_total": 0.3850332796573639 }, { "epoch": 4.881645738712263, "step": 18272, "train/loss_ctc": 1.0244630575180054, "train/loss_error": 0.43836429715156555, "train/loss_total": 0.5555840730667114 }, { "epoch": 4.88191290408763, "step": 18273, "train/loss_ctc": 0.6134499311447144, "train/loss_error": 0.4185565710067749, "train/loss_total": 0.4575352668762207 }, { "epoch": 4.882180069462997, "step": 18274, "train/loss_ctc": 0.5351803302764893, "train/loss_error": 0.4835822582244873, "train/loss_total": 0.49390190839767456 }, { "epoch": 4.882447234838365, "step": 18275, "train/loss_ctc": 0.8428727388381958, "train/loss_error": 0.42334628105163574, "train/loss_total": 0.5072515606880188 }, { "epoch": 4.882714400213732, "step": 18276, "train/loss_ctc": 0.279865026473999, "train/loss_error": 0.44613465666770935, "train/loss_total": 0.41288071870803833 }, { "epoch": 4.8829815655891, "step": 18277, "train/loss_ctc": 1.461649775505066, "train/loss_error": 0.44069671630859375, "train/loss_total": 0.6448873281478882 }, { "epoch": 4.883248730964467, "step": 18278, "train/loss_ctc": 0.8518889546394348, "train/loss_error": 0.4314453899860382, "train/loss_total": 0.5155341029167175 }, { "epoch": 4.883515896339834, "step": 18279, "train/loss_ctc": 0.8589571714401245, "train/loss_error": 0.43521860241889954, "train/loss_total": 0.5199663639068604 }, { "epoch": 4.883783061715202, "grad_norm": 1.9149302244186401, "learning_rate": 7.117285599786269e-07, "loss": 0.4907, "step": 18280 }, { "epoch": 4.883783061715202, "step": 18280, "train/loss_ctc": 0.7721824645996094, "train/loss_error": 0.4083062708377838, "train/loss_total": 0.4810815155506134 }, { "epoch": 4.884050227090569, 
"step": 18281, "train/loss_ctc": 0.8586716651916504, "train/loss_error": 0.4039909243583679, "train/loss_total": 0.4949270784854889 }, { "epoch": 4.884317392465936, "step": 18282, "train/loss_ctc": 0.9511709809303284, "train/loss_error": 0.37551024556159973, "train/loss_total": 0.4906424283981323 }, { "epoch": 4.884584557841304, "step": 18283, "train/loss_ctc": 1.0297585725784302, "train/loss_error": 0.41298970580101013, "train/loss_total": 0.5363434553146362 }, { "epoch": 4.884851723216671, "step": 18284, "train/loss_ctc": 0.5938245058059692, "train/loss_error": 0.43264564871788025, "train/loss_total": 0.46488142013549805 }, { "epoch": 4.8851188885920385, "step": 18285, "train/loss_ctc": 0.77812659740448, "train/loss_error": 0.4668152332305908, "train/loss_total": 0.5290775299072266 }, { "epoch": 4.885386053967406, "step": 18286, "train/loss_ctc": 0.3284766972064972, "train/loss_error": 0.36239007115364075, "train/loss_total": 0.35560742020606995 }, { "epoch": 4.885653219342773, "step": 18287, "train/loss_ctc": 0.653670072555542, "train/loss_error": 0.4839910566806793, "train/loss_total": 0.5179268717765808 }, { "epoch": 4.8859203847181405, "step": 18288, "train/loss_ctc": 0.7280590534210205, "train/loss_error": 0.43899446725845337, "train/loss_total": 0.49680739641189575 }, { "epoch": 4.886187550093508, "step": 18289, "train/loss_ctc": 0.6451082229614258, "train/loss_error": 0.4931512475013733, "train/loss_total": 0.5235426425933838 }, { "epoch": 4.886454715468875, "grad_norm": 1.5634779930114746, "learning_rate": 6.956986374565856e-07, "loss": 0.4891, "step": 18290 }, { "epoch": 4.886454715468875, "step": 18290, "train/loss_ctc": 0.8841180801391602, "train/loss_error": 0.38500502705574036, "train/loss_total": 0.4848276376724243 }, { "epoch": 4.8867218808442425, "step": 18291, "train/loss_ctc": 0.6022944450378418, "train/loss_error": 0.5221896767616272, "train/loss_total": 0.5382106304168701 }, { "epoch": 4.88698904621961, "step": 18292, "train/loss_ctc": 
0.8786570429801941, "train/loss_error": 0.42761555314064026, "train/loss_total": 0.5178238749504089 }, { "epoch": 4.887256211594977, "step": 18293, "train/loss_ctc": 0.520897626876831, "train/loss_error": 0.4672289490699768, "train/loss_total": 0.4779627025127411 }, { "epoch": 4.887523376970345, "step": 18294, "train/loss_ctc": 0.730424165725708, "train/loss_error": 0.46688854694366455, "train/loss_total": 0.5195956826210022 }, { "epoch": 4.887790542345712, "step": 18295, "train/loss_ctc": 0.43859487771987915, "train/loss_error": 0.41525471210479736, "train/loss_total": 0.41992276906967163 }, { "epoch": 4.88805770772108, "step": 18296, "train/loss_ctc": 0.481982558965683, "train/loss_error": 0.490817666053772, "train/loss_total": 0.48905065655708313 }, { "epoch": 4.888324873096447, "step": 18297, "train/loss_ctc": 0.7331946492195129, "train/loss_error": 0.3969530165195465, "train/loss_total": 0.4642013609409332 }, { "epoch": 4.888592038471814, "step": 18298, "train/loss_ctc": 0.43431922793388367, "train/loss_error": 0.44302377104759216, "train/loss_total": 0.44128286838531494 }, { "epoch": 4.888859203847181, "step": 18299, "train/loss_ctc": 0.4296962022781372, "train/loss_error": 0.4157421886920929, "train/loss_total": 0.4185330271720886 }, { "epoch": 4.889126369222549, "grad_norm": 2.7830147743225098, "learning_rate": 6.796687149345445e-07, "loss": 0.4771, "step": 18300 }, { "epoch": 4.889126369222549, "step": 18300, "train/loss_ctc": 0.8961509466171265, "train/loss_error": 0.4357489347457886, "train/loss_total": 0.5278293490409851 }, { "epoch": 4.889393534597916, "step": 18301, "train/loss_ctc": 0.519516110420227, "train/loss_error": 0.4752044379711151, "train/loss_total": 0.48406678438186646 }, { "epoch": 4.889660699973284, "step": 18302, "train/loss_ctc": 0.21588963270187378, "train/loss_error": 0.41795119643211365, "train/loss_total": 0.37753888964653015 }, { "epoch": 4.889927865348651, "step": 18303, "train/loss_ctc": 1.1854708194732666, "train/loss_error": 
0.4590379297733307, "train/loss_total": 0.6043245196342468 }, { "epoch": 4.890195030724018, "step": 18304, "train/loss_ctc": 0.602636456489563, "train/loss_error": 0.4538613259792328, "train/loss_total": 0.48361635208129883 }, { "epoch": 4.890462196099385, "step": 18305, "train/loss_ctc": 0.6807067394256592, "train/loss_error": 0.4832783043384552, "train/loss_total": 0.5227639675140381 }, { "epoch": 4.890729361474753, "step": 18306, "train/loss_ctc": 0.3879026174545288, "train/loss_error": 0.4238568842411041, "train/loss_total": 0.41666603088378906 }, { "epoch": 4.89099652685012, "step": 18307, "train/loss_ctc": 0.6993044018745422, "train/loss_error": 0.4223507344722748, "train/loss_total": 0.4777414798736572 }, { "epoch": 4.891263692225488, "step": 18308, "train/loss_ctc": 0.5061684250831604, "train/loss_error": 0.3829648792743683, "train/loss_total": 0.4076055884361267 }, { "epoch": 4.891530857600855, "step": 18309, "train/loss_ctc": 0.9491578936576843, "train/loss_error": 0.4520678222179413, "train/loss_total": 0.5514858365058899 }, { "epoch": 4.891798022976222, "grad_norm": 1.3122516870498657, "learning_rate": 6.636387924125033e-07, "loss": 0.4854, "step": 18310 }, { "epoch": 4.891798022976222, "step": 18310, "train/loss_ctc": 0.9421617984771729, "train/loss_error": 0.42733222246170044, "train/loss_total": 0.530298113822937 }, { "epoch": 4.89206518835159, "step": 18311, "train/loss_ctc": 0.7258892059326172, "train/loss_error": 0.45264673233032227, "train/loss_total": 0.5072952508926392 }, { "epoch": 4.892332353726957, "step": 18312, "train/loss_ctc": 0.6852917671203613, "train/loss_error": 0.44266533851623535, "train/loss_total": 0.4911906123161316 }, { "epoch": 4.892599519102324, "step": 18313, "train/loss_ctc": 0.6053916811943054, "train/loss_error": 0.3913715183734894, "train/loss_total": 0.4341755509376526 }, { "epoch": 4.892866684477692, "step": 18314, "train/loss_ctc": 1.115663766860962, "train/loss_error": 0.47887325286865234, "train/loss_total": 
0.6062313318252563 }, { "epoch": 4.893133849853059, "step": 18315, "train/loss_ctc": 0.8841978311538696, "train/loss_error": 0.4400380849838257, "train/loss_total": 0.5288700461387634 }, { "epoch": 4.893401015228426, "step": 18316, "train/loss_ctc": 1.0177228450775146, "train/loss_error": 0.44567427039146423, "train/loss_total": 0.5600839853286743 }, { "epoch": 4.893668180603794, "step": 18317, "train/loss_ctc": 0.9390447735786438, "train/loss_error": 0.47613659501075745, "train/loss_total": 0.5687182545661926 }, { "epoch": 4.893935345979161, "step": 18318, "train/loss_ctc": 0.9887243509292603, "train/loss_error": 0.4985370934009552, "train/loss_total": 0.5965745449066162 }, { "epoch": 4.894202511354528, "step": 18319, "train/loss_ctc": 0.7531553506851196, "train/loss_error": 0.388177752494812, "train/loss_total": 0.46117326617240906 }, { "epoch": 4.894469676729896, "grad_norm": 3.3117477893829346, "learning_rate": 6.476088698904623e-07, "loss": 0.5285, "step": 18320 }, { "epoch": 4.894469676729896, "step": 18320, "train/loss_ctc": 0.8359732627868652, "train/loss_error": 0.5139884948730469, "train/loss_total": 0.5783854722976685 }, { "epoch": 4.894736842105263, "step": 18321, "train/loss_ctc": 0.3909149169921875, "train/loss_error": 0.36383774876594543, "train/loss_total": 0.3692531883716583 }, { "epoch": 4.8950040074806305, "step": 18322, "train/loss_ctc": 1.0656101703643799, "train/loss_error": 0.42729127407073975, "train/loss_total": 0.5549550652503967 }, { "epoch": 4.895271172855998, "step": 18323, "train/loss_ctc": 0.1551520824432373, "train/loss_error": 0.4048375189304352, "train/loss_total": 0.35490044951438904 }, { "epoch": 4.895538338231365, "step": 18324, "train/loss_ctc": 0.7804447412490845, "train/loss_error": 0.4209502041339874, "train/loss_total": 0.49284911155700684 }, { "epoch": 4.8958055036067325, "step": 18325, "train/loss_ctc": 0.35891300439834595, "train/loss_error": 0.5152596235275269, "train/loss_total": 0.48399031162261963 }, { "epoch": 
4.8960726689821, "step": 18326, "train/loss_ctc": 0.4861316382884979, "train/loss_error": 0.43119099736213684, "train/loss_total": 0.4421791136264801 }, { "epoch": 4.896339834357467, "step": 18327, "train/loss_ctc": 0.39350083470344543, "train/loss_error": 0.4378913938999176, "train/loss_total": 0.42901331186294556 }, { "epoch": 4.8966069997328345, "step": 18328, "train/loss_ctc": 0.49945178627967834, "train/loss_error": 0.42743608355522156, "train/loss_total": 0.44183921813964844 }, { "epoch": 4.896874165108202, "step": 18329, "train/loss_ctc": 1.2498188018798828, "train/loss_error": 0.3971533477306366, "train/loss_total": 0.5676864385604858 }, { "epoch": 4.897141330483569, "grad_norm": 1.1731452941894531, "learning_rate": 6.315789473684211e-07, "loss": 0.4715, "step": 18330 }, { "epoch": 4.897141330483569, "step": 18330, "train/loss_ctc": 0.40196293592453003, "train/loss_error": 0.37142759561538696, "train/loss_total": 0.3775346875190735 }, { "epoch": 4.897408495858937, "step": 18331, "train/loss_ctc": 0.6202521324157715, "train/loss_error": 0.4384584426879883, "train/loss_total": 0.4748171865940094 }, { "epoch": 4.897675661234304, "step": 18332, "train/loss_ctc": 0.5125651359558105, "train/loss_error": 0.41345804929733276, "train/loss_total": 0.43327945470809937 }, { "epoch": 4.897942826609672, "step": 18333, "train/loss_ctc": 0.6157413721084595, "train/loss_error": 0.4001806974411011, "train/loss_total": 0.44329285621643066 }, { "epoch": 4.898209991985039, "step": 18334, "train/loss_ctc": 0.5609256625175476, "train/loss_error": 0.3417263329029083, "train/loss_total": 0.38556623458862305 }, { "epoch": 4.898477157360406, "step": 18335, "train/loss_ctc": 0.5500748753547668, "train/loss_error": 0.4491303563117981, "train/loss_total": 0.46931925415992737 }, { "epoch": 4.898744322735773, "step": 18336, "train/loss_ctc": 0.5362282991409302, "train/loss_error": 0.4256914556026459, "train/loss_total": 0.44779881834983826 }, { "epoch": 4.899011488111141, "step": 18337, 
"train/loss_ctc": 0.7017639875411987, "train/loss_error": 0.4178696870803833, "train/loss_total": 0.47464853525161743 }, { "epoch": 4.899278653486508, "step": 18338, "train/loss_ctc": 0.40347257256507874, "train/loss_error": 0.389592707157135, "train/loss_total": 0.39236870408058167 }, { "epoch": 4.899545818861876, "step": 18339, "train/loss_ctc": 0.5790642499923706, "train/loss_error": 0.4391229450702667, "train/loss_total": 0.4671112298965454 }, { "epoch": 4.899812984237243, "grad_norm": 3.2362308502197266, "learning_rate": 6.155490248463799e-07, "loss": 0.4366, "step": 18340 }, { "epoch": 4.899812984237243, "step": 18340, "train/loss_ctc": 0.5790163278579712, "train/loss_error": 0.4217110574245453, "train/loss_total": 0.45317211747169495 }, { "epoch": 4.90008014961261, "step": 18341, "train/loss_ctc": 0.665082573890686, "train/loss_error": 0.3954726457595825, "train/loss_total": 0.4493946433067322 }, { "epoch": 4.900347314987978, "step": 18342, "train/loss_ctc": 0.539582371711731, "train/loss_error": 0.41943320631980896, "train/loss_total": 0.4434630572795868 }, { "epoch": 4.900614480363345, "step": 18343, "train/loss_ctc": 0.46026480197906494, "train/loss_error": 0.4778103530406952, "train/loss_total": 0.4743012487888336 }, { "epoch": 4.900881645738712, "step": 18344, "train/loss_ctc": 1.0227305889129639, "train/loss_error": 0.41485917568206787, "train/loss_total": 0.5364334583282471 }, { "epoch": 4.90114881111408, "step": 18345, "train/loss_ctc": 0.4651026427745819, "train/loss_error": 0.5139119625091553, "train/loss_total": 0.5041500926017761 }, { "epoch": 4.901415976489447, "step": 18346, "train/loss_ctc": 0.7024701833724976, "train/loss_error": 0.42061251401901245, "train/loss_total": 0.47698402404785156 }, { "epoch": 4.901683141864814, "step": 18347, "train/loss_ctc": 0.46021953225135803, "train/loss_error": 0.48367565870285034, "train/loss_total": 0.47898444533348083 }, { "epoch": 4.901950307240182, "step": 18348, "train/loss_ctc": 0.6381430625915527, 
"train/loss_error": 0.4453204870223999, "train/loss_total": 0.4838849902153015 }, { "epoch": 4.902217472615549, "step": 18349, "train/loss_ctc": 0.6362302303314209, "train/loss_error": 0.4485607445240021, "train/loss_total": 0.4860946536064148 }, { "epoch": 4.902484637990916, "grad_norm": 0.9479608535766602, "learning_rate": 5.995191023243387e-07, "loss": 0.4787, "step": 18350 }, { "epoch": 4.902484637990916, "step": 18350, "train/loss_ctc": 0.7153241634368896, "train/loss_error": 0.5155019760131836, "train/loss_total": 0.5554664134979248 }, { "epoch": 4.902751803366284, "step": 18351, "train/loss_ctc": 0.9790475964546204, "train/loss_error": 0.5089026093482971, "train/loss_total": 0.6029316186904907 }, { "epoch": 4.903018968741651, "step": 18352, "train/loss_ctc": 1.4536303281784058, "train/loss_error": 0.4818790555000305, "train/loss_total": 0.6762293577194214 }, { "epoch": 4.903286134117018, "step": 18353, "train/loss_ctc": 0.8975935578346252, "train/loss_error": 0.4343949258327484, "train/loss_total": 0.5270346403121948 }, { "epoch": 4.903553299492386, "step": 18354, "train/loss_ctc": 0.3776349723339081, "train/loss_error": 0.448532372713089, "train/loss_total": 0.4343528747558594 }, { "epoch": 4.903820464867753, "step": 18355, "train/loss_ctc": 1.5013818740844727, "train/loss_error": 0.4481590986251831, "train/loss_total": 0.6588036417961121 }, { "epoch": 4.90408763024312, "step": 18356, "train/loss_ctc": 0.48240721225738525, "train/loss_error": 0.4884335398674011, "train/loss_total": 0.48722827434539795 }, { "epoch": 4.904354795618488, "step": 18357, "train/loss_ctc": 1.2682077884674072, "train/loss_error": 0.41305315494537354, "train/loss_total": 0.5840840935707092 }, { "epoch": 4.904621960993855, "step": 18358, "train/loss_ctc": 0.8276438117027283, "train/loss_error": 0.40484553575515747, "train/loss_total": 0.48940521478652954 }, { "epoch": 4.9048891263692225, "step": 18359, "train/loss_ctc": 0.5161022543907166, "train/loss_error": 0.5295805335044861, 
"train/loss_total": 0.5268848538398743 }, { "epoch": 4.90515629174459, "grad_norm": 1.1652783155441284, "learning_rate": 5.834891798022977e-07, "loss": 0.5542, "step": 18360 }, { "epoch": 4.90515629174459, "step": 18360, "train/loss_ctc": 0.35561278462409973, "train/loss_error": 0.40917056798934937, "train/loss_total": 0.3984590172767639 }, { "epoch": 4.905423457119957, "step": 18361, "train/loss_ctc": 0.5449955463409424, "train/loss_error": 0.36107534170150757, "train/loss_total": 0.3978593945503235 }, { "epoch": 4.9056906224953245, "step": 18362, "train/loss_ctc": 0.33602702617645264, "train/loss_error": 0.3686332106590271, "train/loss_total": 0.36211198568344116 }, { "epoch": 4.905957787870692, "step": 18363, "train/loss_ctc": 0.35837799310684204, "train/loss_error": 0.4474169611930847, "train/loss_total": 0.42960917949676514 }, { "epoch": 4.90622495324606, "step": 18364, "train/loss_ctc": 0.39014214277267456, "train/loss_error": 0.400088906288147, "train/loss_total": 0.3980995714664459 }, { "epoch": 4.9064921186214265, "step": 18365, "train/loss_ctc": 0.6602920889854431, "train/loss_error": 0.47350141406059265, "train/loss_total": 0.5108595490455627 }, { "epoch": 4.906759283996794, "step": 18366, "train/loss_ctc": 0.22712847590446472, "train/loss_error": 0.41028711199760437, "train/loss_total": 0.37365537881851196 }, { "epoch": 4.907026449372161, "step": 18367, "train/loss_ctc": 0.8741260170936584, "train/loss_error": 0.41507700085639954, "train/loss_total": 0.5068868398666382 }, { "epoch": 4.907293614747529, "step": 18368, "train/loss_ctc": 0.7398645877838135, "train/loss_error": 0.40602147579193115, "train/loss_total": 0.4727901220321655 }, { "epoch": 4.907560780122896, "step": 18369, "train/loss_ctc": 1.1520733833312988, "train/loss_error": 0.49141770601272583, "train/loss_total": 0.6235488653182983 }, { "epoch": 4.907827945498264, "grad_norm": 1.8138453960418701, "learning_rate": 5.674592572802565e-07, "loss": 0.4474, "step": 18370 }, { "epoch": 
4.907827945498264, "step": 18370, "train/loss_ctc": 0.46250051259994507, "train/loss_error": 0.4169389307498932, "train/loss_total": 0.4260512590408325 }, { "epoch": 4.908095110873631, "step": 18371, "train/loss_ctc": 0.6821535229682922, "train/loss_error": 0.42474451661109924, "train/loss_total": 0.4762263298034668 }, { "epoch": 4.908362276248998, "step": 18372, "train/loss_ctc": 0.56895911693573, "train/loss_error": 0.38845589756965637, "train/loss_total": 0.42455655336380005 }, { "epoch": 4.908629441624365, "step": 18373, "train/loss_ctc": 1.166393518447876, "train/loss_error": 0.43943727016448975, "train/loss_total": 0.5848285555839539 }, { "epoch": 4.908896606999733, "step": 18374, "train/loss_ctc": 0.8846362233161926, "train/loss_error": 0.5661693811416626, "train/loss_total": 0.6298627853393555 }, { "epoch": 4.9091637723751, "step": 18375, "train/loss_ctc": 0.4079318642616272, "train/loss_error": 0.45585572719573975, "train/loss_total": 0.4462709426879883 }, { "epoch": 4.909430937750468, "step": 18376, "train/loss_ctc": 1.002884030342102, "train/loss_error": 0.4536987841129303, "train/loss_total": 0.5635358095169067 }, { "epoch": 4.909698103125835, "step": 18377, "train/loss_ctc": 0.47100141644477844, "train/loss_error": 0.23676307499408722, "train/loss_total": 0.2836107611656189 }, { "epoch": 4.909965268501202, "step": 18378, "train/loss_ctc": 0.477615088224411, "train/loss_error": 0.46869170665740967, "train/loss_total": 0.4704763889312744 }, { "epoch": 4.91023243387657, "step": 18379, "train/loss_ctc": 0.5370691418647766, "train/loss_error": 0.4527468979358673, "train/loss_total": 0.46961134672164917 }, { "epoch": 4.910499599251937, "grad_norm": 2.0399749279022217, "learning_rate": 5.514293347582154e-07, "loss": 0.4775, "step": 18380 }, { "epoch": 4.910499599251937, "step": 18380, "train/loss_ctc": 1.6677594184875488, "train/loss_error": 0.5530767440795898, "train/loss_total": 0.7760132551193237 }, { "epoch": 4.910766764627304, "step": 18381, 
"train/loss_ctc": 1.1409783363342285, "train/loss_error": 0.4286327064037323, "train/loss_total": 0.5711018443107605 }, { "epoch": 4.911033930002672, "step": 18382, "train/loss_ctc": 0.996924877166748, "train/loss_error": 0.38139820098876953, "train/loss_total": 0.5045035481452942 }, { "epoch": 4.911301095378039, "step": 18383, "train/loss_ctc": 0.9368851184844971, "train/loss_error": 0.4355175197124481, "train/loss_total": 0.5357910394668579 }, { "epoch": 4.911568260753406, "step": 18384, "train/loss_ctc": 0.6389347314834595, "train/loss_error": 0.4867115318775177, "train/loss_total": 0.517156183719635 }, { "epoch": 4.911835426128774, "step": 18385, "train/loss_ctc": 0.71370929479599, "train/loss_error": 0.4360354244709015, "train/loss_total": 0.49157020449638367 }, { "epoch": 4.912102591504141, "step": 18386, "train/loss_ctc": 0.8657475709915161, "train/loss_error": 0.4379462003707886, "train/loss_total": 0.5235064625740051 }, { "epoch": 4.912369756879508, "step": 18387, "train/loss_ctc": 0.45335519313812256, "train/loss_error": 0.4819330871105194, "train/loss_total": 0.47621750831604004 }, { "epoch": 4.912636922254876, "step": 18388, "train/loss_ctc": 0.39206451177597046, "train/loss_error": 0.41515135765075684, "train/loss_total": 0.4105340242385864 }, { "epoch": 4.912904087630243, "step": 18389, "train/loss_ctc": 0.23340043425559998, "train/loss_error": 0.4523867666721344, "train/loss_total": 0.40858951210975647 }, { "epoch": 4.91317125300561, "grad_norm": 3.4445245265960693, "learning_rate": 5.353994122361741e-07, "loss": 0.5215, "step": 18390 }, { "epoch": 4.91317125300561, "step": 18390, "train/loss_ctc": 0.3887956738471985, "train/loss_error": 0.49383535981178284, "train/loss_total": 0.4728274345397949 }, { "epoch": 4.913438418380978, "step": 18391, "train/loss_ctc": 0.8114761114120483, "train/loss_error": 0.41875746846199036, "train/loss_total": 0.4973011910915375 }, { "epoch": 4.913705583756345, "step": 18392, "train/loss_ctc": 0.5139704346656799, 
"train/loss_error": 0.4928513467311859, "train/loss_total": 0.4970751702785492 }, { "epoch": 4.9139727491317124, "step": 18393, "train/loss_ctc": 0.5323204398155212, "train/loss_error": 0.4538148045539856, "train/loss_total": 0.46951594948768616 }, { "epoch": 4.91423991450708, "step": 18394, "train/loss_ctc": 0.45891398191452026, "train/loss_error": 0.39734575152397156, "train/loss_total": 0.40965941548347473 }, { "epoch": 4.914507079882448, "step": 18395, "train/loss_ctc": 0.37147361040115356, "train/loss_error": 0.48547542095184326, "train/loss_total": 0.4626750648021698 }, { "epoch": 4.9147742452578145, "step": 18396, "train/loss_ctc": 0.4655974507331848, "train/loss_error": 0.4257422089576721, "train/loss_total": 0.43371328711509705 }, { "epoch": 4.915041410633182, "step": 18397, "train/loss_ctc": 0.450927197933197, "train/loss_error": 0.4718470573425293, "train/loss_total": 0.46766307950019836 }, { "epoch": 4.915308576008549, "step": 18398, "train/loss_ctc": 0.4092380702495575, "train/loss_error": 0.4456179738044739, "train/loss_total": 0.43834200501441956 }, { "epoch": 4.9155757413839165, "step": 18399, "train/loss_ctc": 0.7974449396133423, "train/loss_error": 0.4406013488769531, "train/loss_total": 0.5119701027870178 }, { "epoch": 4.915842906759284, "grad_norm": 24.97324562072754, "learning_rate": 5.193694897141331e-07, "loss": 0.4661, "step": 18400 }, { "epoch": 4.915842906759284, "step": 18400, "train/loss_ctc": 0.4506103992462158, "train/loss_error": 0.48439404368400574, "train/loss_total": 0.47763732075691223 }, { "epoch": 4.916110072134652, "step": 18401, "train/loss_ctc": 0.7833291292190552, "train/loss_error": 0.38462910056114197, "train/loss_total": 0.46436911821365356 }, { "epoch": 4.9163772375100185, "step": 18402, "train/loss_ctc": 0.7704088687896729, "train/loss_error": 0.3726691007614136, "train/loss_total": 0.4522170424461365 }, { "epoch": 4.916644402885386, "step": 18403, "train/loss_ctc": 0.970228910446167, "train/loss_error": 
0.4623478353023529, "train/loss_total": 0.5639240741729736 }, { "epoch": 4.916911568260753, "step": 18404, "train/loss_ctc": 0.636381983757019, "train/loss_error": 0.43625351786613464, "train/loss_total": 0.47627919912338257 }, { "epoch": 4.917178733636121, "step": 18405, "train/loss_ctc": 0.5808699727058411, "train/loss_error": 0.4903300702571869, "train/loss_total": 0.5084380507469177 }, { "epoch": 4.917445899011488, "step": 18406, "train/loss_ctc": 0.39844757318496704, "train/loss_error": 0.44423800706863403, "train/loss_total": 0.4350799322128296 }, { "epoch": 4.917713064386856, "step": 18407, "train/loss_ctc": 0.39972084760665894, "train/loss_error": 0.48154765367507935, "train/loss_total": 0.4651823043823242 }, { "epoch": 4.917980229762223, "step": 18408, "train/loss_ctc": 0.5946782231330872, "train/loss_error": 0.4018494188785553, "train/loss_total": 0.4404151737689972 }, { "epoch": 4.91824739513759, "step": 18409, "train/loss_ctc": 1.2687163352966309, "train/loss_error": 0.48419761657714844, "train/loss_total": 0.6411013603210449 }, { "epoch": 4.918514560512958, "grad_norm": 2.161372423171997, "learning_rate": 5.033395671920919e-07, "loss": 0.4925, "step": 18410 }, { "epoch": 4.918514560512958, "step": 18410, "train/loss_ctc": 0.8846758008003235, "train/loss_error": 0.4465998709201813, "train/loss_total": 0.5342150926589966 }, { "epoch": 4.918781725888325, "step": 18411, "train/loss_ctc": 0.9712648391723633, "train/loss_error": 0.4686165452003479, "train/loss_total": 0.5691462159156799 }, { "epoch": 4.919048891263692, "step": 18412, "train/loss_ctc": 0.48703405261039734, "train/loss_error": 0.40687498450279236, "train/loss_total": 0.4229067862033844 }, { "epoch": 4.91931605663906, "step": 18413, "train/loss_ctc": 0.8008623719215393, "train/loss_error": 0.40830475091934204, "train/loss_total": 0.48681628704071045 }, { "epoch": 4.919583222014427, "step": 18414, "train/loss_ctc": 0.4722626805305481, "train/loss_error": 0.38832908868789673, "train/loss_total": 
0.4051158130168915 }, { "epoch": 4.919850387389794, "step": 18415, "train/loss_ctc": 0.7695960998535156, "train/loss_error": 0.4935600757598877, "train/loss_total": 0.5487673282623291 }, { "epoch": 4.920117552765162, "step": 18416, "train/loss_ctc": 0.9388328194618225, "train/loss_error": 0.41444462537765503, "train/loss_total": 0.5193222761154175 }, { "epoch": 4.920384718140529, "step": 18417, "train/loss_ctc": 0.41197311878204346, "train/loss_error": 0.3026762008666992, "train/loss_total": 0.3245355784893036 }, { "epoch": 4.920651883515896, "step": 18418, "train/loss_ctc": 0.8389166593551636, "train/loss_error": 0.4133511781692505, "train/loss_total": 0.49846428632736206 }, { "epoch": 4.920919048891264, "step": 18419, "train/loss_ctc": 0.8116499185562134, "train/loss_error": 0.5135118961334229, "train/loss_total": 0.573139488697052 }, { "epoch": 4.921186214266631, "grad_norm": 15.799108505249023, "learning_rate": 4.873096446700508e-07, "loss": 0.4882, "step": 18420 }, { "epoch": 4.921186214266631, "step": 18420, "train/loss_ctc": 0.23186945915222168, "train/loss_error": 0.5277813673019409, "train/loss_total": 0.46859899163246155 }, { "epoch": 4.921453379641998, "step": 18421, "train/loss_ctc": 1.1240531206130981, "train/loss_error": 0.46797823905944824, "train/loss_total": 0.5991932153701782 }, { "epoch": 4.921720545017366, "step": 18422, "train/loss_ctc": 0.8897326588630676, "train/loss_error": 0.40781325101852417, "train/loss_total": 0.5041971206665039 }, { "epoch": 4.921987710392733, "step": 18423, "train/loss_ctc": 0.743354320526123, "train/loss_error": 0.37044045329093933, "train/loss_total": 0.44502323865890503 }, { "epoch": 4.9222548757681, "step": 18424, "train/loss_ctc": 0.5435886383056641, "train/loss_error": 0.43904128670692444, "train/loss_total": 0.4599507749080658 }, { "epoch": 4.922522041143468, "step": 18425, "train/loss_ctc": 1.4201722145080566, "train/loss_error": 0.45136260986328125, "train/loss_total": 0.6451245546340942 }, { "epoch": 
4.922789206518835, "step": 18426, "train/loss_ctc": 0.5266904234886169, "train/loss_error": 0.430436372756958, "train/loss_total": 0.4496872127056122 }, { "epoch": 4.923056371894202, "step": 18427, "train/loss_ctc": 0.48927444219589233, "train/loss_error": 0.5292580127716064, "train/loss_total": 0.5212613344192505 }, { "epoch": 4.92332353726957, "step": 18428, "train/loss_ctc": 0.8023239374160767, "train/loss_error": 0.3703594207763672, "train/loss_total": 0.45675233006477356 }, { "epoch": 4.923590702644937, "step": 18429, "train/loss_ctc": 0.7801229357719421, "train/loss_error": 0.45523765683174133, "train/loss_total": 0.5202147364616394 }, { "epoch": 4.9238578680203045, "grad_norm": 2.345933198928833, "learning_rate": 4.712797221480096e-07, "loss": 0.507, "step": 18430 }, { "epoch": 4.9238578680203045, "step": 18430, "train/loss_ctc": 1.1538136005401611, "train/loss_error": 0.44129103422164917, "train/loss_total": 0.5837955474853516 }, { "epoch": 4.924125033395672, "step": 18431, "train/loss_ctc": 1.063791036605835, "train/loss_error": 0.4566895365715027, "train/loss_total": 0.578109860420227 }, { "epoch": 4.92439219877104, "step": 18432, "train/loss_ctc": 0.2242630422115326, "train/loss_error": 0.4333699643611908, "train/loss_total": 0.39154860377311707 }, { "epoch": 4.9246593641464065, "step": 18433, "train/loss_ctc": 0.6847295761108398, "train/loss_error": 0.4805421531200409, "train/loss_total": 0.5213796496391296 }, { "epoch": 4.924926529521774, "step": 18434, "train/loss_ctc": 0.6276095509529114, "train/loss_error": 0.45089370012283325, "train/loss_total": 0.4862368702888489 }, { "epoch": 4.925193694897141, "step": 18435, "train/loss_ctc": 0.7738951444625854, "train/loss_error": 0.3970091640949249, "train/loss_total": 0.47238636016845703 }, { "epoch": 4.9254608602725085, "step": 18436, "train/loss_ctc": 0.38207489252090454, "train/loss_error": 0.3908555805683136, "train/loss_total": 0.38909944891929626 }, { "epoch": 4.925728025647876, "step": 18437, 
"train/loss_ctc": 1.045723795890808, "train/loss_error": 0.47073641419410706, "train/loss_total": 0.5857338905334473 }, { "epoch": 4.925995191023244, "step": 18438, "train/loss_ctc": 1.1114121675491333, "train/loss_error": 0.42547017335891724, "train/loss_total": 0.5626585483551025 }, { "epoch": 4.9262623563986105, "step": 18439, "train/loss_ctc": 0.535437285900116, "train/loss_error": 0.45021846890449524, "train/loss_total": 0.46726223826408386 }, { "epoch": 4.926529521773978, "grad_norm": 2.3223845958709717, "learning_rate": 4.5524979962596853e-07, "loss": 0.5038, "step": 18440 }, { "epoch": 4.926529521773978, "step": 18440, "train/loss_ctc": 0.3753114938735962, "train/loss_error": 0.47435617446899414, "train/loss_total": 0.454547256231308 }, { "epoch": 4.926796687149346, "step": 18441, "train/loss_ctc": 0.7399849891662598, "train/loss_error": 0.39601290225982666, "train/loss_total": 0.46480733156204224 }, { "epoch": 4.927063852524713, "step": 18442, "train/loss_ctc": 0.9436044692993164, "train/loss_error": 0.43379127979278564, "train/loss_total": 0.5357539057731628 }, { "epoch": 4.92733101790008, "step": 18443, "train/loss_ctc": 0.8300120830535889, "train/loss_error": 0.4941006898880005, "train/loss_total": 0.5612829923629761 }, { "epoch": 4.927598183275448, "step": 18444, "train/loss_ctc": 1.126768708229065, "train/loss_error": 0.4704841375350952, "train/loss_total": 0.6017410755157471 }, { "epoch": 4.927865348650815, "step": 18445, "train/loss_ctc": 0.9360021352767944, "train/loss_error": 0.41192469000816345, "train/loss_total": 0.5167402029037476 }, { "epoch": 4.928132514026182, "step": 18446, "train/loss_ctc": 0.8522141575813293, "train/loss_error": 0.5123242735862732, "train/loss_total": 0.5803022384643555 }, { "epoch": 4.92839967940155, "step": 18447, "train/loss_ctc": 0.3324524164199829, "train/loss_error": 0.40972110629081726, "train/loss_total": 0.39426738023757935 }, { "epoch": 4.928666844776917, "step": 18448, "train/loss_ctc": 0.35772836208343506, 
"train/loss_error": 0.3786020278930664, "train/loss_total": 0.37442731857299805 }, { "epoch": 4.928934010152284, "step": 18449, "train/loss_ctc": 0.3963814973831177, "train/loss_error": 0.46776047348976135, "train/loss_total": 0.4534846842288971 }, { "epoch": 4.929201175527652, "grad_norm": 3.3009531497955322, "learning_rate": 4.3921987710392736e-07, "loss": 0.4937, "step": 18450 }, { "epoch": 4.929201175527652, "step": 18450, "train/loss_ctc": 1.003233790397644, "train/loss_error": 0.3917747139930725, "train/loss_total": 0.5140665769577026 }, { "epoch": 4.929468340903019, "step": 18451, "train/loss_ctc": 0.480438232421875, "train/loss_error": 0.4608549177646637, "train/loss_total": 0.464771568775177 }, { "epoch": 4.929735506278386, "step": 18452, "train/loss_ctc": 0.8068547248840332, "train/loss_error": 0.45473572611808777, "train/loss_total": 0.5251595377922058 }, { "epoch": 4.930002671653754, "step": 18453, "train/loss_ctc": 0.24646452069282532, "train/loss_error": 0.43233922123908997, "train/loss_total": 0.39516428112983704 }, { "epoch": 4.930269837029121, "step": 18454, "train/loss_ctc": 0.5687357187271118, "train/loss_error": 0.52699875831604, "train/loss_total": 0.5353461503982544 }, { "epoch": 4.930537002404488, "step": 18455, "train/loss_ctc": 0.897922933101654, "train/loss_error": 0.4303075969219208, "train/loss_total": 0.5238306522369385 }, { "epoch": 4.930804167779856, "step": 18456, "train/loss_ctc": 0.5764950513839722, "train/loss_error": 0.4144788384437561, "train/loss_total": 0.44688209891319275 }, { "epoch": 4.931071333155223, "step": 18457, "train/loss_ctc": 0.6881861090660095, "train/loss_error": 0.46248969435691833, "train/loss_total": 0.5076289772987366 }, { "epoch": 4.93133849853059, "step": 18458, "train/loss_ctc": 1.587253451347351, "train/loss_error": 0.4585978388786316, "train/loss_total": 0.6843289732933044 }, { "epoch": 4.931605663905958, "step": 18459, "train/loss_ctc": 0.9472365379333496, "train/loss_error": 0.44744059443473816, 
"train/loss_total": 0.5473997592926025 }, { "epoch": 4.931872829281325, "grad_norm": 1.6245994567871094, "learning_rate": 4.2318995458188624e-07, "loss": 0.5145, "step": 18460 }, { "epoch": 4.931872829281325, "step": 18460, "train/loss_ctc": 0.40142443776130676, "train/loss_error": 0.48639538884162903, "train/loss_total": 0.46940121054649353 }, { "epoch": 4.932139994656692, "step": 18461, "train/loss_ctc": 0.9102709293365479, "train/loss_error": 0.4077724814414978, "train/loss_total": 0.5082721710205078 }, { "epoch": 4.93240716003206, "step": 18462, "train/loss_ctc": 0.44489142298698425, "train/loss_error": 0.4502313435077667, "train/loss_total": 0.44916337728500366 }, { "epoch": 4.932674325407428, "step": 18463, "train/loss_ctc": 1.1704585552215576, "train/loss_error": 0.42992955446243286, "train/loss_total": 0.5780353546142578 }, { "epoch": 4.932941490782794, "step": 18464, "train/loss_ctc": 0.7359052896499634, "train/loss_error": 0.4173927307128906, "train/loss_total": 0.48109525442123413 }, { "epoch": 4.933208656158162, "step": 18465, "train/loss_ctc": 0.7555192708969116, "train/loss_error": 0.4968935251235962, "train/loss_total": 0.5486186742782593 }, { "epoch": 4.933475821533529, "step": 18466, "train/loss_ctc": 0.5001139640808105, "train/loss_error": 0.45741453766822815, "train/loss_total": 0.46595442295074463 }, { "epoch": 4.9337429869088965, "step": 18467, "train/loss_ctc": 0.9055293202400208, "train/loss_error": 0.47336384654045105, "train/loss_total": 0.559796929359436 }, { "epoch": 4.934010152284264, "step": 18468, "train/loss_ctc": 0.8094614744186401, "train/loss_error": 0.3968037962913513, "train/loss_total": 0.47933533787727356 }, { "epoch": 4.934277317659632, "step": 18469, "train/loss_ctc": 1.0851069688796997, "train/loss_error": 0.4744209051132202, "train/loss_total": 0.5965580940246582 }, { "epoch": 4.9345444830349985, "grad_norm": 3.9691667556762695, "learning_rate": 4.0716003205984506e-07, "loss": 0.5136, "step": 18470 }, { "epoch": 
4.9345444830349985, "step": 18470, "train/loss_ctc": 1.2732963562011719, "train/loss_error": 0.46787962317466736, "train/loss_total": 0.6289629936218262 }, { "epoch": 4.934811648410366, "step": 18471, "train/loss_ctc": 0.8339986801147461, "train/loss_error": 0.4374029338359833, "train/loss_total": 0.5167220830917358 }, { "epoch": 4.935078813785733, "step": 18472, "train/loss_ctc": 0.45727723836898804, "train/loss_error": 0.5074431300163269, "train/loss_total": 0.49740996956825256 }, { "epoch": 4.9353459791611005, "step": 18473, "train/loss_ctc": 0.30790597200393677, "train/loss_error": 0.4615526795387268, "train/loss_total": 0.43082335591316223 }, { "epoch": 4.935613144536468, "step": 18474, "train/loss_ctc": 0.8676671385765076, "train/loss_error": 0.4252189099788666, "train/loss_total": 0.5137085318565369 }, { "epoch": 4.935880309911836, "step": 18475, "train/loss_ctc": 0.9790877103805542, "train/loss_error": 0.4535807967185974, "train/loss_total": 0.5586822032928467 }, { "epoch": 4.9361474752872025, "step": 18476, "train/loss_ctc": 0.9100385904312134, "train/loss_error": 0.34901416301727295, "train/loss_total": 0.46121907234191895 }, { "epoch": 4.93641464066257, "step": 18477, "train/loss_ctc": 0.2916540205478668, "train/loss_error": 0.32845407724380493, "train/loss_total": 0.3210940659046173 }, { "epoch": 4.936681806037938, "step": 18478, "train/loss_ctc": 0.5933108925819397, "train/loss_error": 0.42211249470710754, "train/loss_total": 0.456352174282074 }, { "epoch": 4.936948971413305, "step": 18479, "train/loss_ctc": 0.5337618589401245, "train/loss_error": 0.452465683221817, "train/loss_total": 0.46872490644454956 }, { "epoch": 4.937216136788672, "grad_norm": 2.120197296142578, "learning_rate": 3.9113010953780394e-07, "loss": 0.4854, "step": 18480 }, { "epoch": 4.937216136788672, "step": 18480, "train/loss_ctc": 1.1776968240737915, "train/loss_error": 0.36167511343955994, "train/loss_total": 0.5248794555664062 }, { "epoch": 4.93748330216404, "step": 18481, 
"train/loss_ctc": 0.6214834451675415, "train/loss_error": 0.38674023747444153, "train/loss_total": 0.4336888790130615 }, { "epoch": 4.937750467539407, "step": 18482, "train/loss_ctc": 0.8526098728179932, "train/loss_error": 0.3504200577735901, "train/loss_total": 0.4508580267429352 }, { "epoch": 4.938017632914774, "step": 18483, "train/loss_ctc": 0.5776753425598145, "train/loss_error": 0.4628620147705078, "train/loss_total": 0.48582470417022705 }, { "epoch": 4.938284798290142, "step": 18484, "train/loss_ctc": 0.9997625946998596, "train/loss_error": 0.4649266004562378, "train/loss_total": 0.5718938112258911 }, { "epoch": 4.938551963665509, "step": 18485, "train/loss_ctc": 0.7783790826797485, "train/loss_error": 0.4603038430213928, "train/loss_total": 0.523918867111206 }, { "epoch": 4.938819129040876, "step": 18486, "train/loss_ctc": 1.1434087753295898, "train/loss_error": 0.43957045674324036, "train/loss_total": 0.5803381204605103 }, { "epoch": 4.939086294416244, "step": 18487, "train/loss_ctc": 0.6524984836578369, "train/loss_error": 0.4483550488948822, "train/loss_total": 0.4891837239265442 }, { "epoch": 4.939353459791611, "step": 18488, "train/loss_ctc": 0.9314870238304138, "train/loss_error": 0.4386669397354126, "train/loss_total": 0.5372309684753418 }, { "epoch": 4.939620625166978, "step": 18489, "train/loss_ctc": 0.7000606060028076, "train/loss_error": 0.45198604464530945, "train/loss_total": 0.501600980758667 }, { "epoch": 4.939887790542346, "grad_norm": 2.4102442264556885, "learning_rate": 3.7510018701576276e-07, "loss": 0.5099, "step": 18490 }, { "epoch": 4.939887790542346, "step": 18490, "train/loss_ctc": 1.0929930210113525, "train/loss_error": 0.4413917064666748, "train/loss_total": 0.5717120170593262 }, { "epoch": 4.940154955917713, "step": 18491, "train/loss_ctc": 0.5192736983299255, "train/loss_error": 0.4312838613986969, "train/loss_total": 0.4488818347454071 }, { "epoch": 4.94042212129308, "step": 18492, "train/loss_ctc": 0.5774286985397339, 
"train/loss_error": 0.41886448860168457, "train/loss_total": 0.4505773186683655 }, { "epoch": 4.940689286668448, "step": 18493, "train/loss_ctc": 0.6667329668998718, "train/loss_error": 0.4669587314128876, "train/loss_total": 0.5069136023521423 }, { "epoch": 4.940956452043816, "step": 18494, "train/loss_ctc": 0.6508669853210449, "train/loss_error": 0.44170936942100525, "train/loss_total": 0.4835408926010132 }, { "epoch": 4.941223617419182, "step": 18495, "train/loss_ctc": 0.46378833055496216, "train/loss_error": 0.4377609193325043, "train/loss_total": 0.44296640157699585 }, { "epoch": 4.94149078279455, "step": 18496, "train/loss_ctc": 0.8476307392120361, "train/loss_error": 0.4046017825603485, "train/loss_total": 0.49320757389068604 }, { "epoch": 4.941757948169917, "step": 18497, "train/loss_ctc": 1.2112252712249756, "train/loss_error": 0.46285733580589294, "train/loss_total": 0.6125309467315674 }, { "epoch": 4.942025113545284, "step": 18498, "train/loss_ctc": 0.6431455612182617, "train/loss_error": 0.46521928906440735, "train/loss_total": 0.5008045434951782 }, { "epoch": 4.942292278920652, "step": 18499, "train/loss_ctc": 0.7286018133163452, "train/loss_error": 0.44122445583343506, "train/loss_total": 0.49869993329048157 }, { "epoch": 4.94255944429602, "grad_norm": 1.1366701126098633, "learning_rate": 3.5907026449372164e-07, "loss": 0.501, "step": 18500 }, { "epoch": 4.94255944429602, "step": 18500, "train/loss_ctc": 0.9920846819877625, "train/loss_error": 0.479985773563385, "train/loss_total": 0.5824055671691895 }, { "epoch": 4.942826609671386, "step": 18501, "train/loss_ctc": 0.33267873525619507, "train/loss_error": 0.41091546416282654, "train/loss_total": 0.39526811242103577 }, { "epoch": 4.943093775046754, "step": 18502, "train/loss_ctc": 0.9306952357292175, "train/loss_error": 0.4381648600101471, "train/loss_total": 0.5366709232330322 }, { "epoch": 4.943360940422121, "step": 18503, "train/loss_ctc": 0.8204091787338257, "train/loss_error": 0.3768685460090637, 
"train/loss_total": 0.4655766785144806 }, { "epoch": 4.9436281057974885, "step": 18504, "train/loss_ctc": 1.2237826585769653, "train/loss_error": 0.34160760045051575, "train/loss_total": 0.5180426239967346 }, { "epoch": 4.943895271172856, "step": 18505, "train/loss_ctc": 0.9331648349761963, "train/loss_error": 0.44377878308296204, "train/loss_total": 0.5416560173034668 }, { "epoch": 4.944162436548224, "step": 18506, "train/loss_ctc": 0.677636981010437, "train/loss_error": 0.502494752407074, "train/loss_total": 0.5375232100486755 }, { "epoch": 4.9444296019235905, "step": 18507, "train/loss_ctc": 1.5050337314605713, "train/loss_error": 0.44742655754089355, "train/loss_total": 0.6589480042457581 }, { "epoch": 4.944696767298958, "step": 18508, "train/loss_ctc": 0.6425398588180542, "train/loss_error": 0.3859204947948456, "train/loss_total": 0.43724438548088074 }, { "epoch": 4.944963932674326, "step": 18509, "train/loss_ctc": 0.8982757925987244, "train/loss_error": 0.5416051745414734, "train/loss_total": 0.6129392981529236 }, { "epoch": 4.9452310980496925, "grad_norm": 5.29604959487915, "learning_rate": 3.430403419716805e-07, "loss": 0.5286, "step": 18510 }, { "epoch": 4.9452310980496925, "step": 18510, "train/loss_ctc": 0.6400436162948608, "train/loss_error": 0.43254831433296204, "train/loss_total": 0.47404739260673523 }, { "epoch": 4.94549826342506, "step": 18511, "train/loss_ctc": 0.5872693657875061, "train/loss_error": 0.3821602463722229, "train/loss_total": 0.42318207025527954 }, { "epoch": 4.945765428800428, "step": 18512, "train/loss_ctc": 0.7502180933952332, "train/loss_error": 0.45416542887687683, "train/loss_total": 0.513375997543335 }, { "epoch": 4.9460325941757946, "step": 18513, "train/loss_ctc": 0.5519763231277466, "train/loss_error": 0.4045587480068207, "train/loss_total": 0.4340422749519348 }, { "epoch": 4.946299759551162, "step": 18514, "train/loss_ctc": 0.539201021194458, "train/loss_error": 0.39390912652015686, "train/loss_total": 0.4229675233364105 }, 
{ "epoch": 4.94656692492653, "step": 18515, "train/loss_ctc": 1.1746999025344849, "train/loss_error": 0.4438650608062744, "train/loss_total": 0.5900320410728455 }, { "epoch": 4.946834090301897, "step": 18516, "train/loss_ctc": 0.5547602772712708, "train/loss_error": 0.42642202973365784, "train/loss_total": 0.45208966732025146 }, { "epoch": 4.947101255677264, "step": 18517, "train/loss_ctc": 0.2839178442955017, "train/loss_error": 0.40102827548980713, "train/loss_total": 0.37760618329048157 }, { "epoch": 4.947368421052632, "step": 18518, "train/loss_ctc": 1.1900945901870728, "train/loss_error": 0.44960397481918335, "train/loss_total": 0.5977020859718323 }, { "epoch": 4.947635586427999, "step": 18519, "train/loss_ctc": 0.9904882907867432, "train/loss_error": 0.4245830476284027, "train/loss_total": 0.5377641320228577 }, { "epoch": 4.947902751803366, "grad_norm": 8.704681396484375, "learning_rate": 3.2701041944963934e-07, "loss": 0.4823, "step": 18520 }, { "epoch": 4.947902751803366, "step": 18520, "train/loss_ctc": 0.5088198184967041, "train/loss_error": 0.34461212158203125, "train/loss_total": 0.37745365500450134 }, { "epoch": 4.948169917178734, "step": 18521, "train/loss_ctc": 0.24511563777923584, "train/loss_error": 0.41017070412635803, "train/loss_total": 0.3771596848964691 }, { "epoch": 4.948437082554101, "step": 18522, "train/loss_ctc": 0.48887333273887634, "train/loss_error": 0.4455728828907013, "train/loss_total": 0.45423299074172974 }, { "epoch": 4.948704247929468, "step": 18523, "train/loss_ctc": 0.7442673444747925, "train/loss_error": 0.46971479058265686, "train/loss_total": 0.524625301361084 }, { "epoch": 4.948971413304836, "step": 18524, "train/loss_ctc": 1.2185183763504028, "train/loss_error": 0.45740678906440735, "train/loss_total": 0.6096290946006775 }, { "epoch": 4.949238578680203, "step": 18525, "train/loss_ctc": 0.7542240023612976, "train/loss_error": 0.40491458773612976, "train/loss_total": 0.4747764468193054 }, { "epoch": 4.94950574405557, "step": 
18526, "train/loss_ctc": 1.107193946838379, "train/loss_error": 0.3832702040672302, "train/loss_total": 0.52805495262146 }, { "epoch": 4.949772909430938, "step": 18527, "train/loss_ctc": 0.8807530999183655, "train/loss_error": 0.46266987919807434, "train/loss_total": 0.5462865233421326 }, { "epoch": 4.950040074806305, "step": 18528, "train/loss_ctc": 0.5730050802230835, "train/loss_error": 0.5059388279914856, "train/loss_total": 0.5193520784378052 }, { "epoch": 4.950307240181672, "step": 18529, "train/loss_ctc": 0.7543739080429077, "train/loss_error": 0.4061339497566223, "train/loss_total": 0.4757819175720215 }, { "epoch": 4.95057440555704, "grad_norm": 3.612644910812378, "learning_rate": 3.109804969275982e-07, "loss": 0.4887, "step": 18530 }, { "epoch": 4.95057440555704, "step": 18530, "train/loss_ctc": 0.3162084221839905, "train/loss_error": 0.38720402121543884, "train/loss_total": 0.3730049133300781 }, { "epoch": 4.950841570932408, "step": 18531, "train/loss_ctc": 0.669013261795044, "train/loss_error": 0.3992070257663727, "train/loss_total": 0.45316827297210693 }, { "epoch": 4.951108736307774, "step": 18532, "train/loss_ctc": 0.41914689540863037, "train/loss_error": 0.38215553760528564, "train/loss_total": 0.38955381512641907 }, { "epoch": 4.951375901683142, "step": 18533, "train/loss_ctc": 0.511085569858551, "train/loss_error": 0.4450387954711914, "train/loss_total": 0.4582481384277344 }, { "epoch": 4.951643067058509, "step": 18534, "train/loss_ctc": 0.8697695732116699, "train/loss_error": 0.41479063034057617, "train/loss_total": 0.5057864189147949 }, { "epoch": 4.951910232433876, "step": 18535, "train/loss_ctc": 0.6803817749023438, "train/loss_error": 0.4317971467971802, "train/loss_total": 0.4815140962600708 }, { "epoch": 4.952177397809244, "step": 18536, "train/loss_ctc": 0.5615166425704956, "train/loss_error": 0.37877383828163147, "train/loss_total": 0.4153224229812622 }, { "epoch": 4.952444563184612, "step": 18537, "train/loss_ctc": 0.47555187344551086, 
"train/loss_error": 0.349672794342041, "train/loss_total": 0.3748486340045929 }, { "epoch": 4.952711728559978, "step": 18538, "train/loss_ctc": 0.4652160406112671, "train/loss_error": 0.4371042549610138, "train/loss_total": 0.44272661209106445 }, { "epoch": 4.952978893935346, "step": 18539, "train/loss_ctc": 0.7694604992866516, "train/loss_error": 0.4474899172782898, "train/loss_total": 0.5118840336799622 }, { "epoch": 4.953246059310714, "grad_norm": 2.3225207328796387, "learning_rate": 2.9495057440555704e-07, "loss": 0.4406, "step": 18540 }, { "epoch": 4.953246059310714, "step": 18540, "train/loss_ctc": 0.5558003187179565, "train/loss_error": 0.4434879422187805, "train/loss_total": 0.4659504294395447 }, { "epoch": 4.9535132246860805, "step": 18541, "train/loss_ctc": 0.5744670033454895, "train/loss_error": 0.4109382927417755, "train/loss_total": 0.44364404678344727 }, { "epoch": 4.953780390061448, "step": 18542, "train/loss_ctc": 1.0401921272277832, "train/loss_error": 0.44740816950798035, "train/loss_total": 0.5659649968147278 }, { "epoch": 4.954047555436816, "step": 18543, "train/loss_ctc": 0.6675909161567688, "train/loss_error": 0.4211186468601227, "train/loss_total": 0.47041311860084534 }, { "epoch": 4.9543147208121825, "step": 18544, "train/loss_ctc": 0.38363856077194214, "train/loss_error": 0.34582599997520447, "train/loss_total": 0.3533885180950165 }, { "epoch": 4.95458188618755, "step": 18545, "train/loss_ctc": 0.45153599977493286, "train/loss_error": 0.38945919275283813, "train/loss_total": 0.4018745720386505 }, { "epoch": 4.954849051562918, "step": 18546, "train/loss_ctc": 0.31409651041030884, "train/loss_error": 0.42816469073295593, "train/loss_total": 0.40535107254981995 }, { "epoch": 4.9551162169382845, "step": 18547, "train/loss_ctc": 0.598950207233429, "train/loss_error": 0.409557968378067, "train/loss_total": 0.4474364221096039 }, { "epoch": 4.955383382313652, "step": 18548, "train/loss_ctc": 0.515943706035614, "train/loss_error": 
0.39687108993530273, "train/loss_total": 0.42068561911582947 }, { "epoch": 4.95565054768902, "step": 18549, "train/loss_ctc": 0.5193259119987488, "train/loss_error": 0.42654356360435486, "train/loss_total": 0.4451000392436981 }, { "epoch": 4.9559177130643866, "grad_norm": 2.206554889678955, "learning_rate": 2.789206518835159e-07, "loss": 0.442, "step": 18550 }, { "epoch": 4.9559177130643866, "step": 18550, "train/loss_ctc": 0.686362624168396, "train/loss_error": 0.4855912923812866, "train/loss_total": 0.5257455706596375 }, { "epoch": 4.956184878439754, "step": 18551, "train/loss_ctc": 0.3675239086151123, "train/loss_error": 0.49439477920532227, "train/loss_total": 0.4690206050872803 }, { "epoch": 4.956452043815122, "step": 18552, "train/loss_ctc": 1.3273661136627197, "train/loss_error": 0.46252351999282837, "train/loss_total": 0.6354920268058777 }, { "epoch": 4.956719209190489, "step": 18553, "train/loss_ctc": 0.55523681640625, "train/loss_error": 0.43382713198661804, "train/loss_total": 0.4581090807914734 }, { "epoch": 4.956986374565856, "step": 18554, "train/loss_ctc": 0.763677716255188, "train/loss_error": 0.41255733370780945, "train/loss_total": 0.48278141021728516 }, { "epoch": 4.957253539941224, "step": 18555, "train/loss_ctc": 0.8101323843002319, "train/loss_error": 0.44243890047073364, "train/loss_total": 0.5159776210784912 }, { "epoch": 4.957520705316591, "step": 18556, "train/loss_ctc": 0.8413362503051758, "train/loss_error": 0.4259016215801239, "train/loss_total": 0.5089885592460632 }, { "epoch": 4.957787870691958, "step": 18557, "train/loss_ctc": 0.8631284236907959, "train/loss_error": 0.3938238024711609, "train/loss_total": 0.4876847267150879 }, { "epoch": 4.958055036067326, "step": 18558, "train/loss_ctc": 0.6860193610191345, "train/loss_error": 0.4668382406234741, "train/loss_total": 0.5106744766235352 }, { "epoch": 4.958322201442693, "step": 18559, "train/loss_ctc": 0.654869794845581, "train/loss_error": 0.45529600977897644, "train/loss_total": 
0.49521076679229736 }, { "epoch": 4.95858936681806, "grad_norm": 4.112331867218018, "learning_rate": 2.6289072936147474e-07, "loss": 0.509, "step": 18560 }, { "epoch": 4.95858936681806, "step": 18560, "train/loss_ctc": 0.5414286851882935, "train/loss_error": 0.4196982979774475, "train/loss_total": 0.44404441118240356 }, { "epoch": 4.958856532193428, "step": 18561, "train/loss_ctc": 1.1302884817123413, "train/loss_error": 0.49624380469322205, "train/loss_total": 0.623052716255188 }, { "epoch": 4.959123697568796, "step": 18562, "train/loss_ctc": 1.0427970886230469, "train/loss_error": 0.4372856020927429, "train/loss_total": 0.5583878755569458 }, { "epoch": 4.959390862944162, "step": 18563, "train/loss_ctc": 0.4895046353340149, "train/loss_error": 0.4477745294570923, "train/loss_total": 0.4561205506324768 }, { "epoch": 4.95965802831953, "step": 18564, "train/loss_ctc": 0.467216432094574, "train/loss_error": 0.4620067775249481, "train/loss_total": 0.46304869651794434 }, { "epoch": 4.959925193694897, "step": 18565, "train/loss_ctc": 0.9580649733543396, "train/loss_error": 0.4324493408203125, "train/loss_total": 0.5375725030899048 }, { "epoch": 4.960192359070264, "step": 18566, "train/loss_ctc": 0.5586056113243103, "train/loss_error": 0.4527222514152527, "train/loss_total": 0.4738989472389221 }, { "epoch": 4.960459524445632, "step": 18567, "train/loss_ctc": 0.6343231201171875, "train/loss_error": 0.4170917570590973, "train/loss_total": 0.46053802967071533 }, { "epoch": 4.960726689821, "step": 18568, "train/loss_ctc": 0.8005228042602539, "train/loss_error": 0.3989192843437195, "train/loss_total": 0.4792400002479553 }, { "epoch": 4.960993855196366, "step": 18569, "train/loss_ctc": 0.6858240365982056, "train/loss_error": 0.39388638734817505, "train/loss_total": 0.4522739052772522 }, { "epoch": 4.961261020571734, "grad_norm": 1.891562581062317, "learning_rate": 2.468608068394336e-07, "loss": 0.4948, "step": 18570 }, { "epoch": 4.961261020571734, "step": 18570, 
"train/loss_ctc": 0.6117855310440063, "train/loss_error": 0.3518240749835968, "train/loss_total": 0.4038163721561432 }, { "epoch": 4.961528185947101, "step": 18571, "train/loss_ctc": 0.8191152811050415, "train/loss_error": 0.4980553388595581, "train/loss_total": 0.5622673034667969 }, { "epoch": 4.961795351322468, "step": 18572, "train/loss_ctc": 0.26003894209861755, "train/loss_error": 0.4204840362071991, "train/loss_total": 0.3883950412273407 }, { "epoch": 4.962062516697836, "step": 18573, "train/loss_ctc": 0.7990569472312927, "train/loss_error": 0.37349310517311096, "train/loss_total": 0.45860588550567627 }, { "epoch": 4.962329682073204, "step": 18574, "train/loss_ctc": 0.5649638175964355, "train/loss_error": 0.4034532606601715, "train/loss_total": 0.4357553720474243 }, { "epoch": 4.96259684744857, "step": 18575, "train/loss_ctc": 1.251352071762085, "train/loss_error": 0.49444183707237244, "train/loss_total": 0.6458238959312439 }, { "epoch": 4.962864012823938, "step": 18576, "train/loss_ctc": 0.43900591135025024, "train/loss_error": 0.43753474950790405, "train/loss_total": 0.43782898783683777 }, { "epoch": 4.963131178199306, "step": 18577, "train/loss_ctc": 0.4400615692138672, "train/loss_error": 0.404025137424469, "train/loss_total": 0.4112324118614197 }, { "epoch": 4.9633983435746725, "step": 18578, "train/loss_ctc": 0.40306249260902405, "train/loss_error": 0.3776331841945648, "train/loss_total": 0.3827190697193146 }, { "epoch": 4.96366550895004, "step": 18579, "train/loss_ctc": 0.4401116371154785, "train/loss_error": 0.47845733165740967, "train/loss_total": 0.4707881808280945 }, { "epoch": 4.963932674325408, "grad_norm": 1.3451240062713623, "learning_rate": 2.3083088431739247e-07, "loss": 0.4597, "step": 18580 }, { "epoch": 4.963932674325408, "step": 18580, "train/loss_ctc": 0.7545377016067505, "train/loss_error": 0.4797884523868561, "train/loss_total": 0.534738302230835 }, { "epoch": 4.9641998397007745, "step": 18581, "train/loss_ctc": 0.49122345447540283, 
"train/loss_error": 0.4683211147785187, "train/loss_total": 0.4729015827178955 }, { "epoch": 4.964467005076142, "step": 18582, "train/loss_ctc": 0.8946260213851929, "train/loss_error": 0.375965416431427, "train/loss_total": 0.4796975255012512 }, { "epoch": 4.96473417045151, "step": 18583, "train/loss_ctc": 0.38242098689079285, "train/loss_error": 0.4983844757080078, "train/loss_total": 0.47519180178642273 }, { "epoch": 4.9650013358268765, "step": 18584, "train/loss_ctc": 0.5673075318336487, "train/loss_error": 0.3422441780567169, "train/loss_total": 0.3872568607330322 }, { "epoch": 4.965268501202244, "step": 18585, "train/loss_ctc": 0.5468193292617798, "train/loss_error": 0.41929009556770325, "train/loss_total": 0.44479596614837646 }, { "epoch": 4.965535666577612, "step": 18586, "train/loss_ctc": 0.45941948890686035, "train/loss_error": 0.43743640184402466, "train/loss_total": 0.4418330192565918 }, { "epoch": 4.965802831952979, "step": 18587, "train/loss_ctc": 0.4629017114639282, "train/loss_error": 0.35280904173851013, "train/loss_total": 0.3748275935649872 }, { "epoch": 4.966069997328346, "step": 18588, "train/loss_ctc": 0.9622985124588013, "train/loss_error": 0.4362180829048157, "train/loss_total": 0.5414341688156128 }, { "epoch": 4.966337162703714, "step": 18589, "train/loss_ctc": 0.6128636002540588, "train/loss_error": 0.4239376485347748, "train/loss_total": 0.46172285079956055 }, { "epoch": 4.966604328079081, "grad_norm": 3.3162219524383545, "learning_rate": 2.1480096179535132e-07, "loss": 0.4614, "step": 18590 }, { "epoch": 4.966604328079081, "step": 18590, "train/loss_ctc": 0.6423502564430237, "train/loss_error": 0.4522833228111267, "train/loss_total": 0.49029672145843506 }, { "epoch": 4.966871493454448, "step": 18591, "train/loss_ctc": 0.8196977972984314, "train/loss_error": 0.4168790876865387, "train/loss_total": 0.4974428415298462 }, { "epoch": 4.967138658829816, "step": 18592, "train/loss_ctc": 0.7621356248855591, "train/loss_error": 0.4054383933544159, 
"train/loss_total": 0.4767778515815735 }, { "epoch": 4.9674058242051835, "step": 18593, "train/loss_ctc": 1.061350703239441, "train/loss_error": 0.43571075797080994, "train/loss_total": 0.5608387589454651 }, { "epoch": 4.96767298958055, "step": 18594, "train/loss_ctc": 0.3250090777873993, "train/loss_error": 0.42349499464035034, "train/loss_total": 0.40379780530929565 }, { "epoch": 4.967940154955918, "step": 18595, "train/loss_ctc": 0.6901514530181885, "train/loss_error": 0.47307276725769043, "train/loss_total": 0.5164885520935059 }, { "epoch": 4.968207320331285, "step": 18596, "train/loss_ctc": 0.9321879148483276, "train/loss_error": 0.4375385344028473, "train/loss_total": 0.5364684462547302 }, { "epoch": 4.968474485706652, "step": 18597, "train/loss_ctc": 0.6527688503265381, "train/loss_error": 0.46453118324279785, "train/loss_total": 0.5021787285804749 }, { "epoch": 4.96874165108202, "step": 18598, "train/loss_ctc": 1.0409302711486816, "train/loss_error": 0.4477698504924774, "train/loss_total": 0.5664019584655762 }, { "epoch": 4.969008816457388, "step": 18599, "train/loss_ctc": 0.8692806959152222, "train/loss_error": 0.4762226343154907, "train/loss_total": 0.554834246635437 }, { "epoch": 4.969275981832754, "grad_norm": 2.282349109649658, "learning_rate": 1.9877103927331017e-07, "loss": 0.5106, "step": 18600 }, { "epoch": 4.969275981832754, "step": 18600, "train/loss_ctc": 0.8348454236984253, "train/loss_error": 0.41688090562820435, "train/loss_total": 0.5004738569259644 }, { "epoch": 4.969543147208122, "step": 18601, "train/loss_ctc": 0.5824679136276245, "train/loss_error": 0.4601750075817108, "train/loss_total": 0.48463359475135803 }, { "epoch": 4.969810312583489, "step": 18602, "train/loss_ctc": 0.9663945436477661, "train/loss_error": 0.3977351784706116, "train/loss_total": 0.5114670991897583 }, { "epoch": 4.970077477958856, "step": 18603, "train/loss_ctc": 1.2736525535583496, "train/loss_error": 0.42823857069015503, "train/loss_total": 0.5973213911056519 }, { 
"epoch": 4.970344643334224, "step": 18604, "train/loss_ctc": 0.5786442756652832, "train/loss_error": 0.41793400049209595, "train/loss_total": 0.45007607340812683 }, { "epoch": 4.970611808709592, "step": 18605, "train/loss_ctc": 0.5590112209320068, "train/loss_error": 0.5044015645980835, "train/loss_total": 0.5153235197067261 }, { "epoch": 4.970878974084958, "step": 18606, "train/loss_ctc": 0.35124921798706055, "train/loss_error": 0.3858034610748291, "train/loss_total": 0.3788926303386688 }, { "epoch": 4.971146139460326, "step": 18607, "train/loss_ctc": 1.4268105030059814, "train/loss_error": 0.4628690481185913, "train/loss_total": 0.6556573510169983 }, { "epoch": 4.971413304835694, "step": 18608, "train/loss_ctc": 1.1615943908691406, "train/loss_error": 0.473937451839447, "train/loss_total": 0.6114688515663147 }, { "epoch": 4.97168047021106, "step": 18609, "train/loss_ctc": 0.7236102819442749, "train/loss_error": 0.44800493121147156, "train/loss_total": 0.5031260251998901 }, { "epoch": 4.971947635586428, "grad_norm": 1.539057970046997, "learning_rate": 1.8274111675126905e-07, "loss": 0.5208, "step": 18610 }, { "epoch": 4.971947635586428, "step": 18610, "train/loss_ctc": 1.1777170896530151, "train/loss_error": 0.4153076112270355, "train/loss_total": 0.5677894949913025 }, { "epoch": 4.972214800961796, "step": 18611, "train/loss_ctc": 0.7053682804107666, "train/loss_error": 0.4958760142326355, "train/loss_total": 0.5377745032310486 }, { "epoch": 4.972481966337162, "step": 18612, "train/loss_ctc": 1.182775855064392, "train/loss_error": 0.46220532059669495, "train/loss_total": 0.6063194274902344 }, { "epoch": 4.97274913171253, "step": 18613, "train/loss_ctc": 1.2840900421142578, "train/loss_error": 0.45645198225975037, "train/loss_total": 0.6219795942306519 }, { "epoch": 4.973016297087898, "step": 18614, "train/loss_ctc": 1.195807933807373, "train/loss_error": 0.4299066960811615, "train/loss_total": 0.5830869674682617 }, { "epoch": 4.9732834624632645, "step": 18615, 
"train/loss_ctc": 0.5736970901489258, "train/loss_error": 0.4033273160457611, "train/loss_total": 0.43740126490592957 }, { "epoch": 4.973550627838632, "step": 18616, "train/loss_ctc": 0.30595487356185913, "train/loss_error": 0.4327565133571625, "train/loss_total": 0.4073961675167084 }, { "epoch": 4.973817793214, "step": 18617, "train/loss_ctc": 1.211712121963501, "train/loss_error": 0.43653783202171326, "train/loss_total": 0.5915727019309998 }, { "epoch": 4.9740849585893665, "step": 18618, "train/loss_ctc": 0.9997656941413879, "train/loss_error": 0.4396519958972931, "train/loss_total": 0.5516747236251831 }, { "epoch": 4.974352123964734, "step": 18619, "train/loss_ctc": 0.5545680522918701, "train/loss_error": 0.5141168832778931, "train/loss_total": 0.5222071409225464 }, { "epoch": 4.974619289340102, "grad_norm": 1.5022746324539185, "learning_rate": 1.667111942292279e-07, "loss": 0.5427, "step": 18620 }, { "epoch": 4.974619289340102, "step": 18620, "train/loss_ctc": 0.3499707579612732, "train/loss_error": 0.43309396505355835, "train/loss_total": 0.4164693355560303 }, { "epoch": 4.9748864547154685, "step": 18621, "train/loss_ctc": 0.7250839471817017, "train/loss_error": 0.4021695852279663, "train/loss_total": 0.46675246953964233 }, { "epoch": 4.975153620090836, "step": 18622, "train/loss_ctc": 0.3626883625984192, "train/loss_error": 0.4209105372428894, "train/loss_total": 0.4092661142349243 }, { "epoch": 4.975420785466204, "step": 18623, "train/loss_ctc": 0.9048370718955994, "train/loss_error": 0.44448360800743103, "train/loss_total": 0.5365543365478516 }, { "epoch": 4.975687950841571, "step": 18624, "train/loss_ctc": 0.4577976167201996, "train/loss_error": 0.46039146184921265, "train/loss_total": 0.45987269282341003 }, { "epoch": 4.975955116216938, "step": 18625, "train/loss_ctc": 0.42017731070518494, "train/loss_error": 0.42101117968559265, "train/loss_total": 0.4208444058895111 }, { "epoch": 4.976222281592306, "step": 18626, "train/loss_ctc": 0.5546854734420776, 
"train/loss_error": 0.530079185962677, "train/loss_total": 0.5350004434585571 }, { "epoch": 4.976489446967673, "step": 18627, "train/loss_ctc": 0.8591393828392029, "train/loss_error": 0.46342524886131287, "train/loss_total": 0.5425680875778198 }, { "epoch": 4.97675661234304, "step": 18628, "train/loss_ctc": 0.636172890663147, "train/loss_error": 0.37963414192199707, "train/loss_total": 0.4309419095516205 }, { "epoch": 4.977023777718408, "step": 18629, "train/loss_ctc": 0.709801435470581, "train/loss_error": 0.5267384052276611, "train/loss_total": 0.563351035118103 }, { "epoch": 4.9772909430937755, "grad_norm": 2.3581621646881104, "learning_rate": 1.5068127170718675e-07, "loss": 0.4782, "step": 18630 }, { "epoch": 4.9772909430937755, "step": 18630, "train/loss_ctc": 0.9646596908569336, "train/loss_error": 0.4506654739379883, "train/loss_total": 0.5534642934799194 }, { "epoch": 4.977558108469142, "step": 18631, "train/loss_ctc": 0.38882726430892944, "train/loss_error": 0.43427348136901855, "train/loss_total": 0.4251842498779297 }, { "epoch": 4.97782527384451, "step": 18632, "train/loss_ctc": 0.7212526798248291, "train/loss_error": 0.43842390179634094, "train/loss_total": 0.49498966336250305 }, { "epoch": 4.978092439219877, "step": 18633, "train/loss_ctc": 0.5518912076950073, "train/loss_error": 0.4672775864601135, "train/loss_total": 0.48420029878616333 }, { "epoch": 4.978359604595244, "step": 18634, "train/loss_ctc": 0.5832160711288452, "train/loss_error": 0.4094744622707367, "train/loss_total": 0.4442228078842163 }, { "epoch": 4.978626769970612, "step": 18635, "train/loss_ctc": 0.5968416929244995, "train/loss_error": 0.412908673286438, "train/loss_total": 0.44969528913497925 }, { "epoch": 4.97889393534598, "step": 18636, "train/loss_ctc": 1.35615074634552, "train/loss_error": 0.441472589969635, "train/loss_total": 0.6244082450866699 }, { "epoch": 4.979161100721346, "step": 18637, "train/loss_ctc": 0.7011151909828186, "train/loss_error": 0.4687228798866272, 
"train/loss_total": 0.5152013301849365 }, { "epoch": 4.979428266096714, "step": 18638, "train/loss_ctc": 0.19106735289096832, "train/loss_error": 0.44134804606437683, "train/loss_total": 0.39129191637039185 }, { "epoch": 4.979695431472082, "step": 18639, "train/loss_ctc": 0.286675363779068, "train/loss_error": 0.45480239391326904, "train/loss_total": 0.4211769998073578 }, { "epoch": 4.979962596847448, "grad_norm": 2.4555504322052, "learning_rate": 1.346513491851456e-07, "loss": 0.4804, "step": 18640 }, { "epoch": 4.979962596847448, "step": 18640, "train/loss_ctc": 0.369998037815094, "train/loss_error": 0.4814891219139099, "train/loss_total": 0.45919090509414673 }, { "epoch": 4.980229762222816, "step": 18641, "train/loss_ctc": 0.41604095697402954, "train/loss_error": 0.3952978849411011, "train/loss_total": 0.3994465172290802 }, { "epoch": 4.980496927598184, "step": 18642, "train/loss_ctc": 1.037980556488037, "train/loss_error": 0.50454181432724, "train/loss_total": 0.6112295389175415 }, { "epoch": 4.98076409297355, "step": 18643, "train/loss_ctc": 0.4258373975753784, "train/loss_error": 0.44127440452575684, "train/loss_total": 0.43818700313568115 }, { "epoch": 4.981031258348918, "step": 18644, "train/loss_ctc": 0.5863071084022522, "train/loss_error": 0.4870372712612152, "train/loss_total": 0.5068912506103516 }, { "epoch": 4.981298423724286, "step": 18645, "train/loss_ctc": 0.7047684192657471, "train/loss_error": 0.40773075819015503, "train/loss_total": 0.46713829040527344 }, { "epoch": 4.981565589099652, "step": 18646, "train/loss_ctc": 0.7152245044708252, "train/loss_error": 0.3296447694301605, "train/loss_total": 0.4067607522010803 }, { "epoch": 4.98183275447502, "step": 18647, "train/loss_ctc": 0.38912704586982727, "train/loss_error": 0.44570592045783997, "train/loss_total": 0.434390127658844 }, { "epoch": 4.982099919850388, "step": 18648, "train/loss_ctc": 0.7902785539627075, "train/loss_error": 0.43764376640319824, "train/loss_total": 0.5081707239151001 }, { 
"epoch": 4.982367085225754, "step": 18649, "train/loss_ctc": 0.4656018316745758, "train/loss_error": 0.5039870738983154, "train/loss_total": 0.4963100254535675 }, { "epoch": 4.982634250601122, "grad_norm": 1.8413487672805786, "learning_rate": 1.1862142666310445e-07, "loss": 0.4728, "step": 18650 }, { "epoch": 4.982634250601122, "step": 18650, "train/loss_ctc": 0.42281240224838257, "train/loss_error": 0.49571681022644043, "train/loss_total": 0.48113593459129333 }, { "epoch": 4.98290141597649, "step": 18651, "train/loss_ctc": 1.0045597553253174, "train/loss_error": 0.4655313193798065, "train/loss_total": 0.5733370184898376 }, { "epoch": 4.9831685813518565, "step": 18652, "train/loss_ctc": 0.5468242168426514, "train/loss_error": 0.5243475437164307, "train/loss_total": 0.5288428664207458 }, { "epoch": 4.983435746727224, "step": 18653, "train/loss_ctc": 0.710015058517456, "train/loss_error": 0.4352317154407501, "train/loss_total": 0.4901884198188782 }, { "epoch": 4.983702912102592, "step": 18654, "train/loss_ctc": 0.823832631111145, "train/loss_error": 0.433415949344635, "train/loss_total": 0.511499285697937 }, { "epoch": 4.9839700774779585, "step": 18655, "train/loss_ctc": 0.9596200585365295, "train/loss_error": 0.5428151488304138, "train/loss_total": 0.626176118850708 }, { "epoch": 4.984237242853326, "step": 18656, "train/loss_ctc": 0.3520207703113556, "train/loss_error": 0.3913087248802185, "train/loss_total": 0.3834511637687683 }, { "epoch": 4.984504408228694, "step": 18657, "train/loss_ctc": 0.7702459096908569, "train/loss_error": 0.3890862762928009, "train/loss_total": 0.4653182029724121 }, { "epoch": 4.9847715736040605, "step": 18658, "train/loss_ctc": 0.13156309723854065, "train/loss_error": 0.4259766936302185, "train/loss_total": 0.3670939803123474 }, { "epoch": 4.985038738979428, "step": 18659, "train/loss_ctc": 1.4197068214416504, "train/loss_error": 0.43316584825515747, "train/loss_total": 0.6304740309715271 }, { "epoch": 4.985305904354796, "grad_norm": 
1.516066551208496, "learning_rate": 1.0259150414106333e-07, "loss": 0.5058, "step": 18660 }, { "epoch": 4.985305904354796, "step": 18660, "train/loss_ctc": 0.5633564591407776, "train/loss_error": 0.4025796353816986, "train/loss_total": 0.4347350001335144 }, { "epoch": 4.9855730697301635, "step": 18661, "train/loss_ctc": 0.33603426814079285, "train/loss_error": 0.3878382444381714, "train/loss_total": 0.3774774670600891 }, { "epoch": 4.98584023510553, "step": 18662, "train/loss_ctc": 0.3810655474662781, "train/loss_error": 0.41431254148483276, "train/loss_total": 0.40766316652297974 }, { "epoch": 4.986107400480898, "step": 18663, "train/loss_ctc": 0.5487233996391296, "train/loss_error": 0.421830952167511, "train/loss_total": 0.4472094774246216 }, { "epoch": 4.986374565856265, "step": 18664, "train/loss_ctc": 0.7352845072746277, "train/loss_error": 0.43741899728775024, "train/loss_total": 0.4969921112060547 }, { "epoch": 4.986641731231632, "step": 18665, "train/loss_ctc": 0.7385026812553406, "train/loss_error": 0.37941402196884155, "train/loss_total": 0.45123177766799927 }, { "epoch": 4.986908896607, "step": 18666, "train/loss_ctc": 0.42601439356803894, "train/loss_error": 0.36418062448501587, "train/loss_total": 0.37654736638069153 }, { "epoch": 4.9871760619823675, "step": 18667, "train/loss_ctc": 0.4200526177883148, "train/loss_error": 0.39606180787086487, "train/loss_total": 0.4008599519729614 }, { "epoch": 4.987443227357734, "step": 18668, "train/loss_ctc": 0.6942234635353088, "train/loss_error": 0.4203527271747589, "train/loss_total": 0.47512689232826233 }, { "epoch": 4.987710392733102, "step": 18669, "train/loss_ctc": 1.004412293434143, "train/loss_error": 0.44682809710502625, "train/loss_total": 0.5583449602127075 }, { "epoch": 4.987977558108469, "grad_norm": 1.5578181743621826, "learning_rate": 8.656158161902218e-08, "loss": 0.4426, "step": 18670 }, { "epoch": 4.987977558108469, "step": 18670, "train/loss_ctc": 0.5278873443603516, "train/loss_error": 
0.42041221261024475, "train/loss_total": 0.44190725684165955 }, { "epoch": 4.988244723483836, "step": 18671, "train/loss_ctc": 0.796130895614624, "train/loss_error": 0.5179845690727234, "train/loss_total": 0.5736138820648193 }, { "epoch": 4.988511888859204, "step": 18672, "train/loss_ctc": 0.3437592387199402, "train/loss_error": 0.3766275644302368, "train/loss_total": 0.37005388736724854 }, { "epoch": 4.988779054234572, "step": 18673, "train/loss_ctc": 0.7978397607803345, "train/loss_error": 0.4319758117198944, "train/loss_total": 0.5051486492156982 }, { "epoch": 4.989046219609938, "step": 18674, "train/loss_ctc": 0.8555976152420044, "train/loss_error": 0.41033080220222473, "train/loss_total": 0.49938416481018066 }, { "epoch": 4.989313384985306, "step": 18675, "train/loss_ctc": 0.31573110818862915, "train/loss_error": 0.46219924092292786, "train/loss_total": 0.4329056441783905 }, { "epoch": 4.989580550360674, "step": 18676, "train/loss_ctc": 0.5233941078186035, "train/loss_error": 0.35347867012023926, "train/loss_total": 0.38746178150177 }, { "epoch": 4.98984771573604, "step": 18677, "train/loss_ctc": 0.550459086894989, "train/loss_error": 0.3861149549484253, "train/loss_total": 0.4189838171005249 }, { "epoch": 4.990114881111408, "step": 18678, "train/loss_ctc": 1.2430038452148438, "train/loss_error": 0.47798535227775574, "train/loss_total": 0.6309890747070312 }, { "epoch": 4.990382046486776, "step": 18679, "train/loss_ctc": 0.6641982793807983, "train/loss_error": 0.479931503534317, "train/loss_total": 0.5167848467826843 }, { "epoch": 4.990649211862142, "grad_norm": 2.6955668926239014, "learning_rate": 7.053165909698103e-08, "loss": 0.4777, "step": 18680 }, { "epoch": 4.990649211862142, "step": 18680, "train/loss_ctc": 0.4932776689529419, "train/loss_error": 0.43407055735588074, "train/loss_total": 0.4459120035171509 }, { "epoch": 4.99091637723751, "step": 18681, "train/loss_ctc": 0.8078809976577759, "train/loss_error": 0.38915684819221497, "train/loss_total": 
0.47290170192718506 }, { "epoch": 4.991183542612878, "step": 18682, "train/loss_ctc": 0.5388038158416748, "train/loss_error": 0.40878814458847046, "train/loss_total": 0.4347912669181824 }, { "epoch": 4.991450707988244, "step": 18683, "train/loss_ctc": 0.42723095417022705, "train/loss_error": 0.3684963583946228, "train/loss_total": 0.38024330139160156 }, { "epoch": 4.991717873363612, "step": 18684, "train/loss_ctc": 0.8363562822341919, "train/loss_error": 0.3745158910751343, "train/loss_total": 0.46688398718833923 }, { "epoch": 4.99198503873898, "step": 18685, "train/loss_ctc": 0.13137686252593994, "train/loss_error": 0.36469417810440063, "train/loss_total": 0.3180307149887085 }, { "epoch": 4.9922522041143464, "step": 18686, "train/loss_ctc": 0.5042941570281982, "train/loss_error": 0.4274044632911682, "train/loss_total": 0.4427824020385742 }, { "epoch": 4.992519369489714, "step": 18687, "train/loss_ctc": 0.41533562541007996, "train/loss_error": 0.39533093571662903, "train/loss_total": 0.39933186769485474 }, { "epoch": 4.992786534865082, "step": 18688, "train/loss_ctc": 0.7768594026565552, "train/loss_error": 0.418621301651001, "train/loss_total": 0.4902689456939697 }, { "epoch": 4.9930537002404485, "step": 18689, "train/loss_ctc": 0.9369009137153625, "train/loss_error": 0.41363298892974854, "train/loss_total": 0.5182865858078003 }, { "epoch": 4.993320865615816, "grad_norm": 1.6175777912139893, "learning_rate": 5.450173657493989e-08, "loss": 0.4369, "step": 18690 }, { "epoch": 4.993320865615816, "step": 18690, "train/loss_ctc": 1.029822587966919, "train/loss_error": 0.4807032346725464, "train/loss_total": 0.5905271172523499 }, { "epoch": 4.993588030991184, "step": 18691, "train/loss_ctc": 0.32183223962783813, "train/loss_error": 0.45035603642463684, "train/loss_total": 0.42465129494667053 }, { "epoch": 4.9938551963665505, "step": 18692, "train/loss_ctc": 0.6225292682647705, "train/loss_error": 0.437999427318573, "train/loss_total": 0.474905401468277 }, { "epoch": 
4.994122361741918, "step": 18693, "train/loss_ctc": 0.4702432155609131, "train/loss_error": 0.45778438448905945, "train/loss_total": 0.46027615666389465 }, { "epoch": 4.994389527117286, "step": 18694, "train/loss_ctc": 1.1546010971069336, "train/loss_error": 0.4048396944999695, "train/loss_total": 0.5547919869422913 }, { "epoch": 4.9946566924926525, "step": 18695, "train/loss_ctc": 1.1476432085037231, "train/loss_error": 0.4476011097431183, "train/loss_total": 0.5876095294952393 }, { "epoch": 4.99492385786802, "step": 18696, "train/loss_ctc": 0.6571122407913208, "train/loss_error": 0.4255405068397522, "train/loss_total": 0.4718548655509949 }, { "epoch": 4.995191023243388, "step": 18697, "train/loss_ctc": 0.7942937612533569, "train/loss_error": 0.42890071868896484, "train/loss_total": 0.5019793510437012 }, { "epoch": 4.9954581886187555, "step": 18698, "train/loss_ctc": 0.3287259042263031, "train/loss_error": 0.3948122262954712, "train/loss_total": 0.3815949559211731 }, { "epoch": 4.995725353994122, "step": 18699, "train/loss_ctc": 1.6621654033660889, "train/loss_error": 0.4081901013851166, "train/loss_total": 0.6589851379394531 }, { "epoch": 4.99599251936949, "grad_norm": 2.089096784591675, "learning_rate": 3.8471814052898745e-08, "loss": 0.5107, "step": 18700 }, { "epoch": 4.99599251936949, "step": 18700, "train/loss_ctc": 0.37074726819992065, "train/loss_error": 0.40486571192741394, "train/loss_total": 0.3980420231819153 }, { "epoch": 4.996259684744857, "step": 18701, "train/loss_ctc": 0.48544156551361084, "train/loss_error": 0.47258830070495605, "train/loss_total": 0.4751589596271515 }, { "epoch": 4.996526850120224, "step": 18702, "train/loss_ctc": 0.39411693811416626, "train/loss_error": 0.44812607765197754, "train/loss_total": 0.43732425570487976 }, { "epoch": 4.996794015495592, "step": 18703, "train/loss_ctc": 1.2709037065505981, "train/loss_error": 0.4442140758037567, "train/loss_total": 0.6095520257949829 }, { "epoch": 4.9970611808709595, "step": 18704, 
"train/loss_ctc": 0.39125263690948486, "train/loss_error": 0.40049269795417786, "train/loss_total": 0.39864468574523926 }, { "epoch": 4.997328346246326, "step": 18705, "train/loss_ctc": 0.7974259853363037, "train/loss_error": 0.4064117968082428, "train/loss_total": 0.48461467027664185 }, { "epoch": 4.997595511621694, "step": 18706, "train/loss_ctc": 1.483520269393921, "train/loss_error": 0.43257030844688416, "train/loss_total": 0.6427602767944336 }, { "epoch": 4.9978626769970615, "step": 18707, "train/loss_ctc": 0.5496208071708679, "train/loss_error": 0.4075310528278351, "train/loss_total": 0.43594902753829956 }, { "epoch": 4.998129842372428, "step": 18708, "train/loss_ctc": 0.2839885950088501, "train/loss_error": 0.46296048164367676, "train/loss_total": 0.4271661043167114 }, { "epoch": 4.998397007747796, "step": 18709, "train/loss_ctc": 0.41874685883522034, "train/loss_error": 0.4055101275444031, "train/loss_total": 0.40815749764442444 }, { "epoch": 4.998664173123164, "grad_norm": 2.0244860649108887, "learning_rate": 2.2441891530857602e-08, "loss": 0.4717, "step": 18710 }, { "epoch": 4.998664173123164, "step": 18710, "train/loss_ctc": 0.8933818340301514, "train/loss_error": 0.46992361545562744, "train/loss_total": 0.5546152591705322 }, { "epoch": 4.99893133849853, "step": 18711, "train/loss_ctc": 0.3144800066947937, "train/loss_error": 0.3905956447124481, "train/loss_total": 0.3753725290298462 }, { "epoch": 4.999198503873898, "step": 18712, "train/loss_ctc": 0.5692902207374573, "train/loss_error": 0.41961854696273804, "train/loss_total": 0.44955289363861084 }, { "epoch": 4.999465669249266, "step": 18713, "train/loss_ctc": 1.0883933305740356, "train/loss_error": 0.5519706606864929, "train/loss_total": 0.6592552065849304 }, { "epoch": 4.999732834624632, "step": 18714, "train/loss_ctc": 0.7072224617004395, "train/loss_error": 0.40985390543937683, "train/loss_total": 0.4693276286125183 }, { "epoch": 5.0, "eval_eval/f1_0": 0.6314021944999695, "eval_eval/f1_1": 
0.8258109092712402, "eval_eval/precision_0": 0.7823147177696228, "eval_eval/precision_1": 0.7568181753158569, "eval_eval/recall_0": 0.5292978882789612, "eval_eval/recall_1": 0.9086443781852722, "eval_eval/wer": 0.15778418382440604, "eval_runtime": 34.9008, "eval_samples_per_second": 13.152, "eval_steps_per_second": 13.152, "step": 18715 } ], "logging_steps": 10, "max_steps": 18715, "num_input_tokens_seen": 0, "num_train_epochs": 5, "save_steps": 500, "stateful_callbacks": { "TrainerControl": { "args": { "should_epoch_stop": false, "should_evaluate": false, "should_log": false, "should_save": true, "should_training_stop": true }, "attributes": {} } }, "total_flos": 0.0, "train_batch_size": 8, "trial_name": null, "trial_params": null }