{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 1.1060723371308483,
  "eval_steps": 500,
  "global_step": 10000,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.011060723371308484,
      "grad_norm": 2677.314208984375,
      "learning_rate": 1.0000000000000002e-06,
      "loss": 227.5515,
      "step": 100
    },
    {
      "epoch": 0.022121446742616967,
      "grad_norm": 635.984375,
      "learning_rate": 2.0000000000000003e-06,
      "loss": 94.427,
      "step": 200
    },
    {
      "epoch": 0.03318217011392545,
      "grad_norm": 661.0513305664062,
      "learning_rate": 3e-06,
      "loss": 73.1687,
      "step": 300
    },
    {
      "epoch": 0.044242893485233935,
      "grad_norm": 3017.78662109375,
      "learning_rate": 4.000000000000001e-06,
      "loss": 173.9834,
      "step": 400
    },
    {
      "epoch": 0.05530361685654242,
      "grad_norm": 519.4037475585938,
      "learning_rate": 5e-06,
      "loss": 137.7788,
      "step": 500
    },
    {
      "epoch": 0.0663643402278509,
      "grad_norm": 8911.849609375,
      "learning_rate": 6e-06,
      "loss": 187.6141,
      "step": 600
    },
    {
      "epoch": 0.07742506359915939,
      "grad_norm": 471.71282958984375,
      "learning_rate": 7e-06,
      "loss": 58.5892,
      "step": 700
    },
    {
      "epoch": 0.08848578697046787,
      "grad_norm": 3600.653564453125,
      "learning_rate": 8.000000000000001e-06,
      "loss": 58.4563,
      "step": 800
    },
    {
      "epoch": 0.09954651034177635,
      "grad_norm": 593.4425048828125,
      "learning_rate": 9e-06,
      "loss": 90.9575,
      "step": 900
    },
    {
      "epoch": 0.11060723371308484,
      "grad_norm": 520.9338989257812,
      "learning_rate": 1e-05,
      "loss": 86.6733,
      "step": 1000
    },
    {
      "epoch": 0.12166795708439332,
      "grad_norm": 2047.7371826171875,
      "learning_rate": 9.88888888888889e-06,
      "loss": 173.2546,
      "step": 1100
    },
    {
      "epoch": 0.1327286804557018,
      "grad_norm": 814.9510498046875,
      "learning_rate": 9.777777777777779e-06,
      "loss": 101.3527,
      "step": 1200
    },
    {
      "epoch": 0.14378940382701028,
      "grad_norm": 334.23162841796875,
      "learning_rate": 9.666666666666667e-06,
      "loss": 106.3003,
      "step": 1300
    },
    {
      "epoch": 0.15485012719831878,
      "grad_norm": 714.3182983398438,
      "learning_rate": 9.555555555555556e-06,
      "loss": 47.5703,
      "step": 1400
    },
    {
      "epoch": 0.16591085056962726,
      "grad_norm": 180.67575073242188,
      "learning_rate": 9.444444444444445e-06,
      "loss": 51.4053,
      "step": 1500
    },
    {
      "epoch": 0.17697157394093574,
      "grad_norm": 489.76275634765625,
      "learning_rate": 9.333333333333334e-06,
      "loss": 101.9069,
      "step": 1600
    },
    {
      "epoch": 0.18803229731224422,
      "grad_norm": 1117.69970703125,
      "learning_rate": 9.222222222222224e-06,
      "loss": 101.1306,
      "step": 1700
    },
    {
      "epoch": 0.1990930206835527,
      "grad_norm": 482.6600646972656,
      "learning_rate": 9.111111111111112e-06,
      "loss": 105.3138,
      "step": 1800
    },
    {
      "epoch": 0.21015374405486117,
      "grad_norm": 9838.111328125,
      "learning_rate": 9e-06,
      "loss": 100.1129,
      "step": 1900
    },
    {
      "epoch": 0.22121446742616968,
      "grad_norm": 1945.24951171875,
      "learning_rate": 8.888888888888888e-06,
      "loss": 127.0558,
      "step": 2000
    },
    {
      "epoch": 0.23227519079747816,
      "grad_norm": 474.45806884765625,
      "learning_rate": 8.777777777777778e-06,
      "loss": 76.4998,
      "step": 2100
    },
    {
      "epoch": 0.24333591416878664,
      "grad_norm": 866.0442504882812,
      "learning_rate": 8.666666666666668e-06,
      "loss": 84.0351,
      "step": 2200
    },
    {
      "epoch": 0.2543966375400951,
      "grad_norm": 983.19384765625,
      "learning_rate": 8.555555555555556e-06,
      "loss": 124.6492,
      "step": 2300
    },
    {
      "epoch": 0.2654573609114036,
      "grad_norm": 539.8901977539062,
      "learning_rate": 8.444444444444446e-06,
      "loss": 67.798,
      "step": 2400
    },
    {
      "epoch": 0.2765180842827121,
      "grad_norm": 143.80145263671875,
      "learning_rate": 8.333333333333334e-06,
      "loss": 134.1144,
      "step": 2500
    },
    {
      "epoch": 0.28757880765402055,
      "grad_norm": 609.1976318359375,
      "learning_rate": 8.222222222222222e-06,
      "loss": 89.9258,
      "step": 2600
    },
    {
      "epoch": 0.29863953102532903,
      "grad_norm": 1026.5499267578125,
      "learning_rate": 8.111111111111112e-06,
      "loss": 60.0396,
      "step": 2700
    },
    {
      "epoch": 0.30970025439663756,
      "grad_norm": 760.7676391601562,
      "learning_rate": 8.000000000000001e-06,
      "loss": 130.439,
      "step": 2800
    },
    {
      "epoch": 0.32076097776794604,
      "grad_norm": 1219.1871337890625,
      "learning_rate": 7.88888888888889e-06,
      "loss": 55.5903,
      "step": 2900
    },
    {
      "epoch": 0.3318217011392545,
      "grad_norm": 171.1070556640625,
      "learning_rate": 7.77777777777778e-06,
      "loss": 88.8811,
      "step": 3000
    },
    {
      "epoch": 0.342882424510563,
      "grad_norm": 344.57537841796875,
      "learning_rate": 7.666666666666667e-06,
      "loss": 96.9235,
      "step": 3100
    },
    {
      "epoch": 0.3539431478818715,
      "grad_norm": 908.0119018554688,
      "learning_rate": 7.555555555555556e-06,
      "loss": 100.6853,
      "step": 3200
    },
    {
      "epoch": 0.36500387125317996,
      "grad_norm": 108.43099975585938,
      "learning_rate": 7.444444444444445e-06,
      "loss": 100.0964,
      "step": 3300
    },
    {
      "epoch": 0.37606459462448844,
      "grad_norm": 417.69342041015625,
      "learning_rate": 7.333333333333333e-06,
      "loss": 94.0516,
      "step": 3400
    },
    {
      "epoch": 0.3871253179957969,
      "grad_norm": 438.87359619140625,
      "learning_rate": 7.222222222222223e-06,
      "loss": 91.7939,
      "step": 3500
    },
    {
      "epoch": 0.3981860413671054,
      "grad_norm": 539.2892456054688,
      "learning_rate": 7.111111111111112e-06,
      "loss": 59.7248,
      "step": 3600
    },
    {
      "epoch": 0.40924676473841387,
      "grad_norm": 1536.0924072265625,
      "learning_rate": 7e-06,
      "loss": 166.3196,
      "step": 3700
    },
    {
      "epoch": 0.42030748810972235,
      "grad_norm": 147.51727294921875,
      "learning_rate": 6.88888888888889e-06,
      "loss": 93.8634,
      "step": 3800
    },
    {
      "epoch": 0.4313682114810309,
      "grad_norm": 327.2782287597656,
      "learning_rate": 6.777777777777779e-06,
      "loss": 70.0039,
      "step": 3900
    },
    {
      "epoch": 0.44242893485233936,
      "grad_norm": 454.30511474609375,
      "learning_rate": 6.666666666666667e-06,
      "loss": 109.2739,
      "step": 4000
    },
    {
      "epoch": 0.45348965822364784,
      "grad_norm": 188.76947021484375,
      "learning_rate": 6.555555555555556e-06,
      "loss": 86.5162,
      "step": 4100
    },
    {
      "epoch": 0.4645503815949563,
      "grad_norm": 270.0735778808594,
      "learning_rate": 6.444444444444445e-06,
      "loss": 148.0442,
      "step": 4200
    },
    {
      "epoch": 0.4756111049662648,
      "grad_norm": 866.9409790039062,
      "learning_rate": 6.333333333333333e-06,
      "loss": 112.571,
      "step": 4300
    },
    {
      "epoch": 0.4866718283375733,
      "grad_norm": 304.2856750488281,
      "learning_rate": 6.222222222222223e-06,
      "loss": 71.1989,
      "step": 4400
    },
    {
      "epoch": 0.49773255170888175,
      "grad_norm": 115.608154296875,
      "learning_rate": 6.111111111111112e-06,
      "loss": 61.8786,
      "step": 4500
    },
    {
      "epoch": 0.5087932750801902,
      "grad_norm": 590.6244506835938,
      "learning_rate": 6e-06,
      "loss": 84.0039,
      "step": 4600
    },
    {
      "epoch": 0.5198539984514987,
      "grad_norm": 764.8768920898438,
      "learning_rate": 5.88888888888889e-06,
      "loss": 110.3587,
      "step": 4700
    },
    {
      "epoch": 0.5309147218228072,
      "grad_norm": 970.3759765625,
      "learning_rate": 5.777777777777778e-06,
      "loss": 69.0591,
      "step": 4800
    },
    {
      "epoch": 0.5419754451941157,
      "grad_norm": 835.3163452148438,
      "learning_rate": 5.666666666666667e-06,
      "loss": 61.6823,
      "step": 4900
    },
    {
      "epoch": 0.5530361685654241,
      "grad_norm": 329.2018737792969,
      "learning_rate": 5.555555555555557e-06,
      "loss": 81.483,
      "step": 5000
    },
    {
      "epoch": 0.5640968919367326,
      "grad_norm": 714.6965942382812,
      "learning_rate": 5.444444444444445e-06,
      "loss": 53.5503,
      "step": 5100
    },
    {
      "epoch": 0.5751576153080411,
      "grad_norm": 634.34033203125,
      "learning_rate": 5.333333333333334e-06,
      "loss": 85.4892,
      "step": 5200
    },
    {
      "epoch": 0.5862183386793496,
      "grad_norm": 751.374755859375,
      "learning_rate": 5.2222222222222226e-06,
      "loss": 58.7359,
      "step": 5300
    },
    {
      "epoch": 0.5972790620506581,
      "grad_norm": 320.8452453613281,
      "learning_rate": 5.1111111111111115e-06,
      "loss": 72.5238,
      "step": 5400
    },
    {
      "epoch": 0.6083397854219667,
      "grad_norm": 1012.67333984375,
      "learning_rate": 5e-06,
      "loss": 81.7175,
      "step": 5500
    },
    {
      "epoch": 0.6194005087932751,
      "grad_norm": 545.6947631835938,
      "learning_rate": 4.888888888888889e-06,
      "loss": 73.193,
      "step": 5600
    },
    {
      "epoch": 0.6304612321645836,
      "grad_norm": 441.7222595214844,
      "learning_rate": 4.777777777777778e-06,
      "loss": 73.501,
      "step": 5700
    },
    {
      "epoch": 0.6415219555358921,
      "grad_norm": 1395.952880859375,
      "learning_rate": 4.666666666666667e-06,
      "loss": 152.7282,
      "step": 5800
    },
    {
      "epoch": 0.6525826789072006,
      "grad_norm": 1098.189697265625,
      "learning_rate": 4.555555555555556e-06,
      "loss": 68.6455,
      "step": 5900
    },
    {
      "epoch": 0.663643402278509,
      "grad_norm": 255.7035369873047,
      "learning_rate": 4.444444444444444e-06,
      "loss": 99.0397,
      "step": 6000
    },
    {
      "epoch": 0.6747041256498175,
      "grad_norm": 2269.526123046875,
      "learning_rate": 4.333333333333334e-06,
      "loss": 104.7769,
      "step": 6100
    },
    {
      "epoch": 0.685764849021126,
      "grad_norm": 753.7120361328125,
      "learning_rate": 4.222222222222223e-06,
      "loss": 40.9474,
      "step": 6200
    },
    {
      "epoch": 0.6968255723924345,
      "grad_norm": 809.7864990234375,
      "learning_rate": 4.111111111111111e-06,
      "loss": 69.8394,
      "step": 6300
    },
    {
      "epoch": 0.707886295763743,
      "grad_norm": 427.16754150390625,
      "learning_rate": 4.000000000000001e-06,
      "loss": 125.2336,
      "step": 6400
    },
    {
      "epoch": 0.7189470191350514,
      "grad_norm": 805.5377807617188,
      "learning_rate": 3.88888888888889e-06,
      "loss": 91.8197,
      "step": 6500
    },
    {
      "epoch": 0.7300077425063599,
      "grad_norm": 160.71058654785156,
      "learning_rate": 3.777777777777778e-06,
      "loss": 77.6574,
      "step": 6600
    },
    {
      "epoch": 0.7410684658776684,
      "grad_norm": 965.3944702148438,
      "learning_rate": 3.6666666666666666e-06,
      "loss": 77.2402,
      "step": 6700
    },
    {
      "epoch": 0.7521291892489769,
      "grad_norm": 903.8233642578125,
      "learning_rate": 3.555555555555556e-06,
      "loss": 46.0224,
      "step": 6800
    },
    {
      "epoch": 0.7631899126202853,
      "grad_norm": 667.3284301757812,
      "learning_rate": 3.444444444444445e-06,
      "loss": 77.027,
      "step": 6900
    },
    {
      "epoch": 0.7742506359915938,
      "grad_norm": 1243.2249755859375,
      "learning_rate": 3.3333333333333333e-06,
      "loss": 67.4235,
      "step": 7000
    },
    {
      "epoch": 0.7853113593629023,
      "grad_norm": 366.35675048828125,
      "learning_rate": 3.2222222222222227e-06,
      "loss": 72.4485,
      "step": 7100
    },
    {
      "epoch": 0.7963720827342108,
      "grad_norm": 708.7745361328125,
      "learning_rate": 3.1111111111111116e-06,
      "loss": 97.0563,
      "step": 7200
    },
    {
      "epoch": 0.8074328061055193,
      "grad_norm": 218.93495178222656,
      "learning_rate": 3e-06,
      "loss": 50.6086,
      "step": 7300
    },
    {
      "epoch": 0.8184935294768277,
      "grad_norm": 478.40936279296875,
      "learning_rate": 2.888888888888889e-06,
      "loss": 70.7387,
      "step": 7400
    },
    {
      "epoch": 0.8295542528481362,
      "grad_norm": 2558.1591796875,
      "learning_rate": 2.7777777777777783e-06,
      "loss": 67.7935,
      "step": 7500
    },
    {
      "epoch": 0.8406149762194447,
      "grad_norm": 1078.4237060546875,
      "learning_rate": 2.666666666666667e-06,
      "loss": 50.7353,
      "step": 7600
    },
    {
      "epoch": 0.8516756995907533,
      "grad_norm": 1874.4305419921875,
      "learning_rate": 2.5555555555555557e-06,
      "loss": 70.5969,
      "step": 7700
    },
    {
      "epoch": 0.8627364229620618,
      "grad_norm": 21617.517578125,
      "learning_rate": 2.4444444444444447e-06,
      "loss": 128.802,
      "step": 7800
    },
    {
      "epoch": 0.8737971463333702,
      "grad_norm": 363.6821594238281,
      "learning_rate": 2.3333333333333336e-06,
      "loss": 75.1114,
      "step": 7900
    },
    {
      "epoch": 0.8848578697046787,
      "grad_norm": 366.4112548828125,
      "learning_rate": 2.222222222222222e-06,
      "loss": 97.5475,
      "step": 8000
    },
    {
      "epoch": 0.8959185930759872,
      "grad_norm": 803.3827514648438,
      "learning_rate": 2.1111111111111114e-06,
      "loss": 46.939,
      "step": 8100
    },
    {
      "epoch": 0.9069793164472957,
      "grad_norm": 818.63330078125,
      "learning_rate": 2.0000000000000003e-06,
      "loss": 64.0652,
      "step": 8200
    },
    {
      "epoch": 0.9180400398186042,
      "grad_norm": 441.73773193359375,
      "learning_rate": 1.888888888888889e-06,
      "loss": 79.8003,
      "step": 8300
    },
    {
      "epoch": 0.9291007631899126,
      "grad_norm": 981.2015991210938,
      "learning_rate": 1.777777777777778e-06,
      "loss": 134.9718,
      "step": 8400
    },
    {
      "epoch": 0.9401614865612211,
      "grad_norm": 688.312744140625,
      "learning_rate": 1.6666666666666667e-06,
      "loss": 61.6925,
      "step": 8500
    },
    {
      "epoch": 0.9512222099325296,
      "grad_norm": 924.4016723632812,
      "learning_rate": 1.5555555555555558e-06,
      "loss": 62.4553,
      "step": 8600
    },
    {
      "epoch": 0.9622829333038381,
      "grad_norm": 121.21617126464844,
      "learning_rate": 1.4444444444444445e-06,
      "loss": 156.6417,
      "step": 8700
    },
    {
      "epoch": 0.9733436566751466,
      "grad_norm": 238.2010498046875,
      "learning_rate": 1.3333333333333334e-06,
      "loss": 58.9229,
      "step": 8800
    },
    {
      "epoch": 0.984404380046455,
      "grad_norm": 137.2640380859375,
      "learning_rate": 1.2222222222222223e-06,
      "loss": 43.9339,
      "step": 8900
    },
    {
      "epoch": 0.9954651034177635,
      "grad_norm": 2417.302001953125,
      "learning_rate": 1.111111111111111e-06,
      "loss": 47.7252,
      "step": 9000
    },
    {
      "epoch": 1.0,
      "eval_loss": 68.28670501708984,
      "eval_runtime": 62.8173,
      "eval_samples_per_second": 31.998,
      "eval_steps_per_second": 15.999,
      "step": 9041
    },
    {
      "epoch": 1.006525826789072,
      "grad_norm": 470.1504211425781,
      "learning_rate": 1.0000000000000002e-06,
      "loss": 106.8215,
      "step": 9100
    },
    {
      "epoch": 1.0175865501603805,
      "grad_norm": 309.5218811035156,
      "learning_rate": 8.88888888888889e-07,
      "loss": 56.8555,
      "step": 9200
    },
    {
      "epoch": 1.028647273531689,
      "grad_norm": 785.29443359375,
      "learning_rate": 7.777777777777779e-07,
      "loss": 56.0117,
      "step": 9300
    },
    {
      "epoch": 1.0397079969029974,
      "grad_norm": 1061.9769287109375,
      "learning_rate": 6.666666666666667e-07,
      "loss": 95.929,
      "step": 9400
    },
    {
      "epoch": 1.050768720274306,
      "grad_norm": 526.0447387695312,
      "learning_rate": 5.555555555555555e-07,
      "loss": 46.0885,
      "step": 9500
    },
    {
      "epoch": 1.0618294436456144,
      "grad_norm": 1534.6865234375,
      "learning_rate": 4.444444444444445e-07,
      "loss": 45.3248,
      "step": 9600
    },
    {
      "epoch": 1.0728901670169229,
      "grad_norm": 539.015380859375,
      "learning_rate": 3.3333333333333335e-07,
      "loss": 66.5751,
      "step": 9700
    },
    {
      "epoch": 1.0839508903882313,
      "grad_norm": 2555.524658203125,
      "learning_rate": 2.2222222222222224e-07,
      "loss": 81.8048,
      "step": 9800
    },
    {
      "epoch": 1.0950116137595398,
      "grad_norm": 816.99072265625,
      "learning_rate": 1.1111111111111112e-07,
      "loss": 58.4326,
      "step": 9900
    },
    {
      "epoch": 1.1060723371308483,
      "grad_norm": 1056.107666015625,
      "learning_rate": 0.0,
      "loss": 110.9712,
      "step": 10000
    }
  ],
  "logging_steps": 100,
  "max_steps": 10000,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 2,
  "save_steps": 5000,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 0.0,
  "train_batch_size": 2,
  "trial_name": null,
  "trial_params": null
}