{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 2.5188916876574305,
  "eval_steps": 100,
  "global_step": 1000,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.012594458438287154,
      "grad_norm": 1.2670824527740479,
      "learning_rate": 4.999782569758238e-05,
      "loss": 2.5898,
      "num_input_tokens_seen": 17064,
      "step": 5
    },
    {
      "epoch": 0.02518891687657431,
      "grad_norm": 1.8668310642242432,
      "learning_rate": 4.9991303168536793e-05,
      "loss": 2.69,
      "num_input_tokens_seen": 31136,
      "step": 10
    },
    {
      "epoch": 0.037783375314861464,
      "grad_norm": 1.4463753700256348,
      "learning_rate": 4.9980433547419305e-05,
      "loss": 2.2158,
      "num_input_tokens_seen": 47520,
      "step": 15
    },
    {
      "epoch": 0.05037783375314862,
      "grad_norm": 1.9465786218643188,
      "learning_rate": 4.996521872493738e-05,
      "loss": 1.8421,
      "num_input_tokens_seen": 66432,
      "step": 20
    },
    {
      "epoch": 0.06297229219143577,
      "grad_norm": 1.3088130950927734,
      "learning_rate": 4.994566134762105e-05,
      "loss": 2.0699,
      "num_input_tokens_seen": 83544,
      "step": 25
    },
    {
      "epoch": 0.07556675062972293,
      "grad_norm": 1.5839314460754395,
      "learning_rate": 4.992176481736254e-05,
      "loss": 1.4037,
      "num_input_tokens_seen": 97680,
      "step": 30
    },
    {
      "epoch": 0.08816120906801007,
      "grad_norm": 2.81331729888916,
      "learning_rate": 4.989353329082452e-05,
      "loss": 1.8734,
      "num_input_tokens_seen": 115456,
      "step": 35
    },
    {
      "epoch": 0.10075566750629723,
      "grad_norm": 1.999861240386963,
      "learning_rate": 4.986097167871711e-05,
      "loss": 1.584,
      "num_input_tokens_seen": 132872,
      "step": 40
    },
    {
      "epoch": 0.11335012594458438,
      "grad_norm": 1.38965904712677,
      "learning_rate": 4.982408564494367e-05,
      "loss": 1.0849,
      "num_input_tokens_seen": 148296,
      "step": 45
    },
    {
      "epoch": 0.12594458438287154,
      "grad_norm": 1.410891056060791,
      "learning_rate": 4.978288160561558e-05,
      "loss": 1.0605,
      "num_input_tokens_seen": 167504,
      "step": 50
    },
    {
      "epoch": 0.1385390428211587,
      "grad_norm": 1.8659024238586426,
      "learning_rate": 4.9737366727936235e-05,
      "loss": 1.596,
      "num_input_tokens_seen": 182536,
      "step": 55
    },
    {
      "epoch": 0.15113350125944586,
      "grad_norm": 0.825343906879425,
      "learning_rate": 4.968754892895432e-05,
      "loss": 1.1978,
      "num_input_tokens_seen": 201568,
      "step": 60
    },
    {
      "epoch": 0.163727959697733,
      "grad_norm": 1.3539998531341553,
      "learning_rate": 4.963343687418669e-05,
      "loss": 1.1129,
      "num_input_tokens_seen": 217584,
      "step": 65
    },
    {
      "epoch": 0.17632241813602015,
      "grad_norm": 1.6213319301605225,
      "learning_rate": 4.9575039976111084e-05,
      "loss": 1.3955,
      "num_input_tokens_seen": 235280,
      "step": 70
    },
    {
      "epoch": 0.1889168765743073,
      "grad_norm": 1.7147520780563354,
      "learning_rate": 4.9512368392528806e-05,
      "loss": 1.029,
      "num_input_tokens_seen": 253544,
      "step": 75
    },
    {
      "epoch": 0.20151133501259447,
      "grad_norm": 1.0490851402282715,
      "learning_rate": 4.9445433024797936e-05,
      "loss": 1.1447,
      "num_input_tokens_seen": 270112,
      "step": 80
    },
    {
      "epoch": 0.2141057934508816,
      "grad_norm": 1.7510355710983276,
      "learning_rate": 4.937424551593702e-05,
      "loss": 1.0042,
      "num_input_tokens_seen": 287296,
      "step": 85
    },
    {
      "epoch": 0.22670025188916876,
      "grad_norm": 1.5315502882003784,
      "learning_rate": 4.929881824859985e-05,
      "loss": 1.3522,
      "num_input_tokens_seen": 302304,
      "step": 90
    },
    {
      "epoch": 0.23929471032745592,
      "grad_norm": 1.2641905546188354,
      "learning_rate": 4.9219164342921634e-05,
      "loss": 1.2398,
      "num_input_tokens_seen": 317624,
      "step": 95
    },
    {
      "epoch": 0.2518891687657431,
      "grad_norm": 1.116264820098877,
      "learning_rate": 4.9135297654236724e-05,
      "loss": 0.8733,
      "num_input_tokens_seen": 332920,
      "step": 100
    },
    {
      "epoch": 0.2518891687657431,
      "eval_accuracy": 0.7302086027292422,
      "eval_loss": 1.238808035850525,
      "eval_runtime": 533.8541,
      "eval_samples_per_second": 0.332,
      "eval_steps_per_second": 0.332,
      "num_input_tokens_seen": 332920,
      "step": 100
    },
    {
      "epoch": 0.26448362720403024,
      "grad_norm": 1.540872573852539,
      "learning_rate": 4.904723277066864e-05,
      "loss": 1.1192,
      "num_input_tokens_seen": 347680,
      "step": 105
    },
    {
      "epoch": 0.2770780856423174,
      "grad_norm": 1.2526932954788208,
      "learning_rate": 4.8954985010592534e-05,
      "loss": 1.1241,
      "num_input_tokens_seen": 364952,
      "step": 110
    },
    {
      "epoch": 0.28967254408060455,
      "grad_norm": 1.2870938777923584,
      "learning_rate": 4.8858570419970616e-05,
      "loss": 1.1762,
      "num_input_tokens_seen": 383104,
      "step": 115
    },
    {
      "epoch": 0.3022670025188917,
      "grad_norm": 1.6976910829544067,
      "learning_rate": 4.875800576956108e-05,
      "loss": 1.0041,
      "num_input_tokens_seen": 401448,
      "step": 120
    },
    {
      "epoch": 0.3148614609571788,
      "grad_norm": 1.2784565687179565,
      "learning_rate": 4.865330855200094e-05,
      "loss": 0.9044,
      "num_input_tokens_seen": 419184,
      "step": 125
    },
    {
      "epoch": 0.327455919395466,
      "grad_norm": 2.2473819255828857,
      "learning_rate": 4.854449697876325e-05,
      "loss": 1.5228,
      "num_input_tokens_seen": 436976,
      "step": 130
    },
    {
      "epoch": 0.34005037783375314,
      "grad_norm": 1.252365231513977,
      "learning_rate": 4.843158997698936e-05,
      "loss": 1.2336,
      "num_input_tokens_seen": 455432,
      "step": 135
    },
    {
      "epoch": 0.3526448362720403,
      "grad_norm": 1.5875591039657593,
      "learning_rate": 4.831460718619661e-05,
      "loss": 1.0085,
      "num_input_tokens_seen": 473896,
      "step": 140
    },
    {
      "epoch": 0.36523929471032746,
      "grad_norm": 1.8101682662963867,
      "learning_rate": 4.819356895486219e-05,
      "loss": 1.3333,
      "num_input_tokens_seen": 488936,
      "step": 145
    },
    {
      "epoch": 0.3778337531486146,
      "grad_norm": 1.2537903785705566,
      "learning_rate": 4.806849633688363e-05,
      "loss": 1.1225,
      "num_input_tokens_seen": 507800,
      "step": 150
    },
    {
      "epoch": 0.3904282115869018,
      "grad_norm": 1.2231327295303345,
      "learning_rate": 4.7939411087916566e-05,
      "loss": 1.0405,
      "num_input_tokens_seen": 524704,
      "step": 155
    },
    {
      "epoch": 0.40302267002518893,
      "grad_norm": 0.9885507822036743,
      "learning_rate": 4.7806335661590526e-05,
      "loss": 1.0268,
      "num_input_tokens_seen": 542320,
      "step": 160
    },
    {
      "epoch": 0.4156171284634761,
      "grad_norm": 1.273047685623169,
      "learning_rate": 4.7669293205603196e-05,
      "loss": 1.1071,
      "num_input_tokens_seen": 560488,
      "step": 165
    },
    {
      "epoch": 0.4282115869017632,
      "grad_norm": 1.8347108364105225,
      "learning_rate": 4.752830755769405e-05,
      "loss": 1.169,
      "num_input_tokens_seen": 577680,
      "step": 170
    },
    {
      "epoch": 0.44080604534005036,
      "grad_norm": 1.9854867458343506,
      "learning_rate": 4.73834032414979e-05,
      "loss": 0.9519,
      "num_input_tokens_seen": 596208,
      "step": 175
    },
    {
      "epoch": 0.4534005037783375,
      "grad_norm": 1.2936229705810547,
      "learning_rate": 4.723460546227914e-05,
      "loss": 1.2277,
      "num_input_tokens_seen": 613120,
      "step": 180
    },
    {
      "epoch": 0.4659949622166247,
      "grad_norm": 0.9850680232048035,
      "learning_rate": 4.7081940102547463e-05,
      "loss": 0.9588,
      "num_input_tokens_seen": 630336,
      "step": 185
    },
    {
      "epoch": 0.47858942065491183,
      "grad_norm": 1.643853783607483,
      "learning_rate": 4.692543371755572e-05,
      "loss": 1.0816,
      "num_input_tokens_seen": 644488,
      "step": 190
    },
    {
      "epoch": 0.491183879093199,
      "grad_norm": 1.7307960987091064,
      "learning_rate": 4.6765113530680825e-05,
      "loss": 0.8216,
      "num_input_tokens_seen": 660432,
      "step": 195
    },
    {
      "epoch": 0.5037783375314862,
      "grad_norm": 1.731389045715332,
      "learning_rate": 4.660100742868836e-05,
      "loss": 0.8872,
      "num_input_tokens_seen": 680080,
      "step": 200
    },
    {
      "epoch": 0.5037783375314862,
      "eval_accuracy": 0.7530999930068348,
      "eval_loss": 1.129120111465454,
      "eval_runtime": 534.7922,
      "eval_samples_per_second": 0.331,
      "eval_steps_per_second": 0.331,
      "num_input_tokens_seen": 680080,
      "step": 200
    },
    {
      "epoch": 0.5163727959697733,
      "grad_norm": 2.0154528617858887,
      "learning_rate": 4.643314395688188e-05,
      "loss": 1.0193,
      "num_input_tokens_seen": 695688,
      "step": 205
    },
    {
      "epoch": 0.5289672544080605,
      "grad_norm": 1.5947645902633667,
      "learning_rate": 4.626155231413758e-05,
      "loss": 1.1722,
      "num_input_tokens_seen": 718192,
      "step": 210
    },
    {
      "epoch": 0.5415617128463476,
      "grad_norm": 1.233261227607727,
      "learning_rate": 4.608626234782536e-05,
      "loss": 0.9888,
      "num_input_tokens_seen": 733136,
      "step": 215
    },
    {
      "epoch": 0.5541561712846348,
      "grad_norm": 2.006932497024536,
      "learning_rate": 4.5907304548617024e-05,
      "loss": 1.018,
      "num_input_tokens_seen": 748016,
      "step": 220
    },
    {
      "epoch": 0.5667506297229219,
      "grad_norm": 1.6554147005081177,
      "learning_rate": 4.572471004518261e-05,
      "loss": 0.8822,
      "num_input_tokens_seen": 763264,
      "step": 225
    },
    {
      "epoch": 0.5793450881612091,
      "grad_norm": 2.6174421310424805,
      "learning_rate": 4.553851059877573e-05,
      "loss": 1.1984,
      "num_input_tokens_seen": 778632,
      "step": 230
    },
    {
      "epoch": 0.5919395465994962,
      "grad_norm": 1.639770269393921,
      "learning_rate": 4.534873859770892e-05,
      "loss": 1.0492,
      "num_input_tokens_seen": 799096,
      "step": 235
    },
    {
      "epoch": 0.6045340050377834,
      "grad_norm": 1.863054871559143,
      "learning_rate": 4.515542705171981e-05,
      "loss": 0.7339,
      "num_input_tokens_seen": 815240,
      "step": 240
    },
    {
      "epoch": 0.6171284634760705,
      "grad_norm": 2.6653268337249756,
      "learning_rate": 4.495860958622937e-05,
      "loss": 1.2535,
      "num_input_tokens_seen": 831320,
      "step": 245
    },
    {
      "epoch": 0.6297229219143576,
      "grad_norm": 3.9443094730377197,
      "learning_rate": 4.475832043649287e-05,
      "loss": 1.883,
      "num_input_tokens_seen": 850376,
      "step": 250
    },
    {
      "epoch": 0.6423173803526449,
      "grad_norm": 3.2649178504943848,
      "learning_rate": 4.455459444164492e-05,
      "loss": 0.9972,
      "num_input_tokens_seen": 868192,
      "step": 255
    },
    {
      "epoch": 0.654911838790932,
      "grad_norm": 2.0120456218719482,
      "learning_rate": 4.4347467038639364e-05,
      "loss": 1.0848,
      "num_input_tokens_seen": 883560,
      "step": 260
    },
    {
      "epoch": 0.6675062972292192,
      "grad_norm": 1.7830870151519775,
      "learning_rate": 4.4136974256085236e-05,
      "loss": 0.8549,
      "num_input_tokens_seen": 902952,
      "step": 265
    },
    {
      "epoch": 0.6801007556675063,
      "grad_norm": 1.284114956855774,
      "learning_rate": 4.392315270797985e-05,
      "loss": 0.5925,
      "num_input_tokens_seen": 919584,
      "step": 270
    },
    {
      "epoch": 0.6926952141057935,
      "grad_norm": 1.58772873878479,
      "learning_rate": 4.3706039587339894e-05,
      "loss": 1.2722,
      "num_input_tokens_seen": 940200,
      "step": 275
    },
    {
      "epoch": 0.7052896725440806,
      "grad_norm": 1.5675506591796875,
      "learning_rate": 4.3485672659732034e-05,
      "loss": 0.9741,
      "num_input_tokens_seen": 961256,
      "step": 280
    },
    {
      "epoch": 0.7178841309823678,
      "grad_norm": 1.5801304578781128,
      "learning_rate": 4.3262090256703736e-05,
      "loss": 0.9787,
      "num_input_tokens_seen": 978000,
      "step": 285
    },
    {
      "epoch": 0.7304785894206549,
      "grad_norm": 1.2644524574279785,
      "learning_rate": 4.303533126911577e-05,
      "loss": 1.1364,
      "num_input_tokens_seen": 997512,
      "step": 290
    },
    {
      "epoch": 0.743073047858942,
      "grad_norm": 1.307681918144226,
      "learning_rate": 4.280543514037731e-05,
      "loss": 1.1322,
      "num_input_tokens_seen": 1016824,
      "step": 295
    },
    {
      "epoch": 0.7556675062972292,
      "grad_norm": 1.8267008066177368,
      "learning_rate": 4.257244185958505e-05,
      "loss": 1.0074,
      "num_input_tokens_seen": 1036168,
      "step": 300
    },
    {
      "epoch": 0.7556675062972292,
      "eval_accuracy": 0.7609178510672793,
      "eval_loss": 1.095440149307251,
      "eval_runtime": 537.6469,
      "eval_samples_per_second": 0.329,
      "eval_steps_per_second": 0.329,
      "num_input_tokens_seen": 1036168,
      "step": 300
    },
    {
      "epoch": 0.7682619647355163,
      "grad_norm": 1.6457531452178955,
      "learning_rate": 4.233639195456729e-05,
      "loss": 0.939,
      "num_input_tokens_seen": 1053264,
      "step": 305
    },
    {
      "epoch": 0.7808564231738035,
      "grad_norm": 1.752995252609253,
      "learning_rate": 4.2097326484834346e-05,
      "loss": 1.0468,
      "num_input_tokens_seen": 1068696,
      "step": 310
    },
    {
      "epoch": 0.7934508816120907,
      "grad_norm": 1.001474142074585,
      "learning_rate": 4.1855287034436555e-05,
      "loss": 0.8325,
      "num_input_tokens_seen": 1085264,
      "step": 315
    },
    {
      "epoch": 0.8060453400503779,
      "grad_norm": 1.4869558811187744,
      "learning_rate": 4.1610315704730854e-05,
      "loss": 0.8035,
      "num_input_tokens_seen": 1102368,
      "step": 320
    },
    {
      "epoch": 0.818639798488665,
      "grad_norm": 1.8002692461013794,
      "learning_rate": 4.136245510705762e-05,
      "loss": 1.0207,
      "num_input_tokens_seen": 1117768,
      "step": 325
    },
    {
      "epoch": 0.8312342569269522,
      "grad_norm": 1.747013807296753,
      "learning_rate": 4.111174835532857e-05,
      "loss": 1.2914,
      "num_input_tokens_seen": 1133440,
      "step": 330
    },
    {
      "epoch": 0.8438287153652393,
      "grad_norm": 1.6267316341400146,
      "learning_rate": 4.085823905852745e-05,
      "loss": 1.2979,
      "num_input_tokens_seen": 1146480,
      "step": 335
    },
    {
      "epoch": 0.8564231738035264,
      "grad_norm": 1.2453759908676147,
      "learning_rate": 4.06019713131244e-05,
      "loss": 0.6644,
      "num_input_tokens_seen": 1162248,
      "step": 340
    },
    {
      "epoch": 0.8690176322418136,
      "grad_norm": 0.9214743971824646,
      "learning_rate": 4.034298969540567e-05,
      "loss": 1.1669,
      "num_input_tokens_seen": 1179224,
      "step": 345
    },
    {
      "epoch": 0.8816120906801007,
      "grad_norm": 1.0969208478927612,
      "learning_rate": 4.008133925371988e-05,
      "loss": 1.2072,
      "num_input_tokens_seen": 1195240,
      "step": 350
    },
    {
      "epoch": 0.8942065491183879,
      "grad_norm": 1.6422204971313477,
      "learning_rate": 3.981706550064208e-05,
      "loss": 0.9078,
      "num_input_tokens_seen": 1213056,
      "step": 355
    },
    {
      "epoch": 0.906801007556675,
      "grad_norm": 1.5316386222839355,
      "learning_rate": 3.955021440505706e-05,
      "loss": 0.4814,
      "num_input_tokens_seen": 1230744,
      "step": 360
    },
    {
      "epoch": 0.9193954659949622,
      "grad_norm": 1.2854864597320557,
      "learning_rate": 3.928083238416342e-05,
      "loss": 0.9272,
      "num_input_tokens_seen": 1246904,
      "step": 365
    },
    {
      "epoch": 0.9319899244332494,
      "grad_norm": 1.3557329177856445,
      "learning_rate": 3.9008966295399494e-05,
      "loss": 0.9995,
      "num_input_tokens_seen": 1263880,
      "step": 370
    },
    {
      "epoch": 0.9445843828715366,
      "grad_norm": 1.2092177867889404,
      "learning_rate": 3.873466342829281e-05,
      "loss": 0.8696,
      "num_input_tokens_seen": 1283816,
      "step": 375
    },
    {
      "epoch": 0.9571788413098237,
      "grad_norm": 2.7501771450042725,
      "learning_rate": 3.845797149623434e-05,
      "loss": 1.4119,
      "num_input_tokens_seen": 1300192,
      "step": 380
    },
    {
      "epoch": 0.9697732997481109,
      "grad_norm": 1.222266435623169,
      "learning_rate": 3.817893862817902e-05,
      "loss": 0.8804,
      "num_input_tokens_seen": 1317224,
      "step": 385
    },
    {
      "epoch": 0.982367758186398,
      "grad_norm": 0.6030636429786682,
      "learning_rate": 3.789761336027403e-05,
      "loss": 0.5944,
      "num_input_tokens_seen": 1335296,
      "step": 390
    },
    {
      "epoch": 0.9949622166246851,
      "grad_norm": 1.5092536211013794,
      "learning_rate": 3.761404462741618e-05,
      "loss": 1.0303,
      "num_input_tokens_seen": 1351920,
      "step": 395
    },
    {
      "epoch": 1.0075566750629723,
      "grad_norm": 2.0633888244628906,
      "learning_rate": 3.7328281754739974e-05,
      "loss": 0.9671,
      "num_input_tokens_seen": 1370864,
      "step": 400
    },
    {
      "epoch": 1.0075566750629723,
      "eval_accuracy": 0.7659399606125864,
      "eval_loss": 1.0698517560958862,
      "eval_runtime": 535.9639,
      "eval_samples_per_second": 0.33,
      "eval_steps_per_second": 0.33,
      "num_input_tokens_seen": 1370864,
      "step": 400
    },
    {
      "epoch": 1.0201511335012594,
      "grad_norm": 1.7792761325836182,
      "learning_rate": 3.704037444903782e-05,
      "loss": 1.0106,
      "num_input_tokens_seen": 1390136,
      "step": 405
    },
    {
      "epoch": 1.0327455919395465,
      "grad_norm": 1.8169097900390625,
      "learning_rate": 3.6750372790113766e-05,
      "loss": 0.7452,
      "num_input_tokens_seen": 1411432,
      "step": 410
    },
    {
      "epoch": 1.0453400503778338,
      "grad_norm": 1.3711882829666138,
      "learning_rate": 3.645832722207248e-05,
      "loss": 0.9704,
      "num_input_tokens_seen": 1429024,
      "step": 415
    },
    {
      "epoch": 1.057934508816121,
      "grad_norm": 0.9055928587913513,
      "learning_rate": 3.6164288544544725e-05,
      "loss": 0.5268,
      "num_input_tokens_seen": 1445848,
      "step": 420
    },
    {
      "epoch": 1.070528967254408,
      "grad_norm": 1.2504442930221558,
      "learning_rate": 3.586830790385109e-05,
      "loss": 0.6362,
      "num_input_tokens_seen": 1463232,
      "step": 425
    },
    {
      "epoch": 1.0831234256926952,
      "grad_norm": 2.108982563018799,
      "learning_rate": 3.55704367841054e-05,
      "loss": 0.5694,
      "num_input_tokens_seen": 1478584,
      "step": 430
    },
    {
      "epoch": 1.0957178841309823,
      "grad_norm": 2.6890852451324463,
      "learning_rate": 3.52707269982593e-05,
      "loss": 0.5836,
      "num_input_tokens_seen": 1495112,
      "step": 435
    },
    {
      "epoch": 1.1083123425692696,
      "grad_norm": 3.2210803031921387,
      "learning_rate": 3.496923067908977e-05,
      "loss": 1.0356,
      "num_input_tokens_seen": 1513000,
      "step": 440
    },
    {
      "epoch": 1.1209068010075567,
      "grad_norm": 1.5383672714233398,
      "learning_rate": 3.466600027013084e-05,
      "loss": 1.0125,
      "num_input_tokens_seen": 1526896,
      "step": 445
    },
    {
      "epoch": 1.1335012594458438,
      "grad_norm": 2.0438127517700195,
      "learning_rate": 3.436108851655143e-05,
      "loss": 1.0554,
      "num_input_tokens_seen": 1542448,
      "step": 450
    },
    {
      "epoch": 1.146095717884131,
      "grad_norm": 2.1045315265655518,
      "learning_rate": 3.4054548455980565e-05,
      "loss": 0.714,
      "num_input_tokens_seen": 1557656,
      "step": 455
    },
    {
      "epoch": 1.1586901763224182,
      "grad_norm": 2.4777379035949707,
      "learning_rate": 3.3746433409281844e-05,
      "loss": 0.8676,
      "num_input_tokens_seen": 1575192,
      "step": 460
    },
    {
      "epoch": 1.1712846347607053,
      "grad_norm": 2.529090642929077,
      "learning_rate": 3.3436796971278526e-05,
      "loss": 0.6624,
      "num_input_tokens_seen": 1596112,
      "step": 465
    },
    {
      "epoch": 1.1838790931989924,
      "grad_norm": 2.93548846244812,
      "learning_rate": 3.312569300143108e-05,
      "loss": 0.795,
      "num_input_tokens_seen": 1610768,
      "step": 470
    },
    {
      "epoch": 1.1964735516372795,
      "grad_norm": 2.8596348762512207,
      "learning_rate": 3.2813175614468604e-05,
      "loss": 1.3433,
      "num_input_tokens_seen": 1627672,
      "step": 475
    },
    {
      "epoch": 1.2090680100755669,
      "grad_norm": 3.336879253387451,
      "learning_rate": 3.24992991709759e-05,
      "loss": 1.0594,
      "num_input_tokens_seen": 1641912,
      "step": 480
    },
    {
      "epoch": 1.221662468513854,
      "grad_norm": 2.030346155166626,
      "learning_rate": 3.218411826793777e-05,
      "loss": 0.972,
      "num_input_tokens_seen": 1659832,
      "step": 485
    },
    {
      "epoch": 1.234256926952141,
      "grad_norm": 2.6392228603363037,
      "learning_rate": 3.186768772924216e-05,
      "loss": 1.032,
      "num_input_tokens_seen": 1679064,
      "step": 490
    },
    {
      "epoch": 1.2468513853904282,
      "grad_norm": 2.443204402923584,
      "learning_rate": 3.1550062596143886e-05,
      "loss": 1.3751,
      "num_input_tokens_seen": 1692608,
      "step": 495
    },
    {
      "epoch": 1.2594458438287153,
      "grad_norm": 1.4907230138778687,
      "learning_rate": 3.1231298117690554e-05,
      "loss": 0.7884,
      "num_input_tokens_seen": 1709712,
      "step": 500
    },
    {
      "epoch": 1.2594458438287153,
      "eval_accuracy": 0.7672396326295029,
      "eval_loss": 1.0675625801086426,
      "eval_runtime": 535.3969,
      "eval_samples_per_second": 0.331,
      "eval_steps_per_second": 0.331,
      "num_input_tokens_seen": 1709712,
      "step": 500
    },
    {
      "epoch": 1.2720403022670026,
      "grad_norm": 0.9061765074729919,
      "learning_rate": 3.091144974111224e-05,
      "loss": 0.781,
      "num_input_tokens_seen": 1729424,
      "step": 505
    },
    {
      "epoch": 1.2846347607052897,
      "grad_norm": 2.395761251449585,
      "learning_rate": 3.059057310217683e-05,
      "loss": 0.8264,
      "num_input_tokens_seen": 1749136,
      "step": 510
    },
    {
      "epoch": 1.2972292191435768,
      "grad_norm": 2.069194793701172,
      "learning_rate": 3.0268724015512463e-05,
      "loss": 0.6579,
      "num_input_tokens_seen": 1765216,
      "step": 515
    },
    {
      "epoch": 1.309823677581864,
      "grad_norm": 1.5534199476242065,
      "learning_rate": 2.994595846489892e-05,
      "loss": 0.7195,
      "num_input_tokens_seen": 1781320,
      "step": 520
    },
    {
      "epoch": 1.322418136020151,
      "grad_norm": 1.4709012508392334,
      "learning_rate": 2.9622332593529563e-05,
      "loss": 0.5226,
      "num_input_tokens_seen": 1797760,
      "step": 525
    },
    {
      "epoch": 1.3350125944584383,
      "grad_norm": 2.4601175785064697,
      "learning_rate": 2.9297902694245542e-05,
      "loss": 1.2005,
      "num_input_tokens_seen": 1813224,
      "step": 530
    },
    {
      "epoch": 1.3476070528967254,
      "grad_norm": 2.9571943283081055,
      "learning_rate": 2.8972725199744033e-05,
      "loss": 0.7554,
      "num_input_tokens_seen": 1830728,
      "step": 535
    },
    {
      "epoch": 1.3602015113350125,
      "grad_norm": 1.610404372215271,
      "learning_rate": 2.864685667276201e-05,
      "loss": 1.0766,
      "num_input_tokens_seen": 1848816,
      "step": 540
    },
    {
      "epoch": 1.3727959697732999,
      "grad_norm": 2.7103452682495117,
      "learning_rate": 2.8320353796237553e-05,
      "loss": 0.8778,
      "num_input_tokens_seen": 1863224,
      "step": 545
    },
    {
      "epoch": 1.385390428211587,
      "grad_norm": 3.026928424835205,
      "learning_rate": 2.7993273363450184e-05,
      "loss": 0.6799,
      "num_input_tokens_seen": 1880288,
      "step": 550
    },
    {
      "epoch": 1.397984886649874,
      "grad_norm": 1.5982580184936523,
      "learning_rate": 2.7665672268141956e-05,
      "loss": 0.4951,
      "num_input_tokens_seen": 1896552,
      "step": 555
    },
    {
      "epoch": 1.4105793450881612,
      "grad_norm": 2.5049729347229004,
      "learning_rate": 2.7337607494621152e-05,
      "loss": 0.9428,
      "num_input_tokens_seen": 1915872,
      "step": 560
    },
    {
      "epoch": 1.4231738035264483,
      "grad_norm": 2.13607120513916,
      "learning_rate": 2.7009136107850185e-05,
      "loss": 0.8936,
      "num_input_tokens_seen": 1934704,
      "step": 565
    },
    {
      "epoch": 1.4357682619647356,
      "grad_norm": 3.7449216842651367,
      "learning_rate": 2.668031524351949e-05,
      "loss": 0.8481,
      "num_input_tokens_seen": 1951816,
      "step": 570
    },
    {
      "epoch": 1.4483627204030227,
      "grad_norm": 2.881800651550293,
      "learning_rate": 2.6351202098109083e-05,
      "loss": 1.1778,
      "num_input_tokens_seen": 1970640,
      "step": 575
    },
    {
      "epoch": 1.4609571788413098,
      "grad_norm": 3.845482110977173,
      "learning_rate": 2.6021853918939587e-05,
      "loss": 0.7675,
      "num_input_tokens_seen": 1986504,
      "step": 580
    },
    {
      "epoch": 1.473551637279597,
      "grad_norm": 2.6726672649383545,
      "learning_rate": 2.5692327994214383e-05,
      "loss": 0.8112,
      "num_input_tokens_seen": 2003440,
      "step": 585
    },
    {
      "epoch": 1.486146095717884,
      "grad_norm": 3.1416702270507812,
      "learning_rate": 2.536268164305465e-05,
      "loss": 1.3447,
      "num_input_tokens_seen": 2020568,
      "step": 590
    },
    {
      "epoch": 1.4987405541561714,
      "grad_norm": 1.6394110918045044,
      "learning_rate": 2.5032972205529044e-05,
      "loss": 0.9512,
      "num_input_tokens_seen": 2037096,
      "step": 595
    },
    {
      "epoch": 1.5113350125944585,
      "grad_norm": 4.283825874328613,
      "learning_rate": 2.4703257032679744e-05,
      "loss": 1.0526,
      "num_input_tokens_seen": 2053296,
      "step": 600
    },
    {
      "epoch": 1.5113350125944585,
      "eval_accuracy": 0.7689241678712095,
      "eval_loss": 1.0595225095748901,
      "eval_runtime": 536.6614,
      "eval_samples_per_second": 0.33,
      "eval_steps_per_second": 0.33,
      "num_input_tokens_seen": 2053296,
      "step": 600
    },
    {
      "epoch": 1.5239294710327456,
      "grad_norm": 2.488013505935669,
      "learning_rate": 2.437359347654655e-05,
      "loss": 0.7499,
      "num_input_tokens_seen": 2070840,
      "step": 605
    },
    {
      "epoch": 1.536523929471033,
      "grad_norm": 2.6227219104766846,
      "learning_rate": 2.4044038880190824e-05,
      "loss": 0.6816,
      "num_input_tokens_seen": 2087952,
      "step": 610
    },
    {
      "epoch": 1.5491183879093198,
      "grad_norm": 1.874306082725525,
      "learning_rate": 2.3714650567721016e-05,
      "loss": 0.7719,
      "num_input_tokens_seen": 2103824,
      "step": 615
    },
    {
      "epoch": 1.561712846347607,
      "grad_norm": 2.8610432147979736,
      "learning_rate": 2.338548583432144e-05,
      "loss": 1.1229,
      "num_input_tokens_seen": 2120240,
      "step": 620
    },
    {
      "epoch": 1.5743073047858942,
      "grad_norm": 6.437571048736572,
      "learning_rate": 2.305660193628618e-05,
      "loss": 1.1712,
      "num_input_tokens_seen": 2135416,
      "step": 625
    },
    {
      "epoch": 1.5869017632241813,
      "grad_norm": 2.076413154602051,
      "learning_rate": 2.272805608105958e-05,
      "loss": 0.6688,
      "num_input_tokens_seen": 2151904,
      "step": 630
    },
    {
      "epoch": 1.5994962216624686,
      "grad_norm": 2.047494888305664,
      "learning_rate": 2.2399905417285434e-05,
      "loss": 0.8043,
      "num_input_tokens_seen": 2168952,
      "step": 635
    },
    {
      "epoch": 1.6120906801007555,
      "grad_norm": 2.4243392944335938,
      "learning_rate": 2.2072207024866266e-05,
      "loss": 0.5582,
      "num_input_tokens_seen": 2185192,
      "step": 640
    },
    {
      "epoch": 1.6246851385390428,
      "grad_norm": 2.66733717918396,
      "learning_rate": 2.1745017905034625e-05,
      "loss": 1.1033,
      "num_input_tokens_seen": 2200856,
      "step": 645
    },
    {
      "epoch": 1.63727959697733,
      "grad_norm": 2.6124303340911865,
      "learning_rate": 2.141839497043806e-05,
      "loss": 0.8529,
      "num_input_tokens_seen": 2215080,
      "step": 650
    },
    {
      "epoch": 1.649874055415617,
      "grad_norm": 2.1553502082824707,
      "learning_rate": 2.1092395035239472e-05,
      "loss": 0.7331,
      "num_input_tokens_seen": 2229808,
      "step": 655
    },
    {
      "epoch": 1.6624685138539044,
      "grad_norm": 2.305765390396118,
      "learning_rate": 2.076707480523464e-05,
      "loss": 0.9966,
      "num_input_tokens_seen": 2247584,
      "step": 660
    },
    {
      "epoch": 1.6750629722921915,
      "grad_norm": 1.9919112920761108,
      "learning_rate": 2.0442490867988582e-05,
      "loss": 1.0719,
      "num_input_tokens_seen": 2264280,
      "step": 665
    },
    {
      "epoch": 1.6876574307304786,
      "grad_norm": 2.5872271060943604,
      "learning_rate": 2.011869968299245e-05,
      "loss": 0.8667,
      "num_input_tokens_seen": 2281600,
      "step": 670
    },
    {
      "epoch": 1.700251889168766,
      "grad_norm": 2.303976535797119,
      "learning_rate": 1.9795757571842744e-05,
      "loss": 0.559,
      "num_input_tokens_seen": 2298688,
      "step": 675
    },
    {
      "epoch": 1.7128463476070528,
      "grad_norm": 2.5704333782196045,
      "learning_rate": 1.947372070844452e-05,
      "loss": 0.899,
      "num_input_tokens_seen": 2318312,
      "step": 680
    },
    {
      "epoch": 1.7254408060453401,
      "grad_norm": 2.638066530227661,
      "learning_rate": 1.915264510924022e-05,
      "loss": 0.7052,
      "num_input_tokens_seen": 2334496,
      "step": 685
    },
    {
      "epoch": 1.7380352644836272,
      "grad_norm": 2.046243906021118,
      "learning_rate": 1.883258662346596e-05,
      "loss": 0.9922,
      "num_input_tokens_seen": 2355640,
      "step": 690
    },
    {
      "epoch": 1.7506297229219143,
      "grad_norm": 2.731766700744629,
      "learning_rate": 1.8513600923436923e-05,
      "loss": 0.9633,
      "num_input_tokens_seen": 2375368,
      "step": 695
    },
    {
      "epoch": 1.7632241813602016,
      "grad_norm": 4.190962791442871,
      "learning_rate": 1.8195743494863387e-05,
      "loss": 1.2255,
      "num_input_tokens_seen": 2392384,
      "step": 700
    },
    {
      "epoch": 1.7632241813602016,
      "eval_accuracy": 0.7701849009613134,
      "eval_loss": 1.0550936460494995,
      "eval_runtime": 535.3581,
      "eval_samples_per_second": 0.331,
      "eval_steps_per_second": 0.331,
      "num_input_tokens_seen": 2392384,
      "step": 700
    },
    {
      "epoch": 1.7758186397984885,
      "grad_norm": 6.717029094696045,
      "learning_rate": 1.787906962719939e-05,
      "loss": 0.7964,
      "num_input_tokens_seen": 2409672,
      "step": 705
    },
    {
      "epoch": 1.7884130982367759,
      "grad_norm": 2.1568119525909424,
      "learning_rate": 1.7563634404025414e-05,
      "loss": 0.7568,
      "num_input_tokens_seen": 2425144,
      "step": 710
    },
    {
      "epoch": 1.801007556675063,
      "grad_norm": 2.602142810821533,
      "learning_rate": 1.7249492693466934e-05,
      "loss": 0.739,
      "num_input_tokens_seen": 2445872,
      "step": 715
    },
    {
      "epoch": 1.81360201511335,
      "grad_norm": 2.0826470851898193,
      "learning_rate": 1.6936699138650397e-05,
      "loss": 0.7168,
      "num_input_tokens_seen": 2463232,
      "step": 720
    },
    {
      "epoch": 1.8261964735516374,
      "grad_norm": 3.3417489528656006,
      "learning_rate": 1.6625308148198413e-05,
      "loss": 0.7757,
      "num_input_tokens_seen": 2480816,
      "step": 725
    },
    {
      "epoch": 1.8387909319899243,
      "grad_norm": 2.6503310203552246,
      "learning_rate": 1.6315373886765646e-05,
      "loss": 0.6779,
      "num_input_tokens_seen": 2498488,
      "step": 730
    },
    {
      "epoch": 1.8513853904282116,
      "grad_norm": 3.9675190448760986,
      "learning_rate": 1.600695026561721e-05,
      "loss": 0.9367,
      "num_input_tokens_seen": 2516792,
      "step": 735
    },
    {
      "epoch": 1.8639798488664987,
      "grad_norm": 2.195193290710449,
      "learning_rate": 1.5700090933251115e-05,
      "loss": 0.447,
      "num_input_tokens_seen": 2533232,
      "step": 740
    },
    {
      "epoch": 1.8765743073047858,
      "grad_norm": 2.9985008239746094,
      "learning_rate": 1.5394849266066416e-05,
      "loss": 0.6294,
      "num_input_tokens_seen": 2552824,
      "step": 745
    },
    {
      "epoch": 1.8891687657430731,
      "grad_norm": 3.8788790702819824,
      "learning_rate": 1.509127835907872e-05,
      "loss": 1.3992,
      "num_input_tokens_seen": 2571512,
      "step": 750
    },
    {
      "epoch": 1.9017632241813602,
      "grad_norm": 1.619795799255371,
      "learning_rate": 1.4789431016684558e-05,
      "loss": 0.8268,
      "num_input_tokens_seen": 2588120,
      "step": 755
    },
    {
      "epoch": 1.9143576826196473,
      "grad_norm": 3.272700786590576,
      "learning_rate": 1.4489359743476461e-05,
      "loss": 0.6765,
      "num_input_tokens_seen": 2605248,
      "step": 760
    },
    {
      "epoch": 1.9269521410579347,
      "grad_norm": 2.847982168197632,
      "learning_rate": 1.4191116735110007e-05,
      "loss": 1.1278,
      "num_input_tokens_seen": 2621656,
      "step": 765
    },
    {
      "epoch": 1.9395465994962215,
      "grad_norm": 3.8507871627807617,
      "learning_rate": 1.3894753869224725e-05,
      "loss": 0.6863,
      "num_input_tokens_seen": 2639440,
      "step": 770
    },
    {
      "epoch": 1.9521410579345089,
      "grad_norm": 2.1526479721069336,
      "learning_rate": 1.3600322696420275e-05,
      "loss": 0.6884,
      "num_input_tokens_seen": 2657952,
      "step": 775
    },
    {
      "epoch": 1.964735516372796,
      "grad_norm": 2.447094202041626,
      "learning_rate": 1.330787443128953e-05,
      "loss": 0.6405,
      "num_input_tokens_seen": 2673752,
      "step": 780
    },
    {
      "epoch": 1.977329974811083,
      "grad_norm": 1.717159628868103,
      "learning_rate": 1.3017459943510084e-05,
      "loss": 0.6037,
      "num_input_tokens_seen": 2689440,
      "step": 785
    },
    {
      "epoch": 1.9899244332493704,
      "grad_norm": 3.210221529006958,
      "learning_rate": 1.2729129748995749e-05,
      "loss": 0.9172,
      "num_input_tokens_seen": 2706624,
      "step": 790
    },
    {
      "epoch": 2.0025188916876573,
      "grad_norm": 1.6862154006958008,
      "learning_rate": 1.2442934001109671e-05,
      "loss": 0.7949,
      "num_input_tokens_seen": 2723496,
      "step": 795
    },
    {
      "epoch": 2.0151133501259446,
      "grad_norm": 2.2537808418273926,
      "learning_rate": 1.2158922481940361e-05,
      "loss": 0.5226,
      "num_input_tokens_seen": 2738304,
      "step": 800
    },
    {
      "epoch": 2.0151133501259446,
      "eval_accuracy": 0.7704949500614456,
      "eval_loss": 1.0528764724731445,
      "eval_runtime": 535.8681,
      "eval_samples_per_second": 0.33,
      "eval_steps_per_second": 0.33,
      "num_input_tokens_seen": 2738304,
      "step": 800
    },
    {
      "epoch": 2.027707808564232,
      "grad_norm": 5.461361408233643,
      "learning_rate": 1.1877144593642439e-05,
      "loss": 0.9266,
      "num_input_tokens_seen": 2755120,
      "step": 805
    },
    {
      "epoch": 2.040302267002519,
      "grad_norm": 2.930683135986328,
      "learning_rate": 1.1597649349843413e-05,
      "loss": 0.9805,
      "num_input_tokens_seen": 2771080,
      "step": 810
    },
    {
      "epoch": 2.052896725440806,
      "grad_norm": 3.590545654296875,
      "learning_rate": 1.1320485367118017e-05,
      "loss": 1.1796,
      "num_input_tokens_seen": 2786752,
      "step": 815
    },
    {
      "epoch": 2.065491183879093,
      "grad_norm": 2.6750876903533936,
      "learning_rate": 1.1045700856531668e-05,
      "loss": 0.4839,
      "num_input_tokens_seen": 2804392,
      "step": 820
    },
    {
      "epoch": 2.0780856423173804,
      "grad_norm": 1.7390782833099365,
      "learning_rate": 1.0773343615254446e-05,
      "loss": 0.359,
      "num_input_tokens_seen": 2818640,
      "step": 825
    },
    {
      "epoch": 2.0906801007556677,
      "grad_norm": 3.9132091999053955,
      "learning_rate": 1.0503461018246977e-05,
      "loss": 1.0472,
      "num_input_tokens_seen": 2836256,
      "step": 830
    },
    {
      "epoch": 2.1032745591939546,
      "grad_norm": 3.056783437728882,
      "learning_rate": 1.0236100010019919e-05,
      "loss": 0.8781,
      "num_input_tokens_seen": 2855496,
      "step": 835
    },
    {
      "epoch": 2.115869017632242,
      "grad_norm": 3.0513293743133545,
      "learning_rate": 9.971307096468203e-06,
      "loss": 0.9041,
      "num_input_tokens_seen": 2872200,
      "step": 840
    },
    {
      "epoch": 2.1284634760705288,
      "grad_norm": 2.966620445251465,
      "learning_rate": 9.709128336781592e-06,
      "loss": 0.7187,
      "num_input_tokens_seen": 2888256,
      "step": 845
    },
    {
      "epoch": 2.141057934508816,
      "grad_norm": 2.2629482746124268,
      "learning_rate": 9.449609335432972e-06,
      "loss": 0.8696,
      "num_input_tokens_seen": 2905920,
      "step": 850
    },
    {
      "epoch": 2.1536523929471034,
      "grad_norm": 5.053290367126465,
      "learning_rate": 9.192795234245697e-06,
      "loss": 0.6862,
      "num_input_tokens_seen": 2921056,
      "step": 855
    },
    {
      "epoch": 2.1662468513853903,
      "grad_norm": 2.8935582637786865,
      "learning_rate": 8.938730704541473e-06,
      "loss": 0.702,
      "num_input_tokens_seen": 2942096,
      "step": 860
    },
    {
      "epoch": 2.1788413098236776,
      "grad_norm": 4.863999843597412,
      "learning_rate": 8.687459939369983e-06,
      "loss": 0.9868,
      "num_input_tokens_seen": 2959320,
      "step": 865
    },
    {
      "epoch": 2.1914357682619645,
      "grad_norm": 2.9665706157684326,
      "learning_rate": 8.439026645821802e-06,
      "loss": 0.5647,
      "num_input_tokens_seen": 2976800,
      "step": 870
    },
    {
      "epoch": 2.204030226700252,
      "grad_norm": 4.912269115447998,
      "learning_rate": 8.193474037425794e-06,
      "loss": 0.7983,
      "num_input_tokens_seen": 2994536,
      "step": 875
    },
    {
      "epoch": 2.216624685138539,
      "grad_norm": 3.118041515350342,
      "learning_rate": 7.950844826632373e-06,
      "loss": 0.9227,
      "num_input_tokens_seen": 3010504,
      "step": 880
    },
    {
      "epoch": 2.229219143576826,
      "grad_norm": 1.5938191413879395,
      "learning_rate": 7.711181217383896e-06,
      "loss": 0.9203,
      "num_input_tokens_seen": 3027240,
      "step": 885
    },
    {
      "epoch": 2.2418136020151134,
      "grad_norm": 2.1337177753448486,
      "learning_rate": 7.474524897773555e-06,
      "loss": 0.5222,
      "num_input_tokens_seen": 3044024,
      "step": 890
    },
    {
      "epoch": 2.2544080604534007,
      "grad_norm": 3.2635252475738525,
      "learning_rate": 7.240917032794003e-06,
      "loss": 0.5499,
      "num_input_tokens_seen": 3060632,
      "step": 895
    },
    {
      "epoch": 2.2670025188916876,
      "grad_norm": 2.3565027713775635,
      "learning_rate": 7.010398257176878e-06,
      "loss": 0.7812,
      "num_input_tokens_seen": 3075440,
      "step": 900
    },
    {
      "epoch": 2.2670025188916876,
      "eval_accuracy": 0.7686542323215242,
      "eval_loss": 1.077448844909668,
      "eval_runtime": 536.5671,
      "eval_samples_per_second": 0.33,
      "eval_steps_per_second": 0.33,
      "num_input_tokens_seen": 3075440,
      "step": 900
    },
    {
      "epoch": 2.279596977329975,
      "grad_norm": 2.752980947494507,
      "learning_rate": 6.78300866832467e-06,
      "loss": 0.9713,
      "num_input_tokens_seen": 3094664,
      "step": 905
    },
    {
      "epoch": 2.292191435768262,
      "grad_norm": 3.7448384761810303,
      "learning_rate": 6.558787819336002e-06,
      "loss": 0.6824,
      "num_input_tokens_seen": 3111856,
      "step": 910
    },
    {
      "epoch": 2.304785894206549,
      "grad_norm": 4.047982692718506,
      "learning_rate": 6.337774712125597e-06,
      "loss": 0.7068,
      "num_input_tokens_seen": 3128312,
      "step": 915
    },
    {
      "epoch": 2.3173803526448364,
      "grad_norm": 1.6455196142196655,
      "learning_rate": 6.120007790640123e-06,
      "loss": 0.7046,
      "num_input_tokens_seen": 3146240,
      "step": 920
    },
    {
      "epoch": 2.3299748110831233,
      "grad_norm": 3.6144297122955322,
      "learning_rate": 5.905524934171086e-06,
      "loss": 0.493,
      "num_input_tokens_seen": 3164896,
      "step": 925
    },
    {
      "epoch": 2.3425692695214106,
      "grad_norm": 2.6038029193878174,
      "learning_rate": 5.694363450765958e-06,
      "loss": 0.5957,
      "num_input_tokens_seen": 3180744,
      "step": 930
    },
    {
      "epoch": 2.355163727959698,
      "grad_norm": 2.931267023086548,
      "learning_rate": 5.486560070738647e-06,
      "loss": 0.5896,
      "num_input_tokens_seen": 3196144,
      "step": 935
    },
    {
      "epoch": 2.367758186397985,
      "grad_norm": 3.1074607372283936,
      "learning_rate": 5.282150940280481e-06,
      "loss": 0.7852,
      "num_input_tokens_seen": 3214888,
      "step": 940
    },
    {
      "epoch": 2.380352644836272,
      "grad_norm": 2.5608012676239014,
      "learning_rate": 5.081171615172781e-06,
      "loss": 0.6306,
      "num_input_tokens_seen": 3231664,
      "step": 945
    },
    {
      "epoch": 2.392947103274559,
      "grad_norm": 3.663986921310425,
      "learning_rate": 4.883657054602148e-06,
      "loss": 0.5392,
      "num_input_tokens_seen": 3249000,
      "step": 950
    },
    {
      "epoch": 2.4055415617128464,
      "grad_norm": 3.1661274433135986,
      "learning_rate": 4.689641615079499e-06,
      "loss": 0.6259,
      "num_input_tokens_seen": 3267528,
      "step": 955
    },
    {
      "epoch": 2.4181360201511337,
      "grad_norm": 3.51973819732666,
      "learning_rate": 4.499159044463983e-06,
      "loss": 1.0237,
      "num_input_tokens_seen": 3284096,
      "step": 960
    },
    {
      "epoch": 2.4307304785894206,
      "grad_norm": 3.3107500076293945,
      "learning_rate": 4.312242476092698e-06,
      "loss": 0.4255,
      "num_input_tokens_seen": 3300344,
      "step": 965
    },
    {
      "epoch": 2.443324937027708,
      "grad_norm": 2.484478712081909,
      "learning_rate": 4.1289244230173715e-06,
      "loss": 0.4904,
      "num_input_tokens_seen": 3317032,
      "step": 970
    },
    {
      "epoch": 2.455919395465995,
      "grad_norm": 2.773125410079956,
      "learning_rate": 3.9492367723488685e-06,
      "loss": 0.7385,
      "num_input_tokens_seen": 3335752,
      "step": 975
    },
    {
      "epoch": 2.468513853904282,
      "grad_norm": 4.129367351531982,
      "learning_rate": 3.773210779710662e-06,
      "loss": 0.5983,
      "num_input_tokens_seen": 3352968,
      "step": 980
    },
    {
      "epoch": 2.4811083123425695,
      "grad_norm": 2.1071560382843018,
      "learning_rate": 3.600877063802055e-06,
      "loss": 0.6822,
      "num_input_tokens_seen": 3366088,
      "step": 985
    },
    {
      "epoch": 2.4937027707808563,
      "grad_norm": 1.8715922832489014,
      "learning_rate": 3.4322656010722542e-06,
      "loss": 0.5104,
      "num_input_tokens_seen": 3382936,
      "step": 990
    },
    {
      "epoch": 2.5062972292191437,
      "grad_norm": 3.2055535316467285,
      "learning_rate": 3.267405720506156e-06,
      "loss": 0.6008,
      "num_input_tokens_seen": 3397648,
      "step": 995
    },
    {
      "epoch": 2.5188916876574305,
      "grad_norm": 3.4338555335998535,
      "learning_rate": 3.106326098522705e-06,
      "loss": 0.6864,
      "num_input_tokens_seen": 3416024,
      "step": 1000
    },
    {
      "epoch": 2.5188916876574305,
      "eval_accuracy": 0.7695559062966267,
      "eval_loss": 1.0843136310577393,
      "eval_runtime": 538.1124,
      "eval_samples_per_second": 0.329,
      "eval_steps_per_second": 0.329,
      "num_input_tokens_seen": 3416024,
      "step": 1000
    }
  ],
  "logging_steps": 5,
  "max_steps": 1191,
  "num_input_tokens_seen": 3416024,
  "num_train_epochs": 3,
  "save_steps": 100,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": false
      },
      "attributes": {}
    }
  },
  "total_flos": 1.5121076737651507e+17,
  "train_batch_size": 1,
  "trial_name": null,
  "trial_params": null
}
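This is the trainer_state.json that the Hugging Face transformers Trainer writes into each checkpoint directory: log_history interleaves one training record every logging_steps (5) optimizer steps with one evaluation record every eval_steps (100) steps. Below is a minimal sketch of how the two kinds of records can be separated and the loss curves plotted; the filename, the key-based split, and the use of matplotlib are illustrative assumptions, not anything recorded in the file itself.

import json

import matplotlib.pyplot as plt

# Assumed path: adjust to your checkpoint directory, e.g. checkpoint-1000/trainer_state.json.
with open("trainer_state.json") as f:
    state = json.load(f)

# Training records carry "loss"; evaluation records carry "eval_loss" instead.
train_logs = [e for e in state["log_history"] if "loss" in e]
eval_logs = [e for e in state["log_history"] if "eval_loss" in e]

plt.plot([e["step"] for e in train_logs],
         [e["loss"] for e in train_logs], label="train loss")
plt.plot([e["step"] for e in eval_logs],
         [e["eval_loss"] for e in eval_logs], marker="o", label="eval loss")
plt.xlabel("step")
plt.ylabel("loss")
plt.legend()
plt.show()

Run against this file, the eval curve would show the loss bottoming out around step 800 (1.0529) and ticking back up by step 1000 (1.0843), which is the kind of trend such a plot is meant to surface.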