{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 10.0,
  "global_step": 67130,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.07,
      "learning_rate": 0.000297765529569492,
      "loss": 4.1249,
      "step": 500
    },
    {
      "epoch": 0.15,
      "learning_rate": 0.00029553105913898405,
      "loss": 3.6267,
      "step": 1000
    },
    {
      "epoch": 0.22,
      "learning_rate": 0.00029329658870847603,
      "loss": 3.4522,
      "step": 1500
    },
    {
      "epoch": 0.3,
      "learning_rate": 0.00029106211827796807,
      "loss": 3.3665,
      "step": 2000
    },
    {
      "epoch": 0.37,
      "learning_rate": 0.0002888276478474601,
      "loss": 3.3154,
      "step": 2500
    },
    {
      "epoch": 0.45,
      "learning_rate": 0.0002865931774169522,
      "loss": 3.2553,
      "step": 3000
    },
    {
      "epoch": 0.52,
      "learning_rate": 0.0002843587069864442,
      "loss": 3.2225,
      "step": 3500
    },
    {
      "epoch": 0.6,
      "learning_rate": 0.0002821242365559362,
      "loss": 3.1854,
      "step": 4000
    },
    {
      "epoch": 0.67,
      "learning_rate": 0.00027988976612542825,
      "loss": 3.1867,
      "step": 4500
    },
    {
      "epoch": 0.74,
      "learning_rate": 0.0002776552956949203,
      "loss": 3.1265,
      "step": 5000
    },
    {
      "epoch": 0.74,
      "eval_gen_len": 15.5914,
      "eval_loss": 2.7246766090393066,
      "eval_rouge1": 26.8378,
      "eval_rouge2": 9.3464,
      "eval_rougeL": 23.9521,
      "eval_rougeLsum": 24.2837,
      "eval_runtime": 527.2879,
      "eval_samples_per_second": 18.965,
      "eval_steps_per_second": 2.371,
      "step": 5000
    },
    {
      "epoch": 0.82,
      "learning_rate": 0.0002754208252644123,
      "loss": 3.1287,
      "step": 5500
    },
    {
      "epoch": 0.89,
      "learning_rate": 0.0002731863548339043,
      "loss": 3.1078,
      "step": 6000
    },
    {
      "epoch": 0.97,
      "learning_rate": 0.00027095188440339634,
      "loss": 3.0787,
      "step": 6500
    },
    {
      "epoch": 1.04,
      "learning_rate": 0.0002687174139728884,
      "loss": 3.0054,
      "step": 7000
    },
    {
      "epoch": 1.12,
      "learning_rate": 0.0002664829435423804,
      "loss": 2.8889,
      "step": 7500
    },
    {
      "epoch": 1.19,
      "learning_rate": 0.00026424847311187245,
      "loss": 2.9183,
      "step": 8000
    },
    {
      "epoch": 1.27,
      "learning_rate": 0.0002620140026813645,
      "loss": 2.8848,
      "step": 8500
    },
    {
      "epoch": 1.34,
      "learning_rate": 0.00025977953225085653,
      "loss": 2.872,
      "step": 9000
    },
    {
      "epoch": 1.42,
      "learning_rate": 0.00025754506182034857,
      "loss": 2.898,
      "step": 9500
    },
    {
      "epoch": 1.49,
      "learning_rate": 0.0002553105913898406,
      "loss": 2.8786,
      "step": 10000
    },
    {
      "epoch": 1.49,
      "eval_gen_len": 15.3272,
      "eval_loss": 2.653170347213745,
      "eval_rouge1": 27.5869,
      "eval_rouge2": 10.0861,
      "eval_rougeL": 24.7406,
      "eval_rougeLsum": 25.0245,
      "eval_runtime": 532.273,
      "eval_samples_per_second": 18.787,
      "eval_steps_per_second": 2.348,
      "step": 10000
    },
    {
      "epoch": 1.56,
      "learning_rate": 0.0002530761209593326,
      "loss": 2.8664,
      "step": 10500
    },
    {
      "epoch": 1.64,
      "learning_rate": 0.0002508416505288246,
      "loss": 2.873,
      "step": 11000
    },
    {
      "epoch": 1.71,
      "learning_rate": 0.00024860718009831666,
      "loss": 2.8685,
      "step": 11500
    },
    {
      "epoch": 1.79,
      "learning_rate": 0.0002463727096678087,
      "loss": 2.8713,
      "step": 12000
    },
    {
      "epoch": 1.86,
      "learning_rate": 0.00024413823923730073,
      "loss": 2.8558,
      "step": 12500
    },
    {
      "epoch": 1.94,
      "learning_rate": 0.00024190376880679277,
      "loss": 2.8361,
      "step": 13000
    },
    {
      "epoch": 2.01,
      "learning_rate": 0.0002396692983762848,
      "loss": 2.8026,
      "step": 13500
    },
    {
      "epoch": 2.09,
      "learning_rate": 0.00023743482794577684,
      "loss": 2.6527,
      "step": 14000
    },
    {
      "epoch": 2.16,
      "learning_rate": 0.00023520035751526885,
      "loss": 2.6452,
      "step": 14500
    },
    {
      "epoch": 2.23,
      "learning_rate": 0.0002329658870847609,
      "loss": 2.6587,
      "step": 15000
    },
    {
      "epoch": 2.23,
      "eval_gen_len": 15.4338,
      "eval_loss": 2.6080288887023926,
      "eval_rouge1": 28.2336,
      "eval_rouge2": 10.5229,
      "eval_rougeL": 25.3053,
      "eval_rougeLsum": 25.6716,
      "eval_runtime": 535.3754,
      "eval_samples_per_second": 18.678,
      "eval_steps_per_second": 2.335,
      "step": 15000
    },
    {
      "epoch": 2.31,
      "learning_rate": 0.00023073141665425293,
      "loss": 2.6667,
      "step": 15500
    },
    {
      "epoch": 2.38,
      "learning_rate": 0.00022849694622374496,
      "loss": 2.6723,
      "step": 16000
    },
    {
      "epoch": 2.46,
      "learning_rate": 0.000226262475793237,
      "loss": 2.6652,
      "step": 16500
    },
    {
      "epoch": 2.53,
      "learning_rate": 0.000224028005362729,
      "loss": 2.6577,
      "step": 17000
    },
    {
      "epoch": 2.61,
      "learning_rate": 0.00022179353493222105,
      "loss": 2.6673,
      "step": 17500
    },
    {
      "epoch": 2.68,
      "learning_rate": 0.00021955906450171308,
      "loss": 2.6533,
      "step": 18000
    },
    {
      "epoch": 2.76,
      "learning_rate": 0.00021732459407120512,
      "loss": 2.6571,
      "step": 18500
    },
    {
      "epoch": 2.83,
      "learning_rate": 0.00021509012364069713,
      "loss": 2.6636,
      "step": 19000
    },
    {
      "epoch": 2.9,
      "learning_rate": 0.00021285565321018917,
      "loss": 2.6579,
      "step": 19500
    },
    {
      "epoch": 2.98,
      "learning_rate": 0.0002106211827796812,
      "loss": 2.664,
      "step": 20000
    },
    {
      "epoch": 2.98,
      "eval_gen_len": 15.6255,
      "eval_loss": 2.5629758834838867,
      "eval_rouge1": 28.6673,
      "eval_rouge2": 10.8421,
      "eval_rougeL": 25.7032,
      "eval_rougeLsum": 26.0245,
      "eval_runtime": 536.8291,
      "eval_samples_per_second": 18.628,
      "eval_steps_per_second": 2.328,
      "step": 20000
    },
    {
      "epoch": 3.05,
      "learning_rate": 0.00020838671234917324,
      "loss": 2.512,
      "step": 20500
    },
    {
      "epoch": 3.13,
      "learning_rate": 0.00020615224191866528,
      "loss": 2.4748,
      "step": 21000
    },
    {
      "epoch": 3.2,
      "learning_rate": 0.0002039177714881573,
      "loss": 2.4773,
      "step": 21500
    },
    {
      "epoch": 3.28,
      "learning_rate": 0.00020168330105764932,
      "loss": 2.4694,
      "step": 22000
    },
    {
      "epoch": 3.35,
      "learning_rate": 0.00019944883062714136,
      "loss": 2.4928,
      "step": 22500
    },
    {
      "epoch": 3.43,
      "learning_rate": 0.0001972143601966334,
      "loss": 2.4941,
      "step": 23000
    },
    {
      "epoch": 3.5,
      "learning_rate": 0.0001949798897661254,
      "loss": 2.4867,
      "step": 23500
    },
    {
      "epoch": 3.58,
      "learning_rate": 0.00019274541933561744,
      "loss": 2.4978,
      "step": 24000
    },
    {
      "epoch": 3.65,
      "learning_rate": 0.00019051094890510948,
      "loss": 2.5118,
      "step": 24500
    },
    {
      "epoch": 3.72,
      "learning_rate": 0.00018827647847460152,
      "loss": 2.4896,
      "step": 25000
    },
    {
      "epoch": 3.72,
      "eval_gen_len": 16.1841,
      "eval_loss": 2.567906141281128,
      "eval_rouge1": 28.842,
      "eval_rouge2": 10.885,
      "eval_rougeL": 25.6757,
      "eval_rougeLsum": 26.0633,
      "eval_runtime": 528.6739,
      "eval_samples_per_second": 18.915,
      "eval_steps_per_second": 2.364,
      "step": 25000
    },
    {
      "epoch": 3.8,
      "learning_rate": 0.00018604200804409355,
      "loss": 2.5141,
      "step": 25500
    },
    {
      "epoch": 3.87,
      "learning_rate": 0.00018380753761358556,
      "loss": 2.5001,
      "step": 26000
    },
    {
      "epoch": 3.95,
      "learning_rate": 0.0001815730671830776,
      "loss": 2.4924,
      "step": 26500
    },
    {
      "epoch": 4.02,
      "learning_rate": 0.00017933859675256964,
      "loss": 2.4512,
      "step": 27000
    },
    {
      "epoch": 4.1,
      "learning_rate": 0.00017710412632206167,
      "loss": 2.3092,
      "step": 27500
    },
    {
      "epoch": 4.17,
      "learning_rate": 0.00017486965589155368,
      "loss": 2.3359,
      "step": 28000
    },
    {
      "epoch": 4.25,
      "learning_rate": 0.00017263518546104572,
      "loss": 2.3522,
      "step": 28500
    },
    {
      "epoch": 4.32,
      "learning_rate": 0.00017040071503053776,
      "loss": 2.3723,
      "step": 29000
    },
    {
      "epoch": 4.39,
      "learning_rate": 0.0001681662446000298,
      "loss": 2.3489,
      "step": 29500
    },
    {
      "epoch": 4.47,
      "learning_rate": 0.00016593177416952183,
      "loss": 2.34,
      "step": 30000
    },
    {
      "epoch": 4.47,
      "eval_gen_len": 15.7826,
      "eval_loss": 2.5564002990722656,
      "eval_rouge1": 29.3246,
      "eval_rouge2": 11.1981,
      "eval_rougeL": 26.1637,
      "eval_rougeLsum": 26.5392,
      "eval_runtime": 536.1351,
      "eval_samples_per_second": 18.652,
      "eval_steps_per_second": 2.332,
      "step": 30000
    },
    {
      "epoch": 4.54,
      "learning_rate": 0.00016369730373901384,
      "loss": 2.3463,
      "step": 30500
    },
    {
      "epoch": 4.62,
      "learning_rate": 0.00016146283330850588,
      "loss": 2.3537,
      "step": 31000
    },
    {
      "epoch": 4.69,
      "learning_rate": 0.00015922836287799791,
      "loss": 2.345,
      "step": 31500
    },
    {
      "epoch": 4.77,
      "learning_rate": 0.00015699389244748995,
      "loss": 2.345,
      "step": 32000
    },
    {
      "epoch": 4.84,
      "learning_rate": 0.00015475942201698196,
      "loss": 2.3586,
      "step": 32500
    },
    {
      "epoch": 4.92,
      "learning_rate": 0.000152524951586474,
      "loss": 2.3495,
      "step": 33000
    },
    {
      "epoch": 4.99,
      "learning_rate": 0.00015029048115596604,
      "loss": 2.3716,
      "step": 33500
    },
    {
      "epoch": 5.06,
      "learning_rate": 0.00014805601072545804,
      "loss": 2.216,
      "step": 34000
    },
    {
      "epoch": 5.14,
      "learning_rate": 0.00014582154029495008,
      "loss": 2.211,
      "step": 34500
    },
    {
      "epoch": 5.21,
      "learning_rate": 0.00014358706986444212,
      "loss": 2.2204,
      "step": 35000
    },
    {
      "epoch": 5.21,
      "eval_gen_len": 15.8374,
      "eval_loss": 2.5744452476501465,
      "eval_rouge1": 29.5545,
      "eval_rouge2": 11.3806,
      "eval_rougeL": 26.3237,
      "eval_rougeLsum": 26.6993,
      "eval_runtime": 531.1942,
      "eval_samples_per_second": 18.826,
      "eval_steps_per_second": 2.353,
      "step": 35000
    },
    {
      "epoch": 5.29,
      "learning_rate": 0.00014135259943393416,
      "loss": 2.2043,
      "step": 35500
    },
    {
      "epoch": 5.36,
      "learning_rate": 0.0001391181290034262,
      "loss": 2.2349,
      "step": 36000
    },
    {
      "epoch": 5.44,
      "learning_rate": 0.0001368836585729182,
      "loss": 2.2155,
      "step": 36500
    },
    {
      "epoch": 5.51,
      "learning_rate": 0.00013464918814241024,
      "loss": 2.2326,
      "step": 37000
    },
    {
      "epoch": 5.59,
      "learning_rate": 0.00013241471771190228,
      "loss": 2.2205,
      "step": 37500
    },
    {
      "epoch": 5.66,
      "learning_rate": 0.0001301802472813943,
      "loss": 2.2288,
      "step": 38000
    },
    {
      "epoch": 5.74,
      "learning_rate": 0.00012794577685088632,
      "loss": 2.228,
      "step": 38500
    },
    {
      "epoch": 5.81,
      "learning_rate": 0.00012571130642037836,
      "loss": 2.2365,
      "step": 39000
    },
    {
      "epoch": 5.88,
      "learning_rate": 0.00012347683598987037,
      "loss": 2.2164,
      "step": 39500
    },
    {
      "epoch": 5.96,
      "learning_rate": 0.00012124236555936242,
      "loss": 2.2301,
      "step": 40000
    },
    {
      "epoch": 5.96,
      "eval_gen_len": 15.7213,
      "eval_loss": 2.5613908767700195,
      "eval_rouge1": 29.5872,
      "eval_rouge2": 11.4227,
      "eval_rougeL": 26.3139,
      "eval_rougeLsum": 26.7196,
      "eval_runtime": 535.5579,
      "eval_samples_per_second": 18.672,
      "eval_steps_per_second": 2.334,
      "step": 40000
    },
    {
      "epoch": 6.03,
      "learning_rate": 0.00011900789512885446,
      "loss": 2.1538,
      "step": 40500
    },
    {
      "epoch": 6.11,
      "learning_rate": 0.00011677342469834648,
      "loss": 2.0981,
      "step": 41000
    },
    {
      "epoch": 6.18,
      "learning_rate": 0.00011453895426783852,
      "loss": 2.0973,
      "step": 41500
    },
    {
      "epoch": 6.26,
      "learning_rate": 0.00011230448383733054,
      "loss": 2.0959,
      "step": 42000
    },
    {
      "epoch": 6.33,
      "learning_rate": 0.00011007001340682258,
      "loss": 2.1013,
      "step": 42500
    },
    {
      "epoch": 6.41,
      "learning_rate": 0.0001078355429763146,
      "loss": 2.1126,
      "step": 43000
    },
    {
      "epoch": 6.48,
      "learning_rate": 0.00010560107254580664,
      "loss": 2.097,
      "step": 43500
    },
    {
      "epoch": 6.55,
      "learning_rate": 0.00010336660211529866,
      "loss": 2.1278,
      "step": 44000
    },
    {
      "epoch": 6.63,
      "learning_rate": 0.0001011321316847907,
      "loss": 2.118,
      "step": 44500
    },
    {
      "epoch": 6.7,
      "learning_rate": 9.889766125428273e-05,
      "loss": 2.1219,
      "step": 45000
    },
    {
      "epoch": 6.7,
      "eval_gen_len": 15.936,
      "eval_loss": 2.5617337226867676,
      "eval_rouge1": 29.8256,
      "eval_rouge2": 11.3702,
      "eval_rougeL": 26.4156,
      "eval_rougeLsum": 26.8465,
      "eval_runtime": 532.4007,
      "eval_samples_per_second": 18.783,
      "eval_steps_per_second": 2.348,
      "step": 45000
    },
    {
      "epoch": 6.78,
      "learning_rate": 9.666319082377476e-05,
      "loss": 2.128,
      "step": 45500
    },
    {
      "epoch": 6.85,
      "learning_rate": 9.44287203932668e-05,
      "loss": 2.1422,
      "step": 46000
    },
    {
      "epoch": 6.93,
      "learning_rate": 9.219424996275882e-05,
      "loss": 2.1481,
      "step": 46500
    },
    {
      "epoch": 7.0,
      "learning_rate": 8.995977953225085e-05,
      "loss": 2.1266,
      "step": 47000
    },
    {
      "epoch": 7.08,
      "learning_rate": 8.772530910174288e-05,
      "loss": 2.0216,
      "step": 47500
    },
    {
      "epoch": 7.15,
      "learning_rate": 8.549083867123491e-05,
      "loss": 2.0065,
      "step": 48000
    },
    {
      "epoch": 7.22,
      "learning_rate": 8.325636824072694e-05,
      "loss": 2.0364,
      "step": 48500
    },
    {
      "epoch": 7.3,
      "learning_rate": 8.102189781021897e-05,
      "loss": 2.0222,
      "step": 49000
    },
    {
      "epoch": 7.37,
      "learning_rate": 7.8787427379711e-05,
      "loss": 2.0166,
      "step": 49500
    },
    {
      "epoch": 7.45,
      "learning_rate": 7.655295694920303e-05,
      "loss": 2.007,
      "step": 50000
    },
    {
      "epoch": 7.45,
      "eval_gen_len": 15.7144,
      "eval_loss": 2.6014492511749268,
      "eval_rouge1": 29.743,
      "eval_rouge2": 11.4336,
      "eval_rougeL": 26.38,
      "eval_rougeLsum": 26.772,
      "eval_runtime": 532.2148,
      "eval_samples_per_second": 18.789,
      "eval_steps_per_second": 2.349,
      "step": 50000
    },
    {
      "epoch": 7.52,
      "learning_rate": 7.431848651869506e-05,
      "loss": 2.0252,
      "step": 50500
    },
    {
      "epoch": 7.6,
      "learning_rate": 7.20840160881871e-05,
      "loss": 2.0376,
      "step": 51000
    },
    {
      "epoch": 7.67,
      "learning_rate": 6.984954565767912e-05,
      "loss": 2.0143,
      "step": 51500
    },
    {
      "epoch": 7.75,
      "learning_rate": 6.761507522717115e-05,
      "loss": 2.0242,
      "step": 52000
    },
    {
      "epoch": 7.82,
      "learning_rate": 6.538060479666319e-05,
      "loss": 2.0094,
      "step": 52500
    },
    {
      "epoch": 7.9,
      "learning_rate": 6.314613436615521e-05,
      "loss": 2.03,
      "step": 53000
    },
    {
      "epoch": 7.97,
      "learning_rate": 6.091166393564725e-05,
      "loss": 2.02,
      "step": 53500
    },
    {
      "epoch": 8.04,
      "learning_rate": 5.867719350513928e-05,
      "loss": 1.972,
      "step": 54000
    },
    {
      "epoch": 8.12,
      "learning_rate": 5.6442723074631304e-05,
      "loss": 1.9319,
      "step": 54500
    },
    {
      "epoch": 8.19,
      "learning_rate": 5.4208252644123334e-05,
      "loss": 1.9398,
      "step": 55000
    },
    {
      "epoch": 8.19,
      "eval_gen_len": 15.9308,
      "eval_loss": 2.608029365539551,
      "eval_rouge1": 29.9478,
      "eval_rouge2": 11.4801,
      "eval_rougeL": 26.5352,
      "eval_rougeLsum": 26.9746,
      "eval_runtime": 532.8851,
      "eval_samples_per_second": 18.766,
      "eval_steps_per_second": 2.346,
      "step": 55000
    },
    {
      "epoch": 8.27,
      "learning_rate": 5.1973782213615364e-05,
      "loss": 1.9379,
      "step": 55500
    },
    {
      "epoch": 8.34,
      "learning_rate": 4.9739311783107394e-05,
      "loss": 1.9406,
      "step": 56000
    },
    {
      "epoch": 8.42,
      "learning_rate": 4.7504841352599425e-05,
      "loss": 1.9487,
      "step": 56500
    },
    {
      "epoch": 8.49,
      "learning_rate": 4.5270370922091455e-05,
      "loss": 1.9554,
      "step": 57000
    },
    {
      "epoch": 8.57,
      "learning_rate": 4.303590049158349e-05,
      "loss": 1.9453,
      "step": 57500
    },
    {
      "epoch": 8.64,
      "learning_rate": 4.080143006107552e-05,
      "loss": 1.9522,
      "step": 58000
    },
    {
      "epoch": 8.71,
      "learning_rate": 3.856695963056755e-05,
      "loss": 1.958,
      "step": 58500
    },
    {
      "epoch": 8.79,
      "learning_rate": 3.633248920005958e-05,
      "loss": 1.9495,
      "step": 59000
    },
    {
      "epoch": 8.86,
      "learning_rate": 3.409801876955161e-05,
      "loss": 1.9276,
      "step": 59500
    },
    {
      "epoch": 8.94,
      "learning_rate": 3.186354833904364e-05,
      "loss": 1.9426,
      "step": 60000
    },
    {
      "epoch": 8.94,
      "eval_gen_len": 15.8598,
      "eval_loss": 2.6021728515625,
      "eval_rouge1": 30.097,
      "eval_rouge2": 11.5602,
      "eval_rougeL": 26.705,
      "eval_rougeLsum": 27.1092,
      "eval_runtime": 531.6176,
      "eval_samples_per_second": 18.811,
      "eval_steps_per_second": 2.351,
      "step": 60000
    },
    {
      "epoch": 9.01,
      "learning_rate": 2.9629077908535672e-05,
      "loss": 1.9438,
      "step": 60500
    },
    {
      "epoch": 9.09,
      "learning_rate": 2.7394607478027705e-05,
      "loss": 1.8807,
      "step": 61000
    },
    {
      "epoch": 9.16,
      "learning_rate": 2.5160137047519735e-05,
      "loss": 1.8708,
      "step": 61500
    },
    {
      "epoch": 9.24,
      "learning_rate": 2.2925666617011765e-05,
      "loss": 1.8727,
      "step": 62000
    },
    {
      "epoch": 9.31,
      "learning_rate": 2.0691196186503795e-05,
      "loss": 1.8911,
      "step": 62500
    },
    {
      "epoch": 9.38,
      "learning_rate": 1.845672575599583e-05,
      "loss": 1.8895,
      "step": 63000
    },
    {
      "epoch": 9.46,
      "learning_rate": 1.622225532548786e-05,
      "loss": 1.8851,
      "step": 63500
    },
    {
      "epoch": 9.53,
      "learning_rate": 1.3987784894979889e-05,
      "loss": 1.9082,
      "step": 64000
    },
    {
      "epoch": 9.61,
      "learning_rate": 1.1753314464471919e-05,
      "loss": 1.8864,
      "step": 64500
    },
    {
      "epoch": 9.68,
      "learning_rate": 9.518844033963949e-06,
      "loss": 1.8853,
      "step": 65000
    },
    {
      "epoch": 9.68,
      "eval_gen_len": 15.803,
      "eval_loss": 2.6137681007385254,
      "eval_rouge1": 30.1588,
      "eval_rouge2": 11.5823,
      "eval_rougeL": 26.6984,
      "eval_rougeLsum": 27.1371,
      "eval_runtime": 529.3936,
      "eval_samples_per_second": 18.89,
      "eval_steps_per_second": 2.361,
      "step": 65000
    },
    {
      "epoch": 9.76,
      "learning_rate": 7.284373603455981e-06,
      "loss": 1.8973,
      "step": 65500
    },
    {
      "epoch": 9.83,
      "learning_rate": 5.04990317294801e-06,
      "loss": 1.8832,
      "step": 66000
    },
    {
      "epoch": 9.91,
      "learning_rate": 2.815432742440042e-06,
      "loss": 1.8678,
      "step": 66500
    },
    {
      "epoch": 9.98,
      "learning_rate": 5.809623119320721e-07,
      "loss": 1.9084,
      "step": 67000
    },
    {
      "epoch": 10.0,
      "step": 67130,
      "total_flos": 2.0764683296037274e+17,
      "train_loss": 2.3876063485090873,
      "train_runtime": 39696.1157,
      "train_samples_per_second": 13.528,
      "train_steps_per_second": 1.691
    }
  ],
  "max_steps": 67130,
  "num_train_epochs": 10,
  "total_flos": 2.0764683296037274e+17,
  "trial_name": null,
  "trial_params": null
}