{
  "best_global_step": null,
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 3.0,
  "eval_steps": 500,
  "global_step": 288,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0,
      "eval_loss": 4.369085311889648,
      "eval_num_tokens": 0.0,
      "eval_runtime": 50.8277,
      "eval_samples_per_second": 43.559,
      "eval_steps_per_second": 1.377,
      "step": 0
    },
    {
      "epoch": 0.010416666666666666,
      "grad_norm": 58.74515151977539,
      "learning_rate": 0.0,
      "loss": 16.1181,
      "num_tokens": 1835008.0,
      "step": 1
    },
    {
      "epoch": 0.020833333333333332,
      "grad_norm": 49.36063766479492,
      "learning_rate": 1.3793103448275863e-05,
      "loss": 14.338,
      "num_tokens": 3669861.0,
      "step": 2
    },
    {
      "epoch": 0.03125,
      "grad_norm": 29.762441635131836,
      "learning_rate": 2.7586206896551727e-05,
      "loss": 11.9609,
      "num_tokens": 5504404.0,
      "step": 3
    },
    {
      "epoch": 0.041666666666666664,
      "grad_norm": 15.370545387268066,
      "learning_rate": 4.1379310344827587e-05,
      "loss": 10.0978,
      "num_tokens": 7338364.0,
      "step": 4
    },
    {
      "epoch": 0.052083333333333336,
      "grad_norm": 8.646096229553223,
      "learning_rate": 5.517241379310345e-05,
      "loss": 9.3408,
      "num_tokens": 9170987.0,
      "step": 5
    },
    {
      "epoch": 0.0625,
      "grad_norm": 19.0706729888916,
      "learning_rate": 6.896551724137931e-05,
      "loss": 12.5751,
      "num_tokens": 10988206.0,
      "step": 6
    },
    {
      "epoch": 0.07291666666666667,
      "grad_norm": 10.962234497070312,
      "learning_rate": 8.275862068965517e-05,
      "loss": 10.2249,
      "num_tokens": 12735535.0,
      "step": 7
    },
    {
      "epoch": 0.08333333333333333,
      "grad_norm": 8.556324005126953,
      "learning_rate": 9.655172413793105e-05,
      "loss": 8.5934,
      "num_tokens": 14570434.0,
      "step": 8
    },
    {
      "epoch": 0.09375,
      "grad_norm": 5.068751811981201,
      "learning_rate": 0.0001103448275862069,
      "loss": 7.9362,
      "num_tokens": 16405103.0,
      "step": 9
    },
    {
      "epoch": 0.10416666666666667,
      "grad_norm": 3.8277363777160645,
      "learning_rate": 0.00012413793103448277,
      "loss": 7.8178,
      "num_tokens": 18239342.0,
      "step": 10
    },
    {
      "epoch": 0.11458333333333333,
      "grad_norm": 3.6145989894866943,
      "learning_rate": 0.00013793103448275863,
      "loss": 7.5281,
      "num_tokens": 20072748.0,
      "step": 11
    },
    {
      "epoch": 0.125,
      "grad_norm": 4.158721446990967,
      "learning_rate": 0.00015172413793103449,
      "loss": 7.0684,
      "num_tokens": 21901095.0,
      "step": 12
    },
    {
      "epoch": 0.13541666666666666,
      "grad_norm": 6.527482509613037,
      "learning_rate": 0.00016551724137931035,
      "loss": 6.7699,
      "num_tokens": 23677594.0,
      "step": 13
    },
    {
      "epoch": 0.14583333333333334,
      "grad_norm": 4.1348772048950195,
      "learning_rate": 0.0001793103448275862,
      "loss": 7.4985,
      "num_tokens": 25512546.0,
      "step": 14
    },
    {
      "epoch": 0.15625,
      "grad_norm": 3.333523750305176,
      "learning_rate": 0.0001931034482758621,
      "loss": 7.3158,
      "num_tokens": 27347253.0,
      "step": 15
    },
    {
      "epoch": 0.16666666666666666,
      "grad_norm": 2.9023752212524414,
      "learning_rate": 0.00020689655172413795,
      "loss": 7.5429,
      "num_tokens": 29181547.0,
      "step": 16
    },
    {
      "epoch": 0.17708333333333334,
      "grad_norm": 2.394178628921509,
      "learning_rate": 0.0002206896551724138,
      "loss": 7.268,
      "num_tokens": 31015010.0,
      "step": 17
    },
    {
      "epoch": 0.1875,
      "grad_norm": 1.8874393701553345,
      "learning_rate": 0.00023448275862068965,
      "loss": 6.7181,
      "num_tokens": 32845451.0,
      "step": 18
    },
    {
      "epoch": 0.19791666666666666,
      "grad_norm": 3.6218419075012207,
      "learning_rate": 0.00024827586206896553,
      "loss": 5.6449,
      "num_tokens": 34610156.0,
      "step": 19
    },
    {
      "epoch": 0.20833333333333334,
      "grad_norm": 3.005610227584839,
      "learning_rate": 0.00026206896551724137,
      "loss": 7.1118,
      "num_tokens": 36445159.0,
      "step": 20
    },
    {
      "epoch": 0.21875,
      "grad_norm": 2.0965850353240967,
      "learning_rate": 0.00027586206896551725,
      "loss": 7.2549,
      "num_tokens": 38279897.0,
      "step": 21
    },
    {
      "epoch": 0.22916666666666666,
      "grad_norm": 1.978231430053711,
      "learning_rate": 0.00028965517241379314,
      "loss": 7.1056,
      "num_tokens": 40114299.0,
      "step": 22
    },
    {
      "epoch": 0.23958333333333334,
      "grad_norm": 1.7989438772201538,
      "learning_rate": 0.00030344827586206897,
      "loss": 6.9942,
      "num_tokens": 41948073.0,
      "step": 23
    },
    {
      "epoch": 0.25,
      "grad_norm": 1.8263142108917236,
      "learning_rate": 0.00031724137931034486,
      "loss": 6.9085,
      "num_tokens": 43779853.0,
      "step": 24
    },
    {
      "epoch": 0.2604166666666667,
      "grad_norm": 3.1426234245300293,
      "learning_rate": 0.0003310344827586207,
      "loss": 4.5817,
      "num_tokens": 45542999.0,
      "step": 25
    },
    {
      "epoch": 0.2708333333333333,
      "grad_norm": 2.298151969909668,
      "learning_rate": 0.0003448275862068965,
      "loss": 6.7974,
      "num_tokens": 47378007.0,
      "step": 26
    },
    {
      "epoch": 0.28125,
      "grad_norm": 2.075996160507202,
      "learning_rate": 0.0003586206896551724,
      "loss": 7.0411,
      "num_tokens": 49212861.0,
      "step": 27
    },
    {
      "epoch": 0.2916666666666667,
      "grad_norm": 2.4668593406677246,
      "learning_rate": 0.0003724137931034483,
      "loss": 7.2994,
      "num_tokens": 51047392.0,
      "step": 28
    },
    {
      "epoch": 0.3020833333333333,
      "grad_norm": 1.7986633777618408,
      "learning_rate": 0.0003862068965517242,
      "loss": 7.0758,
      "num_tokens": 52881351.0,
      "step": 29
    },
    {
      "epoch": 0.3125,
      "grad_norm": 1.2921061515808105,
      "learning_rate": 0.0004,
      "loss": 6.8975,
      "num_tokens": 54714034.0,
      "step": 30
    },
    {
      "epoch": 0.3229166666666667,
      "grad_norm": 4.423031330108643,
      "learning_rate": 0.000399985287214871,
      "loss": 5.5865,
      "num_tokens": 56533703.0,
      "step": 31
    },
    {
      "epoch": 0.3333333333333333,
      "grad_norm": 2.8978898525238037,
      "learning_rate": 0.00039994115102414443,
      "loss": 6.1077,
      "num_tokens": 58295030.0,
      "step": 32
    },
    {
      "epoch": 0.34375,
      "grad_norm": 2.146373748779297,
      "learning_rate": 0.0003998675979214832,
      "loss": 6.8829,
      "num_tokens": 60129965.0,
      "step": 33
    },
    {
      "epoch": 0.3541666666666667,
      "grad_norm": 2.34240460395813,
      "learning_rate": 0.0003997646387285973,
      "loss": 7.2702,
      "num_tokens": 61964623.0,
      "step": 34
    },
    {
      "epoch": 0.3645833333333333,
      "grad_norm": 2.9134490489959717,
      "learning_rate": 0.0003996322885936515,
      "loss": 6.8983,
      "num_tokens": 63798869.0,
      "step": 35
    },
    {
      "epoch": 0.375,
      "grad_norm": 2.008151054382324,
      "learning_rate": 0.00039947056698903674,
      "loss": 6.8729,
      "num_tokens": 65632233.0,
      "step": 36
    },
    {
      "epoch": 0.3854166666666667,
      "grad_norm": 2.377063512802124,
      "learning_rate": 0.0003992794977085053,
      "loss": 6.6035,
      "num_tokens": 67461424.0,
      "step": 37
    },
    {
      "epoch": 0.3958333333333333,
      "grad_norm": 1.4472414255142212,
      "learning_rate": 0.0003990591088636698,
      "loss": 5.1503,
      "num_tokens": 69229515.0,
      "step": 38
    },
    {
      "epoch": 0.40625,
      "grad_norm": 1.7487033605575562,
      "learning_rate": 0.0003988094328798676,
      "loss": 6.7834,
      "num_tokens": 71064475.0,
      "step": 39
    },
    {
      "epoch": 0.4166666666666667,
      "grad_norm": 1.5388942956924438,
      "learning_rate": 0.00039853050649138943,
      "loss": 7.1453,
      "num_tokens": 72899173.0,
      "step": 40
    },
    {
      "epoch": 0.4270833333333333,
      "grad_norm": 1.8272168636322021,
      "learning_rate": 0.00039822237073607573,
      "loss": 7.0121,
      "num_tokens": 74733506.0,
      "step": 41
    },
    {
      "epoch": 0.4375,
      "grad_norm": 1.3105082511901855,
      "learning_rate": 0.0003978850709492779,
      "loss": 6.8996,
      "num_tokens": 76567089.0,
      "step": 42
    },
    {
      "epoch": 0.4479166666666667,
      "grad_norm": 1.8905762434005737,
      "learning_rate": 0.000397518656757189,
      "loss": 6.5482,
      "num_tokens": 78397511.0,
      "step": 43
    },
    {
      "epoch": 0.4583333333333333,
      "grad_norm": 11.841277122497559,
      "learning_rate": 0.0003971231820695417,
      "loss": 4.8639,
      "num_tokens": 80155993.0,
      "step": 44
    },
    {
      "epoch": 0.46875,
      "grad_norm": 3.4380874633789062,
      "learning_rate": 0.000396698705071677,
      "loss": 6.6794,
      "num_tokens": 81991001.0,
      "step": 45
    },
    {
      "epoch": 0.4791666666666667,
      "grad_norm": 1.8084958791732788,
      "learning_rate": 0.0003962452882159836,
      "loss": 7.0229,
      "num_tokens": 83825755.0,
      "step": 46
    },
    {
      "epoch": 0.4895833333333333,
      "grad_norm": 3.5802125930786133,
      "learning_rate": 0.0003957629982127092,
      "loss": 7.0927,
      "num_tokens": 85660194.0,
      "step": 47
    },
    {
      "epoch": 0.5,
      "grad_norm": 2.4923155307769775,
      "learning_rate": 0.00039525190602014563,
      "loss": 6.8655,
      "num_tokens": 87494025.0,
      "step": 48
    },
    {
      "epoch": 0.5104166666666666,
      "grad_norm": 2.5725772380828857,
      "learning_rate": 0.00039471208683418895,
      "loss": 6.6733,
      "num_tokens": 89326018.0,
      "step": 49
    },
    {
      "epoch": 0.5208333333333334,
      "grad_norm": 2.174575090408325,
      "learning_rate": 0.00039414362007727616,
      "loss": 4.716,
      "num_tokens": 91084381.0,
      "step": 50
    },
    {
      "epoch": 0.53125,
      "grad_norm": 3.2442007064819336,
      "learning_rate": 0.0003935465893866998,
      "loss": 6.532,
      "num_tokens": 92919389.0,
      "step": 51
    },
    {
      "epoch": 0.5416666666666666,
      "grad_norm": 2.0102264881134033,
      "learning_rate": 0.0003929210826023024,
      "loss": 6.8666,
      "num_tokens": 94754271.0,
      "step": 52
    },
    {
      "epoch": 0.5520833333333334,
      "grad_norm": 2.0294554233551025,
      "learning_rate": 0.00039226719175355316,
      "loss": 6.9534,
      "num_tokens": 96588840.0,
      "step": 53
    },
    {
      "epoch": 0.5625,
      "grad_norm": 1.8881115913391113,
      "learning_rate": 0.0003915850130460076,
      "loss": 7.0359,
      "num_tokens": 98422941.0,
      "step": 54
    },
    {
      "epoch": 0.5729166666666666,
      "grad_norm": 1.5385727882385254,
      "learning_rate": 0.00039087464684715325,
      "loss": 6.8394,
      "num_tokens": 100255975.0,
      "step": 55
    },
    {
      "epoch": 0.5833333333333334,
      "grad_norm": 1.7957665920257568,
      "learning_rate": 0.0003901361976716425,
      "loss": 5.6838,
      "num_tokens": 102078387.0,
      "step": 56
    },
    {
      "epoch": 0.59375,
      "grad_norm": 3.3215200901031494,
      "learning_rate": 0.0003893697741659158,
      "loss": 5.8232,
      "num_tokens": 103850401.0,
      "step": 57
    },
    {
      "epoch": 0.6041666666666666,
      "grad_norm": 1.703303337097168,
      "learning_rate": 0.00038857548909221687,
      "loss": 6.6663,
      "num_tokens": 105685318.0,
      "step": 58
    },
    {
      "epoch": 0.6145833333333334,
      "grad_norm": 1.336297869682312,
      "learning_rate": 0.00038775345931200175,
      "loss": 6.9368,
      "num_tokens": 107519970.0,
      "step": 59
    },
    {
      "epoch": 0.625,
      "grad_norm": 1.9724664688110352,
      "learning_rate": 0.00038690380576874585,
      "loss": 6.898,
      "num_tokens": 109354164.0,
      "step": 60
    },
    {
      "epoch": 0.6354166666666666,
      "grad_norm": 1.3228670358657837,
      "learning_rate": 0.0003860266534701491,
      "loss": 6.7593,
      "num_tokens": 111187370.0,
      "step": 61
    },
    {
      "epoch": 0.6458333333333334,
      "grad_norm": 1.9427003860473633,
      "learning_rate": 0.00038512213146974476,
      "loss": 5.7435,
      "num_tokens": 113012336.0,
      "step": 62
    },
    {
      "epoch": 0.65625,
      "grad_norm": 1.1739882230758667,
      "learning_rate": 0.00038419037284791093,
      "loss": 5.1107,
      "num_tokens": 114767666.0,
      "step": 63
    },
    {
      "epoch": 0.6666666666666666,
      "grad_norm": 1.6651009321212769,
      "learning_rate": 0.0003832315146922917,
      "loss": 6.5357,
      "num_tokens": 116602636.0,
      "step": 64
    },
    {
      "epoch": 0.6770833333333334,
      "grad_norm": 1.9049042463302612,
      "learning_rate": 0.0003822456980776272,
      "loss": 6.806,
      "num_tokens": 118437323.0,
      "step": 65
    },
    {
      "epoch": 0.6875,
      "grad_norm": 1.4705581665039062,
      "learning_rate": 0.00038123306804499756,
      "loss": 6.844,
      "num_tokens": 120271633.0,
      "step": 66
    },
    {
      "epoch": 0.6979166666666666,
      "grad_norm": 2.246387243270874,
      "learning_rate": 0.0003801937735804838,
      "loss": 6.6984,
      "num_tokens": 122105100.0,
      "step": 67
    },
    {
      "epoch": 0.7083333333333334,
      "grad_norm": 1.2600950002670288,
      "learning_rate": 0.0003791279675932473,
      "loss": 6.3485,
      "num_tokens": 123935290.0,
      "step": 68
    },
    {
      "epoch": 0.71875,
      "grad_norm": 1.3609380722045898,
      "learning_rate": 0.0003780358068930329,
      "loss": 4.6996,
      "num_tokens": 125671781.0,
      "step": 69
    },
    {
      "epoch": 0.7291666666666666,
      "grad_norm": 1.9205929040908813,
      "learning_rate": 0.00037691745216709754,
      "loss": 6.5454,
      "num_tokens": 127506783.0,
      "step": 70
    },
    {
      "epoch": 0.7395833333333334,
      "grad_norm": 1.366835117340088,
      "learning_rate": 0.0003757730679565692,
      "loss": 6.5876,
      "num_tokens": 129341530.0,
      "step": 71
    },
    {
      "epoch": 0.75,
      "grad_norm": 1.225515365600586,
      "learning_rate": 0.00037460282263223764,
      "loss": 6.8365,
      "num_tokens": 131175924.0,
      "step": 72
    },
    {
      "epoch": 0.7604166666666666,
      "grad_norm": 1.4327815771102905,
      "learning_rate": 0.000373406888369783,
      "loss": 6.7364,
      "num_tokens": 133009659.0,
      "step": 73
    },
    {
      "epoch": 0.7708333333333334,
      "grad_norm": 0.9915421605110168,
      "learning_rate": 0.00037218544112444375,
      "loss": 6.5099,
      "num_tokens": 134841522.0,
      "step": 74
    },
    {
      "epoch": 0.78125,
      "grad_norm": 1.3610036373138428,
      "learning_rate": 0.00037093866060512834,
      "loss": 3.9674,
      "num_tokens": 136607712.0,
      "step": 75
    },
    {
      "epoch": 0.7916666666666666,
      "grad_norm": 2.1861977577209473,
      "learning_rate": 0.0003696667302479757,
      "loss": 6.5836,
      "num_tokens": 138442720.0,
      "step": 76
    },
    {
      "epoch": 0.8020833333333334,
      "grad_norm": 1.3921793699264526,
      "learning_rate": 0.00036836983718936624,
      "loss": 6.7691,
      "num_tokens": 140277585.0,
      "step": 77
    },
    {
      "epoch": 0.8125,
      "grad_norm": 1.543531060218811,
      "learning_rate": 0.00036704817223838905,
      "loss": 7.0449,
      "num_tokens": 142112123.0,
      "step": 78
    },
    {
      "epoch": 0.8229166666666666,
      "grad_norm": 1.3533965349197388,
      "learning_rate": 0.00036570192984876847,
      "loss": 6.8044,
      "num_tokens": 143946109.0,
      "step": 79
    },
    {
      "epoch": 0.8333333333333334,
      "grad_norm": 1.278024673461914,
      "learning_rate": 0.0003643313080902546,
      "loss": 6.3323,
      "num_tokens": 145778859.0,
      "step": 80
    },
    {
      "epoch": 0.84375,
      "grad_norm": 1.2391217947006226,
      "learning_rate": 0.0003629365086194818,
      "loss": 5.207,
      "num_tokens": 147600412.0,
      "step": 81
    },
    {
      "epoch": 0.8541666666666666,
      "grad_norm": 1.343796968460083,
      "learning_rate": 0.000361517736650299,
      "loss": 5.7432,
      "num_tokens": 149380352.0,
      "step": 82
    },
    {
      "epoch": 0.8645833333333334,
      "grad_norm": 1.04697847366333,
      "learning_rate": 0.00036007520092357765,
      "loss": 6.4807,
      "num_tokens": 151215279.0,
      "step": 83
    },
    {
      "epoch": 0.875,
      "grad_norm": 1.4012295007705688,
      "learning_rate": 0.00035860911367649955,
      "loss": 6.8154,
      "num_tokens": 153049931.0,
      "step": 84
    },
    {
      "epoch": 0.8854166666666666,
      "grad_norm": 1.1908930540084839,
      "learning_rate": 0.0003571196906113313,
      "loss": 6.7018,
      "num_tokens": 154884077.0,
      "step": 85
    },
    {
      "epoch": 0.8958333333333334,
      "grad_norm": 1.061625361442566,
      "learning_rate": 0.00035560715086368794,
      "loss": 6.5545,
      "num_tokens": 156717218.0,
      "step": 86
    },
    {
      "epoch": 0.90625,
      "grad_norm": 1.244210958480835,
      "learning_rate": 0.00035407171697029267,
      "loss": 5.8259,
      "num_tokens": 158544449.0,
      "step": 87
    },
    {
      "epoch": 0.9166666666666666,
      "grad_norm": 0.9685031175613403,
      "learning_rate": 0.0003525136148362349,
      "loss": 4.8024,
      "num_tokens": 160281956.0,
      "step": 88
    },
    {
      "epoch": 0.9270833333333334,
      "grad_norm": 1.398451566696167,
      "learning_rate": 0.0003509330737017339,
      "loss": 6.6325,
      "num_tokens": 162116927.0,
      "step": 89
    },
    {
      "epoch": 0.9375,
      "grad_norm": 1.2298660278320312,
      "learning_rate": 0.0003493303261084105,
      "loss": 6.7604,
      "num_tokens": 163951622.0,
      "step": 90
    },
    {
      "epoch": 0.9479166666666666,
      "grad_norm": 1.2719202041625977,
      "learning_rate": 0.0003477056078650743,
      "loss": 6.7175,
      "num_tokens": 165785936.0,
      "step": 91
    },
    {
      "epoch": 0.9583333333333334,
      "grad_norm": 1.0676854848861694,
      "learning_rate": 0.0003460591580130295,
      "loss": 6.4563,
      "num_tokens": 167619514.0,
      "step": 92
    },
    {
      "epoch": 0.96875,
      "grad_norm": 1.5075618028640747,
      "learning_rate": 0.0003443912187909049,
      "loss": 6.4595,
      "num_tokens": 169450177.0,
      "step": 93
    },
    {
      "epoch": 0.9791666666666666,
      "grad_norm": 1.055530071258545,
      "learning_rate": 0.00034270203559901447,
      "loss": 4.1973,
      "num_tokens": 171227988.0,
      "step": 94
    },
    {
      "epoch": 0.9895833333333334,
      "grad_norm": 1.5485455989837646,
      "learning_rate": 0.0003409918569632517,
      "loss": 6.774,
      "num_tokens": 173062297.0,
      "step": 95
    },
    {
      "epoch": 1.0,
      "grad_norm": 0.83399498462677,
      "learning_rate": 0.00033926093449852444,
      "loss": 5.6044,
      "num_tokens": 174873718.0,
      "step": 96
    },
    {
      "epoch": 1.0,
      "eval_loss": 0.39385467767715454,
      "eval_num_tokens": 174873718.0,
      "eval_runtime": 51.1623,
      "eval_samples_per_second": 43.274,
      "eval_steps_per_second": 1.368,
      "step": 96
    },
    {
      "epoch": 1.0104166666666667,
      "grad_norm": 1.165719747543335,
      "learning_rate": 0.00033750952287173576,
      "loss": 5.8215,
      "num_tokens": 176708726.0,
      "step": 97
    },
    {
      "epoch": 1.0208333333333333,
      "grad_norm": 1.237050175666809,
      "learning_rate": 0.00033573787976431507,
      "loss": 5.9031,
      "num_tokens": 178543577.0,
      "step": 98
    },
    {
      "epoch": 1.03125,
      "grad_norm": 1.146803379058838,
      "learning_rate": 0.00033394626583430596,
      "loss": 6.0078,
      "num_tokens": 180378152.0,
      "step": 99
    },
    {
      "epoch": 1.0416666666666667,
      "grad_norm": 1.4786773920059204,
      "learning_rate": 0.0003321349446780163,
      "loss": 6.0364,
      "num_tokens": 182212167.0,
      "step": 100
    },
    {
      "epoch": 1.0520833333333333,
      "grad_norm": 1.0227315425872803,
      "learning_rate": 0.0003303041827912359,
      "loss": 5.6446,
      "num_tokens": 184044813.0,
      "step": 101
    },
    {
      "epoch": 1.0625,
      "grad_norm": 1.1760964393615723,
      "learning_rate": 0.0003284542495300272,
      "loss": 4.3708,
      "num_tokens": 185861645.0,
      "step": 102
    },
    {
      "epoch": 1.0729166666666667,
      "grad_norm": 1.1126259565353394,
      "learning_rate": 0.00032658541707109614,
      "loss": 5.1844,
      "num_tokens": 187638317.0,
      "step": 103
    },
    {
      "epoch": 1.0833333333333333,
      "grad_norm": 0.9875500798225403,
      "learning_rate": 0.00032469796037174674,
      "loss": 5.8541,
      "num_tokens": 189473221.0,
      "step": 104
    },
    {
      "epoch": 1.09375,
      "grad_norm": 1.4258309602737427,
      "learning_rate": 0.00032279215712942755,
      "loss": 5.8896,
      "num_tokens": 191307863.0,
      "step": 105
    },
    {
      "epoch": 1.1041666666666667,
      "grad_norm": 1.0853471755981445,
      "learning_rate": 0.0003208682877408746,
      "loss": 5.9438,
      "num_tokens": 193142059.0,
      "step": 106
    },
    {
      "epoch": 1.1145833333333333,
      "grad_norm": 1.2616301774978638,
      "learning_rate": 0.00031892663526085735,
      "loss": 5.7077,
      "num_tokens": 194975275.0,
      "step": 107
    },
    {
      "epoch": 1.125,
      "grad_norm": 1.1230659484863281,
      "learning_rate": 0.00031696748536053294,
      "loss": 5.2283,
      "num_tokens": 196803152.0,
      "step": 108
    },
    {
      "epoch": 1.1354166666666667,
      "grad_norm": 0.7507144212722778,
      "learning_rate": 0.0003149911262854166,
      "loss": 4.1003,
      "num_tokens": 198575028.0,
      "step": 109
    },
    {
      "epoch": 1.1458333333333333,
      "grad_norm": 1.062694787979126,
      "learning_rate": 0.00031299784881297246,
      "loss": 5.7008,
      "num_tokens": 200409979.0,
      "step": 110
    },
    {
      "epoch": 1.15625,
      "grad_norm": 0.884606122970581,
      "learning_rate": 0.0003109879462098321,
      "loss": 5.893,
      "num_tokens": 202244654.0,
      "step": 111
    },
    {
      "epoch": 1.1666666666666667,
      "grad_norm": 1.1134356260299683,
      "learning_rate": 0.00030896171418864677,
      "loss": 6.06,
      "num_tokens": 204078947.0,
      "step": 112
    },
    {
      "epoch": 1.1770833333333333,
      "grad_norm": 0.848483145236969,
      "learning_rate": 0.00030691945086458,
      "loss": 5.6066,
      "num_tokens": 205912447.0,
      "step": 113
    },
    {
      "epoch": 1.1875,
      "grad_norm": 0.945598304271698,
      "learning_rate": 0.00030486145671144635,
      "loss": 5.4445,
      "num_tokens": 207742842.0,
      "step": 114
    },
    {
      "epoch": 1.1979166666666667,
      "grad_norm": 1.2866934537887573,
      "learning_rate": 0.0003027880345175036,
      "loss": 3.7599,
      "num_tokens": 209498845.0,
      "step": 115
    },
    {
      "epoch": 1.2083333333333333,
      "grad_norm": 1.5104707479476929,
      "learning_rate": 0.000300699489340904,
      "loss": 5.8123,
      "num_tokens": 211333853.0,
      "step": 116
    },
    {
      "epoch": 1.21875,
      "grad_norm": 1.3005720376968384,
      "learning_rate": 0.00029859612846481164,
      "loss": 5.904,
      "num_tokens": 213168584.0,
      "step": 117
    },
    {
      "epoch": 1.2291666666666667,
      "grad_norm": 1.2542551755905151,
      "learning_rate": 0.00029647826135219274,
      "loss": 5.9603,
      "num_tokens": 215002979.0,
      "step": 118
    },
    {
      "epoch": 1.2395833333333333,
      "grad_norm": 1.2026963233947754,
      "learning_rate": 0.0002943461996002849,
      "loss": 5.776,
      "num_tokens": 216836768.0,
      "step": 119
    },
    {
      "epoch": 1.25,
      "grad_norm": 1.2762895822525024,
      "learning_rate": 0.00029220025689475243,
      "loss": 5.5901,
      "num_tokens": 218668686.0,
      "step": 120
    },
    {
      "epoch": 1.2604166666666667,
      "grad_norm": 0.869133472442627,
      "learning_rate": 0.00029004074896353467,
      "loss": 3.7871,
      "num_tokens": 220419497.0,
      "step": 121
    },
    {
      "epoch": 1.2708333333333333,
      "grad_norm": 1.0662851333618164,
      "learning_rate": 0.00028786799353039335,
      "loss": 5.6359,
      "num_tokens": 222254505.0,
      "step": 122
    },
    {
      "epoch": 1.28125,
      "grad_norm": 1.0052399635314941,
      "learning_rate": 0.00028568231026816673,
      "loss": 5.9654,
      "num_tokens": 224089351.0,
      "step": 123
    },
    {
      "epoch": 1.2916666666666667,
      "grad_norm": 0.9917957186698914,
      "learning_rate": 0.00028348402075173683,
      "loss": 6.1669,
      "num_tokens": 225923865.0,
      "step": 124
    },
    {
      "epoch": 1.3020833333333333,
      "grad_norm": 1.2801611423492432,
      "learning_rate": 0.0002812734484107166,
      "loss": 5.9294,
      "num_tokens": 227757870.0,
      "step": 125
    },
    {
      "epoch": 1.3125,
      "grad_norm": 1.0231451988220215,
      "learning_rate": 0.00027905091848186476,
      "loss": 5.7678,
      "num_tokens": 229590660.0,
      "step": 126
    },
    {
      "epoch": 1.3229166666666667,
      "grad_norm": 1.0218455791473389,
      "learning_rate": 0.00027681675796123424,
      "loss": 4.2449,
      "num_tokens": 231409634.0,
      "step": 127
    },
    {
      "epoch": 1.3333333333333333,
      "grad_norm": 0.9382010102272034,
      "learning_rate": 0.00027457129555606176,
      "loss": 5.0048,
      "num_tokens": 233161046.0,
      "step": 128
    },
    {
      "epoch": 1.34375,
      "grad_norm": 0.9842782020568848,
      "learning_rate": 0.00027231486163640617,
      "loss": 5.736,
      "num_tokens": 234995958.0,
      "step": 129
    },
    {
      "epoch": 1.3541666666666667,
      "grad_norm": 1.1637225151062012,
      "learning_rate": 0.00027004778818654173,
      "loss": 5.8469,
      "num_tokens": 236830612.0,
      "step": 130
    },
    {
      "epoch": 1.3645833333333333,
      "grad_norm": 0.8380746245384216,
      "learning_rate": 0.0002677704087561138,
      "loss": 5.7858,
      "num_tokens": 238664763.0,
      "step": 131
    },
    {
      "epoch": 1.375,
      "grad_norm": 0.9739496111869812,
      "learning_rate": 0.0002654830584110645,
      "loss": 5.5843,
      "num_tokens": 240497790.0,
      "step": 132
    },
    {
      "epoch": 1.3854166666666667,
      "grad_norm": 0.7374522089958191,
      "learning_rate": 0.0002631860736843352,
      "loss": 5.0237,
      "num_tokens": 242324229.0,
      "step": 133
    },
    {
      "epoch": 1.3958333333333333,
      "grad_norm": 0.7949540615081787,
      "learning_rate": 0.00026087979252635335,
      "loss": 4.0288,
      "num_tokens": 244080663.0,
      "step": 134
    },
    {
      "epoch": 1.40625,
      "grad_norm": 0.9912927150726318,
      "learning_rate": 0.0002585645542553101,
      "loss": 5.7465,
      "num_tokens": 245915632.0,
      "step": 135
    },
    {
      "epoch": 1.4166666666666667,
      "grad_norm": 0.9647045731544495,
      "learning_rate": 0.0002562406995072375,
      "loss": 6.1171,
      "num_tokens": 247750331.0,
      "step": 136
    },
    {
      "epoch": 1.4270833333333333,
      "grad_norm": 0.9238935112953186,
      "learning_rate": 0.00025390857018589135,
      "loss": 5.8763,
      "num_tokens": 249584639.0,
      "step": 137
    },
    {
      "epoch": 1.4375,
      "grad_norm": 0.893530547618866,
      "learning_rate": 0.0002515685094124476,
      "loss": 5.7744,
      "num_tokens": 251418209.0,
      "step": 138
    },
    {
      "epoch": 1.4479166666666667,
      "grad_norm": 1.1296662092208862,
      "learning_rate": 0.00024922086147501977,
      "loss": 5.4371,
      "num_tokens": 253248977.0,
      "step": 139
    },
    {
      "epoch": 1.4583333333333333,
      "grad_norm": 0.8839858174324036,
      "learning_rate": 0.0002468659717780045,
      "loss": 3.8757,
      "num_tokens": 255022934.0,
      "step": 140
    },
    {
      "epoch": 1.46875,
      "grad_norm": 0.8194657564163208,
      "learning_rate": 0.0002445041867912629,
      "loss": 5.5079,
      "num_tokens": 256857942.0,
      "step": 141
    },
    {
      "epoch": 1.4791666666666667,
      "grad_norm": 0.8461405634880066,
      "learning_rate": 0.00024213585399914526,
      "loss": 6.1028,
      "num_tokens": 258692708.0,
      "step": 142
    },
    {
      "epoch": 1.4895833333333333,
      "grad_norm": 0.7493748664855957,
      "learning_rate": 0.00023976132184936648,
      "loss": 5.9799,
      "num_tokens": 260527136.0,
      "step": 143
    },
    {
      "epoch": 1.5,
      "grad_norm": 0.914884090423584,
      "learning_rate": 0.00023738093970173955,
      "loss": 5.7974,
      "num_tokens": 262360894.0,
      "step": 144
    },
    {
      "epoch": 1.5104166666666665,
      "grad_norm": 0.7370859384536743,
      "learning_rate": 0.00023499505777677509,
      "loss": 5.483,
      "num_tokens": 264192721.0,
      "step": 145
    },
    {
      "epoch": 1.5208333333333335,
      "grad_norm": 0.7810927033424377,
      "learning_rate": 0.00023260402710415418,
      "loss": 3.7436,
      "num_tokens": 265947523.0,
      "step": 146
    },
    {
      "epoch": 1.53125,
      "grad_norm": 0.9369405508041382,
      "learning_rate": 0.000230208199471082,
      "loss": 5.5074,
      "num_tokens": 267782531.0,
      "step": 147
    },
    {
      "epoch": 1.5416666666666665,
      "grad_norm": 0.7483178377151489,
      "learning_rate": 0.00022780792737053034,
      "loss": 5.8986,
      "num_tokens": 269617364.0,
      "step": 148
    },
    {
      "epoch": 1.5520833333333335,
      "grad_norm": 1.0921028852462769,
      "learning_rate": 0.00022540356394937577,
      "loss": 6.0131,
      "num_tokens": 271451886.0,
      "step": 149
    },
    {
      "epoch": 1.5625,
      "grad_norm": 0.8628895878791809,
      "learning_rate": 0.00022299546295644223,
      "loss": 6.0871,
      "num_tokens": 273285914.0,
      "step": 150
    },
    {
      "epoch": 1.5729166666666665,
      "grad_norm": 0.7694334983825684,
      "learning_rate": 0.0002205839786904545,
      "loss": 5.7496,
      "num_tokens": 275118700.0,
      "step": 151
    },
    {
      "epoch": 1.5833333333333335,
      "grad_norm": 0.7926948666572571,
      "learning_rate": 0.00021816946594791102,
      "loss": 4.3869,
      "num_tokens": 276940098.0,
      "step": 152
    },
    {
      "epoch": 1.59375,
      "grad_norm": 0.753044843673706,
      "learning_rate": 0.0002157522799708836,
      "loss": 5.0379,
      "num_tokens": 278712550.0,
      "step": 153
    },
    {
      "epoch": 1.6041666666666665,
      "grad_norm": 0.8183289170265198,
      "learning_rate": 0.00021333277639475106,
      "loss": 5.7533,
      "num_tokens": 280547478.0,
      "step": 154
    },
    {
      "epoch": 1.6145833333333335,
      "grad_norm": 0.8382421731948853,
      "learning_rate": 0.0002109113111958759,
      "loss": 5.9166,
      "num_tokens": 282382117.0,
      "step": 155
    },
    {
      "epoch": 1.625,
      "grad_norm": 0.8893575072288513,
      "learning_rate": 0.00020848824063922968,
      "loss": 6.0179,
      "num_tokens": 284216279.0,
      "step": 156
    },
    {
      "epoch": 1.6354166666666665,
      "grad_norm": 0.8030447959899902,
      "learning_rate": 0.000206063921225977,
      "loss": 5.6948,
      "num_tokens": 286049489.0,
      "step": 157
    },
    {
      "epoch": 1.6458333333333335,
      "grad_norm": 0.6560386419296265,
      "learning_rate": 0.00020363870964102394,
      "loss": 4.8645,
      "num_tokens": 287875672.0,
      "step": 158
    },
    {
      "epoch": 1.65625,
      "grad_norm": 0.7248445749282837,
      "learning_rate": 0.00020121296270053996,
      "loss": 4.0077,
      "num_tokens": 289638765.0,
      "step": 159
    },
    {
      "epoch": 1.6666666666666665,
      "grad_norm": 0.8028330206871033,
      "learning_rate": 0.00019878703729946012,
      "loss": 5.6793,
      "num_tokens": 291473726.0,
      "step": 160
    },
    {
      "epoch": 1.6770833333333335,
      "grad_norm": 0.7822152972221375,
      "learning_rate": 0.0001963612903589761,
      "loss": 5.7095,
      "num_tokens": 293308430.0,
      "step": 161
    },
    {
      "epoch": 1.6875,
      "grad_norm": 0.8185696005821228,
      "learning_rate": 0.00019393607877402308,
      "loss": 5.9541,
      "num_tokens": 295142781.0,
      "step": 162
    },
    {
      "epoch": 1.6979166666666665,
      "grad_norm": 0.8143693804740906,
      "learning_rate": 0.0001915117593607704,
      "loss": 5.5666,
      "num_tokens": 296976392.0,
      "step": 163
    },
    {
      "epoch": 1.7083333333333335,
      "grad_norm": 0.8612039685249329,
      "learning_rate": 0.00018908868880412421,
      "loss": 5.4096,
      "num_tokens": 298807511.0,
      "step": 164
    },
    {
      "epoch": 1.71875,
      "grad_norm": 0.7511785626411438,
      "learning_rate": 0.000186667223605249,
      "loss": 3.5891,
      "num_tokens": 300577163.0,
      "step": 165
    },
    {
      "epoch": 1.7291666666666665,
      "grad_norm": 0.9434303641319275,
      "learning_rate": 0.00018424772002911653,
      "loss": 5.6975,
      "num_tokens": 302412171.0,
      "step": 166
    },
    {
      "epoch": 1.7395833333333335,
      "grad_norm": 0.8521304726600647,
      "learning_rate": 0.00018183053405208897,
      "loss": 6.0424,
      "num_tokens": 304246992.0,
      "step": 167
    },
    {
      "epoch": 1.75,
      "grad_norm": 0.8021337985992432,
      "learning_rate": 0.00017941602130954552,
      "loss": 5.9435,
      "num_tokens": 306081466.0,
      "step": 168
    },
    {
      "epoch": 1.7604166666666665,
      "grad_norm": 0.8518204689025879,
      "learning_rate": 0.0001770045370435578,
      "loss": 5.7312,
      "num_tokens": 307915323.0,
      "step": 169
    },
    {
      "epoch": 1.7708333333333335,
      "grad_norm": 0.8390392065048218,
      "learning_rate": 0.00017459643605062424,
      "loss": 5.5813,
      "num_tokens": 309747303.0,
      "step": 170
    },
    {
      "epoch": 1.78125,
      "grad_norm": 0.7974820137023926,
      "learning_rate": 0.00017219207262946973,
      "loss": 3.7035,
      "num_tokens": 311490666.0,
      "step": 171
    },
    {
      "epoch": 1.7916666666666665,
      "grad_norm": 0.9529057145118713,
      "learning_rate": 0.00016979180052891803,
      "loss": 5.6394,
      "num_tokens": 313325674.0,
      "step": 172
    },
    {
      "epoch": 1.8020833333333335,
      "grad_norm": 0.6925763487815857,
      "learning_rate": 0.00016739597289584587,
      "loss": 5.6417,
      "num_tokens": 315160558.0,
      "step": 173
    },
    {
      "epoch": 1.8125,
      "grad_norm": 0.9803677797317505,
      "learning_rate": 0.00016500494222322496,
      "loss": 5.9619,
      "num_tokens": 316995117.0,
      "step": 174
    },
    {
      "epoch": 1.8229166666666665,
      "grad_norm": 0.7456706762313843,
      "learning_rate": 0.0001626190602982605,
      "loss": 5.902,
      "num_tokens": 318829146.0,
      "step": 175
    },
    {
      "epoch": 1.8333333333333335,
      "grad_norm": 0.8381991982460022,
      "learning_rate": 0.00016023867815063357,
      "loss": 5.4646,
      "num_tokens": 320661993.0,
      "step": 176
    },
    {
      "epoch": 1.84375,
      "grad_norm": 0.7155930995941162,
      "learning_rate": 0.0001578641460008548,
      "loss": 4.536,
      "num_tokens": 322481903.0,
      "step": 177
    },
    {
      "epoch": 1.8541666666666665,
      "grad_norm": 0.8462711572647095,
      "learning_rate": 0.00015549581320873715,
      "loss": 4.7933,
      "num_tokens": 324251706.0,
      "step": 178
    },
    {
      "epoch": 1.8645833333333335,
      "grad_norm": 0.75278639793396,
      "learning_rate": 0.00015313402822199554,
      "loss": 5.7969,
      "num_tokens": 326086618.0,
      "step": 179
    },
    {
      "epoch": 1.875,
      "grad_norm": 0.7761452794075012,
      "learning_rate": 0.00015077913852498028,
      "loss": 5.8597,
      "num_tokens": 327921258.0,
      "step": 180
    },
    {
      "epoch": 1.8854166666666665,
      "grad_norm": 0.7733147144317627,
      "learning_rate": 0.00014843149058755246,
      "loss": 5.8143,
      "num_tokens": 329755417.0,
      "step": 181
    },
    {
      "epoch": 1.8958333333333335,
      "grad_norm": 0.8719548583030701,
      "learning_rate": 0.00014609142981410867,
      "loss": 5.5983,
      "num_tokens": 331588668.0,
      "step": 182
    },
    {
      "epoch": 1.90625,
      "grad_norm": 0.7246740460395813,
      "learning_rate": 0.00014375930049276254,
      "loss": 4.8483,
      "num_tokens": 333416028.0,
      "step": 183
    },
    {
      "epoch": 1.9166666666666665,
      "grad_norm": 0.641409158706665,
      "learning_rate": 0.00014143544574468994,
      "loss": 4.3747,
      "num_tokens": 335156923.0,
      "step": 184
    },
    {
      "epoch": 1.9270833333333335,
      "grad_norm": 0.863508939743042,
      "learning_rate": 0.0001391202074736467,
      "loss": 5.7916,
      "num_tokens": 336991901.0,
      "step": 185
    },
    {
      "epoch": 1.9375,
      "grad_norm": 0.8702845573425293,
      "learning_rate": 0.00013681392631566478,
      "loss": 5.9018,
      "num_tokens": 338826608.0,
      "step": 186
    },
    {
      "epoch": 1.9479166666666665,
      "grad_norm": 0.735125720500946,
      "learning_rate": 0.00013451694158893556,
      "loss": 5.7923,
      "num_tokens": 340660903.0,
      "step": 187
    },
    {
      "epoch": 1.9583333333333335,
      "grad_norm": 0.7928696274757385,
      "learning_rate": 0.00013222959124388627,
      "loss": 5.7267,
      "num_tokens": 342494421.0,
      "step": 188
    },
    {
      "epoch": 1.96875,
      "grad_norm": 0.7430011034011841,
      "learning_rate": 0.0001299522118134583,
      "loss": 5.2829,
      "num_tokens": 344324800.0,
      "step": 189
    },
    {
      "epoch": 1.9791666666666665,
      "grad_norm": 0.7103280425071716,
      "learning_rate": 0.00012768513836359382,
      "loss": 3.6185,
      "num_tokens": 346097221.0,
      "step": 190
    },
    {
      "epoch": 1.9895833333333335,
      "grad_norm": 0.820694625377655,
      "learning_rate": 0.00012542870444393831,
      "loss": 5.6309,
      "num_tokens": 347931646.0,
      "step": 191
    },
    {
      "epoch": 2.0,
      "grad_norm": 0.6098411679267883,
      "learning_rate": 0.00012318324203876584,
      "loss": 4.6723,
      "num_tokens": 349747436.0,
      "step": 192
    },
    {
      "epoch": 2.0,
      "eval_loss": 0.30134105682373047,
      "eval_num_tokens": 349747436.0,
      "eval_runtime": 51.0004,
      "eval_samples_per_second": 43.411,
      "eval_steps_per_second": 1.373,
      "step": 192
    },
    {
      "epoch": 2.0104166666666665,
      "grad_norm": 0.868577778339386,
      "learning_rate": 0.00012094908151813528,
      "loss": 4.884,
      "num_tokens": 351582444.0,
      "step": 193
    },
    {
      "epoch": 2.0208333333333335,
      "grad_norm": 0.7576533555984497,
      "learning_rate": 0.00011872655158928347,
      "loss": 5.0435,
      "num_tokens": 353417307.0,
      "step": 194
    },
    {
      "epoch": 2.03125,
      "grad_norm": 0.8024824857711792,
      "learning_rate": 0.00011651597924826328,
      "loss": 5.0431,
      "num_tokens": 355251875.0,
      "step": 195
    },
    {
      "epoch": 2.0416666666666665,
      "grad_norm": 0.9183809757232666,
      "learning_rate": 0.00011431768973183325,
      "loss": 4.9594,
      "num_tokens": 357085971.0,
      "step": 196
    },
    {
      "epoch": 2.0520833333333335,
      "grad_norm": 0.7868840098381042,
      "learning_rate": 0.00011213200646960665,
      "loss": 4.7211,
      "num_tokens": 358919037.0,
      "step": 197
    },
    {
      "epoch": 2.0625,
      "grad_norm": 0.7261154055595398,
      "learning_rate": 0.00010995925103646532,
      "loss": 3.788,
      "num_tokens": 360740077.0,
      "step": 198
    },
    {
      "epoch": 2.0729166666666665,
      "grad_norm": 0.6998794674873352,
      "learning_rate": 0.00010779974310524759,
      "loss": 4.2829,
      "num_tokens": 362500547.0,
      "step": 199
    },
    {
      "epoch": 2.0833333333333335,
      "grad_norm": 0.8364837765693665,
      "learning_rate": 0.00010565380039971513,
      "loss": 4.8648,
      "num_tokens": 364335437.0,
      "step": 200
    },
    {
      "epoch": 2.09375,
      "grad_norm": 0.7603851556777954,
      "learning_rate": 0.0001035217386478073,
      "loss": 5.0085,
      "num_tokens": 366170041.0,
      "step": 201
    },
    {
      "epoch": 2.1041666666666665,
      "grad_norm": 0.7490676641464233,
      "learning_rate": 0.00010140387153518838,
      "loss": 4.9162,
      "num_tokens": 368004177.0,
      "step": 202
    },
    {
      "epoch": 2.1145833333333335,
      "grad_norm": 0.868342399597168,
      "learning_rate": 9.930051065909602e-05,
      "loss": 4.8956,
      "num_tokens": 369837409.0,
      "step": 203
    },
    {
      "epoch": 2.125,
      "grad_norm": 0.6991240382194519,
      "learning_rate": 9.721196548249643e-05,
      "loss": 4.2732,
      "num_tokens": 371664713.0,
      "step": 204
    },
    {
      "epoch": 2.1354166666666665,
      "grad_norm": 0.7538133859634399,
      "learning_rate": 9.51385432885537e-05,
      "loss": 3.4226,
      "num_tokens": 373423781.0,
      "step": 205
    },
    {
      "epoch": 2.1458333333333335,
      "grad_norm": 0.7492122054100037,
      "learning_rate": 9.308054913542008e-05,
      "loss": 4.7457,
      "num_tokens": 375258743.0,
      "step": 206
    },
    {
      "epoch": 2.15625,
      "grad_norm": 0.8423147201538086,
      "learning_rate": 9.10382858113533e-05,
      "loss": 4.8438,
      "num_tokens": 377093447.0,
      "step": 207
    },
    {
      "epoch": 2.1666666666666665,
      "grad_norm": 0.749155580997467,
      "learning_rate": 8.901205379016797e-05,
      "loss": 5.0235,
      "num_tokens": 378927784.0,
      "step": 208
    },
    {
      "epoch": 2.1770833333333335,
      "grad_norm": 0.6537796854972839,
      "learning_rate": 8.70021511870276e-05,
      "loss": 4.7036,
      "num_tokens": 380761347.0,
      "step": 209
    },
    {
      "epoch": 2.1875,
      "grad_norm": 0.6459214091300964,
      "learning_rate": 8.500887371458339e-05,
      "loss": 4.3882,
      "num_tokens": 382591921.0,
      "step": 210
    },
    {
      "epoch": 2.1979166666666665,
      "grad_norm": 0.680185854434967,
      "learning_rate": 8.303251463946708e-05,
      "loss": 3.0801,
      "num_tokens": 384351003.0,
      "step": 211
    },
    {
      "epoch": 2.2083333333333335,
      "grad_norm": 0.7055952548980713,
      "learning_rate": 8.107336473914268e-05,
      "loss": 4.6331,
      "num_tokens": 386186011.0,
      "step": 212
    },
    {
      "epoch": 2.21875,
      "grad_norm": 0.682402491569519,
      "learning_rate": 7.913171225912536e-05,
      "loss": 4.7906,
      "num_tokens": 388020770.0,
      "step": 213
    },
    {
      "epoch": 2.2291666666666665,
      "grad_norm": 0.6808469295501709,
      "learning_rate": 7.720784287057247e-05,
      "loss": 4.8913,
      "num_tokens": 389855172.0,
      "step": 214
    },
    {
      "epoch": 2.2395833333333335,
      "grad_norm": 0.6908066272735596,
      "learning_rate": 7.530203962825331e-05,
      "loss": 4.736,
      "num_tokens": 391688847.0,
      "step": 215
    },
    {
      "epoch": 2.25,
      "grad_norm": 0.6484770774841309,
      "learning_rate": 7.34145829289039e-05,
      "loss": 4.7076,
      "num_tokens": 393520369.0,
      "step": 216
    },
    {
      "epoch": 2.2604166666666665,
      "grad_norm": 0.7851245403289795,
      "learning_rate": 7.154575046997282e-05,
      "loss": 2.852,
      "num_tokens": 395290365.0,
      "step": 217
    },
    {
      "epoch": 2.2708333333333335,
      "grad_norm": 0.744822084903717,
      "learning_rate": 6.969581720876419e-05,
      "loss": 4.6526,
      "num_tokens": 397125373.0,
      "step": 218
    },
    {
      "epoch": 2.28125,
      "grad_norm": 0.6934426426887512,
      "learning_rate": 6.786505532198374e-05,
      "loss": 4.8379,
      "num_tokens": 398960238.0,
      "step": 219
    },
    {
      "epoch": 2.2916666666666665,
      "grad_norm": 0.7033064365386963,
      "learning_rate": 6.605373416569411e-05,
      "loss": 5.1609,
      "num_tokens": 400794764.0,
      "step": 220
    },
    {
      "epoch": 2.3020833333333335,
      "grad_norm": 0.6805562376976013,
      "learning_rate": 6.4262120235685e-05,
      "loss": 4.8859,
      "num_tokens": 402628787.0,
      "step": 221
    },
    {
      "epoch": 2.3125,
      "grad_norm": 0.6635998487472534,
      "learning_rate": 6.249047712826433e-05,
      "loss": 4.7145,
      "num_tokens": 404461539.0,
      "step": 222
    },
    {
      "epoch": 2.3229166666666665,
      "grad_norm": 0.5987191796302795,
      "learning_rate": 6.073906550147566e-05,
      "loss": 3.431,
      "num_tokens": 406279737.0,
      "step": 223
    },
    {
      "epoch": 2.3333333333333335,
      "grad_norm": 0.5484739542007446,
      "learning_rate": 5.900814303674842e-05,
      "loss": 4.0309,
      "num_tokens": 408070479.0,
      "step": 224
    },
    {
      "epoch": 2.34375,
      "grad_norm": 0.6329848170280457,
      "learning_rate": 5.729796440098554e-05,
      "loss": 4.7709,
      "num_tokens": 409905369.0,
      "step": 225
    },
    {
      "epoch": 2.3541666666666665,
      "grad_norm": 0.6661122441291809,
      "learning_rate": 5.5608781209095114e-05,
      "loss": 5.0799,
      "num_tokens": 411739974.0,
      "step": 226
    },
    {
      "epoch": 2.3645833333333335,
      "grad_norm": 0.6367658972740173,
      "learning_rate": 5.394084198697056e-05,
      "loss": 4.9295,
      "num_tokens": 413574139.0,
      "step": 227
    },
    {
      "epoch": 2.375,
      "grad_norm": 1.4353746175765991,
      "learning_rate": 5.2294392134925704e-05,
      "loss": 4.7959,
      "num_tokens": 415407400.0,
      "step": 228
    },
    {
      "epoch": 2.3854166666666665,
      "grad_norm": 0.5810269117355347,
      "learning_rate": 5.066967389158954e-05,
      "loss": 4.1042,
      "num_tokens": 417234576.0,
      "step": 229
    },
    {
      "epoch": 2.3958333333333335,
      "grad_norm": 0.6517537832260132,
      "learning_rate": 4.9066926298266146e-05,
      "loss": 3.3788,
      "num_tokens": 418999605.0,
      "step": 230
    },
    {
      "epoch": 2.40625,
      "grad_norm": 0.5945186614990234,
      "learning_rate": 4.748638516376511e-05,
      "loss": 4.7507,
      "num_tokens": 420834554.0,
      "step": 231
    },
    {
      "epoch": 2.4166666666666665,
      "grad_norm": 0.6609830856323242,
      "learning_rate": 4.592828302970742e-05,
      "loss": 4.9474,
      "num_tokens": 422669261.0,
      "step": 232
    },
    {
      "epoch": 2.4270833333333335,
      "grad_norm": 0.6279190182685852,
      "learning_rate": 4.4392849136312145e-05,
      "loss": 4.8501,
      "num_tokens": 424503575.0,
      "step": 233
    },
    {
      "epoch": 2.4375,
      "grad_norm": 0.6369905471801758,
      "learning_rate": 4.288030938866881e-05,
      "loss": 4.8513,
      "num_tokens": 426337040.0,
      "step": 234
    },
    {
      "epoch": 2.4479166666666665,
      "grad_norm": 0.6173170804977417,
      "learning_rate": 4.13908863235005e-05,
      "loss": 4.5135,
      "num_tokens": 428167608.0,
      "step": 235
    },
    {
      "epoch": 2.4583333333333335,
      "grad_norm": 0.7480139136314392,
      "learning_rate": 3.9924799076422414e-05,
      "loss": 2.9563,
      "num_tokens": 429903970.0,
      "step": 236
    },
    {
      "epoch": 2.46875,
      "grad_norm": 0.6398470401763916,
      "learning_rate": 3.8482263349701084e-05,
      "loss": 4.7156,
      "num_tokens": 431738978.0,
      "step": 237
    },
    {
      "epoch": 2.4791666666666665,
      "grad_norm": 0.5996270775794983,
      "learning_rate": 3.706349138051828e-05,
      "loss": 4.8745,
      "num_tokens": 433573808.0,
      "step": 238
    },
    {
      "epoch": 2.4895833333333335,
      "grad_norm": 0.648737907409668,
      "learning_rate": 3.5668691909745425e-05,
      "loss": 5.2032,
      "num_tokens": 435408292.0,
      "step": 239
    },
    {
      "epoch": 2.5,
      "grad_norm": 0.583387017250061,
      "learning_rate": 3.429807015123159e-05,
      "loss": 4.7081,
      "num_tokens": 437242213.0,
      "step": 240
    },
    {
      "epoch": 2.5104166666666665,
      "grad_norm": 0.59371417760849,
      "learning_rate": 3.295182776161103e-05,
      "loss": 4.5919,
      "num_tokens": 439074391.0,
      "step": 241
    },
    {
      "epoch": 2.5208333333333335,
      "grad_norm": 0.6875779032707214,
      "learning_rate": 3.1630162810633824e-05,
      "loss": 3.0964,
      "num_tokens": 440837083.0,
      "step": 242
    },
    {
      "epoch": 2.53125,
      "grad_norm": 0.6423700451850891,
      "learning_rate": 3.0333269752024374e-05,
      "loss": 4.5561,
      "num_tokens": 442672091.0,
      "step": 243
    },
    {
      "epoch": 2.5416666666666665,
      "grad_norm": 0.629224419593811,
      "learning_rate": 2.9061339394871723e-05,
      "loss": 4.7547,
      "num_tokens": 444506945.0,
      "step": 244
    },
    {
      "epoch": 2.5520833333333335,
      "grad_norm": 0.9383465647697449,
      "learning_rate": 2.7814558875556305e-05,
      "loss": 4.9043,
      "num_tokens": 446341484.0,
      "step": 245
    },
    {
      "epoch": 2.5625,
      "grad_norm": 0.6273048520088196,
      "learning_rate": 2.659311163021694e-05,
      "loss": 4.8548,
      "num_tokens": 448175477.0,
      "step": 246
    },
    {
      "epoch": 2.5729166666666665,
      "grad_norm": 0.581720769405365,
      "learning_rate": 2.539717736776237e-05,
      "loss": 4.7217,
      "num_tokens": 450008278.0,
      "step": 247
    },
    {
      "epoch": 2.5833333333333335,
      "grad_norm": 0.6323935985565186,
      "learning_rate": 2.422693204343085e-05,
      "loss": 3.7152,
      "num_tokens": 451829106.0,
      "step": 248
    },
    {
      "epoch": 2.59375,
      "grad_norm": 0.5289592742919922,
      "learning_rate": 2.308254783290247e-05,
      "loss": 3.9177,
      "num_tokens": 453596312.0,
      "step": 249
    },
    {
      "epoch": 2.6041666666666665,
      "grad_norm": 0.6072826385498047,
      "learning_rate": 2.1964193106967135e-05,
      "loss": 4.7996,
      "num_tokens": 455431228.0,
      "step": 250
    },
    {
      "epoch": 2.6145833333333335,
      "grad_norm": 0.636060357093811,
      "learning_rate": 2.0872032406752686e-05,
      "loss": 4.8907,
      "num_tokens": 457265875.0,
      "step": 251
    },
    {
      "epoch": 2.625,
      "grad_norm": 0.6241011619567871,
      "learning_rate": 1.9806226419516192e-05,
      "loss": 5.0163,
      "num_tokens": 459100000.0,
      "step": 252
    },
    {
      "epoch": 2.6354166666666665,
      "grad_norm": 0.5778922438621521,
      "learning_rate": 1.8766931955002455e-05,
      "loss": 4.6326,
      "num_tokens": 460933143.0,
      "step": 253
    },
    {
      "epoch": 2.6458333333333335,
      "grad_norm": 0.5672693252563477,
      "learning_rate": 1.775430192237284e-05,
      "loss": 4.0494,
      "num_tokens": 462759637.0,
      "step": 254
    },
    {
      "epoch": 2.65625,
      "grad_norm": 0.6339104175567627,
      "learning_rate": 1.6768485307708292e-05,
      "loss": 3.1861,
      "num_tokens": 464521875.0,
      "step": 255
    },
    {
      "epoch": 2.6666666666666665,
      "grad_norm": 1.1769932508468628,
      "learning_rate": 1.5809627152089068e-05,
      "loss": 4.6774,
      "num_tokens": 466356841.0,
      "step": 256
    },
    {
      "epoch": 2.6770833333333335,
      "grad_norm": 0.59512859582901,
      "learning_rate": 1.4877868530255279e-05,
      "loss": 4.9239,
      "num_tokens": 468191542.0,
      "step": 257
    },
    {
      "epoch": 2.6875,
      "grad_norm": 0.5831692814826965,
      "learning_rate": 1.3973346529850872e-05,
      "loss": 4.8429,
      "num_tokens": 470025845.0,
      "step": 258
    },
    {
      "epoch": 2.6979166666666665,
      "grad_norm": 0.6448671221733093,
      "learning_rate": 1.3096194231254212e-05,
      "loss": 4.7197,
      "num_tokens": 471859357.0,
      "step": 259
    },
    {
      "epoch": 2.7083333333333335,
      "grad_norm": 0.5393466353416443,
      "learning_rate": 1.2246540687998264e-05,
      "loss": 4.4274,
      "num_tokens": 473689544.0,
      "step": 260
    },
    {
      "epoch": 2.71875,
      "grad_norm": 0.7121680974960327,
      "learning_rate": 1.142451090778316e-05,
      "loss": 2.9111,
      "num_tokens": 475418561.0,
      "step": 261
    },
    {
      "epoch": 2.7291666666666665,
      "grad_norm": 0.5749300718307495,
      "learning_rate": 1.0630225834084196e-05,
      "loss": 4.8669,
      "num_tokens": 477253569.0,
      "step": 262
    },
    {
      "epoch": 2.7395833333333335,
      "grad_norm": 0.5907626152038574,
      "learning_rate": 9.86380232835753e-06,
      "loss": 4.768,
      "num_tokens": 479088363.0,
      "step": 263
    },
    {
      "epoch": 2.75,
      "grad_norm": 0.5856068730354309,
      "learning_rate": 9.125353152846794e-06,
      "loss": 4.9738,
      "num_tokens": 480922811.0,
      "step": 264
    },
    {
      "epoch": 2.7604166666666665,
      "grad_norm": 0.6345715522766113,
      "learning_rate": 8.41498695399241e-06,
      "loss": 5.0068,
      "num_tokens": 482756611.0,
      "step": 265
    },
    {
      "epoch": 2.7708333333333335,
      "grad_norm": 0.5500353574752808,
      "learning_rate": 7.732808246446887e-06,
      "loss": 4.5116,
      "num_tokens": 484588510.0,
      "step": 266
    },
    {
      "epoch": 2.78125,
      "grad_norm": 0.6664949059486389,
      "learning_rate": 7.07891739769766e-06,
      "loss": 3.01,
      "num_tokens": 486359532.0,
      "step": 267
    },
    {
      "epoch": 2.7916666666666665,
      "grad_norm": 0.5878235697746277,
      "learning_rate": 6.453410613300226e-06,
      "loss": 4.6532,
      "num_tokens": 488194540.0,
      "step": 268
    },
    {
      "epoch": 2.8020833333333335,
      "grad_norm": 0.5838302373886108,
      "learning_rate": 5.856379922723809e-06,
      "loss": 4.7997,
      "num_tokens": 490029403.0,
      "step": 269
    },
    {
      "epoch": 2.8125,
      "grad_norm": 0.5767126679420471,
      "learning_rate": 5.2879131658110535e-06,
      "loss": 4.8465,
      "num_tokens": 491863932.0,
      "step": 270
    },
    {
      "epoch": 2.8229166666666665,
      "grad_norm": 0.5807151794433594,
      "learning_rate": 4.748093979854429e-06,
      "loss": 4.6997,
      "num_tokens": 493697972.0,
      "step": 271
    },
    {
      "epoch": 2.8333333333333335,
      "grad_norm": 0.5633906126022339,
      "learning_rate": 4.237001787290851e-06,
      "loss": 4.6285,
      "num_tokens": 495530692.0,
      "step": 272
    },
    {
      "epoch": 2.84375,
      "grad_norm": 0.6755262613296509,
      "learning_rate": 3.754711784016407e-06,
      "loss": 3.4574,
      "num_tokens": 497346740.0,
      "step": 273
    },
    {
      "epoch": 2.8541666666666665,
      "grad_norm": 0.5019537210464478,
      "learning_rate": 3.3012949283229845e-06,
      "loss": 3.9227,
      "num_tokens": 499103794.0,
      "step": 274
    },
    {
      "epoch": 2.8645833333333335,
      "grad_norm": 0.5820115208625793,
      "learning_rate": 2.8768179304583085e-06,
      "loss": 4.6582,
      "num_tokens": 500938681.0,
      "step": 275
    },
    {
      "epoch": 2.875,
      "grad_norm": 0.6077380180358887,
      "learning_rate": 2.481343242810996e-06,
      "loss": 4.7815,
      "num_tokens": 502773322.0,
      "step": 276
    },
    {
      "epoch": 2.8854166666666665,
      "grad_norm": 0.629601001739502,
      "learning_rate": 2.1149290507220808e-06,
      "loss": 4.7978,
      "num_tokens": 504607491.0,
      "step": 277
    },
    {
      "epoch": 2.8958333333333335,
      "grad_norm": 0.6091342568397522,
      "learning_rate": 1.7776292639243074e-06,
      "loss": 4.7009,
      "num_tokens": 506440687.0,
      "step": 278
    },
    {
      "epoch": 2.90625,
      "grad_norm": 0.5185429453849792,
      "learning_rate": 1.4694935086105865e-06,
      "loss": 4.0256,
      "num_tokens": 508268714.0,
      "step": 279
    },
    {
      "epoch": 2.9166666666666665,
      "grad_norm": 0.6426588892936707,
      "learning_rate": 1.1905671201324576e-06,
      "loss": 3.3111,
      "num_tokens": 510061733.0,
      "step": 280
    },
    {
      "epoch": 2.9270833333333335,
      "grad_norm": 0.5608605146408081,
      "learning_rate": 9.408911363301576e-07,
      "loss": 4.5206,
      "num_tokens": 511896706.0,
      "step": 281
    },
    {
      "epoch": 2.9375,
      "grad_norm": 0.6238455176353455,
      "learning_rate": 7.205022914946957e-07,
      "loss": 5.0062,
      "num_tokens": 513731401.0,
      "step": 282
    },
    {
      "epoch": 2.9479166666666665,
      "grad_norm": 0.5830274224281311,
      "learning_rate": 5.29433010963265e-07,
      "loss": 4.9563,
      "num_tokens": 515565715.0,
      "step": 283
    },
    {
      "epoch": 2.9583333333333335,
      "grad_norm": 0.5901724100112915,
      "learning_rate": 3.677114063485476e-07,
      "loss": 4.7332,
      "num_tokens": 517399223.0,
      "step": 284
    },
    {
      "epoch": 2.96875,
      "grad_norm": 0.5470786690711975,
      "learning_rate": 2.3536127140273422e-07,
      "loss": 4.4041,
      "num_tokens": 519229822.0,
      "step": 285
    },
    {
      "epoch": 2.9791666666666665,
      "grad_norm": 0.6242496371269226,
      "learning_rate": 1.324020785168134e-07,
      "loss": 3.1959,
      "num_tokens": 520970038.0,
      "step": 286
    },
    {
      "epoch": 2.9895833333333335,
      "grad_norm": 0.5773048400878906,
      "learning_rate": 5.884897585557436e-08,
      "loss": 4.7913,
      "num_tokens": 522804448.0,
      "step": 287
    },
    {
      "epoch": 3.0,
      "grad_norm": 0.5350597500801086,
      "learning_rate": 1.4712785129011863e-08,
      "loss": 4.0842,
      "num_tokens": 524621154.0,
      "step": 288
    },
    {
      "epoch": 3.0,
      "eval_loss": 0.27593255043029785,
      "eval_num_tokens": 524621154.0,
      "eval_runtime": 51.1153,
      "eval_samples_per_second": 43.314,
      "eval_steps_per_second": 1.369,
      "step": 288
    }
  ],
  "logging_steps": 1,
  "max_steps": 288,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 3,
  "save_steps": 500,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 1.5771590544838361e+19,
  "train_batch_size": 28,
  "trial_name": null,
  "trial_params": null
}