{
  "best_metric": 2.1047139167785645,
  "best_model_checkpoint": "/content/drive/My Drive/Hugh Mann/Llama3.2-1B-Creative-Conversations/checkpoint-2000",
  "epoch": 3.0,
  "eval_steps": 50,
  "global_step": 2007,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.014947683109118086,
      "grad_norm": 1.3233145475387573,
      "learning_rate": 5.000000000000001e-07,
      "loss": 4.6199,
      "step": 10
    },
    {
      "epoch": 0.029895366218236172,
      "grad_norm": 1.5916823148727417,
      "learning_rate": 1.0000000000000002e-06,
      "loss": 4.5531,
      "step": 20
    },
    {
      "epoch": 0.04484304932735426,
      "grad_norm": 1.4500253200531006,
      "learning_rate": 1.5e-06,
      "loss": 4.5768,
      "step": 30
    },
    {
      "epoch": 0.059790732436472344,
      "grad_norm": 1.8787564039230347,
      "learning_rate": 2.0000000000000003e-06,
      "loss": 4.5561,
      "step": 40
    },
    {
      "epoch": 0.07473841554559044,
      "grad_norm": 1.351128339767456,
      "learning_rate": 2.5e-06,
      "loss": 4.6083,
      "step": 50
    },
    {
      "epoch": 0.07473841554559044,
      "eval_loss": 4.6864728927612305,
      "eval_runtime": 30.0792,
      "eval_samples_per_second": 79.091,
      "eval_steps_per_second": 9.907,
      "step": 50
    },
    {
      "epoch": 0.08968609865470852,
      "grad_norm": 1.8167227506637573,
      "learning_rate": 3e-06,
      "loss": 4.6043,
      "step": 60
    },
    {
      "epoch": 0.10463378176382661,
      "grad_norm": 1.492981195449829,
      "learning_rate": 3.5e-06,
      "loss": 4.5974,
      "step": 70
    },
    {
      "epoch": 0.11958146487294469,
      "grad_norm": 1.7814916372299194,
      "learning_rate": 4.000000000000001e-06,
      "loss": 4.589,
      "step": 80
    },
    {
      "epoch": 0.13452914798206278,
      "grad_norm": 1.9816361665725708,
      "learning_rate": 4.5e-06,
      "loss": 4.4917,
      "step": 90
    },
    {
      "epoch": 0.14947683109118087,
      "grad_norm": 1.8495465517044067,
      "learning_rate": 5e-06,
      "loss": 4.5588,
      "step": 100
    },
    {
      "epoch": 0.14947683109118087,
      "eval_loss": 4.558848857879639,
      "eval_runtime": 30.1266,
      "eval_samples_per_second": 78.967,
      "eval_steps_per_second": 9.892,
      "step": 100
    },
    {
      "epoch": 0.16442451420029897,
      "grad_norm": 1.8986459970474243,
      "learning_rate": 5.500000000000001e-06,
      "loss": 4.432,
      "step": 110
    },
    {
      "epoch": 0.17937219730941703,
      "grad_norm": 2.3086721897125244,
      "learning_rate": 6e-06,
      "loss": 4.4032,
      "step": 120
    },
    {
      "epoch": 0.19431988041853512,
      "grad_norm": 2.1522297859191895,
      "learning_rate": 6.5000000000000004e-06,
      "loss": 4.3179,
      "step": 130
    },
    {
      "epoch": 0.20926756352765322,
      "grad_norm": 1.94242262840271,
      "learning_rate": 7e-06,
      "loss": 4.2956,
      "step": 140
    },
    {
      "epoch": 0.2242152466367713,
      "grad_norm": 2.5612740516662598,
      "learning_rate": 7.500000000000001e-06,
      "loss": 4.2195,
      "step": 150
    },
    {
      "epoch": 0.2242152466367713,
      "eval_loss": 4.228877067565918,
      "eval_runtime": 30.125,
      "eval_samples_per_second": 78.971,
      "eval_steps_per_second": 9.892,
      "step": 150
    },
    {
      "epoch": 0.23916292974588937,
      "grad_norm": 2.5207152366638184,
      "learning_rate": 8.000000000000001e-06,
      "loss": 4.1068,
      "step": 160
    },
    {
      "epoch": 0.25411061285500747,
      "grad_norm": 2.828591823577881,
      "learning_rate": 8.5e-06,
      "loss": 3.9656,
      "step": 170
    },
    {
      "epoch": 0.26905829596412556,
      "grad_norm": 2.9801859855651855,
      "learning_rate": 9e-06,
      "loss": 3.8379,
      "step": 180
    },
    {
      "epoch": 0.28400597907324365,
      "grad_norm": 2.9544835090637207,
      "learning_rate": 9.5e-06,
      "loss": 3.7476,
      "step": 190
    },
    {
      "epoch": 0.29895366218236175,
      "grad_norm": 3.170365333557129,
      "learning_rate": 1e-05,
      "loss": 3.5613,
      "step": 200
    },
    {
      "epoch": 0.29895366218236175,
      "eval_loss": 3.543750286102295,
      "eval_runtime": 30.1319,
      "eval_samples_per_second": 78.953,
      "eval_steps_per_second": 9.89,
      "step": 200
    },
    {
      "epoch": 0.31390134529147984,
      "grad_norm": 4.423033714294434,
      "learning_rate": 9.944659656889874e-06,
      "loss": 3.4256,
      "step": 210
    },
    {
      "epoch": 0.32884902840059793,
      "grad_norm": 4.2404890060424805,
      "learning_rate": 9.889319313779746e-06,
      "loss": 3.2156,
      "step": 220
    },
    {
      "epoch": 0.34379671150971597,
      "grad_norm": 3.3334267139434814,
      "learning_rate": 9.83397897066962e-06,
      "loss": 3.0267,
      "step": 230
    },
    {
      "epoch": 0.35874439461883406,
      "grad_norm": 3.050525665283203,
      "learning_rate": 9.778638627559491e-06,
      "loss": 2.9132,
      "step": 240
    },
    {
      "epoch": 0.37369207772795215,
      "grad_norm": 3.204619884490967,
      "learning_rate": 9.723298284449364e-06,
      "loss": 2.7931,
      "step": 250
    },
    {
      "epoch": 0.37369207772795215,
      "eval_loss": 2.7474868297576904,
      "eval_runtime": 30.1502,
      "eval_samples_per_second": 78.905,
      "eval_steps_per_second": 9.884,
      "step": 250
    },
    {
      "epoch": 0.38863976083707025,
      "grad_norm": 2.626147985458374,
      "learning_rate": 9.667957941339236e-06,
      "loss": 2.6279,
      "step": 260
    },
    {
      "epoch": 0.40358744394618834,
      "grad_norm": 2.6221773624420166,
      "learning_rate": 9.612617598229111e-06,
      "loss": 2.5479,
      "step": 270
    },
    {
      "epoch": 0.41853512705530643,
      "grad_norm": 2.923938035964966,
      "learning_rate": 9.557277255118983e-06,
      "loss": 2.431,
      "step": 280
    },
    {
      "epoch": 0.4334828101644245,
      "grad_norm": 2.0621166229248047,
      "learning_rate": 9.501936912008856e-06,
      "loss": 2.4756,
      "step": 290
    },
    {
      "epoch": 0.4484304932735426,
      "grad_norm": 1.700082540512085,
      "learning_rate": 9.446596568898728e-06,
      "loss": 2.3197,
      "step": 300
    },
    {
      "epoch": 0.4484304932735426,
      "eval_loss": 2.3277225494384766,
      "eval_runtime": 30.1595,
      "eval_samples_per_second": 78.881,
      "eval_steps_per_second": 9.881,
      "step": 300
    },
    {
      "epoch": 0.4633781763826607,
      "grad_norm": 1.3931200504302979,
      "learning_rate": 9.391256225788601e-06,
      "loss": 2.2127,
      "step": 310
    },
    {
      "epoch": 0.47832585949177875,
      "grad_norm": 1.1735923290252686,
      "learning_rate": 9.335915882678473e-06,
      "loss": 2.2009,
      "step": 320
    },
    {
      "epoch": 0.49327354260089684,
      "grad_norm": 2.1452367305755615,
      "learning_rate": 9.280575539568346e-06,
      "loss": 2.1953,
      "step": 330
    },
    {
      "epoch": 0.5082212257100149,
      "grad_norm": 2.798497438430786,
      "learning_rate": 9.225235196458218e-06,
      "loss": 2.2344,
      "step": 340
    },
    {
      "epoch": 0.523168908819133,
      "grad_norm": 1.1647975444793701,
      "learning_rate": 9.169894853348092e-06,
      "loss": 2.2767,
      "step": 350
    },
    {
      "epoch": 0.523168908819133,
      "eval_loss": 2.212197780609131,
      "eval_runtime": 30.1761,
      "eval_samples_per_second": 78.837,
      "eval_steps_per_second": 9.875,
      "step": 350
    },
    {
      "epoch": 0.5381165919282511,
      "grad_norm": 1.347925066947937,
      "learning_rate": 9.114554510237963e-06,
      "loss": 2.1664,
      "step": 360
    },
    {
      "epoch": 0.5530642750373692,
      "grad_norm": 1.2578520774841309,
      "learning_rate": 9.059214167127837e-06,
      "loss": 2.119,
      "step": 370
    },
    {
      "epoch": 0.5680119581464873,
      "grad_norm": 2.018049716949463,
      "learning_rate": 9.00387382401771e-06,
      "loss": 2.1952,
      "step": 380
    },
    {
      "epoch": 0.5829596412556054,
      "grad_norm": 1.453579068183899,
      "learning_rate": 8.948533480907582e-06,
      "loss": 2.2482,
      "step": 390
    },
    {
      "epoch": 0.5979073243647235,
      "grad_norm": 1.423059105873108,
      "learning_rate": 8.893193137797455e-06,
      "loss": 2.1596,
      "step": 400
    },
    {
      "epoch": 0.5979073243647235,
      "eval_loss": 2.1859352588653564,
      "eval_runtime": 30.1333,
      "eval_samples_per_second": 78.949,
      "eval_steps_per_second": 9.889,
      "step": 400
    },
    {
      "epoch": 0.6128550074738416,
      "grad_norm": 0.9468516707420349,
      "learning_rate": 8.837852794687328e-06,
      "loss": 2.1392,
      "step": 410
    },
    {
      "epoch": 0.6278026905829597,
      "grad_norm": 1.3290131092071533,
      "learning_rate": 8.7825124515772e-06,
      "loss": 2.2047,
      "step": 420
    },
    {
      "epoch": 0.6427503736920778,
      "grad_norm": 1.495255470275879,
      "learning_rate": 8.727172108467074e-06,
      "loss": 2.1482,
      "step": 430
    },
    {
      "epoch": 0.6576980568011959,
      "grad_norm": 1.195508599281311,
      "learning_rate": 8.671831765356947e-06,
      "loss": 2.0685,
      "step": 440
    },
    {
      "epoch": 0.672645739910314,
      "grad_norm": 1.0751187801361084,
      "learning_rate": 8.616491422246819e-06,
      "loss": 2.179,
      "step": 450
    },
    {
      "epoch": 0.672645739910314,
      "eval_loss": 2.1704654693603516,
      "eval_runtime": 30.1216,
      "eval_samples_per_second": 78.98,
      "eval_steps_per_second": 9.893,
      "step": 450
    },
    {
      "epoch": 0.6875934230194319,
      "grad_norm": 1.541083574295044,
      "learning_rate": 8.561151079136692e-06,
      "loss": 2.1376,
      "step": 460
    },
    {
      "epoch": 0.70254110612855,
      "grad_norm": 1.583406686782837,
      "learning_rate": 8.505810736026564e-06,
      "loss": 2.1281,
      "step": 470
    },
    {
      "epoch": 0.7174887892376681,
      "grad_norm": 2.404257297515869,
      "learning_rate": 8.450470392916437e-06,
      "loss": 2.1263,
      "step": 480
    },
    {
      "epoch": 0.7324364723467862,
      "grad_norm": 1.3644176721572876,
      "learning_rate": 8.395130049806309e-06,
      "loss": 2.1226,
      "step": 490
    },
    {
      "epoch": 0.7473841554559043,
      "grad_norm": 0.8043954372406006,
      "learning_rate": 8.339789706696182e-06,
      "loss": 2.2121,
      "step": 500
    },
    {
      "epoch": 0.7473841554559043,
      "eval_loss": 2.159782886505127,
      "eval_runtime": 30.0708,
      "eval_samples_per_second": 79.113,
      "eval_steps_per_second": 9.91,
      "step": 500
    },
    {
      "epoch": 0.7623318385650224,
      "grad_norm": 1.537696361541748,
      "learning_rate": 8.284449363586054e-06,
      "loss": 2.0946,
      "step": 510
    },
    {
      "epoch": 0.7772795216741405,
      "grad_norm": 2.3374698162078857,
      "learning_rate": 8.229109020475927e-06,
      "loss": 2.0946,
      "step": 520
    },
    {
      "epoch": 0.7922272047832586,
      "grad_norm": 1.0315120220184326,
      "learning_rate": 8.173768677365799e-06,
      "loss": 2.1473,
      "step": 530
    },
    {
      "epoch": 0.8071748878923767,
      "grad_norm": 1.2831902503967285,
      "learning_rate": 8.118428334255674e-06,
      "loss": 2.1544,
      "step": 540
    },
    {
      "epoch": 0.8221225710014948,
      "grad_norm": 1.32231867313385,
      "learning_rate": 8.063087991145546e-06,
      "loss": 2.1502,
      "step": 550
    },
    {
      "epoch": 0.8221225710014948,
      "eval_loss": 2.151494264602661,
      "eval_runtime": 30.1654,
      "eval_samples_per_second": 78.865,
      "eval_steps_per_second": 9.879,
      "step": 550
    },
    {
      "epoch": 0.8370702541106129,
      "grad_norm": 2.374706268310547,
      "learning_rate": 8.00774764803542e-06,
      "loss": 2.0736,
      "step": 560
    },
    {
      "epoch": 0.852017937219731,
      "grad_norm": 1.6023039817810059,
      "learning_rate": 7.952407304925291e-06,
      "loss": 2.151,
      "step": 570
    },
    {
      "epoch": 0.866965620328849,
      "grad_norm": 1.0656261444091797,
      "learning_rate": 7.897066961815164e-06,
      "loss": 2.1986,
      "step": 580
    },
    {
      "epoch": 0.8819133034379671,
      "grad_norm": 1.1099286079406738,
      "learning_rate": 7.841726618705036e-06,
      "loss": 2.0966,
      "step": 590
    },
    {
      "epoch": 0.8968609865470852,
      "grad_norm": 1.1349364519119263,
      "learning_rate": 7.78638627559491e-06,
      "loss": 2.1341,
      "step": 600
    },
    {
      "epoch": 0.8968609865470852,
      "eval_loss": 2.144345998764038,
      "eval_runtime": 30.1606,
      "eval_samples_per_second": 78.878,
      "eval_steps_per_second": 9.88,
      "step": 600
    },
    {
      "epoch": 0.9118086696562033,
      "grad_norm": 1.132156252861023,
      "learning_rate": 7.731045932484783e-06,
      "loss": 2.1445,
      "step": 610
    },
    {
      "epoch": 0.9267563527653214,
      "grad_norm": 2.02873158454895,
      "learning_rate": 7.675705589374654e-06,
      "loss": 2.1066,
      "step": 620
    },
    {
      "epoch": 0.9417040358744395,
      "grad_norm": 1.4138145446777344,
      "learning_rate": 7.620365246264527e-06,
      "loss": 2.1878,
      "step": 630
    },
    {
      "epoch": 0.9566517189835575,
      "grad_norm": 1.7440117597579956,
      "learning_rate": 7.5650249031543995e-06,
      "loss": 2.13,
      "step": 640
    },
    {
      "epoch": 0.9715994020926756,
      "grad_norm": 1.5043258666992188,
      "learning_rate": 7.509684560044273e-06,
      "loss": 2.1042,
      "step": 650
    },
    {
      "epoch": 0.9715994020926756,
      "eval_loss": 2.139143943786621,
      "eval_runtime": 30.2117,
      "eval_samples_per_second": 78.744,
      "eval_steps_per_second": 9.864,
      "step": 650
    },
    {
      "epoch": 0.9865470852017937,
      "grad_norm": 1.4107309579849243,
      "learning_rate": 7.4543442169341454e-06,
      "loss": 2.0977,
      "step": 660
    },
    {
      "epoch": 1.0014947683109119,
      "grad_norm": 1.5076972246170044,
      "learning_rate": 7.399003873824019e-06,
      "loss": 2.0892,
      "step": 670
    },
    {
      "epoch": 1.0164424514200299,
      "grad_norm": 1.3431518077850342,
      "learning_rate": 7.343663530713891e-06,
      "loss": 2.0366,
      "step": 680
    },
    {
      "epoch": 1.031390134529148,
      "grad_norm": 1.3264635801315308,
      "learning_rate": 7.288323187603764e-06,
      "loss": 2.0692,
      "step": 690
    },
    {
      "epoch": 1.046337817638266,
      "grad_norm": 1.3826216459274292,
      "learning_rate": 7.2329828444936365e-06,
      "loss": 2.1299,
      "step": 700
    },
    {
      "epoch": 1.046337817638266,
      "eval_loss": 2.1334877014160156,
      "eval_runtime": 30.0787,
      "eval_samples_per_second": 79.092,
      "eval_steps_per_second": 9.907,
      "step": 700
    },
    {
      "epoch": 1.0612855007473843,
      "grad_norm": 1.2235782146453857,
      "learning_rate": 7.177642501383509e-06,
      "loss": 2.1352,
      "step": 710
    },
    {
      "epoch": 1.0762331838565022,
      "grad_norm": 1.760510802268982,
      "learning_rate": 7.122302158273382e-06,
      "loss": 2.0925,
      "step": 720
    },
    {
      "epoch": 1.0911808669656202,
      "grad_norm": 2.5025782585144043,
      "learning_rate": 7.066961815163255e-06,
      "loss": 2.0961,
      "step": 730
    },
    {
      "epoch": 1.1061285500747384,
      "grad_norm": 2.1543400287628174,
      "learning_rate": 7.0116214720531275e-06,
      "loss": 2.068,
      "step": 740
    },
    {
      "epoch": 1.1210762331838564,
      "grad_norm": 2.0268590450286865,
      "learning_rate": 6.956281128943e-06,
      "loss": 2.0878,
      "step": 750
    },
    {
      "epoch": 1.1210762331838564,
      "eval_loss": 2.1300783157348633,
      "eval_runtime": 30.122,
      "eval_samples_per_second": 78.979,
      "eval_steps_per_second": 9.893,
      "step": 750
    },
    {
      "epoch": 1.1360239162929746,
      "grad_norm": 0.9709022641181946,
      "learning_rate": 6.9009407858328726e-06,
      "loss": 2.1222,
      "step": 760
    },
    {
      "epoch": 1.1509715994020926,
      "grad_norm": 2.1023378372192383,
      "learning_rate": 6.845600442722745e-06,
      "loss": 2.0288,
      "step": 770
    },
    {
      "epoch": 1.1659192825112108,
      "grad_norm": 2.0665016174316406,
      "learning_rate": 6.790260099612618e-06,
      "loss": 2.108,
      "step": 780
    },
    {
      "epoch": 1.1808669656203288,
      "grad_norm": 1.3326338529586792,
      "learning_rate": 6.73491975650249e-06,
      "loss": 2.1137,
      "step": 790
    },
    {
      "epoch": 1.195814648729447,
      "grad_norm": 1.2175920009613037,
      "learning_rate": 6.679579413392363e-06,
      "loss": 2.1153,
      "step": 800
    },
    {
      "epoch": 1.195814648729447,
      "eval_loss": 2.126614570617676,
      "eval_runtime": 30.1048,
      "eval_samples_per_second": 79.024,
      "eval_steps_per_second": 9.899,
      "step": 800
    },
    {
      "epoch": 1.210762331838565,
      "grad_norm": 1.775018334388733,
      "learning_rate": 6.624239070282237e-06,
      "loss": 2.0816,
      "step": 810
    },
    {
      "epoch": 1.2257100149476832,
      "grad_norm": 1.5670348405838013,
      "learning_rate": 6.5688987271721095e-06,
      "loss": 2.1136,
      "step": 820
    },
    {
      "epoch": 1.2406576980568012,
      "grad_norm": 0.9595932960510254,
      "learning_rate": 6.513558384061982e-06,
      "loss": 2.1492,
      "step": 830
    },
    {
      "epoch": 1.2556053811659194,
      "grad_norm": 1.416823387145996,
      "learning_rate": 6.458218040951855e-06,
      "loss": 2.0646,
      "step": 840
    },
    {
      "epoch": 1.2705530642750373,
      "grad_norm": 1.5041942596435547,
      "learning_rate": 6.402877697841727e-06,
      "loss": 2.0646,
      "step": 850
    },
    {
      "epoch": 1.2705530642750373,
      "eval_loss": 2.1247856616973877,
      "eval_runtime": 30.1248,
      "eval_samples_per_second": 78.971,
      "eval_steps_per_second": 9.892,
      "step": 850
    },
    {
      "epoch": 1.2855007473841553,
      "grad_norm": 1.0782493352890015,
      "learning_rate": 6.3475373547316e-06,
      "loss": 2.1219,
      "step": 860
    },
    {
      "epoch": 1.3004484304932735,
      "grad_norm": 3.2218639850616455,
      "learning_rate": 6.292197011621472e-06,
      "loss": 2.1935,
      "step": 870
    },
    {
      "epoch": 1.3153961136023917,
      "grad_norm": 1.599663496017456,
      "learning_rate": 6.236856668511345e-06,
      "loss": 2.1043,
      "step": 880
    },
    {
      "epoch": 1.3303437967115097,
      "grad_norm": 1.6561187505722046,
      "learning_rate": 6.181516325401218e-06,
      "loss": 2.1245,
      "step": 890
    },
    {
      "epoch": 1.3452914798206277,
      "grad_norm": 2.6123058795928955,
      "learning_rate": 6.126175982291091e-06,
      "loss": 2.1262,
      "step": 900
    },
    {
      "epoch": 1.3452914798206277,
      "eval_loss": 2.122194290161133,
      "eval_runtime": 30.1151,
      "eval_samples_per_second": 78.997,
      "eval_steps_per_second": 9.895,
      "step": 900
    },
    {
      "epoch": 1.360239162929746,
      "grad_norm": 0.9529380798339844,
      "learning_rate": 6.070835639180963e-06,
      "loss": 2.0919,
      "step": 910
    },
    {
      "epoch": 1.375186846038864,
      "grad_norm": 1.6302707195281982,
      "learning_rate": 6.015495296070836e-06,
      "loss": 2.0495,
      "step": 920
    },
    {
      "epoch": 1.390134529147982,
      "grad_norm": 1.1701335906982422,
      "learning_rate": 5.960154952960708e-06,
      "loss": 2.0803,
      "step": 930
    },
    {
      "epoch": 1.4050822122571,
      "grad_norm": 2.805281400680542,
      "learning_rate": 5.904814609850582e-06,
      "loss": 2.0782,
      "step": 940
    },
    {
      "epoch": 1.4200298953662183,
      "grad_norm": 1.3093085289001465,
      "learning_rate": 5.849474266740455e-06,
      "loss": 2.0712,
      "step": 950
    },
    {
      "epoch": 1.4200298953662183,
      "eval_loss": 2.1198716163635254,
      "eval_runtime": 30.152,
      "eval_samples_per_second": 78.9,
      "eval_steps_per_second": 9.883,
      "step": 950
    },
    {
      "epoch": 1.4349775784753362,
      "grad_norm": 2.4577903747558594,
      "learning_rate": 5.794133923630328e-06,
      "loss": 2.0876,
      "step": 960
    },
    {
      "epoch": 1.4499252615844545,
      "grad_norm": 2.0424492359161377,
      "learning_rate": 5.7387935805202e-06,
      "loss": 2.061,
      "step": 970
    },
    {
      "epoch": 1.4648729446935724,
      "grad_norm": 1.8253668546676636,
      "learning_rate": 5.683453237410073e-06,
      "loss": 2.1498,
      "step": 980
    },
    {
      "epoch": 1.4798206278026906,
      "grad_norm": 2.1907503604888916,
      "learning_rate": 5.628112894299945e-06,
      "loss": 2.1937,
      "step": 990
    },
    {
      "epoch": 1.4947683109118086,
      "grad_norm": 1.3694138526916504,
      "learning_rate": 5.572772551189818e-06,
      "loss": 2.1856,
      "step": 1000
    },
    {
      "epoch": 1.4947683109118086,
      "eval_loss": 2.1183226108551025,
      "eval_runtime": 30.2109,
      "eval_samples_per_second": 78.746,
      "eval_steps_per_second": 9.864,
      "step": 1000
    },
    {
      "epoch": 1.5097159940209268,
      "grad_norm": 1.022425651550293,
      "learning_rate": 5.51743220807969e-06,
      "loss": 2.087,
      "step": 1010
    },
    {
      "epoch": 1.5246636771300448,
      "grad_norm": 1.1735296249389648,
      "learning_rate": 5.462091864969563e-06,
      "loss": 2.0792,
      "step": 1020
    },
    {
      "epoch": 1.5396113602391628,
      "grad_norm": 1.3417099714279175,
      "learning_rate": 5.4067515218594355e-06,
      "loss": 2.0107,
      "step": 1030
    },
    {
      "epoch": 1.554559043348281,
      "grad_norm": 1.11248779296875,
      "learning_rate": 5.351411178749308e-06,
      "loss": 2.1598,
      "step": 1040
    },
    {
      "epoch": 1.5695067264573992,
      "grad_norm": 1.140304684638977,
      "learning_rate": 5.296070835639181e-06,
      "loss": 2.0904,
      "step": 1050
    },
    {
      "epoch": 1.5695067264573992,
      "eval_loss": 2.1167256832122803,
      "eval_runtime": 30.1622,
      "eval_samples_per_second": 78.873,
      "eval_steps_per_second": 9.88,
      "step": 1050
    },
    {
      "epoch": 1.5844544095665172,
      "grad_norm": 2.1005067825317383,
      "learning_rate": 5.240730492529054e-06,
      "loss": 2.0742,
      "step": 1060
    },
    {
      "epoch": 1.5994020926756352,
      "grad_norm": 1.0912773609161377,
      "learning_rate": 5.1853901494189265e-06,
      "loss": 2.075,
      "step": 1070
    },
    {
      "epoch": 1.6143497757847534,
      "grad_norm": 1.2978053092956543,
      "learning_rate": 5.1300498063088e-06,
      "loss": 2.1205,
      "step": 1080
    },
    {
      "epoch": 1.6292974588938716,
      "grad_norm": 1.3240020275115967,
      "learning_rate": 5.0747094631986724e-06,
      "loss": 2.0322,
      "step": 1090
    },
    {
      "epoch": 1.6442451420029895,
      "grad_norm": 1.8735085725784302,
      "learning_rate": 5.019369120088545e-06,
      "loss": 2.1183,
      "step": 1100
    },
    {
      "epoch": 1.6442451420029895,
      "eval_loss": 2.1154470443725586,
      "eval_runtime": 30.0869,
      "eval_samples_per_second": 79.071,
      "eval_steps_per_second": 9.905,
      "step": 1100
    },
    {
      "epoch": 1.6591928251121075,
      "grad_norm": 1.5333822965621948,
      "learning_rate": 4.9640287769784175e-06,
      "loss": 2.0795,
      "step": 1110
    },
    {
      "epoch": 1.6741405082212257,
      "grad_norm": 1.368348240852356,
      "learning_rate": 4.908688433868291e-06,
      "loss": 2.088,
      "step": 1120
    },
    {
      "epoch": 1.689088191330344,
      "grad_norm": 1.627650260925293,
      "learning_rate": 4.8533480907581635e-06,
      "loss": 2.0805,
      "step": 1130
    },
    {
      "epoch": 1.704035874439462,
      "grad_norm": 1.4457964897155762,
      "learning_rate": 4.798007747648036e-06,
      "loss": 2.0955,
      "step": 1140
    },
    {
      "epoch": 1.71898355754858,
      "grad_norm": 2.6852869987487793,
      "learning_rate": 4.7426674045379085e-06,
      "loss": 2.1395,
      "step": 1150
    },
    {
      "epoch": 1.71898355754858,
      "eval_loss": 2.1143958568573,
      "eval_runtime": 30.0435,
      "eval_samples_per_second": 79.185,
      "eval_steps_per_second": 9.919,
      "step": 1150
    },
    {
      "epoch": 1.733931240657698,
      "grad_norm": 1.200893759727478,
      "learning_rate": 4.687327061427781e-06,
      "loss": 2.0545,
      "step": 1160
    },
    {
      "epoch": 1.7488789237668163,
      "grad_norm": 1.6354032754898071,
      "learning_rate": 4.631986718317654e-06,
      "loss": 2.0884,
      "step": 1170
    },
    {
      "epoch": 1.7638266068759343,
      "grad_norm": 1.3736437559127808,
      "learning_rate": 4.576646375207527e-06,
      "loss": 2.0966,
      "step": 1180
    },
    {
      "epoch": 1.7787742899850523,
      "grad_norm": 1.8193949460983276,
      "learning_rate": 4.5213060320973996e-06,
      "loss": 2.1184,
      "step": 1190
    },
    {
      "epoch": 1.7937219730941703,
      "grad_norm": 1.8045661449432373,
      "learning_rate": 4.465965688987272e-06,
      "loss": 2.1076,
      "step": 1200
    },
    {
      "epoch": 1.7937219730941703,
      "eval_loss": 2.1125354766845703,
      "eval_runtime": 30.0932,
      "eval_samples_per_second": 79.054,
      "eval_steps_per_second": 9.903,
      "step": 1200
    },
    {
      "epoch": 1.8086696562032885,
      "grad_norm": 0.9524943232536316,
      "learning_rate": 4.410625345877145e-06,
      "loss": 2.1212,
      "step": 1210
    },
    {
      "epoch": 1.8236173393124067,
      "grad_norm": 2.238903045654297,
      "learning_rate": 4.355285002767017e-06,
      "loss": 2.074,
      "step": 1220
    },
    {
      "epoch": 1.8385650224215246,
      "grad_norm": 1.2778817415237427,
      "learning_rate": 4.29994465965689e-06,
      "loss": 2.0873,
      "step": 1230
    },
    {
      "epoch": 1.8535127055306426,
      "grad_norm": 1.3004279136657715,
      "learning_rate": 4.244604316546763e-06,
      "loss": 1.9914,
      "step": 1240
    },
    {
      "epoch": 1.8684603886397608,
      "grad_norm": 1.1233559846878052,
      "learning_rate": 4.189263973436636e-06,
      "loss": 2.0448,
      "step": 1250
    },
    {
      "epoch": 1.8684603886397608,
      "eval_loss": 2.1116960048675537,
      "eval_runtime": 30.107,
      "eval_samples_per_second": 79.018,
      "eval_steps_per_second": 9.898,
      "step": 1250
    },
    {
      "epoch": 1.883408071748879,
      "grad_norm": 1.5583603382110596,
      "learning_rate": 4.133923630326508e-06,
      "loss": 2.0958,
      "step": 1260
    },
    {
      "epoch": 1.898355754857997,
      "grad_norm": 1.1002895832061768,
      "learning_rate": 4.078583287216381e-06,
      "loss": 2.0726,
      "step": 1270
    },
    {
      "epoch": 1.913303437967115,
      "grad_norm": 1.880204677581787,
      "learning_rate": 4.023242944106253e-06,
      "loss": 2.1392,
      "step": 1280
    },
    {
      "epoch": 1.9282511210762332,
      "grad_norm": 2.2396867275238037,
      "learning_rate": 3.967902600996127e-06,
      "loss": 2.0814,
      "step": 1290
    },
    {
      "epoch": 1.9431988041853514,
      "grad_norm": 2.967911720275879,
      "learning_rate": 3.912562257885999e-06,
      "loss": 2.0733,
      "step": 1300
    },
    {
      "epoch": 1.9431988041853514,
      "eval_loss": 2.1118075847625732,
      "eval_runtime": 30.057,
      "eval_samples_per_second": 79.15,
      "eval_steps_per_second": 9.914,
      "step": 1300
    },
    {
      "epoch": 1.9581464872944694,
      "grad_norm": 1.3809398412704468,
      "learning_rate": 3.857221914775872e-06,
      "loss": 2.1542,
      "step": 1310
    },
    {
      "epoch": 1.9730941704035874,
      "grad_norm": 1.2915862798690796,
      "learning_rate": 3.8018815716657447e-06,
      "loss": 2.0958,
      "step": 1320
    },
    {
      "epoch": 1.9880418535127056,
      "grad_norm": 1.4718170166015625,
      "learning_rate": 3.7465412285556173e-06,
      "loss": 2.0213,
      "step": 1330
    },
    {
      "epoch": 2.0029895366218238,
      "grad_norm": 1.0176986455917358,
      "learning_rate": 3.6912008854454903e-06,
      "loss": 2.0333,
      "step": 1340
    },
    {
      "epoch": 2.0179372197309418,
      "grad_norm": 1.1901532411575317,
      "learning_rate": 3.635860542335363e-06,
      "loss": 2.1311,
      "step": 1350
    },
    {
      "epoch": 2.0179372197309418,
      "eval_loss": 2.109938144683838,
      "eval_runtime": 30.073,
      "eval_samples_per_second": 79.107,
      "eval_steps_per_second": 9.909,
      "step": 1350
    },
    {
      "epoch": 2.0328849028400597,
      "grad_norm": 1.2217210531234741,
      "learning_rate": 3.5805201992252353e-06,
      "loss": 2.0788,
      "step": 1360
    },
    {
      "epoch": 2.0478325859491777,
      "grad_norm": 0.9801961183547974,
      "learning_rate": 3.525179856115108e-06,
      "loss": 2.1123,
      "step": 1370
    },
    {
      "epoch": 2.062780269058296,
      "grad_norm": 2.3935630321502686,
      "learning_rate": 3.4698395130049813e-06,
      "loss": 2.1163,
      "step": 1380
    },
    {
      "epoch": 2.077727952167414,
      "grad_norm": 0.9280572533607483,
      "learning_rate": 3.414499169894854e-06,
      "loss": 2.1277,
      "step": 1390
    },
    {
      "epoch": 2.092675635276532,
      "grad_norm": 1.5918769836425781,
      "learning_rate": 3.3591588267847264e-06,
      "loss": 2.1193,
      "step": 1400
    },
    {
      "epoch": 2.092675635276532,
      "eval_loss": 2.109112501144409,
      "eval_runtime": 30.0859,
      "eval_samples_per_second": 79.074,
      "eval_steps_per_second": 9.905,
      "step": 1400
    },
    {
      "epoch": 2.10762331838565,
      "grad_norm": 0.884373128414154,
      "learning_rate": 3.303818483674599e-06,
      "loss": 2.1235,
      "step": 1410
    },
    {
      "epoch": 2.1225710014947685,
      "grad_norm": 1.5050054788589478,
      "learning_rate": 3.2484781405644714e-06,
      "loss": 2.0006,
      "step": 1420
    },
    {
      "epoch": 2.1375186846038865,
      "grad_norm": 1.5645099878311157,
      "learning_rate": 3.1931377974543444e-06,
      "loss": 2.1273,
      "step": 1430
    },
    {
      "epoch": 2.1524663677130045,
      "grad_norm": 0.8769762516021729,
      "learning_rate": 3.1377974543442174e-06,
      "loss": 2.0386,
      "step": 1440
    },
    {
      "epoch": 2.1674140508221225,
      "grad_norm": 1.4254428148269653,
      "learning_rate": 3.08245711123409e-06,
      "loss": 2.1514,
      "step": 1450
    },
    {
      "epoch": 2.1674140508221225,
      "eval_loss": 2.1084606647491455,
      "eval_runtime": 30.1039,
      "eval_samples_per_second": 79.026,
      "eval_steps_per_second": 9.899,
      "step": 1450
    },
    {
      "epoch": 2.1823617339312404,
      "grad_norm": 1.3702871799468994,
      "learning_rate": 3.027116768123963e-06,
      "loss": 2.0774,
      "step": 1460
    },
    {
      "epoch": 2.197309417040359,
      "grad_norm": 1.3462142944335938,
      "learning_rate": 2.9717764250138354e-06,
      "loss": 2.0545,
      "step": 1470
    },
    {
      "epoch": 2.212257100149477,
      "grad_norm": 1.2407476902008057,
      "learning_rate": 2.916436081903708e-06,
      "loss": 2.0578,
      "step": 1480
    },
    {
      "epoch": 2.227204783258595,
      "grad_norm": 1.9418811798095703,
      "learning_rate": 2.8610957387935805e-06,
      "loss": 2.0477,
      "step": 1490
    },
    {
      "epoch": 2.242152466367713,
      "grad_norm": 0.9287471175193787,
      "learning_rate": 2.805755395683453e-06,
      "loss": 2.082,
      "step": 1500
    },
    {
      "epoch": 2.242152466367713,
      "eval_loss": 2.1078102588653564,
      "eval_runtime": 30.053,
      "eval_samples_per_second": 79.16,
      "eval_steps_per_second": 9.916,
      "step": 1500
    },
    {
      "epoch": 2.2571001494768312,
      "grad_norm": 1.2501996755599976,
      "learning_rate": 2.7504150525733265e-06,
      "loss": 2.0981,
      "step": 1510
    },
    {
      "epoch": 2.2720478325859492,
      "grad_norm": 1.3877182006835938,
      "learning_rate": 2.695074709463199e-06,
      "loss": 2.1213,
      "step": 1520
    },
    {
      "epoch": 2.286995515695067,
      "grad_norm": 1.2593797445297241,
      "learning_rate": 2.6397343663530715e-06,
      "loss": 2.1197,
      "step": 1530
    },
    {
      "epoch": 2.301943198804185,
      "grad_norm": 1.2758864164352417,
      "learning_rate": 2.5843940232429445e-06,
      "loss": 2.0212,
      "step": 1540
    },
    {
      "epoch": 2.3168908819133036,
      "grad_norm": 1.8401079177856445,
      "learning_rate": 2.529053680132817e-06,
      "loss": 2.0499,
      "step": 1550
    },
    {
      "epoch": 2.3168908819133036,
      "eval_loss": 2.10884428024292,
      "eval_runtime": 30.1097,
      "eval_samples_per_second": 79.011,
      "eval_steps_per_second": 9.897,
      "step": 1550
    },
    {
      "epoch": 2.3318385650224216,
      "grad_norm": 1.402533769607544,
      "learning_rate": 2.47371333702269e-06,
      "loss": 2.0468,
      "step": 1560
    },
    {
      "epoch": 2.3467862481315396,
      "grad_norm": 1.1168920993804932,
      "learning_rate": 2.4183729939125626e-06,
      "loss": 2.0492,
      "step": 1570
    },
    {
      "epoch": 2.3617339312406576,
      "grad_norm": 0.9348965287208557,
      "learning_rate": 2.363032650802435e-06,
      "loss": 2.0694,
      "step": 1580
    },
    {
      "epoch": 2.376681614349776,
      "grad_norm": 2.2180445194244385,
      "learning_rate": 2.307692307692308e-06,
      "loss": 2.0951,
      "step": 1590
    },
    {
      "epoch": 2.391629297458894,
      "grad_norm": 1.2066197395324707,
      "learning_rate": 2.2523519645821806e-06,
      "loss": 2.0384,
      "step": 1600
    },
    {
      "epoch": 2.391629297458894,
      "eval_loss": 2.1066524982452393,
      "eval_runtime": 30.0173,
      "eval_samples_per_second": 79.254,
      "eval_steps_per_second": 9.928,
      "step": 1600
    },
    {
      "epoch": 2.406576980568012,
      "grad_norm": 1.46078360080719,
      "learning_rate": 2.197011621472053e-06,
      "loss": 2.05,
      "step": 1610
    },
    {
      "epoch": 2.42152466367713,
      "grad_norm": 1.3928395509719849,
      "learning_rate": 2.1416712783619257e-06,
      "loss": 2.0549,
      "step": 1620
    },
    {
      "epoch": 2.436472346786248,
      "grad_norm": 1.1066607236862183,
      "learning_rate": 2.0863309352517987e-06,
      "loss": 2.095,
      "step": 1630
    },
    {
      "epoch": 2.4514200298953663,
      "grad_norm": 1.436477780342102,
      "learning_rate": 2.0309905921416716e-06,
      "loss": 2.1094,
      "step": 1640
    },
    {
      "epoch": 2.4663677130044843,
      "grad_norm": 1.1359939575195312,
      "learning_rate": 1.975650249031544e-06,
      "loss": 2.0611,
      "step": 1650
    },
    {
      "epoch": 2.4663677130044843,
      "eval_loss": 2.106337785720825,
      "eval_runtime": 30.2026,
      "eval_samples_per_second": 78.768,
      "eval_steps_per_second": 9.867,
      "step": 1650
    },
    {
      "epoch": 2.4813153961136023,
      "grad_norm": 2.3530466556549072,
      "learning_rate": 1.920309905921417e-06,
      "loss": 2.1308,
      "step": 1660
    },
    {
      "epoch": 2.4962630792227207,
      "grad_norm": 1.2133642435073853,
      "learning_rate": 1.8649695628112897e-06,
      "loss": 2.1137,
      "step": 1670
    },
    {
      "epoch": 2.5112107623318387,
      "grad_norm": 1.1253492832183838,
      "learning_rate": 1.8096292197011622e-06,
      "loss": 2.0882,
      "step": 1680
    },
    {
      "epoch": 2.5261584454409567,
      "grad_norm": 1.1537396907806396,
      "learning_rate": 1.7542888765910352e-06,
      "loss": 2.1309,
      "step": 1690
    },
    {
      "epoch": 2.5411061285500747,
      "grad_norm": 2.0210206508636475,
      "learning_rate": 1.6989485334809077e-06,
      "loss": 2.0659,
      "step": 1700
    },
    {
      "epoch": 2.5411061285500747,
      "eval_loss": 2.1059720516204834,
      "eval_runtime": 30.1533,
      "eval_samples_per_second": 78.897,
      "eval_steps_per_second": 9.883,
      "step": 1700
    },
    {
      "epoch": 2.5560538116591927,
      "grad_norm": 1.1660709381103516,
      "learning_rate": 1.6436081903707805e-06,
      "loss": 2.1343,
      "step": 1710
    },
    {
      "epoch": 2.5710014947683106,
      "grad_norm": 1.7620964050292969,
      "learning_rate": 1.588267847260653e-06,
      "loss": 2.0652,
      "step": 1720
    },
    {
      "epoch": 2.585949177877429,
      "grad_norm": 1.5986768007278442,
      "learning_rate": 1.532927504150526e-06,
      "loss": 2.0809,
      "step": 1730
    },
    {
      "epoch": 2.600896860986547,
      "grad_norm": 1.311782956123352,
      "learning_rate": 1.4775871610403986e-06,
      "loss": 2.0693,
      "step": 1740
    },
    {
      "epoch": 2.615844544095665,
      "grad_norm": 1.6281685829162598,
      "learning_rate": 1.422246817930271e-06,
      "loss": 2.1416,
      "step": 1750
    },
    {
      "epoch": 2.615844544095665,
      "eval_loss": 2.1055641174316406,
      "eval_runtime": 30.193,
      "eval_samples_per_second": 78.793,
      "eval_steps_per_second": 9.87,
      "step": 1750
    },
    {
      "epoch": 2.6307922272047835,
      "grad_norm": 2.0422520637512207,
      "learning_rate": 1.366906474820144e-06,
      "loss": 2.0025,
      "step": 1760
    },
    {
      "epoch": 2.6457399103139014,
      "grad_norm": 1.092944622039795,
      "learning_rate": 1.3115661317100168e-06,
      "loss": 2.0917,
      "step": 1770
    },
    {
      "epoch": 2.6606875934230194,
      "grad_norm": 1.5289971828460693,
      "learning_rate": 1.2562257885998894e-06,
      "loss": 2.1295,
      "step": 1780
    },
    {
      "epoch": 2.6756352765321374,
      "grad_norm": 1.2470191717147827,
      "learning_rate": 1.2008854454897621e-06,
      "loss": 2.0243,
      "step": 1790
    },
    {
      "epoch": 2.6905829596412554,
      "grad_norm": 1.269268274307251,
      "learning_rate": 1.1455451023796349e-06,
      "loss": 2.0391,
      "step": 1800
    },
    {
      "epoch": 2.6905829596412554,
      "eval_loss": 2.1052894592285156,
      "eval_runtime": 30.1579,
      "eval_samples_per_second": 78.885,
      "eval_steps_per_second": 9.881,
      "step": 1800
    },
    {
      "epoch": 2.705530642750374,
      "grad_norm": 1.169645071029663,
      "learning_rate": 1.0902047592695076e-06,
      "loss": 2.0853,
      "step": 1810
    },
    {
      "epoch": 2.720478325859492,
      "grad_norm": 1.1329983472824097,
      "learning_rate": 1.0348644161593804e-06,
      "loss": 2.078,
      "step": 1820
    },
    {
      "epoch": 2.7354260089686098,
      "grad_norm": 1.12748122215271,
      "learning_rate": 9.79524073049253e-07,
      "loss": 2.1581,
      "step": 1830
    },
    {
      "epoch": 2.750373692077728,
      "grad_norm": 1.226067304611206,
      "learning_rate": 9.241837299391257e-07,
      "loss": 2.0633,
      "step": 1840
    },
    {
      "epoch": 2.765321375186846,
      "grad_norm": 1.1362773180007935,
      "learning_rate": 8.688433868289984e-07,
      "loss": 2.0396,
      "step": 1850
    },
    {
      "epoch": 2.765321375186846,
      "eval_loss": 2.105060338973999,
      "eval_runtime": 30.1339,
      "eval_samples_per_second": 78.948,
      "eval_steps_per_second": 9.889,
      "step": 1850
    },
    {
      "epoch": 2.780269058295964,
      "grad_norm": 1.0370166301727295,
      "learning_rate": 8.135030437188711e-07,
      "loss": 2.1196,
      "step": 1860
    },
    {
      "epoch": 2.795216741405082,
      "grad_norm": 1.070854902267456,
      "learning_rate": 7.581627006087438e-07,
      "loss": 2.061,
      "step": 1870
    },
    {
      "epoch": 2.8101644245142,
      "grad_norm": 1.04238760471344,
      "learning_rate": 7.028223574986165e-07,
      "loss": 2.1097,
      "step": 1880
    },
    {
      "epoch": 2.8251121076233185,
      "grad_norm": 1.216498613357544,
      "learning_rate": 6.474820143884893e-07,
      "loss": 2.0793,
      "step": 1890
    },
    {
      "epoch": 2.8400597907324365,
      "grad_norm": 0.9645154476165771,
      "learning_rate": 5.92141671278362e-07,
      "loss": 2.0518,
      "step": 1900
    },
    {
      "epoch": 2.8400597907324365,
      "eval_loss": 2.104897975921631,
      "eval_runtime": 30.1412,
      "eval_samples_per_second": 78.929,
      "eval_steps_per_second": 9.887,
      "step": 1900
    },
    {
      "epoch": 2.8550074738415545,
      "grad_norm": 1.124147891998291,
      "learning_rate": 5.368013281682347e-07,
      "loss": 2.1064,
      "step": 1910
    },
    {
      "epoch": 2.8699551569506725,
      "grad_norm": 1.7220090627670288,
      "learning_rate": 4.814609850581074e-07,
      "loss": 2.1687,
      "step": 1920
    },
    {
      "epoch": 2.884902840059791,
      "grad_norm": 1.0958945751190186,
      "learning_rate": 4.261206419479801e-07,
      "loss": 2.0322,
      "step": 1930
    },
    {
      "epoch": 2.899850523168909,
      "grad_norm": 1.2948551177978516,
      "learning_rate": 3.707802988378528e-07,
      "loss": 2.1591,
      "step": 1940
    },
    {
      "epoch": 2.914798206278027,
      "grad_norm": 1.7751617431640625,
      "learning_rate": 3.154399557277255e-07,
      "loss": 2.058,
      "step": 1950
    },
    {
      "epoch": 2.914798206278027,
      "eval_loss": 2.104750871658325,
      "eval_runtime": 30.1996,
      "eval_samples_per_second": 78.776,
      "eval_steps_per_second": 9.868,
      "step": 1950
    },
    {
      "epoch": 2.929745889387145,
      "grad_norm": 1.2900798320770264,
      "learning_rate": 2.6009961261759827e-07,
      "loss": 2.0058,
      "step": 1960
    },
    {
      "epoch": 2.944693572496263,
      "grad_norm": 1.0592029094696045,
      "learning_rate": 2.0475926950747095e-07,
      "loss": 2.0297,
      "step": 1970
    },
    {
      "epoch": 2.9596412556053813,
      "grad_norm": 1.9278658628463745,
      "learning_rate": 1.4941892639734368e-07,
      "loss": 2.0576,
      "step": 1980
    },
    {
      "epoch": 2.9745889387144993,
      "grad_norm": 1.156235694885254,
      "learning_rate": 9.407858328721639e-08,
      "loss": 2.0676,
      "step": 1990
    },
    {
      "epoch": 2.9895366218236172,
      "grad_norm": 1.1107338666915894,
      "learning_rate": 3.87382401770891e-08,
      "loss": 2.0392,
      "step": 2000
    },
    {
      "epoch": 2.9895366218236172,
      "eval_loss": 2.1047139167785645,
      "eval_runtime": 30.1252,
      "eval_samples_per_second": 78.97,
      "eval_steps_per_second": 9.892,
      "step": 2000
    },
    {
      "epoch": 3.0,
      "step": 2007,
      "total_flos": 1.9233413985101414e+17,
      "train_loss": 2.3574759455302132,
      "train_runtime": 3249.9454,
      "train_samples_per_second": 19.762,
      "train_steps_per_second": 0.618
    }
  ],
  "logging_steps": 10,
  "max_steps": 2007,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 3,
  "save_steps": 1000,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 1.9233413985101414e+17,
  "train_batch_size": 8,
  "trial_name": null,
  "trial_params": null
}