{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 2.0,
"eval_steps": 500,
"global_step": 784,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.002551020408163265,
"grad_norm": 1.138124187948155,
"learning_rate": 1.2658227848101267e-05,
"loss": 1.7551,
"step": 1
},
{
"epoch": 0.012755102040816327,
"grad_norm": 0.7551830131389933,
"learning_rate": 6.329113924050633e-05,
"loss": 1.7707,
"step": 5
},
{
"epoch": 0.025510204081632654,
"grad_norm": 1.379764699722761,
"learning_rate": 0.00012658227848101267,
"loss": 1.7257,
"step": 10
},
{
"epoch": 0.03826530612244898,
"grad_norm": 0.4560275436076655,
"learning_rate": 0.000189873417721519,
"loss": 1.6272,
"step": 15
},
{
"epoch": 0.05102040816326531,
"grad_norm": 0.3675085939167979,
"learning_rate": 0.00025316455696202533,
"loss": 1.5808,
"step": 20
},
{
"epoch": 0.06377551020408163,
"grad_norm": 0.20864289230413074,
"learning_rate": 0.00031645569620253165,
"loss": 1.5291,
"step": 25
},
{
"epoch": 0.07653061224489796,
"grad_norm": 0.18719394627139643,
"learning_rate": 0.000379746835443038,
"loss": 1.5338,
"step": 30
},
{
"epoch": 0.08928571428571429,
"grad_norm": 0.1717343346810603,
"learning_rate": 0.0004430379746835443,
"loss": 1.5007,
"step": 35
},
{
"epoch": 0.10204081632653061,
"grad_norm": 0.15610479873752922,
"learning_rate": 0.0005063291139240507,
"loss": 1.4652,
"step": 40
},
{
"epoch": 0.11479591836734694,
"grad_norm": 0.13214894352385953,
"learning_rate": 0.000569620253164557,
"loss": 1.4388,
"step": 45
},
{
"epoch": 0.12755102040816327,
"grad_norm": 0.12168972456032588,
"learning_rate": 0.0006329113924050633,
"loss": 1.4225,
"step": 50
},
{
"epoch": 0.14030612244897958,
"grad_norm": 0.1323077190059568,
"learning_rate": 0.0006962025316455697,
"loss": 1.409,
"step": 55
},
{
"epoch": 0.15306122448979592,
"grad_norm": 0.37430096940846036,
"learning_rate": 0.000759493670886076,
"loss": 1.3951,
"step": 60
},
{
"epoch": 0.16581632653061223,
"grad_norm": 0.3814386987087306,
"learning_rate": 0.0008227848101265824,
"loss": 1.3972,
"step": 65
},
{
"epoch": 0.17857142857142858,
"grad_norm": 0.40216892416078964,
"learning_rate": 0.0008860759493670886,
"loss": 1.3864,
"step": 70
},
{
"epoch": 0.1913265306122449,
"grad_norm": 0.20332439264842958,
"learning_rate": 0.0009493670886075949,
"loss": 1.3861,
"step": 75
},
{
"epoch": 0.20408163265306123,
"grad_norm": 0.2924926145777638,
"learning_rate": 0.0009999950356681913,
"loss": 1.3891,
"step": 80
},
{
"epoch": 0.21683673469387754,
"grad_norm": 0.29723891288444315,
"learning_rate": 0.000999821294405392,
"loss": 1.3982,
"step": 85
},
{
"epoch": 0.22959183673469388,
"grad_norm": 0.23043075033731064,
"learning_rate": 0.0009993994351217151,
"loss": 1.3821,
"step": 90
},
{
"epoch": 0.2423469387755102,
"grad_norm": 0.15934631204828656,
"learning_rate": 0.00099872966723379,
"loss": 1.3937,
"step": 95
},
{
"epoch": 0.25510204081632654,
"grad_norm": 0.19636795516281652,
"learning_rate": 0.0009978123232234147,
"loss": 1.3393,
"step": 100
},
{
"epoch": 0.26785714285714285,
"grad_norm": 0.3357008076473798,
"learning_rate": 0.0009966478584725086,
"loss": 1.3873,
"step": 105
},
{
"epoch": 0.28061224489795916,
"grad_norm": 0.2979357511561018,
"learning_rate": 0.0009952368510370538,
"loss": 1.3575,
"step": 110
},
{
"epoch": 0.29336734693877553,
"grad_norm": 0.23158016835002,
"learning_rate": 0.0009935800013601416,
"loss": 1.3614,
"step": 115
},
{
"epoch": 0.30612244897959184,
"grad_norm": 0.19086278999416814,
"learning_rate": 0.0009916781319242614,
"loss": 1.3609,
"step": 120
},
{
"epoch": 0.31887755102040816,
"grad_norm": 0.2238120900111794,
"learning_rate": 0.0009895321868430113,
"loss": 1.3436,
"step": 125
},
{
"epoch": 0.33163265306122447,
"grad_norm": 0.22212987754769067,
"learning_rate": 0.0009871432313924254,
"loss": 1.3356,
"step": 130
},
{
"epoch": 0.34438775510204084,
"grad_norm": 0.18172190875737002,
"learning_rate": 0.000984512451482158,
"loss": 1.3428,
"step": 135
},
{
"epoch": 0.35714285714285715,
"grad_norm": 0.1965103083782127,
"learning_rate": 0.0009816411530667814,
"loss": 1.3053,
"step": 140
},
{
"epoch": 0.36989795918367346,
"grad_norm": 0.14562927658303307,
"learning_rate": 0.000978530761497492,
"loss": 1.3233,
"step": 145
},
{
"epoch": 0.3826530612244898,
"grad_norm": 0.19370329245107856,
"learning_rate": 0.0009751828208145482,
"loss": 1.3309,
"step": 150
},
{
"epoch": 0.39540816326530615,
"grad_norm": 0.25874954807518885,
"learning_rate": 0.0009715989929807862,
"loss": 1.3386,
"step": 155
},
{
"epoch": 0.40816326530612246,
"grad_norm": 0.32074900532811484,
"learning_rate": 0.000967781057056601,
"loss": 1.3197,
"step": 160
},
{
"epoch": 0.42091836734693877,
"grad_norm": 0.2345834124220609,
"learning_rate": 0.0009637309083167956,
"loss": 1.3353,
"step": 165
},
{
"epoch": 0.4336734693877551,
"grad_norm": 0.19716142253846314,
"learning_rate": 0.0009594505573097414,
"loss": 1.3148,
"step": 170
},
{
"epoch": 0.44642857142857145,
"grad_norm": 1.6838448936670876,
"learning_rate": 0.0009549421288593157,
"loss": 1.4963,
"step": 175
},
{
"epoch": 0.45918367346938777,
"grad_norm": 0.5400080339132108,
"learning_rate": 0.0009502078610101092,
"loss": 1.3763,
"step": 180
},
{
"epoch": 0.4719387755102041,
"grad_norm": 0.5636053179876761,
"learning_rate": 0.0009452501039164315,
"loss": 1.3562,
"step": 185
},
{
"epoch": 0.4846938775510204,
"grad_norm": 0.26363289666703416,
"learning_rate": 0.0009400713186756625,
"loss": 1.3671,
"step": 190
},
{
"epoch": 0.49744897959183676,
"grad_norm": 0.2063220498775075,
"learning_rate": 0.0009346740761065305,
"loss": 1.337,
"step": 195
},
{
"epoch": 0.5102040816326531,
"grad_norm": 0.851877134215801,
"learning_rate": 0.0009290610554729234,
"loss": 1.3404,
"step": 200
},
{
"epoch": 0.5229591836734694,
"grad_norm": 0.17433687232665127,
"learning_rate": 0.0009232350431538657,
"loss": 1.3278,
"step": 205
},
{
"epoch": 0.5357142857142857,
"grad_norm": 0.2552513397179755,
"learning_rate": 0.0009171989312603226,
"loss": 1.3621,
"step": 210
},
{
"epoch": 0.548469387755102,
"grad_norm": 0.18498941711074082,
"learning_rate": 0.0009109557161995172,
"loss": 1.3365,
"step": 215
},
{
"epoch": 0.5612244897959183,
"grad_norm": 0.19451053194971357,
"learning_rate": 0.0009045084971874737,
"loss": 1.3329,
"step": 220
},
{
"epoch": 0.5739795918367347,
"grad_norm": 0.14628015588879814,
"learning_rate": 0.0008978604747105246,
"loss": 1.3133,
"step": 225
},
{
"epoch": 0.5867346938775511,
"grad_norm": 0.2028020969079187,
"learning_rate": 0.000891014948936546,
"loss": 1.3337,
"step": 230
},
{
"epoch": 0.5994897959183674,
"grad_norm": 0.18332572064094557,
"learning_rate": 0.0008839753180767108,
"loss": 1.3132,
"step": 235
},
{
"epoch": 0.6122448979591837,
"grad_norm": 0.14205229763128208,
"learning_rate": 0.0008767450766985694,
"loss": 1.29,
"step": 240
},
{
"epoch": 0.625,
"grad_norm": 0.30410202409557807,
"learning_rate": 0.000869327813991301,
"loss": 1.3057,
"step": 245
},
{
"epoch": 0.6377551020408163,
"grad_norm": 0.19577309506455626,
"learning_rate": 0.0008617272119839903,
"loss": 1.332,
"step": 250
},
{
"epoch": 0.6505102040816326,
"grad_norm": 0.1272875527576836,
"learning_rate": 0.0008539470437178196,
"loss": 1.3206,
"step": 255
},
{
"epoch": 0.6632653061224489,
"grad_norm": 0.15194088157912253,
"learning_rate": 0.0008459911713730799,
"loss": 1.2914,
"step": 260
},
{
"epoch": 0.6760204081632653,
"grad_norm": 0.12663044164256262,
"learning_rate": 0.0008378635443519327,
"loss": 1.2917,
"step": 265
},
{
"epoch": 0.6887755102040817,
"grad_norm": 0.1344562065693353,
"learning_rate": 0.0008295681973178737,
"loss": 1.2994,
"step": 270
},
{
"epoch": 0.701530612244898,
"grad_norm": 0.13392241997034995,
"learning_rate": 0.0008211092481928716,
"loss": 1.297,
"step": 275
},
{
"epoch": 0.7142857142857143,
"grad_norm": 0.14371429407675881,
"learning_rate": 0.0008124908961131759,
"loss": 1.3095,
"step": 280
},
{
"epoch": 0.7270408163265306,
"grad_norm": 0.15013152147820535,
"learning_rate": 0.0008037174193448089,
"loss": 1.2991,
"step": 285
},
{
"epoch": 0.7397959183673469,
"grad_norm": 0.13870289504861724,
"learning_rate": 0.000794793173159778,
"loss": 1.3066,
"step": 290
},
{
"epoch": 0.7525510204081632,
"grad_norm": 0.11463233811669157,
"learning_rate": 0.0007857225876740584,
"loss": 1.2914,
"step": 295
},
{
"epoch": 0.7653061224489796,
"grad_norm": 0.16118564397614718,
"learning_rate": 0.000776510165648425,
"loss": 1.286,
"step": 300
},
{
"epoch": 0.7780612244897959,
"grad_norm": 0.16328954399209802,
"learning_rate": 0.000767160480253221,
"loss": 1.3065,
"step": 305
},
{
"epoch": 0.7908163265306123,
"grad_norm": 0.3891017035371994,
"learning_rate": 0.000757678172798175,
"loss": 1.2978,
"step": 310
},
{
"epoch": 0.8035714285714286,
"grad_norm": 0.1374369729620582,
"learning_rate": 0.0007480679504283911,
"loss": 1.2737,
"step": 315
},
{
"epoch": 0.8163265306122449,
"grad_norm": 0.10483812728418156,
"learning_rate": 0.00073833458378766,
"loss": 1.2638,
"step": 320
},
{
"epoch": 0.8290816326530612,
"grad_norm": 0.20381163662422036,
"learning_rate": 0.0007284829046502467,
"loss": 1.2888,
"step": 325
},
{
"epoch": 0.8418367346938775,
"grad_norm": 0.11728927247718034,
"learning_rate": 0.0007185178035223327,
"loss": 1.2988,
"step": 330
},
{
"epoch": 0.8545918367346939,
"grad_norm": 0.10959677714791609,
"learning_rate": 0.0007084442272143026,
"loss": 1.2722,
"step": 335
},
{
"epoch": 0.8673469387755102,
"grad_norm": 0.1156080310279316,
"learning_rate": 0.0006982671763850814,
"loss": 1.2831,
"step": 340
},
{
"epoch": 0.8801020408163265,
"grad_norm": 0.1401568063137037,
"learning_rate": 0.0006879917030597397,
"loss": 1.2766,
"step": 345
},
{
"epoch": 0.8928571428571429,
"grad_norm": 0.1267536193584464,
"learning_rate": 0.0006776229081216001,
"loss": 1.2909,
"step": 350
},
{
"epoch": 0.9056122448979592,
"grad_norm": 0.1276445520009822,
"learning_rate": 0.0006671659387800909,
"loss": 1.271,
"step": 355
},
{
"epoch": 0.9183673469387755,
"grad_norm": 0.12028703336124712,
"learning_rate": 0.0006566259860156014,
"loss": 1.2681,
"step": 360
},
{
"epoch": 0.9311224489795918,
"grad_norm": 0.10748771516704643,
"learning_rate": 0.0006460082820026094,
"loss": 1.2634,
"step": 365
},
{
"epoch": 0.9438775510204082,
"grad_norm": 0.09058225138342332,
"learning_rate": 0.0006353180975123595,
"loss": 1.2785,
"step": 370
},
{
"epoch": 0.9566326530612245,
"grad_norm": 0.1471646323105399,
"learning_rate": 0.000624560739296381,
"loss": 1.2537,
"step": 375
},
{
"epoch": 0.9693877551020408,
"grad_norm": 0.12205622985562622,
"learning_rate": 0.0006137415474521454,
"loss": 1.2834,
"step": 380
},
{
"epoch": 0.9821428571428571,
"grad_norm": 0.1198227865731359,
"learning_rate": 0.0006028658927721697,
"loss": 1.268,
"step": 385
},
{
"epoch": 0.9948979591836735,
"grad_norm": 0.10689994479443513,
"learning_rate": 0.0005919391740778833,
"loss": 1.2705,
"step": 390
},
{
"epoch": 1.0,
"eval_loss": 1.864872694015503,
"eval_runtime": 85.1881,
"eval_samples_per_second": 152.568,
"eval_steps_per_second": 4.778,
"step": 392
},
{
"epoch": 1.0076530612244898,
"grad_norm": 0.11143268558344442,
"learning_rate": 0.0005809668155395793,
"loss": 1.2488,
"step": 395
},
{
"epoch": 1.0204081632653061,
"grad_norm": 0.10688715702524573,
"learning_rate": 0.0005699542639837844,
"loss": 1.2129,
"step": 400
},
{
"epoch": 1.0331632653061225,
"grad_norm": 0.09930208201709802,
"learning_rate": 0.0005589069861893798,
"loss": 1.2152,
"step": 405
},
{
"epoch": 1.0459183673469388,
"grad_norm": 0.14907362895999168,
"learning_rate": 0.0005478304661738199,
"loss": 1.2204,
"step": 410
},
{
"epoch": 1.058673469387755,
"grad_norm": 0.12329968075769418,
"learning_rate": 0.000536730202470791,
"loss": 1.248,
"step": 415
},
{
"epoch": 1.0714285714285714,
"grad_norm": 0.10929541376542931,
"learning_rate": 0.000525611705400666,
"loss": 1.2511,
"step": 420
},
{
"epoch": 1.0841836734693877,
"grad_norm": 0.12233961076169748,
"learning_rate": 0.000514480494335106,
"loss": 1.2165,
"step": 425
},
{
"epoch": 1.096938775510204,
"grad_norm": 0.11362833380867698,
"learning_rate": 0.0005033420949571712,
"loss": 1.2353,
"step": 430
},
{
"epoch": 1.1096938775510203,
"grad_norm": 0.09720826313265056,
"learning_rate": 0.0004922020365182968,
"loss": 1.209,
"step": 435
},
{
"epoch": 1.1224489795918366,
"grad_norm": 0.1184275991715705,
"learning_rate": 0.0004810658490934979,
"loss": 1.2218,
"step": 440
},
{
"epoch": 1.135204081632653,
"grad_norm": 0.13198322755570885,
"learning_rate": 0.0004699390608361665,
"loss": 1.2035,
"step": 445
},
{
"epoch": 1.1479591836734695,
"grad_norm": 0.10874477518300978,
"learning_rate": 0.0004588271952338212,
"loss": 1.2172,
"step": 450
},
{
"epoch": 1.1607142857142858,
"grad_norm": 0.08852135264878797,
"learning_rate": 0.00044773576836617336,
"loss": 1.221,
"step": 455
},
{
"epoch": 1.1734693877551021,
"grad_norm": 0.10770572502518004,
"learning_rate": 0.0004366702861668716,
"loss": 1.2192,
"step": 460
},
{
"epoch": 1.1862244897959184,
"grad_norm": 0.15699433228433193,
"learning_rate": 0.0004256362416902817,
"loss": 1.2204,
"step": 465
},
{
"epoch": 1.1989795918367347,
"grad_norm": 0.09204241599916607,
"learning_rate": 0.0004146391123846606,
"loss": 1.2338,
"step": 470
},
{
"epoch": 1.211734693877551,
"grad_norm": 0.08836564479064991,
"learning_rate": 0.00040368435737307733,
"loss": 1.2248,
"step": 475
},
{
"epoch": 1.2244897959183674,
"grad_norm": 0.09509702477272594,
"learning_rate": 0.00039277741474343054,
"loss": 1.2168,
"step": 480
},
{
"epoch": 1.2372448979591837,
"grad_norm": 0.0894046781603559,
"learning_rate": 0.00038192369884890886,
"loss": 1.2232,
"step": 485
},
{
"epoch": 1.25,
"grad_norm": 0.09858037446272845,
"learning_rate": 0.0003711285976202331,
"loss": 1.2159,
"step": 490
},
{
"epoch": 1.2627551020408163,
"grad_norm": 0.10699315355393832,
"learning_rate": 0.0003603974698910139,
"loss": 1.2324,
"step": 495
},
{
"epoch": 1.2755102040816326,
"grad_norm": 0.09869982370664032,
"learning_rate": 0.0003497356427375562,
"loss": 1.2252,
"step": 500
},
{
"epoch": 1.288265306122449,
"grad_norm": 0.0845449914129218,
"learning_rate": 0.0003391484088344257,
"loss": 1.231,
"step": 505
},
{
"epoch": 1.3010204081632653,
"grad_norm": 0.09770167031364653,
"learning_rate": 0.00032864102382709374,
"loss": 1.2108,
"step": 510
},
{
"epoch": 1.3137755102040816,
"grad_norm": 0.08930615418315077,
"learning_rate": 0.0003182187037229653,
"loss": 1.2234,
"step": 515
},
{
"epoch": 1.3265306122448979,
"grad_norm": 0.11628427629387307,
"learning_rate": 0.0003078866223020815,
"loss": 1.2504,
"step": 520
},
{
"epoch": 1.3392857142857144,
"grad_norm": 0.08943286693904606,
"learning_rate": 0.0002976499085487862,
"loss": 1.2265,
"step": 525
},
{
"epoch": 1.3520408163265305,
"grad_norm": 0.10296423428005826,
"learning_rate": 0.0002875136441056286,
"loss": 1.2096,
"step": 530
},
{
"epoch": 1.364795918367347,
"grad_norm": 0.0881663874621136,
"learning_rate": 0.00027748286075076836,
"loss": 1.2155,
"step": 535
},
{
"epoch": 1.3775510204081631,
"grad_norm": 0.08112211915193616,
"learning_rate": 0.00026756253790013193,
"loss": 1.2279,
"step": 540
},
{
"epoch": 1.3903061224489797,
"grad_norm": 0.090452876583666,
"learning_rate": 0.00025775760013556424,
"loss": 1.2176,
"step": 545
},
{
"epoch": 1.403061224489796,
"grad_norm": 0.08241049413771702,
"learning_rate": 0.00024807291476019994,
"loss": 1.2235,
"step": 550
},
{
"epoch": 1.4158163265306123,
"grad_norm": 0.0930330073695628,
"learning_rate": 0.00023851328938226808,
"loss": 1.2039,
"step": 555
},
{
"epoch": 1.4285714285714286,
"grad_norm": 0.07990040558164058,
"learning_rate": 0.0002290834695285316,
"loss": 1.2134,
"step": 560
},
{
"epoch": 1.441326530612245,
"grad_norm": 0.08688446519906046,
"learning_rate": 0.0002197881362885426,
"loss": 1.1858,
"step": 565
},
{
"epoch": 1.4540816326530612,
"grad_norm": 0.08906821862405218,
"learning_rate": 0.0002106319039908879,
"loss": 1.2142,
"step": 570
},
{
"epoch": 1.4668367346938775,
"grad_norm": 0.09353827272017026,
"learning_rate": 0.000201619317912573,
"loss": 1.2176,
"step": 575
},
{
"epoch": 1.4795918367346939,
"grad_norm": 0.09869151116487236,
"learning_rate": 0.00019275485202268573,
"loss": 1.2111,
"step": 580
},
{
"epoch": 1.4923469387755102,
"grad_norm": 0.07871336680332862,
"learning_rate": 0.00018404290676145857,
"loss": 1.2009,
"step": 585
},
{
"epoch": 1.5051020408163265,
"grad_norm": 0.08210906329432778,
"learning_rate": 0.00017548780685582949,
"loss": 1.1945,
"step": 590
},
{
"epoch": 1.5178571428571428,
"grad_norm": 0.07607843649816663,
"learning_rate": 0.00016709379917259027,
"loss": 1.2013,
"step": 595
},
{
"epoch": 1.5306122448979593,
"grad_norm": 0.07966793194282133,
"learning_rate": 0.00015886505061018413,
"loss": 1.2372,
"step": 600
},
{
"epoch": 1.5433673469387754,
"grad_norm": 0.07879844228820579,
"learning_rate": 0.00015080564603020142,
"loss": 1.2051,
"step": 605
},
{
"epoch": 1.556122448979592,
"grad_norm": 0.07337572605792314,
"learning_rate": 0.00014291958622959973,
"loss": 1.2224,
"step": 610
},
{
"epoch": 1.568877551020408,
"grad_norm": 0.08717534192433574,
"learning_rate": 0.0001352107859546533,
"loss": 1.1978,
"step": 615
},
{
"epoch": 1.5816326530612246,
"grad_norm": 0.08338962851545824,
"learning_rate": 0.00012768307195762168,
"loss": 1.1933,
"step": 620
},
{
"epoch": 1.5943877551020407,
"grad_norm": 0.07500681809171242,
"learning_rate": 0.00012034018109709716,
"loss": 1.1924,
"step": 625
},
{
"epoch": 1.6071428571428572,
"grad_norm": 0.07959205675721448,
"learning_rate": 0.0001131857584829783,
"loss": 1.2101,
"step": 630
},
{
"epoch": 1.6198979591836735,
"grad_norm": 0.06971085806957752,
"learning_rate": 0.00010622335566698877,
"loss": 1.2142,
"step": 635
},
{
"epoch": 1.6326530612244898,
"grad_norm": 0.06828459321037969,
"learning_rate": 9.94564288796384e-05,
"loss": 1.2119,
"step": 640
},
{
"epoch": 1.6454081632653061,
"grad_norm": 0.07198545650406166,
"learning_rate": 9.288833731450419e-05,
"loss": 1.203,
"step": 645
},
{
"epoch": 1.6581632653061225,
"grad_norm": 0.0765334557855611,
"learning_rate": 8.652234146068206e-05,
"loss": 1.1987,
"step": 650
},
{
"epoch": 1.6709183673469388,
"grad_norm": 0.06996249721515567,
"learning_rate": 8.036160148423449e-05,
"loss": 1.205,
"step": 655
},
{
"epoch": 1.683673469387755,
"grad_norm": 0.07068726506681239,
"learning_rate": 7.440917565944349e-05,
"loss": 1.2168,
"step": 660
},
{
"epoch": 1.6964285714285714,
"grad_norm": 0.06352753939218142,
"learning_rate": 6.866801885064056e-05,
"loss": 1.1967,
"step": 665
},
{
"epoch": 1.7091836734693877,
"grad_norm": 0.06967499508626017,
"learning_rate": 6.314098104537324e-05,
"loss": 1.1747,
"step": 670
},
{
"epoch": 1.7219387755102042,
"grad_norm": 0.06527614945695559,
"learning_rate": 5.783080593963219e-05,
"loss": 1.1991,
"step": 675
},
{
"epoch": 1.7346938775510203,
"grad_norm": 0.06301111901417586,
"learning_rate": 5.27401295758439e-05,
"loss": 1.1996,
"step": 680
},
{
"epoch": 1.7474489795918369,
"grad_norm": 0.06944052501466691,
"learning_rate": 4.787147903430383e-05,
"loss": 1.1968,
"step": 685
},
{
"epoch": 1.760204081632653,
"grad_norm": 0.07413044994704315,
"learning_rate": 4.322727117869951e-05,
"loss": 1.1943,
"step": 690
},
{
"epoch": 1.7729591836734695,
"grad_norm": 0.06365480540319339,
"learning_rate": 3.880981145634704e-05,
"loss": 1.2191,
"step": 695
},
{
"epoch": 1.7857142857142856,
"grad_norm": 0.0713239761471288,
"learning_rate": 3.462129275373577e-05,
"loss": 1.2286,
"step": 700
},
{
"epoch": 1.7984693877551021,
"grad_norm": 0.08049708826438332,
"learning_rate": 3.066379430795002e-05,
"loss": 1.2161,
"step": 705
},
{
"epoch": 1.8112244897959182,
"grad_norm": 0.061716360746827195,
"learning_rate": 2.6939280674508016e-05,
"loss": 1.1999,
"step": 710
},
{
"epoch": 1.8239795918367347,
"grad_norm": 0.0631321070521143,
"learning_rate": 2.3449600752129597e-05,
"loss": 1.2079,
"step": 715
},
{
"epoch": 1.836734693877551,
"grad_norm": 0.0635532701163778,
"learning_rate": 2.019648686491865e-05,
"loss": 1.191,
"step": 720
},
{
"epoch": 1.8494897959183674,
"grad_norm": 0.06640321579684394,
"learning_rate": 1.7181553902413438e-05,
"loss": 1.2255,
"step": 725
},
{
"epoch": 1.8622448979591837,
"grad_norm": 0.06524200918052049,
"learning_rate": 1.4406298517934068e-05,
"loss": 1.2206,
"step": 730
},
{
"epoch": 1.875,
"grad_norm": 0.06477805270118185,
"learning_rate": 1.1872098385623586e-05,
"loss": 1.1887,
"step": 735
},
{
"epoch": 1.8877551020408163,
"grad_norm": 0.06497765016187285,
"learning_rate": 9.580211516551862e-06,
"loss": 1.2036,
"step": 740
},
{
"epoch": 1.9005102040816326,
"grad_norm": 0.06544507847669666,
"learning_rate": 7.531775634222138e-06,
"loss": 1.2224,
"step": 745
},
{
"epoch": 1.913265306122449,
"grad_norm": 0.06400465123455064,
"learning_rate": 5.727807609789471e-06,
"loss": 1.2099,
"step": 750
},
{
"epoch": 1.9260204081632653,
"grad_norm": 0.06135587896382674,
"learning_rate": 4.169202957272522e-06,
"loss": 1.1876,
"step": 755
},
{
"epoch": 1.9387755102040818,
"grad_norm": 0.06395336761154996,
"learning_rate": 2.856735389008269e-06,
"loss": 1.2315,
"step": 760
},
{
"epoch": 1.9515306122448979,
"grad_norm": 0.06406624485413773,
"learning_rate": 1.7910564315704035e-06,
"loss": 1.1941,
"step": 765
},
{
"epoch": 1.9642857142857144,
"grad_norm": 0.06823262080551822,
"learning_rate": 9.726951023434348e-07,
"loss": 1.2067,
"step": 770
},
{
"epoch": 1.9770408163265305,
"grad_norm": 0.06268227825632704,
"learning_rate": 4.020576469108139e-07,
"loss": 1.2145,
"step": 775
},
{
"epoch": 1.989795918367347,
"grad_norm": 0.06573014422017058,
"learning_rate": 7.942733738924845e-08,
"loss": 1.1867,
"step": 780
},
{
"epoch": 2.0,
"eval_loss": 1.8389793634414673,
"eval_runtime": 82.5875,
"eval_samples_per_second": 157.373,
"eval_steps_per_second": 4.928,
"step": 784
},
{
"epoch": 2.0,
"step": 784,
"total_flos": 173253108695040.0,
"train_loss": 1.2838877389321521,
"train_runtime": 2377.8575,
"train_samples_per_second": 42.195,
"train_steps_per_second": 0.33
}
],
"logging_steps": 5,
"max_steps": 784,
"num_input_tokens_seen": 0,
"num_train_epochs": 2,
"save_steps": 500,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": false,
"should_training_stop": false
},
"attributes": {}
}
},
"total_flos": 173253108695040.0,
"train_batch_size": 4,
"trial_name": null,
"trial_params": null
}