{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 0.9982363315696648,
"eval_steps": 500,
"global_step": 283,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.01763668430335097,
"grad_norm": 313.6099853515625,
"learning_rate": 2.5e-08,
"logits/chosen": 0.006098331417888403,
"logits/rejected": -0.07919329404830933,
"logps/chosen": -417.3211975097656,
"logps/rejected": -135.40716552734375,
"loss": 1.7577,
"rewards/accuracies": 0.30000001192092896,
"rewards/chosen": 0.018350066617131233,
"rewards/margins": 0.014638463035225868,
"rewards/rejected": 0.003711604978889227,
"step": 5
},
{
"epoch": 0.03527336860670194,
"grad_norm": 152.8399658203125,
"learning_rate": 5e-08,
"logits/chosen": -0.04923827201128006,
"logits/rejected": 0.0881669819355011,
"logps/chosen": -441.92706298828125,
"logps/rejected": -145.1701202392578,
"loss": 1.648,
"rewards/accuracies": 0.6000000238418579,
"rewards/chosen": 0.03151443973183632,
"rewards/margins": 0.04618922993540764,
"rewards/rejected": -0.014674797654151917,
"step": 10
},
{
"epoch": 0.05291005291005291,
"grad_norm": 191.66473388671875,
"learning_rate": 7.5e-08,
"logits/chosen": 0.14285080134868622,
"logits/rejected": -0.03589191287755966,
"logps/chosen": -468.4457092285156,
"logps/rejected": -96.72927856445312,
"loss": 1.7065,
"rewards/accuracies": 0.5,
"rewards/chosen": 0.019609758630394936,
"rewards/margins": 0.022998739033937454,
"rewards/rejected": -0.003388977376744151,
"step": 15
},
{
"epoch": 0.07054673721340388,
"grad_norm": 206.07550048828125,
"learning_rate": 1e-07,
"logits/chosen": -0.34465092420578003,
"logits/rejected": -0.06792903691530228,
"logps/chosen": -233.9963836669922,
"logps/rejected": -221.6174774169922,
"loss": 1.5938,
"rewards/accuracies": 0.6000000238418579,
"rewards/chosen": 0.02596401236951351,
"rewards/margins": 0.011774427257478237,
"rewards/rejected": 0.014189583249390125,
"step": 20
},
{
"epoch": 0.08818342151675485,
"grad_norm": 158.72621154785156,
"learning_rate": 1.25e-07,
"logits/chosen": 0.09270603954792023,
"logits/rejected": 0.11062409728765488,
"logps/chosen": -398.4977111816406,
"logps/rejected": -85.5314712524414,
"loss": 1.6147,
"rewards/accuracies": 0.800000011920929,
"rewards/chosen": 0.0831952691078186,
"rewards/margins": 0.09730525314807892,
"rewards/rejected": -0.014109982177615166,
"step": 25
},
{
"epoch": 0.10582010582010581,
"grad_norm": 194.82046508789062,
"learning_rate": 1.5e-07,
"logits/chosen": 0.04338628798723221,
"logits/rejected": 0.17773175239562988,
"logps/chosen": -314.41534423828125,
"logps/rejected": -133.28741455078125,
"loss": 1.6821,
"rewards/accuracies": 0.699999988079071,
"rewards/chosen": 0.06617891043424606,
"rewards/margins": 0.11408562958240509,
"rewards/rejected": -0.04790671542286873,
"step": 30
},
{
"epoch": 0.12345679012345678,
"grad_norm": 183.99908447265625,
"learning_rate": 1.75e-07,
"logits/chosen": -0.03513098135590553,
"logits/rejected": -5.73456272832118e-05,
"logps/chosen": -295.2171325683594,
"logps/rejected": -85.56248474121094,
"loss": 1.5639,
"rewards/accuracies": 0.800000011920929,
"rewards/chosen": 0.17612342536449432,
"rewards/margins": 0.19541290402412415,
"rewards/rejected": -0.019289476796984673,
"step": 35
},
{
"epoch": 0.14109347442680775,
"grad_norm": 162.05503845214844,
"learning_rate": 2e-07,
"logits/chosen": -0.010212594643235207,
"logits/rejected": 0.16527053713798523,
"logps/chosen": -347.5972595214844,
"logps/rejected": -94.78019714355469,
"loss": 1.471,
"rewards/accuracies": 0.6000000238418579,
"rewards/chosen": 0.32217487692832947,
"rewards/margins": 0.3966469168663025,
"rewards/rejected": -0.0744720846414566,
"step": 40
},
{
"epoch": 0.15873015873015872,
"grad_norm": 119.99495697021484,
"learning_rate": 2.25e-07,
"logits/chosen": 0.3875502943992615,
"logits/rejected": 0.2595987617969513,
"logps/chosen": -321.2315979003906,
"logps/rejected": -129.35592651367188,
"loss": 1.3912,
"rewards/accuracies": 0.800000011920929,
"rewards/chosen": 0.5000149607658386,
"rewards/margins": 0.5232937335968018,
"rewards/rejected": -0.02327878028154373,
"step": 45
},
{
"epoch": 0.1763668430335097,
"grad_norm": 95.13220977783203,
"learning_rate": 2.5e-07,
"logits/chosen": 0.10565916448831558,
"logits/rejected": -0.15445783734321594,
"logps/chosen": -673.9226684570312,
"logps/rejected": -129.2067413330078,
"loss": 1.2968,
"rewards/accuracies": 1.0,
"rewards/chosen": 1.5901581048965454,
"rewards/margins": 1.9689127206802368,
"rewards/rejected": -0.37875470519065857,
"step": 50
},
{
"epoch": 0.19400352733686066,
"grad_norm": 67.34782409667969,
"learning_rate": 2.75e-07,
"logits/chosen": 0.0776977464556694,
"logits/rejected": -0.024907944723963737,
"logps/chosen": -416.7149353027344,
"logps/rejected": -142.47470092773438,
"loss": 1.2292,
"rewards/accuracies": 0.699999988079071,
"rewards/chosen": 1.0576913356781006,
"rewards/margins": 1.5465197563171387,
"rewards/rejected": -0.4888283610343933,
"step": 55
},
{
"epoch": 0.21164021164021163,
"grad_norm": 59.450233459472656,
"learning_rate": 3e-07,
"logits/chosen": -0.05158572271466255,
"logits/rejected": -0.12746775150299072,
"logps/chosen": -312.43902587890625,
"logps/rejected": -169.53347778320312,
"loss": 1.2596,
"rewards/accuracies": 0.800000011920929,
"rewards/chosen": 0.9128485918045044,
"rewards/margins": 1.222939372062683,
"rewards/rejected": -0.3100907504558563,
"step": 60
},
{
"epoch": 0.2292768959435626,
"grad_norm": 74.42351531982422,
"learning_rate": 3.25e-07,
"logits/chosen": 0.04989933967590332,
"logits/rejected": -0.08158798515796661,
"logps/chosen": -489.686279296875,
"logps/rejected": -136.57095336914062,
"loss": 1.2247,
"rewards/accuracies": 1.0,
"rewards/chosen": 1.9382400512695312,
"rewards/margins": 2.6933512687683105,
"rewards/rejected": -0.755111575126648,
"step": 65
},
{
"epoch": 0.24691358024691357,
"grad_norm": 44.048831939697266,
"learning_rate": 3.5e-07,
"logits/chosen": 0.30019721388816833,
"logits/rejected": 0.19210687279701233,
"logps/chosen": -400.27288818359375,
"logps/rejected": -90.65263366699219,
"loss": 1.211,
"rewards/accuracies": 0.8999999761581421,
"rewards/chosen": 2.4682776927948,
"rewards/margins": 3.0848326683044434,
"rewards/rejected": -0.6165549159049988,
"step": 70
},
{
"epoch": 0.26455026455026454,
"grad_norm": 35.421875,
"learning_rate": 3.75e-07,
"logits/chosen": 0.16179195046424866,
"logits/rejected": 0.03494036942720413,
"logps/chosen": -433.01922607421875,
"logps/rejected": -96.19873046875,
"loss": 1.0681,
"rewards/accuracies": 1.0,
"rewards/chosen": 2.9752895832061768,
"rewards/margins": 3.858776569366455,
"rewards/rejected": -0.8834871053695679,
"step": 75
},
{
"epoch": 0.2821869488536155,
"grad_norm": 37.444759368896484,
"learning_rate": 4e-07,
"logits/chosen": -0.043452538549900055,
"logits/rejected": -0.13688185811042786,
"logps/chosen": -297.93115234375,
"logps/rejected": -123.9369888305664,
"loss": 1.0561,
"rewards/accuracies": 1.0,
"rewards/chosen": 2.215817928314209,
"rewards/margins": 3.3099753856658936,
"rewards/rejected": -1.0941574573516846,
"step": 80
},
{
"epoch": 0.2998236331569665,
"grad_norm": 45.99400329589844,
"learning_rate": 4.2499999999999995e-07,
"logits/chosen": 0.18330197036266327,
"logits/rejected": 0.04882228747010231,
"logps/chosen": -113.65718078613281,
"logps/rejected": -47.359718322753906,
"loss": 1.1666,
"rewards/accuracies": 1.0,
"rewards/chosen": 1.04226815700531,
"rewards/margins": 1.639715552330017,
"rewards/rejected": -0.5974472165107727,
"step": 85
},
{
"epoch": 0.31746031746031744,
"grad_norm": 31.48380470275879,
"learning_rate": 4.5e-07,
"logits/chosen": -0.0681629553437233,
"logits/rejected": -0.11063985526561737,
"logps/chosen": -347.83184814453125,
"logps/rejected": -224.0146484375,
"loss": 1.0203,
"rewards/accuracies": 0.8999999761581421,
"rewards/chosen": 2.0378246307373047,
"rewards/margins": 3.744539976119995,
"rewards/rejected": -1.7067155838012695,
"step": 90
},
{
"epoch": 0.3350970017636684,
"grad_norm": 39.78249740600586,
"learning_rate": 4.7499999999999995e-07,
"logits/chosen": 0.19453981518745422,
"logits/rejected": -0.143437460064888,
"logps/chosen": -426.54083251953125,
"logps/rejected": -144.83743286132812,
"loss": 1.0806,
"rewards/accuracies": 0.8999999761581421,
"rewards/chosen": 2.4049665927886963,
"rewards/margins": 3.890450954437256,
"rewards/rejected": -1.4854844808578491,
"step": 95
},
{
"epoch": 0.3527336860670194,
"grad_norm": 26.297332763671875,
"learning_rate": 5e-07,
"logits/chosen": 0.16438576579093933,
"logits/rejected": 0.04840268939733505,
"logps/chosen": -291.79705810546875,
"logps/rejected": -169.189453125,
"loss": 1.0066,
"rewards/accuracies": 1.0,
"rewards/chosen": 2.483132839202881,
"rewards/margins": 5.287341117858887,
"rewards/rejected": -2.804208278656006,
"step": 100
},
{
"epoch": 0.37037037037037035,
"grad_norm": 40.219547271728516,
"learning_rate": 4.990795908619189e-07,
"logits/chosen": -0.07876741886138916,
"logits/rejected": 0.008120670914649963,
"logps/chosen": -237.4617156982422,
"logps/rejected": -164.24118041992188,
"loss": 0.9724,
"rewards/accuracies": 0.8999999761581421,
"rewards/chosen": 1.506404161453247,
"rewards/margins": 3.349010467529297,
"rewards/rejected": -1.842606544494629,
"step": 105
},
{
"epoch": 0.3880070546737213,
"grad_norm": 24.741857528686523,
"learning_rate": 4.963251406715272e-07,
"logits/chosen": -0.18278034031391144,
"logits/rejected": -0.1590186357498169,
"logps/chosen": -178.89378356933594,
"logps/rejected": -173.58485412597656,
"loss": 0.9763,
"rewards/accuracies": 1.0,
"rewards/chosen": 1.4526221752166748,
"rewards/margins": 4.451180934906006,
"rewards/rejected": -2.998558759689331,
"step": 110
},
{
"epoch": 0.4056437389770723,
"grad_norm": 17.86903953552246,
"learning_rate": 4.917569311978301e-07,
"logits/chosen": -0.2192661464214325,
"logits/rejected": -0.09564487636089325,
"logps/chosen": -314.15679931640625,
"logps/rejected": -145.6080780029297,
"loss": 0.9777,
"rewards/accuracies": 1.0,
"rewards/chosen": 3.0302603244781494,
"rewards/margins": 5.838948726654053,
"rewards/rejected": -2.808687686920166,
"step": 115
},
{
"epoch": 0.42328042328042326,
"grad_norm": 16.235734939575195,
"learning_rate": 4.854085994147814e-07,
"logits/chosen": -0.14865146577358246,
"logits/rejected": -0.02487100474536419,
"logps/chosen": -530.9991455078125,
"logps/rejected": -203.55032348632812,
"loss": 0.9739,
"rewards/accuracies": 0.8999999761581421,
"rewards/chosen": 5.349178314208984,
"rewards/margins": 7.101385593414307,
"rewards/rejected": -1.752206802368164,
"step": 120
},
{
"epoch": 0.4409171075837742,
"grad_norm": 28.23626708984375,
"learning_rate": 4.773268898230589e-07,
"logits/chosen": 0.12143270671367645,
"logits/rejected": 0.09356644004583359,
"logps/chosen": -458.343994140625,
"logps/rejected": -188.42982482910156,
"loss": 0.9146,
"rewards/accuracies": 1.0,
"rewards/chosen": 4.742281436920166,
"rewards/margins": 8.858478546142578,
"rewards/rejected": -4.116196632385254,
"step": 125
},
{
"epoch": 0.4585537918871252,
"grad_norm": 19.614173889160156,
"learning_rate": 4.675713102575388e-07,
"logits/chosen": -0.09454023838043213,
"logits/rejected": 0.16691946983337402,
"logps/chosen": -118.05609130859375,
"logps/rejected": -79.45272827148438,
"loss": 0.941,
"rewards/accuracies": 1.0,
"rewards/chosen": 1.4626652002334595,
"rewards/margins": 3.1161253452301025,
"rewards/rejected": -1.653460144996643,
"step": 130
},
{
"epoch": 0.47619047619047616,
"grad_norm": 14.55256462097168,
"learning_rate": 4.562136937148561e-07,
"logits/chosen": -0.11436698585748672,
"logits/rejected": 0.10679924488067627,
"logps/chosen": -355.46209716796875,
"logps/rejected": -118.79722595214844,
"loss": 0.8758,
"rewards/accuracies": 1.0,
"rewards/chosen": 4.005434989929199,
"rewards/margins": 6.715737342834473,
"rewards/rejected": -2.7103025913238525,
"step": 135
},
{
"epoch": 0.49382716049382713,
"grad_norm": 21.749248504638672,
"learning_rate": 4.433376694274324e-07,
"logits/chosen": -0.10040668398141861,
"logits/rejected": -0.26842421293258667,
"logps/chosen": -404.2080383300781,
"logps/rejected": -137.44854736328125,
"loss": 0.9303,
"rewards/accuracies": 1.0,
"rewards/chosen": 5.164219856262207,
"rewards/margins": 7.909791469573975,
"rewards/rejected": -2.7455716133117676,
"step": 140
},
{
"epoch": 0.5114638447971781,
"grad_norm": 8.761198043823242,
"learning_rate": 4.290380470785983e-07,
"logits/chosen": -0.06308789551258087,
"logits/rejected": -0.15449562668800354,
"logps/chosen": -272.6799621582031,
"logps/rejected": -112.20796966552734,
"loss": 0.8314,
"rewards/accuracies": 0.8999999761581421,
"rewards/chosen": 3.2221832275390625,
"rewards/margins": 5.653393745422363,
"rewards/rejected": -2.431210517883301,
"step": 145
},
{
"epoch": 0.5291005291005291,
"grad_norm": 22.968664169311523,
"learning_rate": 4.134201186930015e-07,
"logits/chosen": -0.1430361270904541,
"logits/rejected": -0.08517652004957199,
"logps/chosen": -286.87591552734375,
"logps/rejected": -101.37379455566406,
"loss": 0.8328,
"rewards/accuracies": 1.0,
"rewards/chosen": 3.647578716278076,
"rewards/margins": 5.6754045486450195,
"rewards/rejected": -2.0278255939483643,
"step": 150
},
{
"epoch": 0.54673721340388,
"grad_norm": 19.0902042388916,
"learning_rate": 3.9659888334267386e-07,
"logits/chosen": -0.19678017497062683,
"logits/rejected": -0.011279584839940071,
"logps/chosen": -276.16937255859375,
"logps/rejected": -126.4999771118164,
"loss": 0.8527,
"rewards/accuracies": 1.0,
"rewards/chosen": 4.482155799865723,
"rewards/margins": 8.302606582641602,
"rewards/rejected": -3.8204503059387207,
"step": 155
},
{
"epoch": 0.564373897707231,
"grad_norm": 12.187582969665527,
"learning_rate": 3.786982003774577e-07,
"logits/chosen": 0.0009343802812509239,
"logits/rejected": -0.1947936862707138,
"logps/chosen": -373.61822509765625,
"logps/rejected": -213.337890625,
"loss": 0.8252,
"rewards/accuracies": 1.0,
"rewards/chosen": 3.5612094402313232,
"rewards/margins": 8.042330741882324,
"rewards/rejected": -4.481120586395264,
"step": 160
},
{
"epoch": 0.582010582010582,
"grad_norm": 51.25554656982422,
"learning_rate": 3.598498774147853e-07,
"logits/chosen": -0.3127750754356384,
"logits/rejected": -0.03805520385503769,
"logps/chosen": -177.94229125976562,
"logps/rejected": -103.98043060302734,
"loss": 0.9408,
"rewards/accuracies": 1.0,
"rewards/chosen": 2.9349114894866943,
"rewards/margins": 6.028637886047363,
"rewards/rejected": -3.0937259197235107,
"step": 165
},
{
"epoch": 0.599647266313933,
"grad_norm": 13.361839294433594,
"learning_rate": 3.4019269980419587e-07,
"logits/chosen": -0.06776843965053558,
"logits/rejected": -0.017381291836500168,
"logps/chosen": -186.94850158691406,
"logps/rejected": -163.2671661376953,
"loss": 0.8766,
"rewards/accuracies": 1.0,
"rewards/chosen": 2.637651205062866,
"rewards/margins": 7.637650489807129,
"rewards/rejected": -4.999998569488525,
"step": 170
},
{
"epoch": 0.6172839506172839,
"grad_norm": 29.0302734375,
"learning_rate": 3.1987140871290236e-07,
"logits/chosen": -0.04399419575929642,
"logits/rejected": -0.03328068181872368,
"logps/chosen": -292.4731140136719,
"logps/rejected": -89.1150894165039,
"loss": 0.9147,
"rewards/accuracies": 1.0,
"rewards/chosen": 4.025395393371582,
"rewards/margins": 6.317913055419922,
"rewards/rejected": -2.292518377304077,
"step": 175
},
{
"epoch": 0.6349206349206349,
"grad_norm": 15.38488483428955,
"learning_rate": 2.990356353570492e-07,
"logits/chosen": 0.24281856417655945,
"logits/rejected": -0.12793979048728943,
"logps/chosen": -445.99652099609375,
"logps/rejected": -129.5789794921875,
"loss": 0.9398,
"rewards/accuracies": 1.0,
"rewards/chosen": 6.02224063873291,
"rewards/margins": 8.879093170166016,
"rewards/rejected": -2.8568522930145264,
"step": 180
},
{
"epoch": 0.6525573192239859,
"grad_norm": 8.789401054382324,
"learning_rate": 2.778387992262022e-07,
"logits/chosen": -0.10941989719867706,
"logits/rejected": -0.03445356339216232,
"logps/chosen": -265.72650146484375,
"logps/rejected": -142.39154052734375,
"loss": 0.7644,
"rewards/accuracies": 1.0,
"rewards/chosen": 4.23318338394165,
"rewards/margins": 7.7108025550842285,
"rewards/rejected": -3.47761869430542,
"step": 185
},
{
"epoch": 0.6701940035273368,
"grad_norm": 11.12884521484375,
"learning_rate": 2.5643697841374715e-07,
"logits/chosen": -0.22050127387046814,
"logits/rejected": -0.006003641989082098,
"logps/chosen": -377.9897766113281,
"logps/rejected": -167.70004272460938,
"loss": 0.8729,
"rewards/accuracies": 1.0,
"rewards/chosen": 6.1067938804626465,
"rewards/margins": 10.21866512298584,
"rewards/rejected": -4.111870765686035,
"step": 190
},
{
"epoch": 0.6878306878306878,
"grad_norm": 15.367615699768066,
"learning_rate": 2.3498776037126292e-07,
"logits/chosen": -0.11942657083272934,
"logits/rejected": -0.023708079010248184,
"logps/chosen": -263.35870361328125,
"logps/rejected": -152.2161865234375,
"loss": 0.8002,
"rewards/accuracies": 1.0,
"rewards/chosen": 4.137556552886963,
"rewards/margins": 8.01561450958252,
"rewards/rejected": -3.8780579566955566,
"step": 195
},
{
"epoch": 0.7054673721340388,
"grad_norm": 15.346793174743652,
"learning_rate": 2.1364908154907753e-07,
"logits/chosen": -0.012997478246688843,
"logits/rejected": 0.020813340321183205,
"logps/chosen": -462.3680114746094,
"logps/rejected": -113.67984771728516,
"loss": 0.9915,
"rewards/accuracies": 1.0,
"rewards/chosen": 5.704432487487793,
"rewards/margins": 8.906639099121094,
"rewards/rejected": -3.20220685005188,
"step": 200
},
{
"epoch": 0.7231040564373897,
"grad_norm": 37.951595306396484,
"learning_rate": 1.9257806446705112e-07,
"logits/chosen": -0.010699939914047718,
"logits/rejected": -0.14113858342170715,
"logps/chosen": -371.30853271484375,
"logps/rejected": -145.60171508789062,
"loss": 0.958,
"rewards/accuracies": 1.0,
"rewards/chosen": 4.4081010818481445,
"rewards/margins": 7.01975154876709,
"rewards/rejected": -2.6116511821746826,
"step": 205
},
{
"epoch": 0.7407407407407407,
"grad_norm": 15.26896858215332,
"learning_rate": 1.7192986077855134e-07,
"logits/chosen": -0.31857413053512573,
"logits/rejected": -0.008572896011173725,
"logps/chosen": -245.90115356445312,
"logps/rejected": -154.51458740234375,
"loss": 0.8198,
"rewards/accuracies": 1.0,
"rewards/chosen": 4.508225917816162,
"rewards/margins": 10.117998123168945,
"rewards/rejected": -5.609771728515625,
"step": 210
},
{
"epoch": 0.7583774250440917,
"grad_norm": 19.904035568237305,
"learning_rate": 1.5185650884645707e-07,
"logits/chosen": 0.01754247210919857,
"logits/rejected": -0.027603263035416603,
"logps/chosen": -195.20726013183594,
"logps/rejected": -76.150634765625,
"loss": 0.9146,
"rewards/accuracies": 1.0,
"rewards/chosen": 3.2162060737609863,
"rewards/margins": 5.500412464141846,
"rewards/rejected": -2.2842061519622803,
"step": 215
},
{
"epoch": 0.7760141093474426,
"grad_norm": 15.354774475097656,
"learning_rate": 1.3250581424317008e-07,
"logits/chosen": -0.17631444334983826,
"logits/rejected": -0.01371079497039318,
"logps/chosen": -426.755126953125,
"logps/rejected": -177.1373291015625,
"loss": 0.8712,
"rewards/accuracies": 1.0,
"rewards/chosen": 5.517136573791504,
"rewards/margins": 10.632292747497559,
"rewards/rejected": -5.115156650543213,
"step": 220
},
{
"epoch": 0.7936507936507936,
"grad_norm": 11.204190254211426,
"learning_rate": 1.1402026141781885e-07,
"logits/chosen": 0.042705245316028595,
"logits/rejected": -0.19707567989826202,
"logps/chosen": -306.9375,
"logps/rejected": -97.74362182617188,
"loss": 0.8048,
"rewards/accuracies": 1.0,
"rewards/chosen": 4.973811626434326,
"rewards/margins": 8.320436477661133,
"rewards/rejected": -3.346625566482544,
"step": 225
},
{
"epoch": 0.8112874779541446,
"grad_norm": 12.703167915344238,
"learning_rate": 9.653596454434698e-08,
"logits/chosen": -0.06526315212249756,
"logits/rejected": -0.2236555516719818,
"logps/chosen": -364.25958251953125,
"logps/rejected": -188.89682006835938,
"loss": 0.8502,
"rewards/accuracies": 1.0,
"rewards/chosen": 4.544473648071289,
"rewards/margins": 9.414628028869629,
"rewards/rejected": -4.87015438079834,
"step": 230
},
{
"epoch": 0.8289241622574955,
"grad_norm": 16.88726043701172,
"learning_rate": 8.018166527567672e-08,
"logits/chosen": 0.06473386287689209,
"logits/rejected": 0.16756504774093628,
"logps/chosen": -207.73611450195312,
"logps/rejected": -124.5618667602539,
"loss": 0.9822,
"rewards/accuracies": 1.0,
"rewards/chosen": 3.6486001014709473,
"rewards/margins": 7.799807548522949,
"rewards/rejected": -4.15120792388916,
"step": 235
},
{
"epoch": 0.8465608465608465,
"grad_norm": 11.667330741882324,
"learning_rate": 6.507778478375833e-08,
"logits/chosen": 0.14386427402496338,
"logits/rejected": 0.25987157225608826,
"logps/chosen": -388.20904541015625,
"logps/rejected": -165.959228515625,
"loss": 0.9145,
"rewards/accuracies": 0.8999999761581421,
"rewards/chosen": 4.7357916831970215,
"rewards/margins": 9.261954307556152,
"rewards/rejected": -4.526162624359131,
"step": 240
},
{
"epoch": 0.8641975308641975,
"grad_norm": 27.944272994995117,
"learning_rate": 5.133553706559371e-08,
"logits/chosen": 0.10739991813898087,
"logits/rejected": -0.0430210679769516,
"logps/chosen": -274.6513671875,
"logps/rejected": -180.67349243164062,
"loss": 0.8558,
"rewards/accuracies": 1.0,
"rewards/chosen": 3.874281406402588,
"rewards/margins": 7.771068572998047,
"rewards/rejected": -3.89678692817688,
"step": 245
},
{
"epoch": 0.8818342151675485,
"grad_norm": 23.233898162841797,
"learning_rate": 3.905611004420359e-08,
"logits/chosen": -0.22658737003803253,
"logits/rejected": -0.14710170030593872,
"logps/chosen": -131.2843017578125,
"logps/rejected": -124.34599304199219,
"loss": 0.8206,
"rewards/accuracies": 1.0,
"rewards/chosen": 2.8018622398376465,
"rewards/margins": 7.4149651527404785,
"rewards/rejected": -4.613102912902832,
"step": 250
},
{
"epoch": 0.8994708994708994,
"grad_norm": 12.460490226745605,
"learning_rate": 2.832992049431496e-08,
"logits/chosen": 0.2131236046552658,
"logits/rejected": -0.0388239324092865,
"logps/chosen": -321.56207275390625,
"logps/rejected": -100.82319641113281,
"loss": 0.846,
"rewards/accuracies": 1.0,
"rewards/chosen": 4.911746025085449,
"rewards/margins": 7.102486610412598,
"rewards/rejected": -2.1907403469085693,
"step": 255
},
{
"epoch": 0.9171075837742504,
"grad_norm": 48.2673454284668,
"learning_rate": 1.9235948278956e-08,
"logits/chosen": 0.05348904803395271,
"logits/rejected": 0.0030027092434465885,
"logps/chosen": -299.47393798828125,
"logps/rejected": -140.73562622070312,
"loss": 0.8814,
"rewards/accuracies": 1.0,
"rewards/chosen": 5.053894996643066,
"rewards/margins": 9.666501998901367,
"rewards/rejected": -4.612607479095459,
"step": 260
},
{
"epoch": 0.9347442680776014,
"grad_norm": 10.55972671508789,
"learning_rate": 1.1841154799154373e-08,
"logits/chosen": -0.21329502761363983,
"logits/rejected": -0.05265168100595474,
"logps/chosen": -287.35333251953125,
"logps/rejected": -118.7051010131836,
"loss": 0.8196,
"rewards/accuracies": 1.0,
"rewards/chosen": 3.7074661254882812,
"rewards/margins": 6.694541931152344,
"rewards/rejected": -2.9870758056640625,
"step": 265
},
{
"epoch": 0.9523809523809523,
"grad_norm": 9.531179428100586,
"learning_rate": 6.199989938854372e-09,
"logits/chosen": -0.15476630628108978,
"logits/rejected": -0.022993747144937515,
"logps/chosen": -351.3866882324219,
"logps/rejected": -196.15646362304688,
"loss": 0.8531,
"rewards/accuracies": 1.0,
"rewards/chosen": 5.175906658172607,
"rewards/margins": 11.09540843963623,
"rewards/rejected": -5.919501304626465,
"step": 270
},
{
"epoch": 0.9700176366843033,
"grad_norm": 24.5665283203125,
"learning_rate": 2.353991135550765e-09,
"logits/chosen": -0.05437609553337097,
"logits/rejected": 0.054326556622982025,
"logps/chosen": -443.97576904296875,
"logps/rejected": -88.25070190429688,
"loss": 0.863,
"rewards/accuracies": 1.0,
"rewards/chosen": 5.58223819732666,
"rewards/margins": 6.808541297912598,
"rewards/rejected": -1.2263023853302002,
"step": 275
},
{
"epoch": 0.9876543209876543,
"grad_norm": 10.516907691955566,
"learning_rate": 3.314775287923677e-10,
"logits/chosen": -0.15406222641468048,
"logits/rejected": -0.10987401008605957,
"logps/chosen": -392.3808288574219,
"logps/rejected": -206.12026977539062,
"loss": 0.8295,
"rewards/accuracies": 0.8999999761581421,
"rewards/chosen": 6.4746246337890625,
"rewards/margins": 11.878049850463867,
"rewards/rejected": -5.403425216674805,
"step": 280
},
{
"epoch": 0.9982363315696648,
"step": 283,
"total_flos": 1.8439707941247386e+17,
"train_loss": 1.050797831464572,
"train_runtime": 884.6581,
"train_samples_per_second": 5.127,
"train_steps_per_second": 0.32
}
],
"logging_steps": 5,
"max_steps": 283,
"num_input_tokens_seen": 0,
"num_train_epochs": 1,
"save_steps": 100,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 1.8439707941247386e+17,
"train_batch_size": 1,
"trial_name": null,
"trial_params": null
}