{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 0.2505592841163311,
  "eval_steps": 45,
  "global_step": 56,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.0044742729306487695,
      "grad_norm": 0.056262753903865814,
      "learning_rate": 2.5e-06,
      "loss": 2.2246,
      "step": 1
    },
    {
      "epoch": 0.0044742729306487695,
      "eval_loss": 2.4987943172454834,
      "eval_runtime": 289.9018,
      "eval_samples_per_second": 0.635,
      "eval_steps_per_second": 0.635,
      "step": 1
    },
    {
      "epoch": 0.008948545861297539,
      "grad_norm": 0.05820031091570854,
      "learning_rate": 5e-06,
      "loss": 2.2253,
      "step": 2
    },
    {
      "epoch": 0.013422818791946308,
      "grad_norm": 0.05092893913388252,
      "learning_rate": 7.5e-06,
      "loss": 2.2124,
      "step": 3
    },
    {
      "epoch": 0.017897091722595078,
      "grad_norm": 0.06304118037223816,
      "learning_rate": 1e-05,
      "loss": 2.2923,
      "step": 4
    },
    {
      "epoch": 0.02237136465324385,
      "grad_norm": 0.05285007879137993,
      "learning_rate": 1.25e-05,
      "loss": 2.2454,
      "step": 5
    },
    {
      "epoch": 0.026845637583892617,
      "grad_norm": 0.05117042735219002,
      "learning_rate": 1.5e-05,
      "loss": 2.2598,
      "step": 6
    },
    {
      "epoch": 0.03131991051454139,
      "grad_norm": 0.05469581112265587,
      "learning_rate": 1.75e-05,
      "loss": 2.2928,
      "step": 7
    },
    {
      "epoch": 0.035794183445190156,
      "grad_norm": 0.060069695115089417,
      "learning_rate": 2e-05,
      "loss": 2.0551,
      "step": 8
    },
    {
      "epoch": 0.040268456375838924,
      "grad_norm": 0.06096857041120529,
      "learning_rate": 2.25e-05,
      "loss": 2.3018,
      "step": 9
    },
    {
      "epoch": 0.0447427293064877,
      "grad_norm": 0.06183759495615959,
      "learning_rate": 2.5e-05,
      "loss": 2.1434,
      "step": 10
    },
    {
      "epoch": 0.049217002237136466,
      "grad_norm": 0.05740294232964516,
      "learning_rate": 2.7500000000000004e-05,
      "loss": 2.2667,
      "step": 11
    },
    {
      "epoch": 0.053691275167785234,
      "grad_norm": 0.05711159110069275,
      "learning_rate": 3e-05,
      "loss": 2.0616,
      "step": 12
    },
    {
      "epoch": 0.058165548098434,
      "grad_norm": 0.05979042127728462,
      "learning_rate": 3.2500000000000004e-05,
      "loss": 2.2919,
      "step": 13
    },
    {
      "epoch": 0.06263982102908278,
      "grad_norm": 0.06278480589389801,
      "learning_rate": 3.5e-05,
      "loss": 2.1027,
      "step": 14
    },
    {
      "epoch": 0.06711409395973154,
      "grad_norm": 0.05609359219670296,
      "learning_rate": 3.7500000000000003e-05,
      "loss": 2.2461,
      "step": 15
    },
    {
      "epoch": 0.07158836689038031,
      "grad_norm": 0.0614086352288723,
      "learning_rate": 4e-05,
      "loss": 2.2855,
      "step": 16
    },
    {
      "epoch": 0.07606263982102908,
      "grad_norm": 0.06281686574220657,
      "learning_rate": 4.25e-05,
      "loss": 2.2511,
      "step": 17
    },
    {
      "epoch": 0.08053691275167785,
      "grad_norm": 0.06691695749759674,
      "learning_rate": 4.5e-05,
      "loss": 2.1579,
      "step": 18
    },
    {
      "epoch": 0.08501118568232663,
      "grad_norm": 0.06627006828784943,
      "learning_rate": 4.75e-05,
      "loss": 2.194,
      "step": 19
    },
    {
      "epoch": 0.0894854586129754,
      "grad_norm": 0.07224016636610031,
      "learning_rate": 5e-05,
      "loss": 2.3305,
      "step": 20
    },
    {
      "epoch": 0.09395973154362416,
      "grad_norm": 0.060522738844156265,
      "learning_rate": 4.9997305665425414e-05,
      "loss": 2.2927,
      "step": 21
    },
    {
      "epoch": 0.09843400447427293,
      "grad_norm": 0.05773790180683136,
      "learning_rate": 4.9989223306985084e-05,
      "loss": 2.2183,
      "step": 22
    },
    {
      "epoch": 0.1029082774049217,
      "grad_norm": 0.05926959961652756,
      "learning_rate": 4.9975754860374825e-05,
      "loss": 2.1042,
      "step": 23
    },
    {
      "epoch": 0.10738255033557047,
      "grad_norm": 0.06636251509189606,
      "learning_rate": 4.99569035512392e-05,
      "loss": 2.245,
      "step": 24
    },
    {
      "epoch": 0.11185682326621924,
      "grad_norm": 0.055921752005815506,
      "learning_rate": 4.993267389439901e-05,
      "loss": 2.1514,
      "step": 25
    },
    {
      "epoch": 0.116331096196868,
      "grad_norm": 0.05159250274300575,
      "learning_rate": 4.9903071692770007e-05,
      "loss": 2.3138,
      "step": 26
    },
    {
      "epoch": 0.12080536912751678,
      "grad_norm": 0.05444907769560814,
      "learning_rate": 4.98681040359731e-05,
      "loss": 2.177,
      "step": 27
    },
    {
      "epoch": 0.12527964205816555,
      "grad_norm": 0.053689103573560715,
      "learning_rate": 4.982777929863643e-05,
      "loss": 2.1221,
      "step": 28
    },
    {
      "epoch": 0.1297539149888143,
      "grad_norm": 0.052982673048973083,
      "learning_rate": 4.978210713838971e-05,
      "loss": 2.1504,
      "step": 29
    },
    {
      "epoch": 0.1342281879194631,
      "grad_norm": 0.05092277005314827,
      "learning_rate": 4.9731098493551194e-05,
      "loss": 2.2196,
      "step": 30
    },
    {
      "epoch": 0.13870246085011187,
      "grad_norm": 0.05224297195672989,
      "learning_rate": 4.9674765580508025e-05,
      "loss": 2.1602,
      "step": 31
    },
    {
      "epoch": 0.14317673378076062,
      "grad_norm": 0.05033150315284729,
      "learning_rate": 4.961312189079045e-05,
      "loss": 2.057,
      "step": 32
    },
    {
      "epoch": 0.1476510067114094,
      "grad_norm": 0.052374593913555145,
      "learning_rate": 4.9546182187840656e-05,
      "loss": 2.0742,
      "step": 33
    },
    {
      "epoch": 0.15212527964205816,
      "grad_norm": 0.050675470381975174,
      "learning_rate": 4.947396250347695e-05,
      "loss": 2.2151,
      "step": 34
    },
    {
      "epoch": 0.15659955257270694,
      "grad_norm": 0.05398967117071152,
      "learning_rate": 4.939648013405423e-05,
      "loss": 2.1598,
      "step": 35
    },
    {
      "epoch": 0.1610738255033557,
      "grad_norm": 0.051805514842271805,
      "learning_rate": 4.9313753636321555e-05,
      "loss": 2.169,
      "step": 36
    },
    {
      "epoch": 0.16554809843400448,
      "grad_norm": 0.04822838678956032,
      "learning_rate": 4.9225802822977866e-05,
      "loss": 2.1082,
      "step": 37
    },
    {
      "epoch": 0.17002237136465326,
      "grad_norm": 0.05128757655620575,
      "learning_rate": 4.9132648757926907e-05,
      "loss": 2.2767,
      "step": 38
    },
    {
      "epoch": 0.174496644295302,
      "grad_norm": 0.050592970103025436,
      "learning_rate": 4.9034313751232526e-05,
      "loss": 2.2146,
      "step": 39
    },
    {
      "epoch": 0.1789709172259508,
      "grad_norm": 0.052525170147418976,
      "learning_rate": 4.8930821353775474e-05,
      "loss": 2.2353,
      "step": 40
    },
    {
      "epoch": 0.18344519015659955,
      "grad_norm": 0.050297219306230545,
      "learning_rate": 4.882219635161306e-05,
      "loss": 2.1571,
      "step": 41
    },
    {
      "epoch": 0.18791946308724833,
      "grad_norm": 0.05148237198591232,
      "learning_rate": 4.870846476004296e-05,
      "loss": 2.2254,
      "step": 42
    },
    {
      "epoch": 0.19239373601789708,
      "grad_norm": 0.052405666559934616,
      "learning_rate": 4.8589653817372684e-05,
      "loss": 2.2464,
      "step": 43
    },
    {
      "epoch": 0.19686800894854586,
      "grad_norm": 0.051612552255392075,
      "learning_rate": 4.8465791978396077e-05,
      "loss": 2.2005,
      "step": 44
    },
    {
      "epoch": 0.20134228187919462,
      "grad_norm": 0.054552752524614334,
      "learning_rate": 4.833690890757847e-05,
      "loss": 2.1034,
      "step": 45
    },
    {
      "epoch": 0.20134228187919462,
      "eval_loss": 2.4256694316864014,
      "eval_runtime": 289.4928,
      "eval_samples_per_second": 0.636,
      "eval_steps_per_second": 0.636,
      "step": 45
    },
    {
      "epoch": 0.2058165548098434,
      "grad_norm": 0.05522807687520981,
      "learning_rate": 4.8203035471952225e-05,
      "loss": 2.2148,
      "step": 46
    },
    {
      "epoch": 0.21029082774049218,
      "grad_norm": 0.05309893563389778,
      "learning_rate": 4.80642037337241e-05,
      "loss": 2.2054,
      "step": 47
    },
    {
      "epoch": 0.21476510067114093,
      "grad_norm": 0.05665770173072815,
      "learning_rate": 4.7920446942596535e-05,
      "loss": 2.1856,
      "step": 48
    },
    {
      "epoch": 0.21923937360178972,
      "grad_norm": 0.051858678460121155,
      "learning_rate": 4.777179952780443e-05,
      "loss": 2.1558,
      "step": 49
    },
    {
      "epoch": 0.22371364653243847,
      "grad_norm": 0.05269116908311844,
      "learning_rate": 4.761829708986949e-05,
      "loss": 2.1143,
      "step": 50
    },
    {
      "epoch": 0.22818791946308725,
      "grad_norm": 0.05275040119886398,
      "learning_rate": 4.7459976392074e-05,
      "loss": 2.1701,
      "step": 51
    },
    {
      "epoch": 0.232662192393736,
      "grad_norm": 0.051428090780973434,
      "learning_rate": 4.729687535165618e-05,
      "loss": 2.1786,
      "step": 52
    },
    {
      "epoch": 0.2371364653243848,
      "grad_norm": 0.05071357637643814,
      "learning_rate": 4.712903303072913e-05,
      "loss": 1.9975,
      "step": 53
    },
    {
      "epoch": 0.24161073825503357,
      "grad_norm": 0.05553808808326721,
      "learning_rate": 4.695648962692559e-05,
      "loss": 2.086,
      "step": 54
    },
    {
      "epoch": 0.24608501118568232,
      "grad_norm": 0.056803636252880096,
      "learning_rate": 4.677928646377076e-05,
      "loss": 2.0821,
      "step": 55
    },
    {
      "epoch": 0.2505592841163311,
      "grad_norm": 0.056700658053159714,
      "learning_rate": 4.659746598078546e-05,
      "loss": 2.2197,
      "step": 56
    }
  ],
  "logging_steps": 1,
  "max_steps": 223,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 56,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": false
      },
      "attributes": {}
    }
  },
  "total_flos": 3.3790704163907174e+17,
  "train_batch_size": 8,
  "trial_name": null,
  "trial_params": null
}