{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 1.0,
  "eval_steps": 500,
  "global_step": 2205,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.045351473922902494,
      "grad_norm": 0.1351398527622223,
      "learning_rate": 6.666666666666667e-06,
      "loss": 2.3177,
      "step": 100
    },
    {
      "epoch": 0.09070294784580499,
      "grad_norm": 0.23727059364318848,
      "learning_rate": 1.3333333333333333e-05,
      "loss": 2.3049,
      "step": 200
    },
    {
      "epoch": 0.1360544217687075,
      "grad_norm": 0.30796971917152405,
      "learning_rate": 2e-05,
      "loss": 2.2541,
      "step": 300
    },
    {
      "epoch": 0.18140589569160998,
      "grad_norm": 0.3966788351535797,
      "learning_rate": 1.98643264153509e-05,
      "loss": 2.1959,
      "step": 400
    },
    {
      "epoch": 0.22675736961451248,
      "grad_norm": 0.4789985418319702,
      "learning_rate": 1.9460987125717907e-05,
      "loss": 2.1926,
      "step": 500
    },
    {
      "epoch": 0.272108843537415,
      "grad_norm": 0.535058856010437,
      "learning_rate": 1.8800926628551884e-05,
      "loss": 2.1152,
      "step": 600
    },
    {
      "epoch": 0.31746031746031744,
      "grad_norm": 0.567406415939331,
      "learning_rate": 1.790205547859999e-05,
      "loss": 2.076,
      "step": 700
    },
    {
      "epoch": 0.36281179138321995,
      "grad_norm": 0.7135646343231201,
      "learning_rate": 1.6788764290072554e-05,
      "loss": 2.0738,
      "step": 800
    },
    {
      "epoch": 0.40816326530612246,
      "grad_norm": 0.6722047328948975,
      "learning_rate": 1.549126190423073e-05,
      "loss": 2.054,
      "step": 900
    },
    {
      "epoch": 0.45351473922902497,
      "grad_norm": 0.8051442503929138,
      "learning_rate": 1.4044755681030094e-05,
      "loss": 2.0124,
      "step": 1000
    },
    {
      "epoch": 0.4988662131519274,
      "grad_norm": 0.6087419390678406,
      "learning_rate": 1.2488496157374425e-05,
      "loss": 2.0051,
      "step": 1100
    },
    {
      "epoch": 0.54421768707483,
      "grad_norm": 0.8336877226829529,
      "learning_rate": 1.0864711994907457e-05,
      "loss": 1.9954,
      "step": 1200
    },
    {
      "epoch": 0.5895691609977324,
      "grad_norm": 0.6950428485870361,
      "learning_rate": 9.217464117232859e-06,
      "loss": 2.0539,
      "step": 1300
    },
    {
      "epoch": 0.6349206349206349,
      "grad_norm": 0.6528784036636353,
      "learning_rate": 7.591450129224569e-06,
      "loss": 2.0042,
      "step": 1400
    },
    {
      "epoch": 0.6802721088435374,
      "grad_norm": 1.0925904512405396,
      "learning_rate": 6.03079146017113e-06,
      "loss": 2.0167,
      "step": 1500
    },
    {
      "epoch": 0.7256235827664399,
      "grad_norm": 0.7688515186309814,
      "learning_rate": 4.577836141281368e-06,
      "loss": 1.9946,
      "step": 1600
    },
    {
      "epoch": 0.7709750566893424,
      "grad_norm": 1.1727160215377808,
      "learning_rate": 3.2720097038450394e-06,
      "loss": 1.9847,
      "step": 1700
    },
    {
      "epoch": 0.8163265306122449,
      "grad_norm": 0.8815014362335205,
      "learning_rate": 2.1487453786014513e-06,
      "loss": 1.9598,
      "step": 1800
    },
    {
      "epoch": 0.8616780045351474,
      "grad_norm": 0.8077797889709473,
      "learning_rate": 1.2385226250534566e-06,
      "loss": 1.9814,
      "step": 1900
    },
    {
      "epoch": 0.9070294784580499,
      "grad_norm": 1.262542963027954,
      "learning_rate": 5.660400799616572e-07,
      "loss": 1.9466,
      "step": 2000
    },
    {
      "epoch": 0.9523809523809523,
      "grad_norm": 0.8123142123222351,
      "learning_rate": 1.495453668273672e-07,
      "loss": 1.9335,
      "step": 2100
    },
    {
      "epoch": 0.9977324263038548,
      "grad_norm": 0.8020434379577637,
      "learning_rate": 3.399517942515029e-10,
      "loss": 1.9826,
      "step": 2200
    },
    {
      "epoch": 1.0,
      "step": 2205,
      "total_flos": 4.00628735213568e+16,
      "train_loss": 2.065633855547224,
      "train_runtime": 693.7957,
      "train_samples_per_second": 6.355,
      "train_steps_per_second": 3.178
    }
  ],
  "logging_steps": 100,
  "max_steps": 2205,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 500,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 4.00628735213568e+16,
  "train_batch_size": 2,
  "trial_name": null,
  "trial_params": null
}