{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 3.0,
  "eval_steps": 500,
  "global_step": 2247,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.67,
      "grad_norm": 19.199373245239258,
      "learning_rate": 3.887405429461504e-05,
      "loss": 5.3472,
      "step": 500
    },
    {
      "epoch": 1.34,
      "grad_norm": 3.351874589920044,
      "learning_rate": 2.774810858923009e-05,
      "loss": 3.2429,
      "step": 1000
    },
    {
      "epoch": 2.0,
      "grad_norm": 3.8240840435028076,
      "learning_rate": 1.6622162883845125e-05,
      "loss": 2.4001,
      "step": 1500
    },
    {
      "epoch": 2.67,
      "grad_norm": 3.5159881114959717,
      "learning_rate": 5.4962171784601695e-06,
      "loss": 1.7989,
      "step": 2000
    },
    {
      "epoch": 3.0,
      "step": 2247,
      "total_flos": 6.282668952178917e+18,
      "train_loss": 3.034465251628908,
      "train_runtime": 6532.7764,
      "train_samples_per_second": 21.994,
      "train_steps_per_second": 0.344
    }
  ],
  "logging_steps": 500,
  "max_steps": 2247,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 3,
  "save_steps": 500,
  "total_flos": 6.282668952178917e+18,
  "train_batch_size": 8,
  "trial_name": null,
  "trial_params": null
}