{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 0.9826435246995995,
  "eval_steps": 3,
  "global_step": 46,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.06408544726301736,
      "grad_norm": 0.0,
      "learning_rate": 0.0,
      "loss": 1.0852,
      "step": 3
    },
    {
      "epoch": 0.12817089452603472,
      "grad_norm": 0.0,
      "learning_rate": 0.0,
      "loss": 1.0936,
      "step": 6
    },
    {
      "epoch": 0.19225634178905207,
      "grad_norm": 0.0,
      "learning_rate": 0.0,
      "loss": 1.0951,
      "step": 9
    },
    {
      "epoch": 0.25634178905206945,
      "grad_norm": 0.0,
      "learning_rate": 0.0,
      "loss": 1.0732,
      "step": 12
    },
    {
      "epoch": 0.3204272363150868,
      "grad_norm": 0.253641277551651,
      "learning_rate": 0.001414213562373095,
      "loss": 1.0803,
      "step": 15
    },
    {
      "epoch": 0.38451268357810414,
      "grad_norm": 0.3905181288719177,
      "learning_rate": 0.0011547005383792518,
      "loss": 1.2341,
      "step": 18
    },
    {
      "epoch": 0.4485981308411215,
      "grad_norm": 0.34546247124671936,
      "learning_rate": 0.0008164965809277262,
      "loss": 1.0057,
      "step": 21
    },
    {
      "epoch": 0.5126835781041389,
      "grad_norm": 0.2852560877799988,
      "learning_rate": 0.0006666666666666666,
      "loss": 0.7892,
      "step": 24
    },
    {
      "epoch": 0.5767690253671562,
      "grad_norm": 0.23831070959568024,
      "learning_rate": 0.0005773502691896259,
      "loss": 0.769,
      "step": 27
    },
    {
      "epoch": 0.6408544726301736,
      "grad_norm": 0.24124804139137268,
      "learning_rate": 0.0005163977794943222,
      "loss": 0.7525,
      "step": 30
    },
    {
      "epoch": 0.7049399198931909,
      "grad_norm": 0.23033007979393005,
      "learning_rate": 0.00047140452079103175,
      "loss": 0.7335,
      "step": 33
    },
    {
      "epoch": 0.7690253671562083,
      "grad_norm": 0.20133820176124573,
      "learning_rate": 0.0004364357804719848,
      "loss": 0.7292,
      "step": 36
    },
    {
      "epoch": 0.8331108144192256,
      "grad_norm": 0.22399117052555084,
      "learning_rate": 0.0004082482904638631,
      "loss": 0.7211,
      "step": 39
    },
    {
      "epoch": 0.897196261682243,
      "grad_norm": 0.2745206654071808,
      "learning_rate": 0.00038490017945975053,
      "loss": 0.6917,
      "step": 42
    },
    {
      "epoch": 0.9612817089452603,
      "grad_norm": 0.20307739078998566,
      "learning_rate": 0.00036514837167011074,
      "loss": 0.7122,
      "step": 45
    },
    {
      "epoch": 0.9826435246995995,
      "step": 46,
      "total_flos": 4.671973340489974e+17,
      "train_loss": 0.8987806633762692,
      "train_runtime": 500.6131,
      "train_samples_per_second": 11.961,
      "train_steps_per_second": 0.092
    }
  ],
  "logging_steps": 3,
  "max_steps": 46,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 3,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 4.671973340489974e+17,
  "train_batch_size": 2,
  "trial_name": null,
  "trial_params": null
}