{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 2.932862190812721,
  "eval_steps": 500,
  "global_step": 105,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.1413427561837456,
      "grad_norm": 1.3223986625671387,
      "learning_rate": 4.972077065562821e-05,
      "loss": 0.5319,
      "num_input_tokens_seen": 89360,
      "step": 5
    },
    {
      "epoch": 0.2826855123674912,
      "grad_norm": 1.8465224504470825,
      "learning_rate": 4.888932014465352e-05,
      "loss": 0.346,
      "num_input_tokens_seen": 173680,
      "step": 10
    },
    {
      "epoch": 0.42402826855123676,
      "grad_norm": 2.7824206352233887,
      "learning_rate": 4.752422169756048e-05,
      "loss": 0.3586,
      "num_input_tokens_seen": 256320,
      "step": 15
    },
    {
      "epoch": 0.5653710247349824,
      "grad_norm": 2.0576891899108887,
      "learning_rate": 4.5655969357899874e-05,
      "loss": 0.3476,
      "num_input_tokens_seen": 354080,
      "step": 20
    },
    {
      "epoch": 0.7067137809187279,
      "grad_norm": 2.399836778640747,
      "learning_rate": 4.332629679574566e-05,
      "loss": 0.2828,
      "num_input_tokens_seen": 445120,
      "step": 25
    },
    {
      "epoch": 0.8480565371024735,
      "grad_norm": 3.21662974357605,
      "learning_rate": 4.058724504646834e-05,
      "loss": 0.3261,
      "num_input_tokens_seen": 536160,
      "step": 30
    },
    {
      "epoch": 0.9893992932862191,
      "grad_norm": 2.1142466068267822,
      "learning_rate": 3.7500000000000003e-05,
      "loss": 0.1752,
      "num_input_tokens_seen": 622160,
      "step": 35
    },
    {
      "epoch": 1.1130742049469964,
      "grad_norm": 2.0932960510253906,
      "learning_rate": 3.413352560915988e-05,
      "loss": 0.1521,
      "num_input_tokens_seen": 702992,
      "step": 40
    },
    {
      "epoch": 1.254416961130742,
      "grad_norm": 1.1899418830871582,
      "learning_rate": 3.056302334890786e-05,
      "loss": 0.1056,
      "num_input_tokens_seen": 795712,
      "step": 45
    },
    {
      "epoch": 1.3957597173144876,
      "grad_norm": 1.8297990560531616,
      "learning_rate": 2.686825233966061e-05,
      "loss": 0.2243,
      "num_input_tokens_seen": 880032,
      "step": 50
    },
    {
      "epoch": 1.5371024734982333,
      "grad_norm": 2.430858850479126,
      "learning_rate": 2.3131747660339394e-05,
      "loss": 0.1218,
      "num_input_tokens_seen": 969392,
      "step": 55
    },
    {
      "epoch": 1.6784452296819787,
      "grad_norm": 1.3377656936645508,
      "learning_rate": 1.9436976651092144e-05,
      "loss": 0.1474,
      "num_input_tokens_seen": 1058752,
      "step": 60
    },
    {
      "epoch": 1.8197879858657244,
      "grad_norm": 5.327373504638672,
      "learning_rate": 1.5866474390840125e-05,
      "loss": 0.0559,
      "num_input_tokens_seen": 1143072,
      "step": 65
    },
    {
      "epoch": 1.96113074204947,
      "grad_norm": 5.6472272872924805,
      "learning_rate": 1.2500000000000006e-05,
      "loss": 0.0936,
      "num_input_tokens_seen": 1234112,
      "step": 70
    },
    {
      "epoch": 2.0848056537102475,
      "grad_norm": 0.6908755898475647,
      "learning_rate": 9.412754953531663e-06,
      "loss": 0.0648,
      "num_input_tokens_seen": 1303184,
      "step": 75
    },
    {
      "epoch": 2.2261484098939928,
      "grad_norm": 1.777599811553955,
      "learning_rate": 6.673703204254347e-06,
      "loss": 0.0676,
      "num_input_tokens_seen": 1385824,
      "step": 80
    },
    {
      "epoch": 2.3674911660777385,
      "grad_norm": 1.3812588453292847,
      "learning_rate": 4.344030642100133e-06,
      "loss": 0.0947,
      "num_input_tokens_seen": 1476864,
      "step": 85
    },
    {
      "epoch": 2.508833922261484,
      "grad_norm": 1.7979100942611694,
      "learning_rate": 2.475778302439524e-06,
      "loss": 0.0447,
      "num_input_tokens_seen": 1569584,
      "step": 90
    },
    {
      "epoch": 2.65017667844523,
      "grad_norm": 4.330524444580078,
      "learning_rate": 1.1106798553464804e-06,
      "loss": 0.0535,
      "num_input_tokens_seen": 1657264,
      "step": 95
    },
    {
      "epoch": 2.791519434628975,
      "grad_norm": 2.0189173221588135,
      "learning_rate": 2.7922934437178695e-07,
      "loss": 0.1107,
      "num_input_tokens_seen": 1746624,
      "step": 100
    },
    {
      "epoch": 2.932862190812721,
      "grad_norm": 1.6774531602859497,
      "learning_rate": 0.0,
      "loss": 0.0806,
      "num_input_tokens_seen": 1841024,
      "step": 105
    },
    {
      "epoch": 2.932862190812721,
      "num_input_tokens_seen": 1841024,
      "step": 105,
      "total_flos": 7451631889809408.0,
      "train_loss": 0.1802581724666414,
      "train_runtime": 2989.6538,
      "train_samples_per_second": 0.567,
      "train_steps_per_second": 0.035
    }
  ],
  "logging_steps": 5,
  "max_steps": 105,
  "num_input_tokens_seen": 1841024,
  "num_train_epochs": 3,
  "save_steps": 50,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 7451631889809408.0,
  "train_batch_size": 2,
  "trial_name": null,
  "trial_params": null
}