{ "best_metric": null, "best_model_checkpoint": null, "epoch": 1.0588235294117647, "eval_steps": 500, "global_step": 36, "is_hyper_param_search": false, "is_local_process_zero": true, "is_world_process_zero": true, "log_history": [ { "epoch": 0.029411764705882353, "grad_norm": 0.9765625, "learning_rate": 2e-05, "loss": 2.0227, "step": 1 }, { "epoch": 0.058823529411764705, "grad_norm": 0.8125, "learning_rate": 4e-05, "loss": 1.6615, "step": 2 }, { "epoch": 0.08823529411764706, "grad_norm": 0.90625, "learning_rate": 6e-05, "loss": 1.9129, "step": 3 }, { "epoch": 0.11764705882352941, "grad_norm": 1.296875, "learning_rate": 8e-05, "loss": 2.2892, "step": 4 }, { "epoch": 0.14705882352941177, "grad_norm": 1.2578125, "learning_rate": 0.0001, "loss": 2.0247, "step": 5 }, { "epoch": 0.17647058823529413, "grad_norm": 0.98828125, "learning_rate": 0.00012, "loss": 1.8962, "step": 6 }, { "epoch": 0.20588235294117646, "grad_norm": 1.0546875, "learning_rate": 0.00014, "loss": 1.703, "step": 7 }, { "epoch": 0.23529411764705882, "grad_norm": 0.9140625, "learning_rate": 0.00016, "loss": 1.5534, "step": 8 }, { "epoch": 0.2647058823529412, "grad_norm": 0.93359375, "learning_rate": 0.00018, "loss": 1.4303, "step": 9 }, { "epoch": 0.29411764705882354, "grad_norm": 0.83203125, "learning_rate": 0.0002, "loss": 1.3701, "step": 10 }, { "epoch": 0.3235294117647059, "grad_norm": 1.0703125, "learning_rate": 0.00019985334138511237, "loss": 1.3449, "step": 11 }, { "epoch": 0.35294117647058826, "grad_norm": 0.9765625, "learning_rate": 0.00019941379571543596, "loss": 1.3191, "step": 12 }, { "epoch": 0.38235294117647056, "grad_norm": 0.94140625, "learning_rate": 0.00019868265225415265, "loss": 1.0243, "step": 13 }, { "epoch": 0.4117647058823529, "grad_norm": 0.91015625, "learning_rate": 0.00019766205557100868, "loss": 1.0321, "step": 14 }, { "epoch": 0.4411764705882353, "grad_norm": 1.21875, "learning_rate": 0.0001963549992519223, "loss": 1.2083, "step": 15 }, { "epoch": 0.47058823529411764, "grad_norm": 1.140625, "learning_rate": 0.00019476531711828027, "loss": 1.2976, "step": 16 }, { "epoch": 0.5, "grad_norm": 1.125, "learning_rate": 0.00019289767198167916, "loss": 1.0929, "step": 17 }, { "epoch": 0.5294117647058824, "grad_norm": 1.1015625, "learning_rate": 0.00019075754196709572, "loss": 0.9081, "step": 18 }, { "epoch": 0.5588235294117647, "grad_norm": 1.15625, "learning_rate": 0.0001883512044446023, "loss": 0.9417, "step": 19 }, { "epoch": 0.5882352941176471, "grad_norm": 0.90234375, "learning_rate": 0.00018568571761675893, "loss": 0.9065, "step": 20 }, { "epoch": 0.6176470588235294, "grad_norm": 1.1796875, "learning_rate": 0.00018276889981568906, "loss": 0.9419, "step": 21 }, { "epoch": 0.6470588235294118, "grad_norm": 1.109375, "learning_rate": 0.00017960930657056438, "loss": 0.9199, "step": 22 }, { "epoch": 0.6764705882352942, "grad_norm": 1.15625, "learning_rate": 0.00017621620551276366, "loss": 0.9139, "step": 23 }, { "epoch": 0.7058823529411765, "grad_norm": 1.046875, "learning_rate": 0.0001725995491923131, "loss": 0.7512, "step": 24 }, { "epoch": 0.7352941176470589, "grad_norm": 1.234375, "learning_rate": 0.00016876994588534234, "loss": 0.7435, "step": 25 }, { "epoch": 0.7647058823529411, "grad_norm": 0.95703125, "learning_rate": 0.00016473862847818277, "loss": 0.7502, "step": 26 }, { "epoch": 0.7941176470588235, "grad_norm": 1.390625, "learning_rate": 0.00016051742151937655, "loss": 0.956, "step": 27 }, { "epoch": 0.8235294117647058, "grad_norm": 1.0390625, "learning_rate": 0.00015611870653623825, "loss": 
0.8827, "step": 28 }, { "epoch": 0.8529411764705882, "grad_norm": 0.94140625, "learning_rate": 0.00015155538571770218, "loss": 0.7322, "step": 29 }, { "epoch": 0.8823529411764706, "grad_norm": 1.0078125, "learning_rate": 0.00014684084406997903, "loss": 0.7407, "step": 30 }, { "epoch": 0.9117647058823529, "grad_norm": 0.89453125, "learning_rate": 0.00014198891015602646, "loss": 0.8105, "step": 31 }, { "epoch": 0.9411764705882353, "grad_norm": 1.3125, "learning_rate": 0.00013701381553399145, "loss": 0.9126, "step": 32 }, { "epoch": 0.9705882352941176, "grad_norm": 1.1953125, "learning_rate": 0.000131930153013598, "loss": 0.9439, "step": 33 }, { "epoch": 1.0, "grad_norm": 1.1484375, "learning_rate": 0.00012675283385292212, "loss": 0.7298, "step": 34 }, { "epoch": 1.0294117647058822, "grad_norm": 0.87890625, "learning_rate": 0.00012149704402110243, "loss": 0.4975, "step": 35 }, { "epoch": 1.0588235294117647, "grad_norm": 0.84765625, "learning_rate": 0.0001161781996552765, "loss": 0.8089, "step": 36 } ], "logging_steps": 1, "max_steps": 68, "num_input_tokens_seen": 0, "num_train_epochs": 2, "save_steps": 12, "total_flos": 1.0646082519092429e+17, "train_batch_size": 2, "trial_name": null, "trial_params": null }