{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 1.0,
  "eval_steps": 100,
  "global_step": 76,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.01,
      "learning_rate": 6.25e-07,
      "logits/chosen": -2.521343946456909,
      "logits/rejected": -2.3620429039001465,
      "logps/chosen": -521.3419189453125,
      "logps/rejected": -476.025146484375,
      "loss": 0.6931,
      "rewards/accuracies": 0.0,
      "rewards/chosen": 0.0,
      "rewards/margins": 0.0,
      "rewards/rejected": 0.0,
      "step": 1
    },
    {
      "epoch": 0.13,
      "learning_rate": 4.989335440737587e-06,
      "logits/chosen": -2.358004093170166,
      "logits/rejected": -2.4325995445251465,
      "logps/chosen": -345.5417175292969,
      "logps/rejected": -309.4444580078125,
      "loss": 0.6911,
      "rewards/accuracies": 0.5694444179534912,
      "rewards/chosen": 0.01685788854956627,
      "rewards/margins": 0.004424349870532751,
      "rewards/rejected": 0.012433539144694805,
      "step": 10
    },
    {
      "epoch": 0.26,
      "learning_rate": 4.625542839324036e-06,
      "logits/chosen": -2.4209659099578857,
      "logits/rejected": -2.4008195400238037,
      "logps/chosen": -297.3746643066406,
      "logps/rejected": -285.84234619140625,
      "loss": 0.679,
      "rewards/accuracies": 0.612500011920929,
      "rewards/chosen": 0.07351277023553848,
      "rewards/margins": 0.030746515840291977,
      "rewards/rejected": 0.04276625066995621,
      "step": 20
    },
    {
      "epoch": 0.39,
      "learning_rate": 3.81608040719339e-06,
      "logits/chosen": -2.4519193172454834,
      "logits/rejected": -2.5038084983825684,
      "logps/chosen": -297.0643005371094,
      "logps/rejected": -273.5292053222656,
      "loss": 0.6686,
      "rewards/accuracies": 0.6625000238418579,
      "rewards/chosen": 0.1535051167011261,
      "rewards/margins": 0.07135292887687683,
      "rewards/rejected": 0.08215219527482986,
      "step": 30
    },
    {
      "epoch": 0.53,
      "learning_rate": 2.730670898658255e-06,
      "logits/chosen": -2.4741883277893066,
      "logits/rejected": -2.297362804412842,
      "logps/chosen": -321.7876281738281,
      "logps/rejected": -261.57379150390625,
      "loss": 0.673,
      "rewards/accuracies": 0.699999988079071,
      "rewards/chosen": 0.1949460208415985,
      "rewards/margins": 0.07163376361131668,
      "rewards/rejected": 0.12331227213144302,
      "step": 40
    },
    {
      "epoch": 0.66,
      "learning_rate": 1.5968958345321178e-06,
      "logits/chosen": -2.4305148124694824,
      "logits/rejected": -2.4470670223236084,
      "logps/chosen": -298.35980224609375,
      "logps/rejected": -267.8696594238281,
      "loss": 0.6744,
      "rewards/accuracies": 0.6499999761581421,
      "rewards/chosen": 0.09592735767364502,
      "rewards/margins": 0.0405537411570549,
      "rewards/rejected": 0.05537362024188042,
      "step": 50
    },
    {
      "epoch": 0.79,
      "learning_rate": 6.524777069483526e-07,
      "logits/chosen": -2.4608042240142822,
      "logits/rejected": -2.472360849380493,
      "logps/chosen": -333.09967041015625,
      "logps/rejected": -280.1986389160156,
      "loss": 0.6734,
      "rewards/accuracies": 0.737500011920929,
      "rewards/chosen": 0.23129253089427948,
      "rewards/margins": 0.10529744625091553,
      "rewards/rejected": 0.12599506974220276,
      "step": 60
    },
    {
      "epoch": 0.92,
      "learning_rate": 9.54358920679524e-08,
      "logits/chosen": -2.3924319744110107,
      "logits/rejected": -2.393690586090088,
      "logps/chosen": -306.0284729003906,
      "logps/rejected": -271.36529541015625,
      "loss": 0.6684,
      "rewards/accuracies": 0.512499988079071,
      "rewards/chosen": 0.06389348953962326,
      "rewards/margins": 0.027882063761353493,
      "rewards/rejected": 0.03601142764091492,
      "step": 70
    },
    {
      "epoch": 1.0,
      "step": 76,
      "total_flos": 0.0,
      "train_loss": 0.6752520862378573,
      "train_runtime": 2732.0836,
      "train_samples_per_second": 1.779,
      "train_steps_per_second": 0.028
    }
  ],
  "logging_steps": 10,
  "max_steps": 76,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 100,
  "total_flos": 0.0,
  "train_batch_size": 4,
  "trial_name": null,
  "trial_params": null
}