{
  "best_metric": 0.5767690253671561,
  "best_model_checkpoint": "outputs/indobert-base-uncased-reddit-indonesia-sarcastic/checkpoint-1545",
  "epoch": 8.0,
  "eval_steps": 500,
  "global_step": 2472,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 1.0,
      "learning_rate": 9.997611994132918e-06,
      "loss": 0.5121,
      "step": 309
    },
    {
      "epoch": 1.0,
      "eval_accuracy": 0.7377746279234585,
      "eval_f1": 0.47740112994350287,
      "eval_loss": 0.49424678087234497,
      "eval_precision": 0.476056338028169,
      "eval_recall": 0.47875354107648727,
      "eval_runtime": 5.6623,
      "eval_samples_per_second": 249.194,
      "eval_steps_per_second": 4.062,
      "step": 309
    },
    {
      "epoch": 2.0,
      "learning_rate": 9.990292594641672e-06,
      "loss": 0.4513,
      "step": 618
    },
    {
      "epoch": 2.0,
      "eval_accuracy": 0.7951807228915663,
      "eval_f1": 0.4956369982547993,
      "eval_loss": 0.4422110319137573,
      "eval_precision": 0.6454545454545455,
      "eval_recall": 0.40226628895184136,
      "eval_runtime": 5.5658,
      "eval_samples_per_second": 253.511,
      "eval_steps_per_second": 4.132,
      "step": 618
    },
    {
      "epoch": 3.0,
      "learning_rate": 9.97804837884484e-06,
      "loss": 0.4078,
      "step": 927
    },
    {
      "epoch": 3.0,
      "eval_accuracy": 0.7980155917788803,
      "eval_f1": 0.4074844074844075,
      "eval_loss": 0.4770745635032654,
      "eval_precision": 0.765625,
      "eval_recall": 0.2776203966005666,
      "eval_runtime": 5.5683,
      "eval_samples_per_second": 253.398,
      "eval_steps_per_second": 4.131,
      "step": 927
    },
    {
      "epoch": 4.0,
      "learning_rate": 9.960891430305153e-06,
      "loss": 0.3686,
      "step": 1236
    },
    {
      "epoch": 4.0,
      "eval_accuracy": 0.8051027639971652,
      "eval_f1": 0.4897959183673469,
      "eval_loss": 0.4755268394947052,
      "eval_precision": 0.7096774193548387,
      "eval_recall": 0.37393767705382436,
      "eval_runtime": 5.5542,
      "eval_samples_per_second": 254.043,
      "eval_steps_per_second": 4.141,
      "step": 1236
    },
    {
      "epoch": 5.0,
      "learning_rate": 9.938838680859435e-06,
      "loss": 0.3358,
      "step": 1545
    },
    {
      "epoch": 5.0,
      "eval_accuracy": 0.7753366406803686,
      "eval_f1": 0.5767690253671561,
      "eval_loss": 0.48644235730171204,
      "eval_precision": 0.5454545454545454,
      "eval_recall": 0.6118980169971672,
      "eval_runtime": 5.566,
      "eval_samples_per_second": 253.505,
      "eval_steps_per_second": 4.132,
      "step": 1545
    },
    {
      "epoch": 6.0,
      "learning_rate": 9.91191189390893e-06,
      "loss": 0.299,
      "step": 1854
    },
    {
      "epoch": 6.0,
      "eval_accuracy": 0.7632884479092842,
      "eval_f1": 0.5728900255754475,
      "eval_loss": 0.5038222670555115,
      "eval_precision": 0.5221445221445221,
      "eval_recall": 0.6345609065155807,
      "eval_runtime": 5.6526,
      "eval_samples_per_second": 249.619,
      "eval_steps_per_second": 4.069,
      "step": 1854
    },
    {
      "epoch": 7.0,
      "learning_rate": 9.880137642941445e-06,
      "loss": 0.2602,
      "step": 2163
    },
    {
      "epoch": 7.0,
      "eval_accuracy": 0.7888022678951099,
      "eval_f1": 0.5386996904024767,
      "eval_loss": 0.5241848826408386,
      "eval_precision": 0.5938566552901023,
      "eval_recall": 0.49291784702549574,
      "eval_runtime": 5.5631,
      "eval_samples_per_second": 253.635,
      "eval_steps_per_second": 4.134,
      "step": 2163
    },
    {
      "epoch": 8.0,
      "learning_rate": 9.843547285306535e-06,
      "loss": 0.2184,
      "step": 2472
    },
    {
      "epoch": 8.0,
      "eval_accuracy": 0.781715095676825,
      "eval_f1": 0.5523255813953489,
      "eval_loss": 0.6153298616409302,
      "eval_precision": 0.5671641791044776,
      "eval_recall": 0.5382436260623229,
      "eval_runtime": 5.5657,
      "eval_samples_per_second": 253.519,
      "eval_steps_per_second": 4.132,
      "step": 2472
    },
    {
      "epoch": 8.0,
      "step": 2472,
      "total_flos": 5199600676024320.0,
      "train_loss": 0.3566574973195888,
      "train_runtime": 992.7359,
      "train_samples_per_second": 995.33,
      "train_steps_per_second": 31.126
    }
  ],
  "logging_steps": 500,
  "max_steps": 30900,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 100,
  "save_steps": 500,
  "total_flos": 5199600676024320.0,
  "train_batch_size": 32,
  "trial_name": null,
  "trial_params": null
}