{
  "best_metric": 0.49511729307051994,
  "best_model_checkpoint": "./indobertweet-review-rating/checkpoint-14300",
  "epoch": 6.0,
  "eval_steps": 500,
  "global_step": 21450,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.14,
      "learning_rate": 1.9813519813519816e-05,
      "loss": 0.4207,
      "step": 500
    },
    {
      "epoch": 0.28,
      "learning_rate": 1.962703962703963e-05,
      "loss": 0.3879,
      "step": 1000
    },
    {
      "epoch": 0.42,
      "learning_rate": 1.944055944055944e-05,
      "loss": 0.3832,
      "step": 1500
    },
    {
      "epoch": 0.56,
      "learning_rate": 1.9254079254079257e-05,
      "loss": 0.3796,
      "step": 2000
    },
    {
      "epoch": 0.7,
      "learning_rate": 1.906759906759907e-05,
      "loss": 0.3764,
      "step": 2500
    },
    {
      "epoch": 0.84,
      "learning_rate": 1.888111888111888e-05,
      "loss": 0.3732,
      "step": 3000
    },
    {
      "epoch": 0.98,
      "learning_rate": 1.8694638694638696e-05,
      "loss": 0.3715,
      "step": 3500
    },
    {
      "epoch": 1.0,
      "eval_akurasi": 0.304635761589404,
      "eval_f1": 0.41270686365854736,
      "eval_loss": 0.36894935369491577,
      "eval_roc_auc": 0.6308684676414661,
      "eval_runtime": 132.2279,
      "eval_samples_per_second": 115.339,
      "eval_steps_per_second": 7.215,
      "step": 3575
    },
    {
      "epoch": 1.12,
      "learning_rate": 1.850815850815851e-05,
      "loss": 0.3592,
      "step": 4000
    },
    {
      "epoch": 1.26,
      "learning_rate": 1.8321678321678323e-05,
      "loss": 0.3515,
      "step": 4500
    },
    {
      "epoch": 1.4,
      "learning_rate": 1.8135198135198137e-05,
      "loss": 0.3529,
      "step": 5000
    },
    {
      "epoch": 1.54,
      "learning_rate": 1.794871794871795e-05,
      "loss": 0.3497,
      "step": 5500
    },
    {
      "epoch": 1.68,
      "learning_rate": 1.7762237762237765e-05,
      "loss": 0.3489,
      "step": 6000
    },
    {
      "epoch": 1.82,
      "learning_rate": 1.7575757575757576e-05,
      "loss": 0.3511,
      "step": 6500
    },
    {
      "epoch": 1.96,
      "learning_rate": 1.738927738927739e-05,
      "loss": 0.3494,
      "step": 7000
    },
    {
      "epoch": 2.0,
      "eval_akurasi": 0.36633663366336633,
      "eval_f1": 0.4571545380212592,
      "eval_loss": 0.36878135800361633,
      "eval_roc_auc": 0.653645662579503,
      "eval_runtime": 132.2013,
      "eval_samples_per_second": 115.362,
      "eval_steps_per_second": 7.216,
      "step": 7150
    },
    {
      "epoch": 2.1,
      "learning_rate": 1.7202797202797203e-05,
      "loss": 0.327,
      "step": 7500
    },
    {
      "epoch": 2.24,
      "learning_rate": 1.7016317016317017e-05,
      "loss": 0.3207,
      "step": 8000
    },
    {
      "epoch": 2.38,
      "learning_rate": 1.682983682983683e-05,
      "loss": 0.3178,
      "step": 8500
    },
    {
      "epoch": 2.52,
      "learning_rate": 1.6643356643356645e-05,
      "loss": 0.3141,
      "step": 9000
    },
    {
      "epoch": 2.66,
      "learning_rate": 1.645687645687646e-05,
      "loss": 0.3183,
      "step": 9500
    },
    {
      "epoch": 2.8,
      "learning_rate": 1.6270396270396273e-05,
      "loss": 0.3226,
      "step": 10000
    },
    {
      "epoch": 2.94,
      "learning_rate": 1.6083916083916083e-05,
      "loss": 0.322,
      "step": 10500
    },
    {
      "epoch": 3.0,
      "eval_akurasi": 0.4225296701855616,
      "eval_f1": 0.4886535552193646,
      "eval_loss": 0.3799174726009369,
      "eval_roc_auc": 0.6730296374008262,
      "eval_runtime": 132.2733,
      "eval_samples_per_second": 115.299,
      "eval_steps_per_second": 7.212,
      "step": 10725
    },
    {
      "epoch": 3.08,
      "learning_rate": 1.5897435897435897e-05,
      "loss": 0.2957,
      "step": 11000
    },
    {
      "epoch": 3.22,
      "learning_rate": 1.5710955710955715e-05,
      "loss": 0.2751,
      "step": 11500
    },
    {
      "epoch": 3.36,
      "learning_rate": 1.5524475524475525e-05,
      "loss": 0.2724,
      "step": 12000
    },
    {
      "epoch": 3.5,
      "learning_rate": 1.533799533799534e-05,
      "loss": 0.2827,
      "step": 12500
    },
    {
      "epoch": 3.64,
      "learning_rate": 1.5151515151515153e-05,
      "loss": 0.2846,
      "step": 13000
    },
    {
      "epoch": 3.78,
      "learning_rate": 1.4965034965034965e-05,
      "loss": 0.2826,
      "step": 13500
    },
    {
      "epoch": 3.92,
      "learning_rate": 1.4778554778554779e-05,
      "loss": 0.2863,
      "step": 14000
    },
    {
      "epoch": 4.0,
      "eval_akurasi": 0.448101763818766,
      "eval_f1": 0.49511729307051994,
      "eval_loss": 0.412160724401474,
      "eval_roc_auc": 0.6790866172709985,
      "eval_runtime": 132.2949,
      "eval_samples_per_second": 115.28,
      "eval_steps_per_second": 7.211,
      "step": 14300
    },
    {
      "epoch": 4.06,
      "learning_rate": 1.4592074592074595e-05,
      "loss": 0.2652,
      "step": 14500
    },
    {
      "epoch": 4.2,
      "learning_rate": 1.4405594405594407e-05,
      "loss": 0.2333,
      "step": 15000
    },
    {
      "epoch": 4.34,
      "learning_rate": 1.421911421911422e-05,
      "loss": 0.2457,
      "step": 15500
    },
    {
      "epoch": 4.48,
      "learning_rate": 1.4032634032634035e-05,
      "loss": 0.244,
      "step": 16000
    },
    {
      "epoch": 4.62,
      "learning_rate": 1.3846153846153847e-05,
      "loss": 0.2438,
      "step": 16500
    },
    {
      "epoch": 4.76,
      "learning_rate": 1.365967365967366e-05,
      "loss": 0.2448,
      "step": 17000
    },
    {
      "epoch": 4.9,
      "learning_rate": 1.3473193473193473e-05,
      "loss": 0.2484,
      "step": 17500
    },
    {
      "epoch": 5.0,
      "eval_akurasi": 0.45597010032129043,
      "eval_f1": 0.48838265609168097,
      "eval_loss": 0.4510646164417267,
      "eval_roc_auc": 0.6768326667103797,
      "eval_runtime": 132.311,
      "eval_samples_per_second": 115.266,
      "eval_steps_per_second": 7.21,
      "step": 17875
    },
    {
      "epoch": 5.03,
      "learning_rate": 1.3286713286713288e-05,
      "loss": 0.2364,
      "step": 18000
    },
    {
      "epoch": 5.17,
      "learning_rate": 1.3100233100233102e-05,
      "loss": 0.2016,
      "step": 18500
    },
    {
      "epoch": 5.31,
      "learning_rate": 1.2913752913752915e-05,
      "loss": 0.2034,
      "step": 19000
    },
    {
      "epoch": 5.45,
      "learning_rate": 1.2727272727272728e-05,
      "loss": 0.2131,
      "step": 19500
    },
    {
      "epoch": 5.59,
      "learning_rate": 1.254079254079254e-05,
      "loss": 0.2076,
      "step": 20000
    },
    {
      "epoch": 5.73,
      "learning_rate": 1.2354312354312355e-05,
      "loss": 0.2032,
      "step": 20500
    },
    {
      "epoch": 5.87,
      "learning_rate": 1.216783216783217e-05,
      "loss": 0.2134,
      "step": 21000
    },
    {
      "epoch": 6.0,
      "eval_akurasi": 0.462986033702708,
      "eval_f1": 0.4940430572005852,
      "eval_loss": 0.4969989061355591,
      "eval_roc_auc": 0.680316044849518,
      "eval_runtime": 132.3664,
      "eval_samples_per_second": 115.218,
      "eval_steps_per_second": 7.207,
      "step": 21450
    }
  ],
  "logging_steps": 500,
  "max_steps": 53625,
  "num_train_epochs": 15,
  "save_steps": 500,
  "total_flos": 4.514475841140326e+16,
  "trial_name": null,
  "trial_params": null
}