{
  "best_global_step": null,
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 8.790419161676647,
  "eval_steps": 500,
  "global_step": 369,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.23952095808383234,
      "grad_norm": 0.6780208945274353,
      "learning_rate": 7.500000000000001e-05,
      "loss": 0.9013,
      "step": 10
    },
    {
      "epoch": 0.47904191616766467,
      "grad_norm": 0.2924523651599884,
      "learning_rate": 9.990516643685222e-05,
      "loss": 0.5871,
      "step": 20
    },
    {
      "epoch": 0.718562874251497,
      "grad_norm": 0.3275580108165741,
      "learning_rate": 9.944154131125642e-05,
      "loss": 0.4616,
      "step": 30
    },
    {
      "epoch": 0.9580838323353293,
      "grad_norm": 0.29193195700645447,
      "learning_rate": 9.859528969650738e-05,
      "loss": 0.3833,
      "step": 40
    },
    {
      "epoch": 1.0,
      "eval_accuracy": 0.9116215885759624,
      "eval_loss": 0.3712063729763031,
      "eval_runtime": 25.6047,
      "eval_samples_per_second": 14.411,
      "eval_steps_per_second": 2.421,
      "step": 42
    },
    {
      "epoch": 1.1916167664670658,
      "grad_norm": 0.31183212995529175,
      "learning_rate": 9.737296070648186e-05,
      "loss": 0.359,
      "step": 50
    },
    {
      "epoch": 1.4311377245508983,
      "grad_norm": 0.32040631771087646,
      "learning_rate": 9.57840139057007e-05,
      "loss": 0.2768,
      "step": 60
    },
    {
      "epoch": 1.6706586826347305,
      "grad_norm": 0.3420811891555786,
      "learning_rate": 9.384074610206495e-05,
      "loss": 0.3306,
      "step": 70
    },
    {
      "epoch": 1.910179640718563,
      "grad_norm": 0.3617421090602875,
      "learning_rate": 9.155819618225708e-05,
      "loss": 0.298,
      "step": 80
    },
    {
      "epoch": 2.0,
      "eval_accuracy": 0.927978239677673,
      "eval_loss": 0.28049176931381226,
      "eval_runtime": 25.5364,
      "eval_samples_per_second": 14.45,
      "eval_steps_per_second": 2.428,
      "step": 84
    },
    {
      "epoch": 2.143712574850299,
      "grad_norm": 0.32619667053222656,
      "learning_rate": 8.895402872628352e-05,
      "loss": 0.2521,
      "step": 90
    },
    {
      "epoch": 2.3832335329341316,
      "grad_norm": 0.37772828340530396,
      "learning_rate": 8.604839730186125e-05,
      "loss": 0.2509,
      "step": 100
    },
    {
      "epoch": 2.622754491017964,
      "grad_norm": 0.41024157404899597,
      "learning_rate": 8.286378849660896e-05,
      "loss": 0.2314,
      "step": 110
    },
    {
      "epoch": 2.8622754491017965,
      "grad_norm": 0.4271406829357147,
      "learning_rate": 7.942484789507283e-05,
      "loss": 0.2038,
      "step": 120
    },
    {
      "epoch": 3.0,
      "eval_accuracy": 0.9400044813821201,
      "eval_loss": 0.2474980503320694,
      "eval_runtime": 25.503,
      "eval_samples_per_second": 14.469,
      "eval_steps_per_second": 2.431,
      "step": 126
    },
    {
      "epoch": 3.095808383233533,
      "grad_norm": 0.31891995668411255,
      "learning_rate": 7.57581893473448e-05,
      "loss": 0.178,
      "step": 130
    },
    {
      "epoch": 3.3353293413173652,
      "grad_norm": 0.4321751296520233,
      "learning_rate": 7.18921890053375e-05,
      "loss": 0.1676,
      "step": 140
    },
    {
      "epoch": 3.5748502994011977,
      "grad_norm": 0.4070405662059784,
      "learning_rate": 6.785676572066225e-05,
      "loss": 0.166,
      "step": 150
    },
    {
      "epoch": 3.81437125748503,
      "grad_norm": 0.39369383454322815,
      "learning_rate": 6.368314950360415e-05,
      "loss": 0.1427,
      "step": 160
    },
    {
      "epoch": 4.0,
      "eval_accuracy": 0.9457944390098919,
      "eval_loss": 0.22429963946342468,
      "eval_runtime": 25.5486,
      "eval_samples_per_second": 14.443,
      "eval_steps_per_second": 2.427,
      "step": 168
    },
    {
      "epoch": 4.047904191616767,
      "grad_norm": 0.27738964557647705,
      "learning_rate": 5.940363983508257e-05,
      "loss": 0.1245,
      "step": 170
    },
    {
      "epoch": 4.287425149700598,
      "grad_norm": 0.3594737648963928,
      "learning_rate": 5.5051355702012893e-05,
      "loss": 0.1112,
      "step": 180
    },
    {
      "epoch": 4.526946107784431,
      "grad_norm": 0.41399770975112915,
      "learning_rate": 5.0659979290537954e-05,
      "loss": 0.1059,
      "step": 190
    },
    {
      "epoch": 4.766467065868263,
      "grad_norm": 0.458842396736145,
      "learning_rate": 4.626349532067879e-05,
      "loss": 0.1083,
      "step": 200
    },
    {
      "epoch": 5.0,
      "grad_norm": 0.3905090093612671,
      "learning_rate": 4.189592803968563e-05,
      "loss": 0.1081,
      "step": 210
    },
    {
      "epoch": 5.0,
      "eval_accuracy": 0.9490314927529816,
      "eval_loss": 0.22448676824569702,
      "eval_runtime": 25.5318,
      "eval_samples_per_second": 14.453,
      "eval_steps_per_second": 2.428,
      "step": 210
    },
    {
      "epoch": 5.2395209580838324,
      "grad_norm": 0.36203253269195557,
      "learning_rate": 3.759107790948882e-05,
      "loss": 0.0938,
      "step": 220
    },
    {
      "epoch": 5.479041916167665,
      "grad_norm": 0.3554639220237732,
      "learning_rate": 3.338226002601703e-05,
      "loss": 0.0714,
      "step": 230
    },
    {
      "epoch": 5.718562874251497,
      "grad_norm": 0.45702579617500305,
      "learning_rate": 2.9302046294747497e-05,
      "loss": 0.0695,
      "step": 240
    },
    {
      "epoch": 5.95808383233533,
      "grad_norm": 0.4139527976512909,
      "learning_rate": 2.5382013357782893e-05,
      "loss": 0.066,
      "step": 250
    },
    {
      "epoch": 6.0,
      "eval_accuracy": 0.9516058032104253,
      "eval_loss": 0.22889475524425507,
      "eval_runtime": 25.5009,
      "eval_samples_per_second": 14.47,
      "eval_steps_per_second": 2.431,
      "step": 252
    },
    {
      "epoch": 6.191616766467066,
      "grad_norm": 0.4615657925605774,
      "learning_rate": 2.1652498223239427e-05,
      "loss": 0.0641,
      "step": 260
    },
    {
      "epoch": 6.431137724550898,
      "grad_norm": 0.31427574157714844,
      "learning_rate": 1.814236348812211e-05,
      "loss": 0.0481,
      "step": 270
    },
    {
      "epoch": 6.6706586826347305,
      "grad_norm": 0.3197039067745209,
      "learning_rate": 1.4878773971620074e-05,
      "loss": 0.0436,
      "step": 280
    },
    {
      "epoch": 6.910179640718563,
      "grad_norm": 0.4587210714817047,
      "learning_rate": 1.1886986487449475e-05,
      "loss": 0.0503,
      "step": 290
    },
    {
      "epoch": 7.0,
      "eval_accuracy": 0.9523039796337592,
      "eval_loss": 0.24567043781280518,
      "eval_runtime": 25.5173,
      "eval_samples_per_second": 14.461,
      "eval_steps_per_second": 2.43,
      "step": 294
    },
    {
      "epoch": 7.1437125748503,
      "grad_norm": 0.26290225982666016,
      "learning_rate": 9.190154382188921e-06,
      "loss": 0.0445,
      "step": 300
    },
    {
      "epoch": 7.383233532934132,
      "grad_norm": 0.3077375590801239,
      "learning_rate": 6.809148352279182e-06,
      "loss": 0.0396,
      "step": 310
    },
    {
      "epoch": 7.6227544910179645,
      "grad_norm": 0.3048953711986542,
      "learning_rate": 4.762394926378477e-06,
      "loss": 0.0376,
      "step": 320
    },
    {
      "epoch": 7.862275449101796,
      "grad_norm": 0.3477061986923218,
      "learning_rate": 3.065733863053072e-06,
      "loss": 0.0401,
      "step": 330
    },
    {
      "epoch": 8.0,
      "eval_accuracy": 0.9527086778146084,
      "eval_loss": 0.26157039403915405,
      "eval_runtime": 25.5175,
      "eval_samples_per_second": 14.461,
      "eval_steps_per_second": 2.43,
      "step": 336
    },
    {
      "epoch": 8.095808383233534,
      "grad_norm": 0.2449229508638382,
      "learning_rate": 1.7322955673980678e-06,
      "loss": 0.0311,
      "step": 340
    },
    {
      "epoch": 8.335329341317365,
      "grad_norm": 0.2587301433086395,
      "learning_rate": 7.723994752570462e-07,
      "loss": 0.0311,
      "step": 350
    },
    {
      "epoch": 8.574850299401197,
      "grad_norm": 0.25320425629615784,
      "learning_rate": 1.9347419144180035e-07,
      "loss": 0.0338,
      "step": 360
    },
    {
      "epoch": 8.790419161676647,
      "eval_accuracy": 0.9525826602796044,
      "eval_loss": 0.2624136507511139,
      "eval_runtime": 25.3173,
      "eval_samples_per_second": 14.575,
      "eval_steps_per_second": 2.449,
      "step": 369
    },
    {
      "epoch": 8.790419161676647,
      "step": 369,
      "total_flos": 2.992361562963968e+17,
      "train_loss": 0.17746063170394277,
      "train_runtime": 2311.8158,
      "train_samples_per_second": 3.893,
      "train_steps_per_second": 0.16
    }
  ],
  "logging_steps": 10,
  "max_steps": 369,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 9,
  "save_steps": 500,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 2.992361562963968e+17,
  "train_batch_size": 2,
  "trial_name": null,
  "trial_params": null
}