{
  "best_metric": 0.08565253019332886,
  "best_model_checkpoint": "./beans_outputs/checkpoint-650",
  "epoch": 5.0,
  "global_step": 650,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.08,
      "learning_rate": 1.9692307692307696e-05,
      "loss": 1.0129,
      "step": 10
    },
    {
      "epoch": 0.15,
      "learning_rate": 1.9384615384615386e-05,
      "loss": 0.981,
      "step": 20
    },
    {
      "epoch": 0.23,
      "learning_rate": 1.907692307692308e-05,
      "loss": 0.8538,
      "step": 30
    },
    {
      "epoch": 0.31,
      "learning_rate": 1.876923076923077e-05,
      "loss": 0.7629,
      "step": 40
    },
    {
      "epoch": 0.38,
      "learning_rate": 1.8461538461538465e-05,
      "loss": 0.6842,
      "step": 50
    },
    {
      "epoch": 0.46,
      "learning_rate": 1.8153846153846155e-05,
      "loss": 0.6331,
      "step": 60
    },
    {
      "epoch": 0.54,
      "learning_rate": 1.784615384615385e-05,
      "loss": 0.5599,
      "step": 70
    },
    {
      "epoch": 0.62,
      "learning_rate": 1.753846153846154e-05,
      "loss": 0.4303,
      "step": 80
    },
    {
      "epoch": 0.69,
      "learning_rate": 1.7230769230769234e-05,
      "loss": 0.3775,
      "step": 90
    },
    {
      "epoch": 0.77,
      "learning_rate": 1.6923076923076924e-05,
      "loss": 0.3291,
      "step": 100
    },
    {
      "epoch": 0.85,
      "learning_rate": 1.6615384615384618e-05,
      "loss": 0.3385,
      "step": 110
    },
    {
      "epoch": 0.92,
      "learning_rate": 1.630769230769231e-05,
      "loss": 0.2658,
      "step": 120
    },
    {
      "epoch": 1.0,
      "learning_rate": 1.6000000000000003e-05,
      "loss": 0.308,
      "step": 130
    },
    {
      "epoch": 1.0,
      "eval_accuracy": 0.9774436090225563,
      "eval_loss": 0.2117719203233719,
      "eval_runtime": 1.3566,
      "eval_samples_per_second": 98.042,
      "eval_steps_per_second": 12.532,
      "step": 130
    },
    {
      "epoch": 1.08,
      "learning_rate": 1.5692307692307693e-05,
      "loss": 0.2892,
      "step": 140
    },
    {
      "epoch": 1.15,
      "learning_rate": 1.5384615384615387e-05,
      "loss": 0.2359,
      "step": 150
    },
    {
      "epoch": 1.23,
      "learning_rate": 1.5076923076923078e-05,
      "loss": 0.2336,
      "step": 160
    },
    {
      "epoch": 1.31,
      "learning_rate": 1.4769230769230772e-05,
      "loss": 0.1469,
      "step": 170
    },
    {
      "epoch": 1.38,
      "learning_rate": 1.4461538461538462e-05,
      "loss": 0.1915,
      "step": 180
    },
    {
      "epoch": 1.46,
      "learning_rate": 1.4153846153846156e-05,
      "loss": 0.1963,
      "step": 190
    },
    {
      "epoch": 1.54,
      "learning_rate": 1.3846153846153847e-05,
      "loss": 0.1549,
      "step": 200
    },
    {
      "epoch": 1.62,
      "learning_rate": 1.353846153846154e-05,
      "loss": 0.1447,
      "step": 210
    },
    {
      "epoch": 1.69,
      "learning_rate": 1.3230769230769231e-05,
      "loss": 0.1779,
      "step": 220
    },
    {
      "epoch": 1.77,
      "learning_rate": 1.2923076923076925e-05,
      "loss": 0.1904,
      "step": 230
    },
    {
      "epoch": 1.85,
      "learning_rate": 1.2615384615384616e-05,
      "loss": 0.1902,
      "step": 240
    },
    {
      "epoch": 1.92,
      "learning_rate": 1.230769230769231e-05,
      "loss": 0.2154,
      "step": 250
    },
    {
      "epoch": 2.0,
      "learning_rate": 1.2e-05,
      "loss": 0.2219,
      "step": 260
    },
    {
      "epoch": 2.0,
      "eval_accuracy": 0.9699248120300752,
      "eval_loss": 0.1302611231803894,
      "eval_runtime": 1.4178,
      "eval_samples_per_second": 93.808,
      "eval_steps_per_second": 11.991,
      "step": 260
    },
    {
      "epoch": 2.08,
      "learning_rate": 1.1692307692307694e-05,
      "loss": 0.1568,
      "step": 270
    },
    {
      "epoch": 2.15,
      "learning_rate": 1.1384615384615385e-05,
      "loss": 0.1809,
      "step": 280
    },
    {
      "epoch": 2.23,
      "learning_rate": 1.1076923076923079e-05,
      "loss": 0.2291,
      "step": 290
    },
    {
      "epoch": 2.31,
      "learning_rate": 1.076923076923077e-05,
      "loss": 0.1456,
      "step": 300
    },
    {
      "epoch": 2.38,
      "learning_rate": 1.0461538461538463e-05,
      "loss": 0.2131,
      "step": 310
    },
    {
      "epoch": 2.46,
      "learning_rate": 1.0153846153846154e-05,
      "loss": 0.1676,
      "step": 320
    },
    {
      "epoch": 2.54,
      "learning_rate": 9.846153846153848e-06,
      "loss": 0.1338,
      "step": 330
    },
    {
      "epoch": 2.62,
      "learning_rate": 9.53846153846154e-06,
      "loss": 0.1717,
      "step": 340
    },
    {
      "epoch": 2.69,
      "learning_rate": 9.230769230769232e-06,
      "loss": 0.1218,
      "step": 350
    },
    {
      "epoch": 2.77,
      "learning_rate": 8.923076923076925e-06,
      "loss": 0.1224,
      "step": 360
    },
    {
      "epoch": 2.85,
      "learning_rate": 8.615384615384617e-06,
      "loss": 0.136,
      "step": 370
    },
    {
      "epoch": 2.92,
      "learning_rate": 8.307692307692309e-06,
      "loss": 0.0875,
      "step": 380
    },
    {
      "epoch": 3.0,
      "learning_rate": 8.000000000000001e-06,
      "loss": 0.1831,
      "step": 390
    },
    {
      "epoch": 3.0,
      "eval_accuracy": 0.9774436090225563,
      "eval_loss": 0.11418382823467255,
      "eval_runtime": 1.3894,
      "eval_samples_per_second": 95.726,
      "eval_steps_per_second": 12.236,
      "step": 390
    },
    {
      "epoch": 3.08,
      "learning_rate": 7.692307692307694e-06,
      "loss": 0.1057,
      "step": 400
    },
    {
      "epoch": 3.15,
      "learning_rate": 7.384615384615386e-06,
      "loss": 0.1514,
      "step": 410
    },
    {
      "epoch": 3.23,
      "learning_rate": 7.076923076923078e-06,
      "loss": 0.099,
      "step": 420
    },
    {
      "epoch": 3.31,
      "learning_rate": 6.76923076923077e-06,
      "loss": 0.1241,
      "step": 430
    },
    {
      "epoch": 3.38,
      "learning_rate": 6.461538461538463e-06,
      "loss": 0.1543,
      "step": 440
    },
    {
      "epoch": 3.46,
      "learning_rate": 6.153846153846155e-06,
      "loss": 0.0997,
      "step": 450
    },
    {
      "epoch": 3.54,
      "learning_rate": 5.846153846153847e-06,
      "loss": 0.1242,
      "step": 460
    },
    {
      "epoch": 3.62,
      "learning_rate": 5.538461538461539e-06,
      "loss": 0.2135,
      "step": 470
    },
    {
      "epoch": 3.69,
      "learning_rate": 5.230769230769232e-06,
      "loss": 0.1074,
      "step": 480
    },
    {
      "epoch": 3.77,
      "learning_rate": 4.923076923076924e-06,
      "loss": 0.1023,
      "step": 490
    },
    {
      "epoch": 3.85,
      "learning_rate": 4.615384615384616e-06,
      "loss": 0.0578,
      "step": 500
    },
    {
      "epoch": 3.92,
      "learning_rate": 4.307692307692308e-06,
      "loss": 0.1487,
      "step": 510
    },
    {
      "epoch": 4.0,
      "learning_rate": 4.000000000000001e-06,
      "loss": 0.0838,
      "step": 520
    },
    {
      "epoch": 4.0,
      "eval_accuracy": 0.9774436090225563,
      "eval_loss": 0.10310114920139313,
      "eval_runtime": 1.3721,
      "eval_samples_per_second": 96.931,
      "eval_steps_per_second": 12.39,
      "step": 520
    },
    {
      "epoch": 4.08,
      "learning_rate": 3.692307692307693e-06,
      "loss": 0.1201,
      "step": 530
    },
    {
      "epoch": 4.15,
      "learning_rate": 3.384615384615385e-06,
      "loss": 0.0799,
      "step": 540
    },
    {
      "epoch": 4.23,
      "learning_rate": 3.0769230769230774e-06,
      "loss": 0.1362,
      "step": 550
    },
    {
      "epoch": 4.31,
      "learning_rate": 2.7692307692307697e-06,
      "loss": 0.1308,
      "step": 560
    },
    {
      "epoch": 4.38,
      "learning_rate": 2.461538461538462e-06,
      "loss": 0.1356,
      "step": 570
    },
    {
      "epoch": 4.46,
      "learning_rate": 2.153846153846154e-06,
      "loss": 0.0681,
      "step": 580
    },
    {
      "epoch": 4.54,
      "learning_rate": 1.8461538461538465e-06,
      "loss": 0.0825,
      "step": 590
    },
    {
      "epoch": 4.62,
      "learning_rate": 1.5384615384615387e-06,
      "loss": 0.074,
      "step": 600
    },
    {
      "epoch": 4.69,
      "learning_rate": 1.230769230769231e-06,
      "loss": 0.0909,
      "step": 610
    },
    {
      "epoch": 4.77,
      "learning_rate": 9.230769230769232e-07,
      "loss": 0.0875,
      "step": 620
    },
    {
      "epoch": 4.85,
      "learning_rate": 6.153846153846155e-07,
      "loss": 0.1273,
      "step": 630
    },
    {
      "epoch": 4.92,
      "learning_rate": 3.0769230769230774e-07,
      "loss": 0.1401,
      "step": 640
    },
    {
      "epoch": 5.0,
      "learning_rate": 0.0,
      "loss": 0.1266,
      "step": 650
    },
    {
      "epoch": 5.0,
      "eval_accuracy": 0.9849624060150376,
      "eval_loss": 0.08565253019332886,
      "eval_runtime": 1.4608,
      "eval_samples_per_second": 91.043,
      "eval_steps_per_second": 11.637,
      "step": 650
    },
    {
      "epoch": 5.0,
      "step": 650,
      "total_flos": 4.006371770595533e+17,
      "train_loss": 0.23302755612593432,
      "train_runtime": 157.279,
      "train_samples_per_second": 32.872,
      "train_steps_per_second": 4.133
    }
  ],
  "max_steps": 650,
  "num_train_epochs": 5,
  "total_flos": 4.006371770595533e+17,
  "trial_name": null,
  "trial_params": null
}