{
  "best_metric": 4.496387481689453,
  "best_model_checkpoint": "/home/p318482/babyLM_controlled/models_trained_last/en_wiki_mlm_30/checkpoint-40000",
  "epoch": 22.637238256932655,
  "eval_steps": 2000,
  "global_step": 40000,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 1.1318619128466327,
      "eval_loss": 7.872045516967773,
      "eval_runtime": 3.9374,
      "eval_samples_per_second": 1252.08,
      "eval_steps_per_second": 78.477,
      "step": 2000
    },
    {
      "epoch": 2.2637238256932655,
      "grad_norm": 1.0014455318450928,
      "learning_rate": 1e-05,
      "loss": 7.9236,
      "step": 4000
    },
    {
      "epoch": 2.2637238256932655,
      "eval_loss": 7.107393264770508,
      "eval_runtime": 3.9376,
      "eval_samples_per_second": 1252.036,
      "eval_steps_per_second": 78.474,
      "step": 4000
    },
    {
      "epoch": 3.395585738539898,
      "eval_loss": 7.030406951904297,
      "eval_runtime": 3.9404,
      "eval_samples_per_second": 1251.146,
      "eval_steps_per_second": 78.419,
      "step": 6000
    },
    {
      "epoch": 4.527447651386531,
      "grad_norm": 1.1162114143371582,
      "learning_rate": 2e-05,
      "loss": 7.0257,
      "step": 8000
    },
    {
      "epoch": 4.527447651386531,
      "eval_loss": 6.953151226043701,
      "eval_runtime": 4.0906,
      "eval_samples_per_second": 1205.208,
      "eval_steps_per_second": 75.539,
      "step": 8000
    },
    {
      "epoch": 5.659309564233164,
      "eval_loss": 6.87604284286499,
      "eval_runtime": 3.912,
      "eval_samples_per_second": 1260.235,
      "eval_steps_per_second": 78.988,
      "step": 10000
    },
    {
      "epoch": 6.791171477079796,
      "grad_norm": 1.346594214439392,
      "learning_rate": 3e-05,
      "loss": 6.8811,
      "step": 12000
    },
    {
      "epoch": 6.791171477079796,
      "eval_loss": 6.811014175415039,
      "eval_runtime": 3.951,
      "eval_samples_per_second": 1247.773,
      "eval_steps_per_second": 78.207,
      "step": 12000
    },
    {
      "epoch": 7.923033389926429,
      "eval_loss": 6.731590747833252,
      "eval_runtime": 3.8635,
      "eval_samples_per_second": 1276.05,
      "eval_steps_per_second": 79.98,
      "step": 14000
    },
    {
      "epoch": 9.054895302773062,
      "grad_norm": 1.458003044128418,
      "learning_rate": 4e-05,
      "loss": 6.7587,
      "step": 16000
    },
    {
      "epoch": 9.054895302773062,
      "eval_loss": 6.689206600189209,
      "eval_runtime": 3.8397,
      "eval_samples_per_second": 1283.96,
      "eval_steps_per_second": 80.475,
      "step": 16000
    },
    {
      "epoch": 10.186757215619695,
      "eval_loss": 6.650099754333496,
      "eval_runtime": 3.9002,
      "eval_samples_per_second": 1264.041,
      "eval_steps_per_second": 79.227,
      "step": 18000
    },
    {
      "epoch": 11.318619128466327,
      "grad_norm": 1.5871217250823975,
      "learning_rate": 5e-05,
      "loss": 6.6566,
      "step": 20000
    },
    {
      "epoch": 11.318619128466327,
      "eval_loss": 6.595088958740234,
      "eval_runtime": 3.9235,
      "eval_samples_per_second": 1256.533,
      "eval_steps_per_second": 78.756,
      "step": 20000
    },
    {
      "epoch": 12.45048104131296,
      "eval_loss": 6.525483131408691,
      "eval_runtime": 3.8651,
      "eval_samples_per_second": 1275.512,
      "eval_steps_per_second": 79.946,
      "step": 22000
    },
    {
      "epoch": 13.582342954159593,
      "grad_norm": 2.502711296081543,
      "learning_rate": 6e-05,
      "loss": 6.546,
      "step": 24000
    },
    {
      "epoch": 13.582342954159593,
      "eval_loss": 6.440639972686768,
      "eval_runtime": 3.8611,
      "eval_samples_per_second": 1276.85,
      "eval_steps_per_second": 80.03,
      "step": 24000
    },
    {
      "epoch": 14.714204867006226,
      "eval_loss": 6.316535472869873,
      "eval_runtime": 3.8871,
      "eval_samples_per_second": 1268.291,
      "eval_steps_per_second": 79.493,
      "step": 26000
    },
    {
      "epoch": 15.846066779852858,
      "grad_norm": 4.2022857666015625,
      "learning_rate": 7e-05,
      "loss": 6.3494,
      "step": 28000
    },
    {
      "epoch": 15.846066779852858,
      "eval_loss": 6.149945259094238,
      "eval_runtime": 3.9074,
      "eval_samples_per_second": 1261.705,
      "eval_steps_per_second": 79.081,
      "step": 28000
    },
    {
      "epoch": 16.97792869269949,
      "eval_loss": 5.94103479385376,
      "eval_runtime": 3.8837,
      "eval_samples_per_second": 1269.393,
      "eval_steps_per_second": 79.562,
      "step": 30000
    },
    {
      "epoch": 18.109790605546124,
      "grad_norm": 4.1835408210754395,
      "learning_rate": 8e-05,
      "loss": 6.0156,
      "step": 32000
    },
    {
      "epoch": 18.109790605546124,
      "eval_loss": 5.63771390914917,
      "eval_runtime": 3.9903,
      "eval_samples_per_second": 1235.504,
      "eval_steps_per_second": 77.438,
      "step": 32000
    },
    {
      "epoch": 19.241652518392755,
      "eval_loss": 5.117396354675293,
      "eval_runtime": 3.9867,
      "eval_samples_per_second": 1236.617,
      "eval_steps_per_second": 77.508,
      "step": 34000
    },
    {
      "epoch": 20.37351443123939,
      "grad_norm": 3.6266136169433594,
      "learning_rate": 9e-05,
      "loss": 5.2999,
      "step": 36000
    },
    {
      "epoch": 20.37351443123939,
      "eval_loss": 4.855127811431885,
      "eval_runtime": 3.8488,
      "eval_samples_per_second": 1280.914,
      "eval_steps_per_second": 80.284,
      "step": 36000
    },
    {
      "epoch": 21.50537634408602,
      "eval_loss": 4.6650190353393555,
      "eval_runtime": 3.8768,
      "eval_samples_per_second": 1271.655,
      "eval_steps_per_second": 79.704,
      "step": 38000
    },
    {
      "epoch": 22.637238256932655,
      "grad_norm": 3.870995044708252,
      "learning_rate": 0.0001,
      "loss": 4.7633,
      "step": 40000
    },
    {
      "epoch": 22.637238256932655,
      "eval_loss": 4.496387481689453,
      "eval_runtime": 3.9179,
      "eval_samples_per_second": 1258.325,
      "eval_steps_per_second": 78.869,
      "step": 40000
    }
  ],
  "logging_steps": 4000,
  "max_steps": 100000,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 57,
  "save_steps": 4000,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": false
      },
      "attributes": {}
    }
  },
  "total_flos": 1.2476180458635264e+16,
  "train_batch_size": 16,
  "trial_name": null,
  "trial_params": null
}