{ "best_metric": 3.254521369934082, "best_model_checkpoint": "/home/p318482/babyLM_controlled/models_trained_last/en_wiki_mlm_30/checkpoint-84000", "epoch": 47.53820033955857, "eval_steps": 2000, "global_step": 84000, "is_hyper_param_search": false, "is_local_process_zero": true, "is_world_process_zero": true, "log_history": [ { "epoch": 1.1318619128466327, "eval_loss": 7.872045516967773, "eval_runtime": 3.9374, "eval_samples_per_second": 1252.08, "eval_steps_per_second": 78.477, "step": 2000 }, { "epoch": 2.2637238256932655, "grad_norm": 1.0014455318450928, "learning_rate": 1e-05, "loss": 7.9236, "step": 4000 }, { "epoch": 2.2637238256932655, "eval_loss": 7.107393264770508, "eval_runtime": 3.9376, "eval_samples_per_second": 1252.036, "eval_steps_per_second": 78.474, "step": 4000 }, { "epoch": 3.395585738539898, "eval_loss": 7.030406951904297, "eval_runtime": 3.9404, "eval_samples_per_second": 1251.146, "eval_steps_per_second": 78.419, "step": 6000 }, { "epoch": 4.527447651386531, "grad_norm": 1.1162114143371582, "learning_rate": 2e-05, "loss": 7.0257, "step": 8000 }, { "epoch": 4.527447651386531, "eval_loss": 6.953151226043701, "eval_runtime": 4.0906, "eval_samples_per_second": 1205.208, "eval_steps_per_second": 75.539, "step": 8000 }, { "epoch": 5.659309564233164, "eval_loss": 6.87604284286499, "eval_runtime": 3.912, "eval_samples_per_second": 1260.235, "eval_steps_per_second": 78.988, "step": 10000 }, { "epoch": 6.791171477079796, "grad_norm": 1.346594214439392, "learning_rate": 3e-05, "loss": 6.8811, "step": 12000 }, { "epoch": 6.791171477079796, "eval_loss": 6.811014175415039, "eval_runtime": 3.951, "eval_samples_per_second": 1247.773, "eval_steps_per_second": 78.207, "step": 12000 }, { "epoch": 7.923033389926429, "eval_loss": 6.731590747833252, "eval_runtime": 3.8635, "eval_samples_per_second": 1276.05, "eval_steps_per_second": 79.98, "step": 14000 }, { "epoch": 9.054895302773062, "grad_norm": 1.458003044128418, "learning_rate": 4e-05, "loss": 6.7587, "step": 16000 }, { "epoch": 9.054895302773062, "eval_loss": 6.689206600189209, "eval_runtime": 3.8397, "eval_samples_per_second": 1283.96, "eval_steps_per_second": 80.475, "step": 16000 }, { "epoch": 10.186757215619695, "eval_loss": 6.650099754333496, "eval_runtime": 3.9002, "eval_samples_per_second": 1264.041, "eval_steps_per_second": 79.227, "step": 18000 }, { "epoch": 11.318619128466327, "grad_norm": 1.5871217250823975, "learning_rate": 5e-05, "loss": 6.6566, "step": 20000 }, { "epoch": 11.318619128466327, "eval_loss": 6.595088958740234, "eval_runtime": 3.9235, "eval_samples_per_second": 1256.533, "eval_steps_per_second": 78.756, "step": 20000 }, { "epoch": 12.45048104131296, "eval_loss": 6.525483131408691, "eval_runtime": 3.8651, "eval_samples_per_second": 1275.512, "eval_steps_per_second": 79.946, "step": 22000 }, { "epoch": 13.582342954159593, "grad_norm": 2.502711296081543, "learning_rate": 6e-05, "loss": 6.546, "step": 24000 }, { "epoch": 13.582342954159593, "eval_loss": 6.440639972686768, "eval_runtime": 3.8611, "eval_samples_per_second": 1276.85, "eval_steps_per_second": 80.03, "step": 24000 }, { "epoch": 14.714204867006226, "eval_loss": 6.316535472869873, "eval_runtime": 3.8871, "eval_samples_per_second": 1268.291, "eval_steps_per_second": 79.493, "step": 26000 }, { "epoch": 15.846066779852858, "grad_norm": 4.2022857666015625, "learning_rate": 7e-05, "loss": 6.3494, "step": 28000 }, { "epoch": 15.846066779852858, "eval_loss": 6.149945259094238, "eval_runtime": 3.9074, "eval_samples_per_second": 1261.705, 
"eval_steps_per_second": 79.081, "step": 28000 }, { "epoch": 16.97792869269949, "eval_loss": 5.94103479385376, "eval_runtime": 3.8837, "eval_samples_per_second": 1269.393, "eval_steps_per_second": 79.562, "step": 30000 }, { "epoch": 18.109790605546124, "grad_norm": 4.1835408210754395, "learning_rate": 8e-05, "loss": 6.0156, "step": 32000 }, { "epoch": 18.109790605546124, "eval_loss": 5.63771390914917, "eval_runtime": 3.9903, "eval_samples_per_second": 1235.504, "eval_steps_per_second": 77.438, "step": 32000 }, { "epoch": 19.241652518392755, "eval_loss": 5.117396354675293, "eval_runtime": 3.9867, "eval_samples_per_second": 1236.617, "eval_steps_per_second": 77.508, "step": 34000 }, { "epoch": 20.37351443123939, "grad_norm": 3.6266136169433594, "learning_rate": 9e-05, "loss": 5.2999, "step": 36000 }, { "epoch": 20.37351443123939, "eval_loss": 4.855127811431885, "eval_runtime": 3.8488, "eval_samples_per_second": 1280.914, "eval_steps_per_second": 80.284, "step": 36000 }, { "epoch": 21.50537634408602, "eval_loss": 4.6650190353393555, "eval_runtime": 3.8768, "eval_samples_per_second": 1271.655, "eval_steps_per_second": 79.704, "step": 38000 }, { "epoch": 22.637238256932655, "grad_norm": 3.870995044708252, "learning_rate": 0.0001, "loss": 4.7633, "step": 40000 }, { "epoch": 22.637238256932655, "eval_loss": 4.496387481689453, "eval_runtime": 3.9179, "eval_samples_per_second": 1258.325, "eval_steps_per_second": 78.869, "step": 40000 }, { "epoch": 23.769100169779286, "eval_loss": 4.324868679046631, "eval_runtime": 3.916, "eval_samples_per_second": 1258.94, "eval_steps_per_second": 78.907, "step": 42000 }, { "epoch": 24.90096208262592, "grad_norm": 4.243465423583984, "learning_rate": 9.333333333333334e-05, "loss": 4.4471, "step": 44000 }, { "epoch": 24.90096208262592, "eval_loss": 4.211695194244385, "eval_runtime": 3.9298, "eval_samples_per_second": 1254.526, "eval_steps_per_second": 78.631, "step": 44000 }, { "epoch": 26.03282399547255, "eval_loss": 4.076672077178955, "eval_runtime": 3.9702, "eval_samples_per_second": 1241.746, "eval_steps_per_second": 77.829, "step": 46000 }, { "epoch": 27.164685908319186, "grad_norm": 4.304368495941162, "learning_rate": 8.666666666666667e-05, "loss": 4.1884, "step": 48000 }, { "epoch": 27.164685908319186, "eval_loss": 3.9929893016815186, "eval_runtime": 3.9338, "eval_samples_per_second": 1253.246, "eval_steps_per_second": 78.55, "step": 48000 }, { "epoch": 28.296547821165817, "eval_loss": 3.9029903411865234, "eval_runtime": 3.9273, "eval_samples_per_second": 1255.305, "eval_steps_per_second": 78.679, "step": 50000 }, { "epoch": 29.42840973401245, "grad_norm": 4.121150970458984, "learning_rate": 8e-05, "loss": 3.9939, "step": 52000 }, { "epoch": 29.42840973401245, "eval_loss": 3.8125526905059814, "eval_runtime": 3.9176, "eval_samples_per_second": 1258.429, "eval_steps_per_second": 78.875, "step": 52000 }, { "epoch": 30.560271646859082, "eval_loss": 3.7701168060302734, "eval_runtime": 3.9559, "eval_samples_per_second": 1246.255, "eval_steps_per_second": 78.112, "step": 54000 }, { "epoch": 31.692133559705717, "grad_norm": 4.4801177978515625, "learning_rate": 7.333333333333333e-05, "loss": 3.8479, "step": 56000 }, { "epoch": 31.692133559705717, "eval_loss": 3.6775288581848145, "eval_runtime": 3.8905, "eval_samples_per_second": 1267.203, "eval_steps_per_second": 79.425, "step": 56000 }, { "epoch": 32.82399547255235, "eval_loss": 3.643172025680542, "eval_runtime": 3.9657, "eval_samples_per_second": 1243.163, "eval_steps_per_second": 77.918, "step": 58000 }, { "epoch": 
33.95585738539898, "grad_norm": 4.373381614685059, "learning_rate": 6.666666666666667e-05, "loss": 3.7265, "step": 60000 }, { "epoch": 33.95585738539898, "eval_loss": 3.5951223373413086, "eval_runtime": 3.9071, "eval_samples_per_second": 1261.815, "eval_steps_per_second": 79.087, "step": 60000 }, { "epoch": 35.08771929824562, "eval_loss": 3.5469629764556885, "eval_runtime": 3.9205, "eval_samples_per_second": 1257.484, "eval_steps_per_second": 78.816, "step": 62000 }, { "epoch": 36.21958121109225, "grad_norm": 3.978059768676758, "learning_rate": 6e-05, "loss": 3.6305, "step": 64000 }, { "epoch": 36.21958121109225, "eval_loss": 3.5206313133239746, "eval_runtime": 3.9167, "eval_samples_per_second": 1258.71, "eval_steps_per_second": 78.893, "step": 64000 }, { "epoch": 37.35144312393888, "eval_loss": 3.494868278503418, "eval_runtime": 3.9115, "eval_samples_per_second": 1260.397, "eval_steps_per_second": 78.998, "step": 66000 }, { "epoch": 38.48330503678551, "grad_norm": 4.399171352386475, "learning_rate": 5.333333333333333e-05, "loss": 3.5483, "step": 68000 }, { "epoch": 38.48330503678551, "eval_loss": 3.476797103881836, "eval_runtime": 3.9327, "eval_samples_per_second": 1253.586, "eval_steps_per_second": 78.572, "step": 68000 }, { "epoch": 39.61516694963215, "eval_loss": 3.422701358795166, "eval_runtime": 3.878, "eval_samples_per_second": 1271.27, "eval_steps_per_second": 79.68, "step": 70000 }, { "epoch": 40.74702886247878, "grad_norm": 4.03302001953125, "learning_rate": 4.666666666666667e-05, "loss": 3.4798, "step": 72000 }, { "epoch": 40.74702886247878, "eval_loss": 3.373471975326538, "eval_runtime": 3.9535, "eval_samples_per_second": 1247.004, "eval_steps_per_second": 78.159, "step": 72000 }, { "epoch": 41.87889077532541, "eval_loss": 3.3893837928771973, "eval_runtime": 3.8713, "eval_samples_per_second": 1273.464, "eval_steps_per_second": 79.818, "step": 74000 }, { "epoch": 43.01075268817204, "grad_norm": 4.291697978973389, "learning_rate": 4e-05, "loss": 3.4256, "step": 76000 }, { "epoch": 43.01075268817204, "eval_loss": 3.354308843612671, "eval_runtime": 3.9005, "eval_samples_per_second": 1263.926, "eval_steps_per_second": 79.22, "step": 76000 }, { "epoch": 44.14261460101868, "eval_loss": 3.3211026191711426, "eval_runtime": 3.9503, "eval_samples_per_second": 1248.001, "eval_steps_per_second": 78.222, "step": 78000 }, { "epoch": 45.27447651386531, "grad_norm": 4.428945541381836, "learning_rate": 3.3333333333333335e-05, "loss": 3.3707, "step": 80000 }, { "epoch": 45.27447651386531, "eval_loss": 3.3156309127807617, "eval_runtime": 3.9036, "eval_samples_per_second": 1262.941, "eval_steps_per_second": 79.158, "step": 80000 }, { "epoch": 46.40633842671194, "eval_loss": 3.289860963821411, "eval_runtime": 3.8723, "eval_samples_per_second": 1273.15, "eval_steps_per_second": 79.798, "step": 82000 }, { "epoch": 47.53820033955857, "grad_norm": 4.017796516418457, "learning_rate": 2.6666666666666667e-05, "loss": 3.3325, "step": 84000 }, { "epoch": 47.53820033955857, "eval_loss": 3.254521369934082, "eval_runtime": 3.8949, "eval_samples_per_second": 1265.772, "eval_steps_per_second": 79.335, "step": 84000 } ], "logging_steps": 4000, "max_steps": 100000, "num_input_tokens_seen": 0, "num_train_epochs": 57, "save_steps": 4000, "stateful_callbacks": { "TrainerControl": { "args": { "should_epoch_stop": false, "should_evaluate": false, "should_log": false, "should_save": true, "should_training_stop": false }, "attributes": {} } }, "total_flos": 2.6199932172632064e+16, "train_batch_size": 16, "trial_name": 
null, "trial_params": null }