{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 1.0,
"eval_steps": 500,
"global_step": 5237,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.019094901661256443,
"grad_norm": 0.10446564108133316,
"learning_rate": 2.0000000000000003e-06,
"loss": 2.4329,
"step": 100
},
{
"epoch": 0.038189803322512886,
"grad_norm": 0.13006480038166046,
"learning_rate": 4.000000000000001e-06,
"loss": 2.4426,
"step": 200
},
{
"epoch": 0.057284704983769336,
"grad_norm": 0.22490981221199036,
"learning_rate": 6e-06,
"loss": 2.4012,
"step": 300
},
{
"epoch": 0.07637960664502577,
"grad_norm": 0.2587246894836426,
"learning_rate": 8.000000000000001e-06,
"loss": 2.3775,
"step": 400
},
{
"epoch": 0.09547450830628222,
"grad_norm": 0.3504439890384674,
"learning_rate": 1e-05,
"loss": 2.3528,
"step": 500
},
{
"epoch": 0.11456940996753867,
"grad_norm": 0.4341156482696533,
"learning_rate": 1.2e-05,
"loss": 2.3048,
"step": 600
},
{
"epoch": 0.1336643116287951,
"grad_norm": 0.5058615803718567,
"learning_rate": 1.4e-05,
"loss": 2.2659,
"step": 700
},
{
"epoch": 0.15275921329005154,
"grad_norm": 0.5303977131843567,
"learning_rate": 1.6000000000000003e-05,
"loss": 2.2519,
"step": 800
},
{
"epoch": 0.171854114951308,
"grad_norm": 0.6431277990341187,
"learning_rate": 1.8e-05,
"loss": 2.2328,
"step": 900
},
{
"epoch": 0.19094901661256444,
"grad_norm": 0.6453608274459839,
"learning_rate": 2e-05,
"loss": 2.1953,
"step": 1000
},
{
"epoch": 0.21004391827382088,
"grad_norm": 0.6129643321037292,
"learning_rate": 1.99725239787624e-05,
"loss": 2.2064,
"step": 1100
},
{
"epoch": 0.22913881993507734,
"grad_norm": 0.6118465662002563,
"learning_rate": 1.98902469013982e-05,
"loss": 2.151,
"step": 1200
},
{
"epoch": 0.24823372159633378,
"grad_norm": 0.8548713326454163,
"learning_rate": 1.975362089725241e-05,
"loss": 2.1276,
"step": 1300
},
{
"epoch": 0.2673286232575902,
"grad_norm": 0.8343802690505981,
"learning_rate": 1.9563396754123335e-05,
"loss": 2.1236,
"step": 1400
},
{
"epoch": 0.28642352491884665,
"grad_norm": 0.9589895606040955,
"learning_rate": 1.9320619792530275e-05,
"loss": 2.1004,
"step": 1500
},
{
"epoch": 0.3055184265801031,
"grad_norm": 0.8541079759597778,
"learning_rate": 1.902662412146378e-05,
"loss": 2.0843,
"step": 1600
},
{
"epoch": 0.3246133282413596,
"grad_norm": 0.7465143799781799,
"learning_rate": 1.8683025307184242e-05,
"loss": 2.0586,
"step": 1700
},
{
"epoch": 0.343708229902616,
"grad_norm": 0.8386638760566711,
"learning_rate": 1.829171149535534e-05,
"loss": 2.088,
"step": 1800
},
{
"epoch": 0.36280313156387245,
"grad_norm": 0.9529417157173157,
"learning_rate": 1.7854833035297944e-05,
"loss": 2.0816,
"step": 1900
},
{
"epoch": 0.3818980332251289,
"grad_norm": 0.6660144329071045,
"learning_rate": 1.7374790663381416e-05,
"loss": 2.0369,
"step": 2000
},
{
"epoch": 0.4009929348863853,
"grad_norm": 0.8226873278617859,
"learning_rate": 1.6854222310486902e-05,
"loss": 2.0412,
"step": 2100
},
{
"epoch": 0.42008783654764176,
"grad_norm": 0.8749469518661499,
"learning_rate": 1.6295988606038343e-05,
"loss": 2.0461,
"step": 2200
},
{
"epoch": 0.43918273820889825,
"grad_norm": 0.9127281904220581,
"learning_rate": 1.5703157158259546e-05,
"loss": 2.0094,
"step": 2300
},
{
"epoch": 0.4582776398701547,
"grad_norm": 1.1835373640060425,
"learning_rate": 1.50789856970404e-05,
"loss": 2.0119,
"step": 2400
},
{
"epoch": 0.4773725415314111,
"grad_norm": 0.8751312494277954,
"learning_rate": 1.4426904172045787e-05,
"loss": 2.0382,
"step": 2500
},
{
"epoch": 0.49646744319266756,
"grad_norm": 0.8412628173828125,
"learning_rate": 1.3750495904441577e-05,
"loss": 2.0671,
"step": 2600
},
{
"epoch": 0.515562344853924,
"grad_norm": 0.9513535499572754,
"learning_rate": 1.3053477895812972e-05,
"loss": 2.0278,
"step": 2700
},
{
"epoch": 0.5346572465151804,
"grad_norm": 0.7872034907341003,
"learning_rate": 1.2339680402481584e-05,
"loss": 2.0902,
"step": 2800
},
{
"epoch": 0.5537521481764369,
"grad_norm": 1.064986228942871,
"learning_rate": 1.1613025887464642e-05,
"loss": 1.9987,
"step": 2900
},
{
"epoch": 0.5728470498376933,
"grad_norm": 0.896093487739563,
"learning_rate": 1.0877507465739541e-05,
"loss": 2.0385,
"step": 3000
},
{
"epoch": 0.5919419514989498,
"grad_norm": 0.874965250492096,
"learning_rate": 1.0137166961261478e-05,
"loss": 2.0305,
"step": 3100
},
{
"epoch": 0.6110368531602062,
"grad_norm": 1.0294139385223389,
"learning_rate": 9.39607269631527e-06,
"loss": 2.0021,
"step": 3200
},
{
"epoch": 0.6301317548214627,
"grad_norm": 0.9223864078521729,
"learning_rate": 8.658297135253469e-06,
"loss": 2.0041,
"step": 3300
},
{
"epoch": 0.6492266564827192,
"grad_norm": 0.946811318397522,
"learning_rate": 7.927894505472926e-06,
"loss": 2.0528,
"step": 3400
},
{
"epoch": 0.6683215581439755,
"grad_norm": 1.025842308998108,
"learning_rate": 7.208878518607228e-06,
"loss": 2.0344,
"step": 3500
},
{
"epoch": 0.687416459805232,
"grad_norm": 1.156582236289978,
"learning_rate": 6.5052003143614155e-06,
"loss": 2.0005,
"step": 3600
},
{
"epoch": 0.7065113614664884,
"grad_norm": 1.553145170211792,
"learning_rate": 5.8207267481923655e-06,
"loss": 2.0142,
"step": 3700
},
{
"epoch": 0.7256062631277449,
"grad_norm": 0.8570035099983215,
"learning_rate": 5.159219142148193e-06,
"loss": 2.0181,
"step": 3800
},
{
"epoch": 0.7447011647890014,
"grad_norm": 0.9739204049110413,
"learning_rate": 4.524312615635407e-06,
"loss": 2.0573,
"step": 3900
},
{
"epoch": 0.7637960664502578,
"grad_norm": 1.0215950012207031,
"learning_rate": 3.919496109695276e-06,
"loss": 2.0368,
"step": 4000
},
{
"epoch": 0.7828909681115143,
"grad_norm": 1.1113394498825073,
"learning_rate": 3.348093214560212e-06,
"loss": 2.0571,
"step": 4100
},
{
"epoch": 0.8019858697727706,
"grad_norm": 1.0689241886138916,
"learning_rate": 2.8132439058466056e-06,
"loss": 2.006,
"step": 4200
},
{
"epoch": 0.8210807714340271,
"grad_norm": 1.0689373016357422,
"learning_rate": 2.317887289747488e-06,
"loss": 2.0027,
"step": 4300
},
{
"epoch": 0.8401756730952835,
"grad_norm": 1.0181564092636108,
"learning_rate": 1.8647454520436826e-06,
"loss": 2.0088,
"step": 4400
},
{
"epoch": 0.85927057475654,
"grad_norm": 0.9078167080879211,
"learning_rate": 1.4563084996864662e-06,
"loss": 2.0433,
"step": 4500
},
{
"epoch": 0.8783654764177965,
"grad_norm": 1.2132090330123901,
"learning_rate": 1.094820877151277e-06,
"loss": 2.0174,
"step": 4600
},
{
"epoch": 0.8974603780790529,
"grad_norm": 1.361141324043274,
"learning_rate": 7.822690327568993e-07,
"loss": 2.0128,
"step": 4700
},
{
"epoch": 0.9165552797403094,
"grad_norm": 1.0606088638305664,
"learning_rate": 5.203705027262185e-07,
"loss": 1.99,
"step": 4800
},
{
"epoch": 0.9356501814015658,
"grad_norm": 1.7803822755813599,
"learning_rate": 3.105644729738777e-07,
"loss": 2.0137,
"step": 4900
},
{
"epoch": 0.9547450830628222,
"grad_norm": 0.9340605735778809,
"learning_rate": 1.5400387048572608e-07,
"loss": 2.0485,
"step": 5000
},
{
"epoch": 0.9738399847240786,
"grad_norm": 0.8442704081535339,
"learning_rate": 5.154902774955339e-08,
"loss": 2.0174,
"step": 5100
},
{
"epoch": 0.9929348863853351,
"grad_norm": 0.9462343454360962,
"learning_rate": 3.762955052341033e-09,
"loss": 1.9595,
"step": 5200
},
{
"epoch": 1.0,
"step": 5237,
"total_flos": 9.539063611392e+16,
"train_loss": 2.0977558706333466,
"train_runtime": 845.4487,
"train_samples_per_second": 12.389,
"train_steps_per_second": 6.194
}
],
"logging_steps": 100,
"max_steps": 5237,
"num_input_tokens_seen": 0,
"num_train_epochs": 1,
"save_steps": 500,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 9.539063611392e+16,
"train_batch_size": 2,
"trial_name": null,
"trial_params": null
}