Qwen2.5-7B-Multi-Parallel / trainer_state.json
{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 2.9835189309576835,
"eval_steps": 500,
"global_step": 420,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.07126948775055679,
"grad_norm": 3.7971609533442936,
"learning_rate": 4.761904761904762e-06,
"loss": 0.8475,
"step": 10
},
{
"epoch": 0.14253897550111358,
"grad_norm": 0.9295260271504954,
"learning_rate": 9.523809523809525e-06,
"loss": 0.5121,
"step": 20
},
{
"epoch": 0.21380846325167038,
"grad_norm": 0.7378111786994856,
"learning_rate": 1.4285714285714287e-05,
"loss": 0.4342,
"step": 30
},
{
"epoch": 0.28507795100222716,
"grad_norm": 0.706728951221838,
"learning_rate": 1.904761904761905e-05,
"loss": 0.3944,
"step": 40
},
{
"epoch": 0.35634743875278396,
"grad_norm": 0.8090312873781099,
"learning_rate": 1.997790438338385e-05,
"loss": 0.3626,
"step": 50
},
{
"epoch": 0.42761692650334077,
"grad_norm": 0.6955054433411858,
"learning_rate": 1.9888308262251286e-05,
"loss": 0.3474,
"step": 60
},
{
"epoch": 0.49888641425389757,
"grad_norm": 0.6161974091570769,
"learning_rate": 1.973044870579824e-05,
"loss": 0.3277,
"step": 70
},
{
"epoch": 0.5701559020044543,
"grad_norm": 0.6402632736940507,
"learning_rate": 1.9505415489478293e-05,
"loss": 0.3141,
"step": 80
},
{
"epoch": 0.6414253897550112,
"grad_norm": 0.6353304250627707,
"learning_rate": 1.921476211870408e-05,
"loss": 0.2979,
"step": 90
},
{
"epoch": 0.7126948775055679,
"grad_norm": 0.6366565830233913,
"learning_rate": 1.8860495104301346e-05,
"loss": 0.2871,
"step": 100
},
{
"epoch": 0.7839643652561247,
"grad_norm": 0.6354146476657556,
"learning_rate": 1.844506011066308e-05,
"loss": 0.2723,
"step": 110
},
{
"epoch": 0.8552338530066815,
"grad_norm": 0.5882708445945315,
"learning_rate": 1.7971325072229227e-05,
"loss": 0.2681,
"step": 120
},
{
"epoch": 0.9265033407572383,
"grad_norm": 0.5762368078327551,
"learning_rate": 1.7442560394846518e-05,
"loss": 0.2559,
"step": 130
},
{
"epoch": 0.9977728285077951,
"grad_norm": 0.5958357402554844,
"learning_rate": 1.686241637868734e-05,
"loss": 0.2437,
"step": 140
},
{
"epoch": 1.064142538975501,
"grad_norm": 0.6936917056904216,
"learning_rate": 1.6234898018587336e-05,
"loss": 0.1816,
"step": 150
},
{
"epoch": 1.135412026726058,
"grad_norm": 0.5744999045875022,
"learning_rate": 1.5564337355766412e-05,
"loss": 0.1727,
"step": 160
},
{
"epoch": 1.2066815144766148,
"grad_norm": 0.5316452824085909,
"learning_rate": 1.4855363571801523e-05,
"loss": 0.1644,
"step": 170
},
{
"epoch": 1.2779510022271716,
"grad_norm": 0.4952795924197091,
"learning_rate": 1.4112871031306118e-05,
"loss": 0.1631,
"step": 180
},
{
"epoch": 1.3492204899777283,
"grad_norm": 0.4880859442135627,
"learning_rate": 1.3341985493931877e-05,
"loss": 0.1615,
"step": 190
},
{
"epoch": 1.4204899777282851,
"grad_norm": 0.4845969774780985,
"learning_rate": 1.2548028728946548e-05,
"loss": 0.1601,
"step": 200
},
{
"epoch": 1.4917594654788418,
"grad_norm": 0.5021044989896025,
"learning_rate": 1.1736481776669307e-05,
"loss": 0.1583,
"step": 210
},
{
"epoch": 1.5630289532293986,
"grad_norm": 0.44605081296525473,
"learning_rate": 1.0912947110386484e-05,
"loss": 0.1541,
"step": 220
},
{
"epoch": 1.6342984409799555,
"grad_norm": 0.46635765009201846,
"learning_rate": 1.0083109959960974e-05,
"loss": 0.154,
"step": 230
},
{
"epoch": 1.7055679287305123,
"grad_norm": 0.45234389725218227,
"learning_rate": 9.252699064135759e-06,
"loss": 0.1536,
"step": 240
},
{
"epoch": 1.7768374164810692,
"grad_norm": 0.47812323330213485,
"learning_rate": 8.427447122476148e-06,
"loss": 0.1509,
"step": 250
},
{
"epoch": 1.8481069042316258,
"grad_norm": 0.4452592705770747,
"learning_rate": 7.613051219968624e-06,
"loss": 0.1475,
"step": 260
},
{
"epoch": 1.9193763919821827,
"grad_norm": 0.447493408892927,
"learning_rate": 6.815133497483157e-06,
"loss": 0.1478,
"step": 270
},
{
"epoch": 1.9906458797327393,
"grad_norm": 0.4643827774343368,
"learning_rate": 6.039202339608432e-06,
"loss": 0.1469,
"step": 280
},
{
"epoch": 2.057015590200445,
"grad_norm": 0.5296378146178365,
"learning_rate": 5.290614347797802e-06,
"loss": 0.0958,
"step": 290
},
{
"epoch": 2.128285077951002,
"grad_norm": 0.4045550908197575,
"learning_rate": 4.5745373613424075e-06,
"loss": 0.0776,
"step": 300
},
{
"epoch": 2.199554565701559,
"grad_norm": 0.4040354519811954,
"learning_rate": 3.89591478145437e-06,
"loss": 0.0776,
"step": 310
},
{
"epoch": 2.270824053452116,
"grad_norm": 0.42857601545200796,
"learning_rate": 3.2594314447468457e-06,
"loss": 0.0754,
"step": 320
},
{
"epoch": 2.3420935412026727,
"grad_norm": 0.3670300653980398,
"learning_rate": 2.669481281701739e-06,
"loss": 0.0748,
"step": 330
},
{
"epoch": 2.4133630289532295,
"grad_norm": 0.3902364845802205,
"learning_rate": 2.130136983393112e-06,
"loss": 0.0704,
"step": 340
},
{
"epoch": 2.4846325167037864,
"grad_norm": 0.377128443473291,
"learning_rate": 1.6451218858706374e-06,
"loss": 0.0742,
"step": 350
},
{
"epoch": 2.5559020044543432,
"grad_norm": 0.3930553430015275,
"learning_rate": 1.2177842662977136e-06,
"loss": 0.0733,
"step": 360
},
{
"epoch": 2.6271714922048996,
"grad_norm": 0.3590982366224671,
"learning_rate": 8.510742282896545e-07,
"loss": 0.0717,
"step": 370
},
{
"epoch": 2.6984409799554565,
"grad_norm": 0.3746976787822436,
"learning_rate": 5.475233360227516e-07,
"loss": 0.0709,
"step": 380
},
{
"epoch": 2.7697104677060134,
"grad_norm": 0.36100542307641714,
"learning_rate": 3.0922713770922155e-07,
"loss": 0.0723,
"step": 390
},
{
"epoch": 2.8409799554565702,
"grad_norm": 0.3896273158596708,
"learning_rate": 1.3783069908621772e-07,
"loss": 0.0727,
"step": 400
},
{
"epoch": 2.912249443207127,
"grad_norm": 0.385255671671692,
"learning_rate": 3.451724678784518e-08,
"loss": 0.0727,
"step": 410
},
{
"epoch": 2.9835189309576835,
"grad_norm": 0.38370569586805797,
"learning_rate": 0.0,
"loss": 0.072,
"step": 420
},
{
"epoch": 2.9835189309576835,
"step": 420,
"total_flos": 234603226333184.0,
"train_loss": 0.20078422256878445,
"train_runtime": 13460.8563,
"train_samples_per_second": 16.011,
"train_steps_per_second": 0.031
}
],
"logging_steps": 10,
"max_steps": 420,
"num_input_tokens_seen": 0,
"num_train_epochs": 3,
"save_steps": 250,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 234603226333184.0,
"train_batch_size": 8,
"trial_name": null,
"trial_params": null
}
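
The log entries above can be inspected programmatically. Below is a minimal sketch (not part of the original file) that loads the state and plots the logged training loss and learning-rate schedule; the local path "trainer_state.json" and the use of matplotlib are assumptions.

# Sketch: load trainer_state.json and plot loss / learning-rate per logged step.
import json

import matplotlib.pyplot as plt

with open("trainer_state.json") as f:  # assumed local copy of this file
    state = json.load(f)

# Keep only per-step entries; the final summary entry has no "loss" key.
logs = [e for e in state["log_history"] if "loss" in e]
steps = [e["step"] for e in logs]
losses = [e["loss"] for e in logs]
lrs = [e["learning_rate"] for e in logs]

fig, (ax1, ax2) = plt.subplots(2, 1, sharex=True)
ax1.plot(steps, losses)
ax1.set_ylabel("training loss")
ax2.plot(steps, lrs)
ax2.set_ylabel("learning rate")
ax2.set_xlabel("global step")
plt.tight_layout()
plt.show()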