llava-v1.5-7b-lora-change_order / trainer_state.json
{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 1.0,
"eval_steps": 500,
"global_step": 125,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.01,
"learning_rate": 5e-05,
"loss": 1.9897,
"step": 1
},
{
"epoch": 0.02,
"learning_rate": 0.0001,
"loss": 1.9968,
"step": 2
},
{
"epoch": 0.02,
"learning_rate": 0.00015000000000000001,
"loss": 1.8464,
"step": 3
},
{
"epoch": 0.03,
"learning_rate": 0.0002,
"loss": 1.5344,
"step": 4
},
{
"epoch": 0.04,
"learning_rate": 0.00019996629653035126,
"loss": 1.5001,
"step": 5
},
{
"epoch": 0.05,
"learning_rate": 0.00019986520883988232,
"loss": 1.5331,
"step": 6
},
{
"epoch": 0.06,
"learning_rate": 0.00019969680506871137,
"loss": 1.4421,
"step": 7
},
{
"epoch": 0.06,
"learning_rate": 0.00019946119873266613,
"loss": 1.3842,
"step": 8
},
{
"epoch": 0.07,
"learning_rate": 0.00019915854864676664,
"loss": 1.2945,
"step": 9
},
{
"epoch": 0.08,
"learning_rate": 0.00019878905881817252,
"loss": 1.3725,
"step": 10
},
{
"epoch": 0.09,
"learning_rate": 0.00019835297830866826,
"loss": 1.3138,
"step": 11
},
{
"epoch": 0.1,
"learning_rate": 0.00019785060106677818,
"loss": 1.2199,
"step": 12
},
{
"epoch": 0.1,
"learning_rate": 0.00019728226572962473,
"loss": 1.2444,
"step": 13
},
{
"epoch": 0.11,
"learning_rate": 0.0001966483553946637,
"loss": 1.2858,
"step": 14
},
{
"epoch": 0.12,
"learning_rate": 0.00019594929736144976,
"loss": 1.1798,
"step": 15
},
{
"epoch": 0.13,
"learning_rate": 0.00019518556284360696,
"loss": 1.1977,
"step": 16
},
{
"epoch": 0.14,
"learning_rate": 0.0001943576666511982,
"loss": 1.2798,
"step": 17
},
{
"epoch": 0.14,
"learning_rate": 0.0001934661668437073,
"loss": 1.1731,
"step": 18
},
{
"epoch": 0.15,
"learning_rate": 0.0001925116643538684,
"loss": 1.2879,
"step": 19
},
{
"epoch": 0.16,
"learning_rate": 0.00019149480258259533,
"loss": 1.2748,
"step": 20
},
{
"epoch": 0.17,
"learning_rate": 0.00019041626696528503,
"loss": 1.169,
"step": 21
},
{
"epoch": 0.18,
"learning_rate": 0.0001892767845097864,
"loss": 1.3298,
"step": 22
},
{
"epoch": 0.18,
"learning_rate": 0.00018807712330634642,
"loss": 1.3213,
"step": 23
},
{
"epoch": 0.19,
"learning_rate": 0.0001868180920098644,
"loss": 1.2148,
"step": 24
},
{
"epoch": 0.2,
"learning_rate": 0.00018550053929480202,
"loss": 1.2735,
"step": 25
},
{
"epoch": 0.21,
"learning_rate": 0.00018412535328311814,
"loss": 1.1605,
"step": 26
},
{
"epoch": 0.22,
"learning_rate": 0.0001826934609456129,
"loss": 1.1582,
"step": 27
},
{
"epoch": 0.22,
"learning_rate": 0.00018120582747708502,
"loss": 1.1273,
"step": 28
},
{
"epoch": 0.23,
"learning_rate": 0.0001796634556457236,
"loss": 1.1209,
"step": 29
},
{
"epoch": 0.24,
"learning_rate": 0.0001780673851171728,
"loss": 1.0741,
"step": 30
},
{
"epoch": 0.25,
"learning_rate": 0.00017641869175372493,
"loss": 1.1887,
"step": 31
},
{
"epoch": 0.26,
"learning_rate": 0.00017471848688911464,
"loss": 1.215,
"step": 32
},
{
"epoch": 0.26,
"learning_rate": 0.000172967916579403,
"loss": 1.1515,
"step": 33
},
{
"epoch": 0.27,
"learning_rate": 0.00017116816083045602,
"loss": 1.1635,
"step": 34
},
{
"epoch": 0.28,
"learning_rate": 0.0001693204328025389,
"loss": 1.1609,
"step": 35
},
{
"epoch": 0.29,
"learning_rate": 0.00016742597799256182,
"loss": 1.199,
"step": 36
},
{
"epoch": 0.3,
"learning_rate": 0.00016548607339452853,
"loss": 1.2147,
"step": 37
},
{
"epoch": 0.3,
"learning_rate": 0.00016350202663875386,
"loss": 1.1347,
"step": 38
},
{
"epoch": 0.31,
"learning_rate": 0.0001614751751104301,
"loss": 1.1196,
"step": 39
},
{
"epoch": 0.32,
"learning_rate": 0.00015940688504813662,
"loss": 1.1205,
"step": 40
},
{
"epoch": 0.33,
"learning_rate": 0.00015729855062290022,
"loss": 1.1248,
"step": 41
},
{
"epoch": 0.34,
"learning_rate": 0.00015515159299842707,
"loss": 1.0917,
"step": 42
},
{
"epoch": 0.34,
"learning_rate": 0.00015296745937313987,
"loss": 1.1394,
"step": 43
},
{
"epoch": 0.35,
"learning_rate": 0.00015074762200466556,
"loss": 1.1209,
"step": 44
},
{
"epoch": 0.36,
"learning_rate": 0.00014849357721743168,
"loss": 1.1102,
"step": 45
},
{
"epoch": 0.37,
"learning_rate": 0.00014620684439403962,
"loss": 1.0751,
"step": 46
},
{
"epoch": 0.38,
"learning_rate": 0.0001438889649510956,
"loss": 1.2496,
"step": 47
},
{
"epoch": 0.38,
"learning_rate": 0.00014154150130018866,
"loss": 1.1825,
"step": 48
},
{
"epoch": 0.39,
"learning_rate": 0.00013916603579471705,
"loss": 1.1389,
"step": 49
},
{
"epoch": 0.4,
"learning_rate": 0.000136764169663272,
"loss": 1.186,
"step": 50
},
{
"epoch": 0.41,
"learning_rate": 0.00013433752193029886,
"loss": 1.1931,
"step": 51
},
{
"epoch": 0.42,
"learning_rate": 0.00013188772832476188,
"loss": 1.2447,
"step": 52
},
{
"epoch": 0.42,
"learning_rate": 0.00012941644017754964,
"loss": 1.2026,
"step": 53
},
{
"epoch": 0.43,
"learning_rate": 0.00012692532330836346,
"loss": 1.1829,
"step": 54
},
{
"epoch": 0.44,
"learning_rate": 0.00012441605690283915,
"loss": 1.1138,
"step": 55
},
{
"epoch": 0.45,
"learning_rate": 0.0001218903323806595,
"loss": 1.071,
"step": 56
},
{
"epoch": 0.46,
"learning_rate": 0.00011934985225541998,
"loss": 1.1258,
"step": 57
},
{
"epoch": 0.46,
"learning_rate": 0.00011679632898701649,
"loss": 1.0669,
"step": 58
},
{
"epoch": 0.47,
"learning_rate": 0.00011423148382732853,
"loss": 1.1091,
"step": 59
},
{
"epoch": 0.48,
"learning_rate": 0.00011165704565997593,
"loss": 1.2107,
"step": 60
},
{
"epoch": 0.49,
"learning_rate": 0.00010907474983493144,
"loss": 1.1345,
"step": 61
},
{
"epoch": 0.5,
"learning_rate": 0.0001064863369987743,
"loss": 1.1462,
"step": 62
},
{
"epoch": 0.5,
"learning_rate": 0.00010389355192137377,
"loss": 1.0794,
"step": 63
},
{
"epoch": 0.51,
"learning_rate": 0.0001012981423197931,
"loss": 1.0142,
"step": 64
},
{
"epoch": 0.52,
"learning_rate": 9.870185768020693e-05,
"loss": 1.1707,
"step": 65
},
{
"epoch": 0.53,
"learning_rate": 9.610644807862625e-05,
"loss": 1.2325,
"step": 66
},
{
"epoch": 0.54,
"learning_rate": 9.35136630012257e-05,
"loss": 1.099,
"step": 67
},
{
"epoch": 0.54,
"learning_rate": 9.092525016506858e-05,
"loss": 1.1396,
"step": 68
},
{
"epoch": 0.55,
"learning_rate": 8.83429543400241e-05,
"loss": 1.0698,
"step": 69
},
{
"epoch": 0.56,
"learning_rate": 8.57685161726715e-05,
"loss": 1.1392,
"step": 70
},
{
"epoch": 0.57,
"learning_rate": 8.320367101298351e-05,
"loss": 1.1096,
"step": 71
},
{
"epoch": 0.58,
"learning_rate": 8.065014774458003e-05,
"loss": 1.0607,
"step": 72
},
{
"epoch": 0.58,
"learning_rate": 7.810966761934053e-05,
"loss": 1.0792,
"step": 73
},
{
"epoch": 0.59,
"learning_rate": 7.558394309716088e-05,
"loss": 1.2152,
"step": 74
},
{
"epoch": 0.6,
"learning_rate": 7.307467669163655e-05,
"loss": 1.1314,
"step": 75
},
{
"epoch": 0.61,
"learning_rate": 7.058355982245037e-05,
"loss": 1.1976,
"step": 76
},
{
"epoch": 0.62,
"learning_rate": 6.811227167523815e-05,
"loss": 1.1721,
"step": 77
},
{
"epoch": 0.62,
"learning_rate": 6.566247806970119e-05,
"loss": 1.0145,
"step": 78
},
{
"epoch": 0.63,
"learning_rate": 6.323583033672799e-05,
"loss": 1.1097,
"step": 79
},
{
"epoch": 0.64,
"learning_rate": 6.083396420528298e-05,
"loss": 1.1235,
"step": 80
},
{
"epoch": 0.65,
"learning_rate": 5.845849869981137e-05,
"loss": 1.1021,
"step": 81
},
{
"epoch": 0.66,
"learning_rate": 5.611103504890444e-05,
"loss": 0.9463,
"step": 82
},
{
"epoch": 0.66,
"learning_rate": 5.379315560596038e-05,
"loss": 1.2438,
"step": 83
},
{
"epoch": 0.67,
"learning_rate": 5.1506422782568345e-05,
"loss": 0.9885,
"step": 84
},
{
"epoch": 0.68,
"learning_rate": 4.9252377995334444e-05,
"loss": 1.1437,
"step": 85
},
{
"epoch": 0.69,
"learning_rate": 4.703254062686017e-05,
"loss": 1.0583,
"step": 86
},
{
"epoch": 0.7,
"learning_rate": 4.484840700157295e-05,
"loss": 1.0868,
"step": 87
},
{
"epoch": 0.7,
"learning_rate": 4.270144937709981e-05,
"loss": 1.1543,
"step": 88
},
{
"epoch": 0.71,
"learning_rate": 4.059311495186338e-05,
"loss": 1.1438,
"step": 89
},
{
"epoch": 0.72,
"learning_rate": 3.852482488956992e-05,
"loss": 1.1299,
"step": 90
},
{
"epoch": 0.73,
"learning_rate": 3.649797336124615e-05,
"loss": 1.0474,
"step": 91
},
{
"epoch": 0.74,
"learning_rate": 3.45139266054715e-05,
"loss": 1.1362,
"step": 92
},
{
"epoch": 0.74,
"learning_rate": 3.257402200743821e-05,
"loss": 1.1321,
"step": 93
},
{
"epoch": 0.75,
"learning_rate": 3.0679567197461134e-05,
"loss": 1.0806,
"step": 94
},
{
"epoch": 0.76,
"learning_rate": 2.8831839169543996e-05,
"loss": 1.0315,
"step": 95
},
{
"epoch": 0.77,
"learning_rate": 2.7032083420597e-05,
"loss": 1.0436,
"step": 96
},
{
"epoch": 0.78,
"learning_rate": 2.528151311088537e-05,
"loss": 1.1125,
"step": 97
},
{
"epoch": 0.78,
"learning_rate": 2.3581308246275103e-05,
"loss": 1.0054,
"step": 98
},
{
"epoch": 0.79,
"learning_rate": 2.1932614882827197e-05,
"loss": 1.1633,
"step": 99
},
{
"epoch": 0.8,
"learning_rate": 2.03365443542764e-05,
"loss": 1.1341,
"step": 100
},
{
"epoch": 0.81,
"learning_rate": 1.879417252291502e-05,
"loss": 1.1938,
"step": 101
},
{
"epoch": 0.82,
"learning_rate": 1.730653905438714e-05,
"loss": 1.169,
"step": 102
},
{
"epoch": 0.82,
"learning_rate": 1.587464671688187e-05,
"loss": 0.9951,
"step": 103
},
{
"epoch": 0.83,
"learning_rate": 1.4499460705197998e-05,
"loss": 1.2001,
"step": 104
},
{
"epoch": 0.84,
"learning_rate": 1.3181907990135622e-05,
"loss": 1.1739,
"step": 105
},
{
"epoch": 0.85,
"learning_rate": 1.1922876693653585e-05,
"loss": 1.0842,
"step": 106
},
{
"epoch": 0.86,
"learning_rate": 1.0723215490213634e-05,
"loss": 1.0556,
"step": 107
},
{
"epoch": 0.86,
"learning_rate": 9.583733034714981e-06,
"loss": 1.0925,
"step": 108
},
{
"epoch": 0.87,
"learning_rate": 8.505197417404687e-06,
"loss": 1.2059,
"step": 109
},
{
"epoch": 0.88,
"learning_rate": 7.488335646131628e-06,
"loss": 1.0587,
"step": 110
},
{
"epoch": 0.89,
"learning_rate": 6.533833156292679e-06,
"loss": 1.1123,
"step": 111
},
{
"epoch": 0.9,
"learning_rate": 5.6423333488018095e-06,
"loss": 1.0905,
"step": 112
},
{
"epoch": 0.9,
"learning_rate": 4.8144371563930476e-06,
"loss": 1.0838,
"step": 113
},
{
"epoch": 0.91,
"learning_rate": 4.050702638550275e-06,
"loss": 1.0772,
"step": 114
},
{
"epoch": 0.92,
"learning_rate": 3.3516446053363015e-06,
"loss": 1.0842,
"step": 115
},
{
"epoch": 0.93,
"learning_rate": 2.717734270375272e-06,
"loss": 1.211,
"step": 116
},
{
"epoch": 0.94,
"learning_rate": 2.1493989332218468e-06,
"loss": 1.0427,
"step": 117
},
{
"epoch": 0.94,
"learning_rate": 1.6470216913317626e-06,
"loss": 1.0476,
"step": 118
},
{
"epoch": 0.95,
"learning_rate": 1.2109411818274852e-06,
"loss": 1.0105,
"step": 119
},
{
"epoch": 0.96,
"learning_rate": 8.41451353233369e-07,
"loss": 1.0489,
"step": 120
},
{
"epoch": 0.97,
"learning_rate": 5.388012673338661e-07,
"loss": 1.0077,
"step": 121
},
{
"epoch": 0.98,
"learning_rate": 3.0319493128866396e-07,
"loss": 1.1216,
"step": 122
},
{
"epoch": 0.98,
"learning_rate": 1.3479116011769767e-07,
"loss": 1.0093,
"step": 123
},
{
"epoch": 0.99,
"learning_rate": 3.370346964876036e-08,
"loss": 1.0159,
"step": 124
},
{
"epoch": 1.0,
"learning_rate": 0.0,
"loss": 1.0134,
"step": 125
},
{
"epoch": 1.0,
"step": 125,
"total_flos": 1906959974400.0,
"train_loss": 1.1714857983589173,
"train_runtime": 990.3544,
"train_samples_per_second": 2.019,
"train_steps_per_second": 0.126
}
],
"logging_steps": 1.0,
"max_steps": 125,
"num_input_tokens_seen": 0,
"num_train_epochs": 1,
"save_steps": 50000,
"total_flos": 1906959974400.0,
"train_batch_size": 16,
"trial_name": null,
"trial_params": null
}