JoshMe1 committed
Commit b8fb8ee · verified · 1 Parent(s): 20edc87

Training in progress, step 200, checkpoint

last-checkpoint/adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:beb3038837c41f22c7331d8e0203167cd2b26cfb8d2a725b93d7a8eea26aa1e5
+oid sha256:073f6bd0e44861f9bbefb443566a168c81495bced39dd1b1d2a0be23cbbe9cb5
 size 140815952
last-checkpoint/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:d8b138152c8cc8a42ca5f8cd0600cf66ac1a69402919c839146f1b0aa8493476
+oid sha256:689241204dabed053806b0006b9e37c3f782c5542b4a0e6383d0d2978eae10a6
 size 71878612
last-checkpoint/rng_state.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:122a2936afa43ed66142df0bd3dcbe036fd722232978b87a762dea6e03beb670
+oid sha256:8a9275c62aa538dc59da4e92cab0e96aef321694e347e049b029bf91527188c8
 size 14244
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:7624744ddc571068835a96b66c68f20536621f83b9432ca68e2d5ee8eb961785
+oid sha256:513432b56c3d25d6cb2b5f5a8da383c67096421a340fb1793f248481156b1328
 size 1192
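
The four pointer files above record only a sha256 oid and a byte size; the actual objects live in LFS storage. One quick way to confirm that a downloaded checkpoint file matches its pointer is to hash it locally. A minimal sketch in Python, standard library only (the local path and expected oid are taken from the adapter_model.safetensors pointer above; adjust for the other files):

import hashlib

def sha256_of(path, chunk_size=1 << 20):
    # Stream the file in 1 MiB chunks so large checkpoints fit in memory.
    h = hashlib.sha256()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(chunk_size), b""):
            h.update(chunk)
    return h.hexdigest()

# New oid from the adapter_model.safetensors pointer in this commit.
expected = "073f6bd0e44861f9bbefb443566a168c81495bced39dd1b1d2a0be23cbbe9cb5"
actual = sha256_of("last-checkpoint/adapter_model.safetensors")
print("OK" if actual == expected else f"mismatch: {actual}")
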
last-checkpoint/trainer_state.json CHANGED
@@ -1,9 +1,9 @@
 {
-  "best_metric": 2.0360867977142334,
-  "best_model_checkpoint": "miner_id_24/checkpoint-100",
-  "epoch": 0.054892273912446825,
+  "best_metric": 1.9555625915527344,
+  "best_model_checkpoint": "miner_id_24/checkpoint-200",
+  "epoch": 0.10978454782489365,
   "eval_steps": 100,
-  "global_step": 100,
+  "global_step": 200,
   "is_hyper_param_search": false,
   "is_local_process_zero": true,
   "is_world_process_zero": true,
@@ -93,6 +93,84 @@
       "eval_samples_per_second": 19.366,
       "eval_steps_per_second": 9.683,
       "step": 100
+    },
+    {
+      "epoch": 0.06038150130369151,
+      "grad_norm": 2.6299073696136475,
+      "learning_rate": 5e-05,
+      "loss": 1.9692,
+      "step": 110
+    },
+    {
+      "epoch": 0.06587072869493618,
+      "grad_norm": 4.907375812530518,
+      "learning_rate": 5e-05,
+      "loss": 2.1752,
+      "step": 120
+    },
+    {
+      "epoch": 0.07135995608618087,
+      "grad_norm": 2.7539217472076416,
+      "learning_rate": 5e-05,
+      "loss": 1.9246,
+      "step": 130
+    },
+    {
+      "epoch": 0.07684918347742556,
+      "grad_norm": 5.6027302742004395,
+      "learning_rate": 5e-05,
+      "loss": 1.9031,
+      "step": 140
+    },
+    {
+      "epoch": 0.08233841086867023,
+      "grad_norm": 3.0815937519073486,
+      "learning_rate": 5e-05,
+      "loss": 2.0121,
+      "step": 150
+    },
+    {
+      "epoch": 0.08782763825991492,
+      "grad_norm": 3.3530800342559814,
+      "learning_rate": 5e-05,
+      "loss": 2.1147,
+      "step": 160
+    },
+    {
+      "epoch": 0.0933168656511596,
+      "grad_norm": 2.6608502864837646,
+      "learning_rate": 5e-05,
+      "loss": 2.1767,
+      "step": 170
+    },
+    {
+      "epoch": 0.09880609304240429,
+      "grad_norm": 4.09913444519043,
+      "learning_rate": 5e-05,
+      "loss": 1.931,
+      "step": 180
+    },
+    {
+      "epoch": 0.10429532043364896,
+      "grad_norm": 2.4433376789093018,
+      "learning_rate": 5e-05,
+      "loss": 2.0358,
+      "step": 190
+    },
+    {
+      "epoch": 0.10978454782489365,
+      "grad_norm": 2.8582210540771484,
+      "learning_rate": 5e-05,
+      "loss": 2.0232,
+      "step": 200
+    },
+    {
+      "epoch": 0.10978454782489365,
+      "eval_loss": 1.9555625915527344,
+      "eval_runtime": 39.601,
+      "eval_samples_per_second": 19.393,
+      "eval_steps_per_second": 9.697,
+      "step": 200
     }
   ],
   "logging_steps": 10,
@@ -116,12 +194,12 @@
         "should_evaluate": false,
         "should_log": false,
         "should_save": true,
-        "should_training_stop": false
+        "should_training_stop": true
       },
       "attributes": {}
     }
   },
-  "total_flos": 3864241215897600.0,
+  "total_flos": 7728482431795200.0,
   "train_batch_size": 2,
   "trial_name": null,
   "trial_params": null