Jonathanmann committed · Commit a4f3173 · verified · 1 Parent(s): 8484d30

Upload 14 files

config.json ADDED
@@ -0,0 +1,39 @@
+ {
+   "_name_or_path": "gpt2",
+   "activation_function": "gelu_new",
+   "architectures": [
+     "GPT2LMHeadModel"
+   ],
+   "attn_pdrop": 0.1,
+   "bos_token_id": 50256,
+   "embd_pdrop": 0.1,
+   "eos_token_id": 50256,
+   "initializer_range": 0.02,
+   "layer_norm_epsilon": 1e-05,
+   "model_type": "gpt2",
+   "n_ctx": 1024,
+   "n_embd": 768,
+   "n_head": 12,
+   "n_inner": null,
+   "n_layer": 12,
+   "n_positions": 1024,
+   "reorder_and_upcast_attn": false,
+   "resid_pdrop": 0.1,
+   "scale_attn_by_inverse_layer_idx": false,
+   "scale_attn_weights": true,
+   "summary_activation": null,
+   "summary_first_dropout": 0.1,
+   "summary_proj_to_labels": true,
+   "summary_type": "cls_index",
+   "summary_use_proj": true,
+   "task_specific_params": {
+     "text-generation": {
+       "do_sample": true,
+       "max_length": 50
+     }
+   },
+   "torch_dtype": "float32",
+   "transformers_version": "4.46.3",
+   "use_cache": true,
+   "vocab_size": 50257
+ }
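config.json pins the fine-tune to the standard 124M-parameter GPT-2 architecture, initialized from the gpt2 base checkpoint. A minimal sketch of reading the config back with the transformers library; the checkpoint path is a placeholder, not something recorded in this commit:

    # Config sketch: these hyperparameters match the stock 124M-parameter GPT-2
    # (12 layers, 12 heads, 768-dim hidden, 1024-token context, 50257-token vocab).
    # "path/to/this/checkpoint" is an assumed local clone of these files.
    from transformers import AutoConfig

    cfg = AutoConfig.from_pretrained("path/to/this/checkpoint")
    assert cfg.model_type == "gpt2"
    assert (cfg.n_layer, cfg.n_head, cfg.n_embd, cfg.n_positions) == (12, 12, 768, 1024)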
generation_config.json ADDED
@@ -0,0 +1,6 @@
+ {
+   "_from_model_config": true,
+   "bos_token_id": 50256,
+   "eos_token_id": 50256,
+   "transformers_version": "4.46.3"
+ }
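generation_config.json only fixes the bos/eos ids (50256); the sampling defaults live in config.json's task_specific_params (do_sample, max_length 50). A hedged end-to-end sketch, with the checkpoint path and prompt as illustrative placeholders:

    # Sampling sketch mirroring generation_config.json (bos/eos 50256) and the
    # text-generation defaults in config.json (do_sample=True, max_length=50).
    # "path/to/this/checkpoint" and the prompt are placeholders.
    from transformers import AutoModelForCausalLM, AutoTokenizer

    path = "path/to/this/checkpoint"
    tokenizer = AutoTokenizer.from_pretrained(path)
    model = AutoModelForCausalLM.from_pretrained(path)  # resolves to GPT2LMHeadModel per config.json

    inputs = tokenizer("Verse 1:", return_tensors="pt")
    output_ids = model.generate(
        **inputs,
        do_sample=True,
        max_length=50,
        pad_token_id=tokenizer.eos_token_id,  # GPT-2 reuses <|endoftext|> as pad
    )
    print(tokenizer.decode(output_ids[0], skip_special_tokens=True))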
merges.txt ADDED
The diff for this file is too large to render. See raw diff
 
model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:77aff4290a4cc52d71f0dde3425ad8cd7aba3cbc61c62b9f38c5a1658b113a1d
+ size 497774208
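model.safetensors is stored through Git LFS, so the diff only shows the pointer: the sha256 oid of the file contents plus its size in bytes (about 498 MB). A small sketch that checks a downloaded copy against the pointer; the local filename is an assumption:

    # Integrity check of a downloaded model.safetensors against the LFS pointer above.
    import hashlib
    import os

    expected_oid = "77aff4290a4cc52d71f0dde3425ad8cd7aba3cbc61c62b9f38c5a1658b113a1d"
    expected_size = 497774208

    h = hashlib.sha256()
    with open("model.safetensors", "rb") as f:  # assumed local path
        for chunk in iter(lambda: f.read(1 << 20), b""):
            h.update(chunk)

    assert os.path.getsize("model.safetensors") == expected_size
    assert h.hexdigest() == expected_oid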
optimizer.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:b90a74b6f2804dc63003d5d3383e01e31a4d1b41a54eb11fe22b1cd8abd13d00
+ size 995642298
rng_state.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:372010c33f15d586b91174490010376899886b6fb8f25f6df4ad722a385ca854
+ size 14244
scheduler.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:468c7deb41f53c0d0b135af65f6e03d06758293f4a8fceff1aa7408685b2495c
+ size 1064
special_tokens_map.json ADDED
@@ -0,0 +1,24 @@
+ {
+   "bos_token": {
+     "content": "<|endoftext|>",
+     "lstrip": false,
+     "normalized": true,
+     "rstrip": false,
+     "single_word": false
+   },
+   "eos_token": {
+     "content": "<|endoftext|>",
+     "lstrip": false,
+     "normalized": true,
+     "rstrip": false,
+     "single_word": false
+   },
+   "pad_token": "<|endoftext|>",
+   "unk_token": {
+     "content": "<|endoftext|>",
+     "lstrip": false,
+     "normalized": true,
+     "rstrip": false,
+     "single_word": false
+   }
+ }
tokenizer_config.json ADDED
@@ -0,0 +1,22 @@
+ {
+   "add_bos_token": false,
+   "add_prefix_space": false,
+   "added_tokens_decoder": {
+     "50256": {
+       "content": "<|endoftext|>",
+       "lstrip": false,
+       "normalized": true,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     }
+   },
+   "bos_token": "<|endoftext|>",
+   "clean_up_tokenization_spaces": false,
+   "eos_token": "<|endoftext|>",
+   "errors": "replace",
+   "model_max_length": 1024,
+   "pad_token": "<|endoftext|>",
+   "tokenizer_class": "GPT2Tokenizer",
+   "unk_token": "<|endoftext|>"
+ }
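tokenizer_config.json, together with special_tokens_map.json, vocab.json, and merges.txt, describes a stock GPT2Tokenizer in which <|endoftext|> (id 50256) serves as bos, eos, unk, and pad token. A quick sanity-check sketch, assuming a local clone of these files at a placeholder path:

    # Tokenizer sketch; "path/to/this/checkpoint" is an assumed local clone.
    from transformers import AutoTokenizer

    tok = AutoTokenizer.from_pretrained("path/to/this/checkpoint")
    assert tok.pad_token == tok.eos_token == "<|endoftext|>"
    assert tok.convert_tokens_to_ids("<|endoftext|>") == 50256  # per added_tokens_decoder
    print(tok("hello world").input_ids)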
tokenizer_full_config.json ADDED
@@ -0,0 +1,14 @@
+ {
+   "model_max_length": 1024,
+   "padding_side": "right",
+   "pad_token": "<|endoftext|>",
+   "eos_token": "<|endoftext|>",
+   "name_or_path": "gpt2",
+   "tokenizer_class": "GPT2Tokenizer",
+   "special_tokens": {
+     "bos_token": "<|endoftext|>",
+     "eos_token": "<|endoftext|>",
+     "unk_token": "<|endoftext|>",
+     "pad_token": "<|endoftext|>"
+   }
+ }
trainer_state.json ADDED
@@ -0,0 +1,273 @@
+ {
+   "best_metric": 3.256429433822632,
+   "best_model_checkpoint": "/content/drive/MyDrive/Hugh Mann/GPT2/GPT2-LyricsDec8/checkpoint-1600",
+   "epoch": 9.907120743034056,
+   "eval_steps": 100,
+   "global_step": 1600,
+   "is_hyper_param_search": false,
+   "is_local_process_zero": true,
+   "is_world_process_zero": true,
+   "log_history": [
+     {
+       "epoch": 0.6191950464396285,
+       "grad_norm": 7.51154088973999,
+       "learning_rate": 9.7e-06,
+       "loss": 3.9524,
+       "step": 100
+     },
+     {
+       "epoch": 0.6191950464396285,
+       "eval_loss": 3.506685733795166,
+       "eval_runtime": 2.2552,
+       "eval_samples_per_second": 254.077,
+       "eval_steps_per_second": 15.963,
+       "step": 100
+     },
+     {
+       "epoch": 1.238390092879257,
+       "grad_norm": 6.163869857788086,
+       "learning_rate": 9.58099352051836e-06,
+       "loss": 3.598,
+       "step": 200
+     },
+     {
+       "epoch": 1.238390092879257,
+       "eval_loss": 3.4033825397491455,
+       "eval_runtime": 2.2344,
+       "eval_samples_per_second": 256.441,
+       "eval_steps_per_second": 16.111,
+       "step": 200
+     },
+     {
+       "epoch": 1.8575851393188856,
+       "grad_norm": 5.917358875274658,
+       "learning_rate": 9.14902807775378e-06,
+       "loss": 3.5138,
+       "step": 300
+     },
+     {
+       "epoch": 1.8575851393188856,
+       "eval_loss": 3.360402822494507,
+       "eval_runtime": 2.2391,
+       "eval_samples_per_second": 255.912,
+       "eval_steps_per_second": 16.078,
+       "step": 300
+     },
+     {
+       "epoch": 2.476780185758514,
+       "grad_norm": 5.890804290771484,
+       "learning_rate": 8.717062634989202e-06,
+       "loss": 3.4766,
+       "step": 400
+     },
+     {
+       "epoch": 2.476780185758514,
+       "eval_loss": 3.3368523120880127,
+       "eval_runtime": 2.2358,
+       "eval_samples_per_second": 256.287,
+       "eval_steps_per_second": 16.102,
+       "step": 400
+     },
+     {
+       "epoch": 3.0959752321981426,
+       "grad_norm": 6.225989818572998,
+       "learning_rate": 8.285097192224622e-06,
+       "loss": 3.4229,
+       "step": 500
+     },
+     {
+       "epoch": 3.0959752321981426,
+       "eval_loss": 3.319918394088745,
+       "eval_runtime": 2.2394,
+       "eval_samples_per_second": 255.87,
+       "eval_steps_per_second": 16.076,
+       "step": 500
+     },
+     {
+       "epoch": 3.715170278637771,
+       "grad_norm": 6.0069499015808105,
+       "learning_rate": 7.853131749460043e-06,
+       "loss": 3.4022,
+       "step": 600
+     },
+     {
+       "epoch": 3.715170278637771,
+       "eval_loss": 3.306849718093872,
+       "eval_runtime": 2.3091,
+       "eval_samples_per_second": 248.153,
+       "eval_steps_per_second": 15.591,
+       "step": 600
+     },
+     {
+       "epoch": 4.3343653250774,
+       "grad_norm": 7.3684492111206055,
+       "learning_rate": 7.4211663066954655e-06,
+       "loss": 3.3725,
+       "step": 700
+     },
+     {
+       "epoch": 4.3343653250774,
+       "eval_loss": 3.297823667526245,
+       "eval_runtime": 2.2288,
+       "eval_samples_per_second": 257.086,
+       "eval_steps_per_second": 16.152,
+       "step": 700
+     },
+     {
+       "epoch": 4.953560371517028,
+       "grad_norm": 6.168036460876465,
+       "learning_rate": 6.989200863930886e-06,
+       "loss": 3.3689,
+       "step": 800
+     },
+     {
+       "epoch": 4.953560371517028,
+       "eval_loss": 3.2880301475524902,
+       "eval_runtime": 2.226,
+       "eval_samples_per_second": 257.41,
+       "eval_steps_per_second": 16.172,
+       "step": 800
+     },
+     {
+       "epoch": 5.572755417956657,
+       "grad_norm": 6.212117671966553,
+       "learning_rate": 6.557235421166307e-06,
+       "loss": 3.3332,
+       "step": 900
+     },
+     {
+       "epoch": 5.572755417956657,
+       "eval_loss": 3.2826037406921387,
+       "eval_runtime": 2.2453,
+       "eval_samples_per_second": 255.204,
+       "eval_steps_per_second": 16.034,
+       "step": 900
+     },
+     {
+       "epoch": 6.191950464396285,
+       "grad_norm": 5.847946643829346,
+       "learning_rate": 6.125269978401729e-06,
+       "loss": 3.3367,
+       "step": 1000
+     },
+     {
+       "epoch": 6.191950464396285,
+       "eval_loss": 3.276566505432129,
+       "eval_runtime": 2.2355,
+       "eval_samples_per_second": 256.315,
+       "eval_steps_per_second": 16.104,
+       "step": 1000
+     },
+     {
+       "epoch": 6.811145510835914,
+       "grad_norm": 6.364712238311768,
+       "learning_rate": 5.6933045356371494e-06,
+       "loss": 3.3289,
+       "step": 1100
+     },
+     {
+       "epoch": 6.811145510835914,
+       "eval_loss": 3.272658348083496,
+       "eval_runtime": 2.2297,
+       "eval_samples_per_second": 256.986,
+       "eval_steps_per_second": 16.146,
+       "step": 1100
+     },
+     {
+       "epoch": 7.430340557275541,
+       "grad_norm": 5.989032745361328,
+       "learning_rate": 5.261339092872571e-06,
+       "loss": 3.3134,
+       "step": 1200
+     },
+     {
+       "epoch": 7.430340557275541,
+       "eval_loss": 3.2689383029937744,
+       "eval_runtime": 2.2577,
+       "eval_samples_per_second": 253.799,
+       "eval_steps_per_second": 15.945,
+       "step": 1200
+     },
+     {
+       "epoch": 8.04953560371517,
+       "grad_norm": 6.310012340545654,
+       "learning_rate": 4.829373650107992e-06,
+       "loss": 3.2967,
+       "step": 1300
+     },
+     {
+       "epoch": 8.04953560371517,
+       "eval_loss": 3.265749216079712,
+       "eval_runtime": 2.247,
+       "eval_samples_per_second": 255.004,
+       "eval_steps_per_second": 16.021,
+       "step": 1300
+     },
+     {
+       "epoch": 8.6687306501548,
+       "grad_norm": 6.473470211029053,
+       "learning_rate": 4.397408207343413e-06,
+       "loss": 3.2846,
+       "step": 1400
+     },
+     {
+       "epoch": 8.6687306501548,
+       "eval_loss": 3.2619001865386963,
+       "eval_runtime": 2.2395,
+       "eval_samples_per_second": 255.862,
+       "eval_steps_per_second": 16.075,
+       "step": 1400
+     },
+     {
+       "epoch": 9.287925696594428,
+       "grad_norm": 6.593929767608643,
+       "learning_rate": 3.965442764578834e-06,
+       "loss": 3.2858,
+       "step": 1500
+     },
+     {
+       "epoch": 9.287925696594428,
+       "eval_loss": 3.2583212852478027,
+       "eval_runtime": 2.2458,
+       "eval_samples_per_second": 255.144,
+       "eval_steps_per_second": 16.03,
+       "step": 1500
+     },
+     {
+       "epoch": 9.907120743034056,
+       "grad_norm": 6.568899154663086,
+       "learning_rate": 3.5334773218142552e-06,
+       "loss": 3.2755,
+       "step": 1600
+     },
+     {
+       "epoch": 9.907120743034056,
+       "eval_loss": 3.256429433822632,
+       "eval_runtime": 2.2439,
+       "eval_samples_per_second": 255.355,
+       "eval_steps_per_second": 16.043,
+       "step": 1600
+     }
+   ],
+   "logging_steps": 100,
+   "max_steps": 2415,
+   "num_input_tokens_seen": 0,
+   "num_train_epochs": 15,
+   "save_steps": 100,
+   "stateful_callbacks": {
+     "TrainerControl": {
+       "args": {
+         "should_epoch_stop": false,
+         "should_evaluate": false,
+         "should_log": false,
+         "should_save": true,
+         "should_training_stop": false
+       },
+       "attributes": {}
+     }
+   },
+   "total_flos": 1.3347580870656e+16,
+   "train_batch_size": 16,
+   "trial_name": null,
+   "trial_params": null
+ }
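trainer_state.json records the run up to the best checkpoint at step 1600 (epoch ~9.9 of a planned 15): training loss falls from 3.95 to 3.28 and eval loss from 3.51 to 3.26, evaluated every 100 steps. A small sketch that reads the eval curve back out of the file, assuming it sits in the working directory:

    # Extract the eval-loss curve from trainer_state.json (assumed local copy).
    import json

    with open("trainer_state.json") as f:
        state = json.load(f)

    evals = [(e["step"], e["eval_loss"]) for e in state["log_history"] if "eval_loss" in e]
    for step, loss in evals:
        print(f"step {step:>5}: eval_loss {loss:.4f}")
    print("best:", state["best_metric"], "at", state["best_model_checkpoint"])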
training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:01ffa1ba06ce21ab77fb40cb76067e70f1f7fb76ae1d4e6b7cbba1d2fe02ca2e
+ size 5368
training_args_full.json ADDED
@@ -0,0 +1,148 @@
+ {
+   "output_dir": "/content/drive/MyDrive/Hugh Mann/GPT2/GPT2-LyricsDec8",
+   "overwrite_output_dir": false,
+   "do_train": false,
+   "do_eval": true,
+   "do_predict": false,
+   "eval_strategy": "steps",
+   "prediction_loss_only": false,
+   "per_device_train_batch_size": 16,
+   "per_device_eval_batch_size": 16,
+   "per_gpu_train_batch_size": null,
+   "per_gpu_eval_batch_size": null,
+   "gradient_accumulation_steps": 2,
+   "eval_accumulation_steps": null,
+   "eval_delay": 0,
+   "torch_empty_cache_steps": null,
+   "learning_rate": 1e-05,
+   "weight_decay": 0.01,
+   "adam_beta1": 0.9,
+   "adam_beta2": 0.999,
+   "adam_epsilon": 1e-08,
+   "max_grad_norm": 1.0,
+   "num_train_epochs": 15,
+   "max_steps": -1,
+   "lr_scheduler_type": "linear",
+   "lr_scheduler_kwargs": {},
+   "warmup_ratio": 0.0,
+   "warmup_steps": 100,
+   "log_level": "passive",
+   "log_level_replica": "warning",
+   "log_on_each_node": true,
+   "logging_dir": "/content/drive/MyDrive/Hugh Mann/GPT2/GPT2-LyricsDec8/runs/Dec09_17-32-33_dec940e50fe7",
+   "logging_strategy": "steps",
+   "logging_first_step": false,
+   "logging_steps": 100,
+   "logging_nan_inf_filter": true,
+   "save_strategy": "steps",
+   "save_steps": 100,
+   "save_total_limit": null,
+   "save_safetensors": true,
+   "save_on_each_node": false,
+   "save_only_model": false,
+   "restore_callback_states_from_checkpoint": false,
+   "no_cuda": false,
+   "use_cpu": false,
+   "use_mps_device": false,
+   "seed": 42,
+   "data_seed": null,
+   "jit_mode_eval": false,
+   "use_ipex": false,
+   "bf16": false,
+   "fp16": true,
+   "fp16_opt_level": "O1",
+   "half_precision_backend": "auto",
+   "bf16_full_eval": false,
+   "fp16_full_eval": false,
+   "tf32": null,
+   "local_rank": 0,
+   "ddp_backend": null,
+   "tpu_num_cores": null,
+   "tpu_metrics_debug": false,
+   "debug": [],
+   "dataloader_drop_last": false,
+   "eval_steps": 100,
+   "dataloader_num_workers": 0,
+   "dataloader_prefetch_factor": null,
+   "past_index": -1,
+   "run_name": "/content/drive/MyDrive/Hugh Mann/GPT2/GPT2-LyricsDec8",
+   "disable_tqdm": false,
+   "remove_unused_columns": true,
+   "label_names": null,
+   "load_best_model_at_end": true,
+   "metric_for_best_model": "loss",
+   "greater_is_better": false,
+   "ignore_data_skip": false,
+   "fsdp": [],
+   "fsdp_min_num_params": 0,
+   "fsdp_config": {
+     "min_num_params": 0,
+     "xla": false,
+     "xla_fsdp_v2": false,
+     "xla_fsdp_grad_ckpt": false
+   },
+   "fsdp_transformer_layer_cls_to_wrap": null,
+   "accelerator_config": {
+     "split_batches": false,
+     "dispatch_batches": null,
+     "even_batches": true,
+     "use_seedable_sampler": true,
+     "non_blocking": false,
+     "gradient_accumulation_kwargs": null
+   },
+   "deepspeed": null,
+   "label_smoothing_factor": 0.0,
+   "optim": "adamw_torch",
+   "optim_args": null,
+   "adafactor": false,
+   "group_by_length": false,
+   "length_column_name": "length",
+   "report_to": [
+     "tensorboard",
+     "wandb"
+   ],
+   "ddp_find_unused_parameters": null,
+   "ddp_bucket_cap_mb": null,
+   "ddp_broadcast_buffers": null,
+   "dataloader_pin_memory": true,
+   "dataloader_persistent_workers": false,
+   "skip_memory_metrics": true,
+   "use_legacy_prediction_loop": false,
+   "push_to_hub": false,
+   "resume_from_checkpoint": null,
+   "hub_model_id": null,
+   "hub_strategy": "every_save",
+   "hub_token": "<HUB_TOKEN>",
+   "hub_private_repo": false,
+   "hub_always_push": false,
+   "gradient_checkpointing": false,
+   "gradient_checkpointing_kwargs": null,
+   "include_inputs_for_metrics": false,
+   "include_for_metrics": [],
+   "eval_do_concat_batches": true,
+   "fp16_backend": "auto",
+   "evaluation_strategy": "steps",
+   "push_to_hub_model_id": null,
+   "push_to_hub_organization": null,
+   "push_to_hub_token": "<PUSH_TO_HUB_TOKEN>",
+   "mp_parameters": "",
+   "auto_find_batch_size": false,
+   "full_determinism": false,
+   "torchdynamo": null,
+   "ray_scope": "last",
+   "ddp_timeout": 1800,
+   "torch_compile": false,
+   "torch_compile_backend": null,
+   "torch_compile_mode": null,
+   "dispatch_batches": null,
+   "split_batches": null,
+   "include_tokens_per_second": false,
+   "include_num_input_tokens_seen": false,
+   "neftune_noise_alpha": null,
+   "optim_target_modules": null,
+   "batch_eval_metrics": false,
+   "eval_on_start": false,
+   "use_liger_kernel": false,
+   "eval_use_gather_object": false,
+   "average_tokens_across_devices": false
+ }
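training_args_full.json is a full dump of the TrainingArguments used for the run: fp16, learning rate 1e-05 with linear decay and 100 warmup steps, effective batch size 32 (16 per device × 2 gradient-accumulation steps), 15 epochs, eval/save/logging every 100 steps, and best-model selection on eval loss. A partial, hedged reconstruction of the key arguments; the output_dir is a placeholder and every omitted field falls back to library defaults:

    # Partial reconstruction of the key TrainingArguments from training_args_full.json;
    # "gpt2-lyrics-out" is illustrative (the original run wrote to a Google Drive path).
    from transformers import TrainingArguments

    args = TrainingArguments(
        output_dir="gpt2-lyrics-out",
        num_train_epochs=15,
        per_device_train_batch_size=16,
        per_device_eval_batch_size=16,
        gradient_accumulation_steps=2,
        learning_rate=1e-5,
        weight_decay=0.01,
        warmup_steps=100,
        lr_scheduler_type="linear",
        fp16=True,
        eval_strategy="steps",
        eval_steps=100,
        save_strategy="steps",
        save_steps=100,
        logging_steps=100,
        load_best_model_at_end=True,
        metric_for_best_model="loss",
        greater_is_better=False,
        seed=42,
        report_to=["tensorboard", "wandb"],
    )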
vocab.json ADDED
The diff for this file is too large to render. See raw diff