corranm committed on
Commit 536f287 · verified · 1 Parent(s): 1c1b7be

End of training

README.md ADDED
@@ -0,0 +1,97 @@
1
+ ---
2
+ library_name: transformers
3
+ license: apache-2.0
4
+ base_model: google/vit-large-patch16-224
5
+ tags:
6
+ - generated_from_trainer
7
+ metrics:
8
+ - accuracy
9
+ model-index:
10
+ - name: squarerun_large_model
11
+ results: []
12
+ ---
13
+
14
+ <!-- This model card has been generated automatically according to the information the Trainer had access to. You
15
+ should probably proofread and complete it, then remove this comment. -->
16
+
17
+ # squarerun_large_model
18
+
19
+ This model is a fine-tuned version of [google/vit-large-patch16-224](https://huggingface.co/google/vit-large-patch16-224) on an unknown dataset.
20
+ It achieves the following results on the evaluation set:
21
+ - Loss: 1.5150
22
+ - F1 Macro: 0.4837
23
+ - F1 Micro: 0.5909
24
+ - F1 Weighted: 0.5569
25
+ - Precision Macro: 0.5183
26
+ - Precision Micro: 0.5909
27
+ - Precision Weighted: 0.5764
28
+ - Recall Macro: 0.5013
29
+ - Recall Micro: 0.5909
30
+ - Recall Weighted: 0.5909
31
+ - Accuracy: 0.5909
32
+
33
+ ## Model description
34
+
35
+ More information needed
36
+
37
+ ## Intended uses & limitations
38
+
39
+ More information needed
40
+
41
+ ## Training and evaluation data
42
+
43
+ More information needed
44
+
45
+ ## Training procedure
46
+
47
+ ### Training hyperparameters
48
+
49
+ The following hyperparameters were used during training (a reproduction sketch in code follows this list):
50
+ - learning_rate: 0.0001
51
+ - train_batch_size: 8
52
+ - eval_batch_size: 8
53
+ - seed: 42
54
+ - gradient_accumulation_steps: 2
55
+ - total_train_batch_size: 16
56
+ - optimizer: AdamW (torch implementation) with betas=(0.9, 0.999), epsilon=1e-08, and no additional optimizer arguments
57
+ - lr_scheduler_type: linear
58
+ - lr_scheduler_warmup_ratio: 0.1
59
+ - num_epochs: 25
60
+
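The training script itself is not part of this commit; purely as a sketch, assuming the Trainer API, the settings above would map onto `transformers.TrainingArguments` roughly as follows (`model`, `train_ds`, `eval_ds`, and `compute_metrics` are placeholders for objects the original script would provide, and the per-epoch evaluation/save strategy is inferred from `trainer_state.json` below):

```python
from transformers import TrainingArguments, Trainer

# Placeholder reproduction of the hyperparameters listed above.
training_args = TrainingArguments(
    output_dir="squarerun_large_model",
    learning_rate=1e-4,
    per_device_train_batch_size=8,
    per_device_eval_batch_size=8,
    gradient_accumulation_steps=2,   # effective train batch size of 16
    num_train_epochs=25,
    lr_scheduler_type="linear",
    warmup_ratio=0.1,
    seed=42,
    eval_strategy="epoch",
    save_strategy="epoch",
    load_best_model_at_end=True,
    metric_for_best_model="eval_loss",
)

trainer = Trainer(
    model=model,                  # ViTForImageClassification, see config.json below
    args=training_args,
    train_dataset=train_ds,       # placeholder dataset objects
    eval_dataset=eval_ds,
    compute_metrics=compute_metrics,
)
trainer.train()
```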
61
+ ### Training results
62
+
63
+ | Training Loss | Epoch | Step | Validation Loss | F1 Macro | F1 Micro | F1 Weighted | Precision Macro | Precision Micro | Precision Weighted | Recall Macro | Recall Micro | Recall Weighted | Accuracy |
64
+ |:-------------:|:-----:|:----:|:---------------:|:--------:|:--------:|:-----------:|:---------------:|:---------------:|:------------------:|:------------:|:------------:|:---------------:|:--------:|
65
+ | 1.917 | 1.0 | 29 | 1.9115 | 0.1066 | 0.2197 | 0.1273 | 0.0780 | 0.2197 | 0.0923 | 0.1832 | 0.2197 | 0.2197 | 0.2197 |
66
+ | 1.6762 | 2.0 | 58 | 1.6722 | 0.2733 | 0.3561 | 0.3005 | 0.3141 | 0.3561 | 0.3684 | 0.3355 | 0.3561 | 0.3561 | 0.3561 |
67
+ | 1.9664 | 3.0 | 87 | 1.5057 | 0.3554 | 0.4545 | 0.4060 | 0.3734 | 0.4545 | 0.4129 | 0.3857 | 0.4545 | 0.4545 | 0.4545 |
68
+ | 1.1934 | 4.0 | 116 | 1.4217 | 0.3130 | 0.4091 | 0.3530 | 0.3414 | 0.4091 | 0.3818 | 0.3629 | 0.4091 | 0.4091 | 0.4091 |
69
+ | 1.0968 | 5.0 | 145 | 1.1879 | 0.4608 | 0.5758 | 0.5258 | 0.4807 | 0.5758 | 0.5438 | 0.5045 | 0.5758 | 0.5758 | 0.5758 |
70
+ | 1.1313 | 6.0 | 174 | 1.2307 | 0.4964 | 0.5530 | 0.5243 | 0.5850 | 0.5530 | 0.6114 | 0.5196 | 0.5530 | 0.5530 | 0.5530 |
71
+ | 1.0807 | 7.0 | 203 | 1.2771 | 0.4088 | 0.5303 | 0.4772 | 0.5393 | 0.5303 | 0.5816 | 0.4304 | 0.5303 | 0.5303 | 0.5303 |
72
+ | 1.1825 | 8.0 | 232 | 1.2339 | 0.4528 | 0.5682 | 0.5175 | 0.5544 | 0.5682 | 0.6169 | 0.4920 | 0.5682 | 0.5682 | 0.5682 |
73
+ | 0.4454 | 9.0 | 261 | 1.0474 | 0.6064 | 0.6970 | 0.6763 | 0.6334 | 0.6970 | 0.6868 | 0.6100 | 0.6970 | 0.6970 | 0.6970 |
74
+ | 0.5439 | 10.0 | 290 | 1.6815 | 0.4580 | 0.5152 | 0.4920 | 0.5394 | 0.5152 | 0.5951 | 0.4903 | 0.5152 | 0.5152 | 0.5152 |
75
+ | 0.4256 | 11.0 | 319 | 1.1378 | 0.5800 | 0.6667 | 0.6495 | 0.5801 | 0.6667 | 0.6435 | 0.5907 | 0.6667 | 0.6667 | 0.6667 |
76
+ | 0.4968 | 12.0 | 348 | 1.4229 | 0.5307 | 0.6136 | 0.6013 | 0.5348 | 0.6136 | 0.6095 | 0.5486 | 0.6136 | 0.6136 | 0.6136 |
77
+ | 0.3408 | 13.0 | 377 | 1.4445 | 0.5426 | 0.6288 | 0.6095 | 0.5559 | 0.6288 | 0.6307 | 0.5621 | 0.6288 | 0.6288 | 0.6288 |
78
+ | 0.2914 | 14.0 | 406 | 1.4277 | 0.6009 | 0.6515 | 0.6470 | 0.7068 | 0.6515 | 0.6868 | 0.5958 | 0.6515 | 0.6515 | 0.6515 |
79
+ | 0.2003 | 15.0 | 435 | 1.5517 | 0.5770 | 0.6288 | 0.6296 | 0.5890 | 0.6288 | 0.6475 | 0.5792 | 0.6288 | 0.6288 | 0.6288 |
80
+ | 0.0871 | 16.0 | 464 | 1.4812 | 0.5702 | 0.6515 | 0.6407 | 0.5777 | 0.6515 | 0.6491 | 0.5785 | 0.6515 | 0.6515 | 0.6515 |
81
+ | 0.0352 | 17.0 | 493 | 2.1052 | 0.5007 | 0.5985 | 0.5744 | 0.5466 | 0.5985 | 0.6130 | 0.5127 | 0.5985 | 0.5985 | 0.5985 |
82
+ | 0.0101 | 18.0 | 522 | 1.9978 | 0.5725 | 0.6212 | 0.6223 | 0.6152 | 0.6212 | 0.6559 | 0.5672 | 0.6212 | 0.6212 | 0.6212 |
83
+ | 0.0035 | 19.0 | 551 | 2.0304 | 0.5880 | 0.6439 | 0.6388 | 0.6698 | 0.6439 | 0.6936 | 0.5805 | 0.6439 | 0.6439 | 0.6439 |
84
+ | 0.0013 | 20.0 | 580 | 2.1374 | 0.5514 | 0.6364 | 0.6224 | 0.6025 | 0.6364 | 0.6765 | 0.5685 | 0.6364 | 0.6364 | 0.6364 |
85
+ | 0.0589 | 21.0 | 609 | 1.7676 | 0.5879 | 0.6439 | 0.6396 | 0.5940 | 0.6439 | 0.6407 | 0.5889 | 0.6439 | 0.6439 | 0.6439 |
86
+ | 0.0263 | 22.0 | 638 | 1.8416 | 0.5785 | 0.6439 | 0.6327 | 0.6016 | 0.6439 | 0.6454 | 0.5758 | 0.6439 | 0.6439 | 0.6439 |
87
+ | 0.0028 | 23.0 | 667 | 1.9843 | 0.6068 | 0.6667 | 0.6569 | 0.6631 | 0.6667 | 0.6882 | 0.6069 | 0.6667 | 0.6667 | 0.6667 |
88
+ | 0.0006 | 24.0 | 696 | 1.9432 | 0.6157 | 0.6742 | 0.6655 | 0.6603 | 0.6742 | 0.6853 | 0.6152 | 0.6742 | 0.6742 | 0.6742 |
89
+ | 0.0004 | 25.0 | 725 | 1.9346 | 0.6089 | 0.6667 | 0.6569 | 0.6548 | 0.6667 | 0.6763 | 0.6073 | 0.6667 | 0.6667 | 0.6667 |
90
+
91
+
92
+ ### Framework versions
93
+
94
+ - Transformers 4.48.2
95
+ - Pytorch 2.6.0+cu124
96
+ - Datasets 3.2.0
97
+ - Tokenizers 0.21.0
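The usage sections of the card above are still marked "More information needed". As a minimal inference sketch, assuming the checkpoint is loaded from the training output directory `squarerun_large_model` (or the corresponding Hub repo id) together with the image processor shipped in this commit:

```python
import torch
from PIL import Image
from transformers import AutoImageProcessor, AutoModelForImageClassification

# "squarerun_large_model" is a placeholder for the local output dir or Hub repo id.
processor = AutoImageProcessor.from_pretrained("squarerun_large_model")
model = AutoModelForImageClassification.from_pretrained("squarerun_large_model")
model.eval()

image = Image.open("example.jpg").convert("RGB")   # placeholder input image
inputs = processor(images=image, return_tensors="pt")

with torch.no_grad():
    logits = model(**inputs).logits

pred_id = logits.argmax(dim=-1).item()
print(model.config.id2label[pred_id])  # one of "-", "0", ..., "5" (see config.json below)
```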
all_results.json ADDED
@@ -0,0 +1,22 @@
1
+ {
2
+ "epoch": 25.0,
3
+ "eval_accuracy": 0.5909090909090909,
4
+ "eval_f1_macro": 0.48373349339735894,
5
+ "eval_f1_micro": 0.5909090909090909,
6
+ "eval_f1_weighted": 0.5568712333418215,
7
+ "eval_loss": 1.5150469541549683,
8
+ "eval_precision_macro": 0.5182539682539683,
9
+ "eval_precision_micro": 0.5909090909090909,
10
+ "eval_precision_weighted": 0.5764309764309764,
11
+ "eval_recall_macro": 0.5013038548752834,
12
+ "eval_recall_micro": 0.5909090909090909,
13
+ "eval_recall_weighted": 0.5909090909090909,
14
+ "eval_runtime": 1.5008,
15
+ "eval_samples_per_second": 43.977,
16
+ "eval_steps_per_second": 5.997,
17
+ "total_flos": 3.163993239336653e+18,
18
+ "train_loss": 0.5988656284373478,
19
+ "train_runtime": 971.0069,
20
+ "train_samples_per_second": 11.895,
21
+ "train_steps_per_second": 0.747
22
+ }
config.json ADDED
@@ -0,0 +1,42 @@
1
+ {
2
+ "_name_or_path": "google/vit-large-patch16-224",
3
+ "architectures": [
4
+ "ViTForImageClassification"
5
+ ],
6
+ "attention_probs_dropout_prob": 0.0,
7
+ "encoder_stride": 16,
8
+ "hidden_act": "gelu",
9
+ "hidden_dropout_prob": 0.0,
10
+ "hidden_size": 1024,
11
+ "id2label": {
12
+ "0": "-",
13
+ "1": "0",
14
+ "2": "1",
15
+ "3": "2",
16
+ "4": "3",
17
+ "5": "4",
18
+ "6": "5"
19
+ },
20
+ "image_size": 224,
21
+ "initializer_range": 0.02,
22
+ "intermediate_size": 4096,
23
+ "label2id": {
24
+ "-": "0",
25
+ "0": "1",
26
+ "1": "2",
27
+ "2": "3",
28
+ "3": "4",
29
+ "4": "5",
30
+ "5": "6"
31
+ },
32
+ "layer_norm_eps": 1e-12,
33
+ "model_type": "vit",
34
+ "num_attention_heads": 16,
35
+ "num_channels": 3,
36
+ "num_hidden_layers": 24,
37
+ "patch_size": 16,
38
+ "problem_type": "single_label_classification",
39
+ "qkv_bias": true,
40
+ "torch_dtype": "float32",
41
+ "transformers_version": "4.48.2"
42
+ }
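The `id2label`/`label2id` maps above define a 7-way classification head in place of the base model's 1000-class ImageNet head. As a sketch of how such a config is typically produced when setting up the fine-tune (label names taken from this file; not necessarily the author's actual code):

```python
from transformers import ViTForImageClassification

labels = ["-", "0", "1", "2", "3", "4", "5"]
model = ViTForImageClassification.from_pretrained(
    "google/vit-large-patch16-224",
    num_labels=len(labels),
    id2label={i: name for i, name in enumerate(labels)},
    label2id={name: i for i, name in enumerate(labels)},
    ignore_mismatched_sizes=True,   # drop the 1000-class head, re-initialise a 7-class one
)
```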
eval_results.json ADDED
@@ -0,0 +1,17 @@
1
+ {
2
+ "epoch": 25.0,
3
+ "eval_accuracy": 0.5909090909090909,
4
+ "eval_f1_macro": 0.48373349339735894,
5
+ "eval_f1_micro": 0.5909090909090909,
6
+ "eval_f1_weighted": 0.5568712333418215,
7
+ "eval_loss": 1.5150469541549683,
8
+ "eval_precision_macro": 0.5182539682539683,
9
+ "eval_precision_micro": 0.5909090909090909,
10
+ "eval_precision_weighted": 0.5764309764309764,
11
+ "eval_recall_macro": 0.5013038548752834,
12
+ "eval_recall_micro": 0.5909090909090909,
13
+ "eval_recall_weighted": 0.5909090909090909,
14
+ "eval_runtime": 1.5008,
15
+ "eval_samples_per_second": 43.977,
16
+ "eval_steps_per_second": 5.997
17
+ }
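A note on why several of these numbers coincide: in single-label multi-class evaluation every prediction contributes exactly one true positive or false positive, and every target exactly one true positive or false negative, so the micro-averaged precision, recall, and F1 all reduce to plain accuracy (and support-weighted recall does too), which is why those entries are all 0.5909... here:

$$\mathrm{Precision}_{\mathrm{micro}}=\mathrm{Recall}_{\mathrm{micro}}=\mathrm{F1}_{\mathrm{micro}}=\frac{\sum_c \mathrm{TP}_c}{\sum_c (\mathrm{TP}_c+\mathrm{FP}_c)}=\frac{\#\,\text{correct}}{\#\,\text{samples}}=\mathrm{Accuracy}$$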
model.safetensors ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:42c65d8e1d6418f6c7639f0a38bba5aa15596f677824fdee5da51ec2acbfe76c
3
+ size 1213281772
preprocessor_config.json ADDED
@@ -0,0 +1,23 @@
1
+ {
2
+ "do_convert_rgb": null,
3
+ "do_normalize": true,
4
+ "do_rescale": true,
5
+ "do_resize": true,
6
+ "image_mean": [
7
+ 0.5,
8
+ 0.5,
9
+ 0.5
10
+ ],
11
+ "image_processor_type": "ViTImageProcessorFast",
12
+ "image_std": [
13
+ 0.5,
14
+ 0.5,
15
+ 0.5
16
+ ],
17
+ "resample": 2,
18
+ "rescale_factor": 0.00392156862745098,
19
+ "size": {
20
+ "height": 224,
21
+ "width": 224
22
+ }
23
+ }
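For reference, the processor settings above (resize to 224×224 with bilinear resampling, rescale by 1/255, normalise with mean and std of 0.5 per channel) correspond roughly to the following torchvision pipeline; this is an equivalent sketch, not code from the repository:

```python
from torchvision import transforms

# Approximates ViTImageProcessorFast with the values in preprocessor_config.json:
# resample=2 is PIL bilinear, rescale_factor=1/255 is handled by ToTensor(),
# and mean/std of 0.5 map pixel values into the range [-1, 1].
vit_preprocess = transforms.Compose([
    transforms.Resize((224, 224), interpolation=transforms.InterpolationMode.BILINEAR),
    transforms.ToTensor(),
    transforms.Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5]),
])
```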
runs/Feb02_21-36-39_modal/events.out.tfevents.1738532200.modal.2.0 ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:57d86686ec1b84341fc5e99676c5ca60d7fa22da74721c30e45466041001d04a
3
+ size 102419
runs/Feb02_21-36-39_modal/events.out.tfevents.1738532200.modal.2.1 ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:17fb4d8b70c38c2310eb7b563f505ec4e424231f85926e33d1e261138db8b5a2
3
+ size 102419
runs/Feb02_21-36-39_modal/events.out.tfevents.1738533173.modal.2.2 ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:6b74fc37ce31f0ddf84c7882c7a69cdf4afebb6875af4017bdd4ab813d71b43e
3
+ size 921
runs/Feb02_21-36-39_modal/events.out.tfevents.1738533173.modal.2.3 ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:913e641364bafe942c3a394b45af39325916ca1d22e9a1d4b44e7254275627fb
3
+ size 921
train_results.json ADDED
@@ -0,0 +1,8 @@
1
+ {
2
+ "epoch": 25.0,
3
+ "total_flos": 3.163993239336653e+18,
4
+ "train_loss": 0.5988656284373478,
5
+ "train_runtime": 971.0069,
6
+ "train_samples_per_second": 11.895,
7
+ "train_steps_per_second": 0.747
8
+ }
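These throughput figures are consistent with the hyperparameters in the README; a quick back-of-the-envelope check using only values from this commit:

```python
# 725 optimizer steps over 25 epochs -> 29 steps per epoch; with an effective
# batch of 16 (8 per device x 2 accumulation steps) that implies roughly
# 29 * 16 = 464 training images (the last batch of an epoch may be partial).
steps_per_epoch = 725 / 25                     # 29.0
implied_train_size = steps_per_epoch * 16      # ~464 samples

# train_runtime * train_samples_per_second gives the same order of magnitude.
processed = 971.0069 * 11.895                  # ~11550 samples seen in total
per_epoch = processed / 25                     # ~462 samples per epoch
print(round(implied_train_size), round(per_epoch))
```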
trainer_state.json ADDED
@@ -0,0 +1,3026 @@
1
+ {
2
+ "best_metric": 1.0474289655685425,
3
+ "best_model_checkpoint": "squarerun_large_model/checkpoint-261",
4
+ "epoch": 25.0,
5
+ "eval_steps": 500,
6
+ "global_step": 725,
7
+ "is_hyper_param_search": false,
8
+ "is_local_process_zero": true,
9
+ "is_world_process_zero": true,
10
+ "log_history": [
11
+ {
12
+ "epoch": 0.06896551724137931,
13
+ "grad_norm": 16.904699325561523,
14
+ "learning_rate": 2.7397260273972604e-06,
15
+ "loss": 2.0161,
16
+ "step": 2
17
+ },
18
+ {
19
+ "epoch": 0.13793103448275862,
20
+ "grad_norm": 7.744546413421631,
21
+ "learning_rate": 5.479452054794521e-06,
22
+ "loss": 1.9714,
23
+ "step": 4
24
+ },
25
+ {
26
+ "epoch": 0.20689655172413793,
27
+ "grad_norm": 11.83096694946289,
28
+ "learning_rate": 8.21917808219178e-06,
29
+ "loss": 2.0864,
30
+ "step": 6
31
+ },
32
+ {
33
+ "epoch": 0.27586206896551724,
34
+ "grad_norm": 8.980508804321289,
35
+ "learning_rate": 1.0958904109589042e-05,
36
+ "loss": 1.9544,
37
+ "step": 8
38
+ },
39
+ {
40
+ "epoch": 0.3448275862068966,
41
+ "grad_norm": 11.756573677062988,
42
+ "learning_rate": 1.3698630136986302e-05,
43
+ "loss": 1.872,
44
+ "step": 10
45
+ },
46
+ {
47
+ "epoch": 0.41379310344827586,
48
+ "grad_norm": 12.744009971618652,
49
+ "learning_rate": 1.643835616438356e-05,
50
+ "loss": 2.128,
51
+ "step": 12
52
+ },
53
+ {
54
+ "epoch": 0.4827586206896552,
55
+ "grad_norm": 11.320892333984375,
56
+ "learning_rate": 1.9178082191780822e-05,
57
+ "loss": 1.9613,
58
+ "step": 14
59
+ },
60
+ {
61
+ "epoch": 0.5517241379310345,
62
+ "grad_norm": 7.5296406745910645,
63
+ "learning_rate": 2.1917808219178083e-05,
64
+ "loss": 2.0163,
65
+ "step": 16
66
+ },
67
+ {
68
+ "epoch": 0.6206896551724138,
69
+ "grad_norm": 10.615029335021973,
70
+ "learning_rate": 2.4657534246575342e-05,
71
+ "loss": 1.8878,
72
+ "step": 18
73
+ },
74
+ {
75
+ "epoch": 0.6896551724137931,
76
+ "grad_norm": 9.413975715637207,
77
+ "learning_rate": 2.7397260273972603e-05,
78
+ "loss": 1.9238,
79
+ "step": 20
80
+ },
81
+ {
82
+ "epoch": 0.7586206896551724,
83
+ "grad_norm": 7.4806437492370605,
84
+ "learning_rate": 3.0136986301369862e-05,
85
+ "loss": 1.9426,
86
+ "step": 22
87
+ },
88
+ {
89
+ "epoch": 0.8275862068965517,
90
+ "grad_norm": 4.6118035316467285,
91
+ "learning_rate": 3.287671232876712e-05,
92
+ "loss": 1.8594,
93
+ "step": 24
94
+ },
95
+ {
96
+ "epoch": 0.896551724137931,
97
+ "grad_norm": 9.567954063415527,
98
+ "learning_rate": 3.561643835616438e-05,
99
+ "loss": 1.762,
100
+ "step": 26
101
+ },
102
+ {
103
+ "epoch": 0.9655172413793104,
104
+ "grad_norm": 7.831980228424072,
105
+ "learning_rate": 3.8356164383561644e-05,
106
+ "loss": 1.917,
107
+ "step": 28
108
+ },
109
+ {
110
+ "epoch": 1.0,
111
+ "eval_accuracy": 0.2196969696969697,
112
+ "eval_f1_macro": 0.10662030635038826,
113
+ "eval_f1_micro": 0.2196969696969697,
114
+ "eval_f1_weighted": 0.12729797039644397,
115
+ "eval_loss": 1.9114803075790405,
116
+ "eval_precision_macro": 0.07802017358667604,
117
+ "eval_precision_micro": 0.2196969696969697,
118
+ "eval_precision_weighted": 0.09233343285067422,
119
+ "eval_recall_macro": 0.18317460317460316,
120
+ "eval_recall_micro": 0.2196969696969697,
121
+ "eval_recall_weighted": 0.2196969696969697,
122
+ "eval_runtime": 2.9641,
123
+ "eval_samples_per_second": 44.533,
124
+ "eval_steps_per_second": 5.735,
125
+ "step": 29
126
+ },
127
+ {
128
+ "epoch": 1.0344827586206897,
129
+ "grad_norm": 11.95688533782959,
130
+ "learning_rate": 4.1095890410958905e-05,
131
+ "loss": 1.9276,
132
+ "step": 30
133
+ },
134
+ {
135
+ "epoch": 1.103448275862069,
136
+ "grad_norm": 7.446818828582764,
137
+ "learning_rate": 4.383561643835617e-05,
138
+ "loss": 1.7188,
139
+ "step": 32
140
+ },
141
+ {
142
+ "epoch": 1.1724137931034484,
143
+ "grad_norm": 7.449117183685303,
144
+ "learning_rate": 4.657534246575342e-05,
145
+ "loss": 1.7229,
146
+ "step": 34
147
+ },
148
+ {
149
+ "epoch": 1.2413793103448276,
150
+ "grad_norm": 9.84091567993164,
151
+ "learning_rate": 4.9315068493150684e-05,
152
+ "loss": 2.0518,
153
+ "step": 36
154
+ },
155
+ {
156
+ "epoch": 1.3103448275862069,
157
+ "grad_norm": 9.748187065124512,
158
+ "learning_rate": 5.2054794520547945e-05,
159
+ "loss": 1.838,
160
+ "step": 38
161
+ },
162
+ {
163
+ "epoch": 1.3793103448275863,
164
+ "grad_norm": 10.7653226852417,
165
+ "learning_rate": 5.479452054794521e-05,
166
+ "loss": 1.9796,
167
+ "step": 40
168
+ },
169
+ {
170
+ "epoch": 1.4482758620689655,
171
+ "grad_norm": 8.703463554382324,
172
+ "learning_rate": 5.753424657534247e-05,
173
+ "loss": 1.8676,
174
+ "step": 42
175
+ },
176
+ {
177
+ "epoch": 1.5172413793103448,
178
+ "grad_norm": 8.969353675842285,
179
+ "learning_rate": 6.0273972602739724e-05,
180
+ "loss": 1.6242,
181
+ "step": 44
182
+ },
183
+ {
184
+ "epoch": 1.5862068965517242,
185
+ "grad_norm": 6.287548542022705,
186
+ "learning_rate": 6.301369863013699e-05,
187
+ "loss": 1.6454,
188
+ "step": 46
189
+ },
190
+ {
191
+ "epoch": 1.6551724137931034,
192
+ "grad_norm": 6.384350776672363,
193
+ "learning_rate": 6.575342465753424e-05,
194
+ "loss": 1.7246,
195
+ "step": 48
196
+ },
197
+ {
198
+ "epoch": 1.7241379310344827,
199
+ "grad_norm": 8.735137939453125,
200
+ "learning_rate": 6.84931506849315e-05,
201
+ "loss": 1.8569,
202
+ "step": 50
203
+ },
204
+ {
205
+ "epoch": 1.793103448275862,
206
+ "grad_norm": 7.929757118225098,
207
+ "learning_rate": 7.123287671232876e-05,
208
+ "loss": 1.6627,
209
+ "step": 52
210
+ },
211
+ {
212
+ "epoch": 1.8620689655172413,
213
+ "grad_norm": 9.604327201843262,
214
+ "learning_rate": 7.397260273972603e-05,
215
+ "loss": 1.7562,
216
+ "step": 54
217
+ },
218
+ {
219
+ "epoch": 1.9310344827586206,
220
+ "grad_norm": 8.560978889465332,
221
+ "learning_rate": 7.671232876712329e-05,
222
+ "loss": 1.5444,
223
+ "step": 56
224
+ },
225
+ {
226
+ "epoch": 2.0,
227
+ "grad_norm": 8.442503929138184,
228
+ "learning_rate": 7.945205479452055e-05,
229
+ "loss": 1.6762,
230
+ "step": 58
231
+ },
232
+ {
233
+ "epoch": 2.0,
234
+ "eval_accuracy": 0.3560606060606061,
235
+ "eval_f1_macro": 0.27325003502368067,
236
+ "eval_f1_micro": 0.3560606060606061,
237
+ "eval_f1_weighted": 0.30047625697016983,
238
+ "eval_loss": 1.6721538305282593,
239
+ "eval_precision_macro": 0.31412770236299653,
240
+ "eval_precision_micro": 0.3560606060606061,
241
+ "eval_precision_weighted": 0.368357405122111,
242
+ "eval_recall_macro": 0.33546485260770975,
243
+ "eval_recall_micro": 0.3560606060606061,
244
+ "eval_recall_weighted": 0.3560606060606061,
245
+ "eval_runtime": 3.2765,
246
+ "eval_samples_per_second": 40.287,
247
+ "eval_steps_per_second": 5.189,
248
+ "step": 58
249
+ },
250
+ {
251
+ "epoch": 2.0689655172413794,
252
+ "grad_norm": 10.002808570861816,
253
+ "learning_rate": 8.219178082191781e-05,
254
+ "loss": 1.6511,
255
+ "step": 60
256
+ },
257
+ {
258
+ "epoch": 2.1379310344827585,
259
+ "grad_norm": 32.81317901611328,
260
+ "learning_rate": 8.493150684931507e-05,
261
+ "loss": 1.5311,
262
+ "step": 62
263
+ },
264
+ {
265
+ "epoch": 2.206896551724138,
266
+ "grad_norm": 11.362375259399414,
267
+ "learning_rate": 8.767123287671233e-05,
268
+ "loss": 1.5404,
269
+ "step": 64
270
+ },
271
+ {
272
+ "epoch": 2.2758620689655173,
273
+ "grad_norm": 15.569432258605957,
274
+ "learning_rate": 9.041095890410958e-05,
275
+ "loss": 1.7608,
276
+ "step": 66
277
+ },
278
+ {
279
+ "epoch": 2.344827586206897,
280
+ "grad_norm": 8.356627464294434,
281
+ "learning_rate": 9.315068493150684e-05,
282
+ "loss": 1.6792,
283
+ "step": 68
284
+ },
285
+ {
286
+ "epoch": 2.413793103448276,
287
+ "grad_norm": 9.728259086608887,
288
+ "learning_rate": 9.58904109589041e-05,
289
+ "loss": 1.7309,
290
+ "step": 70
291
+ },
292
+ {
293
+ "epoch": 2.4827586206896552,
294
+ "grad_norm": 8.866983413696289,
295
+ "learning_rate": 9.863013698630137e-05,
296
+ "loss": 1.8618,
297
+ "step": 72
298
+ },
299
+ {
300
+ "epoch": 2.5517241379310347,
301
+ "grad_norm": 10.20724105834961,
302
+ "learning_rate": 9.984662576687117e-05,
303
+ "loss": 1.4866,
304
+ "step": 74
305
+ },
306
+ {
307
+ "epoch": 2.6206896551724137,
308
+ "grad_norm": 10.527627944946289,
309
+ "learning_rate": 9.95398773006135e-05,
310
+ "loss": 1.7936,
311
+ "step": 76
312
+ },
313
+ {
314
+ "epoch": 2.689655172413793,
315
+ "grad_norm": 5.849502086639404,
316
+ "learning_rate": 9.923312883435584e-05,
317
+ "loss": 1.7227,
318
+ "step": 78
319
+ },
320
+ {
321
+ "epoch": 2.7586206896551726,
322
+ "grad_norm": 9.242766380310059,
323
+ "learning_rate": 9.892638036809816e-05,
324
+ "loss": 1.7347,
325
+ "step": 80
326
+ },
327
+ {
328
+ "epoch": 2.8275862068965516,
329
+ "grad_norm": 9.303982734680176,
330
+ "learning_rate": 9.861963190184049e-05,
331
+ "loss": 1.5123,
332
+ "step": 82
333
+ },
334
+ {
335
+ "epoch": 2.896551724137931,
336
+ "grad_norm": 7.220952987670898,
337
+ "learning_rate": 9.831288343558283e-05,
338
+ "loss": 1.9009,
339
+ "step": 84
340
+ },
341
+ {
342
+ "epoch": 2.9655172413793105,
343
+ "grad_norm": 8.945652961730957,
344
+ "learning_rate": 9.800613496932515e-05,
345
+ "loss": 1.9664,
346
+ "step": 86
347
+ },
348
+ {
349
+ "epoch": 3.0,
350
+ "eval_accuracy": 0.45454545454545453,
351
+ "eval_f1_macro": 0.35540571944322835,
352
+ "eval_f1_micro": 0.45454545454545453,
353
+ "eval_f1_weighted": 0.4060242434737366,
354
+ "eval_loss": 1.5057227611541748,
355
+ "eval_precision_macro": 0.3733890330325679,
356
+ "eval_precision_micro": 0.45454545454545453,
357
+ "eval_precision_weighted": 0.41290912992763085,
358
+ "eval_recall_macro": 0.3856538170823885,
359
+ "eval_recall_micro": 0.45454545454545453,
360
+ "eval_recall_weighted": 0.45454545454545453,
361
+ "eval_runtime": 2.9982,
362
+ "eval_samples_per_second": 44.026,
363
+ "eval_steps_per_second": 5.67,
364
+ "step": 87
365
+ },
366
+ {
367
+ "epoch": 3.0344827586206895,
368
+ "grad_norm": 7.55537223815918,
369
+ "learning_rate": 9.76993865030675e-05,
370
+ "loss": 1.3214,
371
+ "step": 88
372
+ },
373
+ {
374
+ "epoch": 3.103448275862069,
375
+ "grad_norm": 6.456240177154541,
376
+ "learning_rate": 9.739263803680982e-05,
377
+ "loss": 1.2262,
378
+ "step": 90
379
+ },
380
+ {
381
+ "epoch": 3.1724137931034484,
382
+ "grad_norm": 10.980310440063477,
383
+ "learning_rate": 9.708588957055215e-05,
384
+ "loss": 1.6514,
385
+ "step": 92
386
+ },
387
+ {
388
+ "epoch": 3.2413793103448274,
389
+ "grad_norm": 10.584870338439941,
390
+ "learning_rate": 9.677914110429448e-05,
391
+ "loss": 1.464,
392
+ "step": 94
393
+ },
394
+ {
395
+ "epoch": 3.310344827586207,
396
+ "grad_norm": 7.440125465393066,
397
+ "learning_rate": 9.647239263803681e-05,
398
+ "loss": 1.2235,
399
+ "step": 96
400
+ },
401
+ {
402
+ "epoch": 3.3793103448275863,
403
+ "grad_norm": 9.703545570373535,
404
+ "learning_rate": 9.616564417177915e-05,
405
+ "loss": 1.7171,
406
+ "step": 98
407
+ },
408
+ {
409
+ "epoch": 3.4482758620689653,
410
+ "grad_norm": 7.038817882537842,
411
+ "learning_rate": 9.585889570552147e-05,
412
+ "loss": 1.3095,
413
+ "step": 100
414
+ },
415
+ {
416
+ "epoch": 3.5172413793103448,
417
+ "grad_norm": 11.278383255004883,
418
+ "learning_rate": 9.555214723926381e-05,
419
+ "loss": 1.6106,
420
+ "step": 102
421
+ },
422
+ {
423
+ "epoch": 3.586206896551724,
424
+ "grad_norm": 7.141289710998535,
425
+ "learning_rate": 9.524539877300614e-05,
426
+ "loss": 1.0537,
427
+ "step": 104
428
+ },
429
+ {
430
+ "epoch": 3.655172413793103,
431
+ "grad_norm": 6.102043151855469,
432
+ "learning_rate": 9.493865030674846e-05,
433
+ "loss": 1.2514,
434
+ "step": 106
435
+ },
436
+ {
437
+ "epoch": 3.7241379310344827,
438
+ "grad_norm": 9.988530158996582,
439
+ "learning_rate": 9.46319018404908e-05,
440
+ "loss": 1.1408,
441
+ "step": 108
442
+ },
443
+ {
444
+ "epoch": 3.793103448275862,
445
+ "grad_norm": 7.49151611328125,
446
+ "learning_rate": 9.432515337423313e-05,
447
+ "loss": 1.5343,
448
+ "step": 110
449
+ },
450
+ {
451
+ "epoch": 3.862068965517241,
452
+ "grad_norm": 13.044901847839355,
453
+ "learning_rate": 9.401840490797547e-05,
454
+ "loss": 1.9732,
455
+ "step": 112
456
+ },
457
+ {
458
+ "epoch": 3.9310344827586206,
459
+ "grad_norm": 10.967622756958008,
460
+ "learning_rate": 9.37116564417178e-05,
461
+ "loss": 1.4529,
462
+ "step": 114
463
+ },
464
+ {
465
+ "epoch": 4.0,
466
+ "grad_norm": 6.224091529846191,
467
+ "learning_rate": 9.340490797546013e-05,
468
+ "loss": 1.1934,
469
+ "step": 116
470
+ },
471
+ {
472
+ "epoch": 4.0,
473
+ "eval_accuracy": 0.4090909090909091,
474
+ "eval_f1_macro": 0.3129633381731382,
475
+ "eval_f1_micro": 0.4090909090909091,
476
+ "eval_f1_weighted": 0.3530157221067592,
477
+ "eval_loss": 1.4216643571853638,
478
+ "eval_precision_macro": 0.34140786749482405,
479
+ "eval_precision_micro": 0.4090909090909091,
480
+ "eval_precision_weighted": 0.3818291611769873,
481
+ "eval_recall_macro": 0.3628798185941043,
482
+ "eval_recall_micro": 0.4090909090909091,
483
+ "eval_recall_weighted": 0.4090909090909091,
484
+ "eval_runtime": 2.9654,
485
+ "eval_samples_per_second": 44.513,
486
+ "eval_steps_per_second": 5.733,
487
+ "step": 116
488
+ },
489
+ {
490
+ "epoch": 4.068965517241379,
491
+ "grad_norm": 10.413983345031738,
492
+ "learning_rate": 9.309815950920246e-05,
493
+ "loss": 1.4143,
494
+ "step": 118
495
+ },
496
+ {
497
+ "epoch": 4.137931034482759,
498
+ "grad_norm": 6.670328617095947,
499
+ "learning_rate": 9.279141104294478e-05,
500
+ "loss": 1.1382,
501
+ "step": 120
502
+ },
503
+ {
504
+ "epoch": 4.206896551724138,
505
+ "grad_norm": 7.599217414855957,
506
+ "learning_rate": 9.248466257668712e-05,
507
+ "loss": 1.291,
508
+ "step": 122
509
+ },
510
+ {
511
+ "epoch": 4.275862068965517,
512
+ "grad_norm": 9.762212753295898,
513
+ "learning_rate": 9.217791411042945e-05,
514
+ "loss": 1.2167,
515
+ "step": 124
516
+ },
517
+ {
518
+ "epoch": 4.344827586206897,
519
+ "grad_norm": 7.603570938110352,
520
+ "learning_rate": 9.187116564417179e-05,
521
+ "loss": 1.1758,
522
+ "step": 126
523
+ },
524
+ {
525
+ "epoch": 4.413793103448276,
526
+ "grad_norm": 6.104551315307617,
527
+ "learning_rate": 9.156441717791411e-05,
528
+ "loss": 1.1649,
529
+ "step": 128
530
+ },
531
+ {
532
+ "epoch": 4.482758620689655,
533
+ "grad_norm": 10.374088287353516,
534
+ "learning_rate": 9.125766871165644e-05,
535
+ "loss": 1.3607,
536
+ "step": 130
537
+ },
538
+ {
539
+ "epoch": 4.551724137931035,
540
+ "grad_norm": 9.6392822265625,
541
+ "learning_rate": 9.095092024539878e-05,
542
+ "loss": 1.6339,
543
+ "step": 132
544
+ },
545
+ {
546
+ "epoch": 4.620689655172414,
547
+ "grad_norm": 7.82922887802124,
548
+ "learning_rate": 9.06441717791411e-05,
549
+ "loss": 1.8073,
550
+ "step": 134
551
+ },
552
+ {
553
+ "epoch": 4.689655172413794,
554
+ "grad_norm": 9.731952667236328,
555
+ "learning_rate": 9.033742331288344e-05,
556
+ "loss": 1.3304,
557
+ "step": 136
558
+ },
559
+ {
560
+ "epoch": 4.758620689655173,
561
+ "grad_norm": 6.175458908081055,
562
+ "learning_rate": 9.003067484662577e-05,
563
+ "loss": 1.1931,
564
+ "step": 138
565
+ },
566
+ {
567
+ "epoch": 4.827586206896552,
568
+ "grad_norm": 4.451329231262207,
569
+ "learning_rate": 8.972392638036811e-05,
570
+ "loss": 1.0355,
571
+ "step": 140
572
+ },
573
+ {
574
+ "epoch": 4.896551724137931,
575
+ "grad_norm": 6.266848087310791,
576
+ "learning_rate": 8.941717791411043e-05,
577
+ "loss": 1.2382,
578
+ "step": 142
579
+ },
580
+ {
581
+ "epoch": 4.9655172413793105,
582
+ "grad_norm": 5.750278949737549,
583
+ "learning_rate": 8.911042944785276e-05,
584
+ "loss": 1.0968,
585
+ "step": 144
586
+ },
587
+ {
588
+ "epoch": 5.0,
589
+ "eval_accuracy": 0.5757575757575758,
590
+ "eval_f1_macro": 0.4607554427944924,
591
+ "eval_f1_micro": 0.5757575757575758,
592
+ "eval_f1_weighted": 0.525821869552332,
593
+ "eval_loss": 1.1879011392593384,
594
+ "eval_precision_macro": 0.4807155657962109,
595
+ "eval_precision_micro": 0.5757575757575758,
596
+ "eval_precision_weighted": 0.5437836021505377,
597
+ "eval_recall_macro": 0.5045275888133031,
598
+ "eval_recall_micro": 0.5757575757575758,
599
+ "eval_recall_weighted": 0.5757575757575758,
600
+ "eval_runtime": 3.2227,
601
+ "eval_samples_per_second": 40.96,
602
+ "eval_steps_per_second": 5.275,
603
+ "step": 145
604
+ },
605
+ {
606
+ "epoch": 5.0344827586206895,
607
+ "grad_norm": 7.826033592224121,
608
+ "learning_rate": 8.88036809815951e-05,
609
+ "loss": 1.5289,
610
+ "step": 146
611
+ },
612
+ {
613
+ "epoch": 5.103448275862069,
614
+ "grad_norm": 11.248700141906738,
615
+ "learning_rate": 8.849693251533742e-05,
616
+ "loss": 1.0666,
617
+ "step": 148
618
+ },
619
+ {
620
+ "epoch": 5.172413793103448,
621
+ "grad_norm": 6.100868225097656,
622
+ "learning_rate": 8.819018404907976e-05,
623
+ "loss": 0.9321,
624
+ "step": 150
625
+ },
626
+ {
627
+ "epoch": 5.241379310344827,
628
+ "grad_norm": 5.934208393096924,
629
+ "learning_rate": 8.788343558282209e-05,
630
+ "loss": 0.8063,
631
+ "step": 152
632
+ },
633
+ {
634
+ "epoch": 5.310344827586207,
635
+ "grad_norm": 6.6341471672058105,
636
+ "learning_rate": 8.757668711656443e-05,
637
+ "loss": 1.1262,
638
+ "step": 154
639
+ },
640
+ {
641
+ "epoch": 5.379310344827586,
642
+ "grad_norm": 6.946788311004639,
643
+ "learning_rate": 8.726993865030675e-05,
644
+ "loss": 0.957,
645
+ "step": 156
646
+ },
647
+ {
648
+ "epoch": 5.448275862068965,
649
+ "grad_norm": 11.869194030761719,
650
+ "learning_rate": 8.696319018404908e-05,
651
+ "loss": 1.6966,
652
+ "step": 158
653
+ },
654
+ {
655
+ "epoch": 5.517241379310345,
656
+ "grad_norm": 8.566763877868652,
657
+ "learning_rate": 8.665644171779142e-05,
658
+ "loss": 1.222,
659
+ "step": 160
660
+ },
661
+ {
662
+ "epoch": 5.586206896551724,
663
+ "grad_norm": 8.25841999053955,
664
+ "learning_rate": 8.634969325153374e-05,
665
+ "loss": 1.4671,
666
+ "step": 162
667
+ },
668
+ {
669
+ "epoch": 5.655172413793103,
670
+ "grad_norm": 4.913756370544434,
671
+ "learning_rate": 8.604294478527608e-05,
672
+ "loss": 1.07,
673
+ "step": 164
674
+ },
675
+ {
676
+ "epoch": 5.724137931034483,
677
+ "grad_norm": 7.771355152130127,
678
+ "learning_rate": 8.573619631901841e-05,
679
+ "loss": 0.8391,
680
+ "step": 166
681
+ },
682
+ {
683
+ "epoch": 5.793103448275862,
684
+ "grad_norm": 8.31480598449707,
685
+ "learning_rate": 8.542944785276073e-05,
686
+ "loss": 1.311,
687
+ "step": 168
688
+ },
689
+ {
690
+ "epoch": 5.862068965517241,
691
+ "grad_norm": 14.048528671264648,
692
+ "learning_rate": 8.512269938650307e-05,
693
+ "loss": 1.4462,
694
+ "step": 170
695
+ },
696
+ {
697
+ "epoch": 5.931034482758621,
698
+ "grad_norm": 7.521085739135742,
699
+ "learning_rate": 8.48159509202454e-05,
700
+ "loss": 1.2066,
701
+ "step": 172
702
+ },
703
+ {
704
+ "epoch": 6.0,
705
+ "grad_norm": 5.221696376800537,
706
+ "learning_rate": 8.450920245398774e-05,
707
+ "loss": 1.1313,
708
+ "step": 174
709
+ },
710
+ {
711
+ "epoch": 6.0,
712
+ "eval_accuracy": 0.553030303030303,
713
+ "eval_f1_macro": 0.4964328132779621,
714
+ "eval_f1_micro": 0.553030303030303,
715
+ "eval_f1_weighted": 0.5242598987794572,
716
+ "eval_loss": 1.230722427368164,
717
+ "eval_precision_macro": 0.5849505386679483,
718
+ "eval_precision_micro": 0.553030303030303,
719
+ "eval_precision_weighted": 0.6113855756896126,
720
+ "eval_recall_macro": 0.5196371882086168,
721
+ "eval_recall_micro": 0.553030303030303,
722
+ "eval_recall_weighted": 0.553030303030303,
723
+ "eval_runtime": 2.9624,
724
+ "eval_samples_per_second": 44.559,
725
+ "eval_steps_per_second": 5.739,
726
+ "step": 174
727
+ },
728
+ {
729
+ "epoch": 6.068965517241379,
730
+ "grad_norm": 8.70207691192627,
731
+ "learning_rate": 8.420245398773006e-05,
732
+ "loss": 1.0758,
733
+ "step": 176
734
+ },
735
+ {
736
+ "epoch": 6.137931034482759,
737
+ "grad_norm": 5.113523960113525,
738
+ "learning_rate": 8.38957055214724e-05,
739
+ "loss": 0.835,
740
+ "step": 178
741
+ },
742
+ {
743
+ "epoch": 6.206896551724138,
744
+ "grad_norm": 8.031415939331055,
745
+ "learning_rate": 8.358895705521473e-05,
746
+ "loss": 0.9388,
747
+ "step": 180
748
+ },
749
+ {
750
+ "epoch": 6.275862068965517,
751
+ "grad_norm": 6.924206256866455,
752
+ "learning_rate": 8.328220858895705e-05,
753
+ "loss": 1.2986,
754
+ "step": 182
755
+ },
756
+ {
757
+ "epoch": 6.344827586206897,
758
+ "grad_norm": 8.165400505065918,
759
+ "learning_rate": 8.297546012269939e-05,
760
+ "loss": 0.9004,
761
+ "step": 184
762
+ },
763
+ {
764
+ "epoch": 6.413793103448276,
765
+ "grad_norm": 8.674925804138184,
766
+ "learning_rate": 8.266871165644172e-05,
767
+ "loss": 0.8029,
768
+ "step": 186
769
+ },
770
+ {
771
+ "epoch": 6.482758620689655,
772
+ "grad_norm": 7.9463114738464355,
773
+ "learning_rate": 8.236196319018406e-05,
774
+ "loss": 0.7277,
775
+ "step": 188
776
+ },
777
+ {
778
+ "epoch": 6.551724137931035,
779
+ "grad_norm": 7.694469451904297,
780
+ "learning_rate": 8.205521472392638e-05,
781
+ "loss": 1.1082,
782
+ "step": 190
783
+ },
784
+ {
785
+ "epoch": 6.620689655172414,
786
+ "grad_norm": 7.801606178283691,
787
+ "learning_rate": 8.174846625766872e-05,
788
+ "loss": 1.0687,
789
+ "step": 192
790
+ },
791
+ {
792
+ "epoch": 6.689655172413794,
793
+ "grad_norm": 7.689370155334473,
794
+ "learning_rate": 8.144171779141105e-05,
795
+ "loss": 1.0604,
796
+ "step": 194
797
+ },
798
+ {
799
+ "epoch": 6.758620689655173,
800
+ "grad_norm": 7.995850086212158,
801
+ "learning_rate": 8.113496932515337e-05,
802
+ "loss": 1.3311,
803
+ "step": 196
804
+ },
805
+ {
806
+ "epoch": 6.827586206896552,
807
+ "grad_norm": 4.453602313995361,
808
+ "learning_rate": 8.082822085889571e-05,
809
+ "loss": 0.8307,
810
+ "step": 198
811
+ },
812
+ {
813
+ "epoch": 6.896551724137931,
814
+ "grad_norm": 5.754306793212891,
815
+ "learning_rate": 8.052147239263804e-05,
816
+ "loss": 0.7514,
817
+ "step": 200
818
+ },
819
+ {
820
+ "epoch": 6.9655172413793105,
821
+ "grad_norm": 6.210538387298584,
822
+ "learning_rate": 8.021472392638038e-05,
823
+ "loss": 1.0807,
824
+ "step": 202
825
+ },
826
+ {
827
+ "epoch": 7.0,
828
+ "eval_accuracy": 0.5303030303030303,
829
+ "eval_f1_macro": 0.4087798393817858,
830
+ "eval_f1_micro": 0.5303030303030303,
831
+ "eval_f1_weighted": 0.47721174256077536,
832
+ "eval_loss": 1.2771176099777222,
833
+ "eval_precision_macro": 0.5392650999793857,
834
+ "eval_precision_micro": 0.5303030303030303,
835
+ "eval_precision_weighted": 0.581616489571035,
836
+ "eval_recall_macro": 0.4303930461073318,
837
+ "eval_recall_micro": 0.5303030303030303,
838
+ "eval_recall_weighted": 0.5303030303030303,
839
+ "eval_runtime": 2.9556,
840
+ "eval_samples_per_second": 44.661,
841
+ "eval_steps_per_second": 5.752,
842
+ "step": 203
843
+ },
844
+ {
845
+ "epoch": 7.0344827586206895,
846
+ "grad_norm": 5.119338512420654,
847
+ "learning_rate": 7.99079754601227e-05,
848
+ "loss": 0.962,
849
+ "step": 204
850
+ },
851
+ {
852
+ "epoch": 7.103448275862069,
853
+ "grad_norm": 9.368791580200195,
854
+ "learning_rate": 7.960122699386503e-05,
855
+ "loss": 0.7753,
856
+ "step": 206
857
+ },
858
+ {
859
+ "epoch": 7.172413793103448,
860
+ "grad_norm": 7.991408348083496,
861
+ "learning_rate": 7.929447852760737e-05,
862
+ "loss": 1.0809,
863
+ "step": 208
864
+ },
865
+ {
866
+ "epoch": 7.241379310344827,
867
+ "grad_norm": 8.43641185760498,
868
+ "learning_rate": 7.898773006134969e-05,
869
+ "loss": 0.9518,
870
+ "step": 210
871
+ },
872
+ {
873
+ "epoch": 7.310344827586207,
874
+ "grad_norm": 11.549813270568848,
875
+ "learning_rate": 7.868098159509203e-05,
876
+ "loss": 1.0382,
877
+ "step": 212
878
+ },
879
+ {
880
+ "epoch": 7.379310344827586,
881
+ "grad_norm": 7.266906261444092,
882
+ "learning_rate": 7.837423312883436e-05,
883
+ "loss": 0.9945,
884
+ "step": 214
885
+ },
886
+ {
887
+ "epoch": 7.448275862068965,
888
+ "grad_norm": 6.440887928009033,
889
+ "learning_rate": 7.80674846625767e-05,
890
+ "loss": 0.8706,
891
+ "step": 216
892
+ },
893
+ {
894
+ "epoch": 7.517241379310345,
895
+ "grad_norm": 7.407945156097412,
896
+ "learning_rate": 7.776073619631902e-05,
897
+ "loss": 0.7985,
898
+ "step": 218
899
+ },
900
+ {
901
+ "epoch": 7.586206896551724,
902
+ "grad_norm": 6.593641757965088,
903
+ "learning_rate": 7.745398773006135e-05,
904
+ "loss": 0.5974,
905
+ "step": 220
906
+ },
907
+ {
908
+ "epoch": 7.655172413793103,
909
+ "grad_norm": 7.8406829833984375,
910
+ "learning_rate": 7.714723926380369e-05,
911
+ "loss": 0.8405,
912
+ "step": 222
913
+ },
914
+ {
915
+ "epoch": 7.724137931034483,
916
+ "grad_norm": 9.805188179016113,
917
+ "learning_rate": 7.684049079754601e-05,
918
+ "loss": 1.177,
919
+ "step": 224
920
+ },
921
+ {
922
+ "epoch": 7.793103448275862,
923
+ "grad_norm": 10.728581428527832,
924
+ "learning_rate": 7.653374233128835e-05,
925
+ "loss": 1.5151,
926
+ "step": 226
927
+ },
928
+ {
929
+ "epoch": 7.862068965517241,
930
+ "grad_norm": 5.296911239624023,
931
+ "learning_rate": 7.622699386503068e-05,
932
+ "loss": 0.7184,
933
+ "step": 228
934
+ },
935
+ {
936
+ "epoch": 7.931034482758621,
937
+ "grad_norm": 7.458691596984863,
938
+ "learning_rate": 7.5920245398773e-05,
939
+ "loss": 1.1221,
940
+ "step": 230
941
+ },
942
+ {
943
+ "epoch": 8.0,
944
+ "grad_norm": 7.329806804656982,
945
+ "learning_rate": 7.561349693251534e-05,
946
+ "loss": 1.1825,
947
+ "step": 232
948
+ },
949
+ {
950
+ "epoch": 8.0,
951
+ "eval_accuracy": 0.5681818181818182,
952
+ "eval_f1_macro": 0.45282668788005603,
953
+ "eval_f1_micro": 0.5681818181818182,
954
+ "eval_f1_weighted": 0.517546575055234,
955
+ "eval_loss": 1.2339295148849487,
956
+ "eval_precision_macro": 0.5544132386496917,
957
+ "eval_precision_micro": 0.5681818181818182,
958
+ "eval_precision_weighted": 0.6168848132014276,
959
+ "eval_recall_macro": 0.4919803476946334,
960
+ "eval_recall_micro": 0.5681818181818182,
961
+ "eval_recall_weighted": 0.5681818181818182,
962
+ "eval_runtime": 3.0256,
963
+ "eval_samples_per_second": 43.628,
964
+ "eval_steps_per_second": 5.619,
965
+ "step": 232
966
+ },
967
+ {
968
+ "epoch": 8.068965517241379,
969
+ "grad_norm": 12.180721282958984,
970
+ "learning_rate": 7.530674846625767e-05,
971
+ "loss": 0.9113,
972
+ "step": 234
973
+ },
974
+ {
975
+ "epoch": 8.137931034482758,
976
+ "grad_norm": 4.219178199768066,
977
+ "learning_rate": 7.500000000000001e-05,
978
+ "loss": 0.8664,
979
+ "step": 236
980
+ },
981
+ {
982
+ "epoch": 8.206896551724139,
983
+ "grad_norm": 7.4406304359436035,
984
+ "learning_rate": 7.469325153374233e-05,
985
+ "loss": 0.9639,
986
+ "step": 238
987
+ },
988
+ {
989
+ "epoch": 8.275862068965518,
990
+ "grad_norm": 7.360535621643066,
991
+ "learning_rate": 7.438650306748467e-05,
992
+ "loss": 0.9976,
993
+ "step": 240
994
+ },
995
+ {
996
+ "epoch": 8.344827586206897,
997
+ "grad_norm": 5.644505023956299,
998
+ "learning_rate": 7.4079754601227e-05,
999
+ "loss": 0.6907,
1000
+ "step": 242
1001
+ },
1002
+ {
1003
+ "epoch": 8.413793103448276,
1004
+ "grad_norm": 7.283102512359619,
1005
+ "learning_rate": 7.377300613496932e-05,
1006
+ "loss": 1.0068,
1007
+ "step": 244
1008
+ },
1009
+ {
1010
+ "epoch": 8.482758620689655,
1011
+ "grad_norm": 4.419933319091797,
1012
+ "learning_rate": 7.346625766871166e-05,
1013
+ "loss": 0.5921,
1014
+ "step": 246
1015
+ },
1016
+ {
1017
+ "epoch": 8.551724137931034,
1018
+ "grad_norm": 8.00572395324707,
1019
+ "learning_rate": 7.315950920245399e-05,
1020
+ "loss": 0.6962,
1021
+ "step": 248
1022
+ },
1023
+ {
1024
+ "epoch": 8.620689655172415,
1025
+ "grad_norm": 7.26607608795166,
1026
+ "learning_rate": 7.285276073619633e-05,
1027
+ "loss": 0.8763,
1028
+ "step": 250
1029
+ },
1030
+ {
1031
+ "epoch": 8.689655172413794,
1032
+ "grad_norm": 4.030153751373291,
1033
+ "learning_rate": 7.254601226993865e-05,
1034
+ "loss": 0.4548,
1035
+ "step": 252
1036
+ },
1037
+ {
1038
+ "epoch": 8.758620689655173,
1039
+ "grad_norm": 9.402144432067871,
1040
+ "learning_rate": 7.223926380368099e-05,
1041
+ "loss": 0.9571,
1042
+ "step": 254
1043
+ },
1044
+ {
1045
+ "epoch": 8.827586206896552,
1046
+ "grad_norm": 6.616663932800293,
1047
+ "learning_rate": 7.193251533742332e-05,
1048
+ "loss": 0.4974,
1049
+ "step": 256
1050
+ },
1051
+ {
1052
+ "epoch": 8.89655172413793,
1053
+ "grad_norm": 6.191842079162598,
1054
+ "learning_rate": 7.162576687116564e-05,
1055
+ "loss": 0.7361,
1056
+ "step": 258
1057
+ },
1058
+ {
1059
+ "epoch": 8.96551724137931,
1060
+ "grad_norm": 7.1751837730407715,
1061
+ "learning_rate": 7.131901840490798e-05,
1062
+ "loss": 0.4454,
1063
+ "step": 260
1064
+ },
1065
+ {
1066
+ "epoch": 9.0,
1067
+ "eval_accuracy": 0.696969696969697,
1068
+ "eval_f1_macro": 0.6064190911129687,
1069
+ "eval_f1_micro": 0.696969696969697,
1070
+ "eval_f1_weighted": 0.6763255683710229,
1071
+ "eval_loss": 1.0474289655685425,
1072
+ "eval_precision_macro": 0.6334407447189402,
1073
+ "eval_precision_micro": 0.696969696969697,
1074
+ "eval_precision_weighted": 0.686842105263158,
1075
+ "eval_recall_macro": 0.6099622071050643,
1076
+ "eval_recall_micro": 0.696969696969697,
1077
+ "eval_recall_weighted": 0.696969696969697,
1078
+ "eval_runtime": 2.9691,
1079
+ "eval_samples_per_second": 44.457,
1080
+ "eval_steps_per_second": 5.726,
1081
+ "step": 261
1082
+ },
1083
+ {
1084
+ "epoch": 9.03448275862069,
1085
+ "grad_norm": 4.017953872680664,
1086
+ "learning_rate": 7.101226993865031e-05,
1087
+ "loss": 0.3868,
1088
+ "step": 262
1089
+ },
1090
+ {
1091
+ "epoch": 9.10344827586207,
1092
+ "grad_norm": 8.303009986877441,
1093
+ "learning_rate": 7.070552147239265e-05,
1094
+ "loss": 0.5722,
1095
+ "step": 264
1096
+ },
1097
+ {
1098
+ "epoch": 9.172413793103448,
1099
+ "grad_norm": 16.19445037841797,
1100
+ "learning_rate": 7.039877300613497e-05,
1101
+ "loss": 0.8712,
1102
+ "step": 266
1103
+ },
1104
+ {
1105
+ "epoch": 9.241379310344827,
1106
+ "grad_norm": 6.667177677154541,
1107
+ "learning_rate": 7.00920245398773e-05,
1108
+ "loss": 0.4407,
1109
+ "step": 268
1110
+ },
1111
+ {
1112
+ "epoch": 9.310344827586206,
1113
+ "grad_norm": 5.363644599914551,
1114
+ "learning_rate": 6.978527607361964e-05,
1115
+ "loss": 0.3215,
1116
+ "step": 270
1117
+ },
1118
+ {
1119
+ "epoch": 9.379310344827585,
1120
+ "grad_norm": 12.17090892791748,
1121
+ "learning_rate": 6.947852760736196e-05,
1122
+ "loss": 0.489,
1123
+ "step": 272
1124
+ },
1125
+ {
1126
+ "epoch": 9.448275862068966,
1127
+ "grad_norm": 10.571720123291016,
1128
+ "learning_rate": 6.91717791411043e-05,
1129
+ "loss": 0.8153,
1130
+ "step": 274
1131
+ },
1132
+ {
1133
+ "epoch": 9.517241379310345,
1134
+ "grad_norm": 6.873518466949463,
1135
+ "learning_rate": 6.886503067484663e-05,
1136
+ "loss": 0.379,
1137
+ "step": 276
1138
+ },
1139
+ {
1140
+ "epoch": 9.586206896551724,
1141
+ "grad_norm": 3.1050946712493896,
1142
+ "learning_rate": 6.855828220858897e-05,
1143
+ "loss": 0.168,
1144
+ "step": 278
1145
+ },
1146
+ {
1147
+ "epoch": 9.655172413793103,
1148
+ "grad_norm": 3.549830675125122,
1149
+ "learning_rate": 6.825153374233129e-05,
1150
+ "loss": 0.1496,
1151
+ "step": 280
1152
+ },
1153
+ {
1154
+ "epoch": 9.724137931034482,
1155
+ "grad_norm": 7.51619815826416,
1156
+ "learning_rate": 6.794478527607362e-05,
1157
+ "loss": 0.7824,
1158
+ "step": 282
1159
+ },
1160
+ {
1161
+ "epoch": 9.793103448275861,
1162
+ "grad_norm": 9.865971565246582,
1163
+ "learning_rate": 6.763803680981596e-05,
1164
+ "loss": 0.9922,
1165
+ "step": 284
1166
+ },
1167
+ {
1168
+ "epoch": 9.862068965517242,
1169
+ "grad_norm": 11.25515079498291,
1170
+ "learning_rate": 6.733128834355828e-05,
1171
+ "loss": 1.0158,
1172
+ "step": 286
1173
+ },
1174
+ {
1175
+ "epoch": 9.931034482758621,
1176
+ "grad_norm": 6.325614929199219,
1177
+ "learning_rate": 6.702453987730062e-05,
1178
+ "loss": 0.4932,
1179
+ "step": 288
1180
+ },
1181
+ {
1182
+ "epoch": 10.0,
1183
+ "grad_norm": 6.320476531982422,
1184
+ "learning_rate": 6.671779141104295e-05,
1185
+ "loss": 0.5439,
1186
+ "step": 290
1187
+ },
1188
+ {
1189
+ "epoch": 10.0,
1190
+ "eval_accuracy": 0.5151515151515151,
1191
+ "eval_f1_macro": 0.4580164261227385,
1192
+ "eval_f1_micro": 0.5151515151515151,
1193
+ "eval_f1_weighted": 0.491986025706956,
1194
+ "eval_loss": 1.6814864873886108,
1195
+ "eval_precision_macro": 0.5393793994024408,
1196
+ "eval_precision_micro": 0.5151515151515151,
1197
+ "eval_precision_weighted": 0.595123648752681,
1198
+ "eval_recall_macro": 0.4903250188964474,
1199
+ "eval_recall_micro": 0.5151515151515151,
1200
+ "eval_recall_weighted": 0.5151515151515151,
1201
+ "eval_runtime": 2.9552,
1202
+ "eval_samples_per_second": 44.667,
1203
+ "eval_steps_per_second": 5.753,
1204
+ "step": 290
1205
+ },
1206
+ {
1207
+ "epoch": 10.068965517241379,
1208
+ "grad_norm": 19.37687110900879,
1209
+ "learning_rate": 6.641104294478529e-05,
1210
+ "loss": 1.1536,
1211
+ "step": 292
1212
+ },
1213
+ {
1214
+ "epoch": 10.137931034482758,
1215
+ "grad_norm": 5.3166093826293945,
1216
+ "learning_rate": 6.610429447852761e-05,
1217
+ "loss": 0.6399,
1218
+ "step": 294
1219
+ },
1220
+ {
1221
+ "epoch": 10.206896551724139,
1222
+ "grad_norm": 6.868032932281494,
1223
+ "learning_rate": 6.579754601226994e-05,
1224
+ "loss": 0.5945,
1225
+ "step": 296
1226
+ },
1227
+ {
1228
+ "epoch": 10.275862068965518,
1229
+ "grad_norm": 6.491347312927246,
1230
+ "learning_rate": 6.549079754601228e-05,
1231
+ "loss": 0.5558,
1232
+ "step": 298
1233
+ },
1234
+ {
1235
+ "epoch": 10.344827586206897,
1236
+ "grad_norm": 5.701662063598633,
1237
+ "learning_rate": 6.51840490797546e-05,
1238
+ "loss": 0.5752,
1239
+ "step": 300
1240
+ },
1241
+ {
1242
+ "epoch": 10.413793103448276,
1243
+ "grad_norm": 6.190051555633545,
1244
+ "learning_rate": 6.487730061349694e-05,
1245
+ "loss": 0.4731,
1246
+ "step": 302
1247
+ },
1248
+ {
1249
+ "epoch": 10.482758620689655,
1250
+ "grad_norm": 6.490513801574707,
1251
+ "learning_rate": 6.457055214723927e-05,
1252
+ "loss": 0.5012,
1253
+ "step": 304
1254
+ },
1255
+ {
1256
+ "epoch": 10.551724137931034,
1257
+ "grad_norm": 7.6167521476745605,
1258
+ "learning_rate": 6.426380368098159e-05,
1259
+ "loss": 0.553,
1260
+ "step": 306
1261
+ },
1262
+ {
1263
+ "epoch": 10.620689655172415,
1264
+ "grad_norm": 9.545734405517578,
1265
+ "learning_rate": 6.395705521472393e-05,
1266
+ "loss": 0.6079,
1267
+ "step": 308
1268
+ },
1269
+ {
1270
+ "epoch": 10.689655172413794,
1271
+ "grad_norm": 10.175183296203613,
1272
+ "learning_rate": 6.365030674846626e-05,
1273
+ "loss": 0.6955,
1274
+ "step": 310
1275
+ },
1276
+ {
1277
+ "epoch": 10.758620689655173,
1278
+ "grad_norm": 8.330876350402832,
1279
+ "learning_rate": 6.33435582822086e-05,
1280
+ "loss": 0.739,
1281
+ "step": 312
1282
+ },
1283
+ {
1284
+ "epoch": 10.827586206896552,
1285
+ "grad_norm": 9.028648376464844,
1286
+ "learning_rate": 6.303680981595092e-05,
1287
+ "loss": 0.4514,
1288
+ "step": 314
1289
+ },
1290
+ {
1291
+ "epoch": 10.89655172413793,
1292
+ "grad_norm": 6.639448642730713,
1293
+ "learning_rate": 6.273006134969326e-05,
1294
+ "loss": 0.2532,
1295
+ "step": 316
1296
+ },
1297
+ {
1298
+ "epoch": 10.96551724137931,
1299
+ "grad_norm": 6.314249515533447,
1300
+ "learning_rate": 6.242331288343559e-05,
1301
+ "loss": 0.4256,
1302
+ "step": 318
1303
+ },
1304
+ {
1305
+ "epoch": 11.0,
1306
+ "eval_accuracy": 0.6666666666666666,
1307
+ "eval_f1_macro": 0.5799728079445362,
1308
+ "eval_f1_micro": 0.6666666666666666,
1309
+ "eval_f1_weighted": 0.649504493507492,
1310
+ "eval_loss": 1.137831687927246,
1311
+ "eval_precision_macro": 0.5800879765395894,
1312
+ "eval_precision_micro": 0.6666666666666666,
1313
+ "eval_precision_weighted": 0.6434914393198851,
1314
+ "eval_recall_macro": 0.590650037792895,
1315
+ "eval_recall_micro": 0.6666666666666666,
1316
+ "eval_recall_weighted": 0.6666666666666666,
1317
+ "eval_runtime": 2.949,
1318
+ "eval_samples_per_second": 44.761,
1319
+ "eval_steps_per_second": 5.765,
1320
+ "step": 319
1321
+ },
1322
+ {
1323
+ "epoch": 11.03448275862069,
1324
+ "grad_norm": 5.468910217285156,
1325
+ "learning_rate": 6.211656441717791e-05,
1326
+ "loss": 0.4358,
1327
+ "step": 320
1328
+ },
1329
+ {
1330
+ "epoch": 11.10344827586207,
1331
+ "grad_norm": 4.487988471984863,
1332
+ "learning_rate": 6.180981595092025e-05,
1333
+ "loss": 0.349,
1334
+ "step": 322
1335
+ },
1336
+ {
1337
+ "epoch": 11.172413793103448,
1338
+ "grad_norm": 4.8094587326049805,
1339
+ "learning_rate": 6.150306748466258e-05,
1340
+ "loss": 0.4052,
1341
+ "step": 324
1342
+ },
1343
+ {
1344
+ "epoch": 11.241379310344827,
1345
+ "grad_norm": 2.387544870376587,
1346
+ "learning_rate": 6.119631901840492e-05,
1347
+ "loss": 0.2168,
1348
+ "step": 326
1349
+ },
1350
+ {
1351
+ "epoch": 11.310344827586206,
1352
+ "grad_norm": 7.197230815887451,
1353
+ "learning_rate": 6.088957055214725e-05,
1354
+ "loss": 0.2149,
1355
+ "step": 328
1356
+ },
1357
+ {
1358
+ "epoch": 11.379310344827585,
1359
+ "grad_norm": 4.119611740112305,
1360
+ "learning_rate": 6.058282208588958e-05,
1361
+ "loss": 0.3088,
1362
+ "step": 330
1363
+ },
1364
+ {
1365
+ "epoch": 11.448275862068966,
1366
+ "grad_norm": 8.908929824829102,
1367
+ "learning_rate": 6.02760736196319e-05,
1368
+ "loss": 0.4711,
1369
+ "step": 332
1370
+ },
1371
+ {
1372
+ "epoch": 11.517241379310345,
1373
+ "grad_norm": 7.788853168487549,
1374
+ "learning_rate": 5.996932515337423e-05,
1375
+ "loss": 0.2111,
1376
+ "step": 334
1377
+ },
1378
+ {
1379
+ "epoch": 11.586206896551724,
1380
+ "grad_norm": 4.6214118003845215,
1381
+ "learning_rate": 5.9662576687116564e-05,
1382
+ "loss": 0.4032,
1383
+ "step": 336
1384
+ },
1385
+ {
1386
+ "epoch": 11.655172413793103,
1387
+ "grad_norm": 12.485306739807129,
1388
+ "learning_rate": 5.93558282208589e-05,
1389
+ "loss": 0.6185,
1390
+ "step": 338
1391
+ },
1392
+ {
1393
+ "epoch": 11.724137931034482,
1394
+ "grad_norm": 8.764460563659668,
1395
+ "learning_rate": 5.9049079754601235e-05,
1396
+ "loss": 0.3864,
1397
+ "step": 340
1398
+ },
1399
+ {
1400
+ "epoch": 11.793103448275861,
1401
+ "grad_norm": 7.914090156555176,
1402
+ "learning_rate": 5.874233128834357e-05,
1403
+ "loss": 0.3361,
1404
+ "step": 342
1405
+ },
1406
+ {
1407
+ "epoch": 11.862068965517242,
1408
+ "grad_norm": 7.184711933135986,
1409
+ "learning_rate": 5.8435582822085886e-05,
1410
+ "loss": 0.3037,
1411
+ "step": 344
1412
+ },
1413
+ {
1414
+ "epoch": 11.931034482758621,
1415
+ "grad_norm": 5.275500297546387,
1416
+ "learning_rate": 5.812883435582822e-05,
1417
+ "loss": 0.2329,
1418
+ "step": 346
1419
+ },
1420
+ {
1421
+ "epoch": 12.0,
1422
+ "grad_norm": 9.601807594299316,
1423
+ "learning_rate": 5.782208588957055e-05,
1424
+ "loss": 0.4968,
1425
+ "step": 348
1426
+ },
1427
+ {
1428
+ "epoch": 12.0,
1429
+ "eval_accuracy": 0.6136363636363636,
1430
+ "eval_f1_macro": 0.5307113938692886,
1431
+ "eval_f1_micro": 0.6136363636363636,
1432
+ "eval_f1_weighted": 0.6012838636642465,
1433
+ "eval_loss": 1.4229304790496826,
1434
+ "eval_precision_macro": 0.5347963458887828,
1435
+ "eval_precision_micro": 0.6136363636363636,
1436
+ "eval_precision_weighted": 0.6095054001916747,
1437
+ "eval_recall_macro": 0.5485941043083901,
1438
+ "eval_recall_micro": 0.6136363636363636,
1439
+ "eval_recall_weighted": 0.6136363636363636,
1440
+ "eval_runtime": 2.9496,
1441
+ "eval_samples_per_second": 44.752,
1442
+ "eval_steps_per_second": 5.764,
1443
+ "step": 348
1444
+ },
1445
+ {
1446
+ "epoch": 12.068965517241379,
1447
+ "grad_norm": 3.669062614440918,
1448
+ "learning_rate": 5.751533742331289e-05,
1449
+ "loss": 0.2376,
1450
+ "step": 350
1451
+ },
1452
+ {
1453
+ "epoch": 12.137931034482758,
1454
+ "grad_norm": 7.027435302734375,
1455
+ "learning_rate": 5.720858895705522e-05,
1456
+ "loss": 0.3657,
1457
+ "step": 352
1458
+ },
1459
+ {
1460
+ "epoch": 12.206896551724139,
1461
+ "grad_norm": 5.393192768096924,
1462
+ "learning_rate": 5.6901840490797555e-05,
1463
+ "loss": 0.3012,
1464
+ "step": 354
1465
+ },
1466
+ {
1467
+ "epoch": 12.275862068965518,
1468
+ "grad_norm": 1.6986196041107178,
1469
+ "learning_rate": 5.6595092024539874e-05,
1470
+ "loss": 0.1062,
1471
+ "step": 356
1472
+ },
1473
+ {
1474
+ "epoch": 12.344827586206897,
1475
+ "grad_norm": 11.825511932373047,
1476
+ "learning_rate": 5.6288343558282206e-05,
1477
+ "loss": 0.5445,
1478
+ "step": 358
1479
+ },
1480
+ {
1481
+ "epoch": 12.413793103448276,
1482
+ "grad_norm": 11.926621437072754,
1483
+ "learning_rate": 5.598159509202454e-05,
1484
+ "loss": 0.8014,
1485
+ "step": 360
1486
+ },
1487
+ {
1488
+ "epoch": 12.482758620689655,
1489
+ "grad_norm": 7.5627665519714355,
1490
+ "learning_rate": 5.567484662576688e-05,
1491
+ "loss": 0.3761,
1492
+ "step": 362
1493
+ },
1494
+ {
1495
+ "epoch": 12.551724137931034,
1496
+ "grad_norm": 5.975190162658691,
1497
+ "learning_rate": 5.536809815950921e-05,
1498
+ "loss": 0.3764,
1499
+ "step": 364
1500
+ },
1501
+ {
1502
+ "epoch": 12.620689655172415,
1503
+ "grad_norm": 5.467086315155029,
1504
+ "learning_rate": 5.506134969325154e-05,
1505
+ "loss": 0.3437,
1506
+ "step": 366
1507
+ },
1508
+ {
1509
+ "epoch": 12.689655172413794,
1510
+ "grad_norm": 7.421685218811035,
1511
+ "learning_rate": 5.475460122699386e-05,
1512
+ "loss": 0.242,
1513
+ "step": 368
1514
+ },
1515
+ {
1516
+ "epoch": 12.758620689655173,
1517
+ "grad_norm": 6.59246301651001,
1518
+ "learning_rate": 5.4447852760736193e-05,
1519
+ "loss": 0.4546,
1520
+ "step": 370
1521
+ },
1522
+ {
1523
+ "epoch": 12.827586206896552,
1524
+ "grad_norm": 7.327258586883545,
1525
+ "learning_rate": 5.4141104294478526e-05,
1526
+ "loss": 0.4652,
1527
+ "step": 372
1528
+ },
1529
+ {
1530
+ "epoch": 12.89655172413793,
1531
+ "grad_norm": 8.353236198425293,
1532
+ "learning_rate": 5.3834355828220865e-05,
1533
+ "loss": 0.2649,
1534
+ "step": 374
1535
+ },
1536
+ {
1537
+ "epoch": 12.96551724137931,
1538
+ "grad_norm": 5.369735240936279,
1539
+ "learning_rate": 5.35276073619632e-05,
1540
+ "loss": 0.3408,
1541
+ "step": 376
1542
+ },
1543
+ {
1544
+ "epoch": 13.0,
1545
+ "eval_accuracy": 0.6287878787878788,
1546
+ "eval_f1_macro": 0.5426263900444636,
1547
+ "eval_f1_micro": 0.6287878787878788,
1548
+ "eval_f1_weighted": 0.6095195120641993,
1549
+ "eval_loss": 1.4445136785507202,
1550
+ "eval_precision_macro": 0.5558757032441243,
1551
+ "eval_precision_micro": 0.6287878787878788,
1552
+ "eval_precision_weighted": 0.6306541677355074,
1553
+ "eval_recall_macro": 0.5621390778533636,
1554
+ "eval_recall_micro": 0.6287878787878788,
1555
+ "eval_recall_weighted": 0.6287878787878788,
1556
+ "eval_runtime": 2.9534,
1557
+ "eval_samples_per_second": 44.694,
1558
+ "eval_steps_per_second": 5.756,
1559
+ "step": 377
1560
+ },
1561
+ {
1562
+ "epoch": 13.03448275862069,
1563
+ "grad_norm": 3.363588333129883,
1564
+ "learning_rate": 5.322085889570553e-05,
1565
+ "loss": 0.2143,
1566
+ "step": 378
1567
+ },
1568
+ {
1569
+ "epoch": 13.10344827586207,
1570
+ "grad_norm": 5.9551682472229,
1571
+ "learning_rate": 5.291411042944786e-05,
1572
+ "loss": 0.3482,
1573
+ "step": 380
1574
+ },
1575
+ {
1576
+ "epoch": 13.172413793103448,
1577
+ "grad_norm": 3.5881054401397705,
1578
+ "learning_rate": 5.260736196319018e-05,
1579
+ "loss": 0.3513,
1580
+ "step": 382
1581
+ },
1582
+ {
1583
+ "epoch": 13.241379310344827,
1584
+ "grad_norm": 3.4177823066711426,
1585
+ "learning_rate": 5.230061349693251e-05,
1586
+ "loss": 0.141,
1587
+ "step": 384
1588
+ },
1589
+ {
1590
+ "epoch": 13.310344827586206,
1591
+ "grad_norm": 1.6237398386001587,
1592
+ "learning_rate": 5.1993865030674845e-05,
1593
+ "loss": 0.1823,
1594
+ "step": 386
1595
+ },
1596
+ {
1597
+ "epoch": 13.379310344827585,
1598
+ "grad_norm": 4.370217323303223,
1599
+ "learning_rate": 5.1687116564417185e-05,
1600
+ "loss": 0.1641,
1601
+ "step": 388
1602
+ },
1603
+ {
1604
+ "epoch": 13.448275862068966,
1605
+ "grad_norm": 7.227107048034668,
1606
+ "learning_rate": 5.138036809815952e-05,
1607
+ "loss": 0.3134,
1608
+ "step": 390
1609
+ },
1610
+ {
1611
+ "epoch": 13.517241379310345,
1612
+ "grad_norm": 8.068262100219727,
1613
+ "learning_rate": 5.107361963190185e-05,
1614
+ "loss": 0.2103,
1615
+ "step": 392
1616
+ },
1617
+ {
1618
+ "epoch": 13.586206896551724,
1619
+ "grad_norm": 0.982545793056488,
1620
+ "learning_rate": 5.076687116564417e-05,
1621
+ "loss": 0.0825,
1622
+ "step": 394
1623
+ },
1624
+ {
1625
+ "epoch": 13.655172413793103,
1626
+ "grad_norm": 5.129421234130859,
1627
+ "learning_rate": 5.04601226993865e-05,
1628
+ "loss": 0.3593,
1629
+ "step": 396
1630
+ },
1631
+ {
1632
+ "epoch": 13.724137931034482,
1633
+ "grad_norm": 7.760966777801514,
1634
+ "learning_rate": 5.015337423312883e-05,
1635
+ "loss": 0.4634,
1636
+ "step": 398
1637
+ },
1638
+ {
1639
+ "epoch": 13.793103448275861,
1640
+ "grad_norm": 5.949847221374512,
1641
+ "learning_rate": 4.984662576687117e-05,
1642
+ "loss": 0.4661,
1643
+ "step": 400
1644
+ },
1645
+ {
1646
+ "epoch": 13.862068965517242,
1647
+ "grad_norm": 6.836796760559082,
1648
+ "learning_rate": 4.9539877300613504e-05,
1649
+ "loss": 0.4485,
1650
+ "step": 402
1651
+ },
1652
+ {
1653
+ "epoch": 13.931034482758621,
1654
+ "grad_norm": 8.495439529418945,
1655
+ "learning_rate": 4.923312883435583e-05,
1656
+ "loss": 0.6775,
1657
+ "step": 404
1658
+ },
1659
+ {
1660
+ "epoch": 14.0,
1661
+ "grad_norm": 6.360949993133545,
1662
+ "learning_rate": 4.892638036809816e-05,
1663
+ "loss": 0.2914,
1664
+ "step": 406
1665
+ },
1666
+ {
1667
+ "epoch": 14.0,
1668
+ "eval_accuracy": 0.6515151515151515,
1669
+ "eval_f1_macro": 0.6009180806560772,
1670
+ "eval_f1_micro": 0.6515151515151515,
1671
+ "eval_f1_weighted": 0.6470399238998288,
1672
+ "eval_loss": 1.4276713132858276,
1673
+ "eval_precision_macro": 0.7068301168482659,
1674
+ "eval_precision_micro": 0.6515151515151515,
1675
+ "eval_precision_weighted": 0.6867810122256585,
1676
+ "eval_recall_macro": 0.5958201058201057,
1677
+ "eval_recall_micro": 0.6515151515151515,
1678
+ "eval_recall_weighted": 0.6515151515151515,
1679
+ "eval_runtime": 2.9541,
1680
+ "eval_samples_per_second": 44.683,
1681
+ "eval_steps_per_second": 5.755,
1682
+ "step": 406
1683
+ },
1684
+ {
1685
+ "epoch": 14.068965517241379,
1686
+ "grad_norm": 5.016369342803955,
1687
+ "learning_rate": 4.8619631901840495e-05,
1688
+ "loss": 0.1568,
1689
+ "step": 408
1690
+ },
1691
+ {
1692
+ "epoch": 14.137931034482758,
1693
+ "grad_norm": 1.709494948387146,
1694
+ "learning_rate": 4.831288343558282e-05,
1695
+ "loss": 0.1725,
1696
+ "step": 410
1697
+ },
1698
+ {
1699
+ "epoch": 14.206896551724139,
1700
+ "grad_norm": 11.258108139038086,
1701
+ "learning_rate": 4.800613496932516e-05,
1702
+ "loss": 0.4265,
1703
+ "step": 412
1704
+ },
1705
+ {
1706
+ "epoch": 14.275862068965518,
1707
+ "grad_norm": 7.010964870452881,
1708
+ "learning_rate": 4.769938650306749e-05,
1709
+ "loss": 0.2611,
1710
+ "step": 414
1711
+ },
1712
+ {
1713
+ "epoch": 14.344827586206897,
1714
+ "grad_norm": 2.1494526863098145,
1715
+ "learning_rate": 4.739263803680982e-05,
1716
+ "loss": 0.2004,
1717
+ "step": 416
1718
+ },
1719
+ {
1720
+ "epoch": 14.413793103448276,
1721
+ "grad_norm": 1.0723114013671875,
1722
+ "learning_rate": 4.708588957055215e-05,
1723
+ "loss": 0.1463,
1724
+ "step": 418
1725
+ },
1726
+ {
1727
+ "epoch": 14.482758620689655,
1728
+ "grad_norm": 4.130954742431641,
1729
+ "learning_rate": 4.677914110429448e-05,
1730
+ "loss": 0.0826,
1731
+ "step": 420
1732
+ },
1733
+ {
1734
+ "epoch": 14.551724137931034,
1735
+ "grad_norm": 2.124276876449585,
1736
+ "learning_rate": 4.647239263803681e-05,
1737
+ "loss": 0.2997,
1738
+ "step": 422
1739
+ },
1740
+ {
1741
+ "epoch": 14.620689655172415,
1742
+ "grad_norm": 6.786820888519287,
1743
+ "learning_rate": 4.616564417177914e-05,
1744
+ "loss": 0.186,
1745
+ "step": 424
1746
+ },
1747
+ {
1748
+ "epoch": 14.689655172413794,
1749
+ "grad_norm": 1.8289989233016968,
1750
+ "learning_rate": 4.585889570552148e-05,
1751
+ "loss": 0.1018,
1752
+ "step": 426
1753
+ },
1754
+ {
1755
+ "epoch": 14.758620689655173,
1756
+ "grad_norm": 2.392906904220581,
1757
+ "learning_rate": 4.5552147239263805e-05,
1758
+ "loss": 0.1582,
1759
+ "step": 428
1760
+ },
1761
+ {
1762
+ "epoch": 14.827586206896552,
1763
+ "grad_norm": 0.6232361197471619,
1764
+ "learning_rate": 4.524539877300614e-05,
1765
+ "loss": 0.0487,
1766
+ "step": 430
1767
+ },
1768
+ {
1769
+ "epoch": 14.89655172413793,
1770
+ "grad_norm": 10.35379695892334,
1771
+ "learning_rate": 4.493865030674847e-05,
1772
+ "loss": 0.3485,
1773
+ "step": 432
1774
+ },
1775
+ {
1776
+ "epoch": 14.96551724137931,
1777
+ "grad_norm": 3.712934970855713,
1778
+ "learning_rate": 4.4631901840490795e-05,
1779
+ "loss": 0.2003,
1780
+ "step": 434
1781
+ },
1782
+ {
1783
+ "epoch": 15.0,
1784
+ "eval_accuracy": 0.6287878787878788,
1785
+ "eval_f1_macro": 0.5769950459342476,
1786
+ "eval_f1_micro": 0.6287878787878788,
1787
+ "eval_f1_weighted": 0.6296265645546432,
1788
+ "eval_loss": 1.5517460107803345,
1789
+ "eval_precision_macro": 0.5889880952380953,
1790
+ "eval_precision_micro": 0.6287878787878788,
1791
+ "eval_precision_weighted": 0.6474589646464647,
1792
+ "eval_recall_macro": 0.579191232048375,
1793
+ "eval_recall_micro": 0.6287878787878788,
1794
+ "eval_recall_weighted": 0.6287878787878788,
1795
+ "eval_runtime": 2.9553,
1796
+ "eval_samples_per_second": 44.666,
1797
+ "eval_steps_per_second": 5.752,
1798
+ "step": 435
1799
+ },
1800
+ {
1801
+ "epoch": 15.03448275862069,
1802
+ "grad_norm": 6.708681106567383,
1803
+ "learning_rate": 4.432515337423313e-05,
1804
+ "loss": 0.0948,
1805
+ "step": 436
1806
+ },
1807
+ {
1808
+ "epoch": 15.10344827586207,
1809
+ "grad_norm": 2.6313998699188232,
1810
+ "learning_rate": 4.4018404907975466e-05,
1811
+ "loss": 0.1151,
1812
+ "step": 438
1813
+ },
1814
+ {
1815
+ "epoch": 15.172413793103448,
1816
+ "grad_norm": 1.044400930404663,
1817
+ "learning_rate": 4.371165644171779e-05,
1818
+ "loss": 0.0373,
1819
+ "step": 440
1820
+ },
1821
+ {
1822
+ "epoch": 15.241379310344827,
1823
+ "grad_norm": 4.7035064697265625,
1824
+ "learning_rate": 4.3404907975460124e-05,
1825
+ "loss": 0.1399,
1826
+ "step": 442
1827
+ },
1828
+ {
1829
+ "epoch": 15.310344827586206,
1830
+ "grad_norm": 10.513556480407715,
1831
+ "learning_rate": 4.309815950920246e-05,
1832
+ "loss": 0.5143,
1833
+ "step": 444
1834
+ },
1835
+ {
1836
+ "epoch": 15.379310344827585,
1837
+ "grad_norm": 2.8816163539886475,
1838
+ "learning_rate": 4.279141104294479e-05,
1839
+ "loss": 0.0437,
1840
+ "step": 446
1841
+ },
1842
+ {
1843
+ "epoch": 15.448275862068966,
1844
+ "grad_norm": 0.7600950598716736,
1845
+ "learning_rate": 4.2484662576687115e-05,
1846
+ "loss": 0.112,
1847
+ "step": 448
1848
+ },
1849
+ {
1850
+ "epoch": 15.517241379310345,
1851
+ "grad_norm": 4.796483516693115,
1852
+ "learning_rate": 4.2177914110429454e-05,
1853
+ "loss": 0.0414,
1854
+ "step": 450
1855
+ },
1856
+ {
1857
+ "epoch": 15.586206896551724,
1858
+ "grad_norm": 9.010687828063965,
1859
+ "learning_rate": 4.1871165644171786e-05,
1860
+ "loss": 0.1845,
1861
+ "step": 452
1862
+ },
1863
+ {
1864
+ "epoch": 15.655172413793103,
1865
+ "grad_norm": 5.453256607055664,
1866
+ "learning_rate": 4.156441717791411e-05,
1867
+ "loss": 0.2598,
1868
+ "step": 454
1869
+ },
1870
+ {
1871
+ "epoch": 15.724137931034482,
1872
+ "grad_norm": 0.6193873286247253,
1873
+ "learning_rate": 4.1257668711656444e-05,
1874
+ "loss": 0.1249,
1875
+ "step": 456
1876
+ },
1877
+ {
1878
+ "epoch": 15.793103448275861,
1879
+ "grad_norm": 1.3485605716705322,
1880
+ "learning_rate": 4.0950920245398776e-05,
1881
+ "loss": 0.0884,
1882
+ "step": 458
1883
+ },
1884
+ {
1885
+ "epoch": 15.862068965517242,
1886
+ "grad_norm": 5.937335014343262,
1887
+ "learning_rate": 4.06441717791411e-05,
1888
+ "loss": 0.2239,
1889
+ "step": 460
1890
+ },
1891
+ {
1892
+ "epoch": 15.931034482758621,
1893
+ "grad_norm": 1.5148670673370361,
1894
+ "learning_rate": 4.033742331288344e-05,
1895
+ "loss": 0.1397,
1896
+ "step": 462
1897
+ },
1898
+ {
1899
+ "epoch": 16.0,
1900
+ "grad_norm": 6.244295120239258,
1901
+ "learning_rate": 4.0030674846625773e-05,
1902
+ "loss": 0.0871,
1903
+ "step": 464
1904
+ },
1905
+ {
1906
+ "epoch": 16.0,
1907
+ "eval_accuracy": 0.6515151515151515,
1908
+ "eval_f1_macro": 0.5701671714225517,
1909
+ "eval_f1_micro": 0.6515151515151515,
1910
+ "eval_f1_weighted": 0.6407448459978784,
1911
+ "eval_loss": 1.481210708618164,
1912
+ "eval_precision_macro": 0.5776942355889724,
1913
+ "eval_precision_micro": 0.6515151515151515,
1914
+ "eval_precision_weighted": 0.6491323004480899,
1915
+ "eval_recall_macro": 0.5785411942554799,
1916
+ "eval_recall_micro": 0.6515151515151515,
1917
+ "eval_recall_weighted": 0.6515151515151515,
1918
+ "eval_runtime": 2.9649,
1919
+ "eval_samples_per_second": 44.52,
1920
+ "eval_steps_per_second": 5.734,
1921
+ "step": 464
1922
+ },
1923
+ {
1924
+ "epoch": 16.06896551724138,
1925
+ "grad_norm": 11.335488319396973,
1926
+ "learning_rate": 3.97239263803681e-05,
1927
+ "loss": 0.3416,
1928
+ "step": 466
1929
+ },
1930
+ {
1931
+ "epoch": 16.137931034482758,
1932
+ "grad_norm": 3.1509413719177246,
1933
+ "learning_rate": 3.941717791411043e-05,
1934
+ "loss": 0.0441,
1935
+ "step": 468
1936
+ },
1937
+ {
1938
+ "epoch": 16.20689655172414,
1939
+ "grad_norm": 11.243932723999023,
1940
+ "learning_rate": 3.9110429447852764e-05,
1941
+ "loss": 0.297,
1942
+ "step": 470
1943
+ },
1944
+ {
1945
+ "epoch": 16.275862068965516,
1946
+ "grad_norm": 4.241555690765381,
1947
+ "learning_rate": 3.880368098159509e-05,
1948
+ "loss": 0.0561,
1949
+ "step": 472
1950
+ },
1951
+ {
1952
+ "epoch": 16.344827586206897,
1953
+ "grad_norm": 2.7286536693573,
1954
+ "learning_rate": 3.849693251533742e-05,
1955
+ "loss": 0.1139,
1956
+ "step": 474
1957
+ },
1958
+ {
1959
+ "epoch": 16.413793103448278,
1960
+ "grad_norm": 0.8547440767288208,
1961
+ "learning_rate": 3.819018404907976e-05,
1962
+ "loss": 0.0259,
1963
+ "step": 476
1964
+ },
1965
+ {
1966
+ "epoch": 16.482758620689655,
1967
+ "grad_norm": 0.9898260235786438,
1968
+ "learning_rate": 3.7883435582822086e-05,
1969
+ "loss": 0.1832,
1970
+ "step": 478
1971
+ },
1972
+ {
1973
+ "epoch": 16.551724137931036,
1974
+ "grad_norm": 1.3115100860595703,
1975
+ "learning_rate": 3.757668711656442e-05,
1976
+ "loss": 0.098,
1977
+ "step": 480
1978
+ },
1979
+ {
1980
+ "epoch": 16.620689655172413,
1981
+ "grad_norm": 7.043308734893799,
1982
+ "learning_rate": 3.726993865030675e-05,
1983
+ "loss": 0.0928,
1984
+ "step": 482
1985
+ },
1986
+ {
1987
+ "epoch": 16.689655172413794,
1988
+ "grad_norm": 5.262262344360352,
1989
+ "learning_rate": 3.696319018404908e-05,
1990
+ "loss": 0.2486,
1991
+ "step": 484
1992
+ },
1993
+ {
1994
+ "epoch": 16.75862068965517,
1995
+ "grad_norm": 4.680488586425781,
1996
+ "learning_rate": 3.665644171779141e-05,
1997
+ "loss": 0.0745,
1998
+ "step": 486
1999
+ },
2000
+ {
2001
+ "epoch": 16.82758620689655,
2002
+ "grad_norm": 1.229391098022461,
2003
+ "learning_rate": 3.634969325153375e-05,
2004
+ "loss": 0.0213,
2005
+ "step": 488
2006
+ },
2007
+ {
2008
+ "epoch": 16.896551724137932,
2009
+ "grad_norm": 3.3319144248962402,
2010
+ "learning_rate": 3.6042944785276074e-05,
2011
+ "loss": 0.0361,
2012
+ "step": 490
2013
+ },
2014
+ {
2015
+ "epoch": 16.96551724137931,
2016
+ "grad_norm": 2.358989953994751,
2017
+ "learning_rate": 3.5736196319018406e-05,
2018
+ "loss": 0.0352,
2019
+ "step": 492
2020
+ },
2021
+ {
2022
+ "epoch": 17.0,
2023
+ "eval_accuracy": 0.5984848484848485,
2024
+ "eval_f1_macro": 0.5007356485225759,
2025
+ "eval_f1_micro": 0.5984848484848485,
2026
+ "eval_f1_weighted": 0.5743528123674804,
2027
+ "eval_loss": 2.10516357421875,
2028
+ "eval_precision_macro": 0.5466466490902581,
2029
+ "eval_precision_micro": 0.5984848484848485,
2030
+ "eval_precision_weighted": 0.6130060430000621,
2031
+ "eval_recall_macro": 0.5126681783824641,
2032
+ "eval_recall_micro": 0.5984848484848485,
2033
+ "eval_recall_weighted": 0.5984848484848485,
2034
+ "eval_runtime": 2.9571,
2035
+ "eval_samples_per_second": 44.638,
2036
+ "eval_steps_per_second": 5.749,
2037
+ "step": 493
2038
+ },
2039
+ {
2040
+ "epoch": 17.03448275862069,
2041
+ "grad_norm": 7.632280349731445,
2042
+ "learning_rate": 3.542944785276074e-05,
2043
+ "loss": 0.0805,
2044
+ "step": 494
2045
+ },
2046
+ {
2047
+ "epoch": 17.103448275862068,
2048
+ "grad_norm": 2.5213019847869873,
2049
+ "learning_rate": 3.512269938650307e-05,
2050
+ "loss": 0.0341,
2051
+ "step": 496
2052
+ },
2053
+ {
2054
+ "epoch": 17.17241379310345,
2055
+ "grad_norm": 0.9358583688735962,
2056
+ "learning_rate": 3.4815950920245396e-05,
2057
+ "loss": 0.0441,
2058
+ "step": 498
2059
+ },
2060
+ {
2061
+ "epoch": 17.24137931034483,
2062
+ "grad_norm": 1.5655701160430908,
2063
+ "learning_rate": 3.4509202453987735e-05,
2064
+ "loss": 0.0189,
2065
+ "step": 500
2066
+ },
2067
+ {
2068
+ "epoch": 17.310344827586206,
2069
+ "grad_norm": 0.4072040319442749,
2070
+ "learning_rate": 3.420245398773007e-05,
2071
+ "loss": 0.0209,
2072
+ "step": 502
2073
+ },
2074
+ {
2075
+ "epoch": 17.379310344827587,
2076
+ "grad_norm": 7.48984956741333,
2077
+ "learning_rate": 3.3895705521472393e-05,
2078
+ "loss": 0.1448,
2079
+ "step": 504
2080
+ },
2081
+ {
2082
+ "epoch": 17.448275862068964,
2083
+ "grad_norm": 3.6645641326904297,
2084
+ "learning_rate": 3.3588957055214726e-05,
2085
+ "loss": 0.1325,
2086
+ "step": 506
2087
+ },
2088
+ {
2089
+ "epoch": 17.517241379310345,
2090
+ "grad_norm": 1.4333422183990479,
2091
+ "learning_rate": 3.328220858895706e-05,
2092
+ "loss": 0.0464,
2093
+ "step": 508
2094
+ },
2095
+ {
2096
+ "epoch": 17.586206896551722,
2097
+ "grad_norm": 2.419375419616699,
2098
+ "learning_rate": 3.2975460122699384e-05,
2099
+ "loss": 0.018,
2100
+ "step": 510
2101
+ },
2102
+ {
2103
+ "epoch": 17.655172413793103,
2104
+ "grad_norm": 9.471452713012695,
2105
+ "learning_rate": 3.266871165644172e-05,
2106
+ "loss": 0.1255,
2107
+ "step": 512
2108
+ },
2109
+ {
2110
+ "epoch": 17.724137931034484,
2111
+ "grad_norm": 5.6544575691223145,
2112
+ "learning_rate": 3.2361963190184055e-05,
2113
+ "loss": 0.0936,
2114
+ "step": 514
2115
+ },
2116
+ {
2117
+ "epoch": 17.79310344827586,
2118
+ "grad_norm": 3.275522470474243,
2119
+ "learning_rate": 3.205521472392638e-05,
2120
+ "loss": 0.1898,
2121
+ "step": 516
2122
+ },
2123
+ {
2124
+ "epoch": 17.862068965517242,
2125
+ "grad_norm": 0.8146153092384338,
2126
+ "learning_rate": 3.174846625766871e-05,
2127
+ "loss": 0.0209,
2128
+ "step": 518
2129
+ },
2130
+ {
2131
+ "epoch": 17.93103448275862,
2132
+ "grad_norm": 9.057777404785156,
2133
+ "learning_rate": 3.1441717791411045e-05,
2134
+ "loss": 0.2627,
2135
+ "step": 520
2136
+ },
2137
+ {
2138
+ "epoch": 18.0,
2139
+ "grad_norm": 0.2130121886730194,
2140
+ "learning_rate": 3.113496932515337e-05,
2141
+ "loss": 0.0101,
2142
+ "step": 522
2143
+ },
2144
+ {
2145
+ "epoch": 18.0,
2146
+ "eval_accuracy": 0.6212121212121212,
2147
+ "eval_f1_macro": 0.5724632695552766,
2148
+ "eval_f1_micro": 0.6212121212121212,
2149
+ "eval_f1_weighted": 0.6223060135184718,
2150
+ "eval_loss": 1.9977855682373047,
2151
+ "eval_precision_macro": 0.615182436611008,
2152
+ "eval_precision_micro": 0.6212121212121212,
2153
+ "eval_precision_weighted": 0.6558523547159911,
2154
+ "eval_recall_macro": 0.5672486772486772,
2155
+ "eval_recall_micro": 0.6212121212121212,
2156
+ "eval_recall_weighted": 0.6212121212121212,
2157
+ "eval_runtime": 2.9607,
2158
+ "eval_samples_per_second": 44.583,
2159
+ "eval_steps_per_second": 5.742,
2160
+ "step": 522
2161
+ },
2162
+ {
2163
+ "epoch": 18.06896551724138,
2164
+ "grad_norm": 5.074588775634766,
2165
+ "learning_rate": 3.0828220858895703e-05,
2166
+ "loss": 0.0407,
2167
+ "step": 524
2168
+ },
2169
+ {
2170
+ "epoch": 18.137931034482758,
2171
+ "grad_norm": 4.95729923248291,
2172
+ "learning_rate": 3.052147239263804e-05,
2173
+ "loss": 0.0346,
2174
+ "step": 526
2175
+ },
2176
+ {
2177
+ "epoch": 18.20689655172414,
2178
+ "grad_norm": 0.48612403869628906,
2179
+ "learning_rate": 3.0214723926380368e-05,
2180
+ "loss": 0.006,
2181
+ "step": 528
2182
+ },
2183
+ {
2184
+ "epoch": 18.275862068965516,
2185
+ "grad_norm": 1.2487165927886963,
2186
+ "learning_rate": 2.99079754601227e-05,
2187
+ "loss": 0.0738,
2188
+ "step": 530
2189
+ },
2190
+ {
2191
+ "epoch": 18.344827586206897,
2192
+ "grad_norm": 6.368688106536865,
2193
+ "learning_rate": 2.9601226993865033e-05,
2194
+ "loss": 0.1009,
2195
+ "step": 532
2196
+ },
2197
+ {
2198
+ "epoch": 18.413793103448278,
2199
+ "grad_norm": 0.3834826648235321,
2200
+ "learning_rate": 2.9294478527607362e-05,
2201
+ "loss": 0.0337,
2202
+ "step": 534
2203
+ },
2204
+ {
2205
+ "epoch": 18.482758620689655,
2206
+ "grad_norm": 7.203604221343994,
2207
+ "learning_rate": 2.8987730061349694e-05,
2208
+ "loss": 0.1217,
2209
+ "step": 536
2210
+ },
2211
+ {
2212
+ "epoch": 18.551724137931036,
2213
+ "grad_norm": 0.1774144023656845,
2214
+ "learning_rate": 2.8680981595092026e-05,
2215
+ "loss": 0.0448,
2216
+ "step": 538
2217
+ },
2218
+ {
2219
+ "epoch": 18.620689655172413,
2220
+ "grad_norm": 1.4739829301834106,
2221
+ "learning_rate": 2.837423312883436e-05,
2222
+ "loss": 0.042,
2223
+ "step": 540
2224
+ },
2225
+ {
2226
+ "epoch": 18.689655172413794,
2227
+ "grad_norm": 7.63886022567749,
2228
+ "learning_rate": 2.8067484662576688e-05,
2229
+ "loss": 0.1012,
2230
+ "step": 542
2231
+ },
2232
+ {
2233
+ "epoch": 18.75862068965517,
2234
+ "grad_norm": 0.5444397926330566,
2235
+ "learning_rate": 2.776073619631902e-05,
2236
+ "loss": 0.0614,
2237
+ "step": 544
2238
+ },
2239
+ {
2240
+ "epoch": 18.82758620689655,
2241
+ "grad_norm": 0.015687117353081703,
2242
+ "learning_rate": 2.7453987730061353e-05,
2243
+ "loss": 0.1418,
2244
+ "step": 546
2245
+ },
2246
+ {
2247
+ "epoch": 18.896551724137932,
2248
+ "grad_norm": 1.4215542078018188,
2249
+ "learning_rate": 2.714723926380368e-05,
2250
+ "loss": 0.0078,
2251
+ "step": 548
2252
+ },
2253
+ {
2254
+ "epoch": 18.96551724137931,
2255
+ "grad_norm": 0.5485539436340332,
2256
+ "learning_rate": 2.6840490797546014e-05,
2257
+ "loss": 0.0035,
2258
+ "step": 550
2259
+ },
2260
+ {
2261
+ "epoch": 19.0,
2262
+ "eval_accuracy": 0.6439393939393939,
2263
+ "eval_f1_macro": 0.587968716690295,
2264
+ "eval_f1_micro": 0.6439393939393939,
2265
+ "eval_f1_weighted": 0.638753653210352,
2266
+ "eval_loss": 2.030425786972046,
2267
+ "eval_precision_macro": 0.6697785789090137,
2268
+ "eval_precision_micro": 0.6439393939393939,
2269
+ "eval_precision_weighted": 0.6935502946767769,
2270
+ "eval_recall_macro": 0.5804761904761905,
2271
+ "eval_recall_micro": 0.6439393939393939,
2272
+ "eval_recall_weighted": 0.6439393939393939,
2273
+ "eval_runtime": 2.9436,
2274
+ "eval_samples_per_second": 44.843,
2275
+ "eval_steps_per_second": 5.775,
2276
+ "step": 551
2277
+ },
2278
+ {
2279
+ "epoch": 19.03448275862069,
2280
+ "grad_norm": 0.7515120506286621,
2281
+ "learning_rate": 2.6533742331288346e-05,
2282
+ "loss": 0.0053,
2283
+ "step": 552
2284
+ },
2285
+ {
2286
+ "epoch": 19.103448275862068,
2287
+ "grad_norm": 6.6086835861206055,
2288
+ "learning_rate": 2.6226993865030675e-05,
2289
+ "loss": 0.0638,
2290
+ "step": 554
2291
+ },
2292
+ {
2293
+ "epoch": 19.17241379310345,
2294
+ "grad_norm": 1.8158479928970337,
2295
+ "learning_rate": 2.5920245398773008e-05,
2296
+ "loss": 0.0173,
2297
+ "step": 556
2298
+ },
2299
+ {
2300
+ "epoch": 19.24137931034483,
2301
+ "grad_norm": 1.9973891973495483,
2302
+ "learning_rate": 2.561349693251534e-05,
2303
+ "loss": 0.0127,
2304
+ "step": 558
2305
+ },
2306
+ {
2307
+ "epoch": 19.310344827586206,
2308
+ "grad_norm": 0.03836736083030701,
2309
+ "learning_rate": 2.530674846625767e-05,
2310
+ "loss": 0.0009,
2311
+ "step": 560
2312
+ },
2313
+ {
2314
+ "epoch": 19.379310344827587,
2315
+ "grad_norm": 2.1399827003479004,
2316
+ "learning_rate": 2.5e-05,
2317
+ "loss": 0.0149,
2318
+ "step": 562
2319
+ },
2320
+ {
2321
+ "epoch": 19.448275862068964,
2322
+ "grad_norm": 1.5504521131515503,
2323
+ "learning_rate": 2.469325153374233e-05,
2324
+ "loss": 0.0125,
2325
+ "step": 564
2326
+ },
2327
+ {
2328
+ "epoch": 19.517241379310345,
2329
+ "grad_norm": 2.622783660888672,
2330
+ "learning_rate": 2.4386503067484666e-05,
2331
+ "loss": 0.0509,
2332
+ "step": 566
2333
+ },
2334
+ {
2335
+ "epoch": 19.586206896551722,
2336
+ "grad_norm": 6.634346008300781,
2337
+ "learning_rate": 2.4079754601226995e-05,
2338
+ "loss": 0.0351,
2339
+ "step": 568
2340
+ },
2341
+ {
2342
+ "epoch": 19.655172413793103,
2343
+ "grad_norm": 0.067986860871315,
2344
+ "learning_rate": 2.3773006134969324e-05,
2345
+ "loss": 0.1117,
2346
+ "step": 570
2347
+ },
2348
+ {
2349
+ "epoch": 19.724137931034484,
2350
+ "grad_norm": 0.664193332195282,
2351
+ "learning_rate": 2.346625766871166e-05,
2352
+ "loss": 0.0052,
2353
+ "step": 572
2354
+ },
2355
+ {
2356
+ "epoch": 19.79310344827586,
2357
+ "grad_norm": 11.805370330810547,
2358
+ "learning_rate": 2.315950920245399e-05,
2359
+ "loss": 0.2013,
2360
+ "step": 574
2361
+ },
2362
+ {
2363
+ "epoch": 19.862068965517242,
2364
+ "grad_norm": 0.12794336676597595,
2365
+ "learning_rate": 2.285276073619632e-05,
2366
+ "loss": 0.0086,
2367
+ "step": 576
2368
+ },
2369
+ {
2370
+ "epoch": 19.93103448275862,
2371
+ "grad_norm": 0.3751371204853058,
2372
+ "learning_rate": 2.2546012269938653e-05,
2373
+ "loss": 0.0046,
2374
+ "step": 578
2375
+ },
2376
+ {
2377
+ "epoch": 20.0,
2378
+ "grad_norm": 0.025759520009160042,
2379
+ "learning_rate": 2.2239263803680982e-05,
2380
+ "loss": 0.0013,
2381
+ "step": 580
2382
+ },
2383
+ {
2384
+ "epoch": 20.0,
2385
+ "eval_accuracy": 0.6363636363636364,
2386
+ "eval_f1_macro": 0.5513619025246932,
2387
+ "eval_f1_micro": 0.6363636363636364,
2388
+ "eval_f1_weighted": 0.6223818820101413,
2389
+ "eval_loss": 2.1374073028564453,
2390
+ "eval_precision_macro": 0.6025479764157659,
2391
+ "eval_precision_micro": 0.6363636363636364,
2392
+ "eval_precision_weighted": 0.6765262979680514,
2393
+ "eval_recall_macro": 0.5684882842025699,
2394
+ "eval_recall_micro": 0.6363636363636364,
2395
+ "eval_recall_weighted": 0.6363636363636364,
2396
+ "eval_runtime": 3.0501,
2397
+ "eval_samples_per_second": 43.277,
2398
+ "eval_steps_per_second": 5.574,
2399
+ "step": 580
2400
+ },
2401
+ {
2402
+ "epoch": 20.06896551724138,
2403
+ "grad_norm": 5.584213733673096,
2404
+ "learning_rate": 2.1932515337423315e-05,
2405
+ "loss": 0.0257,
2406
+ "step": 582
2407
+ },
2408
+ {
2409
+ "epoch": 20.137931034482758,
2410
+ "grad_norm": 6.075766086578369,
2411
+ "learning_rate": 2.1625766871165647e-05,
2412
+ "loss": 0.0319,
2413
+ "step": 584
2414
+ },
2415
+ {
2416
+ "epoch": 20.20689655172414,
2417
+ "grad_norm": 0.27452629804611206,
2418
+ "learning_rate": 2.1319018404907976e-05,
2419
+ "loss": 0.0029,
2420
+ "step": 586
2421
+ },
2422
+ {
2423
+ "epoch": 20.275862068965516,
2424
+ "grad_norm": 0.04866321012377739,
2425
+ "learning_rate": 2.1012269938650308e-05,
2426
+ "loss": 0.0021,
2427
+ "step": 588
2428
+ },
2429
+ {
2430
+ "epoch": 20.344827586206897,
2431
+ "grad_norm": 1.3495759963989258,
2432
+ "learning_rate": 2.0705521472392637e-05,
2433
+ "loss": 0.0088,
2434
+ "step": 590
2435
+ },
2436
+ {
2437
+ "epoch": 20.413793103448278,
2438
+ "grad_norm": 2.283107042312622,
2439
+ "learning_rate": 2.039877300613497e-05,
2440
+ "loss": 0.0166,
2441
+ "step": 592
2442
+ },
2443
+ {
2444
+ "epoch": 20.482758620689655,
2445
+ "grad_norm": 1.5386358499526978,
2446
+ "learning_rate": 2.0092024539877302e-05,
2447
+ "loss": 0.0544,
2448
+ "step": 594
2449
+ },
2450
+ {
2451
+ "epoch": 20.551724137931036,
2452
+ "grad_norm": 5.752704620361328,
2453
+ "learning_rate": 1.978527607361963e-05,
2454
+ "loss": 0.0246,
2455
+ "step": 596
2456
+ },
2457
+ {
2458
+ "epoch": 20.620689655172413,
2459
+ "grad_norm": 3.968679904937744,
2460
+ "learning_rate": 1.9478527607361967e-05,
2461
+ "loss": 0.0161,
2462
+ "step": 598
2463
+ },
2464
+ {
2465
+ "epoch": 20.689655172413794,
2466
+ "grad_norm": 0.8225330710411072,
2467
+ "learning_rate": 1.9171779141104296e-05,
2468
+ "loss": 0.0056,
2469
+ "step": 600
2470
+ },
2471
+ {
2472
+ "epoch": 20.75862068965517,
2473
+ "grad_norm": 6.011229991912842,
2474
+ "learning_rate": 1.8865030674846625e-05,
2475
+ "loss": 0.1674,
2476
+ "step": 602
2477
+ },
2478
+ {
2479
+ "epoch": 20.82758620689655,
2480
+ "grad_norm": 0.19111225008964539,
2481
+ "learning_rate": 1.855828220858896e-05,
2482
+ "loss": 0.0069,
2483
+ "step": 604
2484
+ },
2485
+ {
2486
+ "epoch": 20.896551724137932,
2487
+ "grad_norm": 14.381773948669434,
2488
+ "learning_rate": 1.825153374233129e-05,
2489
+ "loss": 0.1019,
2490
+ "step": 606
2491
+ },
2492
+ {
2493
+ "epoch": 20.96551724137931,
2494
+ "grad_norm": 7.869429111480713,
2495
+ "learning_rate": 1.7944785276073618e-05,
2496
+ "loss": 0.0589,
2497
+ "step": 608
2498
+ },
2499
+ {
2500
+ "epoch": 21.0,
2501
+ "eval_accuracy": 0.6439393939393939,
2502
+ "eval_f1_macro": 0.5879137999438752,
2503
+ "eval_f1_micro": 0.6439393939393939,
2504
+ "eval_f1_weighted": 0.6396446806663525,
2505
+ "eval_loss": 1.767621636390686,
2506
+ "eval_precision_macro": 0.5940388301069416,
2507
+ "eval_precision_micro": 0.6439393939393939,
2508
+ "eval_precision_weighted": 0.640713517648502,
2509
+ "eval_recall_macro": 0.5888813303099018,
2510
+ "eval_recall_micro": 0.6439393939393939,
2511
+ "eval_recall_weighted": 0.6439393939393939,
2512
+ "eval_runtime": 2.9525,
2513
+ "eval_samples_per_second": 44.709,
2514
+ "eval_steps_per_second": 5.758,
2515
+ "step": 609
2516
+ },
2517
+ {
2518
+ "epoch": 21.03448275862069,
2519
+ "grad_norm": 0.8013659119606018,
2520
+ "learning_rate": 1.7638036809815954e-05,
2521
+ "loss": 0.0109,
2522
+ "step": 610
2523
+ },
2524
+ {
2525
+ "epoch": 21.103448275862068,
2526
+ "grad_norm": 0.09149477630853653,
2527
+ "learning_rate": 1.7331288343558283e-05,
2528
+ "loss": 0.0015,
2529
+ "step": 612
2530
+ },
2531
+ {
2532
+ "epoch": 21.17241379310345,
2533
+ "grad_norm": 0.035085856914520264,
2534
+ "learning_rate": 1.7024539877300612e-05,
2535
+ "loss": 0.0278,
2536
+ "step": 614
2537
+ },
2538
+ {
2539
+ "epoch": 21.24137931034483,
2540
+ "grad_norm": 3.7100307941436768,
2541
+ "learning_rate": 1.6717791411042948e-05,
2542
+ "loss": 0.0217,
2543
+ "step": 616
2544
+ },
2545
+ {
2546
+ "epoch": 21.310344827586206,
2547
+ "grad_norm": 0.08583565056324005,
2548
+ "learning_rate": 1.6411042944785277e-05,
2549
+ "loss": 0.0083,
2550
+ "step": 618
2551
+ },
2552
+ {
2553
+ "epoch": 21.379310344827587,
2554
+ "grad_norm": 3.9796557426452637,
2555
+ "learning_rate": 1.6104294478527606e-05,
2556
+ "loss": 0.1204,
2557
+ "step": 620
2558
+ },
2559
+ {
2560
+ "epoch": 21.448275862068964,
2561
+ "grad_norm": 3.607825994491577,
2562
+ "learning_rate": 1.579754601226994e-05,
2563
+ "loss": 0.0264,
2564
+ "step": 622
2565
+ },
2566
+ {
2567
+ "epoch": 21.517241379310345,
2568
+ "grad_norm": 0.23625929653644562,
2569
+ "learning_rate": 1.549079754601227e-05,
2570
+ "loss": 0.0061,
2571
+ "step": 624
2572
+ },
2573
+ {
2574
+ "epoch": 21.586206896551722,
2575
+ "grad_norm": 0.34624549746513367,
2576
+ "learning_rate": 1.5184049079754603e-05,
2577
+ "loss": 0.0655,
2578
+ "step": 626
2579
+ },
2580
+ {
2581
+ "epoch": 21.655172413793103,
2582
+ "grad_norm": 4.58054256439209,
2583
+ "learning_rate": 1.4877300613496933e-05,
2584
+ "loss": 0.0465,
2585
+ "step": 628
2586
+ },
2587
+ {
2588
+ "epoch": 21.724137931034484,
2589
+ "grad_norm": 0.03262769430875778,
2590
+ "learning_rate": 1.4570552147239264e-05,
2591
+ "loss": 0.0025,
2592
+ "step": 630
2593
+ },
2594
+ {
2595
+ "epoch": 21.79310344827586,
2596
+ "grad_norm": 0.6401829123497009,
2597
+ "learning_rate": 1.4263803680981596e-05,
2598
+ "loss": 0.0849,
2599
+ "step": 632
2600
+ },
2601
+ {
2602
+ "epoch": 21.862068965517242,
2603
+ "grad_norm": 4.184028148651123,
2604
+ "learning_rate": 1.3957055214723927e-05,
2605
+ "loss": 0.0624,
2606
+ "step": 634
2607
+ },
2608
+ {
2609
+ "epoch": 21.93103448275862,
2610
+ "grad_norm": 0.050627466291189194,
2611
+ "learning_rate": 1.3650306748466258e-05,
2612
+ "loss": 0.0023,
2613
+ "step": 636
2614
+ },
2615
+ {
2616
+ "epoch": 22.0,
2617
+ "grad_norm": 3.8194804191589355,
2618
+ "learning_rate": 1.334355828220859e-05,
2619
+ "loss": 0.0263,
2620
+ "step": 638
2621
+ },
2622
+ {
2623
+ "epoch": 22.0,
2624
+ "eval_accuracy": 0.6439393939393939,
2625
+ "eval_f1_macro": 0.5785271807221967,
2626
+ "eval_f1_micro": 0.6439393939393939,
2627
+ "eval_f1_weighted": 0.6327110651723412,
2628
+ "eval_loss": 1.841572880744934,
2629
+ "eval_precision_macro": 0.6016241640968781,
2630
+ "eval_precision_micro": 0.6439393939393939,
2631
+ "eval_precision_weighted": 0.6454148108541149,
2632
+ "eval_recall_macro": 0.5757520786092215,
2633
+ "eval_recall_micro": 0.6439393939393939,
2634
+ "eval_recall_weighted": 0.6439393939393939,
2635
+ "eval_runtime": 2.9422,
2636
+ "eval_samples_per_second": 44.864,
2637
+ "eval_steps_per_second": 5.778,
2638
+ "step": 638
2639
+ },
2640
+ {
2641
+ "epoch": 22.06896551724138,
2642
+ "grad_norm": 0.16209831833839417,
2643
+ "learning_rate": 1.303680981595092e-05,
2644
+ "loss": 0.0465,
2645
+ "step": 640
2646
+ },
2647
+ {
2648
+ "epoch": 22.137931034482758,
2649
+ "grad_norm": 0.1036888137459755,
2650
+ "learning_rate": 1.2730061349693251e-05,
2651
+ "loss": 0.0657,
2652
+ "step": 642
2653
+ },
2654
+ {
2655
+ "epoch": 22.20689655172414,
2656
+ "grad_norm": 1.0510048866271973,
2657
+ "learning_rate": 1.2423312883435584e-05,
2658
+ "loss": 0.0062,
2659
+ "step": 644
2660
+ },
2661
+ {
2662
+ "epoch": 22.275862068965516,
2663
+ "grad_norm": 3.261247158050537,
2664
+ "learning_rate": 1.2116564417177914e-05,
2665
+ "loss": 0.0175,
2666
+ "step": 646
2667
+ },
2668
+ {
2669
+ "epoch": 22.344827586206897,
2670
+ "grad_norm": 0.41699743270874023,
2671
+ "learning_rate": 1.1809815950920245e-05,
2672
+ "loss": 0.0136,
2673
+ "step": 648
2674
+ },
2675
+ {
2676
+ "epoch": 22.413793103448278,
2677
+ "grad_norm": 1.2335162162780762,
2678
+ "learning_rate": 1.1503067484662577e-05,
2679
+ "loss": 0.005,
2680
+ "step": 650
2681
+ },
2682
+ {
2683
+ "epoch": 22.482758620689655,
2684
+ "grad_norm": 0.03782954812049866,
2685
+ "learning_rate": 1.119631901840491e-05,
2686
+ "loss": 0.2332,
2687
+ "step": 652
2688
+ },
2689
+ {
2690
+ "epoch": 22.551724137931036,
2691
+ "grad_norm": 4.644011497497559,
2692
+ "learning_rate": 1.0889570552147239e-05,
2693
+ "loss": 0.0251,
2694
+ "step": 654
2695
+ },
2696
+ {
2697
+ "epoch": 22.620689655172413,
2698
+ "grad_norm": 2.634734630584717,
2699
+ "learning_rate": 1.0582822085889571e-05,
2700
+ "loss": 0.0357,
2701
+ "step": 656
2702
+ },
2703
+ {
2704
+ "epoch": 22.689655172413794,
2705
+ "grad_norm": 0.04239609092473984,
2706
+ "learning_rate": 1.0276073619631903e-05,
2707
+ "loss": 0.0025,
2708
+ "step": 658
2709
+ },
2710
+ {
2711
+ "epoch": 22.75862068965517,
2712
+ "grad_norm": 1.9931837320327759,
2713
+ "learning_rate": 9.969325153374232e-06,
2714
+ "loss": 0.0139,
2715
+ "step": 660
2716
+ },
2717
+ {
2718
+ "epoch": 22.82758620689655,
2719
+ "grad_norm": 0.26363053917884827,
2720
+ "learning_rate": 9.662576687116565e-06,
2721
+ "loss": 0.0047,
2722
+ "step": 662
2723
+ },
2724
+ {
2725
+ "epoch": 22.896551724137932,
2726
+ "grad_norm": 0.2392028421163559,
2727
+ "learning_rate": 9.355828220858897e-06,
2728
+ "loss": 0.003,
2729
+ "step": 664
2730
+ },
2731
+ {
2732
+ "epoch": 22.96551724137931,
2733
+ "grad_norm": 0.5267300605773926,
2734
+ "learning_rate": 9.049079754601228e-06,
2735
+ "loss": 0.0028,
2736
+ "step": 666
2737
+ },
2738
+ {
2739
+ "epoch": 23.0,
2740
+ "eval_accuracy": 0.6666666666666666,
2741
+ "eval_f1_macro": 0.6068035568035569,
2742
+ "eval_f1_micro": 0.6666666666666666,
2743
+ "eval_f1_weighted": 0.6569024750842932,
2744
+ "eval_loss": 1.984299659729004,
2745
+ "eval_precision_macro": 0.663051653896554,
2746
+ "eval_precision_micro": 0.6666666666666666,
2747
+ "eval_precision_weighted": 0.6882214684446634,
2748
+ "eval_recall_macro": 0.6069312169312168,
2749
+ "eval_recall_micro": 0.6666666666666666,
2750
+ "eval_recall_weighted": 0.6666666666666666,
2751
+ "eval_runtime": 2.9646,
2752
+ "eval_samples_per_second": 44.526,
2753
+ "eval_steps_per_second": 5.734,
2754
+ "step": 667
2755
+ },
2756
+ {
2757
+ "epoch": 23.03448275862069,
2758
+ "grad_norm": 0.5740432739257812,
2759
+ "learning_rate": 8.742331288343558e-06,
2760
+ "loss": 0.003,
2761
+ "step": 668
2762
+ },
2763
+ {
2764
+ "epoch": 23.103448275862068,
2765
+ "grad_norm": 0.03469856455922127,
2766
+ "learning_rate": 8.435582822085889e-06,
2767
+ "loss": 0.0006,
2768
+ "step": 670
2769
+ },
2770
+ {
2771
+ "epoch": 23.17241379310345,
2772
+ "grad_norm": 0.05739348381757736,
2773
+ "learning_rate": 8.128834355828221e-06,
2774
+ "loss": 0.0562,
2775
+ "step": 672
2776
+ },
2777
+ {
2778
+ "epoch": 23.24137931034483,
2779
+ "grad_norm": 0.6837252974510193,
2780
+ "learning_rate": 7.822085889570554e-06,
2781
+ "loss": 0.0441,
2782
+ "step": 674
2783
+ },
2784
+ {
2785
+ "epoch": 23.310344827586206,
2786
+ "grad_norm": 10.602856636047363,
2787
+ "learning_rate": 7.5153374233128836e-06,
2788
+ "loss": 0.0493,
2789
+ "step": 676
2790
+ },
2791
+ {
2792
+ "epoch": 23.379310344827587,
2793
+ "grad_norm": 0.026621289551258087,
2794
+ "learning_rate": 7.208588957055215e-06,
2795
+ "loss": 0.0009,
2796
+ "step": 678
2797
+ },
2798
+ {
2799
+ "epoch": 23.448275862068964,
2800
+ "grad_norm": 0.05075424537062645,
2801
+ "learning_rate": 6.901840490797547e-06,
2802
+ "loss": 0.001,
2803
+ "step": 680
2804
+ },
2805
+ {
2806
+ "epoch": 23.517241379310345,
2807
+ "grad_norm": 3.2929599285125732,
2808
+ "learning_rate": 6.595092024539877e-06,
2809
+ "loss": 0.0166,
2810
+ "step": 682
2811
+ },
2812
+ {
2813
+ "epoch": 23.586206896551722,
2814
+ "grad_norm": 1.3883030414581299,
2815
+ "learning_rate": 6.288343558282209e-06,
2816
+ "loss": 0.0044,
2817
+ "step": 684
2818
+ },
2819
+ {
2820
+ "epoch": 23.655172413793103,
2821
+ "grad_norm": 3.663137197494507,
2822
+ "learning_rate": 5.98159509202454e-06,
2823
+ "loss": 0.0135,
2824
+ "step": 686
2825
+ },
2826
+ {
2827
+ "epoch": 23.724137931034484,
2828
+ "grad_norm": 0.037959225475788116,
2829
+ "learning_rate": 5.674846625766871e-06,
2830
+ "loss": 0.0009,
2831
+ "step": 688
2832
+ },
2833
+ {
2834
+ "epoch": 23.79310344827586,
2835
+ "grad_norm": 0.024055300280451775,
2836
+ "learning_rate": 5.368098159509203e-06,
2837
+ "loss": 0.001,
2838
+ "step": 690
2839
+ },
2840
+ {
2841
+ "epoch": 23.862068965517242,
2842
+ "grad_norm": 0.03909283131361008,
2843
+ "learning_rate": 5.061349693251534e-06,
2844
+ "loss": 0.0008,
2845
+ "step": 692
2846
+ },
2847
+ {
2848
+ "epoch": 23.93103448275862,
2849
+ "grad_norm": 0.017916660755872726,
2850
+ "learning_rate": 4.7546012269938654e-06,
2851
+ "loss": 0.0007,
2852
+ "step": 694
2853
+ },
2854
+ {
2855
+ "epoch": 24.0,
2856
+ "grad_norm": 0.009021880105137825,
2857
+ "learning_rate": 4.447852760736196e-06,
2858
+ "loss": 0.0006,
2859
+ "step": 696
2860
+ },
2861
+ {
2862
+ "epoch": 24.0,
2863
+ "eval_accuracy": 0.6742424242424242,
2864
+ "eval_f1_macro": 0.615661662832857,
2865
+ "eval_f1_micro": 0.6742424242424242,
2866
+ "eval_f1_weighted": 0.6655260925563127,
2867
+ "eval_loss": 1.9431589841842651,
2868
+ "eval_precision_macro": 0.6602878688132144,
2869
+ "eval_precision_micro": 0.6742424242424242,
2870
+ "eval_precision_weighted": 0.6853071985036794,
2871
+ "eval_recall_macro": 0.6152078609221466,
2872
+ "eval_recall_micro": 0.6742424242424242,
2873
+ "eval_recall_weighted": 0.6742424242424242,
2874
+ "eval_runtime": 2.9569,
2875
+ "eval_samples_per_second": 44.642,
2876
+ "eval_steps_per_second": 5.749,
2877
+ "step": 696
2878
+ },
2879
+ {
2880
+ "epoch": 24.06896551724138,
2881
+ "grad_norm": 0.08362489938735962,
2882
+ "learning_rate": 4.141104294478528e-06,
2883
+ "loss": 0.001,
2884
+ "step": 698
2885
+ },
2886
+ {
2887
+ "epoch": 24.137931034482758,
2888
+ "grad_norm": 2.7230169773101807,
2889
+ "learning_rate": 3.834355828220859e-06,
2890
+ "loss": 0.0105,
2891
+ "step": 700
2892
+ },
2893
+ {
2894
+ "epoch": 24.20689655172414,
2895
+ "grad_norm": 0.5051957368850708,
2896
+ "learning_rate": 3.52760736196319e-06,
2897
+ "loss": 0.0038,
2898
+ "step": 702
2899
+ },
2900
+ {
2901
+ "epoch": 24.275862068965516,
2902
+ "grad_norm": 0.6274848580360413,
2903
+ "learning_rate": 3.2208588957055217e-06,
2904
+ "loss": 0.0022,
2905
+ "step": 704
2906
+ },
2907
+ {
2908
+ "epoch": 24.344827586206897,
2909
+ "grad_norm": 1.267530083656311,
2910
+ "learning_rate": 2.914110429447853e-06,
2911
+ "loss": 0.0208,
2912
+ "step": 706
2913
+ },
2914
+ {
2915
+ "epoch": 24.413793103448278,
2916
+ "grad_norm": 0.0061959754675626755,
2917
+ "learning_rate": 2.607361963190184e-06,
2918
+ "loss": 0.0003,
2919
+ "step": 708
2920
+ },
2921
+ {
2922
+ "epoch": 24.482758620689655,
2923
+ "grad_norm": 0.01164495013654232,
2924
+ "learning_rate": 2.3006134969325154e-06,
2925
+ "loss": 0.0006,
2926
+ "step": 710
2927
+ },
2928
+ {
2929
+ "epoch": 24.551724137931036,
2930
+ "grad_norm": 0.051692791283130646,
2931
+ "learning_rate": 1.9938650306748465e-06,
2932
+ "loss": 0.0007,
2933
+ "step": 712
2934
+ },
2935
+ {
2936
+ "epoch": 24.620689655172413,
2937
+ "grad_norm": 1.7959578037261963,
2938
+ "learning_rate": 1.687116564417178e-06,
2939
+ "loss": 0.0073,
2940
+ "step": 714
2941
+ },
2942
+ {
2943
+ "epoch": 24.689655172413794,
2944
+ "grad_norm": 0.06827358901500702,
2945
+ "learning_rate": 1.3803680981595093e-06,
2946
+ "loss": 0.001,
2947
+ "step": 716
2948
+ },
2949
+ {
2950
+ "epoch": 24.75862068965517,
2951
+ "grad_norm": 0.014322124421596527,
2952
+ "learning_rate": 1.0736196319018406e-06,
2953
+ "loss": 0.0006,
2954
+ "step": 718
2955
+ },
2956
+ {
2957
+ "epoch": 24.82758620689655,
2958
+ "grad_norm": 0.24370881915092468,
2959
+ "learning_rate": 7.668711656441718e-07,
2960
+ "loss": 0.0045,
2961
+ "step": 720
2962
+ },
2963
+ {
2964
+ "epoch": 24.896551724137932,
2965
+ "grad_norm": 0.017789943143725395,
2966
+ "learning_rate": 4.601226993865031e-07,
2967
+ "loss": 0.0005,
2968
+ "step": 722
2969
+ },
2970
+ {
2971
+ "epoch": 24.96551724137931,
2972
+ "grad_norm": 0.03513872250914574,
2973
+ "learning_rate": 1.5337423312883438e-07,
2974
+ "loss": 0.0004,
2975
+ "step": 724
2976
+ },
2977
+ {
2978
+ "epoch": 25.0,
2979
+ "eval_accuracy": 0.6666666666666666,
2980
+ "eval_f1_macro": 0.6089082756725702,
2981
+ "eval_f1_micro": 0.6666666666666666,
2982
+ "eval_f1_weighted": 0.6569429652354989,
2983
+ "eval_loss": 1.9346412420272827,
2984
+ "eval_precision_macro": 0.654845691942466,
2985
+ "eval_precision_micro": 0.6666666666666666,
2986
+ "eval_precision_weighted": 0.6762704019668829,
2987
+ "eval_recall_macro": 0.6072713529856387,
2988
+ "eval_recall_micro": 0.6666666666666666,
2989
+ "eval_recall_weighted": 0.6666666666666666,
2990
+ "eval_runtime": 2.9677,
2991
+ "eval_samples_per_second": 44.478,
2992
+ "eval_steps_per_second": 5.728,
2993
+ "step": 725
2994
+ },
2995
+ {
2996
+ "epoch": 25.0,
2997
+ "step": 725,
2998
+ "total_flos": 3.163993239336653e+18,
2999
+ "train_loss": 0.5988656284373478,
3000
+ "train_runtime": 971.0069,
3001
+ "train_samples_per_second": 11.895,
3002
+ "train_steps_per_second": 0.747
3003
+ }
3004
+ ],
3005
+ "logging_steps": 2,
3006
+ "max_steps": 725,
3007
+ "num_input_tokens_seen": 0,
3008
+ "num_train_epochs": 25,
3009
+ "save_steps": 500,
3010
+ "stateful_callbacks": {
3011
+ "TrainerControl": {
3012
+ "args": {
3013
+ "should_epoch_stop": false,
3014
+ "should_evaluate": false,
3015
+ "should_log": false,
3016
+ "should_save": true,
3017
+ "should_training_stop": true
3018
+ },
3019
+ "attributes": {}
3020
+ }
3021
+ },
3022
+ "total_flos": 3.163993239336653e+18,
3023
+ "train_batch_size": 8,
3024
+ "trial_name": null,
3025
+ "trial_params": null
3026
+ }
training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:f68beff55092ac6120c5bc232cb6c4372ecb2f74dfd58cf8c1a32530d4aafc80
+ size 5368