valteu committed
Commit 33b91b2 · verified · 1 parent: 067804b

Upload folder using huggingface_hub

config.json ADDED
@@ -0,0 +1,40 @@
+{
+  "architectures": [
+    "LlamaForCausalLM"
+  ],
+  "attention_bias": false,
+  "attention_dropout": 0.0,
+  "bos_token_id": 128000,
+  "eos_token_id": [
+    128001,
+    128008,
+    128009
+  ],
+  "head_dim": 64,
+  "hidden_act": "silu",
+  "hidden_size": 2048,
+  "initializer_range": 0.02,
+  "intermediate_size": 8192,
+  "max_position_embeddings": 131072,
+  "mlp_bias": false,
+  "model_type": "llama",
+  "num_attention_heads": 32,
+  "num_hidden_layers": 16,
+  "num_key_value_heads": 8,
+  "pad_token_id": 128004,
+  "pretraining_tp": 1,
+  "rms_norm_eps": 1e-05,
+  "rope_scaling": {
+    "factor": 32.0,
+    "high_freq_factor": 4.0,
+    "low_freq_factor": 1.0,
+    "original_max_position_embeddings": 8192,
+    "rope_type": "llama3"
+  },
+  "rope_theta": 500000.0,
+  "tie_word_embeddings": true,
+  "torch_dtype": "bfloat16",
+  "transformers_version": "4.52.4",
+  "use_cache": true,
+  "vocab_size": 128256
+}
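
For context, the config above describes a 16-layer Llama-architecture causal LM: hidden size 2048, grouped-query attention (32 query heads, 8 KV heads), llama3-style RoPE scaling, and tied input/output embeddings. A minimal loading sketch, assuming the uploaded folder is checked out locally; "./checkpoint" is a placeholder path, not a name from this commit, and no tokenizer files appear here:

# Minimal sketch: load this checkpoint with transformers (config says 4.52.4).
import torch
from transformers import AutoModelForCausalLM

model = AutoModelForCausalLM.from_pretrained(
    "./checkpoint",              # placeholder: directory holding the files above
    torch_dtype=torch.bfloat16,  # matches "torch_dtype": "bfloat16" in config.json
)
# The 2.47 GB bf16 weights file implies roughly 1.24B parameters.
print(sum(p.numel() for p in model.parameters()))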
experiment_config.json ADDED
The diff for this file is too large to render. See raw diff
 
generation_config.json ADDED
@@ -0,0 +1,12 @@
+{
+  "bos_token_id": 128000,
+  "do_sample": true,
+  "eos_token_id": [
+    128001,
+    128008,
+    128009
+  ],
+  "temperature": 0.6,
+  "top_p": 0.9,
+  "transformers_version": "4.52.4"
+}
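
These defaults enable nucleus sampling. A sketch of what they amount to at generation time; the tokenizer id below is an assumption, since this commit ships no tokenizer files:

from transformers import AutoModelForCausalLM, AutoTokenizer

tok = AutoTokenizer.from_pretrained("meta-llama/Llama-3.2-1B")  # assumed Llama-3 tokenizer
model = AutoModelForCausalLM.from_pretrained("./checkpoint")    # placeholder path

inputs = tok("The GLUE benchmark covers", return_tensors="pt")
out = model.generate(
    **inputs,
    do_sample=True,    # sampling enabled, per generation_config.json
    temperature=0.6,   # softens the logits before sampling
    top_p=0.9,         # nucleus sampling: smallest token set with mass >= 0.9
    max_new_tokens=32,
)
print(tok.decode(out[0], skip_special_tokens=True))

When the folder is loaded with from_pretrained, generate() reads generation_config.json automatically, so the explicit kwargs above only make the saved defaults visible.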
logs.jsonl ADDED
The diff for this file is too large to render. See raw diff
 
model.safetensors ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:c93f303b33aca20ea8deb0319665b8a4be6f598f3c595bdeff0b163ff243f659
+size 2471645608
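
The three lines above are a Git LFS pointer, not the weights themselves. A small sketch, standard library only, for checking a downloaded model.safetensors against the pointer's size and sha256 oid:

import hashlib, os

path = "model.safetensors"  # the downloaded file, not the pointer
expected_oid = "c93f303b33aca20ea8deb0319665b8a4be6f598f3c595bdeff0b163ff243f659"
expected_size = 2471645608

assert os.path.getsize(path) == expected_size, "size mismatch"

h = hashlib.sha256()
with open(path, "rb") as f:
    for chunk in iter(lambda: f.read(1 << 20), b""):  # hash in 1 MiB chunks
        h.update(chunk)
assert h.hexdigest() == expected_oid, "sha256 mismatch"
print("pointer matches download")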
profiler_cache.csv ADDED
The diff for this file is too large to render. See raw diff
 
results.json ADDED
@@ -0,0 +1,19 @@
+{
+  "results": {
+    "glue_mrpc_acc_score": 0.8014705882352942,
+    "glue_mrpc_acc_sem": 0.019772383169192092,
+    "glue_mrpc_f1_score": 0.8421052631578947,
+    "glue_mrpc_f1_sem": 0.017407398689792023,
+    "glue_rte_acc_score": 0.5234657039711191,
+    "glue_rte_acc_sem": 0.030063300411902607,
+    "glue_sst2_acc_score": 0.5229357798165137,
+    "glue_sst2_acc_sem": 0.01692401977869958
+  },
+  "energy": {
+    "total": 58401.35561,
+    "train": 45370.694260000004,
+    "eval": 13030.661350000002
+  },
+  "train_energy": 45370.694260000004,
+  "eval_energy": 13030.661350000002
+}
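
The *_sem fields are standard errors of the mean, so each score can be read with a rough 95% confidence interval (score ± 1.96 × sem, normal approximation), as in this sketch:

# Turn the accuracy score/sem pairs from results.json into rough 95% CIs.
results = {
    "glue_mrpc_acc": (0.8014705882352942, 0.019772383169192092),
    "glue_rte_acc": (0.5234657039711191, 0.030063300411902607),
    "glue_sst2_acc": (0.5229357798165137, 0.01692401977869958),
}
for name, (score, sem) in results.items():
    lo, hi = score - 1.96 * sem, score + 1.96 * sem
    print(f"{name}: {score:.3f} (95% CI {lo:.3f}-{hi:.3f})")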
summary.json ADDED
@@ -0,0 +1,23 @@
+{
+  "flops": {
+    "eval": 5014951860256000,
+    "train": 10640863719936576,
+    "total": 15655815580192576
+  },
+  "total": {
+    "total": 58401.35561,
+    "train": 45370.694260000004,
+    "eval": 13030.661350000002
+  },
+  "best_evals": {
+    "pplx": {
+      "score": 36628973.8921973,
+      "step": 2751
+    },
+    "rougel": {
+      "precision": 0.7598039215686274,
+      "recall": 0.7598039215686274,
+      "fmeasure": 0.7598039215686274
+    }
+  }
+}
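
A sketch relating the FLOP counts above to the energy totals; the unit of the energy figures is not stated in these files, so treating them as joules is an assumption:

# Compute achieved compute-per-energy from summary.json's raw numbers.
flops = {"train": 10640863719936576, "eval": 5014951860256000}
energy_j = {"train": 45370.694260000004, "eval": 13030.661350000002}  # assumed joules

for phase in ("train", "eval"):
    gflops_per_joule = flops[phase] / energy_j[phase] / 1e9
    print(f"{phase}: {gflops_per_joule:.1f} GFLOPs per joule (assumed unit)")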