YunjinZhang committed on
Commit 3d8abad · verified · 1 Parent(s): 7444f5c

Upload lact-muon-nope-postnorm-nheads2-chunk2048-760m/config.json with huggingface_hub
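
For context, a minimal sketch of how a single file like this can be pushed with huggingface_hub; the repo_id below is a placeholder, not something named in this commit:

# Minimal sketch: push one config file to the Hub with huggingface_hub.
# repo_id is a placeholder; this commit does not identify the target repo.
from huggingface_hub import HfApi

api = HfApi()  # picks up the token saved by `huggingface-cli login`
api.upload_file(
    path_or_fileobj="lact-muon-nope-postnorm-nheads2-chunk2048-760m/config.json",
    path_in_repo="lact-muon-nope-postnorm-nheads2-chunk2048-760m/config.json",
    repo_id="YunjinZhang/lact-checkpoints",  # placeholder repo id
    repo_type="model",
    commit_message="Upload lact-muon-nope-postnorm-nheads2-chunk2048-760m/config.json with huggingface_hub",
)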

lact-muon-nope-postnorm-nheads2-chunk2048-760m/config.json ADDED
@@ -0,0 +1,47 @@
+ {
+   "_name_or_path": "configs/lact/760M_lact_swiglu_nh4_fwlow_rank_momentum_muon.json",
+   "architectures": [
+     "LaCTForCausalLM"
+   ],
+   "attention_bias": false,
+   "attn_qk_norm": false,
+   "bos_token_id": 1,
+   "elementwise_affine": true,
+   "eos_token_id": 2,
+   "fuse_cross_entropy": true,
+   "fuse_norm": true,
+   "fuse_swiglu": true,
+   "fw_init_gain": 0.5,
+   "hidden_act": "swish",
+   "hidden_ratio": 4,
+   "hidden_size": 1536,
+   "initializer_range": 0.02,
+   "inter_multi": 1,
+   "intermediate_size": null,
+   "lact_chunk_size": 2048,
+   "last_layer_fuse_norm": true,
+   "learnable_ttt_scale": true,
+   "lr_dim": 1,
+   "lr_parameterization": "mamba",
+   "max_position_embeddings": 32768,
+   "model_type": "lact_swiglu",
+   "norm_eps": 1e-06,
+   "num_attn_heads": 24,
+   "num_hidden_layers": 24,
+   "num_lact_heads": 4,
+   "qkv_bias": false,
+   "qkv_silu": true,
+   "rope_theta": 1000000,
+   "tie_word_embeddings": false,
+   "torch_dtype": "float32",
+   "transformers_version": "4.45.2",
+   "ttt_loss_type": "dot_product",
+   "ttt_nope": true,
+   "ttt_prenorm": false,
+   "use_cache": false,
+   "use_momentum": true,
+   "use_muon": true,
+   "vocab_size": 32000,
+   "w0_w2_low_rank": 32,
+   "window_size": 2048
+ }
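
Since model_type "lact_swiglu" is not a built-in transformers architecture, reading this config back would presumably require the repo's custom modeling code; a hedged sketch, with a placeholder repo id and assuming the checkpoint ships remote code:

# Sketch: load this config with transformers (placeholder repo id).
# "lact_swiglu" is not bundled with transformers 4.45.2, so
# trust_remote_code=True is assumed to pull the repo's custom code;
# subfolder matches the path the file was uploaded under.
from transformers import AutoConfig

config = AutoConfig.from_pretrained(
    "YunjinZhang/lact-checkpoints",  # placeholder repo id
    subfolder="lact-muon-nope-postnorm-nheads2-chunk2048-760m",
    trust_remote_code=True,
)
print(config.model_type, config.hidden_size, config.num_hidden_layers)
# expected: lact_swiglu 1536 24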