aifeifei798 committed (verified)
Commit 840fc15 · Parent(s): 6a4d6c5

Upload 7 files

train/chat_moe_model.py ADDED
@@ -0,0 +1,137 @@
1
+ # ==============================================================================
2
+ # Smol-MoE 8x135M - "Chat with Your Creation"
3
+ # (Final Interactive Inference Script)
4
+ # ==============================================================================
5
+
6
+ import torch
7
+ import torch.nn as nn
8
+ import torch.nn.functional as F
9
+ from transformers import AutoConfig, AutoModelForCausalLM, AutoTokenizer
10
+ from transformers.models.llama.modeling_llama import LlamaMLP
11
+ import os
12
+
13
+ # --- 1. CRITICAL: Re-define all of your custom modules ---
14
+ # This is what allows from_pretrained() to successfully rebuild your custom model.
15
+
16
+ MODEL_PATH = "./SmolMoE-8x135M-Instruct-v1-Trained"
17
+
18
+ # Read the MoE parameters from the saved model config
19
+ config = AutoConfig.from_pretrained(MODEL_PATH)
20
+ NUM_EXPERTS = config.moe_num_experts
21
+ TOP_K = config.moe_top_k
22
+
23
+ class MoERouter(nn.Module):
24
+ def __init__(self, hidden_size: int, num_experts: int):
25
+ super().__init__()
26
+ self.layer = nn.Linear(hidden_size, num_experts, bias=False)
27
+ def forward(self, hidden_states):
28
+ return self.layer(hidden_states)
29
+
30
+ class MoEModule(nn.Module):
31
+ def __init__(self, config):
32
+ super().__init__()
33
+ self.hidden_size = config.hidden_size
34
+ self.top_k = TOP_K
35
+ self.num_experts = NUM_EXPERTS
36
+ self.router = MoERouter(self.hidden_size, self.num_experts)
37
+ self.experts = nn.ModuleList([LlamaMLP(config) for _ in range(self.num_experts)])
38
+
39
+ def forward(self, hidden_states):
40
+ original_shape = hidden_states.shape
41
+ flat_hidden_states = hidden_states.view(-1, self.hidden_size)
42
+ router_logits = self.router(flat_hidden_states)
43
+ routing_weights = F.softmax(router_logits, dim=-1, dtype=torch.float)
44
+ routing_weights, selected_experts = torch.topk(routing_weights, self.top_k, dim=-1)
45
+ routing_weights /= routing_weights.sum(dim=-1, keepdim=True)
46
+ routing_weights = routing_weights.to(hidden_states.dtype)
47
+ final_hidden_states = torch.zeros_like(flat_hidden_states)
48
+ for k in range(self.top_k):
49
+ expert_indices_k = selected_experts[:, k]
50
+ routing_weights_k = routing_weights[:, k]
51
+ for i in range(self.num_experts):
52
+ mask = expert_indices_k == i
53
+ if mask.any():
54
+ expert_output = self.experts[i](flat_hidden_states[mask])
55
+ final_hidden_states.index_add_(0, torch.where(mask)[0], expert_output * routing_weights_k[mask].unsqueeze(1))
56
+ return final_hidden_states.view(*original_shape)
57
+
58
+
59
+ # --- 2. Main program: load the model and start chatting ---
60
+ def main():
61
+ device = "cuda" if torch.cuda.is_available() else "cpu"
62
+
63
+ # --- Model loading ---
64
+ print(f"Loading tokenizer from '{MODEL_PATH}'...")
65
+ tokenizer = AutoTokenizer.from_pretrained(MODEL_PATH)
66
+
67
+ print(f"Manually rebuilding MoE model structure...")
68
+ # Use `from_config` to build an "empty shell" with the correct structure but random weights
69
+ moe_model = AutoModelForCausalLM.from_config(config)
70
+
71
+ # Manually perform the "architectural surgery": replace each standard MLP with our MoE module
72
+ for i, layer in enumerate(moe_model.model.layers):
73
+ layer.mlp = MoEModule(config)
74
+
75
+ print(f"Loading your trained MoE weights into the correct structure...")
76
+ from safetensors.torch import load_file
77
+ state_dict = load_file(os.path.join(MODEL_PATH, "model.safetensors"), device="cpu")
78
+
79
+ # Load flexibly with `strict=False`, then tie the weights manually
80
+ moe_model.load_state_dict(state_dict, strict=False)
81
+ moe_model.tie_weights()
82
+
83
+ moe_model.to(device, dtype=torch.bfloat16)
84
+ moe_model.eval() # Switch to evaluation mode
85
+ print("--- MoE Model is ready for conversation! ---")
86
+ print("Type 'exit' or 'quit' to end the chat.\n")
87
+
88
+ # --- Interactive conversation loop ---
89
+ messages = []
90
+ while True:
91
+ try:
92
+ user_input = input("You: ")
93
+ if user_input.lower() in ["exit", "quit"]:
94
+ print("Goodbye!")
95
+ break
96
+
97
+ # 1. Add the user's input to the conversation history
98
+ messages.append({"role": "user", "content": user_input})
99
+
100
+ # 2. Format the full conversation history with the chat template
101
+ # `add_generation_prompt=True` appends the opening tokens for the assistant's turn
102
+ prompt_text = tokenizer.apply_chat_template(
103
+ messages,
104
+ tokenize=False,
105
+ add_generation_prompt=True
106
+ )
107
+
108
+ # 3. Encode the input and move it to the GPU
109
+ inputs = tokenizer(prompt_text, return_tensors="pt").to(device)
110
+
111
+ # 4. Generate a response
112
+ with torch.no_grad():
113
+ outputs = moe_model.generate(
114
+ **inputs,
115
+ max_new_tokens=256,
116
+ temperature=0.7,
117
+ top_p=0.9,
118
+ do_sample=True
119
+ )
120
+
121
+ # 5. Decode and clean up the output
122
+ # `outputs[0]` contains the full conversation (input + output); we need to extract the newly generated part
123
+ full_response = tokenizer.decode(outputs[0], skip_special_tokens=True)
124
+ # Find the newly generated part by removing the original prompt
125
+ model_response = full_response.replace(prompt_text.replace("<s> ", "").replace("</s>", ""), "").strip()
126
+
127
+ print(f"MoE Model: {model_response}")
128
+
129
+ # 6. Add the model's reply to the history as well, to enable multi-turn conversation
130
+ messages.append({"role": "assistant", "content": model_response})
131
+
132
+ except KeyboardInterrupt:
133
+ print("\nGoodbye!")
134
+ break
135
+
136
+ if __name__ == "__main__":
137
+ main()
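
One caveat on step 5 above: reconstructing the reply by string-replacing the prompt is brittle, because the decoded text rarely matches prompt_text character for character once special tokens are stripped. A minimal alternative sketch (hypothetical, reusing the variables from the loop above) slices off the prompt tokens instead:

# Everything before inputs["input_ids"].shape[1] is the prompt; decode only what follows.
prompt_length = inputs["input_ids"].shape[1]
new_tokens = outputs[0][prompt_length:]
model_response = tokenizer.decode(new_tokens, skip_special_tokens=True).strip()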
train/chat_moe_model_zhcn_en.py ADDED
@@ -0,0 +1,177 @@
1
+ # ==============================================================================
2
+ # Smol-MoE 8x135M - "Chat with Your Creation"
3
+ # (Final Interactive Inference Script)
4
+ #
5
+ # Smol-MoE 8x135M - “与你的造物对话”
6
+ # (最终版交互式推理脚本)
7
+ # ==============================================================================
8
+
9
+ # --- Core Library Imports / 核心库导入 ---
10
+ import torch
11
+ import torch.nn as nn
12
+ import torch.nn.functional as F
13
+ from transformers import AutoConfig, AutoModelForCausalLM, AutoTokenizer
14
+ from transformers.models.llama.modeling_llama import LlamaMLP
15
+ import os
16
+
17
+ # --- 1. CRITICAL: Re-define Your Custom Architecture ---
18
+ # --- 1. 关键:重新定义你的所有自定义模块 ---
19
+ # When loading a model with a custom architecture, Hugging Face needs to know the definition of the custom classes.
20
+ # By defining them here, we allow the `from_pretrained` process to correctly reconstruct our unique MoE model.
21
+ # 在加载一个拥有自定义架构的模型时,Hugging Face 需要知道这些自定义类的定义。
22
+ # 在这里定义它们,我们才能让 `from_pretrained` 函数成功地重建我们独一无二的MoE模型。
23
+
24
+ # --- Model Configuration / 模型配置 ---
25
+ MODEL_PATH = "./SmolMoE-8x135M-Instruct-v1-Trained"
26
+
27
+ # Load our custom MoE parameters from the saved config file.
28
+ # 从我们保存的配置文件中,加载自定义的MoE参数。
29
+ config = AutoConfig.from_pretrained(MODEL_PATH)
30
+ NUM_EXPERTS = config.moe_num_experts
31
+ TOP_K = config.moe_top_k
32
+
33
+ class MoERouter(nn.Module):
34
+ """The Router module. Its job is to score experts for each token."""
35
+ """路由器模块。它的工作是为每个token给所有专家打分。"""
36
+ def __init__(self, hidden_size: int, num_experts: int):
37
+ super().__init__()
38
+ self.layer = nn.Linear(hidden_size, num_experts, bias=False)
39
+ def forward(self, hidden_states):
40
+ return self.layer(hidden_states)
41
+
42
+ class MoEModule(nn.Module):
43
+ """The custom Mixture-of-Experts module that replaces the standard FFN."""
44
+ """我们自定义的混合专家模块,它替换了标准的FFN。"""
45
+ def __init__(self, config):
46
+ super().__init__()
47
+ self.hidden_size = config.hidden_size
48
+ self.top_k = TOP_K
49
+ self.num_experts = NUM_EXPERTS
50
+ self.router = MoERouter(self.hidden_size, self.num_experts)
51
+ self.experts = nn.ModuleList([LlamaMLP(config) for _ in range(self.num_experts)])
52
+
53
+ def forward(self, hidden_states):
54
+ original_shape = hidden_states.shape
55
+ flat_hidden_states = hidden_states.view(-1, self.hidden_size)
56
+ router_logits = self.router(flat_hidden_states)
57
+ routing_weights = F.softmax(router_logits, dim=-1, dtype=torch.float)
58
+ routing_weights, selected_experts = torch.topk(routing_weights, self.top_k, dim=-1)
59
+ routing_weights /= routing_weights.sum(dim=-1, keepdim=True)
60
+ routing_weights = routing_weights.to(hidden_states.dtype)
61
+ final_hidden_states = torch.zeros_like(flat_hidden_states)
62
+ for k in range(self.top_k):
63
+ expert_indices_k = selected_experts[:, k]
64
+ routing_weights_k = routing_weights[:, k]
65
+ for i in range(self.num_experts):
66
+ mask = expert_indices_k == i
67
+ if mask.any():
68
+ expert_output = self.experts[i](flat_hidden_states[mask])
69
+ final_hidden_states.index_add_(0, torch.where(mask)[0], expert_output * routing_weights_k[mask].unsqueeze(1))
70
+ return final_hidden_states.view(*original_shape)
71
+
72
+ # --- 2. Main Program: Load Model and Start Conversation ---
73
+ # --- 2. 主程序:加载模型并开始对话 ---
74
+ def main():
75
+ device = "cuda" if torch.cuda.is_available() else "cpu"
76
+
77
+ # --- Model Loading / 模型加载 ---
78
+ print(f"Loading tokenizer from '{MODEL_PATH}'...")
79
+ tokenizer = AutoTokenizer.from_pretrained(MODEL_PATH)
80
+
81
+ print(f"Manually rebuilding MoE model structure...")
82
+ # First, we create an "empty shell" of our model with the correct architecture but random weights.
83
+ # `from_config` builds the structure without loading any weights.
84
+ # 首先,我们用`from_config`创建一个拥有正确架构但权重是随机的“空壳”模型。
85
+ # 这一步只搭建骨架,不加载任何权重。
86
+ moe_model = AutoModelForCausalLM.from_config(config)
87
+
88
+ # Then, we perform the "architectural surgery" again, replacing standard MLPs with our MoEModules.
89
+ # 然后,我们再次手动进行“架构手术”,把标准的MLP替换成我们的MoE模块。
90
+ for i, layer in enumerate(moe_model.model.layers):
91
+ layer.mlp = MoEModule(config)
92
+
93
+ print(f"Loading your trained MoE weights into the correct structure...")
94
+ from safetensors.torch import load_file
95
+ state_dict = load_file(os.path.join(MODEL_PATH, "model.safetensors"), device="cpu")
96
+
97
+ # Use `strict=False` for flexible loading, then manually tie the weights.
98
+ # This handles the missing `lm_head.weight` key caused by weight tying.
99
+ # 使用`strict=False`进行灵活加载,然后手动绑定权重。
100
+ # 这个操作处理了因权重绑定而导致的`lm_head.weight`键缺失的问题。
101
+ moe_model.load_state_dict(state_dict, strict=False)
102
+ moe_model.tie_weights()
103
+
104
+ # Move the finalized model to the GPU and set it to evaluation mode.
105
+ # 将最终完成的模型移动到GPU,并设置为评估模式。
106
+ moe_model.to(device, dtype=torch.bfloat16)
107
+ moe_model.eval()
108
+ print("--- MoE Model is ready for conversation! ---")
109
+ print("Type 'exit' or 'quit' to end the chat.\n")
110
+
111
+ # --- Interactive Conversation Loop / 交互式对话循环 ---
112
+ messages = []
113
+ while True:
114
+ try:
115
+ user_input = input("You: ")
116
+ if user_input.lower() in ["exit", "quit"]:
117
+ print("Goodbye!")
118
+ break
119
+
120
+ # Step 1: Add the user's input to the conversation history.
121
+ # 步骤 1: 将用户的输入添加到对话历史中。
122
+ messages.append({"role": "user", "content": user_input})
123
+
124
+ # Step 2: Format the entire conversation history using the chat template.
125
+ # `add_generation_prompt=True` adds the starting tokens for the assistant's turn.
126
+ # 步骤 2: 使用聊天模板格式化完整的对话历史。
127
+ # `add_generation_prompt=True` 会在末尾添加助手角色的起始标记。
128
+ prompt_text = tokenizer.apply_chat_template(
129
+ messages,
130
+ tokenize=False,
131
+ add_generation_prompt=True
132
+ )
133
+
134
+ # Step 3: Encode the input text and move it to the GPU.
135
+ # 步骤 3: 编码输入文本并将其发送到GPU。
136
+ inputs = tokenizer(prompt_text, return_tensors="pt").to(device)
137
+
138
+ # Step 4: Generate a response.
139
+ # 步骤 4: 生成回复。
140
+ with torch.no_grad():
141
+ outputs = moe_model.generate(
142
+ **inputs,
143
+ max_new_tokens=256,
144
+ temperature=0.7,
145
+ top_p=0.9,
146
+ do_sample=True
147
+ )
148
+
149
+ # Step 5: Decode and clean the output.
150
+ # `outputs[0]` contains the full conversation (input + output). We need to extract only the new part.
151
+ # 步骤 5: 解码并清理输出。
152
+ # `outputs[0]` 包含了完整的对话(输入+输出),我们需要从中提取出模型新生成的部分。
153
+ full_response = tokenizer.decode(outputs[0], skip_special_tokens=True)
154
+ # Find the newly generated part by removing the original prompt from the full response.
155
+ # 通过从完整回复中移除原始的prompt来找到新生成的部分。
156
+ assistant_prompt_start = "<|assistant|>\n"
157
+ # This is a robust way to find the start of the assistant's actual response
158
+ assistant_response_start_index = full_response.rfind(assistant_prompt_start)
159
+ if assistant_response_start_index != -1:
160
+ model_response = full_response[assistant_response_start_index + len(assistant_prompt_start):].strip()
161
+ else:
162
+ # Fallback for simpler cases
163
+ model_response = full_response.replace(prompt_text.replace("<s>", "").replace("</s>", ""), "").strip()
164
+
165
+ print(f"MoE Model: {model_response}")
166
+
167
+ # Step 6: Add the model's response to the history for multi-turn conversations.
168
+ # 步骤 6: 将模型的回复也添加到对话历史中,以便进行多轮对话。
169
+ messages.append({"role": "assistant", "content": model_response})
170
+
171
+ except KeyboardInterrupt:
172
+ print("\nGoodbye!")
173
+ break
174
+
175
+ # Script entry point / 脚本入口
176
+ if __name__ == "__main__":
177
+ main()
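
The post-processing above assumes the assistant turn begins with "<|assistant|>\n" in the decoded text. A quick hedged check of that assumption for whichever tokenizer is actually loaded (not part of the uploaded script):

# Render a one-message conversation and inspect the trailing generation prompt;
# whatever appears at the end is the marker that rfind() should search for.
preview = tokenizer.apply_chat_template(
    [{"role": "user", "content": "hi"}],
    tokenize=False,
    add_generation_prompt=True,
)
print(repr(preview))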
train/test_moe_model.py ADDED
@@ -0,0 +1,155 @@
1
+ # ==============================================================================
2
+ # Smol-MoE 8x135M - "The Mind-Reader" Test Script
3
+ # (Final Version with Correct Loading Logic)
4
+ # ==============================================================================
5
+
6
+ import torch
7
+ import torch.nn as nn
8
+ import torch.nn.functional as F
9
+ from transformers import AutoConfig, AutoModelForCausalLM, AutoTokenizer
10
+ from transformers.models.llama.modeling_llama import LlamaMLP, LlamaAttention, LlamaRMSNorm, LlamaForCausalLM
11
+ import numpy as np
12
+ import os
13
+ # --- 1. CRITICAL: Re-define all of your custom modules ---
14
+ # We need these class definitions to manually rebuild the correct model structure
15
+
16
+ MODEL_PATH = "./SmolMoE-8x135M-Instruct-v1-Trained"
17
+ config = AutoConfig.from_pretrained(MODEL_PATH)
18
+ NUM_EXPERTS = config.moe_num_experts
19
+ TOP_K = config.moe_top_k
20
+
21
+ EXPERT_NAMES = [
22
+ "Actor", "Analyst", "Coder", "Encyclopedia",
23
+ "Guardian", "Summarizer", "Thinker", "Writer"
24
+ ]
25
+
26
+ class MoERouter(nn.Module):
27
+ def __init__(self, hidden_size: int, num_experts: int):
28
+ super().__init__()
29
+ self.layer = nn.Linear(hidden_size, num_experts, bias=False)
30
+ def forward(self, hidden_states):
31
+ return self.layer(hidden_states)
32
+
33
+ class MoEModule(nn.Module):
34
+ def __init__(self, config):
35
+ super().__init__()
36
+ self.hidden_size = config.hidden_size
37
+ self.top_k = TOP_K
38
+ self.num_experts = NUM_EXPERTS
39
+ self.router = MoERouter(self.hidden_size, self.num_experts)
40
+ self.experts = nn.ModuleList([LlamaMLP(config) for _ in range(self.num_experts)])
41
+
42
+ def forward(self, hidden_states):
43
+ original_shape = hidden_states.shape
44
+ flat_hidden_states = hidden_states.view(-1, self.hidden_size)
45
+ router_logits = self.router(flat_hidden_states)
46
+ routing_weights = F.softmax(router_logits, dim=-1, dtype=torch.float)
47
+ routing_weights, selected_experts = torch.topk(routing_weights, self.top_k, dim=-1)
48
+ routing_weights /= routing_weights.sum(dim=-1, keepdim=True)
49
+ routing_weights = routing_weights.to(hidden_states.dtype)
50
+ final_hidden_states = torch.zeros_like(flat_hidden_states)
51
+ for k in range(self.top_k):
52
+ expert_indices_k = selected_experts[:, k]
53
+ routing_weights_k = routing_weights[:, k]
54
+ for i in range(self.num_experts):
55
+ mask = expert_indices_k == i
56
+ if mask.any():
57
+ expert_output = self.experts[i](flat_hidden_states[mask])
58
+ final_hidden_states.index_add_(0, torch.where(mask)[0], expert_output * routing_weights_k[mask].unsqueeze(1))
59
+ return final_hidden_states.view(*original_shape)
60
+
61
+ # --- 2. Core tools for the diagnostic test ---
62
+ captured_router_weights = {}
63
+ def get_router_weights_hook(layer_idx):
64
+ """A factory that creates a hook function."""
65
+ def hook(module, input, output):
66
+ # input[0] is the hidden_states tensor entering the MoE module
67
+ router_logits = module.router(input[0])
68
+ # We compute the average routing probability over the whole sequence (all tokens)
69
+ avg_probs = F.softmax(router_logits, dim=-1).mean(dim=[0, 1])
70
+
71
+ # *** This is the final fix: cast to float32 before converting to numpy ***
72
+ captured_router_weights[layer_idx] = avg_probs.detach().cpu().to(torch.float32).numpy()
73
+ return hook
74
+
75
+ def visualize_router_decisions(prompt):
76
+ print("\n" + "="*80)
77
+ print(f"ROUTER DECISION ANALYSIS for Prompt: '{prompt[:50]}...'")
78
+ print("="*80)
79
+ print(f"{'Layer':<7} | {'Dominant Expert(s)':<45} | {'Confidence'}")
80
+ print("-"*80)
81
+ for layer_idx, weights in captured_router_weights.items():
82
+ top2_indices = np.argsort(weights)[-2:][::-1]
83
+ dominant_experts_str = f"1. {EXPERT_NAMES[top2_indices[0]]} | 2. {EXPERT_NAMES[top2_indices[1]]}"
84
+ confidence_str = f"({weights[top2_indices[0]]:.1%} | {weights[top2_indices[1]]:.1%})"
85
+ print(f"Layer {layer_idx:<4} | {dominant_experts_str:<45} | {confidence_str}")
86
+ print("="*80 + "\n")
87
+
88
+ # --- 3. Main test workflow ---
89
+ def main():
90
+ device = "cuda" if torch.cuda.is_available() else "cpu"
91
+
92
+ print(f"Loading tokenizer from '{MODEL_PATH}'...")
93
+ tokenizer = AutoTokenizer.from_pretrained(MODEL_PATH)
94
+
95
+ print(f"Manually rebuilding MoE model structure...")
96
+ # First, use `from_config` to build an "empty shell" with the correct structure but random weights
97
+ moe_model = AutoModelForCausalLM.from_config(config)
98
+
99
+ # Then, manually perform the "architectural surgery": replace each standard MLP with our MoE module
100
+ for i, layer in enumerate(moe_model.model.layers):
101
+ layer.mlp = MoEModule(config)
102
+
103
+ print(f"Loading your trained MoE weights into the correct structure...")
104
+ # Load the weights from the safetensors file
105
+ from safetensors.torch import load_file
106
+ state_dict = load_file(os.path.join(MODEL_PATH, "model.safetensors"), device="cpu")
107
+
108
+ # *** Final fix, step 1: load flexibly with strict=False ***
109
+ # We know lm_head.weight will be missing, so we allow this "non-strict" load
110
+ moe_model.load_state_dict(state_dict, strict=False)
111
+
112
+ # *** Final fix, step 2: tie the weights manually ***
113
+ # Based on the "tie_word_embeddings" setting in the config, this ties lm_head to the token embeddings
114
+ moe_model.tie_weights()
115
+
116
+ moe_model.to(device, dtype=torch.bfloat16)
117
+ moe_model.eval()
118
+ print("--- Custom MoE Model Successfully Loaded and Finalized! ---")
119
+
120
+ # Install the "listening devices" (hooks) for the diagnostic test
121
+ hooks = []
122
+ for i, layer in enumerate(moe_model.model.layers):
123
+ # Make sure we register the hook on a MoEModule, not a standard LlamaMLP
124
+ if isinstance(layer.mlp, MoEModule):
125
+ hook = layer.mlp.register_forward_hook(get_router_weights_hook(i))
126
+ hooks.append(hook)
127
+
128
+ # Design a series of "exam questions"
129
+ test_prompts = {
130
+ "Coder": "Write a Python function that takes a list of numbers and returns a new list with only the even numbers.",
131
+ "Writer": "In a world where shadows have a life of their own, a young lamplighter discovers a terrible secret. Write the opening paragraph.",
132
+ "Thinker": "If all bloops are gloops, and some gloops are zloops, is it certain that some bloops are zloops? Explain your reasoning.",
133
+ "Encyclopedia": "What were the primary economic and political causes of the French Revolution?",
134
+ "Multi-Expert": "In the style of a Shakespearean tragedy, write a short monologue for a software developer lamenting a bug in their code. Include a comment line from the code."
135
+ }
136
+
137
+ for expert_name, prompt in test_prompts.items():
138
+ captured_router_weights.clear()
139
+ print(f"\n--- Testing for: {expert_name} Expert ---")
140
+ print(f"Prompt: {prompt}")
141
+ inputs = tokenizer(prompt, return_tensors="pt").to(device)
142
+ with torch.no_grad():
143
+ outputs = moe_model.generate(**inputs, max_new_tokens=100, do_sample=True, top_k=50, top_p=0.95)
144
+ generated_text = tokenizer.decode(outputs[0], skip_special_tokens=True)
145
+ print("\n--- Generated Text ---")
146
+ print(generated_text)
147
+ print("--- End of Generated Text ---")
148
+ visualize_router_decisions(prompt)
149
+
150
+ for hook in hooks:
151
+ hook.remove()
152
+ print("All tests complete and hooks have been removed.")
153
+
154
+ if __name__ == "__main__":
155
+ main()
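
The load above relies on strict=False plus tie_weights() to cover the tied lm_head.weight. A small hedged addition (not in the uploaded file) that makes that assumption visible at run time:

# load_state_dict returns the keys it could not match; with weight tying in effect,
# missing_keys should contain only 'lm_head.weight' and unexpected_keys should be empty.
load_result = moe_model.load_state_dict(state_dict, strict=False)
print("Missing keys:", load_result.missing_keys)
print("Unexpected keys:", load_result.unexpected_keys)
moe_model.tie_weights()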
train/test_moe_model_zhcn_en.py ADDED
@@ -0,0 +1,205 @@
1
+ # ==============================================================================
2
+ # Smol-MoE 8x135M - "The Mind-Reader" Test Script
3
+ # (Final Version with Correct Loading Logic)
4
+ #
5
+ # Smol-MoE 8x135M - “读心器”测试脚本
6
+ # (包含正确加载逻辑的最终版本)
7
+ # ==============================================================================
8
+
9
+ # --- Core Library Imports / 核心库导入 ---
10
+ import torch
11
+ import torch.nn as nn
12
+ import torch.nn.functional as F
13
+ from transformers import AutoConfig, AutoModelForCausalLM, AutoTokenizer
14
+ from transformers.models.llama.modeling_llama import LlamaMLP, LlamaForCausalLM # We need LlamaMLP for the expert definition / 我们需要 LlamaMLP 来定义专家
15
+ import numpy as np
16
+ import os # We need this for path operations / 我们需要 os 库来处理文件路径
17
+
18
+ # --- 1. CRITICAL: Re-define Your Custom Architecture ---
19
+ # --- 1. 关键:重新定义你的所有自定义模块 ---
20
+ # When loading a model with a custom architecture, Hugging Face needs to know the definition of the custom classes.
21
+ # By defining them here, we allow `from_pretrained` to correctly reconstruct our unique MoE model.
22
+ # 在加载一个拥有自定义架构的模型时,Hugging Face 需要知道这些自定义类的定义。
23
+ # 在这里定义它们,我们才能让 `from_pretrained` 函数成功地重建我们独一无二的MoE模型。
24
+
25
+ # --- Model Configuration / 模型配置 ---
26
+ MODEL_PATH = "./SmolMoE-8x135M-Instruct-v1-Trained"
27
+ config = AutoConfig.from_pretrained(MODEL_PATH)
28
+ # Load our custom MoE parameters from the saved config file.
29
+ # 从我们保存的配置文件中,加载自定义的MoE参数。
30
+ NUM_EXPERTS = config.moe_num_experts
31
+ TOP_K = config.moe_top_k
32
+
33
+ # A list of expert names for clear visualization later. The order must match the training script.
34
+ # 用于后续清晰可视化的专家名称列表。顺序必须和训练脚本中的保持一致。
35
+ EXPERT_NAMES = [
36
+ "Actor", "Analyst", "Coder", "Encyclopedia",
37
+ "Guardian", "Summarizer", "Thinker", "Writer"
38
+ ]
39
+
40
+ class MoERouter(nn.Module):
41
+ """The Router module. Its job is to score experts for each token."""
42
+ """路由器模块。它的工作是为每个token给所有专家打分。"""
43
+ def __init__(self, hidden_size: int, num_experts: int):
44
+ super().__init__()
45
+ self.layer = nn.Linear(hidden_size, num_experts, bias=False)
46
+ def forward(self, hidden_states):
47
+ return self.layer(hidden_states)
48
+
49
+ class MoEModule(nn.Module):
50
+ """The custom Mixture-of-Experts module that replaces the standard FFN."""
51
+ """我们自定义的混合专家模块,它替换了标准的FFN。"""
52
+ def __init__(self, config):
53
+ super().__init__()
54
+ self.hidden_size = config.hidden_size
55
+ self.top_k = TOP_K
56
+ self.num_experts = NUM_EXPERTS
57
+ self.router = MoERouter(self.hidden_size, self.num_experts)
58
+ self.experts = nn.ModuleList([LlamaMLP(config) for _ in range(self.num_experts)])
59
+
60
+ def forward(self, hidden_states):
61
+ original_shape = hidden_states.shape
62
+ flat_hidden_states = hidden_states.view(-1, self.hidden_size)
63
+ router_logits = self.router(flat_hidden_states)
64
+ routing_weights = F.softmax(router_logits, dim=-1, dtype=torch.float)
65
+ routing_weights, selected_experts = torch.topk(routing_weights, self.top_k, dim=-1)
66
+ routing_weights /= routing_weights.sum(dim=-1, keepdim=True)
67
+ routing_weights = routing_weights.to(hidden_states.dtype)
68
+ final_hidden_states = torch.zeros_like(flat_hidden_states)
69
+ for k in range(self.top_k):
70
+ expert_indices_k = selected_experts[:, k]
71
+ routing_weights_k = routing_weights[:, k]
72
+ for i in range(self.num_experts):
73
+ mask = expert_indices_k == i
74
+ if mask.any():
75
+ expert_output = self.experts[i](flat_hidden_states[mask])
76
+ final_hidden_states.index_add_(0, torch.where(mask)[0], expert_output * routing_weights_k[mask].unsqueeze(1))
77
+ return final_hidden_states.view(*original_shape)
78
+
79
+ # --- 2. Core Diagnostic Tools / 诊断测试的核心工具 ---
80
+ # This dictionary will store the router decisions captured by our hooks.
81
+ # 这个字典将用于存储我们的钩子捕获到的路由器决策数据。
82
+ captured_router_weights = {}
83
+
84
+ def get_router_weights_hook(layer_idx):
85
+ """This is a factory function that creates our hook."""
86
+ """这是一个创建钩子函数的工厂函数。"""
87
+ def hook(module, input, output):
88
+ # `input[0]` is the hidden_states tensor passed to the MoE module.
89
+ # `input[0]` 是传入MoE模块的hidden_states张量。
90
+ router_logits = module.router(input[0])
91
+ # We calculate the average routing probability for all tokens in the sequence.
92
+ # 我们计算序列中所有token的平均路由概率。
93
+ avg_probs = F.softmax(router_logits, dim=-1).mean(dim=[0, 1])
94
+
95
+ # *** FINAL FIX: Convert from BFloat16 to Float32 before converting to NumPy. ***
96
+ # NumPy does not support the bfloat16 dtype, so we must convert it first.
97
+ # *** 最终修复:在转换为numpy数组前,先将数据格式从BFloat16转换为Float32。***
98
+ # NumPy库不支持bfloat16这种数据类型,所以我们必须先进行转换。
99
+ captured_router_weights[layer_idx] = avg_probs.detach().cpu().to(torch.float32).numpy()
100
+ return hook
101
+
102
+ def visualize_router_decisions(prompt):
103
+ """A helper function to print the captured router decisions in a nice table."""
104
+ """一个辅助函数,用于将捕获到的路由器决策以漂亮的表格形式打印出来。"""
105
+ print("\n" + "="*80)
106
+ print(f"ROUTER DECISION ANALYSIS for Prompt: '{prompt[:50]}...'")
107
+ print("="*80)
108
+ print(f"{'Layer':<7} | {'Dominant Expert(s)':<45} | {'Confidence'}")
109
+ print("-"*80)
110
+ for layer_idx, weights in captured_router_weights.items():
111
+ top2_indices = np.argsort(weights)[-2:][::-1]
112
+ dominant_experts_str = f"1. {EXPERT_NAMES[top2_indices[0]]} | 2. {EXPERT_NAMES[top2_indices[1]]}"
113
+ confidence_str = f"({weights[top2_indices[0]]:.1%} | {weights[top2_indices[1]]:.1%})"
114
+ print(f"Layer {layer_idx:<4} | {dominant_experts_str:<45} | {confidence_str}")
115
+ print("="*80 + "\n")
116
+
117
+ # --- 3. Main Testing Workflow / 主测试流程 ---
118
+ def main():
119
+ device = "cuda" if torch.cuda.is_available() else "cpu"
120
+
121
+ print(f"Loading tokenizer from '{MODEL_PATH}'...")
122
+ tokenizer = AutoTokenizer.from_pretrained(MODEL_PATH)
123
+
124
+ print(f"Manually rebuilding MoE model structure...")
125
+ # First, we create an "empty shell" of our model with the correct architecture but random weights.
126
+ # `from_config` builds the structure without loading any weights.
127
+ # 首先,我们用`from_config`创建一个拥有正确架构但权重是随机的“空壳”模型。
128
+ # 这一步只搭建骨架,不加载任何权重。
129
+ moe_model = AutoModelForCausalLM.from_config(config)
130
+
131
+ # Then, we perform the "architectural surgery" again, replacing standard MLPs with our MoEModules.
132
+ # 然后,我们再次手动进行“架构手术”,把标准的MLP替换成我们的MoE模块。
133
+ for i, layer in enumerate(moe_model.model.layers):
134
+ layer.mlp = MoEModule(config)
135
+
136
+ print(f"Loading your trained MoE weights into the correct structure...")
137
+ # Load the weights from the safetensors file.
138
+ # 从safetensors文件加载权重。
139
+ from safetensors.torch import load_file
140
+ state_dict = load_file(os.path.join(MODEL_PATH, "model.safetensors"), device="cpu")
141
+
142
+ # *** FINAL FIX #1: Use `strict=False` for flexible loading. ***
143
+ # We know `lm_head.weight` is missing because of `tie_word_embeddings`, so we allow this "inexact" loading.
144
+ # *** 最终修复 #1:使用`strict=False`进行灵活加载。***
145
+ # 我们知道因为`tie_word_embeddings`的设置,`lm_head.weight`是缺失的,所以我们允许这种“不严格”的加载。
146
+ moe_model.load_state_dict(state_dict, strict=False)
147
+
148
+ # *** FINAL FIX #2: Manually tie the weights. ***
149
+ # This function reads the `tie_word_embeddings` setting from the config and correctly links the lm_head to the token embeddings.
150
+ # *** 最终修复 #2:手动执行权重绑定。***
151
+ # 这个函数会根据config中的`tie_word_embeddings`设置,将lm_head和词嵌入层正确地绑定在一起。
152
+ moe_model.tie_weights()
153
+
154
+ # Move the finalized model to the GPU and set it to evaluation mode.
155
+ # 将最终完成的模型移动到GPU,并设置为评估模式。
156
+ moe_model.to(device, dtype=torch.bfloat16)
157
+ moe_model.eval()
158
+ print("--- Custom MoE Model Successfully Loaded and Finalized! ---")
159
+
160
+ # Install our "listening devices" (hooks) on each MoE layer for diagnostics.
161
+ # 为诊断测试,在每个MoE层上都安装我们的“窃听器”(钩子)。
162
+ hooks = []
163
+ for i, layer in enumerate(moe_model.model.layers):
164
+ if isinstance(layer.mlp, MoEModule):
165
+ hook = layer.mlp.register_forward_hook(get_router_weights_hook(i))
166
+ hooks.append(hook)
167
+
168
+ # Design a series of "exam questions" to test different experts.
169
+ # 设计一系列“考题”来测试不同的专家。
170
+ test_prompts = {
171
+ "Coder": "Write a Python function that takes a list of numbers and returns a new list with only the even numbers.",
172
+ "Writer": "In a world where shadows have a life of their own, a young lamplighter discovers a terrible secret. Write the opening paragraph.",
173
+ "Thinker": "If all bloops are gloops, and some gloops are zloops, is it certain that some bloops are zloops? Explain your reasoning.",
174
+ "Encyclopedia": "What were the primary economic and political causes of the French Revolution?",
175
+ "Multi-Expert": "In the style of a Shakespearean tragedy, write a short monologue for a software developer lamenting a bug in their code. Include a comment line from the code."
176
+ }
177
+
178
+ # The main testing loop.
179
+ # 主测试循环。
180
+ for expert_name, prompt in test_prompts.items():
181
+ captured_router_weights.clear() # Clear data from the previous run / 清空上一次的捕获数据
182
+ print(f"\n--- Testing for: {expert_name} Expert ---")
183
+ print(f"Prompt: {prompt}")
184
+ inputs = tokenizer(prompt, return_tensors="pt").to(device)
185
+
186
+ # 1. Functional Test: Generate text / 1. 功能测试:生成文本
187
+ with torch.no_grad():
188
+ outputs = moe_model.generate(**inputs, max_new_tokens=100, do_sample=True, top_k=50, top_p=0.95)
189
+ generated_text = tokenizer.decode(outputs[0], skip_special_tokens=True)
190
+ print("\n--- Generated Text ---")
191
+ print(generated_text)
192
+ print("--- End of Generated Text ---")
193
+
194
+ # 2. Diagnostic Test: Visualize router decisions / 2. 诊断测试:可视化路由决策
195
+ visualize_router_decisions(prompt)
196
+
197
+ # Clean up by removing all hooks to prevent memory leaks.
198
+ # 清理工作:移除所有钩子以防止内存泄漏。
199
+ for hook in hooks:
200
+ hook.remove()
201
+ print("All tests complete and hooks have been removed.")
202
+
203
+ # Script entry point / 脚本入口
204
+ if __name__ == "__main__":
205
+ main()
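
Both test scripts hard-code EXPERT_NAMES and note that the order must match the training script. A small hedged sketch of deriving the names from the EXPERT_DIRS list instead (assumes that list is copied or imported from the training script):

EXPERT_DIRS = [
    "SmolLM2-135M-Instruct-Actor", "SmolLM2-135M-Instruct-Analyst",
    "SmolLM2-135M-Instruct-Coder", "SmolLM2-135M-Instruct-Encyclopedia",
    "SmolLM2-135M-Instruct-Guardian", "SmolLM2-135M-Instruct-Summarizer",
    "SmolLM2-135M-Instruct-Thinker", "SmolLM2-135M-Instruct-Writer"
]
# "SmolLM2-135M-Instruct-Coder" -> "Coder"; the ordering can no longer drift out of sync.
EXPERT_NAMES = [d.rsplit("-", 1)[-1] for d in EXPERT_DIRS]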
train/train_moe_router.py ADDED
@@ -0,0 +1,163 @@
1
+ # ==============================================================================
2
+ # Smol-MoE 8x135M - Master Script
3
+ # (Final Version, All Fixes Included)
4
+ # ==============================================================================
5
+
6
+ import torch
7
+ import torch.nn as nn
8
+ import torch.optim as optim
9
+ import torch.nn.functional as F
10
+ from transformers import AutoConfig, AutoModelForCausalLM, AutoTokenizer # <<< This is the most critical fix! Make sure AutoTokenizer is imported here
11
+ from transformers.models.llama.modeling_llama import LlamaMLP
12
+ from safetensors.torch import load_file # <<< Use the correct safetensors loader
13
+ import os
14
+ import shutil
15
+ import numpy as np
16
+ import time
17
+
18
+ # --- 0. Configuration & Setup ---
19
+ MODEL_NAME = "./SmolLM2-135M-Instruct"
20
+ BASE_EXPERT_PATH = "./models"
21
+ EXPERT_DIRS = [
22
+ "SmolLM2-135M-Instruct-Actor", "SmolLM2-135M-Instruct-Analyst",
23
+ "SmolLM2-135M-Instruct-Coder", "SmolLM2-135M-Instruct-Encyclopedia",
24
+ "SmolLM2-135M-Instruct-Guardian", "SmolLM2-135M-Instruct-Summarizer",
25
+ "SmolLM2-135M-Instruct-Thinker", "SmolLM2-135M-Instruct-Writer"
26
+ ]
27
+ NUM_EXPERTS = 8
28
+ TOP_K = 2
29
+ LEARNING_RATE = 0.001
30
+ EPOCHS = 20 # Since we know the mock data cannot teach the model anything, 20 epochs are enough to validate the pipeline
31
+ BATCH_SIZE = 4
32
+ SEQUENCE_LENGTH = 128
33
+ LB_LOSS_COEFFICIENT = 0.01
34
+ device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
35
+ print(f"Using device: {device}")
36
+
37
+
38
+ # --- 1. Define the MoE Architecture Components ---
39
+ class MoERouter(nn.Module):
40
+ def __init__(self, hidden_size: int, num_experts: int):
41
+ super().__init__()
42
+ self.layer = nn.Linear(hidden_size, num_experts, bias=False)
43
+ def forward(self, hidden_states):
44
+ return self.layer(hidden_states)
45
+
46
+ class MoEModule(nn.Module):
47
+ def __init__(self, config):
48
+ super().__init__()
49
+ self.hidden_size = config.hidden_size
50
+ self.top_k = TOP_K
51
+ self.num_experts = NUM_EXPERTS
52
+ self.router = MoERouter(self.hidden_size, self.num_experts)
53
+ self.experts = nn.ModuleList([LlamaMLP(config) for _ in range(self.num_experts)])
54
+ self.most_recent_lb_loss = None
55
+
56
+ def forward(self, hidden_states):
57
+ original_shape = hidden_states.shape
58
+ flat_hidden_states = hidden_states.view(-1, self.hidden_size)
59
+ router_logits = self.router(flat_hidden_states)
60
+ routing_weights = F.softmax(router_logits, dim=-1, dtype=torch.float)
61
+ routing_weights, selected_experts = torch.topk(routing_weights, self.top_k, dim=-1)
62
+ routing_weights /= routing_weights.sum(dim=-1, keepdim=True)
63
+ routing_weights = routing_weights.to(hidden_states.dtype)
64
+ router_probs_full = F.softmax(router_logits, dim=-1, dtype=torch.float)
65
+ avg_expert_prob = router_probs_full.mean(dim=0)
66
+ expert_mask_for_lb = F.one_hot(selected_experts, num_classes=self.num_experts).sum(dim=1)
67
+ avg_expert_fraction = expert_mask_for_lb.float().mean(dim=0)
68
+ self.most_recent_lb_loss = self.num_experts * torch.sum(avg_expert_prob * avg_expert_fraction)
69
+ final_hidden_states = torch.zeros_like(flat_hidden_states)
70
+ for k in range(self.top_k):
71
+ expert_indices_k = selected_experts[:, k]
72
+ routing_weights_k = routing_weights[:, k]
73
+ for i in range(self.num_experts):
74
+ mask = expert_indices_k == i
75
+ if mask.any():
76
+ expert_output = self.experts[i](flat_hidden_states[mask])
77
+ final_hidden_states.index_add_(0, torch.where(mask)[0], expert_output * routing_weights_k[mask].unsqueeze(1))
78
+ return final_hidden_states.view(*original_shape)
79
+
80
+ # --- 2. The Grand Assembly Function ---
81
+ def create_moe_model():
82
+ print("--- Starting Architectural Surgery ---")
83
+ config = AutoConfig.from_pretrained(MODEL_NAME)
84
+ print("Step 1: Loading base model skeleton...")
85
+ base_model = AutoModelForCausalLM.from_pretrained(
86
+ os.path.join(BASE_EXPERT_PATH, EXPERT_DIRS[0]),
87
+ torch_dtype=torch.bfloat16,
88
+ device_map=device
89
+ )
90
+ print("Step 2: Pre-loading all expert weights into CPU memory for efficiency...")
91
+ all_experts_state_dicts = [
92
+ load_file(os.path.join(BASE_EXPERT_PATH, expert_dir, 'model.safetensors'), device='cpu')
93
+ for expert_dir in EXPERT_DIRS
94
+ ]
95
+ print("All expert weights pre-loaded.")
96
+ print("Step 3: Replacing FFNs with MoE modules and transplanting expert weights...")
97
+ for layer_idx, layer in enumerate(base_model.model.layers):
98
+ layer.mlp = MoEModule(config).to(device, dtype=torch.bfloat16)
99
+ for expert_idx in range(NUM_EXPERTS):
100
+ expert_state_dict = all_experts_state_dicts[expert_idx]
101
+ expert_mlp_weights = {
102
+ k.replace(f"model.layers.{layer_idx}.mlp.", ""): v
103
+ for k, v in expert_state_dict.items()
104
+ if f"model.layers.{layer_idx}.mlp." in k
105
+ }
106
+ layer.mlp.experts[expert_idx].load_state_dict(expert_mlp_weights)
107
+ print("Step 4: Freezing all parameters except for the routers...")
108
+ for name, param in base_model.named_parameters():
109
+ if "router" not in name:
110
+ param.requires_grad = False
111
+ print("\n--- Surgery Complete! MoE Model is assembled and ready for training. ---")
112
+ trainable_params = sum(p.numel() for p in base_model.parameters() if p.requires_grad)
113
+ total_params = sum(p.numel() for p in base_model.parameters())
114
+ print(f"Total Parameters: {total_params / 1e6:.2f}M")
115
+ print(f"Trainable Parameters (Routers): {trainable_params}")
116
+ return base_model
117
+
118
+ # --- 3. The Main Training & Saving Process ---
119
+ def main():
120
+ moe_model = create_moe_model()
121
+ optimizer = optim.AdamW([p for p in moe_model.parameters() if p.requires_grad], lr=LEARNING_RATE)
122
+ print("\n--- Preparing Simulated Mixed Dataset for Training ---")
123
+ mock_input_ids = torch.randint(0, moe_model.config.vocab_size, (BATCH_SIZE, SEQUENCE_LENGTH), device=device)
124
+ mock_labels = mock_input_ids.clone()
125
+ print("--- Starting Router Training Loop (Optimized & Corrected) ---")
126
+ moe_model.train()
127
+ start_time = time.time()
128
+ for epoch in range(EPOCHS):
129
+ optimizer.zero_grad()
130
+ outputs = moe_model(input_ids=mock_input_ids, labels=mock_labels)
131
+ main_loss = outputs.loss
132
+ total_lb_loss = 0.0
133
+ for layer in moe_model.model.layers:
134
+ total_lb_loss += layer.mlp.most_recent_lb_loss
135
+ total_loss = main_loss + LB_LOSS_COEFFICIENT * total_lb_loss
136
+ total_loss.backward()
137
+ optimizer.step()
138
+ if (epoch + 1) % 10 == 0:
139
+ elapsed_time = time.time() - start_time
140
+ print(f"Epoch [{epoch+1:03d}/{EPOCHS}] | Total Loss: {total_loss.item():.4f} | "
141
+ f"Main Loss: {main_loss.item():.4f} | "
142
+ f"Avg LB Loss: {(total_lb_loss.item() / moe_model.config.num_hidden_layers):.4f} | "
143
+ f"Time: {elapsed_time:.2f}s")
144
+ start_time = time.time()
145
+ print("\n--- Router Training Complete! ---")
146
+ print("\n--- Phase 5: Saving the fully trained MoE model to disk ---")
147
+ OUTPUT_MODEL_DIR = "./SmolMoE-8x135M-Instruct-v1-Trained"
148
+ if os.path.exists(OUTPUT_MODEL_DIR):
149
+ shutil.rmtree(OUTPUT_MODEL_DIR)
150
+ os.makedirs(OUTPUT_MODEL_DIR)
151
+ print("Updating model config with MoE-specific parameters...")
152
+ moe_model.config.moe_num_experts = NUM_EXPERTS
153
+ moe_model.config.moe_top_k = TOP_K
154
+ print(f"Saving model to '{OUTPUT_MODEL_DIR}'...")
155
+ moe_model.save_pretrained(OUTPUT_MODEL_DIR)
156
+ print("Saving tokenizer...")
157
+ tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME)
158
+ tokenizer.save_pretrained(OUTPUT_MODEL_DIR)
159
+ print("\n--- Model successfully saved! ---")
160
+ print("You can now load this model in other scripts, but you must re-define the custom MoE classes first.")
161
+
162
+ if __name__ == "__main__":
163
+ main()
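
The training loop above uses a random mock batch only to validate the pipeline. A minimal, hypothetical sketch of feeding real mixed-domain text instead (the file name and the "text" field are illustrative, and the tokenizer is loaded up front rather than only at save time):

from datasets import load_dataset
from torch.utils.data import DataLoader

tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME)
if tokenizer.pad_token is None:
    tokenizer.pad_token = tokenizer.eos_token  # ensure padding is possible

raw = load_dataset("json", data_files="mixed_router_data.jsonl", split="train")

def tokenize(batch):
    return tokenizer(batch["text"], truncation=True, padding="max_length", max_length=SEQUENCE_LENGTH)

tokenized = raw.map(tokenize, batched=True, remove_columns=raw.column_names)
tokenized.set_format("torch")
loader = DataLoader(tokenized, batch_size=BATCH_SIZE, shuffle=True)

# inside the epoch loop, replace the mock tensors with a real batch:
#     batch = next(iter(loader))
#     input_ids = batch["input_ids"].to(device)
#     outputs = moe_model(input_ids=input_ids, labels=input_ids)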
train/train_moe_router_en.py ADDED
@@ -0,0 +1,289 @@
1
+ # ==============================================================================
2
+ # Smol-MoE 8x135M - The "Genesis" Master Script
3
+ # (Final Optimized Version with All Fixes & Detailed Comments)
4
+ # ==============================================================================
5
+
6
+ # --- Core Library Imports ---
7
+ # PyTorch Core
8
+ import torch
9
+ import torch.nn as nn
10
+ import torch.optim as optim
11
+ import torch.nn.functional as F
12
+
13
+ # Hugging Face Transformers library, the source of our "Lego bricks"
14
+ from transformers import AutoConfig, AutoModelForCausalLM, AutoTokenizer
15
+ from transformers.models.llama.modeling_llama import LlamaMLP # The standard FFN module from Llama, which we use as the base for our "Experts"
16
+
17
+ # Safetensors library, for safely and efficiently loading model weights
18
+ from safetensors.torch import load_file
19
+
20
+ # Standard Python Libraries
21
+ import os # For handling file paths
22
+ import shutil # For directory operations (like deleting an old model folder)
23
+ import numpy as np # For data analysis (in the final test)
24
+ import time # For timing the training process
25
+
26
+ # --- 0. Global Configuration & Hyperparameters ---
27
+ # This is the master control panel for the entire project. All key parameters are defined here.
28
+
29
+ # MODEL_NAME: The path to the base model. We use this to load the initial config and tokenizer.
30
+ # Note: This should point to a standard, unmodified SmolLM model.
31
+ MODEL_NAME = "./SmolLM2-135M-Instruct"
32
+
33
+ # BASE_EXPERT_PATH: The parent directory containing all 8 of your pre-trained expert model folders.
34
+ BASE_EXPERT_PATH = "./models"
35
+
36
+ # EXPERT_DIRS: A list of the specific directory names for your 8 expert models. The order is important.
37
+ EXPERT_DIRS = [
38
+ "SmolLM2-135M-Instruct-Actor", "SmolLM2-135M-Instruct-Analyst",
39
+ "SmolLM2-135M-Instruct-Coder", "SmolLM2-135M-Instruct-Encyclopedia",
40
+ "SmolLM2-135M-Instruct-Guardian", "SmolLM2-135M-Instruct-Summarizer",
41
+ "SmolLM2-135M-Instruct-Thinker", "SmolLM2-135M-Instruct-Writer"
42
+ ]
43
+
44
+ # MoE Architecture Parameters
45
+ NUM_EXPERTS = 8 # The number of experts in our committee
46
+ TOP_K = 2 # The number of top experts to route to for each token
47
+
48
+ # Training Hyperparameters
49
+ LEARNING_RATE = 0.001 # The learning rate for the routers. Since we only train routers, it can be slightly higher.
50
+ EPOCHS = 20 # Number of training epochs. Since we're using mock data to validate the process, 20 is sufficient.
51
+ # This needs to be much higher when using real data.
52
+ BATCH_SIZE = 4 # The number of sequences to process in each batch. Adjust based on your VRAM.
53
+ SEQUENCE_LENGTH = 128 # The length of text sequences the model processes. Adjust based on your VRAM.
54
+ LB_LOSS_COEFFICIENT = 0.01 # The weight coefficient for the load balancing loss. This is a critical "balancing valve"
55
+ # used to trade off between "doing the job well" and "distributing work fairly."
56
+
57
+ # Device Configuration
58
+ device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
59
+ print(f"Using device: {device}")
60
+
61
+
62
+ # --- 1. MoE Architecture Component Definitions ---
63
+ # These are the blueprints for the new parts we've "invented."
64
+
65
+ class MoERouter(nn.Module):
66
+ """
67
+ The Router (or Gate Network) - The "CEO" or "dispatcher" of the expert committee.
68
+ Its structure is a simple linear layer, responsible for scoring all experts for each incoming token.
69
+ """
70
+ def __init__(self, hidden_size: int, num_experts: int):
71
+ super().__init__()
72
+ self.layer = nn.Linear(hidden_size, num_experts, bias=False)
73
+
74
+ def forward(self, hidden_states):
75
+ # Outputs the "scores" (logits) for each expert, which will later be turned into probabilities via Softmax.
76
+ return self.layer(hidden_states)
77
+
78
+ class MoEModule(nn.Module):
79
+ """
80
+ The Mixture-of-Experts Module - The "conference room" for the entire expert committee.
81
+ This module replaces the standard FFN (MLP) block in the original Llama model.
82
+ It contains one router (the CEO) and a list of experts (the board members).
83
+ """
84
+ def __init__(self, config):
85
+ super().__init__()
86
+ # Get necessary parameters from the global config
87
+ self.hidden_size = config.hidden_size
88
+ self.top_k = TOP_K
89
+ self.num_experts = NUM_EXPERTS
90
+
91
+ # Create the components
92
+ self.router = MoERouter(self.hidden_size, self.num_experts)
93
+ # LlamaMLP is the standard FFN implementation in Hugging Face's Llama, which we use as the base for our "experts".
94
+ self.experts = nn.ModuleList([LlamaMLP(config) for _ in range(self.num_experts)])
95
+
96
+ # A placeholder to temporarily store the load balancing loss for this layer during a forward pass
97
+ self.most_recent_lb_loss = None
98
+
99
+ def forward(self, hidden_states):
100
+ # Store the original shape to reshape the output at the end
101
+ original_shape = hidden_states.shape
102
+ # Flatten the input from (batch, sequence, dim) to (batch * sequence, dim) for token-level routing
103
+ flat_hidden_states = hidden_states.view(-1, self.hidden_size)
104
+
105
+ # --- Step 1: Routing Decision ---
106
+ # Get scores from the router for each token
107
+ router_logits = self.router(flat_hidden_states)
108
+ # Use Softmax to convert scores to probabilities
109
+ routing_weights = F.softmax(router_logits, dim=-1, dtype=torch.float)
110
+ # Select the top-k experts and their corresponding probabilities
111
+ routing_weights, selected_experts = torch.topk(routing_weights, self.top_k, dim=-1)
112
+ # Normalize the probabilities of the top-k experts so they sum to 1
113
+ routing_weights /= routing_weights.sum(dim=-1, keepdim=True)
114
+ # Cast weights back to the model's main dtype (e.g., bfloat16) for efficiency
115
+ routing_weights = routing_weights.to(hidden_states.dtype)
116
+
117
+ # --- Step 2: Calculate and Store Load Balancing Loss ---
118
+ # This is the soul of MoE training: ensuring the router doesn't get "lazy" and uses all experts fairly.
119
+ router_probs_full = F.softmax(router_logits, dim=-1, dtype=torch.float)
120
+ avg_expert_prob = router_probs_full.mean(dim=0) # The average probability for each expert across all tokens
121
+ expert_mask_for_lb = F.one_hot(selected_experts, num_classes=self.num_experts).sum(dim=1) # Checks which experts were chosen for each token
122
+ avg_expert_fraction = expert_mask_for_lb.float().mean(dim=0) # The average fraction of tokens processed by each expert
123
+ # Calculate the loss, multiply by the number of experts as a penalty term, and store it.
124
+ self.most_recent_lb_loss = self.num_experts * torch.sum(avg_expert_prob * avg_expert_fraction)
125
+
126
+ # --- Step 3: Expert Computation and Result Aggregation (Vectorized & Efficient) ---
127
+ # Create an empty tensor to store the final results
128
+ final_hidden_states = torch.zeros_like(flat_hidden_states)
129
+
130
+ # This loop only iterates `top_k` times (e.g., 2), which is very fast.
131
+ for k in range(self.top_k):
132
+ # Get the expert indices and weights for the k-th choice across all tokens
133
+ expert_indices_k = selected_experts[:, k]
134
+ routing_weights_k = routing_weights[:, k]
135
+
136
+ # This loop iterates over all experts, but the computations inside are batched and fast.
137
+ for i in range(self.num_experts):
138
+ # Create a mask to find all tokens that were routed to the current expert `i`
139
+ mask = expert_indices_k == i
140
+ if mask.any(): # If any token was routed to this expert
141
+ # Process all selected tokens in a single batch
142
+ expert_output = self.experts[i](flat_hidden_states[mask])
143
+ # Weight the expert's output by its routing weight and "add" it back to the correct positions in the final tensor
144
+ final_hidden_states.index_add_(0, torch.where(mask)[0], expert_output * routing_weights_k[mask].unsqueeze(1))
145
+
146
+ # Reshape the result back to the original (batch, sequence, hidden_size) shape and return
147
+ return final_hidden_states.view(*original_shape)
148
+
149
+ # --- 2. The "Genesis" Function: Assembling, Transplanting, and Modifying the Model ---
150
+ def create_moe_model():
151
+ """
152
+ This is the "Architectural Surgery" function. It is responsible for:
153
+ 1. Building an empty model skeleton with MoE modules.
154
+ 2. "Transplanting" the weights from your 8 pre-trained experts into it.
155
+ 3. Freezing all expert parameters, leaving only the routers trainable.
156
+ """
157
+ print("--- Starting Architectural Surgery ---")
158
+
159
+ # Load the config from the standard model; this is the "genetic blueprint" for our new model
160
+ config = AutoConfig.from_pretrained(MODEL_NAME)
161
+
162
+ print("Step 1: Loading base model skeleton...")
163
+ # Load one of the experts to serve as the "skeleton" for our MoE model.
164
+ # We will use its non-FFN parts (embeddings, attention modules, etc.).
165
+ base_model = AutoModelForCausalLM.from_pretrained(
166
+ os.path.join(BASE_EXPERT_PATH, EXPERT_DIRS[0]),
167
+ torch_dtype=torch.bfloat16,
168
+ device_map=device
169
+ )
170
+
171
+ print("Step 2: Pre-loading all expert weights into CPU memory for efficiency...")
172
+ # To improve efficiency, we load all expert weights from disk into CPU RAM at once.
173
+ # We use `safetensors.torch.load_file` as it is the correct and safe way to load .safetensors files.
174
+ all_experts_state_dicts = [
175
+ load_file(os.path.join(BASE_EXPERT_PATH, expert_dir, 'model.safetensors'), device='cpu')
176
+ for expert_dir in EXPERT_DIRS
177
+ ]
178
+ print("All expert weights pre-loaded.")
179
+
180
+ print("Step 3: Replacing FFNs with MoE modules and transplanting expert weights...")
181
+ # Iterate through all 30 layers of the model
182
+ for layer_idx, layer in enumerate(base_model.model.layers):
183
+ # In each layer, replace the original, standard LlamaMLP with our custom MoEModule
184
+ layer.mlp = MoEModule(config).to(device, dtype=torch.bfloat16)
185
+
186
+ # Begin the "Organ Transplant"
187
+ for expert_idx in range(NUM_EXPERTS):
188
+ # Get the weights for the current expert from memory
189
+ expert_state_dict = all_experts_state_dicts[expert_idx]
190
+ # Filter to get only the weights for the FFN part of the current layer
191
+ expert_mlp_weights = {
192
+ k.replace(f"model.layers.{layer_idx}.mlp.", ""): v
193
+ for k, v in expert_state_dict.items()
194
+ if f"model.layers.{layer_idx}.mlp." in k
195
+ }
196
+ # Load these weights into the corresponding expert "seat" in our MoE module
197
+ layer.mlp.experts[expert_idx].load_state_dict(expert_mlp_weights)
198
+
199
+ print("Step 4: Freezing all parameters except for the routers...")
200
+ # This is our key strategy: only train the "CEO", don't disturb the already-smart "experts".
201
+ for name, param in base_model.named_parameters():
202
+ if "router" not in name:
203
+ param.requires_grad = False
204
+
205
+ print("\n--- Surgery Complete! MoE Model is assembled and ready for training. ---")
206
+ # Print parameter statistics to verify our operation was successful
207
+ trainable_params = sum(p.numel() for p in base_model.parameters() if p.requires_grad)
208
+ total_params = sum(p.numel() for p in base_model.parameters())
209
+ print(f"Total Parameters: {total_params / 1e6:.2f}M")
210
+ print(f"Trainable Parameters (Routers): {trainable_params}")
211
+
212
+ return base_model
213
+
214
+ # --- 3. Main Process: Training and Saving ---
215
+ def main():
216
+ # Step 1: Call the "Genesis" function to create our model
217
+ moe_model = create_moe_model()
218
+
219
+ # Step 2: Create the optimizer. It's smart enough to only include parameters where `requires_grad=True` (i.e., the routers).
220
+ optimizer = optim.AdamW([p for p in moe_model.parameters() if p.requires_grad], lr=LEARNING_RATE)
221
+
222
+ print("\n--- Preparing Simulated Mixed Dataset for Training ---")
223
+ # NOTE: We are using completely random "mock data" here, solely to validate that the entire process runs.
224
+ # To make the routers truly intelligent, you MUST replace this with a real, diverse dataset.
225
+ mock_input_ids = torch.randint(0, moe_model.config.vocab_size, (BATCH_SIZE, SEQUENCE_LENGTH), device=device)
226
+ mock_labels = mock_input_ids.clone()
227
+
228
+ print("--- Starting Router Training Loop (Optimized & Corrected) ---")
229
+ moe_model.train() # Set the model to training mode
230
+
231
+ start_time = time.time()
232
+ for epoch in range(EPOCHS):
233
+ optimizer.zero_grad() # Clear gradients from the previous epoch
234
+
235
+ # --- The Elegant and Correct Forward Pass ---
236
+ # Call the model directly. Hugging Face automatically handles all complex internal details (like attention masks).
237
+ # By providing `labels`, it also automatically calculates the main cross-entropy loss for us.
238
+ outputs = moe_model(input_ids=mock_input_ids, labels=mock_labels)
239
+ main_loss = outputs.loss # Extract the main task loss
240
+
241
+ # --- Safely Collect Load Balancing Losses ---
242
+ total_lb_loss = 0.0
243
+ for layer in moe_model.model.layers:
244
+ total_lb_loss += layer.mlp.most_recent_lb_loss # Retrieve the loss stored in our placeholder
245
+
246
+ # --- Calculate the Final "Composite KPI" (Total Loss) ---
247
+ total_loss = main_loss + LB_LOSS_COEFFICIENT * total_lb_loss
248
+
249
+ # --- Backpropagation and Optimization ---
250
+ total_loss.backward() # Calculate gradients
251
+ optimizer.step() # Update router weights
252
+
253
+ # --- Print Training Logs ---
254
+ if (epoch + 1) % 10 == 0:
255
+ elapsed_time = time.time() - start_time
256
+ print(f"Epoch [{epoch+1:03d}/{EPOCHS}] | Total Loss: {total_loss.item():.4f} | "
257
+ f"Main Loss: {main_loss.item():.4f} | "
258
+ f"Avg LB Loss: {(total_lb_loss.item() / moe_model.config.num_hidden_layers):.4f} | "
259
+ f"Time: {elapsed_time:.2f}s")
260
+ start_time = time.time()
261
+
262
+ print("\n--- Router Training Complete! ---")
263
+
264
+ # --- Step 5: Solidifying our great work onto the disk ---
265
+ print("\n--- Phase 5: Saving the fully trained MoE model to disk ---")
266
+ OUTPUT_MODEL_DIR = "./SmolMoE-8x135M-Instruct-v1-Trained"
267
+ if os.path.exists(OUTPUT_MODEL_DIR):
268
+ shutil.rmtree(OUTPUT_MODEL_DIR)
269
+ os.makedirs(OUTPUT_MODEL_DIR)
270
+
271
+ print("Updating model config with MoE-specific parameters...")
272
+ # We use custom names for our MoE parameters to avoid conflicts with standard Hugging Face generation configs.
273
+ moe_model.config.moe_num_experts = NUM_EXPERTS
274
+ moe_model.config.moe_top_k = TOP_K
275
+
276
+ print(f"Saving model to '{OUTPUT_MODEL_DIR}'...")
277
+ moe_model.save_pretrained(OUTPUT_MODEL_DIR) # Saves weights and the updated config file
278
+
279
+ print("Saving tokenizer...")
280
+ tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME) # Save the tokenizer
281
+ tokenizer.save_pretrained(OUTPUT_MODEL_DIR)
282
+
283
+ print("\n--- Model successfully saved! ---")
284
+ print("You can now load this model in other scripts, but you must re-define the custom MoE classes first.")
285
+
286
+ # --- Script Entry Point ---
287
+ # Ensures that the main() function is only called when this file is executed directly.
288
+ if __name__ == "__main__":
289
+ main()
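
For reference, the load-balancing term that MoEModule.forward stores in most_recent_lb_loss corresponds to the standard auxiliary loss for top-k routing (this restates the code above, it is not an additional file):

\mathcal{L}_{\text{aux}} = N \sum_{i=1}^{N} P_i \, f_i, \qquad
P_i = \frac{1}{T}\sum_{t=1}^{T} p_i(x_t), \qquad
f_i = \frac{1}{T}\sum_{t=1}^{T} \mathbf{1}\{\, i \in \text{Top-}k(x_t) \,\}

where N = NUM_EXPERTS, T is the number of tokens in the batch, and p_i(x_t) is the router's softmax probability for expert i on token x_t. The overall objective optimized in main() is \mathcal{L} = \mathcal{L}_{\text{CE}} + \lambda \sum_{\text{layers}} \mathcal{L}_{\text{aux}} with \lambda = LB_LOSS_COEFFICIENT = 0.01.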
train/train_moe_router_zh_CN.py ADDED
@@ -0,0 +1,289 @@
+ # ==============================================================================
+ # Smol-MoE 8x135M - "Genesis" Main Script
+ # (Final optimized version, with all fixes and detailed comments)
+ # ==============================================================================
+
+ # --- Core Library Imports ---
+ # PyTorch core
+ import torch
+ import torch.nn as nn
+ import torch.optim as optim
+ import torch.nn.functional as F
+
+ # Hugging Face Transformers, our source of "LEGO bricks"
+ from transformers import AutoConfig, AutoModelForCausalLM, AutoTokenizer
+ from transformers.models.llama.modeling_llama import LlamaMLP  # The standard FFN module of Llama models; we use it as the base structure of each "expert"
+
+ # Safetensors, for loading model weights safely and efficiently
+ from safetensors.torch import load_file
+
+ # Standard Python libraries
+ import os        # File path handling
+ import shutil    # Folder handling (e.g., removing an old model directory)
+ import numpy as np  # Data analysis (used in the final test)
+ import time      # Measuring training duration
+
+ # --- 0. Global Configuration and Hyperparameters ---
+ # This is the project's "master control panel": every key parameter is defined here.
+
+ # MODEL_NAME: path to the base model. We use it to load the initial config and the tokenizer.
+ # Note: it points to a standard, unmodified SmolLM model.
+ MODEL_NAME = "./SmolLM2-135M-Instruct"
+
+ # BASE_EXPERT_PATH: parent folder that holds all 8 of your pre-trained expert models.
+ BASE_EXPERT_PATH = "./models"
+
+ # EXPERT_DIRS: names of the 8 expert model folders. Order matters.
+ EXPERT_DIRS = [
+     "SmolLM2-135M-Instruct-Actor", "SmolLM2-135M-Instruct-Analyst",
+     "SmolLM2-135M-Instruct-Coder", "SmolLM2-135M-Instruct-Encyclopedia",
+     "SmolLM2-135M-Instruct-Guardian", "SmolLM2-135M-Instruct-Summarizer",
+     "SmolLM2-135M-Instruct-Thinker", "SmolLM2-135M-Instruct-Writer"
+ ]
+
+ # MoE architecture parameters
+ NUM_EXPERTS = 8  # Number of experts on the committee
+ TOP_K = 2        # Number of top experts selected for each token at routing time
+
+ # Training hyperparameters
+ LEARNING_RATE = 0.001  # Learning rate for the router. Only the router is trained, so it can be set a bit higher.
+ EPOCHS = 20            # Number of training epochs. We use simulated data to validate the pipeline, so 20 is enough.
+                        # With real data you will need to set this much higher.
+ BATCH_SIZE = 4         # Samples per batch. Adjust to your GPU memory.
+ SEQUENCE_LENGTH = 128  # Sequence length processed by the model. Adjust to your GPU memory.
+ LB_LOSS_COEFFICIENT = 0.01  # Weight of the load-balancing loss. This is the key "balancing valve"
+                             # between "doing the task well" and "distributing work fairly".
+
+ # Device configuration
+ device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
+ print(f"Using device: {device}")
+
+
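+ # A quick intuition for these settings: with TOP_K = 2 out of NUM_EXPERTS = 8, each token runs
+ # through only 2 of the 8 expert MLPs in every layer, so per-token FFN compute is roughly twice
+ # that of the dense 135M base model, while the assembled MoE stores 8x the FFN parameters.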
+ # --- 1. MoE Architecture Components ---
+ # Blueprints for the new parts we are "inventing" here.
+
+ class MoERouter(nn.Module):
+     """
+     The Router (gate network) - the "CEO" or "dispatcher" of the expert committee.
+     Its structure is very simple: a single linear layer that scores every expert for each incoming token.
+     """
+     def __init__(self, hidden_size: int, num_experts: int):
+         super().__init__()
+         self.layer = nn.Linear(hidden_size, num_experts, bias=False)
+
+     def forward(self, hidden_states):
+         # Output a "score" (logit) per expert; these are later turned into probabilities via Softmax
+         return self.layer(hidden_states)
+
+ class MoEModule(nn.Module):
+     """
+     The Mixture-of-Experts module - the "boardroom" where the whole expert committee meets.
+     It replaces the standard FFN (MLP) module of the original Llama model and
+     contains one router (the CEO) and 8 experts (the board members).
+     """
+     def __init__(self, config):
+         super().__init__()
+         # Read the required parameters from the global configuration
+         self.hidden_size = config.hidden_size
+         self.top_k = TOP_K
+         self.num_experts = NUM_EXPERTS
+
+         # Build the components
+         self.router = MoERouter(self.hidden_size, self.num_experts)
+         # LlamaMLP is the standard FFN implementation of Llama models in Hugging Face; we use it as the base structure of each expert
+         self.experts = nn.ModuleList([LlamaMLP(config) for _ in range(self.num_experts)])
+
+         # Placeholder used during training to temporarily store this layer's load-balancing loss
+         self.most_recent_lb_loss = None
+
+     def forward(self, hidden_states):
+         # Remember the original input shape so it can be restored at the end
+         original_shape = hidden_states.shape
+         # Flatten the input, merging the batch and sequence dimensions, to make token-level routing easier
+         flat_hidden_states = hidden_states.view(-1, self.hidden_size)
+
+         # --- Step 1: Routing decision ---
+         # Let the router score every token
+         router_logits = self.router(flat_hidden_states)
+         # Convert the scores into probabilities with Softmax
+         routing_weights = F.softmax(router_logits, dim=-1, dtype=torch.float)
+         # Pick the Top-K highest-scoring experts, along with their probabilities
+         routing_weights, selected_experts = torch.topk(routing_weights, self.top_k, dim=-1)
+         # Renormalize the Top-K probabilities so they sum to 1
+         routing_weights /= routing_weights.sum(dim=-1, keepdim=True)
+         # Cast the weights back to the model's main dtype (e.g., bfloat16) for efficiency
+         routing_weights = routing_weights.to(hidden_states.dtype)
+
+         # --- Step 2: Compute and store the load-balancing loss ---
+         # This is the soul of MoE training: keep the router from playing favorites and make it use all experts fairly
+         router_probs_full = F.softmax(router_logits, dim=-1, dtype=torch.float)
+         avg_expert_prob = router_probs_full.mean(dim=0)  # Average selection probability of each expert over all tokens
+         expert_mask_for_lb = F.one_hot(selected_experts, num_classes=self.num_experts).sum(dim=1)  # Which experts each token was assigned to
+         avg_expert_fraction = expert_mask_for_lb.float().mean(dim=0)  # Average fraction of tokens handled by each expert
+         # Compute the loss, scale it by the number of experts as a penalty term, then store it
+         self.most_recent_lb_loss = self.num_experts * torch.sum(avg_expert_prob * avg_expert_fraction)
+
+         # --- Step 3: Expert computation and result fusion (vectorized, efficient version) ---
+         # Create an empty tensor to hold the final result
+         final_hidden_states = torch.zeros_like(flat_hidden_states)
+
+         # This loop only runs Top-K times (e.g., twice), so it is very fast
+         for k in range(self.top_k):
+             # For every token, get the expert index and weight of its k-th choice
+             expert_indices_k = selected_experts[:, k]
+             routing_weights_k = routing_weights[:, k]
+
+             # This loop visits every expert, but the work inside is batched, so it is also fast
+             for i in range(self.num_experts):
+                 # Build a mask selecting all tokens that chose expert i
+                 mask = expert_indices_k == i
+                 if mask.any():  # If any token chose this expert
+                     # Feed those tokens to expert i as a single batch
+                     expert_output = self.experts[i](flat_hidden_states[mask])
+                     # Scale the expert output by the routing weight and add it back into the correct positions of the result tensor
+                     final_hidden_states.index_add_(0, torch.where(mask)[0], expert_output * routing_weights_k[mask].unsqueeze(1))
+
+         # Restore the original (batch, sequence, hidden_size) shape and return
+         return final_hidden_states.view(*original_shape)
+
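+ # A note on the load-balancing term computed in MoEModule.forward above: per layer it evaluates
+ #     lb_loss = num_experts * sum_i( P_i * f_i )
+ # where P_i is the mean router probability assigned to expert i over all tokens in the batch and
+ # f_i is the fraction of tokens that picked expert i among their Top-K choices. This mirrors the
+ # Switch-Transformer-style auxiliary loss: it grows when probability mass and token assignments
+ # concentrate on the same few experts, so minimizing it pushes the router toward an even spread.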
+ # --- 2. The "Genesis" Function: Assemble, Transplant, and Rewire the Model ---
+ def create_moe_model():
+     """
+     This is the "architectural surgery" function. It:
+     1. Builds a skeleton model that carries MoE modules.
+     2. Precisely "transplants" the weights of your 8 pre-trained experts into it.
+     3. Freezes all experts, leaving only the routers trainable.
+     """
+     print("--- Starting Architectural Surgery ---")
+
+     # Load the configuration from the standard model; this is the "genetic blueprint" of our new model
+     config = AutoConfig.from_pretrained(MODEL_NAME)
+
+     print("Step 1: Loading base model skeleton...")
+     # Load any one of the 8 experts to serve as the "skeleton" of our MoE model
+     # We will reuse its embeddings, attention modules, and the other non-FFN parts
+     base_model = AutoModelForCausalLM.from_pretrained(
+         os.path.join(BASE_EXPERT_PATH, EXPERT_DIRS[0]),
+         torch_dtype=torch.bfloat16,
+         device_map=device
+     )
+
+     print("Step 2: Pre-loading all expert weights into CPU memory for efficiency...")
+     # For efficiency, load the weights of all experts from disk into CPU memory in one pass
+     # We use `safetensors.torch.load_file`, the correct way to load .safetensors files
+     all_experts_state_dicts = [
+         load_file(os.path.join(BASE_EXPERT_PATH, expert_dir, 'model.safetensors'), device='cpu')
+         for expert_dir in EXPERT_DIRS
+     ]
+     print("All expert weights pre-loaded.")
+
+     print("Step 3: Replacing FFNs with MoE modules and transplanting expert weights...")
+     # Iterate over the model's 30 layers
+     for layer_idx, layer in enumerate(base_model.model.layers):
+         # In every layer, replace the original standard LlamaMLP with our own MoEModule
+         layer.mlp = MoEModule(config).to(device, dtype=torch.bfloat16)
+
+         # Start the "organ transplant"
+         for expert_idx in range(NUM_EXPERTS):
+             # Fetch the current expert's weight dictionary from memory
+             expert_state_dict = all_experts_state_dicts[expert_idx]
+             # Keep only the weights belonging to the FFN part of the current layer
+             expert_mlp_weights = {
+                 k.replace(f"model.layers.{layer_idx}.mlp.", ""): v
+                 for k, v in expert_state_dict.items()
+                 if f"model.layers.{layer_idx}.mlp." in k
+             }
+             # Load these weights into the matching expert "seat" of the MoE module
+             layer.mlp.experts[expert_idx].load_state_dict(expert_mlp_weights)
+
+     print("Step 4: Freezing all parameters except for the routers...")
+     # The key strategy: only train the "CEO", and leave the already-smart "experts" untouched
+     for name, param in base_model.named_parameters():
+         if "router" not in name:
+             param.requires_grad = False
+
+     print("\n--- Surgery Complete! MoE Model is assembled and ready for training. ---")
+     # Print parameter statistics to verify that the surgery worked as intended
+     trainable_params = sum(p.numel() for p in base_model.parameters() if p.requires_grad)
+     total_params = sum(p.numel() for p in base_model.parameters())
+     print(f"Total Parameters: {total_params / 1e6:.2f}M")
+     print(f"Trainable Parameters (Routers): {trainable_params}")
+
+     return base_model
+
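+ # Rough sanity check for the numbers printed by create_moe_model (assuming SmolLM2-135M's usual
+ # configuration of hidden_size = 576 and 30 decoder layers): each router is a bias-free 576 x 8
+ # linear layer, so "Trainable Parameters (Routers)" should come out to about 576 * 8 * 30 = 138,240,
+ # a tiny fraction of the total parameter count of the assembled MoE.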
+ # --- 3. Main Flow: Training and Saving ---
+ def main():
+     # Step 1: call the "genesis" function to create our model
+     moe_model = create_moe_model()
+
+     # Step 2: create the optimizer. It is smart enough to only include parameters with `requires_grad=True` (i.e., the routers)
+     optimizer = optim.AdamW([p for p in moe_model.parameters() if p.requires_grad], lr=LEARNING_RATE)
+
+     print("\n--- Preparing Simulated Mixed Dataset for Training ---")
+     # Note: this is completely random "mock data", used only to verify that the whole pipeline runs.
+     # To make the router genuinely smart, you must replace it with a real, diverse dataset (see the sketch below).
+     mock_input_ids = torch.randint(0, moe_model.config.vocab_size, (BATCH_SIZE, SEQUENCE_LENGTH), device=device)
+     mock_labels = mock_input_ids.clone()
+
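+     # To train the router for real, replace the mock batch above with batches drawn from an actual
+     # instruction dataset. A minimal, hypothetical sketch (the dataset name and its "messages" column
+     # are placeholders, not part of this project):
+     #
+     #     from datasets import load_dataset
+     #     dataset = load_dataset("HuggingFaceTB/smoltalk", "all", split="train")
+     #     data_tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME)
+     #     texts = [data_tokenizer.apply_chat_template(m, tokenize=False) for m in dataset[:BATCH_SIZE]["messages"]]
+     #     enc = data_tokenizer(texts, truncation=True, max_length=SEQUENCE_LENGTH,
+     #                          padding="max_length", return_tensors="pt")
+     #     real_input_ids = enc["input_ids"].to(device)
+     #     real_labels = real_input_ids.clone()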
+     print("--- Starting Router Training Loop (Optimized & Corrected) ---")
+     moe_model.train()  # Put the model into training mode
+
+     start_time = time.time()
+     for epoch in range(EPOCHS):
+         optimizer.zero_grad()  # Clear the previous epoch's gradients before each new epoch
+
+         # --- An elegant and correct forward pass ---
+         # Call the model directly; Hugging Face automatically handles all the tricky internals (such as the attention mask).
+         # Because we also pass `labels`, it computes the main-task cross-entropy loss for us.
+         outputs = moe_model(input_ids=mock_input_ids, labels=mock_labels)
+         main_loss = outputs.loss  # Extract the main-task loss
+
+         # --- Safely collect the load-balancing losses from every layer ---
+         total_lb_loss = 0.0
+         for layer in moe_model.model.layers:
+             total_lb_loss += layer.mlp.most_recent_lb_loss  # Read the loss from the placeholder we set in MoEModule
+
+         # --- Compute the final "composite KPI" (total loss) ---
+         total_loss = main_loss + LB_LOSS_COEFFICIENT * total_lb_loss
+
+         # --- Backpropagation and Optimization ---
+         total_loss.backward()  # Compute gradients
+         optimizer.step()       # Update the router weights
+
+         # --- Print Training Logs ---
+         if (epoch + 1) % 10 == 0:
+             elapsed_time = time.time() - start_time
+             print(f"Epoch [{epoch+1:03d}/{EPOCHS}] | Total Loss: {total_loss.item():.4f} | "
+                   f"Main Loss: {main_loss.item():.4f} | "
+                   f"Avg LB Loss: {(total_lb_loss.item() / moe_model.config.num_hidden_layers):.4f} | "
+                   f"Time: {elapsed_time:.2f}s")
+             start_time = time.time()
+
+     print("\n--- Router Training Complete! ---")
+
+     # --- Step 5: Solidifying our great work onto the disk ---
+     print("\n--- Phase 5: Saving the fully trained MoE model to disk ---")
+     OUTPUT_MODEL_DIR = "./SmolMoE-8x135M-Instruct-v1-Trained"
+     if os.path.exists(OUTPUT_MODEL_DIR):
+         shutil.rmtree(OUTPUT_MODEL_DIR)
+     os.makedirs(OUTPUT_MODEL_DIR)
+
+     print("Updating model config with MoE-specific parameters...")
+     # We use custom names for our MoE parameters to avoid conflicts with standard Hugging Face generation configs.
+     moe_model.config.moe_num_experts = NUM_EXPERTS
+     moe_model.config.moe_top_k = TOP_K
+
+     print(f"Saving model to '{OUTPUT_MODEL_DIR}'...")
+     moe_model.save_pretrained(OUTPUT_MODEL_DIR)  # Saves weights and the updated config file
+
+     print("Saving tokenizer...")
+     tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME)  # Load the tokenizer from the base model so it can be saved next to the MoE weights
+     tokenizer.save_pretrained(OUTPUT_MODEL_DIR)
+
+     print("\n--- Model successfully saved! ---")
+     print("You can now load this model in other scripts, but you must re-define the custom MoE classes first.")
+
+ # --- Script Entry Point ---
+ # Ensures that main() only runs when this file is executed directly.
+ if __name__ == "__main__":
+     main()