yujiepan committed (verified)
Commit f1b9c0f · 1 Parent(s): 54f0942

Upload folder using huggingface_hub
.gitattributes CHANGED
@@ -33,3 +33,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
  *.zip filter=lfs diff=lfs merge=lfs -text
  *.zst filter=lfs diff=lfs merge=lfs -text
  *tfevents* filter=lfs diff=lfs merge=lfs -text
+ tokenizer.json filter=lfs diff=lfs merge=lfs -text
README.md ADDED
@@ -0,0 +1,254 @@
---
library_name: transformers
pipeline_tag: text-generation
inference: true
widget:
- text: Hello!
  example_title: Hello world
  group: Python
base_model:
- Qwen/Qwen3-Next-80B-A3B-Instruct
---

This tiny model is intended for debugging. It is randomly initialized, using a configuration adapted from [Qwen/Qwen3-Next-80B-A3B-Instruct](https://huggingface.co/Qwen/Qwen3-Next-80B-A3B-Instruct).

### Example usage:

- vLLM

```bash
VLLM_ALLOW_LONG_MAX_MODEL_LEN=1 \
vllm serve tiny-random/qwen3-next-moe \
  --tensor-parallel-size 4 \
  --max-model-len 262144 \
  --speculative-config '{"method":"qwen3_next_mtp","num_speculative_tokens":2}'
```

- SGLang

```bash
SGLANG_ALLOW_OVERWRITE_LONGER_CONTEXT_LEN=1 \
python -m sglang.launch_server \
  --model-path tiny-random/qwen3-next-moe \
  --tp-size 4 --context-length 262144 \
  --mem-fraction-static 0.8 \
  --speculative-algo NEXTN \
  --speculative-num-steps 3 \
  --speculative-eagle-topk 1 \
  --speculative-num-draft-tokens 4
```

- Transformers

```python
from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline
model_id = "tiny-random/qwen3-next-moe"

# load the tokenizer and the model
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(
    model_id,
    dtype="auto",
    device_map="cuda",
)
# prepare the model input
prompt = "Give me a short introduction to large language models."
messages = [
    {"role": "user", "content": prompt},
]
text = tokenizer.apply_chat_template(
    messages,
    tokenize=False,
    add_generation_prompt=True,
)
model_inputs = tokenizer([text], return_tensors="pt").to(model.device)
# conduct text completion
generated_ids = model.generate(
    **model_inputs,
    max_new_tokens=8,
)
output_ids = generated_ids[0][len(model_inputs.input_ids[0]):].tolist()
content = tokenizer.decode(output_ids, skip_special_tokens=True)
print("content:", content)
```
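
Both the vLLM and SGLang commands above expose an OpenAI-compatible HTTP API once the server is up. The snippet below is a minimal sketch of querying it, assuming vLLM's default port 8000 (SGLang defaults to 30000) and the served model name `tiny-random/qwen3-next-moe`; since the weights are random, the completion will be meaningless tokens.

```python
# Minimal sketch: query the OpenAI-compatible /v1/chat/completions endpoint.
# Assumes the server launched by the vLLM command above is listening on
# localhost:8000 (use port 30000 for the SGLang command); adjust as needed.
import requests

resp = requests.post(
    "http://localhost:8000/v1/chat/completions",
    json={
        "model": "tiny-random/qwen3-next-moe",
        "messages": [{"role": "user", "content": "Hello!"}],
        "max_tokens": 8,
    },
    timeout=60,
)
resp.raise_for_status()
# Random weights, so expect gibberish here.
print(resp.json()["choices"][0]["message"]["content"])
```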

### Code to create this repo:

```python
from copy import deepcopy

import torch
import torch.nn as nn
from transformers import (
    AutoConfig,
    AutoModelForCausalLM,
    AutoTokenizer,
    GenerationConfig,
    pipeline,
    set_seed,
)

source_model_id = "Qwen/Qwen3-Next-80B-A3B-Instruct"
save_folder = "/tmp/tiny-random/qwen3-next-moe"

tokenizer = AutoTokenizer.from_pretrained(
    source_model_id, trust_remote_code=True,
)
tokenizer.save_pretrained(save_folder)

config = AutoConfig.from_pretrained(
    source_model_id, trust_remote_code=True,
)
config._name_or_path = source_model_id
config.hidden_size = 8
config.intermediate_size = 32
config.head_dim = 32
config.num_key_value_heads = 8
config.num_attention_heads = 16
config.num_hidden_layers = 4
config.tie_word_embeddings = False
config.linear_num_key_heads = 8
config.linear_num_value_heads = 16
config.moe_intermediate_size = 32
config.num_experts = 32
config.num_experts_per_tok = 10
config.layer_types = config.layer_types[:4]
config.shared_expert_intermediate_size = 32
model = AutoModelForCausalLM.from_config(
    config,
    torch_dtype=torch.bfloat16,
    trust_remote_code=True,
)
model.generation_config = GenerationConfig.from_pretrained(
    source_model_id, trust_remote_code=True,
)
# MTP
model.mtp = nn.ModuleDict({
    "pre_fc_norm_embedding": nn.RMSNorm(config.hidden_size),
    "fc": nn.Linear(config.hidden_size * 2, config.hidden_size, bias=False),
    "norm": nn.RMSNorm(config.hidden_size),
    "pre_fc_norm_hidden": nn.RMSNorm(config.hidden_size),
    "layers": nn.ModuleList([deepcopy(model.model.layers[3])]),
})
set_seed(42)
with torch.no_grad():
    for name, p in sorted(model.named_parameters()):
        torch.nn.init.normal_(p, 0, 0.1)
        print(name, p.shape)
model.save_pretrained(save_folder)
```
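
A quick sanity check (a sketch, not part of the original script) is to reload the checkpoint from `save_folder` and generate a few tokens; the whole model should hold only a few million parameters.

```python
# Sketch: reload the tiny checkpoint saved above and make sure it runs.
# Assumes the same `save_folder` as in the creation script.
from transformers import AutoModelForCausalLM, AutoTokenizer

save_folder = "/tmp/tiny-random/qwen3-next-moe"
tok = AutoTokenizer.from_pretrained(save_folder)
# Note: the ad-hoc `mtp` weights saved by the script are not part of the
# Qwen3NextForCausalLM definition, so they may be reported as unused here.
model = AutoModelForCausalLM.from_pretrained(save_folder, dtype="auto")

print("parameters:", sum(p.numel() for p in model.parameters()))

inputs = tok("Hello!", return_tensors="pt")
out = model.generate(**inputs, max_new_tokens=4)
print(tok.decode(out[0], skip_special_tokens=True))  # random tokens: untrained weights
```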

### Printing the model:

```text
Qwen3NextForCausalLM(
  (model): Qwen3NextModel(
    (embed_tokens): Embedding(151936, 8)
    (layers): ModuleList(
      (0-2): 3 x Qwen3NextDecoderLayer(
        (linear_attn): Qwen3NextGatedDeltaNet(
          (act): SiLU()
          (conv1d): Conv1d(4096, 4096, kernel_size=(4,), stride=(1,), padding=(3,), groups=4096, bias=False)
          (in_proj_qkvz): Linear(in_features=8, out_features=6144, bias=False)
          (in_proj_ba): Linear(in_features=8, out_features=32, bias=False)
          (norm): FusedRMSNormGated(128, eps=1e-06, activation=silu)
          (out_proj): Linear(in_features=2048, out_features=8, bias=False)
        )
        (mlp): Qwen3NextSparseMoeBlock(
          (gate): Linear(in_features=8, out_features=32, bias=False)
          (experts): ModuleList(
            (0-31): 32 x Qwen3NextMLP(
              (gate_proj): Linear(in_features=8, out_features=32, bias=False)
              (up_proj): Linear(in_features=8, out_features=32, bias=False)
              (down_proj): Linear(in_features=32, out_features=8, bias=False)
              (act_fn): SiLU()
            )
          )
          (shared_expert): Qwen3NextMLP(
            (gate_proj): Linear(in_features=8, out_features=32, bias=False)
            (up_proj): Linear(in_features=8, out_features=32, bias=False)
            (down_proj): Linear(in_features=32, out_features=8, bias=False)
            (act_fn): SiLU()
          )
          (shared_expert_gate): Linear(in_features=8, out_features=1, bias=False)
        )
        (input_layernorm): Qwen3NextRMSNorm((8,), eps=1e-06)
        (post_attention_layernorm): Qwen3NextRMSNorm((8,), eps=1e-06)
      )
      (3): Qwen3NextDecoderLayer(
        (self_attn): Qwen3NextAttention(
          (q_proj): Linear(in_features=8, out_features=1024, bias=False)
          (k_proj): Linear(in_features=8, out_features=256, bias=False)
          (v_proj): Linear(in_features=8, out_features=256, bias=False)
          (o_proj): Linear(in_features=512, out_features=8, bias=False)
          (q_norm): Qwen3NextRMSNorm((32,), eps=1e-06)
          (k_norm): Qwen3NextRMSNorm((32,), eps=1e-06)
        )
        (mlp): Qwen3NextSparseMoeBlock(
          (gate): Linear(in_features=8, out_features=32, bias=False)
          (experts): ModuleList(
            (0-31): 32 x Qwen3NextMLP(
              (gate_proj): Linear(in_features=8, out_features=32, bias=False)
              (up_proj): Linear(in_features=8, out_features=32, bias=False)
              (down_proj): Linear(in_features=32, out_features=8, bias=False)
              (act_fn): SiLU()
            )
          )
          (shared_expert): Qwen3NextMLP(
            (gate_proj): Linear(in_features=8, out_features=32, bias=False)
            (up_proj): Linear(in_features=8, out_features=32, bias=False)
            (down_proj): Linear(in_features=32, out_features=8, bias=False)
            (act_fn): SiLU()
          )
          (shared_expert_gate): Linear(in_features=8, out_features=1, bias=False)
        )
        (input_layernorm): Qwen3NextRMSNorm((8,), eps=1e-06)
        (post_attention_layernorm): Qwen3NextRMSNorm((8,), eps=1e-06)
      )
    )
    (norm): Qwen3NextRMSNorm((8,), eps=1e-06)
    (rotary_emb): Qwen3NextRotaryEmbedding()
  )
  (lm_head): Linear(in_features=8, out_features=151936, bias=False)
  (mtp): ModuleDict(
    (pre_fc_norm_embedding): RMSNorm((8,), eps=None, elementwise_affine=True)
    (fc): Linear(in_features=16, out_features=8, bias=False)
    (norm): RMSNorm((8,), eps=None, elementwise_affine=True)
    (pre_fc_norm_hidden): RMSNorm((8,), eps=None, elementwise_affine=True)
    (layers): ModuleList(
      (0): Qwen3NextDecoderLayer(
        (self_attn): Qwen3NextAttention(
          (q_proj): Linear(in_features=8, out_features=1024, bias=False)
          (k_proj): Linear(in_features=8, out_features=256, bias=False)
          (v_proj): Linear(in_features=8, out_features=256, bias=False)
          (o_proj): Linear(in_features=512, out_features=8, bias=False)
          (q_norm): Qwen3NextRMSNorm((32,), eps=1e-06)
          (k_norm): Qwen3NextRMSNorm((32,), eps=1e-06)
        )
        (mlp): Qwen3NextSparseMoeBlock(
          (gate): Linear(in_features=8, out_features=32, bias=False)
          (experts): ModuleList(
            (0-31): 32 x Qwen3NextMLP(
              (gate_proj): Linear(in_features=8, out_features=32, bias=False)
              (up_proj): Linear(in_features=8, out_features=32, bias=False)
              (down_proj): Linear(in_features=32, out_features=8, bias=False)
              (act_fn): SiLU()
            )
          )
          (shared_expert): Qwen3NextMLP(
            (gate_proj): Linear(in_features=8, out_features=32, bias=False)
            (up_proj): Linear(in_features=8, out_features=32, bias=False)
            (down_proj): Linear(in_features=32, out_features=8, bias=False)
            (act_fn): SiLU()
          )
          (shared_expert_gate): Linear(in_features=8, out_features=1, bias=False)
        )
        (input_layernorm): Qwen3NextRMSNorm((8,), eps=1e-06)
        (post_attention_layernorm): Qwen3NextRMSNorm((8,), eps=1e-06)
      )
    )
  )
)
```
added_tokens.json ADDED
@@ -0,0 +1,28 @@
{
  "</think>": 151668,
  "</tool_call>": 151658,
  "</tool_response>": 151666,
  "<think>": 151667,
  "<tool_call>": 151657,
  "<tool_response>": 151665,
  "<|box_end|>": 151649,
  "<|box_start|>": 151648,
  "<|endoftext|>": 151643,
  "<|file_sep|>": 151664,
  "<|fim_middle|>": 151660,
  "<|fim_pad|>": 151662,
  "<|fim_prefix|>": 151659,
  "<|fim_suffix|>": 151661,
  "<|im_end|>": 151645,
  "<|im_start|>": 151644,
  "<|image_pad|>": 151655,
  "<|object_ref_end|>": 151647,
  "<|object_ref_start|>": 151646,
  "<|quad_end|>": 151651,
  "<|quad_start|>": 151650,
  "<|repo_name|>": 151663,
  "<|video_pad|>": 151656,
  "<|vision_end|>": 151653,
  "<|vision_pad|>": 151654,
  "<|vision_start|>": 151652
}
chat_template.jinja ADDED
@@ -0,0 +1,86 @@
{%- if tools %}
{{- '<|im_start|>system\n' }}
{%- if messages[0].role == 'system' %}
{{- messages[0].content + '\n\n' }}
{%- endif %}
{{- "# Tools\n\nYou may call one or more functions to assist with the user query.\n\nYou are provided with function signatures within <tools></tools> XML tags:\n<tools>" }}
{%- for tool in tools %}
{{- "\n" }}
{{- tool | tojson }}
{%- endfor %}
{{- "\n</tools>\n\nFor each function call, return a json object with function name and arguments within <tool_call></tool_call> XML tags:\n<tool_call>\n{\"name\": <function-name>, \"arguments\": <args-json-object>}\n</tool_call><|im_end|>\n" }}
{%- else %}
{%- if messages[0].role == 'system' %}
{{- '<|im_start|>system\n' + messages[0].content + '<|im_end|>\n' }}
{%- endif %}
{%- endif %}
{%- set ns = namespace(multi_step_tool=true, last_query_index=messages|length - 1) %}
{%- for message in messages[::-1] %}
{%- set index = (messages|length - 1) - loop.index0 %}
{%- if ns.multi_step_tool and message.role == "user" and message.content is string and not(message.content.startswith('<tool_response>') and message.content.endswith('</tool_response>')) %}
{%- set ns.multi_step_tool = false %}
{%- set ns.last_query_index = index %}
{%- endif %}
{%- endfor %}
{%- for message in messages %}
{%- if message.content is string %}
{%- set content = message.content %}
{%- else %}
{%- set content = '' %}
{%- endif %}
{%- if (message.role == "user") or (message.role == "system" and not loop.first) %}
{{- '<|im_start|>' + message.role + '\n' + content + '<|im_end|>' + '\n' }}
{%- elif message.role == "assistant" %}
{%- set reasoning_content = '' %}
{%- if message.reasoning_content is string %}
{%- set reasoning_content = message.reasoning_content %}
{%- else %}
{%- if '</think>' in content %}
{%- set reasoning_content = content.split('</think>')[0].rstrip('\n').split('<think>')[-1].lstrip('\n') %}
{%- set content = content.split('</think>')[-1].lstrip('\n') %}
{%- endif %}
{%- endif %}
{%- if loop.index0 > ns.last_query_index %}
{%- if loop.last or (not loop.last and reasoning_content) %}
{{- '<|im_start|>' + message.role + '\n<think>\n' + reasoning_content.strip('\n') + '\n</think>\n\n' + content.lstrip('\n') }}
{%- else %}
{{- '<|im_start|>' + message.role + '\n' + content }}
{%- endif %}
{%- else %}
{{- '<|im_start|>' + message.role + '\n' + content }}
{%- endif %}
{%- if message.tool_calls %}
{%- for tool_call in message.tool_calls %}
{%- if (loop.first and content) or (not loop.first) %}
{{- '\n' }}
{%- endif %}
{%- if tool_call.function %}
{%- set tool_call = tool_call.function %}
{%- endif %}
{{- '<tool_call>\n{"name": "' }}
{{- tool_call.name }}
{{- '", "arguments": ' }}
{%- if tool_call.arguments is string %}
{{- tool_call.arguments }}
{%- else %}
{{- tool_call.arguments | tojson }}
{%- endif %}
{{- '}\n</tool_call>' }}
{%- endfor %}
{%- endif %}
{{- '<|im_end|>\n' }}
{%- elif message.role == "tool" %}
{%- if loop.first or (messages[loop.index0 - 1].role != "tool") %}
{{- '<|im_start|>user' }}
{%- endif %}
{{- '\n<tool_response>\n' }}
{{- content }}
{{- '\n</tool_response>' }}
{%- if loop.last or (messages[loop.index0 + 1].role != "tool") %}
{{- '<|im_end|>\n' }}
{%- endif %}
{%- endif %}
{%- endfor %}
{%- if add_generation_prompt %}
{{- '<|im_start|>assistant\n' }}
{%- endif %}
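
The template above wraps tool definitions in a `<tools>...</tools>` block inside the system turn, renders each assistant tool call as a `<tool_call>{"name": ..., "arguments": ...}</tool_call>` block, and folds tool results into a user turn wrapped in `<tool_response>` tags. A hedged sketch of exercising it through this repo's tokenizer (the `get_weather` tool is hypothetical, purely for illustration):

```python
# Sketch: render a tool-call exchange with the chat template above.
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("tiny-random/qwen3-next-moe")

tools = [{
    "type": "function",
    "function": {
        "name": "get_weather",  # hypothetical tool, for illustration only
        "description": "Get the current weather for a city.",
        "parameters": {
            "type": "object",
            "properties": {"city": {"type": "string"}},
            "required": ["city"],
        },
    },
}]
messages = [
    {"role": "user", "content": "What's the weather in Paris?"},
    {"role": "assistant", "content": "",
     "tool_calls": [{"function": {"name": "get_weather", "arguments": {"city": "Paris"}}}]},
    {"role": "tool", "content": "{\"temp_c\": 21}"},
]
text = tok.apply_chat_template(messages, tools=tools, add_generation_prompt=True, tokenize=False)
print(text)  # shows the <tools>, <tool_call>, and <tool_response> formatting

```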
config.json ADDED
@@ -0,0 +1,50 @@
{
  "architectures": [
    "Qwen3NextForCausalLM"
  ],
  "attention_bias": false,
  "attention_dropout": 0.0,
  "bos_token_id": 151643,
  "decoder_sparse_step": 1,
  "dtype": "bfloat16",
  "eos_token_id": 151645,
  "full_attention_interval": 4,
  "head_dim": 32,
  "hidden_act": "silu",
  "hidden_size": 8,
  "initializer_range": 0.02,
  "intermediate_size": 32,
  "layer_types": [
    "linear_attention",
    "linear_attention",
    "linear_attention",
    "full_attention"
  ],
  "linear_conv_kernel_dim": 4,
  "linear_key_head_dim": 128,
  "linear_num_key_heads": 8,
  "linear_num_value_heads": 16,
  "linear_value_head_dim": 128,
  "max_position_embeddings": 262144,
  "mlp_only_layers": [],
  "model_type": "qwen3_next",
  "moe_intermediate_size": 32,
  "norm_topk_prob": true,
  "num_attention_heads": 16,
  "num_experts": 32,
  "num_experts_per_tok": 10,
  "num_hidden_layers": 4,
  "num_key_value_heads": 8,
  "output_router_logits": false,
  "partial_rotary_factor": 0.25,
  "rms_norm_eps": 1e-06,
  "rope_scaling": null,
  "rope_theta": 10000000,
  "router_aux_loss_coef": 0.001,
  "shared_expert_intermediate_size": 32,
  "tie_word_embeddings": false,
  "transformers_version": "4.57.0.dev0",
  "use_cache": true,
  "use_sliding_window": false,
  "vocab_size": 151936
}
generation_config.json ADDED
@@ -0,0 +1,14 @@
{
  "bos_token_id": 151643,
  "do_sample": true,
  "eos_token_id": [
    151645,
    151643
  ],
  "pad_token_id": 151643,
  "temperature": 0.7,
  "top_k": 20,
  "top_p": 0.8,
  "transformers_version": "4.57.0.dev0",
  "trust_remote_code": true
}
merges.txt ADDED
The diff for this file is too large to render. See raw diff
 
model.safetensors ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:fe4378d59f21cb212c6ca10786e64c4bea327f62dbfc93b2f42c83962a80912b
size 5740672
special_tokens_map.json ADDED
@@ -0,0 +1,31 @@
{
  "additional_special_tokens": [
    "<|im_start|>",
    "<|im_end|>",
    "<|object_ref_start|>",
    "<|object_ref_end|>",
    "<|box_start|>",
    "<|box_end|>",
    "<|quad_start|>",
    "<|quad_end|>",
    "<|vision_start|>",
    "<|vision_end|>",
    "<|vision_pad|>",
    "<|image_pad|>",
    "<|video_pad|>"
  ],
  "eos_token": {
    "content": "<|im_end|>",
    "lstrip": false,
    "normalized": false,
    "rstrip": false,
    "single_word": false
  },
  "pad_token": {
    "content": "<|endoftext|>",
    "lstrip": false,
    "normalized": false,
    "rstrip": false,
    "single_word": false
  }
}
tokenizer.json ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:aeb13307a71acd8fe81861d94ad54ab689df773318809eed3cbe794b4492dae4
size 11422654
tokenizer_config.json ADDED
@@ -0,0 +1,239 @@
{
  "add_bos_token": false,
  "add_prefix_space": false,
  "added_tokens_decoder": {
    "151643": {
      "content": "<|endoftext|>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "151644": {
      "content": "<|im_start|>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "151645": {
      "content": "<|im_end|>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "151646": {
      "content": "<|object_ref_start|>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "151647": {
      "content": "<|object_ref_end|>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "151648": {
      "content": "<|box_start|>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "151649": {
      "content": "<|box_end|>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "151650": {
      "content": "<|quad_start|>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "151651": {
      "content": "<|quad_end|>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "151652": {
      "content": "<|vision_start|>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "151653": {
      "content": "<|vision_end|>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "151654": {
      "content": "<|vision_pad|>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "151655": {
      "content": "<|image_pad|>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "151656": {
      "content": "<|video_pad|>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "151657": {
      "content": "<tool_call>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": false
    },
    "151658": {
      "content": "</tool_call>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": false
    },
    "151659": {
      "content": "<|fim_prefix|>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": false
    },
    "151660": {
      "content": "<|fim_middle|>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": false
    },
    "151661": {
      "content": "<|fim_suffix|>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": false
    },
    "151662": {
      "content": "<|fim_pad|>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": false
    },
    "151663": {
      "content": "<|repo_name|>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": false
    },
    "151664": {
      "content": "<|file_sep|>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": false
    },
    "151665": {
      "content": "<tool_response>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": false
    },
    "151666": {
      "content": "</tool_response>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": false
    },
    "151667": {
      "content": "<think>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": false
    },
    "151668": {
      "content": "</think>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": false
    }
  },
  "additional_special_tokens": [
    "<|im_start|>",
    "<|im_end|>",
    "<|object_ref_start|>",
    "<|object_ref_end|>",
    "<|box_start|>",
    "<|box_end|>",
    "<|quad_start|>",
    "<|quad_end|>",
    "<|vision_start|>",
    "<|vision_end|>",
    "<|vision_pad|>",
    "<|image_pad|>",
    "<|video_pad|>"
  ],
  "bos_token": null,
  "clean_up_tokenization_spaces": false,
  "eos_token": "<|im_end|>",
  "errors": "replace",
  "extra_special_tokens": {},
  "model_max_length": 1010000,
  "pad_token": "<|endoftext|>",
  "split_special_tokens": false,
  "tokenizer_class": "Qwen2Tokenizer",
  "unk_token": null
}
vocab.json ADDED
The diff for this file is too large to render. See raw diff