danielhanchen committed (verified)
Commit b796e7f
1 Parent(s): 680155c

Upload folder using huggingface_hub

chat_template.jinja CHANGED
@@ -60,8 +60,8 @@
         {%- set reasoning_content = message.reasoning_content %}
     {%- else %}
         {%- if '</think>' in content %}
-            {%- set reasoning_content = content.split('</think>')[0].rstrip('\n').split('<think>')[-1].lstrip('\n') %}
-            {%- set content = content.split('</think>')[-1].lstrip('\n') %}
+            {%- set reasoning_content = ((content.split('</think>')|first).rstrip('\n').split('<think>')|last).lstrip('\n') %}
+            {%- set content = (content.split('</think>')|last).lstrip('\n') %}
         {%- endif %}
     {%- endif %}
     {%- if loop.index0 > ns.last_query_index %}
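The change above is purely syntactic: Python-style list indexing ([0] / [-1]) is swapped for the equivalent Jinja first / last filters. The commit message does not say why, but a plausible motivation is that some non-Python Jinja renderers used by inference backends handle the filter form more reliably than negative indexing. A minimal sketch, assuming the standard jinja2 package and a made-up assistant message, to check that both expressions extract the same reasoning text:

from jinja2 import Template

# Hypothetical assistant message containing a reasoning block.
content = "<think>\nstep-by-step reasoning\n</think>\nfinal answer"

old_expr = Template(
    "{{ content.split('</think>')[0].rstrip('\\n').split('<think>')[-1].lstrip('\\n') }}"
)
new_expr = Template(
    "{{ ((content.split('</think>')|first).rstrip('\\n').split('<think>')|last).lstrip('\\n') }}"
)

# Both should print: step-by-step reasoning
print(old_expr.render(content=content))
print(new_expr.render(content=content))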
chat_template.json ADDED
@@ -0,0 +1,3 @@
+{
+  "chat_template": "{%- set image_count = namespace(value=0) %}\n{%- set video_count = namespace(value=0) %}\n{%- macro render_content(content, do_vision_count) %}\n {%- if content is string %}\n {{- content }}\n {%- else %}\n {%- for item in content %}\n {%- if 'image' in item or 'image_url' in item or item.type == 'image' %}\n {%- if do_vision_count %}\n {%- set image_count.value = image_count.value + 1 %}\n {%- endif %}\n {%- if add_vision_id %}Picture {{ image_count.value }}: {% endif -%}\n <|vision_start|><|image_pad|><|vision_end|>\n {%- elif 'video' in item or item.type == 'video' %}\n {%- if do_vision_count %}\n {%- set video_count.value = video_count.value + 1 %}\n {%- endif %}\n {%- if add_vision_id %}Video {{ video_count.value }}: {% endif -%}\n <|vision_start|><|video_pad|><|vision_end|>\n {%- elif 'text' in item %}\n {{- item.text }}\n {%- endif %}\n {%- endfor %}\n {%- endif %}\n{%- endmacro %}\n{%- if tools %}\n {{- '<|im_start|>system\\n' }}\n {%- if messages[0].role == 'system' %}\n {{- render_content(messages[0].content, false) + '\\n\\n' }}\n {%- endif %}\n {{- \"# Tools\\n\\nYou may call one or more functions to assist with the user query.\\n\\nYou are provided with function signatures within <tools></tools> XML tags:\\n<tools>\" }}\n {%- for tool in tools %}\n {{- \"\\n\" }}\n {{- tool | tojson }}\n {%- endfor %}\n {{- \"\\n</tools>\\n\\nFor each function call, return a json object with function name and arguments within <tool_call></tool_call> XML tags:\\n<tool_call>\\n{\\\"name\\\": <function-name>, \\\"arguments\\\": <args-json-object>}\\n</tool_call><|im_end|>\\n\" }}\n{%- else %}\n {%- if messages[0].role == 'system' %}\n {{- '<|im_start|>system\\n' + render_content(messages[0].content, false) + '<|im_end|>\\n' }}\n {%- endif %}\n{%- endif %}\n{%- set ns = namespace(multi_step_tool=true, last_query_index=messages|length - 1) %}\n{%- for message in messages[::-1] %}\n {%- set index = (messages|length - 1) - loop.index0 %}\n {%- if ns.multi_step_tool and message.role == \"user\" %}\n {%- set content = render_content(message.content, false) %}\n {%- if not(content.startswith('<tool_response>') and content.endswith('</tool_response>')) %}\n {%- set ns.multi_step_tool = false %}\n {%- set ns.last_query_index = index %}\n {%- endif %}\n {%- endif %}\n{%- endfor %}\n{%- for message in messages %}\n {%- set content = render_content(message.content, True) %}\n {%- if (message.role == \"user\") or (message.role == \"system\" and not loop.first) %}\n {{- '<|im_start|>' + message.role + '\\n' + content + '<|im_end|>' + '\\n' }}\n {%- elif message.role == \"assistant\" %}\n {%- set reasoning_content = '' %}\n {%- if message.reasoning_content is string %}\n {%- set reasoning_content = message.reasoning_content %}\n {%- else %}\n {%- if '</think>' in content %}\n {%- set reasoning_content = content.split('</think>')[0].rstrip('\\n').split('<think>')[-1].lstrip('\\n') %}\n {%- set content = content.split('</think>')[-1].lstrip('\\n') %}\n {%- endif %}\n {%- endif %}\n {%- if loop.index0 > ns.last_query_index %}\n {%- if loop.last or (not loop.last and reasoning_content) %}\n {{- '<|im_start|>' + message.role + '\\n<think>\\n' + reasoning_content.strip('\\n') + '\\n</think>\\n\\n' + content.lstrip('\\n') }}\n {%- else %}\n {{- '<|im_start|>' + message.role + '\\n' + content }}\n {%- endif %}\n {%- else %}\n {{- '<|im_start|>' + message.role + '\\n' + content }}\n {%- endif %}\n {%- if message.tool_calls %}\n {%- for tool_call in message.tool_calls %}\n {%- if (loop.first and content) or (not loop.first) %}\n {{- '\\n' }}\n {%- endif %}\n {%- if tool_call.function %}\n {%- set tool_call = tool_call.function %}\n {%- endif %}\n {{- '<tool_call>\\n{\"name\": \"' }}\n {{- tool_call.name }}\n {{- '\", \"arguments\": ' }}\n {%- if tool_call.arguments is string %}\n {{- tool_call.arguments }}\n {%- else %}\n {{- tool_call.arguments | tojson }}\n {%- endif %}\n {{- '}\\n</tool_call>' }}\n {%- endfor %}\n {%- endif %}\n {{- '<|im_end|>\\n' }}\n {%- elif message.role == \"tool\" %}\n {%- if loop.first or (messages[loop.index0 - 1].role != \"tool\") %}\n {{- '<|im_start|>user' }}\n {%- endif %}\n {{- '\\n<tool_response>\\n' }}\n {{- content }}\n {{- '\\n</tool_response>' }}\n {%- if loop.last or (messages[loop.index0 + 1].role != \"tool\") %}\n {{- '<|im_end|>\\n' }}\n {%- endif %}\n {%- endif %}\n{%- endfor %}\n{%- if add_generation_prompt %}\n {{- '<|im_start|>assistant\\n<think>\\n' }}\n{%- endif %}\n"
+}
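The new chat_template.json carries the same template as chat_template.jinja above; recent transformers versions look for the template in either file, depending on the loading path and version. A rough usage sketch, assuming a recent transformers install and with the repo id left as a placeholder:

from transformers import AutoProcessor

processor = AutoProcessor.from_pretrained("<this-repo>")  # placeholder repo id

messages = [
    {"role": "user", "content": [
        {"type": "image", "image": "https://example.com/cat.png"},
        {"type": "text", "text": "Describe the picture."},
    ]},
]

# Renders the Jinja template into the <|im_start|>-delimited prompt string,
# ending with '<|im_start|>assistant' and an opening '<think>' tag.
prompt = processor.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
print(prompt)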
config.json CHANGED
@@ -2,8 +2,6 @@
   "architectures": [
     "Qwen3VLForConditionalGeneration"
   ],
-  "torch_dtype": "bfloat16",
-  "eos_token_id": 151645,
   "image_token_id": 151655,
   "model_type": "qwen3_vl",
   "pad_token_id": 151654,
@@ -49,7 +47,6 @@
     17
   ],
   "depth": 24,
-  "torch_dtype": "bfloat16",
   "hidden_act": "gelu_pytorch_tanh",
   "hidden_size": 1024,
   "in_channels": 3,
@@ -65,4 +62,4 @@
   },
   "vision_end_token_id": 151653,
   "vision_start_token_id": 151652
-}
+}
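Both "torch_dtype" entries and the top-level "eos_token_id" are dropped from config.json: the end-of-sequence ids still live in generation_config.json below, and the load dtype is now left to the caller rather than pinned in the config. A hedged sketch of what that means at load time (placeholder repo id; the class name comes from the "architectures" field above and assumes a transformers build that ships Qwen3-VL):

import torch
from transformers import Qwen3VLForConditionalGeneration

# With "torch_dtype" gone from config.json, choose the compute dtype explicitly.
model = Qwen3VLForConditionalGeneration.from_pretrained(
    "<this-repo>",               # placeholder repo id
    torch_dtype=torch.bfloat16,  # explicit, since the config no longer pins it
    device_map="auto",
)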
generation_config.json CHANGED
@@ -1,12 +1,14 @@
 {
-  "bos_token_id": 151643,
-  "do_sample": true,
-  "eos_token_id": [
-    151645,
-    151643
-  ],
-  "pad_token_id": 151654,
-  "top_k": 20,
-  "top_p": 0.95,
-  "transformers_version": "4.57.0"
-}
+  "bos_token_id": 151643,
+  "pad_token_id": 151643,
+  "do_sample": true,
+  "eos_token_id": [
+    151645,
+    151643
+  ],
+  "top_k": 20,
+  "top_p": 0.95,
+  "repetition_penalty": 1.0,
+  "temperature": 1.0,
+  "transformers_version": "4.56.0"
+}
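The rewritten generation_config.json keeps sampling on (do_sample with top_k=20, top_p=0.95), adds explicit temperature and repetition_penalty defaults of 1.0, switches pad_token_id from 151654 to 151643, and pins an earlier transformers_version. These values are the defaults that generate() falls back on; anything passed explicitly overrides them for that call. A short sketch, reusing the hypothetical processor and model objects from the sketches above:

inputs = processor(text=prompt, return_tensors="pt").to(model.device)

# do_sample, top_k, top_p, temperature, etc. come from generation_config.json
# unless overridden here for this call.
output_ids = model.generate(**inputs, max_new_tokens=256)
print(processor.batch_decode(output_ids, skip_special_tokens=True)[0])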
model-00001-of-00002.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:d4cebe7b7dcac5ad58ea9bbe39983aca54c9d75d1809d419cbf4760b430db6af
-size 4990497880
+oid sha256:02cab5e606c91e5ede7b1a70c49fa5abc58c77692362ee3be2e6656fd1327793
+size 4967229296
model-00002-of-00002.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:210f1f859ee18716c7c6f3c03da22be8fe7e9e1ceedf2a2816dc6235ebde2418
-size 3885221448
+oid sha256:000ef17ec19fdd0531d825080c896c86025bc93036de1f34e3d133fa71c2cccf
+size 3908490048
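Both shards were re-uploaded, so the LFS pointer files record new sha256 digests and byte sizes; the tensors were redistributed between the two files (see the index diff below) while the total tensor payload reported by the index stays the same. A small sketch for verifying locally downloaded shards against the new pointer values (file paths are assumed to be in the current directory):

import hashlib
import os

def sha256_of(path, chunk_size=1 << 20):
    digest = hashlib.sha256()
    with open(path, "rb") as f:
        while block := f.read(chunk_size):
            digest.update(block)
    return digest.hexdigest()

# Expected values taken from the new LFS pointers above.
expected = {
    "model-00001-of-00002.safetensors": ("02cab5e606c91e5ede7b1a70c49fa5abc58c77692362ee3be2e6656fd1327793", 4967229296),
    "model-00002-of-00002.safetensors": ("000ef17ec19fdd0531d825080c896c86025bc93036de1f34e3d133fa71c2cccf", 3908490048),
}

for name, (digest, size) in expected.items():
    ok_size = os.path.getsize(name) == size
    ok_hash = sha256_of(name) == digest
    print(f"{name}: size_ok={ok_size} hash_ok={ok_hash}")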
model.safetensors.index.json CHANGED
@@ -1,6 +1,5 @@
 {
   "metadata": {
-    "total_parameters": 4437815808,
     "total_size": 8875631616
   },
   "weight_map": {
@@ -93,50 +92,50 @@
     "model.language_model.layers.15.self_attn.q_norm.weight": "model-00001-of-00002.safetensors",
     "model.language_model.layers.15.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
     "model.language_model.layers.15.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
-    "model.language_model.layers.16.input_layernorm.weight": "model-00002-of-00002.safetensors",
-    "model.language_model.layers.16.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+    "model.language_model.layers.16.input_layernorm.weight": "model-00001-of-00002.safetensors",
+    "model.language_model.layers.16.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
     "model.language_model.layers.16.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
     "model.language_model.layers.16.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
-    "model.language_model.layers.16.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+    "model.language_model.layers.16.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
     "model.language_model.layers.16.self_attn.k_norm.weight": "model-00001-of-00002.safetensors",
     "model.language_model.layers.16.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
     "model.language_model.layers.16.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
     "model.language_model.layers.16.self_attn.q_norm.weight": "model-00001-of-00002.safetensors",
     "model.language_model.layers.16.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
     "model.language_model.layers.16.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
-    "model.language_model.layers.17.input_layernorm.weight": "model-00002-of-00002.safetensors",
-    "model.language_model.layers.17.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
-    "model.language_model.layers.17.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
-    "model.language_model.layers.17.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
-    "model.language_model.layers.17.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
-    "model.language_model.layers.17.self_attn.k_norm.weight": "model-00002-of-00002.safetensors",
-    "model.language_model.layers.17.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
-    "model.language_model.layers.17.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
-    "model.language_model.layers.17.self_attn.q_norm.weight": "model-00002-of-00002.safetensors",
-    "model.language_model.layers.17.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
-    "model.language_model.layers.17.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
-    "model.language_model.layers.18.input_layernorm.weight": "model-00002-of-00002.safetensors",
-    "model.language_model.layers.18.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
-    "model.language_model.layers.18.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
-    "model.language_model.layers.18.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
-    "model.language_model.layers.18.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
-    "model.language_model.layers.18.self_attn.k_norm.weight": "model-00002-of-00002.safetensors",
-    "model.language_model.layers.18.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
-    "model.language_model.layers.18.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
-    "model.language_model.layers.18.self_attn.q_norm.weight": "model-00002-of-00002.safetensors",
-    "model.language_model.layers.18.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
-    "model.language_model.layers.18.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
-    "model.language_model.layers.19.input_layernorm.weight": "model-00002-of-00002.safetensors",
-    "model.language_model.layers.19.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
-    "model.language_model.layers.19.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
-    "model.language_model.layers.19.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
-    "model.language_model.layers.19.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
-    "model.language_model.layers.19.self_attn.k_norm.weight": "model-00002-of-00002.safetensors",
-    "model.language_model.layers.19.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
-    "model.language_model.layers.19.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
-    "model.language_model.layers.19.self_attn.q_norm.weight": "model-00002-of-00002.safetensors",
-    "model.language_model.layers.19.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
-    "model.language_model.layers.19.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+    "model.language_model.layers.17.input_layernorm.weight": "model-00001-of-00002.safetensors",
+    "model.language_model.layers.17.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+    "model.language_model.layers.17.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+    "model.language_model.layers.17.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+    "model.language_model.layers.17.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+    "model.language_model.layers.17.self_attn.k_norm.weight": "model-00001-of-00002.safetensors",
+    "model.language_model.layers.17.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+    "model.language_model.layers.17.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+    "model.language_model.layers.17.self_attn.q_norm.weight": "model-00001-of-00002.safetensors",
+    "model.language_model.layers.17.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+    "model.language_model.layers.17.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+    "model.language_model.layers.18.input_layernorm.weight": "model-00001-of-00002.safetensors",
+    "model.language_model.layers.18.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+    "model.language_model.layers.18.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+    "model.language_model.layers.18.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+    "model.language_model.layers.18.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+    "model.language_model.layers.18.self_attn.k_norm.weight": "model-00001-of-00002.safetensors",
+    "model.language_model.layers.18.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+    "model.language_model.layers.18.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+    "model.language_model.layers.18.self_attn.q_norm.weight": "model-00001-of-00002.safetensors",
+    "model.language_model.layers.18.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+    "model.language_model.layers.18.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+    "model.language_model.layers.19.input_layernorm.weight": "model-00001-of-00002.safetensors",
+    "model.language_model.layers.19.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+    "model.language_model.layers.19.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+    "model.language_model.layers.19.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+    "model.language_model.layers.19.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+    "model.language_model.layers.19.self_attn.k_norm.weight": "model-00001-of-00002.safetensors",
+    "model.language_model.layers.19.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+    "model.language_model.layers.19.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+    "model.language_model.layers.19.self_attn.q_norm.weight": "model-00001-of-00002.safetensors",
+    "model.language_model.layers.19.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+    "model.language_model.layers.19.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
     "model.language_model.layers.2.input_layernorm.weight": "model-00001-of-00002.safetensors",
     "model.language_model.layers.2.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
     "model.language_model.layers.2.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
@@ -148,17 +147,17 @@
     "model.language_model.layers.2.self_attn.q_norm.weight": "model-00001-of-00002.safetensors",
     "model.language_model.layers.2.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
     "model.language_model.layers.2.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
-    "model.language_model.layers.20.input_layernorm.weight": "model-00002-of-00002.safetensors",
+    "model.language_model.layers.20.input_layernorm.weight": "model-00001-of-00002.safetensors",
     "model.language_model.layers.20.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
-    "model.language_model.layers.20.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
-    "model.language_model.layers.20.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
-    "model.language_model.layers.20.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
-    "model.language_model.layers.20.self_attn.k_norm.weight": "model-00002-of-00002.safetensors",
-    "model.language_model.layers.20.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
-    "model.language_model.layers.20.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
-    "model.language_model.layers.20.self_attn.q_norm.weight": "model-00002-of-00002.safetensors",
-    "model.language_model.layers.20.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
-    "model.language_model.layers.20.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+    "model.language_model.layers.20.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+    "model.language_model.layers.20.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+    "model.language_model.layers.20.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+    "model.language_model.layers.20.self_attn.k_norm.weight": "model-00001-of-00002.safetensors",
+    "model.language_model.layers.20.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+    "model.language_model.layers.20.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+    "model.language_model.layers.20.self_attn.q_norm.weight": "model-00001-of-00002.safetensors",
+    "model.language_model.layers.20.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+    "model.language_model.layers.20.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
     "model.language_model.layers.21.input_layernorm.weight": "model-00002-of-00002.safetensors",
     "model.language_model.layers.21.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
     "model.language_model.layers.21.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
@@ -402,320 +401,320 @@
     "model.language_model.layers.9.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
     "model.language_model.layers.9.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
     "model.language_model.norm.weight": "model-00002-of-00002.safetensors",
-    "model.visual.blocks.0.attn.proj.bias": "model-00001-of-00002.safetensors",
-    "model.visual.blocks.0.attn.proj.weight": "model-00001-of-00002.safetensors",
-    "model.visual.blocks.0.attn.qkv.bias": "model-00001-of-00002.safetensors",
-    "model.visual.blocks.0.attn.qkv.weight": "model-00001-of-00002.safetensors",
-    "model.visual.blocks.0.mlp.linear_fc1.bias": "model-00001-of-00002.safetensors",
-    "model.visual.blocks.0.mlp.linear_fc1.weight": "model-00001-of-00002.safetensors",
-    "model.visual.blocks.0.mlp.linear_fc2.bias": "model-00001-of-00002.safetensors",
-    "model.visual.blocks.0.mlp.linear_fc2.weight": "model-00001-of-00002.safetensors",
-    "model.visual.blocks.0.norm1.bias": "model-00001-of-00002.safetensors",
-    "model.visual.blocks.0.norm1.weight": "model-00001-of-00002.safetensors",
-    "model.visual.blocks.0.norm2.bias": "model-00001-of-00002.safetensors",
-    "model.visual.blocks.0.norm2.weight": "model-00001-of-00002.safetensors",
-    "model.visual.blocks.1.attn.proj.bias": "model-00001-of-00002.safetensors",
-    "model.visual.blocks.1.attn.proj.weight": "model-00001-of-00002.safetensors",
-    "model.visual.blocks.1.attn.qkv.bias": "model-00001-of-00002.safetensors",
-    "model.visual.blocks.1.attn.qkv.weight": "model-00001-of-00002.safetensors",
-    "model.visual.blocks.1.mlp.linear_fc1.bias": "model-00001-of-00002.safetensors",
-    "model.visual.blocks.1.mlp.linear_fc1.weight": "model-00001-of-00002.safetensors",
-    "model.visual.blocks.1.mlp.linear_fc2.bias": "model-00001-of-00002.safetensors",
-    "model.visual.blocks.1.mlp.linear_fc2.weight": "model-00001-of-00002.safetensors",
-    "model.visual.blocks.1.norm1.bias": "model-00001-of-00002.safetensors",
-    "model.visual.blocks.1.norm1.weight": "model-00001-of-00002.safetensors",
-    "model.visual.blocks.1.norm2.bias": "model-00001-of-00002.safetensors",
-    "model.visual.blocks.1.norm2.weight": "model-00001-of-00002.safetensors",
-    "model.visual.blocks.10.attn.proj.bias": "model-00001-of-00002.safetensors",
-    "model.visual.blocks.10.attn.proj.weight": "model-00001-of-00002.safetensors",
-    "model.visual.blocks.10.attn.qkv.bias": "model-00001-of-00002.safetensors",
-    "model.visual.blocks.10.attn.qkv.weight": "model-00001-of-00002.safetensors",
-    "model.visual.blocks.10.mlp.linear_fc1.bias": "model-00001-of-00002.safetensors",
-    "model.visual.blocks.10.mlp.linear_fc1.weight": "model-00001-of-00002.safetensors",
-    "model.visual.blocks.10.mlp.linear_fc2.bias": "model-00001-of-00002.safetensors",
-    "model.visual.blocks.10.mlp.linear_fc2.weight": "model-00001-of-00002.safetensors",
-    "model.visual.blocks.10.norm1.bias": "model-00001-of-00002.safetensors",
-    "model.visual.blocks.10.norm1.weight": "model-00001-of-00002.safetensors",
-    "model.visual.blocks.10.norm2.bias": "model-00001-of-00002.safetensors",
-    "model.visual.blocks.10.norm2.weight": "model-00001-of-00002.safetensors",
-    "model.visual.blocks.11.attn.proj.bias": "model-00001-of-00002.safetensors",
-    "model.visual.blocks.11.attn.proj.weight": "model-00001-of-00002.safetensors",
-    "model.visual.blocks.11.attn.qkv.bias": "model-00001-of-00002.safetensors",
-    "model.visual.blocks.11.attn.qkv.weight": "model-00001-of-00002.safetensors",
-    "model.visual.blocks.11.mlp.linear_fc1.bias": "model-00001-of-00002.safetensors",
-    "model.visual.blocks.11.mlp.linear_fc1.weight": "model-00001-of-00002.safetensors",
-    "model.visual.blocks.11.mlp.linear_fc2.bias": "model-00001-of-00002.safetensors",
-    "model.visual.blocks.11.mlp.linear_fc2.weight": "model-00001-of-00002.safetensors",
-    "model.visual.blocks.11.norm1.bias": "model-00001-of-00002.safetensors",
-    "model.visual.blocks.11.norm1.weight": "model-00001-of-00002.safetensors",
-    "model.visual.blocks.11.norm2.bias": "model-00001-of-00002.safetensors",
-    "model.visual.blocks.11.norm2.weight": "model-00001-of-00002.safetensors",
-    "model.visual.blocks.12.attn.proj.bias": "model-00001-of-00002.safetensors",
-    "model.visual.blocks.12.attn.proj.weight": "model-00001-of-00002.safetensors",
-    "model.visual.blocks.12.attn.qkv.bias": "model-00001-of-00002.safetensors",
-    "model.visual.blocks.12.attn.qkv.weight": "model-00001-of-00002.safetensors",
-    "model.visual.blocks.12.mlp.linear_fc1.bias": "model-00001-of-00002.safetensors",
-    "model.visual.blocks.12.mlp.linear_fc1.weight": "model-00001-of-00002.safetensors",
-    "model.visual.blocks.12.mlp.linear_fc2.bias": "model-00001-of-00002.safetensors",
-    "model.visual.blocks.12.mlp.linear_fc2.weight": "model-00001-of-00002.safetensors",
-    "model.visual.blocks.12.norm1.bias": "model-00001-of-00002.safetensors",
-    "model.visual.blocks.12.norm1.weight": "model-00001-of-00002.safetensors",
-    "model.visual.blocks.12.norm2.bias": "model-00001-of-00002.safetensors",
-    "model.visual.blocks.12.norm2.weight": "model-00001-of-00002.safetensors",
-    "model.visual.blocks.13.attn.proj.bias": "model-00001-of-00002.safetensors",
-    "model.visual.blocks.13.attn.proj.weight": "model-00001-of-00002.safetensors",
-    "model.visual.blocks.13.attn.qkv.bias": "model-00001-of-00002.safetensors",
-    "model.visual.blocks.13.attn.qkv.weight": "model-00001-of-00002.safetensors",
-    "model.visual.blocks.13.mlp.linear_fc1.bias": "model-00001-of-00002.safetensors",
-    "model.visual.blocks.13.mlp.linear_fc1.weight": "model-00001-of-00002.safetensors",
-    "model.visual.blocks.13.mlp.linear_fc2.bias": "model-00001-of-00002.safetensors",
-    "model.visual.blocks.13.mlp.linear_fc2.weight": "model-00001-of-00002.safetensors",
-    "model.visual.blocks.13.norm1.bias": "model-00001-of-00002.safetensors",
-    "model.visual.blocks.13.norm1.weight": "model-00001-of-00002.safetensors",
-    "model.visual.blocks.13.norm2.bias": "model-00001-of-00002.safetensors",
-    "model.visual.blocks.13.norm2.weight": "model-00001-of-00002.safetensors",
-    "model.visual.blocks.14.attn.proj.bias": "model-00001-of-00002.safetensors",
-    "model.visual.blocks.14.attn.proj.weight": "model-00001-of-00002.safetensors",
-    "model.visual.blocks.14.attn.qkv.bias": "model-00001-of-00002.safetensors",
-    "model.visual.blocks.14.attn.qkv.weight": "model-00001-of-00002.safetensors",
-    "model.visual.blocks.14.mlp.linear_fc1.bias": "model-00001-of-00002.safetensors",
-    "model.visual.blocks.14.mlp.linear_fc1.weight": "model-00001-of-00002.safetensors",
-    "model.visual.blocks.14.mlp.linear_fc2.bias": "model-00001-of-00002.safetensors",
-    "model.visual.blocks.14.mlp.linear_fc2.weight": "model-00001-of-00002.safetensors",
-    "model.visual.blocks.14.norm1.bias": "model-00001-of-00002.safetensors",
-    "model.visual.blocks.14.norm1.weight": "model-00001-of-00002.safetensors",
-    "model.visual.blocks.14.norm2.bias": "model-00001-of-00002.safetensors",
-    "model.visual.blocks.14.norm2.weight": "model-00001-of-00002.safetensors",
-    "model.visual.blocks.15.attn.proj.bias": "model-00001-of-00002.safetensors",
-    "model.visual.blocks.15.attn.proj.weight": "model-00001-of-00002.safetensors",
-    "model.visual.blocks.15.attn.qkv.bias": "model-00001-of-00002.safetensors",
-    "model.visual.blocks.15.attn.qkv.weight": "model-00001-of-00002.safetensors",
-    "model.visual.blocks.15.mlp.linear_fc1.bias": "model-00001-of-00002.safetensors",
-    "model.visual.blocks.15.mlp.linear_fc1.weight": "model-00001-of-00002.safetensors",
-    "model.visual.blocks.15.mlp.linear_fc2.bias": "model-00001-of-00002.safetensors",
-    "model.visual.blocks.15.mlp.linear_fc2.weight": "model-00001-of-00002.safetensors",
-    "model.visual.blocks.15.norm1.bias": "model-00001-of-00002.safetensors",
-    "model.visual.blocks.15.norm1.weight": "model-00001-of-00002.safetensors",
-    "model.visual.blocks.15.norm2.bias": "model-00001-of-00002.safetensors",
-    "model.visual.blocks.15.norm2.weight": "model-00001-of-00002.safetensors",
-    "model.visual.blocks.16.attn.proj.bias": "model-00001-of-00002.safetensors",
-    "model.visual.blocks.16.attn.proj.weight": "model-00001-of-00002.safetensors",
-    "model.visual.blocks.16.attn.qkv.bias": "model-00001-of-00002.safetensors",
-    "model.visual.blocks.16.attn.qkv.weight": "model-00001-of-00002.safetensors",
-    "model.visual.blocks.16.mlp.linear_fc1.bias": "model-00001-of-00002.safetensors",
-    "model.visual.blocks.16.mlp.linear_fc1.weight": "model-00001-of-00002.safetensors",
-    "model.visual.blocks.16.mlp.linear_fc2.bias": "model-00001-of-00002.safetensors",
-    "model.visual.blocks.16.mlp.linear_fc2.weight": "model-00001-of-00002.safetensors",
-    "model.visual.blocks.16.norm1.bias": "model-00001-of-00002.safetensors",
-    "model.visual.blocks.16.norm1.weight": "model-00001-of-00002.safetensors",
-    "model.visual.blocks.16.norm2.bias": "model-00001-of-00002.safetensors",
-    "model.visual.blocks.16.norm2.weight": "model-00001-of-00002.safetensors",
-    "model.visual.blocks.17.attn.proj.bias": "model-00001-of-00002.safetensors",
-    "model.visual.blocks.17.attn.proj.weight": "model-00001-of-00002.safetensors",
-    "model.visual.blocks.17.attn.qkv.bias": "model-00001-of-00002.safetensors",
-    "model.visual.blocks.17.attn.qkv.weight": "model-00001-of-00002.safetensors",
-    "model.visual.blocks.17.mlp.linear_fc1.bias": "model-00001-of-00002.safetensors",
-    "model.visual.blocks.17.mlp.linear_fc1.weight": "model-00001-of-00002.safetensors",
-    "model.visual.blocks.17.mlp.linear_fc2.bias": "model-00001-of-00002.safetensors",
-    "model.visual.blocks.17.mlp.linear_fc2.weight": "model-00001-of-00002.safetensors",
-    "model.visual.blocks.17.norm1.bias": "model-00001-of-00002.safetensors",
-    "model.visual.blocks.17.norm1.weight": "model-00001-of-00002.safetensors",
-    "model.visual.blocks.17.norm2.bias": "model-00001-of-00002.safetensors",
-    "model.visual.blocks.17.norm2.weight": "model-00001-of-00002.safetensors",
-    "model.visual.blocks.18.attn.proj.bias": "model-00001-of-00002.safetensors",
-    "model.visual.blocks.18.attn.proj.weight": "model-00001-of-00002.safetensors",
-    "model.visual.blocks.18.attn.qkv.bias": "model-00001-of-00002.safetensors",
-    "model.visual.blocks.18.attn.qkv.weight": "model-00001-of-00002.safetensors",
-    "model.visual.blocks.18.mlp.linear_fc1.bias": "model-00001-of-00002.safetensors",
-    "model.visual.blocks.18.mlp.linear_fc1.weight": "model-00001-of-00002.safetensors",
-    "model.visual.blocks.18.mlp.linear_fc2.bias": "model-00001-of-00002.safetensors",
-    "model.visual.blocks.18.mlp.linear_fc2.weight": "model-00001-of-00002.safetensors",
-    "model.visual.blocks.18.norm1.bias": "model-00001-of-00002.safetensors",
-    "model.visual.blocks.18.norm1.weight": "model-00001-of-00002.safetensors",
-    "model.visual.blocks.18.norm2.bias": "model-00001-of-00002.safetensors",
-    "model.visual.blocks.18.norm2.weight": "model-00001-of-00002.safetensors",
-    "model.visual.blocks.19.attn.proj.bias": "model-00001-of-00002.safetensors",
-    "model.visual.blocks.19.attn.proj.weight": "model-00001-of-00002.safetensors",
-    "model.visual.blocks.19.attn.qkv.bias": "model-00001-of-00002.safetensors",
-    "model.visual.blocks.19.attn.qkv.weight": "model-00001-of-00002.safetensors",
-    "model.visual.blocks.19.mlp.linear_fc1.bias": "model-00001-of-00002.safetensors",
-    "model.visual.blocks.19.mlp.linear_fc1.weight": "model-00001-of-00002.safetensors",
-    "model.visual.blocks.19.mlp.linear_fc2.bias": "model-00001-of-00002.safetensors",
-    "model.visual.blocks.19.mlp.linear_fc2.weight": "model-00001-of-00002.safetensors",
-    "model.visual.blocks.19.norm1.bias": "model-00001-of-00002.safetensors",
-    "model.visual.blocks.19.norm1.weight": "model-00001-of-00002.safetensors",
-    "model.visual.blocks.19.norm2.bias": "model-00001-of-00002.safetensors",
-    "model.visual.blocks.19.norm2.weight": "model-00001-of-00002.safetensors",
-    "model.visual.blocks.2.attn.proj.bias": "model-00001-of-00002.safetensors",
-    "model.visual.blocks.2.attn.proj.weight": "model-00001-of-00002.safetensors",
-    "model.visual.blocks.2.attn.qkv.bias": "model-00001-of-00002.safetensors",
-    "model.visual.blocks.2.attn.qkv.weight": "model-00001-of-00002.safetensors",
-    "model.visual.blocks.2.mlp.linear_fc1.bias": "model-00001-of-00002.safetensors",
-    "model.visual.blocks.2.mlp.linear_fc1.weight": "model-00001-of-00002.safetensors",
-    "model.visual.blocks.2.mlp.linear_fc2.bias": "model-00001-of-00002.safetensors",
-    "model.visual.blocks.2.mlp.linear_fc2.weight": "model-00001-of-00002.safetensors",
-    "model.visual.blocks.2.norm1.bias": "model-00001-of-00002.safetensors",
-    "model.visual.blocks.2.norm1.weight": "model-00001-of-00002.safetensors",
-    "model.visual.blocks.2.norm2.bias": "model-00001-of-00002.safetensors",
-    "model.visual.blocks.2.norm2.weight": "model-00001-of-00002.safetensors",
-    "model.visual.blocks.20.attn.proj.bias": "model-00001-of-00002.safetensors",
-    "model.visual.blocks.20.attn.proj.weight": "model-00001-of-00002.safetensors",
-    "model.visual.blocks.20.attn.qkv.bias": "model-00001-of-00002.safetensors",
-    "model.visual.blocks.20.attn.qkv.weight": "model-00001-of-00002.safetensors",
-    "model.visual.blocks.20.mlp.linear_fc1.bias": "model-00001-of-00002.safetensors",
-    "model.visual.blocks.20.mlp.linear_fc1.weight": "model-00001-of-00002.safetensors",
-    "model.visual.blocks.20.mlp.linear_fc2.bias": "model-00001-of-00002.safetensors",
-    "model.visual.blocks.20.mlp.linear_fc2.weight": "model-00001-of-00002.safetensors",
-    "model.visual.blocks.20.norm1.bias": "model-00001-of-00002.safetensors",
-    "model.visual.blocks.20.norm1.weight": "model-00001-of-00002.safetensors",
-    "model.visual.blocks.20.norm2.bias": "model-00001-of-00002.safetensors",
-    "model.visual.blocks.20.norm2.weight": "model-00001-of-00002.safetensors",
-    "model.visual.blocks.21.attn.proj.bias": "model-00001-of-00002.safetensors",
-    "model.visual.blocks.21.attn.proj.weight": "model-00001-of-00002.safetensors",
-    "model.visual.blocks.21.attn.qkv.bias": "model-00001-of-00002.safetensors",
-    "model.visual.blocks.21.attn.qkv.weight": "model-00001-of-00002.safetensors",
-    "model.visual.blocks.21.mlp.linear_fc1.bias": "model-00001-of-00002.safetensors",
-    "model.visual.blocks.21.mlp.linear_fc1.weight": "model-00001-of-00002.safetensors",
-    "model.visual.blocks.21.mlp.linear_fc2.bias": "model-00001-of-00002.safetensors",
-    "model.visual.blocks.21.mlp.linear_fc2.weight": "model-00001-of-00002.safetensors",
-    "model.visual.blocks.21.norm1.bias": "model-00001-of-00002.safetensors",
-    "model.visual.blocks.21.norm1.weight": "model-00001-of-00002.safetensors",
-    "model.visual.blocks.21.norm2.bias": "model-00001-of-00002.safetensors",
-    "model.visual.blocks.21.norm2.weight": "model-00001-of-00002.safetensors",
-    "model.visual.blocks.22.attn.proj.bias": "model-00001-of-00002.safetensors",
-    "model.visual.blocks.22.attn.proj.weight": "model-00001-of-00002.safetensors",
-    "model.visual.blocks.22.attn.qkv.bias": "model-00001-of-00002.safetensors",
-    "model.visual.blocks.22.attn.qkv.weight": "model-00001-of-00002.safetensors",
-    "model.visual.blocks.22.mlp.linear_fc1.bias": "model-00001-of-00002.safetensors",
-    "model.visual.blocks.22.mlp.linear_fc1.weight": "model-00001-of-00002.safetensors",
-    "model.visual.blocks.22.mlp.linear_fc2.bias": "model-00001-of-00002.safetensors",
-    "model.visual.blocks.22.mlp.linear_fc2.weight": "model-00001-of-00002.safetensors",
-    "model.visual.blocks.22.norm1.bias": "model-00001-of-00002.safetensors",
-    "model.visual.blocks.22.norm1.weight": "model-00001-of-00002.safetensors",
-    "model.visual.blocks.22.norm2.bias": "model-00001-of-00002.safetensors",
-    "model.visual.blocks.22.norm2.weight": "model-00001-of-00002.safetensors",
-    "model.visual.blocks.23.attn.proj.bias": "model-00001-of-00002.safetensors",
-    "model.visual.blocks.23.attn.proj.weight": "model-00001-of-00002.safetensors",
-    "model.visual.blocks.23.attn.qkv.bias": "model-00001-of-00002.safetensors",
-    "model.visual.blocks.23.attn.qkv.weight": "model-00001-of-00002.safetensors",
-    "model.visual.blocks.23.mlp.linear_fc1.bias": "model-00001-of-00002.safetensors",
-    "model.visual.blocks.23.mlp.linear_fc1.weight": "model-00001-of-00002.safetensors",
-    "model.visual.blocks.23.mlp.linear_fc2.bias": "model-00001-of-00002.safetensors",
-    "model.visual.blocks.23.mlp.linear_fc2.weight": "model-00001-of-00002.safetensors",
-    "model.visual.blocks.23.norm1.bias": "model-00001-of-00002.safetensors",
-    "model.visual.blocks.23.norm1.weight": "model-00001-of-00002.safetensors",
-    "model.visual.blocks.23.norm2.bias": "model-00001-of-00002.safetensors",
-    "model.visual.blocks.23.norm2.weight": "model-00001-of-00002.safetensors",
-    "model.visual.blocks.3.attn.proj.bias": "model-00001-of-00002.safetensors",
-    "model.visual.blocks.3.attn.proj.weight": "model-00001-of-00002.safetensors",
-    "model.visual.blocks.3.attn.qkv.bias": "model-00001-of-00002.safetensors",
-    "model.visual.blocks.3.attn.qkv.weight": "model-00001-of-00002.safetensors",
-    "model.visual.blocks.3.mlp.linear_fc1.bias": "model-00001-of-00002.safetensors",
-    "model.visual.blocks.3.mlp.linear_fc1.weight": "model-00001-of-00002.safetensors",
-    "model.visual.blocks.3.mlp.linear_fc2.bias": "model-00001-of-00002.safetensors",
-    "model.visual.blocks.3.mlp.linear_fc2.weight": "model-00001-of-00002.safetensors",
-    "model.visual.blocks.3.norm1.bias": "model-00001-of-00002.safetensors",
-    "model.visual.blocks.3.norm1.weight": "model-00001-of-00002.safetensors",
-    "model.visual.blocks.3.norm2.bias": "model-00001-of-00002.safetensors",
-    "model.visual.blocks.3.norm2.weight": "model-00001-of-00002.safetensors",
-    "model.visual.blocks.4.attn.proj.bias": "model-00001-of-00002.safetensors",
-    "model.visual.blocks.4.attn.proj.weight": "model-00001-of-00002.safetensors",
-    "model.visual.blocks.4.attn.qkv.bias": "model-00001-of-00002.safetensors",
-    "model.visual.blocks.4.attn.qkv.weight": "model-00001-of-00002.safetensors",
-    "model.visual.blocks.4.mlp.linear_fc1.bias": "model-00001-of-00002.safetensors",
-    "model.visual.blocks.4.mlp.linear_fc1.weight": "model-00001-of-00002.safetensors",
-    "model.visual.blocks.4.mlp.linear_fc2.bias": "model-00001-of-00002.safetensors",
-    "model.visual.blocks.4.mlp.linear_fc2.weight": "model-00001-of-00002.safetensors",
-    "model.visual.blocks.4.norm1.bias": "model-00001-of-00002.safetensors",
-    "model.visual.blocks.4.norm1.weight": "model-00001-of-00002.safetensors",
-    "model.visual.blocks.4.norm2.bias": "model-00001-of-00002.safetensors",
-    "model.visual.blocks.4.norm2.weight": "model-00001-of-00002.safetensors",
-    "model.visual.blocks.5.attn.proj.bias": "model-00001-of-00002.safetensors",
-    "model.visual.blocks.5.attn.proj.weight": "model-00001-of-00002.safetensors",
-    "model.visual.blocks.5.attn.qkv.bias": "model-00001-of-00002.safetensors",
-    "model.visual.blocks.5.attn.qkv.weight": "model-00001-of-00002.safetensors",
-    "model.visual.blocks.5.mlp.linear_fc1.bias": "model-00001-of-00002.safetensors",
-    "model.visual.blocks.5.mlp.linear_fc1.weight": "model-00001-of-00002.safetensors",
-    "model.visual.blocks.5.mlp.linear_fc2.bias": "model-00001-of-00002.safetensors",
-    "model.visual.blocks.5.mlp.linear_fc2.weight": "model-00001-of-00002.safetensors",
-    "model.visual.blocks.5.norm1.bias": "model-00001-of-00002.safetensors",
-    "model.visual.blocks.5.norm1.weight": "model-00001-of-00002.safetensors",
-    "model.visual.blocks.5.norm2.bias": "model-00001-of-00002.safetensors",
-    "model.visual.blocks.5.norm2.weight": "model-00001-of-00002.safetensors",
-    "model.visual.blocks.6.attn.proj.bias": "model-00001-of-00002.safetensors",
-    "model.visual.blocks.6.attn.proj.weight": "model-00001-of-00002.safetensors",
-    "model.visual.blocks.6.attn.qkv.bias": "model-00001-of-00002.safetensors",
-    "model.visual.blocks.6.attn.qkv.weight": "model-00001-of-00002.safetensors",
-    "model.visual.blocks.6.mlp.linear_fc1.bias": "model-00001-of-00002.safetensors",
-    "model.visual.blocks.6.mlp.linear_fc1.weight": "model-00001-of-00002.safetensors",
-    "model.visual.blocks.6.mlp.linear_fc2.bias": "model-00001-of-00002.safetensors",
-    "model.visual.blocks.6.mlp.linear_fc2.weight": "model-00001-of-00002.safetensors",
-    "model.visual.blocks.6.norm1.bias": "model-00001-of-00002.safetensors",
-    "model.visual.blocks.6.norm1.weight": "model-00001-of-00002.safetensors",
-    "model.visual.blocks.6.norm2.bias": "model-00001-of-00002.safetensors",
-    "model.visual.blocks.6.norm2.weight": "model-00001-of-00002.safetensors",
-    "model.visual.blocks.7.attn.proj.bias": "model-00001-of-00002.safetensors",
-    "model.visual.blocks.7.attn.proj.weight": "model-00001-of-00002.safetensors",
-    "model.visual.blocks.7.attn.qkv.bias": "model-00001-of-00002.safetensors",
-    "model.visual.blocks.7.attn.qkv.weight": "model-00001-of-00002.safetensors",
-    "model.visual.blocks.7.mlp.linear_fc1.bias": "model-00001-of-00002.safetensors",
-    "model.visual.blocks.7.mlp.linear_fc1.weight": "model-00001-of-00002.safetensors",
-    "model.visual.blocks.7.mlp.linear_fc2.bias": "model-00001-of-00002.safetensors",
-    "model.visual.blocks.7.mlp.linear_fc2.weight": "model-00001-of-00002.safetensors",
-    "model.visual.blocks.7.norm1.bias": "model-00001-of-00002.safetensors",
-    "model.visual.blocks.7.norm1.weight": "model-00001-of-00002.safetensors",
-    "model.visual.blocks.7.norm2.bias": "model-00001-of-00002.safetensors",
-    "model.visual.blocks.7.norm2.weight": "model-00001-of-00002.safetensors",
-    "model.visual.blocks.8.attn.proj.bias": "model-00001-of-00002.safetensors",
-    "model.visual.blocks.8.attn.proj.weight": "model-00001-of-00002.safetensors",
-    "model.visual.blocks.8.attn.qkv.bias": "model-00001-of-00002.safetensors",
-    "model.visual.blocks.8.attn.qkv.weight": "model-00001-of-00002.safetensors",
-    "model.visual.blocks.8.mlp.linear_fc1.bias": "model-00001-of-00002.safetensors",
-    "model.visual.blocks.8.mlp.linear_fc1.weight": "model-00001-of-00002.safetensors",
-    "model.visual.blocks.8.mlp.linear_fc2.bias": "model-00001-of-00002.safetensors",
-    "model.visual.blocks.8.mlp.linear_fc2.weight": "model-00001-of-00002.safetensors",
-    "model.visual.blocks.8.norm1.bias": "model-00001-of-00002.safetensors",
-    "model.visual.blocks.8.norm1.weight": "model-00001-of-00002.safetensors",
-    "model.visual.blocks.8.norm2.bias": "model-00001-of-00002.safetensors",
-    "model.visual.blocks.8.norm2.weight": "model-00001-of-00002.safetensors",
-    "model.visual.blocks.9.attn.proj.bias": "model-00001-of-00002.safetensors",
-    "model.visual.blocks.9.attn.proj.weight": "model-00001-of-00002.safetensors",
-    "model.visual.blocks.9.attn.qkv.bias": "model-00001-of-00002.safetensors",
-    "model.visual.blocks.9.attn.qkv.weight": "model-00001-of-00002.safetensors",
-    "model.visual.blocks.9.mlp.linear_fc1.bias": "model-00001-of-00002.safetensors",
-    "model.visual.blocks.9.mlp.linear_fc1.weight": "model-00001-of-00002.safetensors",
-    "model.visual.blocks.9.mlp.linear_fc2.bias": "model-00001-of-00002.safetensors",
-    "model.visual.blocks.9.mlp.linear_fc2.weight": "model-00001-of-00002.safetensors",
-    "model.visual.blocks.9.norm1.bias": "model-00001-of-00002.safetensors",
-    "model.visual.blocks.9.norm1.weight": "model-00001-of-00002.safetensors",
-    "model.visual.blocks.9.norm2.bias": "model-00001-of-00002.safetensors",
-    "model.visual.blocks.9.norm2.weight": "model-00001-of-00002.safetensors",
-    "model.visual.deepstack_merger_list.0.linear_fc1.bias": "model-00001-of-00002.safetensors",
-    "model.visual.deepstack_merger_list.0.linear_fc1.weight": "model-00001-of-00002.safetensors",
-    "model.visual.deepstack_merger_list.0.linear_fc2.bias": "model-00001-of-00002.safetensors",
-    "model.visual.deepstack_merger_list.0.linear_fc2.weight": "model-00001-of-00002.safetensors",
-    "model.visual.deepstack_merger_list.0.norm.bias": "model-00001-of-00002.safetensors",
-    "model.visual.deepstack_merger_list.0.norm.weight": "model-00001-of-00002.safetensors",
-    "model.visual.deepstack_merger_list.1.linear_fc1.bias": "model-00001-of-00002.safetensors",
-    "model.visual.deepstack_merger_list.1.linear_fc1.weight": "model-00001-of-00002.safetensors",
-    "model.visual.deepstack_merger_list.1.linear_fc2.bias": "model-00001-of-00002.safetensors",
-    "model.visual.deepstack_merger_list.1.linear_fc2.weight": "model-00001-of-00002.safetensors",
-    "model.visual.deepstack_merger_list.1.norm.bias": "model-00001-of-00002.safetensors",
-    "model.visual.deepstack_merger_list.1.norm.weight": "model-00001-of-00002.safetensors",
-    "model.visual.deepstack_merger_list.2.linear_fc1.bias": "model-00001-of-00002.safetensors",
-    "model.visual.deepstack_merger_list.2.linear_fc1.weight": "model-00001-of-00002.safetensors",
-    "model.visual.deepstack_merger_list.2.linear_fc2.bias": "model-00001-of-00002.safetensors",
-    "model.visual.deepstack_merger_list.2.linear_fc2.weight": "model-00001-of-00002.safetensors",
-    "model.visual.deepstack_merger_list.2.norm.bias": "model-00001-of-00002.safetensors",
-    "model.visual.deepstack_merger_list.2.norm.weight": "model-00001-of-00002.safetensors",
-    "model.visual.merger.linear_fc1.bias": "model-00001-of-00002.safetensors",
-    "model.visual.merger.linear_fc1.weight": "model-00001-of-00002.safetensors",
-    "model.visual.merger.linear_fc2.bias": "model-00001-of-00002.safetensors",
-    "model.visual.merger.linear_fc2.weight": "model-00001-of-00002.safetensors",
-    "model.visual.merger.norm.bias": "model-00001-of-00002.safetensors",
-    "model.visual.merger.norm.weight": "model-00001-of-00002.safetensors",
-    "model.visual.patch_embed.proj.bias": "model-00001-of-00002.safetensors",
-    "model.visual.patch_embed.proj.weight": "model-00001-of-00002.safetensors",
-    "model.visual.pos_embed.weight": "model-00001-of-00002.safetensors"
+    "model.visual.blocks.0.attn.proj.bias": "model-00002-of-00002.safetensors",
+    "model.visual.blocks.0.attn.proj.weight": "model-00002-of-00002.safetensors",
+    "model.visual.blocks.0.attn.qkv.bias": "model-00002-of-00002.safetensors",
+    "model.visual.blocks.0.attn.qkv.weight": "model-00002-of-00002.safetensors",
+    "model.visual.blocks.0.mlp.linear_fc1.bias": "model-00002-of-00002.safetensors",
+    "model.visual.blocks.0.mlp.linear_fc1.weight": "model-00002-of-00002.safetensors",
+    "model.visual.blocks.0.mlp.linear_fc2.bias": "model-00002-of-00002.safetensors",
+    "model.visual.blocks.0.mlp.linear_fc2.weight": "model-00002-of-00002.safetensors",
+    "model.visual.blocks.0.norm1.bias": "model-00002-of-00002.safetensors",
+    "model.visual.blocks.0.norm1.weight": "model-00002-of-00002.safetensors",
+    "model.visual.blocks.0.norm2.bias": "model-00002-of-00002.safetensors",
+    "model.visual.blocks.0.norm2.weight": "model-00002-of-00002.safetensors",
+    "model.visual.blocks.1.attn.proj.bias": "model-00002-of-00002.safetensors",
+    "model.visual.blocks.1.attn.proj.weight": "model-00002-of-00002.safetensors",
+    "model.visual.blocks.1.attn.qkv.bias": "model-00002-of-00002.safetensors",
+    "model.visual.blocks.1.attn.qkv.weight": "model-00002-of-00002.safetensors",
+    "model.visual.blocks.1.mlp.linear_fc1.bias": "model-00002-of-00002.safetensors",
+    "model.visual.blocks.1.mlp.linear_fc1.weight": "model-00002-of-00002.safetensors",
+    "model.visual.blocks.1.mlp.linear_fc2.bias": "model-00002-of-00002.safetensors",
+    "model.visual.blocks.1.mlp.linear_fc2.weight": "model-00002-of-00002.safetensors",
+    "model.visual.blocks.1.norm1.bias": "model-00002-of-00002.safetensors",
+    "model.visual.blocks.1.norm1.weight": "model-00002-of-00002.safetensors",
+    "model.visual.blocks.1.norm2.bias": "model-00002-of-00002.safetensors",
+    "model.visual.blocks.1.norm2.weight": "model-00002-of-00002.safetensors",
+    "model.visual.blocks.10.attn.proj.bias": "model-00002-of-00002.safetensors",
+    "model.visual.blocks.10.attn.proj.weight": "model-00002-of-00002.safetensors",
+    "model.visual.blocks.10.attn.qkv.bias": "model-00002-of-00002.safetensors",
+    "model.visual.blocks.10.attn.qkv.weight": "model-00002-of-00002.safetensors",
+    "model.visual.blocks.10.mlp.linear_fc1.bias": "model-00002-of-00002.safetensors",
+    "model.visual.blocks.10.mlp.linear_fc1.weight": "model-00002-of-00002.safetensors",
+    "model.visual.blocks.10.mlp.linear_fc2.bias": "model-00002-of-00002.safetensors",
+    "model.visual.blocks.10.mlp.linear_fc2.weight": "model-00002-of-00002.safetensors",
+    "model.visual.blocks.10.norm1.bias": "model-00002-of-00002.safetensors",
+    "model.visual.blocks.10.norm1.weight": "model-00002-of-00002.safetensors",
+    "model.visual.blocks.10.norm2.bias": "model-00002-of-00002.safetensors",
+    "model.visual.blocks.10.norm2.weight": "model-00002-of-00002.safetensors",
+    "model.visual.blocks.11.attn.proj.bias": "model-00002-of-00002.safetensors",
+    "model.visual.blocks.11.attn.proj.weight": "model-00002-of-00002.safetensors",
+    "model.visual.blocks.11.attn.qkv.bias": "model-00002-of-00002.safetensors",
+    "model.visual.blocks.11.attn.qkv.weight": "model-00002-of-00002.safetensors",
+    "model.visual.blocks.11.mlp.linear_fc1.bias": "model-00002-of-00002.safetensors",
+    "model.visual.blocks.11.mlp.linear_fc1.weight": "model-00002-of-00002.safetensors",
+    "model.visual.blocks.11.mlp.linear_fc2.bias": "model-00002-of-00002.safetensors",
+    "model.visual.blocks.11.mlp.linear_fc2.weight": "model-00002-of-00002.safetensors",
+    "model.visual.blocks.11.norm1.bias": "model-00002-of-00002.safetensors",
+    "model.visual.blocks.11.norm1.weight": "model-00002-of-00002.safetensors",
+    "model.visual.blocks.11.norm2.bias": "model-00002-of-00002.safetensors",
+    "model.visual.blocks.11.norm2.weight": "model-00002-of-00002.safetensors",
+    "model.visual.blocks.12.attn.proj.bias": "model-00002-of-00002.safetensors",
+    "model.visual.blocks.12.attn.proj.weight": "model-00002-of-00002.safetensors",
+    "model.visual.blocks.12.attn.qkv.bias": "model-00002-of-00002.safetensors",
+    "model.visual.blocks.12.attn.qkv.weight": "model-00002-of-00002.safetensors",
+    "model.visual.blocks.12.mlp.linear_fc1.bias": "model-00002-of-00002.safetensors",
+    "model.visual.blocks.12.mlp.linear_fc1.weight": "model-00002-of-00002.safetensors",
+    "model.visual.blocks.12.mlp.linear_fc2.bias": "model-00002-of-00002.safetensors",
+    "model.visual.blocks.12.mlp.linear_fc2.weight": "model-00002-of-00002.safetensors",
+    "model.visual.blocks.12.norm1.bias": "model-00002-of-00002.safetensors",
+    "model.visual.blocks.12.norm1.weight": "model-00002-of-00002.safetensors",
+    "model.visual.blocks.12.norm2.bias": "model-00002-of-00002.safetensors",
+    "model.visual.blocks.12.norm2.weight": "model-00002-of-00002.safetensors",
+    "model.visual.blocks.13.attn.proj.bias": "model-00002-of-00002.safetensors",
+    "model.visual.blocks.13.attn.proj.weight": "model-00002-of-00002.safetensors",
+    "model.visual.blocks.13.attn.qkv.bias": "model-00002-of-00002.safetensors",
+    "model.visual.blocks.13.attn.qkv.weight": "model-00002-of-00002.safetensors",
+    "model.visual.blocks.13.mlp.linear_fc1.bias": "model-00002-of-00002.safetensors",
+    "model.visual.blocks.13.mlp.linear_fc1.weight": "model-00002-of-00002.safetensors",
+    "model.visual.blocks.13.mlp.linear_fc2.bias": "model-00002-of-00002.safetensors",
+    "model.visual.blocks.13.mlp.linear_fc2.weight": "model-00002-of-00002.safetensors",
+    "model.visual.blocks.13.norm1.bias": "model-00002-of-00002.safetensors",
+    "model.visual.blocks.13.norm1.weight": "model-00002-of-00002.safetensors",
+    "model.visual.blocks.13.norm2.bias": "model-00002-of-00002.safetensors",
+    "model.visual.blocks.13.norm2.weight": "model-00002-of-00002.safetensors",
+    "model.visual.blocks.14.attn.proj.bias": "model-00002-of-00002.safetensors",
+    "model.visual.blocks.14.attn.proj.weight": "model-00002-of-00002.safetensors",
+    "model.visual.blocks.14.attn.qkv.bias": "model-00002-of-00002.safetensors",
+    "model.visual.blocks.14.attn.qkv.weight": "model-00002-of-00002.safetensors",
+    "model.visual.blocks.14.mlp.linear_fc1.bias": "model-00002-of-00002.safetensors",
+    "model.visual.blocks.14.mlp.linear_fc1.weight": "model-00002-of-00002.safetensors",
+    "model.visual.blocks.14.mlp.linear_fc2.bias": "model-00002-of-00002.safetensors",
+    "model.visual.blocks.14.mlp.linear_fc2.weight": "model-00002-of-00002.safetensors",
+    "model.visual.blocks.14.norm1.bias": "model-00002-of-00002.safetensors",
+    "model.visual.blocks.14.norm1.weight": "model-00002-of-00002.safetensors",
+    "model.visual.blocks.14.norm2.bias": "model-00002-of-00002.safetensors",
+    "model.visual.blocks.14.norm2.weight": "model-00002-of-00002.safetensors",
+    "model.visual.blocks.15.attn.proj.bias": "model-00002-of-00002.safetensors",
+    "model.visual.blocks.15.attn.proj.weight": "model-00002-of-00002.safetensors",
+    "model.visual.blocks.15.attn.qkv.bias": "model-00002-of-00002.safetensors",
+    "model.visual.blocks.15.attn.qkv.weight": "model-00002-of-00002.safetensors",
+    "model.visual.blocks.15.mlp.linear_fc1.bias": "model-00002-of-00002.safetensors",
+    "model.visual.blocks.15.mlp.linear_fc1.weight": "model-00002-of-00002.safetensors",
+    "model.visual.blocks.15.mlp.linear_fc2.bias": "model-00002-of-00002.safetensors",
+    "model.visual.blocks.15.mlp.linear_fc2.weight": "model-00002-of-00002.safetensors",
+    "model.visual.blocks.15.norm1.bias": "model-00002-of-00002.safetensors",
+    "model.visual.blocks.15.norm1.weight": "model-00002-of-00002.safetensors",
+    "model.visual.blocks.15.norm2.bias": "model-00002-of-00002.safetensors",
+    "model.visual.blocks.15.norm2.weight": "model-00002-of-00002.safetensors",
+    "model.visual.blocks.16.attn.proj.bias": "model-00002-of-00002.safetensors",
+    "model.visual.blocks.16.attn.proj.weight": "model-00002-of-00002.safetensors",
+    "model.visual.blocks.16.attn.qkv.bias": "model-00002-of-00002.safetensors",
+    "model.visual.blocks.16.attn.qkv.weight": "model-00002-of-00002.safetensors",
+    "model.visual.blocks.16.mlp.linear_fc1.bias": "model-00002-of-00002.safetensors",
+    "model.visual.blocks.16.mlp.linear_fc1.weight": "model-00002-of-00002.safetensors",
+    "model.visual.blocks.16.mlp.linear_fc2.bias": "model-00002-of-00002.safetensors",
+    "model.visual.blocks.16.mlp.linear_fc2.weight": "model-00002-of-00002.safetensors",
+    "model.visual.blocks.16.norm1.bias": "model-00002-of-00002.safetensors",
+    "model.visual.blocks.16.norm1.weight": "model-00002-of-00002.safetensors",
+    "model.visual.blocks.16.norm2.bias": "model-00002-of-00002.safetensors",
+    "model.visual.blocks.16.norm2.weight": "model-00002-of-00002.safetensors",
+    "model.visual.blocks.17.attn.proj.bias": "model-00002-of-00002.safetensors",
+    "model.visual.blocks.17.attn.proj.weight": "model-00002-of-00002.safetensors",
+    "model.visual.blocks.17.attn.qkv.bias": "model-00002-of-00002.safetensors",
+    "model.visual.blocks.17.attn.qkv.weight": "model-00002-of-00002.safetensors",
+    "model.visual.blocks.17.mlp.linear_fc1.bias": "model-00002-of-00002.safetensors",
+    "model.visual.blocks.17.mlp.linear_fc1.weight": "model-00002-of-00002.safetensors",
+    "model.visual.blocks.17.mlp.linear_fc2.bias": "model-00002-of-00002.safetensors",
+    "model.visual.blocks.17.mlp.linear_fc2.weight": "model-00002-of-00002.safetensors",
+    "model.visual.blocks.17.norm1.bias": "model-00002-of-00002.safetensors",
+    "model.visual.blocks.17.norm1.weight": "model-00002-of-00002.safetensors",
+    "model.visual.blocks.17.norm2.bias": "model-00002-of-00002.safetensors",
+    "model.visual.blocks.17.norm2.weight": "model-00002-of-00002.safetensors",
+    "model.visual.blocks.18.attn.proj.bias": "model-00002-of-00002.safetensors",
+    "model.visual.blocks.18.attn.proj.weight": "model-00002-of-00002.safetensors",
+    "model.visual.blocks.18.attn.qkv.bias": "model-00002-of-00002.safetensors",
+    "model.visual.blocks.18.attn.qkv.weight": "model-00002-of-00002.safetensors",
+    "model.visual.blocks.18.mlp.linear_fc1.bias": "model-00002-of-00002.safetensors",
+    "model.visual.blocks.18.mlp.linear_fc1.weight": "model-00002-of-00002.safetensors",
+    "model.visual.blocks.18.mlp.linear_fc2.bias": "model-00002-of-00002.safetensors",
+    "model.visual.blocks.18.mlp.linear_fc2.weight": "model-00002-of-00002.safetensors",
+    "model.visual.blocks.18.norm1.bias": "model-00002-of-00002.safetensors",
+    "model.visual.blocks.18.norm1.weight": "model-00002-of-00002.safetensors",
+    "model.visual.blocks.18.norm2.bias": "model-00002-of-00002.safetensors",
+    "model.visual.blocks.18.norm2.weight": "model-00002-of-00002.safetensors",
+    "model.visual.blocks.19.attn.proj.bias": "model-00002-of-00002.safetensors",
+    "model.visual.blocks.19.attn.proj.weight": "model-00002-of-00002.safetensors",
+    "model.visual.blocks.19.attn.qkv.bias": "model-00002-of-00002.safetensors",
+    "model.visual.blocks.19.attn.qkv.weight": "model-00002-of-00002.safetensors",
+    "model.visual.blocks.19.mlp.linear_fc1.bias": "model-00002-of-00002.safetensors",
+    "model.visual.blocks.19.mlp.linear_fc1.weight": "model-00002-of-00002.safetensors",
+    "model.visual.blocks.19.mlp.linear_fc2.bias": "model-00002-of-00002.safetensors",
+    "model.visual.blocks.19.mlp.linear_fc2.weight": "model-00002-of-00002.safetensors",
+    "model.visual.blocks.19.norm1.bias": "model-00002-of-00002.safetensors",
+    "model.visual.blocks.19.norm1.weight": "model-00002-of-00002.safetensors",
+    "model.visual.blocks.19.norm2.bias": "model-00002-of-00002.safetensors",
+    "model.visual.blocks.19.norm2.weight": "model-00002-of-00002.safetensors",
548
+ "model.visual.blocks.2.attn.proj.bias": "model-00002-of-00002.safetensors",
549
+ "model.visual.blocks.2.attn.proj.weight": "model-00002-of-00002.safetensors",
550
+ "model.visual.blocks.2.attn.qkv.bias": "model-00002-of-00002.safetensors",
551
+ "model.visual.blocks.2.attn.qkv.weight": "model-00002-of-00002.safetensors",
552
+ "model.visual.blocks.2.mlp.linear_fc1.bias": "model-00002-of-00002.safetensors",
553
+ "model.visual.blocks.2.mlp.linear_fc1.weight": "model-00002-of-00002.safetensors",
554
+ "model.visual.blocks.2.mlp.linear_fc2.bias": "model-00002-of-00002.safetensors",
555
+ "model.visual.blocks.2.mlp.linear_fc2.weight": "model-00002-of-00002.safetensors",
556
+ "model.visual.blocks.2.norm1.bias": "model-00002-of-00002.safetensors",
557
+ "model.visual.blocks.2.norm1.weight": "model-00002-of-00002.safetensors",
558
+ "model.visual.blocks.2.norm2.bias": "model-00002-of-00002.safetensors",
559
+ "model.visual.blocks.2.norm2.weight": "model-00002-of-00002.safetensors",
560
+ "model.visual.blocks.20.attn.proj.bias": "model-00002-of-00002.safetensors",
561
+ "model.visual.blocks.20.attn.proj.weight": "model-00002-of-00002.safetensors",
562
+ "model.visual.blocks.20.attn.qkv.bias": "model-00002-of-00002.safetensors",
563
+ "model.visual.blocks.20.attn.qkv.weight": "model-00002-of-00002.safetensors",
564
+ "model.visual.blocks.20.mlp.linear_fc1.bias": "model-00002-of-00002.safetensors",
565
+ "model.visual.blocks.20.mlp.linear_fc1.weight": "model-00002-of-00002.safetensors",
566
+ "model.visual.blocks.20.mlp.linear_fc2.bias": "model-00002-of-00002.safetensors",
567
+ "model.visual.blocks.20.mlp.linear_fc2.weight": "model-00002-of-00002.safetensors",
568
+ "model.visual.blocks.20.norm1.bias": "model-00002-of-00002.safetensors",
569
+ "model.visual.blocks.20.norm1.weight": "model-00002-of-00002.safetensors",
570
+ "model.visual.blocks.20.norm2.bias": "model-00002-of-00002.safetensors",
571
+ "model.visual.blocks.20.norm2.weight": "model-00002-of-00002.safetensors",
572
+ "model.visual.blocks.21.attn.proj.bias": "model-00002-of-00002.safetensors",
573
+ "model.visual.blocks.21.attn.proj.weight": "model-00002-of-00002.safetensors",
574
+ "model.visual.blocks.21.attn.qkv.bias": "model-00002-of-00002.safetensors",
575
+ "model.visual.blocks.21.attn.qkv.weight": "model-00002-of-00002.safetensors",
576
+ "model.visual.blocks.21.mlp.linear_fc1.bias": "model-00002-of-00002.safetensors",
577
+ "model.visual.blocks.21.mlp.linear_fc1.weight": "model-00002-of-00002.safetensors",
578
+ "model.visual.blocks.21.mlp.linear_fc2.bias": "model-00002-of-00002.safetensors",
579
+ "model.visual.blocks.21.mlp.linear_fc2.weight": "model-00002-of-00002.safetensors",
580
+ "model.visual.blocks.21.norm1.bias": "model-00002-of-00002.safetensors",
581
+ "model.visual.blocks.21.norm1.weight": "model-00002-of-00002.safetensors",
582
+ "model.visual.blocks.21.norm2.bias": "model-00002-of-00002.safetensors",
583
+ "model.visual.blocks.21.norm2.weight": "model-00002-of-00002.safetensors",
584
+ "model.visual.blocks.22.attn.proj.bias": "model-00002-of-00002.safetensors",
585
+ "model.visual.blocks.22.attn.proj.weight": "model-00002-of-00002.safetensors",
586
+ "model.visual.blocks.22.attn.qkv.bias": "model-00002-of-00002.safetensors",
587
+ "model.visual.blocks.22.attn.qkv.weight": "model-00002-of-00002.safetensors",
588
+ "model.visual.blocks.22.mlp.linear_fc1.bias": "model-00002-of-00002.safetensors",
589
+ "model.visual.blocks.22.mlp.linear_fc1.weight": "model-00002-of-00002.safetensors",
590
+ "model.visual.blocks.22.mlp.linear_fc2.bias": "model-00002-of-00002.safetensors",
591
+ "model.visual.blocks.22.mlp.linear_fc2.weight": "model-00002-of-00002.safetensors",
592
+ "model.visual.blocks.22.norm1.bias": "model-00002-of-00002.safetensors",
593
+ "model.visual.blocks.22.norm1.weight": "model-00002-of-00002.safetensors",
594
+ "model.visual.blocks.22.norm2.bias": "model-00002-of-00002.safetensors",
595
+ "model.visual.blocks.22.norm2.weight": "model-00002-of-00002.safetensors",
596
+ "model.visual.blocks.23.attn.proj.bias": "model-00002-of-00002.safetensors",
597
+ "model.visual.blocks.23.attn.proj.weight": "model-00002-of-00002.safetensors",
598
+ "model.visual.blocks.23.attn.qkv.bias": "model-00002-of-00002.safetensors",
599
+ "model.visual.blocks.23.attn.qkv.weight": "model-00002-of-00002.safetensors",
600
+ "model.visual.blocks.23.mlp.linear_fc1.bias": "model-00002-of-00002.safetensors",
601
+ "model.visual.blocks.23.mlp.linear_fc1.weight": "model-00002-of-00002.safetensors",
602
+ "model.visual.blocks.23.mlp.linear_fc2.bias": "model-00002-of-00002.safetensors",
603
+ "model.visual.blocks.23.mlp.linear_fc2.weight": "model-00002-of-00002.safetensors",
604
+ "model.visual.blocks.23.norm1.bias": "model-00002-of-00002.safetensors",
605
+ "model.visual.blocks.23.norm1.weight": "model-00002-of-00002.safetensors",
606
+ "model.visual.blocks.23.norm2.bias": "model-00002-of-00002.safetensors",
607
+ "model.visual.blocks.23.norm2.weight": "model-00002-of-00002.safetensors",
608
+ "model.visual.blocks.3.attn.proj.bias": "model-00002-of-00002.safetensors",
609
+ "model.visual.blocks.3.attn.proj.weight": "model-00002-of-00002.safetensors",
610
+ "model.visual.blocks.3.attn.qkv.bias": "model-00002-of-00002.safetensors",
611
+ "model.visual.blocks.3.attn.qkv.weight": "model-00002-of-00002.safetensors",
612
+ "model.visual.blocks.3.mlp.linear_fc1.bias": "model-00002-of-00002.safetensors",
613
+ "model.visual.blocks.3.mlp.linear_fc1.weight": "model-00002-of-00002.safetensors",
614
+ "model.visual.blocks.3.mlp.linear_fc2.bias": "model-00002-of-00002.safetensors",
615
+ "model.visual.blocks.3.mlp.linear_fc2.weight": "model-00002-of-00002.safetensors",
616
+ "model.visual.blocks.3.norm1.bias": "model-00002-of-00002.safetensors",
617
+ "model.visual.blocks.3.norm1.weight": "model-00002-of-00002.safetensors",
618
+ "model.visual.blocks.3.norm2.bias": "model-00002-of-00002.safetensors",
619
+ "model.visual.blocks.3.norm2.weight": "model-00002-of-00002.safetensors",
620
+ "model.visual.blocks.4.attn.proj.bias": "model-00002-of-00002.safetensors",
621
+ "model.visual.blocks.4.attn.proj.weight": "model-00002-of-00002.safetensors",
622
+ "model.visual.blocks.4.attn.qkv.bias": "model-00002-of-00002.safetensors",
623
+ "model.visual.blocks.4.attn.qkv.weight": "model-00002-of-00002.safetensors",
624
+ "model.visual.blocks.4.mlp.linear_fc1.bias": "model-00002-of-00002.safetensors",
625
+ "model.visual.blocks.4.mlp.linear_fc1.weight": "model-00002-of-00002.safetensors",
626
+ "model.visual.blocks.4.mlp.linear_fc2.bias": "model-00002-of-00002.safetensors",
627
+ "model.visual.blocks.4.mlp.linear_fc2.weight": "model-00002-of-00002.safetensors",
628
+ "model.visual.blocks.4.norm1.bias": "model-00002-of-00002.safetensors",
629
+ "model.visual.blocks.4.norm1.weight": "model-00002-of-00002.safetensors",
630
+ "model.visual.blocks.4.norm2.bias": "model-00002-of-00002.safetensors",
631
+ "model.visual.blocks.4.norm2.weight": "model-00002-of-00002.safetensors",
632
+ "model.visual.blocks.5.attn.proj.bias": "model-00002-of-00002.safetensors",
633
+ "model.visual.blocks.5.attn.proj.weight": "model-00002-of-00002.safetensors",
634
+ "model.visual.blocks.5.attn.qkv.bias": "model-00002-of-00002.safetensors",
635
+ "model.visual.blocks.5.attn.qkv.weight": "model-00002-of-00002.safetensors",
636
+ "model.visual.blocks.5.mlp.linear_fc1.bias": "model-00002-of-00002.safetensors",
637
+ "model.visual.blocks.5.mlp.linear_fc1.weight": "model-00002-of-00002.safetensors",
638
+ "model.visual.blocks.5.mlp.linear_fc2.bias": "model-00002-of-00002.safetensors",
639
+ "model.visual.blocks.5.mlp.linear_fc2.weight": "model-00002-of-00002.safetensors",
640
+ "model.visual.blocks.5.norm1.bias": "model-00002-of-00002.safetensors",
641
+ "model.visual.blocks.5.norm1.weight": "model-00002-of-00002.safetensors",
642
+ "model.visual.blocks.5.norm2.bias": "model-00002-of-00002.safetensors",
643
+ "model.visual.blocks.5.norm2.weight": "model-00002-of-00002.safetensors",
644
+ "model.visual.blocks.6.attn.proj.bias": "model-00002-of-00002.safetensors",
645
+ "model.visual.blocks.6.attn.proj.weight": "model-00002-of-00002.safetensors",
646
+ "model.visual.blocks.6.attn.qkv.bias": "model-00002-of-00002.safetensors",
647
+ "model.visual.blocks.6.attn.qkv.weight": "model-00002-of-00002.safetensors",
648
+ "model.visual.blocks.6.mlp.linear_fc1.bias": "model-00002-of-00002.safetensors",
649
+ "model.visual.blocks.6.mlp.linear_fc1.weight": "model-00002-of-00002.safetensors",
650
+ "model.visual.blocks.6.mlp.linear_fc2.bias": "model-00002-of-00002.safetensors",
651
+ "model.visual.blocks.6.mlp.linear_fc2.weight": "model-00002-of-00002.safetensors",
652
+ "model.visual.blocks.6.norm1.bias": "model-00002-of-00002.safetensors",
653
+ "model.visual.blocks.6.norm1.weight": "model-00002-of-00002.safetensors",
654
+ "model.visual.blocks.6.norm2.bias": "model-00002-of-00002.safetensors",
655
+ "model.visual.blocks.6.norm2.weight": "model-00002-of-00002.safetensors",
656
+ "model.visual.blocks.7.attn.proj.bias": "model-00002-of-00002.safetensors",
657
+ "model.visual.blocks.7.attn.proj.weight": "model-00002-of-00002.safetensors",
658
+ "model.visual.blocks.7.attn.qkv.bias": "model-00002-of-00002.safetensors",
659
+ "model.visual.blocks.7.attn.qkv.weight": "model-00002-of-00002.safetensors",
660
+ "model.visual.blocks.7.mlp.linear_fc1.bias": "model-00002-of-00002.safetensors",
661
+ "model.visual.blocks.7.mlp.linear_fc1.weight": "model-00002-of-00002.safetensors",
662
+ "model.visual.blocks.7.mlp.linear_fc2.bias": "model-00002-of-00002.safetensors",
663
+ "model.visual.blocks.7.mlp.linear_fc2.weight": "model-00002-of-00002.safetensors",
664
+ "model.visual.blocks.7.norm1.bias": "model-00002-of-00002.safetensors",
665
+ "model.visual.blocks.7.norm1.weight": "model-00002-of-00002.safetensors",
666
+ "model.visual.blocks.7.norm2.bias": "model-00002-of-00002.safetensors",
667
+ "model.visual.blocks.7.norm2.weight": "model-00002-of-00002.safetensors",
668
+ "model.visual.blocks.8.attn.proj.bias": "model-00002-of-00002.safetensors",
669
+ "model.visual.blocks.8.attn.proj.weight": "model-00002-of-00002.safetensors",
670
+ "model.visual.blocks.8.attn.qkv.bias": "model-00002-of-00002.safetensors",
671
+ "model.visual.blocks.8.attn.qkv.weight": "model-00002-of-00002.safetensors",
672
+ "model.visual.blocks.8.mlp.linear_fc1.bias": "model-00002-of-00002.safetensors",
673
+ "model.visual.blocks.8.mlp.linear_fc1.weight": "model-00002-of-00002.safetensors",
674
+ "model.visual.blocks.8.mlp.linear_fc2.bias": "model-00002-of-00002.safetensors",
675
+ "model.visual.blocks.8.mlp.linear_fc2.weight": "model-00002-of-00002.safetensors",
676
+ "model.visual.blocks.8.norm1.bias": "model-00002-of-00002.safetensors",
677
+ "model.visual.blocks.8.norm1.weight": "model-00002-of-00002.safetensors",
678
+ "model.visual.blocks.8.norm2.bias": "model-00002-of-00002.safetensors",
679
+ "model.visual.blocks.8.norm2.weight": "model-00002-of-00002.safetensors",
680
+ "model.visual.blocks.9.attn.proj.bias": "model-00002-of-00002.safetensors",
681
+ "model.visual.blocks.9.attn.proj.weight": "model-00002-of-00002.safetensors",
682
+ "model.visual.blocks.9.attn.qkv.bias": "model-00002-of-00002.safetensors",
683
+ "model.visual.blocks.9.attn.qkv.weight": "model-00002-of-00002.safetensors",
684
+ "model.visual.blocks.9.mlp.linear_fc1.bias": "model-00002-of-00002.safetensors",
685
+ "model.visual.blocks.9.mlp.linear_fc1.weight": "model-00002-of-00002.safetensors",
686
+ "model.visual.blocks.9.mlp.linear_fc2.bias": "model-00002-of-00002.safetensors",
687
+ "model.visual.blocks.9.mlp.linear_fc2.weight": "model-00002-of-00002.safetensors",
688
+ "model.visual.blocks.9.norm1.bias": "model-00002-of-00002.safetensors",
689
+ "model.visual.blocks.9.norm1.weight": "model-00002-of-00002.safetensors",
690
+ "model.visual.blocks.9.norm2.bias": "model-00002-of-00002.safetensors",
691
+ "model.visual.blocks.9.norm2.weight": "model-00002-of-00002.safetensors",
692
+ "model.visual.deepstack_merger_list.0.linear_fc1.bias": "model-00002-of-00002.safetensors",
693
+ "model.visual.deepstack_merger_list.0.linear_fc1.weight": "model-00002-of-00002.safetensors",
694
+ "model.visual.deepstack_merger_list.0.linear_fc2.bias": "model-00002-of-00002.safetensors",
695
+ "model.visual.deepstack_merger_list.0.linear_fc2.weight": "model-00002-of-00002.safetensors",
696
+ "model.visual.deepstack_merger_list.0.norm.bias": "model-00002-of-00002.safetensors",
697
+ "model.visual.deepstack_merger_list.0.norm.weight": "model-00002-of-00002.safetensors",
698
+ "model.visual.deepstack_merger_list.1.linear_fc1.bias": "model-00002-of-00002.safetensors",
699
+ "model.visual.deepstack_merger_list.1.linear_fc1.weight": "model-00002-of-00002.safetensors",
700
+ "model.visual.deepstack_merger_list.1.linear_fc2.bias": "model-00002-of-00002.safetensors",
701
+ "model.visual.deepstack_merger_list.1.linear_fc2.weight": "model-00002-of-00002.safetensors",
702
+ "model.visual.deepstack_merger_list.1.norm.bias": "model-00002-of-00002.safetensors",
703
+ "model.visual.deepstack_merger_list.1.norm.weight": "model-00002-of-00002.safetensors",
704
+ "model.visual.deepstack_merger_list.2.linear_fc1.bias": "model-00002-of-00002.safetensors",
705
+ "model.visual.deepstack_merger_list.2.linear_fc1.weight": "model-00002-of-00002.safetensors",
706
+ "model.visual.deepstack_merger_list.2.linear_fc2.bias": "model-00002-of-00002.safetensors",
707
+ "model.visual.deepstack_merger_list.2.linear_fc2.weight": "model-00002-of-00002.safetensors",
708
+ "model.visual.deepstack_merger_list.2.norm.bias": "model-00002-of-00002.safetensors",
709
+ "model.visual.deepstack_merger_list.2.norm.weight": "model-00002-of-00002.safetensors",
710
+ "model.visual.merger.linear_fc1.bias": "model-00002-of-00002.safetensors",
711
+ "model.visual.merger.linear_fc1.weight": "model-00002-of-00002.safetensors",
712
+ "model.visual.merger.linear_fc2.bias": "model-00002-of-00002.safetensors",
713
+ "model.visual.merger.linear_fc2.weight": "model-00002-of-00002.safetensors",
714
+ "model.visual.merger.norm.bias": "model-00002-of-00002.safetensors",
715
+ "model.visual.merger.norm.weight": "model-00002-of-00002.safetensors",
716
+ "model.visual.patch_embed.proj.bias": "model-00002-of-00002.safetensors",
717
+ "model.visual.patch_embed.proj.weight": "model-00002-of-00002.safetensors",
718
+ "model.visual.pos_embed.weight": "model-00002-of-00002.safetensors"
719
  }
720
  }
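
Note on the index entries added above: as is usual for sharded safetensors checkpoints, the weight_map in model.safetensors.index.json maps each parameter name to the shard file that stores it, so a loader only has to open the shard it actually needs. The following is a minimal sketch of that lookup, not part of this commit; it assumes the index file and shards sit in the working directory and uses one of the keys from the diff above purely as an illustration.

# Minimal sketch (assumption: files are in the current directory; not part of this commit).
import json
from safetensors import safe_open

with open("model.safetensors.index.json") as f:
    index = json.load(f)

name = "model.visual.blocks.13.attn.qkv.weight"   # one of the keys added above
shard = index["weight_map"][name]                 # e.g. "model-00002-of-00002.safetensors"

# Open only the shard that holds this tensor and read it.
with safe_open(shard, framework="pt", device="cpu") as f:
    qkv = f.get_tensor(name)

print(name, tuple(qkv.shape))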
tokenizer_config.json CHANGED
@@ -238,5 +238,5 @@
238
  "split_special_tokens": false,
239
  "tokenizer_class": "Qwen2Tokenizer",
240
  "unk_token": null,
241
- "chat_template": "{%- set image_count = namespace(value=0) %}\n{%- set video_count = namespace(value=0) %}\n{%- macro render_content(content, do_vision_count) %}\n {%- if content is string %}\n {{- content }}\n {%- else %}\n {%- for item in content %}\n {%- if 'image' in item or 'image_url' in item or item.type == 'image' %}\n {%- if do_vision_count %}\n {%- set image_count.value = image_count.value + 1 %}\n {%- endif %}\n {%- if add_vision_id %}Picture {{ image_count.value }}: {% endif -%}\n <|vision_start|><|image_pad|><|vision_end|>\n {%- elif 'video' in item or item.type == 'video' %}\n {%- if do_vision_count %}\n {%- set video_count.value = video_count.value + 1 %}\n {%- endif %}\n {%- if add_vision_id %}Video {{ video_count.value }}: {% endif -%}\n <|vision_start|><|video_pad|><|vision_end|>\n {%- elif 'text' in item %}\n {{- item.text }}\n {%- endif %}\n {%- endfor %}\n {%- endif %}\n{%- endmacro %}\n{%- if tools %}\n {{- '<|im_start|>system\\n' }}\n {%- if messages[0].role == 'system' %}\n {{- render_content(messages[0].content, false) + '\\n\\n' }}\n {%- endif %}\n {{- \"# Tools\\n\\nYou may call one or more functions to assist with the user query.\\n\\nYou are provided with function signatures within <tools></tools> XML tags:\\n<tools>\" }}\n {%- for tool in tools %}\n {{- \"\\n\" }}\n {{- tool | tojson }}\n {%- endfor %}\n {{- \"\\n</tools>\\n\\nFor each function call, return a json object with function name and arguments within <tool_call></tool_call> XML tags:\\n<tool_call>\\n{\\\"name\\\": <function-name>, \\\"arguments\\\": <args-json-object>}\\n</tool_call><|im_end|>\\n\" }}\n{%- else %}\n {%- if messages[0].role == 'system' %}\n {{- '<|im_start|>system\\n' + render_content(messages[0].content, false) + '<|im_end|>\\n' }}\n {%- endif %}\n{%- endif %}\n{%- set ns = namespace(multi_step_tool=true, last_query_index=messages|length - 1) %}\n{%- for message in messages[::-1] %}\n {%- set index = (messages|length - 1) - loop.index0 %}\n {%- if ns.multi_step_tool and message.role == \"user\" %}\n {%- set content = render_content(message.content, false) %}\n {%- if not(content.startswith('<tool_response>') and content.endswith('</tool_response>')) %}\n {%- set ns.multi_step_tool = false %}\n {%- set ns.last_query_index = index %}\n {%- endif %}\n {%- endif %}\n{%- endfor %}\n{%- for message in messages %}\n {%- set content = render_content(message.content, True) %}\n {%- if (message.role == \"user\") or (message.role == \"system\" and not loop.first) %}\n {{- '<|im_start|>' + message.role + '\\n' + content + '<|im_end|>' + '\\n' }}\n {%- elif message.role == \"assistant\" %}\n {%- set reasoning_content = '' %}\n {%- if message.reasoning_content is string %}\n {%- set reasoning_content = message.reasoning_content %}\n {%- else %}\n {%- if '</think>' in content %}\n {%- set reasoning_content = content.split('</think>')[0].rstrip('\\n').split('<think>')[-1].lstrip('\\n') %}\n {%- set content = content.split('</think>')[-1].lstrip('\\n') %}\n {%- endif %}\n {%- endif %}\n {%- if loop.index0 > ns.last_query_index %}\n {%- if loop.last or (not loop.last and reasoning_content) %}\n {{- '<|im_start|>' + message.role + '\\n<think>\\n' + reasoning_content.strip('\\n') + '\\n</think>\\n\\n' + content.lstrip('\\n') }}\n {%- else %}\n {{- '<|im_start|>' + message.role + '\\n' + content }}\n {%- endif %}\n {%- else %}\n {{- '<|im_start|>' + message.role + '\\n' + content }}\n {%- endif %}\n {%- if message.tool_calls %}\n {%- for tool_call in message.tool_calls %}\n {%- if (loop.first and 
content) or (not loop.first) %}\n {{- '\\n' }}\n {%- endif %}\n {%- if tool_call.function %}\n {%- set tool_call = tool_call.function %}\n {%- endif %}\n {{- '<tool_call>\\n{\"name\": \"' }}\n {{- tool_call.name }}\n {{- '\", \"arguments\": ' }}\n {%- if tool_call.arguments is string %}\n {{- tool_call.arguments }}\n {%- else %}\n {{- tool_call.arguments | tojson }}\n {%- endif %}\n {{- '}\\n</tool_call>' }}\n {%- endfor %}\n {%- endif %}\n {{- '<|im_end|>\\n' }}\n {%- elif message.role == \"tool\" %}\n {%- if loop.first or (messages[loop.index0 - 1].role != \"tool\") %}\n {{- '<|im_start|>user' }}\n {%- endif %}\n {{- '\\n<tool_response>\\n' }}\n {{- content }}\n {{- '\\n</tool_response>' }}\n {%- if loop.last or (messages[loop.index0 + 1].role != \"tool\") %}\n {{- '<|im_end|>\\n' }}\n {%- endif %}\n {%- endif %}\n{%- endfor %}\n{%- if add_generation_prompt %}\n {{- '<|im_start|>assistant\\n<think>\\n' }}\n{%- endif %}\n"
242
  }
 
238
  "split_special_tokens": false,
239
  "tokenizer_class": "Qwen2Tokenizer",
240
  "unk_token": null,
241
+ "chat_template": "{%- set image_count = namespace(value=0) %}\n{%- set video_count = namespace(value=0) %}\n{%- macro render_content(content, do_vision_count) %}\n {%- if content is string %}\n {{- content }}\n {%- else %}\n {%- for item in content %}\n {%- if 'image' in item or 'image_url' in item or item.type == 'image' %}\n {%- if do_vision_count %}\n {%- set image_count.value = image_count.value + 1 %}\n {%- endif %}\n {%- if add_vision_id %}Picture {{ image_count.value }}: {% endif -%}\n <|vision_start|><|image_pad|><|vision_end|>\n {%- elif 'video' in item or item.type == 'video' %}\n {%- if do_vision_count %}\n {%- set video_count.value = video_count.value + 1 %}\n {%- endif %}\n {%- if add_vision_id %}Video {{ video_count.value }}: {% endif -%}\n <|vision_start|><|video_pad|><|vision_end|>\n {%- elif 'text' in item %}\n {{- item.text }}\n {%- endif %}\n {%- endfor %}\n {%- endif %}\n{%- endmacro %}\n{%- if tools %}\n {{- '<|im_start|>system\\n' }}\n {%- if messages[0].role == 'system' %}\n {{- render_content(messages[0].content, false) + '\\n\\n' }}\n {%- endif %}\n {{- \"# Tools\\n\\nYou may call one or more functions to assist with the user query.\\n\\nYou are provided with function signatures within <tools></tools> XML tags:\\n<tools>\" }}\n {%- for tool in tools %}\n {{- \"\\n\" }}\n {{- tool | tojson }}\n {%- endfor %}\n {{- \"\\n</tools>\\n\\nFor each function call, return a json object with function name and arguments within <tool_call></tool_call> XML tags:\\n<tool_call>\\n{\\\"name\\\": <function-name>, \\\"arguments\\\": <args-json-object>}\\n</tool_call><|im_end|>\\n\" }}\n{%- else %}\n {%- if messages[0].role == 'system' %}\n {{- '<|im_start|>system\\n' + render_content(messages[0].content, false) + '<|im_end|>\\n' }}\n {%- endif %}\n{%- endif %}\n{%- set ns = namespace(multi_step_tool=true, last_query_index=messages|length - 1) %}\n{%- for message in messages[::-1] %}\n {%- set index = (messages|length - 1) - loop.index0 %}\n {%- if ns.multi_step_tool and message.role == \"user\" %}\n {%- set content = render_content(message.content, false) %}\n {%- if not(content.startswith('<tool_response>') and content.endswith('</tool_response>')) %}\n {%- set ns.multi_step_tool = false %}\n {%- set ns.last_query_index = index %}\n {%- endif %}\n {%- endif %}\n{%- endfor %}\n{%- for message in messages %}\n {%- set content = render_content(message.content, True) %}\n {%- if (message.role == \"user\") or (message.role == \"system\" and not loop.first) %}\n {{- '<|im_start|>' + message.role + '\\n' + content + '<|im_end|>' + '\\n' }}\n {%- elif message.role == \"assistant\" %}\n {%- set reasoning_content = '' %}\n {%- if message.reasoning_content is string %}\n {%- set reasoning_content = message.reasoning_content %}\n {%- else %}\n {%- if '</think>' in content %}\n {%- set reasoning_content = ((content.split('</think>')|first).rstrip('\\n').split('<think>')|last).lstrip('\\n') %}\n {%- set content = (content.split('</think>')|last).lstrip('\\n') %}\n {%- endif %}\n {%- endif %}\n {%- if loop.index0 > ns.last_query_index %}\n {%- if loop.last or (not loop.last and reasoning_content) %}\n {{- '<|im_start|>' + message.role + '\\n<think>\\n' + reasoning_content.strip('\\n') + '\\n</think>\\n\\n' + content.lstrip('\\n') }}\n {%- else %}\n {{- '<|im_start|>' + message.role + '\\n' + content }}\n {%- endif %}\n {%- else %}\n {{- '<|im_start|>' + message.role + '\\n' + content }}\n {%- endif %}\n {%- if message.tool_calls %}\n {%- for tool_call in message.tool_calls %}\n {%- if 
(loop.first and content) or (not loop.first) %}\n {{- '\\n' }}\n {%- endif %}\n {%- if tool_call.function %}\n {%- set tool_call = tool_call.function %}\n {%- endif %}\n {{- '<tool_call>\\n{\"name\": \"' }}\n {{- tool_call.name }}\n {{- '\", \"arguments\": ' }}\n {%- if tool_call.arguments is string %}\n {{- tool_call.arguments }}\n {%- else %}\n {{- tool_call.arguments | tojson }}\n {%- endif %}\n {{- '}\\n</tool_call>' }}\n {%- endfor %}\n {%- endif %}\n {{- '<|im_end|>\\n' }}\n {%- elif message.role == \"tool\" %}\n {%- if loop.first or (messages[loop.index0 - 1].role != \"tool\") %}\n {{- '<|im_start|>user' }}\n {%- endif %}\n {{- '\\n<tool_response>\\n' }}\n {{- content }}\n {{- '\\n</tool_response>' }}\n {%- if loop.last or (messages[loop.index0 + 1].role != \"tool\") %}\n {{- '<|im_end|>\\n' }}\n {%- endif %}\n {%- endif %}\n{%- endfor %}\n{%- if add_generation_prompt %}\n {{- '<|im_start|>assistant\\n<think>\\n' }}\n{%- endif %}\n"
242
  }
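
The only change to tokenizer_config.json is inside the embedded chat_template string: the `[0]`/`[-1]` subscripts that peel the reasoning out of the assistant's `<think>...</think>` block are replaced by the `first`/`last` filters. Under stock Jinja2 the two spellings render identically; the filter form is presumably chosen for portability to stricter template engines that do not support negative sequence subscripting. Below is a small sanity check of that equivalence, assuming only the jinja2 package; the sample content string is an illustrative assumption, not taken from the repository.

# Sanity check (not part of this commit): the old subscript form and the new
# first/last filter form split reasoning and content identically.
from jinja2 import Template

content = "<think>\nsome reasoning\n</think>\n\nfinal answer"

old = Template(
    "{{ content.split('</think>')[0].rstrip('\\n').split('<think>')[-1].lstrip('\\n') }}"
    "|{{ content.split('</think>')[-1].lstrip('\\n') }}"
)
new = Template(
    "{{ ((content.split('</think>')|first).rstrip('\\n').split('<think>')|last).lstrip('\\n') }}"
    "|{{ (content.split('</think>')|last).lstrip('\\n') }}"
)

assert old.render(content=content) == new.render(content=content) == "some reasoning|final answer"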