i-riyad committed
Commit 39a5817 · 0 Parent(s)

Initial commit
.gitattributes ADDED
@@ -0,0 +1,38 @@
+ *.7z filter=lfs diff=lfs merge=lfs -text
+ *.arrow filter=lfs diff=lfs merge=lfs -text
+ *.bin filter=lfs diff=lfs merge=lfs -text
+ *.bz2 filter=lfs diff=lfs merge=lfs -text
+ *.ckpt filter=lfs diff=lfs merge=lfs -text
+ *.ftz filter=lfs diff=lfs merge=lfs -text
+ *.gz filter=lfs diff=lfs merge=lfs -text
+ *.h5 filter=lfs diff=lfs merge=lfs -text
+ *.joblib filter=lfs diff=lfs merge=lfs -text
+ *.lfs.* filter=lfs diff=lfs merge=lfs -text
+ *.mlmodel filter=lfs diff=lfs merge=lfs -text
+ *.model filter=lfs diff=lfs merge=lfs -text
+ *.msgpack filter=lfs diff=lfs merge=lfs -text
+ *.npy filter=lfs diff=lfs merge=lfs -text
+ *.npz filter=lfs diff=lfs merge=lfs -text
+ *.onnx filter=lfs diff=lfs merge=lfs -text
+ *.ot filter=lfs diff=lfs merge=lfs -text
+ *.parquet filter=lfs diff=lfs merge=lfs -text
+ *.pb filter=lfs diff=lfs merge=lfs -text
+ *.pickle filter=lfs diff=lfs merge=lfs -text
+ *.pkl filter=lfs diff=lfs merge=lfs -text
+ *.pt filter=lfs diff=lfs merge=lfs -text
+ *.pth filter=lfs diff=lfs merge=lfs -text
+ *.rar filter=lfs diff=lfs merge=lfs -text
+ *.safetensors filter=lfs diff=lfs merge=lfs -text
+ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
+ *.tar.* filter=lfs diff=lfs merge=lfs -text
+ *.tar filter=lfs diff=lfs merge=lfs -text
+ *.tflite filter=lfs diff=lfs merge=lfs -text
+ *.tgz filter=lfs diff=lfs merge=lfs -text
+ *.wasm filter=lfs diff=lfs merge=lfs -text
+ *.xz filter=lfs diff=lfs merge=lfs -text
+ *.zip filter=lfs diff=lfs merge=lfs -text
+ *.zst filter=lfs diff=lfs merge=lfs -text
+ *tfevents* filter=lfs diff=lfs merge=lfs -text
+ tokenizer.json filter=lfs diff=lfs merge=lfs -text
+ *.safetensors filter=lfs diff=lfs merge=lfs -text
+ tokenizer.json filter=lfs diff=lfs merge=lfs -text
LICENSE.pdf ADDED
Binary file (76.7 kB).
README.md ADDED
@@ -0,0 +1,232 @@
+ ---
+ pipeline_tag: text-generation
+ base_model:
+ - Qwen/Qwen2.5-VL-7B-Instruct
+ license: other
+ license_name: nvidia-open-model-license
+ license_link: >-
+   https://www.nvidia.com/en-us/agreements/enterprise-software/nvidia-open-model-license
+ library_name: Model Optimizer
+ tags:
+ - nvidia
+ - ModelOpt
+ - qwen2_5_vl
+ - quantized
+ - FP4
+ - fp4
+ extra_gated_prompt: >-
+   # NVIDIA Open Model License Agreement
+
+   Version Release Date: April 28, 2025
+
+   This NVIDIA Open Model License Agreement (the "<ins>Agreement</ins>") is a legal agreement between the Legal Entity You represent, or if no entity is identified, You and NVIDIA Corporation and its Affiliates ("<ins>NVIDIA</ins>") and governs Your use of the Models that NVIDIA provides to You under this Agreement. NVIDIA and You are each a "<ins>party</ins>" and collectively the "<ins>parties</ins>."
+
+   NVIDIA models released under this Agreement are intended to be used permissively and enable the further development of AI technologies. Subject to the terms of this Agreement, NVIDIA confirms that:
+
+   * Models are commercially usable.
+
+   * You are free to create and distribute Derivative Models.
+
+   * NVIDIA does not claim ownership to any outputs generated using the Models or Model Derivatives.
+
+   By using, reproducing, modifying, distributing, performing or displaying any portion or element of the Model or Derivative Model, or otherwise accepting the terms of this Agreement, you agree to be bound by this Agreement.
+
+   ## 1. Definitions
+
+   The following definitions apply to this Agreement:
+
+   1.1. "<ins>Qwen2.5-VL-7B-Instruct-FP4 Model</ins>" means a multimodal Model shared under this Agreement.
+
+   1.2. "<ins>Derivative Model</ins>" means all (a) modifications to the Model, (b) works based on the Model, and (c) any other derivative works of the Model. An output is not a Derivative Model.
+
+   1.3. "<ins>Legal Entity</ins>" means the union of the acting entity and all other entities that control, are controlled by, or are under common control with that entity. For the purposes of this definition, "<ins>control</ins>" means (a) the power, direct or indirect, to cause the direction or management of such entity, whether by contract or otherwise, or (b) ownership of fifty percent (50%) or more of the outstanding shares, or (c) beneficial ownership of such entity.
+
+   1.4. "<ins>Model</ins>" means the machine learning model, software, checkpoints, learnt weights, algorithms, parameters, configuration files and documentation shared under this Agreement.
+
+   1.5. "<ins>You</ins>" or "<ins>Your</ins>" means an individual or Legal Entity exercising permissions granted by this Agreement.
+
+   ## 2. Conditions for Use, License Grant, AI Ethics and IP Ownership
+
+   2.1. Conditions for Use. The Model and any Derivative Model are subject to additional terms as described in Section 2 and Section 3 of this Agreement and govern Your use. If You institute copyright or patent litigation against any entity (including a cross-claim or counterclaim in a lawsuit) alleging that the Model or a Derivative Model constitutes direct or contributory copyright or patent infringement, then any licenses granted to You under this Agreement for that Model or Derivative Model will terminate as of the date such litigation is filed. If You bypass, disable, reduce the efficacy of, or circumvent any technical limitation, safety guardrail or associated safety guardrail hyperparameter, encryption, security, digital rights management, or authentication mechanism contained in the Model, your rights under this Agreement will automatically terminate. NVIDIA may update this Agreement to comply with legal and regulatory requirements at any time and You agree to either comply with any updated license or cease Your copying, use, and distribution of the Model and any Derivative Model.
+
+   2.2. License Grant. The rights granted herein are explicitly conditioned on Your full compliance with the terms of this Agreement. Subject to the terms and conditions of this Agreement, NVIDIA hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, revocable (as stated in Section 2.1) license to publicly perform, publicly display, reproduce, use, create derivative works of, make, have made, sell, offer for sale, distribute (through multiple tiers of distribution) and import the Model.
+
+   2.3. AI Ethics. Use of the Models under the Agreement must be consistent with NVIDIA's Trustworthy AI terms found at https://www.nvidia.com/en-us/agreements/trustworthy-ai/terms/.
+
+   2.4. NVIDIA owns the Model and any Model Derivatives created by NVIDIA. Subject to NVIDIA’s underlying ownership rights in the Model or its Model Derivatives, You are and will be the owner of Your Model Derivatives. NVIDIA claims no ownership rights in outputs. You are responsible for outputs and their subsequent uses. Except as expressly granted in this Agreement, (a) NVIDIA reserves all rights, interests and remedies in connection with the Model and (b) no other license or right is granted to you by implication, estoppel or otherwise.
+
+   ## 3. Redistribution
+
+   You may reproduce and distribute copies of the Model or Derivative Models thereof in any medium, with or without modifications, provided that You meet the following conditions:
+
+   3.1. If you distribute the Model, You must give any other recipients of the Model a copy of this Agreement and include the following attribution notice within a “Notice” text file with such copies: “Licensed by NVIDIA Corporation under the NVIDIA Open Model License”;
+
+   3.2. If you distribute or make available a Qwen2.5-VL-7B-Instruct-FP4 Model, or a product or service (including an AI model) that contains or uses a Qwen2.5-VL-7B-Instruct-FP4 Model, use a Qwen2.5-VL-7B-Instruct-FP4 Model to create a Derivative Model, or use a Qwen2.5-VL-7B-Instruct-FP4 Model or its outputs to create, train, fine tune, or otherwise improve an AI model, you will include “Built on Qwen2.5-VL-7B-Instruct-FP4” on a related website, user interface, blogpost, about page, or product documentation; and
+
+   3.3. You may add Your own copyright statement to Your modifications and may provide additional or different license terms and conditions for use, reproduction, or distribution of Your modifications, or for any such Derivative Models as a whole, provided Your use, reproduction, and distribution of the Model otherwise complies with the conditions stated in this Agreement.
+
+   ## 4. Separate Components
+
+   The Models may include or be distributed with components provided with separate legal notices or terms that accompany the components, such as an Open Source Software License or other third-party license. The components are subject to the applicable other licenses, including any proprietary notices, disclaimers, requirements and extended use rights; except that this Agreement will prevail regarding the use of third-party Open Source Software License, unless a third-party Open Source Software License requires its license terms to prevail. “Open Source Software License” means any software, data or documentation subject to any license identified as an open source license by the Open Source Initiative (https://opensource.org), Free Software Foundation (https://www.fsf.org) or other similar open source organization or listed by the Software Package Data Exchange (SPDX) Workgroup under the Linux Foundation (https://www.spdx.org).
+
+   ## 5. Trademarks
+
+   This Agreement does not grant permission to use the trade names, trademarks, service marks, or product names of NVIDIA, except as required for reasonable and customary use in describing the origin of the Model and reproducing the content of the “Notice” text file.
+
+   ## 6. Disclaimer of Warranty
+
+   **Unless required by applicable law or agreed to in writing, NVIDIA provides the Model on an “AS IS” BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, including, without limitation, any warranties or conditions of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are solely responsible for determining the appropriateness of using or redistributing the Model, Derivative Models and outputs and assume any risks associated with Your exercise of permissions under this Agreement.**
+
+   ## 7. Limitation of Liability
+
+   **In no event and under no legal theory, whether in tort (including negligence), contract, or otherwise, unless required by applicable law (such as deliberate and grossly negligent acts) or agreed to in writing, will NVIDIA be liable to You for damages, including any direct, indirect, special, incidental, or consequential damages of any character arising as a result of this Agreement or out of the use or inability to use the Model, Derivative Models or outputs (including but not limited to damages for loss of goodwill, work stoppage, computer failure or malfunction, or any and all other commercial damages or losses), even if NVIDIA has been advised of the possibility of such damages.**
+
+   ## 8. Indemnity
+
+   You will indemnify and hold harmless NVIDIA from and against any claim by any third party arising out of or related to your use or distribution of the Model, Model Derivatives or outputs.
+
+   ## 9. Feedback
+
+   NVIDIA appreciates your feedback, and You agree that NVIDIA may use it without restriction or compensation to You.
+
+   ## 10. Governing Law
+
+   This Agreement will be governed in all respects by the laws of the United States and the laws of the State of Delaware, without regard to conflict of laws principles or the United Nations Convention on Contracts for the International Sale of Goods. The state and federal courts residing in Santa Clara County, California will have exclusive jurisdiction over any dispute or claim arising out of or related to this Agreement, and the parties irrevocably consent to personal jurisdiction and venue in those courts; except that, either party may apply for injunctive remedies or an equivalent type of urgent legal relief in any jurisdiction.
+
+   ## 11. Trade and Compliance
+
+   You agree to comply with all applicable export, import, trade and economic sanctions laws and regulations, as amended, including without limitation U.S. Export Administration Regulations and Office of Foreign Assets Control regulations. These laws include restrictions on destinations, end-users and end-use.
+ extra_gated_fields:
+   By clicking Submit below, I accept the terms of the NVIDIA Open Model License Agreement and acknowledge that I am an adult of legal age of majority in the country in which the Qwen2_5-VL Models will be used and have authority to accept this Agreement: checkbox
+ extra_gated_description: >-
+   The information you provide will be collected, stored, processed and shared in accordance with the [NVIDIA Privacy Policy](https://www.nvidia.com/en-us/about-nvidia/privacy-policy/).
+ extra_gated_button_content: Submit
+ ---
+
+ # Model Overview
+
+ ## Description:
+ The NVIDIA Qwen2.5-VL-7B-Instruct-FP4 model is the quantized version of Alibaba's Qwen2.5-VL-7B-Instruct model, an auto-regressive language model that uses an optimized transformer architecture. For more information, please check [here](https://huggingface.co/Qwen/Qwen2.5-VL-7B-Instruct). The NVIDIA Qwen2.5-VL-7B-Instruct-FP4 model is quantized with [TensorRT Model Optimizer](https://github.com/NVIDIA/TensorRT-Model-Optimizer).
+
+ This model is ready for commercial/non-commercial use. <br>
+
+ ## Third-Party Community Consideration
+ This model is not owned or developed by NVIDIA. It was developed and built to a third party’s requirements for this application and use case. See the Non-NVIDIA [(Qwen2.5-VL-7B-Instruct) Model Card](https://huggingface.co/Qwen/Qwen2.5-VL-7B-Instruct).
+
+ ### License/Terms of Use:
+ Use of this model is governed by the [NVIDIA Open Model License](https://www.nvidia.com/en-us/agreements/enterprise-software/nvidia-open-model-license/).
+ ADDITIONAL INFORMATION: [Apache 2.0](https://huggingface.co/datasets/choosealicense/licenses/blob/main/markdown/apache-2.0.md).
+
+ ### Deployment Geography:
+ Global, except in the European Union <br>
+
+ ### Use Case: <br>
+ Developers looking to take off-the-shelf, pre-quantized models for deployment in AI agent systems, chatbots, RAG systems, and other AI-powered applications. <br>
+
+ ### Release Date: <br>
+ Hugging Face 08/22/2025 via https://huggingface.co/nvidia/Qwen2.5-VL-7B-Instruct-FP4 <br>
+
+ ## Model Architecture:
+ **Architecture Type:** Transformers <br>
+ **Network Architecture:** Qwen2.5-VL-7B <br>
+
+ **This model was developed based on Qwen2.5-VL-7B.** <br>
+ **Number of model parameters:** 7×10^9 <br>
+
+ ## Input:
+ **Input Type(s):** Multilingual text and images <br>
+ **Input Format(s):** String, Images <br>
+ **Input Parameters:** One-Dimensional (1D), Two-Dimensional (2D) <br>
+ **Other Properties Related to Input:** Context length up to 32K <br>
+
+ ## Output:
+ **Output Type(s):** Text <br>
+ **Output Format:** String <br>
+ **Output Parameters:** 1D (One-Dimensional): Sequences <br>
+ **Other Properties Related to Output:** N/A <br>
+
+ Our AI models are designed and/or optimized to run on NVIDIA GPU-accelerated systems. By leveraging NVIDIA’s hardware (e.g., GPU cores) and software frameworks (e.g., CUDA libraries), they achieve faster training and inference times compared to CPU-only solutions. <br>
+
+ ## Software Integration:
+ **Supported Runtime Engine(s):** <br>
+ * TensorRT-LLM <br>
+
+ **Supported Hardware Microarchitecture Compatibility:** <br>
+ * NVIDIA Blackwell <br>
+
+ **Preferred Operating System(s):** <br>
+ * Linux <br>
+
+ ## Model Version(s):
+ The model is quantized with nvidia-modelopt **v0.35.0** <br>
+
+ ## Post Training Quantization
+ This model was obtained by quantizing the weights and activations of Qwen2.5-VL-7B-Instruct to the FP4 data type, ready for inference with TensorRT-LLM. Only the weights and activations of the linear operators within the transformer blocks of the language model are quantized.
+
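+ For reference, a quantization flow along these lines can be sketched with ModelOpt's PTQ API. The snippet below is a minimal illustration, not the exact recipe used to produce this checkpoint; it assumes ModelOpt's `mtq.quantize`, the `NVFP4_DEFAULT_CFG` preset, and `export_hf_checkpoint`, and `calibration_texts` is a placeholder:
+
+ ```
+ import modelopt.torch.quantization as mtq
+ from modelopt.torch.export import export_hf_checkpoint
+ from transformers import AutoProcessor, Qwen2_5_VLForConditionalGeneration
+
+ model = Qwen2_5_VLForConditionalGeneration.from_pretrained(
+     "Qwen/Qwen2.5-VL-7B-Instruct", torch_dtype="bfloat16", device_map="cuda"
+ )
+ processor = AutoProcessor.from_pretrained("Qwen/Qwen2.5-VL-7B-Instruct")
+
+ def forward_loop(m):
+     # Calibration pass: run a few hundred samples (e.g. cnn_dailymail
+     # articles) through the model so activation ranges can be observed.
+     for text in calibration_texts:  # placeholder iterable of strings
+         inputs = processor(text=[text], return_tensors="pt").to(m.device)
+         m(**inputs)
+
+ # FP4 weights and activations, group size 16, Linear modules only --
+ # matching the quantization_config shipped in config.json.
+ model = mtq.quantize(model, mtq.NVFP4_DEFAULT_CFG, forward_loop)
+ export_hf_checkpoint(model, export_dir="Qwen2.5-VL-7B-Instruct-FP4")
+ ```
+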
+ ## Training, Testing, and Evaluation Datasets:
+ **Data Modality:** <br>
+ * Image <br>
+ * Text <br>
+
+ ## Calibration Dataset:
+ **Link:** [cnn_dailymail](https://huggingface.co/datasets/abisee/cnn_dailymail) <br>
+ **Data collection method:** Automated <br>
+ **Labeling method:** Automated <br>
+
+ ## Training Datasets:
+ **Data Collection Method by Dataset:** Undisclosed <br>
+ **Labeling Method by Dataset:** Undisclosed <br>
+ **Properties:** Undisclosed
+
+ ## Testing Dataset:
+ **Data Collection Method by Dataset:** Undisclosed <br>
+ **Labeling Method by Dataset:** Undisclosed <br>
+ **Properties:** Undisclosed <br>
+
+ ## Inference:
+ **Engine:** TensorRT-LLM <br>
+ **Test Hardware:** B200 (coming soon) <br>
+ **Currently supported on:** DGX Spark <br>
+
+ ## Usage
+
+ ### Deploy with TensorRT-LLM
+
+ To deploy the quantized checkpoint with the [TensorRT-LLM](https://github.com/NVIDIA/TensorRT-LLM) LLM API, follow the sample code below:
+
+ * LLM API sample usage:
+ ```
+ from tensorrt_llm import LLM, SamplingParams
+
+
+ def main():
+     prompts = [
+         "Hello, my name is",
+         "The president of the United States is",
+         "The capital of France is",
+         "The future of AI is",
+     ]
+     sampling_params = SamplingParams(temperature=0.8, top_p=0.95)
+
+     llm = LLM(model="nvidia/Qwen2.5-VL-7B-Instruct-FP4", tensor_parallel_size=4)
+
+     outputs = llm.generate(prompts, sampling_params)
+
+     # Print the outputs.
+     for output in outputs:
+         prompt = output.prompt
+         generated_text = output.outputs[0].text
+         print(f"Prompt: {prompt!r}, Generated text: {generated_text!r}")
+
+
+ # The entry point of the program needs to be protected for spawning processes.
+ if __name__ == '__main__':
+     main()
+ ```
+
+ ## Ethical Considerations
+
+ NVIDIA believes Trustworthy AI is a shared responsibility and we have established policies and practices to enable development for a wide array of AI applications. When downloaded or used in accordance with our terms of service, developers should work with their internal model team to ensure this model meets requirements for the relevant industry and use case and addresses unforeseen product misuse.
+
+ Please report model quality, risk, security vulnerabilities or NVIDIA AI Concerns here.
added_tokens.json ADDED
@@ -0,0 +1,24 @@
+ {
+   "</tool_call>": 151658,
+   "<tool_call>": 151657,
+   "<|box_end|>": 151649,
+   "<|box_start|>": 151648,
+   "<|endoftext|>": 151643,
+   "<|file_sep|>": 151664,
+   "<|fim_middle|>": 151660,
+   "<|fim_pad|>": 151662,
+   "<|fim_prefix|>": 151659,
+   "<|fim_suffix|>": 151661,
+   "<|im_end|>": 151645,
+   "<|im_start|>": 151644,
+   "<|image_pad|>": 151655,
+   "<|object_ref_end|>": 151647,
+   "<|object_ref_start|>": 151646,
+   "<|quad_end|>": 151651,
+   "<|quad_start|>": 151650,
+   "<|repo_name|>": 151663,
+   "<|video_pad|>": 151656,
+   "<|vision_end|>": 151653,
+   "<|vision_pad|>": 151654,
+   "<|vision_start|>": 151652
+ }
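The map above pins each added token to a fixed id. A minimal sketch, using the standard transformers tokenizer API, to confirm the shipped tokenizer agrees with it:

```
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("nvidia/Qwen2.5-VL-7B-Instruct-FP4")
# Spot-check a few of the ids listed in added_tokens.json.
assert tok.convert_tokens_to_ids("<|image_pad|>") == 151655
assert tok.convert_tokens_to_ids("<|vision_start|>") == 151652
assert tok.convert_tokens_to_ids("<|im_end|>") == 151645
```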
chat_template.jinja ADDED
@@ -0,0 +1,7 @@
+ {% set image_count = namespace(value=0) %}{% set video_count = namespace(value=0) %}{% for message in messages %}{% if loop.first and message['role'] != 'system' %}<|im_start|>system
+ You are a helpful assistant.<|im_end|>
+ {% endif %}<|im_start|>{{ message['role'] }}
+ {% if message['content'] is string %}{{ message['content'] }}<|im_end|>
+ {% else %}{% for content in message['content'] %}{% if content['type'] == 'image' or 'image' in content or 'image_url' in content %}{% set image_count.value = image_count.value + 1 %}{% if add_vision_id %}Picture {{ image_count.value }}: {% endif %}<|vision_start|><|image_pad|><|vision_end|>{% elif content['type'] == 'video' or 'video' in content %}{% set video_count.value = video_count.value + 1 %}{% if add_vision_id %}Video {{ video_count.value }}: {% endif %}<|vision_start|><|video_pad|><|vision_end|>{% elif 'text' in content %}{{ content['text'] }}{% endif %}{% endfor %}<|im_end|>
+ {% endif %}{% endfor %}{% if add_generation_prompt %}<|im_start|>assistant
+ {% endif %}
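This template is what `apply_chat_template` renders at inference time: an implicit system turn when none is given, a `<|vision_start|><|image_pad|><|vision_end|>` span per image (a `Picture N:` label is added only when `add_vision_id` is set), and a trailing assistant header when `add_generation_prompt` is true. A minimal sketch of exercising it through the standard processor API:

```
from transformers import AutoProcessor

processor = AutoProcessor.from_pretrained("nvidia/Qwen2.5-VL-7B-Instruct-FP4")
messages = [
    {"role": "user", "content": [
        {"type": "image"},
        {"type": "text", "text": "Describe this image."},
    ]},
]
prompt = processor.apply_chat_template(
    messages, tokenize=False, add_generation_prompt=True
)
print(prompt)  # system turn, vision placeholder, then "<|im_start|>assistant"
```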
config.json ADDED
@@ -0,0 +1,165 @@
+ {
+   "architectures": [
+     "Qwen2_5_VLForConditionalGeneration"
+   ],
+   "attention_dropout": 0.0,
+   "bos_token_id": 151643,
+   "torch_dtype": "bfloat16",
+   "eos_token_id": 151645,
+   "hidden_act": "silu",
+   "hidden_size": 3584,
+   "image_token_id": 151655,
+   "initializer_range": 0.02,
+   "intermediate_size": 18944,
+   "max_position_embeddings": 128000,
+   "max_window_layers": 28,
+   "model_type": "qwen2_5_vl",
+   "num_attention_heads": 28,
+   "num_hidden_layers": 28,
+   "num_key_value_heads": 4,
+   "rms_norm_eps": 1e-06,
+   "rope_scaling": {
+     "mrope_section": [
+       16,
+       24,
+       24
+     ],
+     "rope_type": "default",
+     "type": "default"
+   },
+   "rope_theta": 1000000.0,
+   "sliding_window": 32768,
+   "text_config": {
+     "architectures": [
+       "Qwen2_5_VLForConditionalGeneration"
+     ],
+     "attention_dropout": 0.0,
+     "bos_token_id": 151643,
+     "torch_dtype": "bfloat16",
+     "eos_token_id": 151645,
+     "hidden_act": "silu",
+     "hidden_size": 3584,
+     "image_token_id": null,
+     "initializer_range": 0.02,
+     "intermediate_size": 18944,
+     "layer_types": [
+       "full_attention",
+       "full_attention",
+       "full_attention",
+       "full_attention",
+       "full_attention",
+       "full_attention",
+       "full_attention",
+       "full_attention",
+       "full_attention",
+       "full_attention",
+       "full_attention",
+       "full_attention",
+       "full_attention",
+       "full_attention",
+       "full_attention",
+       "full_attention",
+       "full_attention",
+       "full_attention",
+       "full_attention",
+       "full_attention",
+       "full_attention",
+       "full_attention",
+       "full_attention",
+       "full_attention",
+       "full_attention",
+       "full_attention",
+       "full_attention",
+       "full_attention"
+     ],
+     "max_position_embeddings": 128000,
+     "max_window_layers": 28,
+     "model_type": "qwen2_5_vl_text",
+     "num_attention_heads": 28,
+     "num_hidden_layers": 28,
+     "num_key_value_heads": 4,
+     "rms_norm_eps": 1e-06,
+     "rope_scaling": {
+       "mrope_section": [
+         16,
+         24,
+         24
+       ],
+       "rope_type": "default",
+       "type": "default"
+     },
+     "rope_theta": 1000000.0,
+     "sliding_window": null,
+     "use_cache": true,
+     "use_sliding_window": false,
+     "video_token_id": null,
+     "vision_end_token_id": 151653,
+     "vision_start_token_id": 151652,
+     "vision_token_id": 151654,
+     "vocab_size": 152064
+   },
+   "tie_word_embeddings": false,
+   "transformers_version": "4.56.1",
+   "use_cache": true,
+   "use_sliding_window": false,
+   "video_token_id": 151656,
+   "vision_config": {
+     "depth": 32,
+     "fullatt_block_indexes": [
+       7,
+       15,
+       23,
+       31
+     ],
+     "hidden_act": "silu",
+     "hidden_size": 1280,
+     "in_channels": 3,
+     "in_chans": 3,
+     "initializer_range": 0.02,
+     "intermediate_size": 3420,
+     "model_type": "qwen2_5_vl",
+     "num_heads": 16,
+     "out_hidden_size": 3584,
+     "patch_size": 14,
+     "spatial_merge_size": 2,
+     "spatial_patch_size": 14,
+     "temporal_patch_size": 2,
+     "tokens_per_second": 2,
+     "window_size": 112
+   },
+   "vision_end_token_id": 151653,
+   "vision_start_token_id": 151652,
+   "vision_token_id": 151654,
+   "vocab_size": 152064,
+   "quantization_config": {
+     "config_groups": {
+       "group_0": {
+         "input_activations": {
+           "dynamic": false,
+           "num_bits": 4,
+           "type": "float",
+           "group_size": 16
+         },
+         "weights": {
+           "dynamic": false,
+           "num_bits": 4,
+           "type": "float",
+           "group_size": 16
+         },
+         "targets": [
+           "Linear"
+         ]
+       }
+     },
+     "ignore": [
+       "model.layers.visual*",
+       "lm_head"
+     ],
+     "quant_algo": "NVFP4",
+     "producer": {
+       "name": "modelopt",
+       "version": "0.37.0.dev16+ga6fa34cda.d20250909"
+     },
+     "quant_method": "modelopt"
+   }
+ }
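As a rough cross-check of the README's "7×10^9 parameters" figure, the `text_config` shapes above are enough for a back-of-the-envelope estimate; the sketch below ignores biases, norms, and the vision tower:

```
# Rough parameter count for the language model from text_config above
# (untied embeddings per tie_word_embeddings=false; biases/norms ignored).
h, inter, layers, vocab = 3584, 18944, 28, 152064
kv_heads, head_dim = 4, 3584 // 28  # head_dim = 128

attn = 2 * h * h + 2 * (h * kv_heads * head_dim)  # Q/O full, K/V grouped
mlp = 3 * h * inter                               # gate, up, down projections
total = layers * (attn + mlp) + 2 * vocab * h     # + embedding and lm_head

print(f"~{total / 1e9:.1f}B")  # ~7.6B, consistent with the README's 7e9 figure
```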
generation_config.json ADDED
@@ -0,0 +1,12 @@
+ {
+   "bos_token_id": 151643,
+   "do_sample": true,
+   "eos_token_id": [
+     151645,
+     151643
+   ],
+   "pad_token_id": 151643,
+   "repetition_penalty": 1.05,
+   "temperature": 1e-06,
+   "transformers_version": "4.56.1"
+ }
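These sampling defaults travel with the checkpoint and are picked up automatically by transformers-style loaders. A minimal sketch to inspect them; note that `temperature=1e-06` combined with `do_sample=true` makes sampling effectively greedy, while `repetition_penalty=1.05` still damps loops:

```
from transformers import GenerationConfig

gen_cfg = GenerationConfig.from_pretrained("nvidia/Qwen2.5-VL-7B-Instruct-FP4")
print(gen_cfg.do_sample)           # True
print(gen_cfg.temperature)         # 1e-06 -> effectively greedy decoding
print(gen_cfg.repetition_penalty)  # 1.05
print(gen_cfg.eos_token_id)        # [151645, 151643]
```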
hf_quant_config.json ADDED
@@ -0,0 +1,15 @@
+ {
+   "producer": {
+     "name": "modelopt",
+     "version": "0.37.0.dev16+ga6fa34cda.d20250909"
+   },
+   "quantization": {
+     "quant_algo": "NVFP4",
+     "kv_cache_quant_algo": null,
+     "group_size": 16,
+     "exclude_modules": [
+       "model.layers.visual*",
+       "lm_head"
+     ]
+   }
+ }
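The `exclude_modules` entries are shell-style wildcards over module names, which is how the vision tower and the output head stay in higher precision. A stdlib-only sketch of the matching logic a loader might apply (the helper name is illustrative):

```
from fnmatch import fnmatch

exclude_modules = ["model.layers.visual*", "lm_head"]

def is_quantized(module_name: str) -> bool:
    # Modules matching any exclude pattern keep their original precision.
    return not any(fnmatch(module_name, pat) for pat in exclude_modules)

print(is_quantized("lm_head"))                       # False
print(is_quantized("model.layers.0.mlp.down_proj"))  # True
```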
merges.txt ADDED
The diff for this file is too large to render.
model-00001-of-00002.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:e1fd4af01e69b84b090e830bf687f83576ef5b7eca1da60f377cf7e83ae074fc
+ size 4989051872
model-00002-of-00002.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:b67d3920398db4ad78f0816d0bdf3887fc2174240ee42ad85992cf32cf332802
+ size 2215324648
model.safetensors.index.json ADDED
The diff for this file is too large to render.
preprocessor_config.json ADDED
@@ -0,0 +1,37 @@
+ {
+   "crop_size": null,
+   "data_format": "channels_first",
+   "default_to_square": true,
+   "device": null,
+   "disable_grouping": null,
+   "do_center_crop": null,
+   "do_convert_rgb": true,
+   "do_normalize": true,
+   "do_rescale": true,
+   "do_resize": true,
+   "image_mean": [
+     0.48145466,
+     0.4578275,
+     0.40821073
+   ],
+   "image_processor_type": "Qwen2VLImageProcessorFast",
+   "image_std": [
+     0.26862954,
+     0.26130258,
+     0.27577711
+   ],
+   "input_data_format": null,
+   "max_pixels": 12845056,
+   "merge_size": 2,
+   "min_pixels": 3136,
+   "patch_size": 14,
+   "processor_class": "Qwen2_5_VLProcessor",
+   "resample": 3,
+   "rescale_factor": 0.00392156862745098,
+   "return_tensors": null,
+   "size": {
+     "longest_edge": 12845056,
+     "shortest_edge": 3136
+   },
+   "temporal_patch_size": 2
+ }
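With `patch_size=14`, `merge_size=2`, and the pixel bounds above, the visual token count per image follows directly. A small sketch, assuming the resize snaps each side to a multiple of `patch_size * merge_size` the way the Qwen2-VL processors do (the exact rounding in the library may differ slightly):

```
import math

PATCH, MERGE = 14, 2
MIN_PIXELS, MAX_PIXELS = 3136, 12845056

def visual_tokens(height: int, width: int) -> int:
    # Scale the image so its area lands within [min_pixels, max_pixels].
    area = height * width
    scale = 1.0
    if area > MAX_PIXELS:
        scale = math.sqrt(MAX_PIXELS / area)
    elif area < MIN_PIXELS:
        scale = math.sqrt(MIN_PIXELS / area)
    unit = PATCH * MERGE  # each side rounded to a multiple of 28
    h = max(unit, round(height * scale / unit) * unit)
    w = max(unit, round(width * scale / unit) * unit)
    # 14x14 patches, merged 2x2 before reaching the language model.
    return (h // PATCH) * (w // PATCH) // (MERGE * MERGE)

print(visual_tokens(1080, 1920))  # 2691 with this rounding
```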
special_tokens_map.json ADDED
@@ -0,0 +1,19 @@
+ {
+   "additional_special_tokens": [
+     "<|im_start|>",
+     "<|im_end|>",
+     "<|object_ref_start|>",
+     "<|object_ref_end|>",
+     "<|box_start|>",
+     "<|box_end|>",
+     "<|quad_start|>",
+     "<|quad_end|>",
+     "<|vision_start|>",
+     "<|vision_end|>",
+     "<|vision_pad|>",
+     "<|image_pad|>",
+     "<|video_pad|>"
+   ],
+   "eos_token": "<|endoftext|>",
+   "pad_token": "<|endoftext|>"
+ }
tokenizer.json ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:9c5ae00e602b8860cbd784ba82a8aa14e8feecec692e7076590d014d7b7fdafa
+ size 11421896
tokenizer_config.json ADDED
@@ -0,0 +1,207 @@
+ {
+   "add_bos_token": false,
+   "add_prefix_space": false,
+   "added_tokens_decoder": {
+     "151643": {
+       "content": "<|endoftext|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "151644": {
+       "content": "<|im_start|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "151645": {
+       "content": "<|im_end|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "151646": {
+       "content": "<|object_ref_start|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "151647": {
+       "content": "<|object_ref_end|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "151648": {
+       "content": "<|box_start|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "151649": {
+       "content": "<|box_end|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "151650": {
+       "content": "<|quad_start|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "151651": {
+       "content": "<|quad_end|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "151652": {
+       "content": "<|vision_start|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "151653": {
+       "content": "<|vision_end|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "151654": {
+       "content": "<|vision_pad|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "151655": {
+       "content": "<|image_pad|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "151656": {
+       "content": "<|video_pad|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "151657": {
+       "content": "<tool_call>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": false
+     },
+     "151658": {
+       "content": "</tool_call>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": false
+     },
+     "151659": {
+       "content": "<|fim_prefix|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": false
+     },
+     "151660": {
+       "content": "<|fim_middle|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": false
+     },
+     "151661": {
+       "content": "<|fim_suffix|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": false
+     },
+     "151662": {
+       "content": "<|fim_pad|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": false
+     },
+     "151663": {
+       "content": "<|repo_name|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": false
+     },
+     "151664": {
+       "content": "<|file_sep|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": false
+     }
+   },
+   "additional_special_tokens": [
+     "<|im_start|>",
+     "<|im_end|>",
+     "<|object_ref_start|>",
+     "<|object_ref_end|>",
+     "<|box_start|>",
+     "<|box_end|>",
+     "<|quad_start|>",
+     "<|quad_end|>",
+     "<|vision_start|>",
+     "<|vision_end|>",
+     "<|vision_pad|>",
+     "<|image_pad|>",
+     "<|video_pad|>"
+   ],
+   "bos_token": null,
+   "clean_up_tokenization_spaces": false,
+   "eos_token": "<|endoftext|>",
+   "errors": "replace",
+   "extra_special_tokens": {},
+   "model_max_length": 131072,
+   "pad_token": "<|endoftext|>",
+   "split_special_tokens": false,
+   "tokenizer_class": "Qwen2Tokenizer",
+   "unk_token": null
+ }
video_preprocessor_config.json ADDED
@@ -0,0 +1,44 @@
+ {
+   "crop_size": null,
+   "data_format": "channels_first",
+   "default_to_square": true,
+   "device": null,
+   "do_center_crop": null,
+   "do_convert_rgb": true,
+   "do_normalize": true,
+   "do_pad": null,
+   "do_rescale": true,
+   "do_resize": true,
+   "do_sample_frames": false,
+   "fps": null,
+   "image_mean": [
+     0.48145466,
+     0.4578275,
+     0.40821073
+   ],
+   "image_std": [
+     0.26862954,
+     0.26130258,
+     0.27577711
+   ],
+   "input_data_format": null,
+   "max_frames": 768,
+   "max_pixels": 12845056,
+   "merge_size": 2,
+   "min_frames": 4,
+   "min_pixels": 3136,
+   "num_frames": null,
+   "patch_size": 14,
+   "processor_class": "Qwen2_5_VLProcessor",
+   "resample": 3,
+   "rescale_factor": 0.00392156862745098,
+   "return_metadata": false,
+   "size": {
+     "longest_edge": 12845056,
+     "shortest_edge": 3136
+   },
+   "size_divisor": null,
+   "temporal_patch_size": 2,
+   "video_metadata": null,
+   "video_processor_type": "Qwen2VLVideoProcessor"
+ }
vocab.json ADDED
The diff for this file is too large to render.