{
"architectures": [
"InternVLChatModel"
],
"auto_map": {
"AutoConfig": "configuration_internvl_chat.InternVLChatConfig",
"AutoModel": "modeling_internvl_chat.InternVLChatModel",
"AutoModelForCausalLM": "modeling_internvl_chat.InternVLChatModel"
},
"downsample_ratio": 0.5,
"dtype": "bfloat16",
"dynamic_image_size": true,
"eos_token_id": 151645,
"force_image_size": 448,
"llm_config": {
"_name_or_path": "/root/codespace/checkpoints/Qwen3-14B",
"architectures": [
"Qwen3ForCausalLM"
],
"attention_bias": false,
"attention_dropout": 0.0,
"bos_token_id": 151643,
"debug": false,
"dtype": "bfloat16",
"eos_token_id": 151645,
"ep_size": 1,
"head_dim": 128,
"hidden_act": "silu",
"hidden_size": 5120,
"initializer_range": 0.02,
"intermediate_size": 17408,
"layer_types": [
"full_attention",
"full_attention",
"full_attention",
"full_attention",
"full_attention",
"full_attention",
"full_attention",
"full_attention",
"full_attention",
"full_attention",
"full_attention",
"full_attention",
"full_attention",
"full_attention",
"full_attention",
"full_attention",
"full_attention",
"full_attention",
"full_attention",
"full_attention",
"full_attention",
"full_attention",
"full_attention",
"full_attention",
"full_attention",
"full_attention",
"full_attention",
"full_attention",
"full_attention",
"full_attention",
"full_attention",
"full_attention",
"full_attention",
"full_attention",
"full_attention",
"full_attention",
"full_attention",
"full_attention",
"full_attention",
"full_attention"
],
"max_position_embeddings": 40960,
"max_window_layers": 40,
"micro_forward": false,
"model_type": "qwen3",
"num_attention_heads": 40,
"num_hidden_layers": 40,
"num_key_value_heads": 8,
"rms_norm_eps": 1e-06,
"rope_scaling": null,
"rope_theta": 1000000,
"skip_checkpoint": false,
"sliding_window": null,
"use_cache": false,
"use_deepep": false,
"use_sliding_window": false,
"vocab_size": 151936
},
"max_dynamic_patch": 12,
"min_dynamic_patch": 1,
"model_type": "internvl_chat",
"output_attentions": false,
"pad2square": false,
"pad_token_id": 151643,
"ps_version": "v2",
"quantization_config": {
"config_groups": {
"group_0": {
"format": "pack-quantized",
"input_activations": null,
"output_activations": null,
"targets": [
"Linear"
],
"weights": {
"actorder": null,
"block_structure": null,
"dynamic": false,
"group_size": 32,
"num_bits": 8,
"observer": "mse",
"observer_kwargs": {},
"strategy": "group",
"symmetric": true,
"type": "int"
}
}
},
"format": "pack-quantized",
"global_compression_ratio": null,
"ignore": [
"vision_model.encoder.layers.0.attn.qkv",
"vision_model.encoder.layers.0.attn.proj",
"vision_model.encoder.layers.0.mlp.fc1",
"vision_model.encoder.layers.0.mlp.fc2",
"vision_model.encoder.layers.1.attn.qkv",
"vision_model.encoder.layers.1.attn.proj",
"vision_model.encoder.layers.1.mlp.fc1",
"vision_model.encoder.layers.1.mlp.fc2",
"vision_model.encoder.layers.2.attn.qkv",
"vision_model.encoder.layers.2.attn.proj",
"vision_model.encoder.layers.2.mlp.fc1",
"vision_model.encoder.layers.2.mlp.fc2",
"vision_model.encoder.layers.3.attn.qkv",
"vision_model.encoder.layers.3.attn.proj",
"vision_model.encoder.layers.3.mlp.fc1",
"vision_model.encoder.layers.3.mlp.fc2",
"vision_model.encoder.layers.4.attn.qkv",
"vision_model.encoder.layers.4.attn.proj",
"vision_model.encoder.layers.4.mlp.fc1",
"vision_model.encoder.layers.4.mlp.fc2",
"vision_model.encoder.layers.5.attn.qkv",
"vision_model.encoder.layers.5.attn.proj",
"vision_model.encoder.layers.5.mlp.fc1",
"vision_model.encoder.layers.5.mlp.fc2",
"vision_model.encoder.layers.6.attn.qkv",
"vision_model.encoder.layers.6.attn.proj",
"vision_model.encoder.layers.6.mlp.fc1",
"vision_model.encoder.layers.6.mlp.fc2",
"vision_model.encoder.layers.7.attn.qkv",
"vision_model.encoder.layers.7.attn.proj",
"vision_model.encoder.layers.7.mlp.fc1",
"vision_model.encoder.layers.7.mlp.fc2",
"vision_model.encoder.layers.8.attn.qkv",
"vision_model.encoder.layers.8.attn.proj",
"vision_model.encoder.layers.8.mlp.fc1",
"vision_model.encoder.layers.8.mlp.fc2",
"vision_model.encoder.layers.9.attn.qkv",
"vision_model.encoder.layers.9.attn.proj",
"vision_model.encoder.layers.9.mlp.fc1",
"vision_model.encoder.layers.9.mlp.fc2",
"vision_model.encoder.layers.10.attn.qkv",
"vision_model.encoder.layers.10.attn.proj",
"vision_model.encoder.layers.10.mlp.fc1",
"vision_model.encoder.layers.10.mlp.fc2",
"vision_model.encoder.layers.11.attn.qkv",
"vision_model.encoder.layers.11.attn.proj",
"vision_model.encoder.layers.11.mlp.fc1",
"vision_model.encoder.layers.11.mlp.fc2",
"vision_model.encoder.layers.12.attn.qkv",
"vision_model.encoder.layers.12.attn.proj",
"vision_model.encoder.layers.12.mlp.fc1",
"vision_model.encoder.layers.12.mlp.fc2",
"vision_model.encoder.layers.13.attn.qkv",
"vision_model.encoder.layers.13.attn.proj",
"vision_model.encoder.layers.13.mlp.fc1",
"vision_model.encoder.layers.13.mlp.fc2",
"vision_model.encoder.layers.14.attn.qkv",
"vision_model.encoder.layers.14.attn.proj",
"vision_model.encoder.layers.14.mlp.fc1",
"vision_model.encoder.layers.14.mlp.fc2",
"vision_model.encoder.layers.15.attn.qkv",
"vision_model.encoder.layers.15.attn.proj",
"vision_model.encoder.layers.15.mlp.fc1",
"vision_model.encoder.layers.15.mlp.fc2",
"vision_model.encoder.layers.16.attn.qkv",
"vision_model.encoder.layers.16.attn.proj",
"vision_model.encoder.layers.16.mlp.fc1",
"vision_model.encoder.layers.16.mlp.fc2",
"vision_model.encoder.layers.17.attn.qkv",
"vision_model.encoder.layers.17.attn.proj",
"vision_model.encoder.layers.17.mlp.fc1",
"vision_model.encoder.layers.17.mlp.fc2",
"vision_model.encoder.layers.18.attn.qkv",
"vision_model.encoder.layers.18.attn.proj",
"vision_model.encoder.layers.18.mlp.fc1",
"vision_model.encoder.layers.18.mlp.fc2",
"vision_model.encoder.layers.19.attn.qkv",
"vision_model.encoder.layers.19.attn.proj",
"vision_model.encoder.layers.19.mlp.fc1",
"vision_model.encoder.layers.19.mlp.fc2",
"vision_model.encoder.layers.20.attn.qkv",
"vision_model.encoder.layers.20.attn.proj",
"vision_model.encoder.layers.20.mlp.fc1",
"vision_model.encoder.layers.20.mlp.fc2",
"vision_model.encoder.layers.21.attn.qkv",
"vision_model.encoder.layers.21.attn.proj",
"vision_model.encoder.layers.21.mlp.fc1",
"vision_model.encoder.layers.21.mlp.fc2",
"vision_model.encoder.layers.22.attn.qkv",
"vision_model.encoder.layers.22.attn.proj",
"vision_model.encoder.layers.22.mlp.fc1",
"vision_model.encoder.layers.22.mlp.fc2",
"vision_model.encoder.layers.23.attn.qkv",
"vision_model.encoder.layers.23.attn.proj",
"vision_model.encoder.layers.23.mlp.fc1",
"vision_model.encoder.layers.23.mlp.fc2",
"language_model.lm_head",
"mlp1.1",
"mlp1.3"
],
"kv_cache_scheme": null,
"quant_method": "compressed-tensors",
"quantization_status": "compressed",
"sparsity_config": {},
"transform_config": {},
"version": "0.11.1.a20250828"
},
"select_layer": -1,
"template": "internvl2_5",
"tie_word_embeddings": false,
"transformers_version": null,
"use_backbone_lora": 0,
"use_llm_lora": 0,
"use_thumbnail": true,
"vision_config": {
"architectures": [
"InternVisionModel"
],
"attention_dropout": 0.0,
"auto_map": {
"AutoConfig": "configuration_intern_vit.InternVisionConfig",
"AutoModel": "modeling_intern_vit.InternVisionModel"
},
"drop_path_rate": 0.0,
"dropout": 0.0,
"dtype": "bfloat16",
"hidden_act": "gelu",
"hidden_size": 1024,
"image_size": 448,
"initializer_factor": 1.0,
"initializer_range": 0.02,
"intermediate_size": 4096,
"layer_norm_eps": 1e-06,
"model_type": "intern_vit_6b",
"norm_type": "layer_norm",
"num_attention_heads": 16,
"num_channels": 3,
"num_hidden_layers": 24,
"patch_size": 14,
"qk_normalization": false,
"qkv_bias": true,
"use_fa3": false,
"use_flash_attn": false
}
}
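
Loading sketch (not part of config.json itself): since "auto_map" above routes AutoModel to the repo-local modeling_internvl_chat.InternVLChatModel class, and "quant_method" is "compressed-tensors", the checkpoint should be loadable with transformers given trust_remote_code and the compressed-tensors package installed. The repo id below is a placeholder, not taken from this file.

    import torch
    from transformers import AutoModel, AutoTokenizer

    model_id = "path/to/this/checkpoint"  # placeholder; substitute the actual repo id

    # trust_remote_code is required because auto_map points AutoModel at the
    # InternVLChatModel implementation shipped inside the repository.
    model = AutoModel.from_pretrained(
        model_id,
        torch_dtype=torch.bfloat16,  # matches "dtype": "bfloat16" in the config
        trust_remote_code=True,
    ).eval()

    tokenizer = AutoTokenizer.from_pretrained(model_id, trust_remote_code=True)

The int8 pack-quantized Linear weights declared under quantization_config are decompressed by the compressed-tensors integration at load time; the modules listed under "ignore" (the vision encoder, mlp1 projector, and lm_head) remain in bfloat16.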