Upload folder using huggingface_hub
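
This commit was created with the huggingface_hub library's upload_folder helper. A minimal sketch of the kind of call that produces a commit like this one (the folder path and repo_id below are placeholders, not taken from this repository):

```python
from huggingface_hub import HfApi

api = HfApi()  # uses the token from `huggingface-cli login` by default
api.upload_folder(
    folder_path="./local-model-dir",     # placeholder: local directory holding the shards and index
    repo_id="your-username/your-model",  # placeholder repo id
    repo_type="model",
    commit_message="Upload folder using huggingface_hub",
)
```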
model-00001-of-00004.safetensors
CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
-size
+oid sha256:797a30293944e0d9d5895f90ec37197e214efcd6c00f6ab0e8819ca945517627
+size 4914272203
model-00002-of-00004.safetensors
CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
-size
+oid sha256:be2cfaaaba5cac0748cc952940d5567abf86c6af9f233f30b2c28b33a0c4885d
+size 4960910733
model-00003-of-00004.safetensors
CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
-size
+oid sha256:e75f05fe0dfe32038b657e13de8b538a80156315680b558ea35e50f88829bc5e
+size 3814033465
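
The three .safetensors entries above are Git LFS pointer files: each tracks only the LFS spec version, the sha256 of the actual shard, and its size in bytes. A minimal sketch of checking a downloaded shard against its pointer, using the oid and size recorded for model-00001-of-00004.safetensors in this commit:

```python
import hashlib
import os

EXPECTED_OID = "797a30293944e0d9d5895f90ec37197e214efcd6c00f6ab0e8819ca945517627"
EXPECTED_SIZE = 4914272203  # bytes, from the LFS pointer above

path = "model-00001-of-00004.safetensors"  # assumes the shard has been downloaded locally

assert os.path.getsize(path) == EXPECTED_SIZE, "size mismatch"

sha = hashlib.sha256()
with open(path, "rb") as f:
    for chunk in iter(lambda: f.read(1 << 20), b""):  # hash in 1 MiB chunks
        sha.update(chunk)

assert sha.hexdigest() == EXPECTED_OID, "sha256 mismatch"
print("shard matches its LFS pointer")
```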
model.safetensors.index.json
CHANGED
@@ -842,13 +842,13 @@
 "language_model.model.layers.25.self_attn.v_proj.weight.nested_quant_map": "model-00002-of-00004.safetensors",
 "language_model.model.layers.25.self_attn.v_proj.weight.quant_map": "model-00002-of-00004.safetensors",
 "language_model.model.layers.25.self_attn.v_proj.weight.quant_state.bitsandbytes__nf4": "model-00002-of-00004.safetensors",
-"language_model.model.layers.26.input_layernorm.weight": "model-
-"language_model.model.layers.26.mlp.down_proj.weight": "model-
-"language_model.model.layers.26.mlp.down_proj.weight.absmax": "model-
-"language_model.model.layers.26.mlp.down_proj.weight.nested_absmax": "model-
-"language_model.model.layers.26.mlp.down_proj.weight.nested_quant_map": "model-
-"language_model.model.layers.26.mlp.down_proj.weight.quant_map": "model-
-"language_model.model.layers.26.mlp.down_proj.weight.quant_state.bitsandbytes__nf4": "model-
+"language_model.model.layers.26.input_layernorm.weight": "model-00003-of-00004.safetensors",
+"language_model.model.layers.26.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
+"language_model.model.layers.26.mlp.down_proj.weight.absmax": "model-00003-of-00004.safetensors",
+"language_model.model.layers.26.mlp.down_proj.weight.nested_absmax": "model-00003-of-00004.safetensors",
+"language_model.model.layers.26.mlp.down_proj.weight.nested_quant_map": "model-00003-of-00004.safetensors",
+"language_model.model.layers.26.mlp.down_proj.weight.quant_map": "model-00003-of-00004.safetensors",
+"language_model.model.layers.26.mlp.down_proj.weight.quant_state.bitsandbytes__nf4": "model-00003-of-00004.safetensors",
 "language_model.model.layers.26.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
 "language_model.model.layers.26.mlp.gate_proj.weight.absmax": "model-00002-of-00004.safetensors",
 "language_model.model.layers.26.mlp.gate_proj.weight.nested_absmax": "model-00002-of-00004.safetensors",
@@ -861,7 +861,7 @@
 "language_model.model.layers.26.mlp.up_proj.weight.nested_quant_map": "model-00002-of-00004.safetensors",
 "language_model.model.layers.26.mlp.up_proj.weight.quant_map": "model-00002-of-00004.safetensors",
 "language_model.model.layers.26.mlp.up_proj.weight.quant_state.bitsandbytes__nf4": "model-00002-of-00004.safetensors",
-"language_model.model.layers.26.post_attention_layernorm.weight": "model-
+"language_model.model.layers.26.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
 "language_model.model.layers.26.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
 "language_model.model.layers.26.self_attn.k_proj.weight.absmax": "model-00002-of-00004.safetensors",
 "language_model.model.layers.26.self_attn.k_proj.weight.nested_absmax": "model-00002-of-00004.safetensors",
@@ -906,30 +906,30 @@
 "language_model.model.layers.27.mlp.up_proj.weight.quant_map": "model-00003-of-00004.safetensors",
 "language_model.model.layers.27.mlp.up_proj.weight.quant_state.bitsandbytes__nf4": "model-00003-of-00004.safetensors",
 "language_model.model.layers.27.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
-"language_model.model.layers.27.self_attn.k_proj.weight": "model-
-"language_model.model.layers.27.self_attn.k_proj.weight.absmax": "model-
-"language_model.model.layers.27.self_attn.k_proj.weight.nested_absmax": "model-
-"language_model.model.layers.27.self_attn.k_proj.weight.nested_quant_map": "model-
-"language_model.model.layers.27.self_attn.k_proj.weight.quant_map": "model-
-"language_model.model.layers.27.self_attn.k_proj.weight.quant_state.bitsandbytes__nf4": "model-
-"language_model.model.layers.27.self_attn.o_proj.weight": "model-
-"language_model.model.layers.27.self_attn.o_proj.weight.absmax": "model-
-"language_model.model.layers.27.self_attn.o_proj.weight.nested_absmax": "model-
-"language_model.model.layers.27.self_attn.o_proj.weight.nested_quant_map": "model-
-"language_model.model.layers.27.self_attn.o_proj.weight.quant_map": "model-
-"language_model.model.layers.27.self_attn.o_proj.weight.quant_state.bitsandbytes__nf4": "model-
-"language_model.model.layers.27.self_attn.q_proj.weight": "model-
-"language_model.model.layers.27.self_attn.q_proj.weight.absmax": "model-
-"language_model.model.layers.27.self_attn.q_proj.weight.nested_absmax": "model-
-"language_model.model.layers.27.self_attn.q_proj.weight.nested_quant_map": "model-
-"language_model.model.layers.27.self_attn.q_proj.weight.quant_map": "model-
-"language_model.model.layers.27.self_attn.q_proj.weight.quant_state.bitsandbytes__nf4": "model-
-"language_model.model.layers.27.self_attn.v_proj.weight": "model-
-"language_model.model.layers.27.self_attn.v_proj.weight.absmax": "model-
-"language_model.model.layers.27.self_attn.v_proj.weight.nested_absmax": "model-
-"language_model.model.layers.27.self_attn.v_proj.weight.nested_quant_map": "model-
-"language_model.model.layers.27.self_attn.v_proj.weight.quant_map": "model-
-"language_model.model.layers.27.self_attn.v_proj.weight.quant_state.bitsandbytes__nf4": "model-
+"language_model.model.layers.27.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
+"language_model.model.layers.27.self_attn.k_proj.weight.absmax": "model-00003-of-00004.safetensors",
+"language_model.model.layers.27.self_attn.k_proj.weight.nested_absmax": "model-00003-of-00004.safetensors",
+"language_model.model.layers.27.self_attn.k_proj.weight.nested_quant_map": "model-00003-of-00004.safetensors",
+"language_model.model.layers.27.self_attn.k_proj.weight.quant_map": "model-00003-of-00004.safetensors",
+"language_model.model.layers.27.self_attn.k_proj.weight.quant_state.bitsandbytes__nf4": "model-00003-of-00004.safetensors",
+"language_model.model.layers.27.self_attn.o_proj.weight": "model-00003-of-00004.safetensors",
+"language_model.model.layers.27.self_attn.o_proj.weight.absmax": "model-00003-of-00004.safetensors",
+"language_model.model.layers.27.self_attn.o_proj.weight.nested_absmax": "model-00003-of-00004.safetensors",
+"language_model.model.layers.27.self_attn.o_proj.weight.nested_quant_map": "model-00003-of-00004.safetensors",
+"language_model.model.layers.27.self_attn.o_proj.weight.quant_map": "model-00003-of-00004.safetensors",
+"language_model.model.layers.27.self_attn.o_proj.weight.quant_state.bitsandbytes__nf4": "model-00003-of-00004.safetensors",
+"language_model.model.layers.27.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
+"language_model.model.layers.27.self_attn.q_proj.weight.absmax": "model-00003-of-00004.safetensors",
+"language_model.model.layers.27.self_attn.q_proj.weight.nested_absmax": "model-00003-of-00004.safetensors",
+"language_model.model.layers.27.self_attn.q_proj.weight.nested_quant_map": "model-00003-of-00004.safetensors",
+"language_model.model.layers.27.self_attn.q_proj.weight.quant_map": "model-00003-of-00004.safetensors",
+"language_model.model.layers.27.self_attn.q_proj.weight.quant_state.bitsandbytes__nf4": "model-00003-of-00004.safetensors",
+"language_model.model.layers.27.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
+"language_model.model.layers.27.self_attn.v_proj.weight.absmax": "model-00003-of-00004.safetensors",
+"language_model.model.layers.27.self_attn.v_proj.weight.nested_absmax": "model-00003-of-00004.safetensors",
+"language_model.model.layers.27.self_attn.v_proj.weight.nested_quant_map": "model-00003-of-00004.safetensors",
+"language_model.model.layers.27.self_attn.v_proj.weight.quant_map": "model-00003-of-00004.safetensors",
+"language_model.model.layers.27.self_attn.v_proj.weight.quant_state.bitsandbytes__nf4": "model-00003-of-00004.safetensors",
 "language_model.model.layers.28.input_layernorm.weight": "model-00003-of-00004.safetensors",
 "language_model.model.layers.28.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
 "language_model.model.layers.28.mlp.down_proj.weight.absmax": "model-00003-of-00004.safetensors",
@@ -1735,7 +1735,7 @@
 "language_model.model.layers.9.mlp.gate_proj.weight.nested_quant_map": "model-00001-of-00004.safetensors",
 "language_model.model.layers.9.mlp.gate_proj.weight.quant_map": "model-00001-of-00004.safetensors",
 "language_model.model.layers.9.mlp.gate_proj.weight.quant_state.bitsandbytes__nf4": "model-00001-of-00004.safetensors",
-"language_model.model.layers.9.mlp.up_proj.weight": "model-
+"language_model.model.layers.9.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
 "language_model.model.layers.9.mlp.up_proj.weight.absmax": "model-00002-of-00004.safetensors",
 "language_model.model.layers.9.mlp.up_proj.weight.nested_absmax": "model-00002-of-00004.safetensors",
 "language_model.model.layers.9.mlp.up_proj.weight.nested_quant_map": "model-00002-of-00004.safetensors",
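
The index diff above moves the layer-26 and layer-27 attention/MLP tensors (including their bitsandbytes nf4 quantization-state entries) to model-00003-of-00004.safetensors. The weight_map in model.safetensors.index.json is simply a tensor-name-to-shard lookup, so the effect of the change can be inspected with a short script; a sketch, assuming the updated index file sits in the current directory:

```python
import json
from collections import defaultdict

with open("model.safetensors.index.json") as f:
    index = json.load(f)

weight_map = index["weight_map"]  # tensor name -> shard file name

# After this commit, the layer-26 down_proj weight and its nf4 quant state live in shard 3.
name = "language_model.model.layers.26.mlp.down_proj.weight"
print(name, "->", weight_map[name])  # expected: model-00003-of-00004.safetensors

# Group tensor names by shard to see how the split is distributed.
shards = defaultdict(list)
for tensor, shard in weight_map.items():
    shards[shard].append(tensor)
for shard, tensors in sorted(shards.items()):
    print(shard, len(tensors), "tensors")
```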