lkg67 finalf0 committed
Commit 476e0b2 · verified · 0 parent(s)

Duplicate from openbmb/MiniCPM-V-2_6-gguf

Co-authored-by: Hongji Zhu <[email protected]>

.gitattributes ADDED
@@ -0,0 +1,59 @@
+ *.7z filter=lfs diff=lfs merge=lfs -text
+ *.arrow filter=lfs diff=lfs merge=lfs -text
+ *.bin filter=lfs diff=lfs merge=lfs -text
+ *.bz2 filter=lfs diff=lfs merge=lfs -text
+ *.ckpt filter=lfs diff=lfs merge=lfs -text
+ *.ftz filter=lfs diff=lfs merge=lfs -text
+ *.gz filter=lfs diff=lfs merge=lfs -text
+ *.h5 filter=lfs diff=lfs merge=lfs -text
+ *.joblib filter=lfs diff=lfs merge=lfs -text
+ *.lfs.* filter=lfs diff=lfs merge=lfs -text
+ *.mlmodel filter=lfs diff=lfs merge=lfs -text
+ *.model filter=lfs diff=lfs merge=lfs -text
+ *.msgpack filter=lfs diff=lfs merge=lfs -text
+ *.npy filter=lfs diff=lfs merge=lfs -text
+ *.npz filter=lfs diff=lfs merge=lfs -text
+ *.onnx filter=lfs diff=lfs merge=lfs -text
+ *.ot filter=lfs diff=lfs merge=lfs -text
+ *.parquet filter=lfs diff=lfs merge=lfs -text
+ *.pb filter=lfs diff=lfs merge=lfs -text
+ *.pickle filter=lfs diff=lfs merge=lfs -text
+ *.pkl filter=lfs diff=lfs merge=lfs -text
+ *.pt filter=lfs diff=lfs merge=lfs -text
+ *.pth filter=lfs diff=lfs merge=lfs -text
+ *.rar filter=lfs diff=lfs merge=lfs -text
+ *.safetensors filter=lfs diff=lfs merge=lfs -text
+ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
+ *.tar.* filter=lfs diff=lfs merge=lfs -text
+ *.tar filter=lfs diff=lfs merge=lfs -text
+ *.tflite filter=lfs diff=lfs merge=lfs -text
+ *.tgz filter=lfs diff=lfs merge=lfs -text
+ *.wasm filter=lfs diff=lfs merge=lfs -text
+ *.xz filter=lfs diff=lfs merge=lfs -text
+ *.zip filter=lfs diff=lfs merge=lfs -text
+ *.zst filter=lfs diff=lfs merge=lfs -text
+ *tfevents* filter=lfs diff=lfs merge=lfs -text
+ ggml-model-f16.gguf filter=lfs diff=lfs merge=lfs -text
+ ggml-model-Q4_K_M.gguf filter=lfs diff=lfs merge=lfs -text
+ mmproj-model-f16.gguf filter=lfs diff=lfs merge=lfs -text
+ ggml-model-IQ3_M.gguf filter=lfs diff=lfs merge=lfs -text
+ ggml-model-IQ3_S.gguf filter=lfs diff=lfs merge=lfs -text
+ ggml-model-IQ3_XS.gguf filter=lfs diff=lfs merge=lfs -text
+ ggml-model-IQ4_NL.gguf filter=lfs diff=lfs merge=lfs -text
+ ggml-model-IQ4_XS.gguf filter=lfs diff=lfs merge=lfs -text
+ ggml-model-Q2_K.gguf filter=lfs diff=lfs merge=lfs -text
+ ggml-model-Q3_K_L.gguf filter=lfs diff=lfs merge=lfs -text
+ ggml-model-Q3_K_M.gguf filter=lfs diff=lfs merge=lfs -text
+ ggml-model-Q3_K_S.gguf filter=lfs diff=lfs merge=lfs -text
+ ggml-model-Q3_K.gguf filter=lfs diff=lfs merge=lfs -text
+ ggml-model-Q4_0.gguf filter=lfs diff=lfs merge=lfs -text
+ ggml-model-Q4_1.gguf filter=lfs diff=lfs merge=lfs -text
+ ggml-model-Q4_K_S.gguf filter=lfs diff=lfs merge=lfs -text
+ ggml-model-Q4_K.gguf filter=lfs diff=lfs merge=lfs -text
+ ggml-model-Q5_0.gguf filter=lfs diff=lfs merge=lfs -text
+ ggml-model-Q5_1.gguf filter=lfs diff=lfs merge=lfs -text
+ ggml-model-Q5_K_M.gguf filter=lfs diff=lfs merge=lfs -text
+ ggml-model-Q5_K_S.gguf filter=lfs diff=lfs merge=lfs -text
+ ggml-model-Q5_K.gguf filter=lfs diff=lfs merge=lfs -text
+ ggml-model-Q6_K.gguf filter=lfs diff=lfs merge=lfs -text
+ ggml-model-Q8_0.gguf filter=lfs diff=lfs merge=lfs -text
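
These Git LFS rules are the kind that `git lfs track` writes into `.gitattributes`. This commit tracks each quantized file by name; a wildcard sketch (an assumption, not what the commit itself does) would cover them all at once:

```bash
# sketch: track every GGUF weight file via Git LFS with one rule;
# this appends "*.gguf filter=lfs diff=lfs merge=lfs -text" to .gitattributes
git lfs install
git lfs track "*.gguf"
git add .gitattributes
```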
README.md ADDED
@@ -0,0 +1,44 @@
+ ## MiniCPM-V 2.6
+
+ ### Prepare models and code
+
+ Download the [MiniCPM-V-2_6](https://huggingface.co/openbmb/MiniCPM-V-2_6) PyTorch model from Hugging Face into a "MiniCPM-V-2_6" folder.
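
If you prefer a scripted download, here is a minimal sketch using the `huggingface_hub` CLI (the install step and the target path are assumptions; the folder name matches the instructions above):

```bash
# assumes the Hugging Face Hub CLI is available
pip install -U "huggingface_hub[cli]"

# fetch the PyTorch checkpoint into a sibling "MiniCPM-V-2_6" folder,
# which the llama.cpp commands below reference as ../MiniCPM-V-2_6
huggingface-cli download openbmb/MiniCPM-V-2_6 --local-dir MiniCPM-V-2_6
```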
+
+ Clone llama.cpp:
+ ```bash
+ git clone git@github.com:OpenBMB/llama.cpp.git
+ cd llama.cpp
+ git checkout minicpmv-main
+ ```
+
+ ### Usage of MiniCPM-V 2.6
+
+ Convert the PyTorch model to gguf files (you can also download the [gguf](https://huggingface.co/openbmb/MiniCPM-V-2_6-gguf) files we have already converted; see the sketch after the commands below):
+
+ ```bash
+ python ./examples/llava/minicpmv-surgery.py -m ../MiniCPM-V-2_6
+ python ./examples/llava/minicpmv-convert-image-encoder-to-gguf.py -m ../MiniCPM-V-2_6 --minicpmv-projector ../MiniCPM-V-2_6/minicpmv.projector --output-dir ../MiniCPM-V-2_6/ --image-mean 0.5 0.5 0.5 --image-std 0.5 0.5 0.5 --minicpmv_version 3
+ python ./convert_hf_to_gguf.py ../MiniCPM-V-2_6/model
+
+ # quantize int4 version
+ ./llama-quantize ../MiniCPM-V-2_6/model/ggml-model-f16.gguf ../MiniCPM-V-2_6/model/ggml-model-Q4_K_M.gguf Q4_K_M
+ ```
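
As an alternative to converting locally, a minimal sketch for fetching the pre-converted files from [MiniCPM-V-2_6-gguf](https://huggingface.co/openbmb/MiniCPM-V-2_6-gguf) (file names are those added in this commit; the destination paths mirror the inference commands below):

```bash
# language model (pick any quant listed in this repo) plus the vision projector
huggingface-cli download openbmb/MiniCPM-V-2_6-gguf ggml-model-Q4_K_M.gguf --local-dir ../MiniCPM-V-2_6/model
huggingface-cli download openbmb/MiniCPM-V-2_6-gguf mmproj-model-f16.gguf --local-dir ../MiniCPM-V-2_6
```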
+
+ Build for Linux or Mac:
+
+ ```bash
+ make
+ make llama-minicpmv-cli
+ ```
+
+ Inference on Linux or Mac:
+ ```bash
+ # run f16 version
+ ./llama-minicpmv-cli -m ../MiniCPM-V-2_6/model/ggml-model-f16.gguf --mmproj ../MiniCPM-V-2_6/mmproj-model-f16.gguf -c 4096 --temp 0.7 --top-p 0.8 --top-k 100 --repeat-penalty 1.05 --image xx.jpg -p "What is in the image?"
+
+ # run quantized int4 version
+ ./llama-minicpmv-cli -m ../MiniCPM-V-2_6/model/ggml-model-Q4_K_M.gguf --mmproj ../MiniCPM-V-2_6/mmproj-model-f16.gguf -c 4096 --temp 0.7 --top-p 0.8 --top-k 100 --repeat-penalty 1.05 --image xx.jpg -p "What is in the image?"
+
+ # or run in interactive mode
+ ./llama-minicpmv-cli -m ../MiniCPM-V-2_6/model/ggml-model-Q4_K_M.gguf --mmproj ../MiniCPM-V-2_6/mmproj-model-f16.gguf -c 4096 --temp 0.7 --top-p 0.8 --top-k 100 --repeat-penalty 1.05 --image xx.jpg -i
+ ```
ggml-model-IQ3_M.gguf ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:1cf78997419de1094e3ffa1f71aef9f13abfb531c6ace2d29bdd4d226cabcb66
+ size 3572217184
ggml-model-IQ3_S.gguf ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:2a8a21308c7a6b4deda7cfd94e2128207b37797a0a2727ce0b5bfc2990b07d62
+ size 3497397600
ggml-model-IQ3_XS.gguf ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:7e92e3e1b88079ba5f262a92dc6e638729f5039666c4606af99b1f6638152923
+ size 3344461152
ggml-model-IQ4_NL.gguf ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:37367ae200f107c1095145de243ee38e2c3efa8ae93c5e376098d50c0ae69984
+ size 4461289792
ggml-model-IQ4_XS.gguf ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:ba1e23aa5470520c8c920d7944479133af9ae091a23653529be4348f77ba1e18
+ size 4248358752
ggml-model-Q2_K.gguf ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:cec163c9ae08fbac5f65dcf05b80fd717acc4ac7bc631b28c0adb3ec4948e7fd
+ size 3014290240
ggml-model-Q3_K.gguf ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:5a050acc927498ca4e05d38207bbd89d013154a7b9b24ac9ff4e45c870bc7f62
+ size 3806596448
ggml-model-Q3_K_L.gguf ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:970be5beebeb77298e7390b840f9be7e4749a47190b8fd1b7ddb7e30a8d43099
+ size 4086664544
ggml-model-Q3_K_M.gguf ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:5a050acc927498ca4e05d38207bbd89d013154a7b9b24ac9ff4e45c870bc7f62
+ size 3806596448
ggml-model-Q3_K_S.gguf ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:ad5363fdd5bff45ad840a1b179e8f32294d9a9e5a53902f5eca4458c868055a2
+ size 3490573664
ggml-model-Q4_0.gguf ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:262843d4806aeb402336980badd414a72576b20b1e5d537647da15f16c4a4df0
+ size 4429406528
ggml-model-Q4_1.gguf ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:12e6b9a27b91297f8ee275b2ac0ca796e527cbbaee1c1de22fcab72541581495
+ size 4871210240
ggml-model-Q4_K.gguf ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:3a4078d53b46f22989adbf998ce5a3fd090b6541f112d7e936eb4204a04100b1
+ size 4681089344
ggml-model-Q4_K_M.gguf ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:3a4078d53b46f22989adbf998ce5a3fd090b6541f112d7e936eb4204a04100b1
+ size 4681089344
ggml-model-Q4_K_S.gguf ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:6f8e13697c94a6ffdd485d4054b493a43640d5220303ee9600dd8dab4c3da564
+ size 4455784768
ggml-model-Q5_0.gguf ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:6c58f333a60ccd839d7e78d6baa7c800119f6a4c3fad220daecec5aae3a45960
+ size 5313013952
ggml-model-Q5_1.gguf ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:32b4c781907a067b5c17e2fe2eb46597f8a5b11cb200d570126bb4651fdd8dd6
+ size 5754817664
ggml-model-Q5_K.gguf ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:3d7bad99e6548783dbc35d87cc0e04095cfa63eec985ddbbaee3122f94de6913
+ size 5442668736
ggml-model-Q5_K_M.gguf ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:3d7bad99e6548783dbc35d87cc0e04095cfa63eec985ddbbaee3122f94de6913
+ size 5442668736
ggml-model-Q5_K_S.gguf ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:278092401a8159aa61f2fce7f39af09e51d0b5ee3a12ccfc5b1f2088b3671392
+ size 5313013952
ggml-model-Q6_K.gguf ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:5f93a5c114e396d65e76f3350508c291af8c9a89508ddf88d32a2263cf21a590
+ size 6251846848
ggml-model-Q8_0.gguf ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:1dcd11b83b5e4922b51ac13bb21f40e808d702be056d949d681302c9999c0730
+ size 8095482304
ggml-model-f16.gguf ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:2d98f9902ee3503d2027a89f6e8181431394e38bbb4b52d14851fafe88775735
+ size 15232135744
mmproj-model-f16.gguf ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:4485f68a0f1aa404c391e788ea88ea653c100d8e98fe572698f701e5809711fd
+ size 1044425152
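
Each file above is stored as a Git LFS pointer (spec version, SHA-256 oid, and byte size) rather than the weights themselves. A quick way to confirm a downloaded file matches its pointer, using the Q4_K_M entry from this commit as the example:

```bash
# hash and size should match the LFS pointer for ggml-model-Q4_K_M.gguf
sha256sum ggml-model-Q4_K_M.gguf    # expect 3a4078d53b46f22989adbf998ce5a3fd090b6541f112d7e936eb4204a04100b1
stat -c %s ggml-model-Q4_K_M.gguf   # expect 4681089344 bytes (on macOS: stat -f %z, shasum -a 256)
```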