Boynn committed on
Commit a4c99a6 · verified · Parent: b0669cc

Upload fine-tuned CLIP model

Files changed (3)
  1. README.md +26 -0
  2. config.json +25 -0
  3. model.safetensors +3 -0
README.md ADDED
@@ -0,0 +1,26 @@
+ ---
+ language: en
+ license: mit
+ authors:
+ - Hengyu Shi
+ tags:
+ - clip
+ - vision
+ - text
+ - multimodal
+ ---
+
+ # Fine-tuned CLIP-ViT-bigG-14 Model
+
+ This model is a fine-tuned version of laion/CLIP-ViT-bigG-14-laion2B-39B-b160k.
+
+ ## Authors
+
+ - Hengyu Shi
+ - Boynn
+
+ ## Usage
+
+ ```python
+ from transformers import CLIPTextModelWithProjection
+
+ base_model = CLIPTextModelWithProjection.from_pretrained(
+     "kaonai/CLIP-ViT-bigG-14-laion2B-39B-b160k-sft"
+ )
+ ```
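For a fuller picture, here is a sketch that also tokenizes a prompt and reads out the projected text embeddings. It assumes the repo ships the usual CLIP tokenizer files (not part of this commit) and uses an illustrative prompt; the embedding width follows `projection_dim` in config.json below.

```python
from transformers import CLIPTokenizer, CLIPTextModelWithProjection

repo = "kaonai/CLIP-ViT-bigG-14-laion2B-39B-b160k-sft"

# Assumption: tokenizer files are present in the repo (this commit only adds weights/config).
tokenizer = CLIPTokenizer.from_pretrained(repo)
model = CLIPTextModelWithProjection.from_pretrained(repo)

inputs = tokenizer(["a photo of a cat"], padding=True, return_tensors="pt")
outputs = model(**inputs)

# Projected text embeddings; the last dim should equal projection_dim (1280).
print(outputs.text_embeds.shape)
```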
config.json ADDED
@@ -0,0 +1,25 @@
+ {
+   "_name_or_path": "kaonai/CLIP-ViT-bigG-14-laion2B-39B-b160k-sft",
+   "architectures": [
+     "CLIPTextModelWithProjection"
+   ],
+   "attention_dropout": 0.0,
+   "bos_token_id": 0,
+   "dropout": 0.0,
+   "eos_token_id": 2,
+   "hidden_act": "gelu",
+   "hidden_size": 1280,
+   "initializer_factor": 1.0,
+   "initializer_range": 0.02,
+   "intermediate_size": 5120,
+   "layer_norm_eps": 1e-05,
+   "max_position_embeddings": 77,
+   "model_type": "clip_text_model",
+   "num_attention_heads": 20,
+   "num_hidden_layers": 32,
+   "pad_token_id": 1,
+   "projection_dim": 1280,
+   "torch_dtype": "float32",
+   "transformers_version": "4.46.3",
+   "vocab_size": 49408
+ }
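The config describes the text tower alone: 32 transformer layers, width 1280 with 20 attention heads, and a 1280-dimensional projection head over a 49408-token vocabulary. A minimal sanity-check sketch, loading the published config and asserting the key dimensions (`CLIPTextConfig` is the standard transformers class for this `model_type`):

```python
from transformers import CLIPTextConfig

cfg = CLIPTextConfig.from_pretrained("kaonai/CLIP-ViT-bigG-14-laion2B-39B-b160k-sft")

# Key dimensions from config.json above.
assert cfg.hidden_size == 1280
assert cfg.num_hidden_layers == 32
assert cfg.num_attention_heads == 20
assert cfg.projection_dim == 1280
```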
model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:27b35e803b219864bbb4da8070a97bdb37384ee65f034d21bd19c9e6284cbe56
+ size 2778702264
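The safetensors file is stored as a Git LFS pointer: the `oid` is the SHA-256 of the actual blob and `size` is its byte count, which LFS uses to fetch and verify the weights. A sketch of the same integrity check done by hand, assuming a hypothetical local path after `git lfs pull`:

```python
import hashlib
import os

path = "model.safetensors"  # hypothetical local path after `git lfs pull`

# Stream the file through SHA-256 to avoid loading ~2.8 GB into memory.
h = hashlib.sha256()
with open(path, "rb") as f:
    for chunk in iter(lambda: f.read(1 << 20), b""):
        h.update(chunk)

assert os.path.getsize(path) == 2778702264
assert h.hexdigest() == "27b35e803b219864bbb4da8070a97bdb37384ee65f034d21bd19c9e6284cbe56"
```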