onuruls committed
Commit · 70e1e32 · 1 Parent(s): 13e8c84

fix

Files changed:
- app.py                            +9 -0
- requirements.txt                  +2 -0
- src/models/clip_lp.py             +2 -0
- src/models/clip_multilabel.py     +2 -1
- src/models/eva_headpreserving.py  +2 -1
- src/models/siglip_lp.py           +1 -0
app.py
CHANGED
@@ -9,6 +9,15 @@ from video_utils import (
     merge_seconds_union, redact_with_ffmpeg
 )
 
+import os
+try:
+    from huggingface_hub import login
+    tok = os.getenv("HF_TOKEN") or os.getenv("HUGGINGFACE_HUB_TOKEN")
+    if tok: login(tok)
+except Exception:
+    pass
+
+
 APP_TITLE = "Content Moderation Demo (Image & Video)"
 APP_DESC = """
 Minimal prototype: image/video analysis, model & category selection, and threshold control.
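The new block logs the Space in to the Hugging Face Hub at import time, reading a token from HF_TOKEN or HUGGINGFACE_HUB_TOKEN and swallowing any failure so the app still starts without one. A minimal local check of the same pattern (a sketch, not part of the commit; assumes a token exported in your shell):

import os
from huggingface_hub import login, whoami

tok = os.getenv("HF_TOKEN") or os.getenv("HUGGINGFACE_HUB_TOKEN")
if tok:
    login(token=tok)           # same guarded call app.py now makes on startup
    print(whoami()["name"])    # account the token resolves to
else:
    print("no token set; gated/private model downloads would fail")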
requirements.txt
CHANGED
@@ -9,3 +9,5 @@ gradio>=4.44.0
 opencv-python-headless
 ffmpeg-python
 timm
+sentencepiece
+
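The commit does not say why sentencepiece is added; a plausible reason (an assumption) is that one of the text towers, e.g. a SigLIP-style tokenizer, needs it as a tokenizer backend. A trivial post-install check:

import sentencepiece as spm
print(spm.__version__)   # should import cleanly once requirements.txt is installed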
src/models/clip_lp.py
CHANGED
@@ -10,6 +10,7 @@ class CLIPLinearProbe:
     def __init__(self, head_path):
         self.device = "cuda" if torch.cuda.is_available() else "cpu"
         self.torch_dtype = torch.float16 if self.device == "cuda" else torch.float32
+
         self.model, _, self.preprocess = open_clip.create_model_and_transforms(
             "ViT-L-14", pretrained="openai", device=self.device
         )
@@ -18,6 +19,7 @@ class CLIPLinearProbe:
         self.w = torch.from_numpy(npz["w"]).to(self.device).float()
         self.b = torch.from_numpy(npz["b"]).to(self.device).float()
 
+        self.use_amp = False
         if self.device == "cuda":
             torch.backends.cuda.matmul.allow_tf32 = True
             torch.backends.cudnn.benchmark = True
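Apart from a blank line, the change here is the new use_amp flag, initialized to False. The commit does not show where the flag is read; a plausible consumer (a sketch with a hypothetical encode_image helper, not the repo's actual code) gates torch.autocast on it:

import torch

def encode_image(model, preprocess, image, device, use_amp):
    # hypothetical helper: autocast is entered only when use_amp is True on CUDA,
    # otherwise the encoder runs in its default precision
    x = preprocess(image).unsqueeze(0).to(device)
    with torch.autocast(device_type="cuda", dtype=torch.float16,
                        enabled=use_amp and device == "cuda"):
        feats = model.encode_image(x)
    return feats.float()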
src/models/clip_multilabel.py
CHANGED
@@ -16,7 +16,7 @@ class CLIPMultiLabel:
         self.model.eval().requires_grad_(False)
 
         ckpt = torch.load(head_path, map_location=self.device, weights_only=True)
-        state = ckpt.get("
+        state = ckpt.get("state_dict", ckpt)
 
         w = state["head.weight"].to(self.device).float()
         b = state["head.bias"].to(self.device).float()
@@ -24,6 +24,7 @@ class CLIPMultiLabel:
 
         self.w, self.b = w, b
 
+        self.use_amp = False
         if self.device == "cuda":
             torch.backends.cuda.matmul.allow_tf32 = True
             torch.backends.cudnn.benchmark = True
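The rewritten load line makes checkpoint loading tolerant of both layouts: a dict that wraps the weights under a "state_dict" key (as Lightning-style checkpoints do) and a bare state dict. A self-contained sketch of the same pattern, using a hypothetical 5-class head:

import torch

head = torch.nn.Linear(768, 5)  # hypothetical head shape, not the repo's
bare = {f"head.{k}": v for k, v in head.state_dict().items()}
wrapped = {"state_dict": bare, "epoch": 3}

for ckpt in (bare, wrapped):
    state = ckpt.get("state_dict", ckpt)   # same pattern as the new line
    w = state["head.weight"].float()
    b = state["head.bias"].float()
    print(w.shape, b.shape)                # torch.Size([5, 768]) torch.Size([5])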
src/models/eva_headpreserving.py
CHANGED
@@ -21,7 +21,6 @@ class EVAHeadPreserving:
                  tag_csv: str = "selected_tags.csv"):
         self.device = "cuda" if torch.cuda.is_available() else "cpu"
         self.torch_dtype = torch.float16 if self.device == "cuda" else torch.float32
-        self.use_amp = (self.device == "cuda")
 
         self.categories = list(categories)
         self.tag_csv = tag_csv
@@ -54,9 +53,11 @@ class EVAHeadPreserving:
         with torch.no_grad():
             self.custom_head.weight.copy_(w)
             self.custom_head.bias.copy_(b)
+        self.use_amp = True
 
         self.tag_names = load_tag_names(T, self.tag_csv)
 
+        self.use_amp = False
         if self.device == "cuda":
             torch.backends.cuda.matmul.allow_tf32 = True
             torch.backends.cudnn.allow_tf32 = True
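Net effect for this model, as the diff stands: the constructor-time self.use_amp = (self.device == "cuda") is removed, use_amp is set to True right after the head weights are copied, then set to False again a few lines later, so the later assignment wins and AMP ends up disabled on CPU and GPU alike. A sketch of the resulting control flow:

use_amp = True      # assigned after the custom head is initialized
# ... tag names are loaded in between ...
use_amp = False     # reassigned before the CUDA backend flags; this value wins
assert use_amp is False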
src/models/siglip_lp.py
CHANGED
@@ -19,6 +19,7 @@ class SigLIPLinearProbe:
         self.w = torch.from_numpy(npz["w"]).to(self.device).float()
         self.b = torch.from_numpy(npz["b"]).to(self.device).float()
 
+        self.use_amp = False
         if self.device == "cuda":
             torch.backends.cuda.matmul.allow_tf32 = True
             torch.backends.cudnn.benchmark = True
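Same one-line addition as the CLIP probes: use_amp starts out False, while the CUDA-only backend flags that follow it stay enabled. For reference, a sketch of what those flags still do even with AMP off:

import torch

if torch.cuda.is_available():
    torch.backends.cuda.matmul.allow_tf32 = True  # lets fp32 matmuls use TF32 tensor cores
    torch.backends.cudnn.benchmark = True         # autotunes conv algorithms for fixed input shapes
    print(torch.backends.cuda.matmul.allow_tf32)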