# ==========================================
# Using a Hugging Face model - sentiment analysis with Gradio
# ==========================================
import gradio as gr
import torch
from transformers import AutoTokenizer, AutoModelForSequenceClassification
from peft import PeftModel
# Load the model
print("Loading model...")
BASE_MODEL = "klue/bert-base"
LORA_MODEL = "jhj0423/nsmc-sentiment-lora"  # your fine-tuned LoRA adapter
tokenizer = AutoTokenizer.from_pretrained(LORA_MODEL)
base_model = AutoModelForSequenceClassification.from_pretrained(
    BASE_MODEL,
    num_labels=2
)
model = PeftModel.from_pretrained(base_model, LORA_MODEL)
model.eval()
device = "cuda" if torch.cuda.is_available() else "cpu"
model.to(device)
print(f"์๋ฃ! (Device: {device})")
# Sentiment analysis function
def analyze_sentiment(text):
    if not text.strip():
        return "Please enter some text", {}

    # Tokenize
    inputs = tokenizer(
        text,
        return_tensors="pt",
        truncation=True,
        max_length=128,
        padding=True
    ).to(device)

    # Predict
    with torch.no_grad():
        outputs = model(**inputs)
        probs = torch.softmax(outputs.logits, dim=-1)[0]

    # Format the result
    pred = torch.argmax(probs).item()
    label = "😊 Positive" if pred == 1 else "😞 Negative"
    confidence = probs[pred].item()
    result = f"**{label}** (confidence: {confidence*100:.1f}%)"
    prob_dict = {
        "😞 Negative": float(probs[0]),
        "😊 Positive": float(probs[1])
    }
    return result, prob_dict
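
# Quick sanity check when running locally (illustrative; the exact score
# depends on the trained adapter):
# result, probs = analyze_sentiment("정말 재미있는 영화였어요!")  # "Really enjoyable movie!"
# print(result)  # -> a markdown string like "**😊 Positive** (confidence: ...)"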
# Gradio UI
demo = gr.Interface(
    fn=analyze_sentiment,
    inputs=gr.Textbox(
        label="Movie review",
        placeholder="Enter a movie review (in Korean)...",
        lines=3
    ),
    outputs=[
        gr.Markdown(label="Analysis result"),
        gr.Label(label="Sentiment probabilities", num_top_classes=2)
    ],
    title="Movie Review Sentiment Analysis",
    description="An NSMC sentiment analysis model fine-tuned with LoRA.",
    examples=[
        ["정말 재미있는 영화였어요! 강력 추천합니다."],  # "Really enjoyable movie! Highly recommended."
        ["시간 낭비였습니다. 별로였어요."],  # "A waste of time. Not good."
        ["배우들의 연기가 훌륭했습니다."],  # "The actors' performances were excellent."
        ["스토리가 지루하고 재미없었어요."],  # "The story was boring and dull."
    ],
    theme="soft",
    allow_flagging="never"
)
# Launch the app
demo.launch(share=True, debug=True)
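
# Note: share=True is mainly useful for local runs (it creates a temporary
# public gradio.live URL); a Hugging Face Space is already publicly hosted.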