File size: 2,376 Bytes
03d26aa
 
 
 
 
 
 
 
 
 
 
 
 
d6876be
03d26aa
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
# ==========================================
# Sentiment-analysis Gradio app using a Hugging Face model:
# KLUE BERT base + a LoRA adapter fine-tuned on NSMC (Korean movie reviews)
# ==========================================

import gradio as gr
import torch
from transformers import AutoTokenizer, AutoModelForSequenceClassification
from peft import PeftModel

# --- Model loading (runs at import time; downloads weights on first run) ---
print("๋ชจ๋ธ ๋กœ๋“œ ์ค‘...")

BASE_MODEL = "klue/bert-base"
LORA_MODEL = "jhj0423/nsmc-sentiment-lora"  # LoRA adapter repo (your model)

# Tokenizer is loaded from the adapter repo so it matches the fine-tuned checkpoint.
tokenizer = AutoTokenizer.from_pretrained(LORA_MODEL)
base_model = AutoModelForSequenceClassification.from_pretrained(
    BASE_MODEL,
    num_labels=2  # binary sentiment head: index 0 = negative, 1 = positive
)
# Attach the LoRA adapter weights on top of the base classifier.
model = PeftModel.from_pretrained(base_model, LORA_MODEL)
model.eval()  # inference mode (disables dropout etc.)

# Prefer GPU when available; `device` is also used by analyze_sentiment below.
device = "cuda" if torch.cuda.is_available() else "cpu"
model.to(device)

print(f"์™„๋ฃŒ! (Device: {device})")

# ๊ฐ์ • ๋ถ„์„ ํ•จ์ˆ˜
def analyze_sentiment(text):
    if not text.strip():
        return "ํ…์ŠคํŠธ๋ฅผ ์ž…๋ ฅํ•ด์ฃผ์„ธ์š”", {}
    
    # ํ† ํฌ๋‚˜์ด์ง•
    inputs = tokenizer(
        text,
        return_tensors="pt",
        truncation=True,
        max_length=128,
        padding=True
    ).to(device)
    
    # ์˜ˆ์ธก
    with torch.no_grad():
        outputs = model(**inputs)
        probs = torch.softmax(outputs.logits, dim=-1)[0]
    
    # ๊ฒฐ๊ณผ
    pred = torch.argmax(probs).item()
    label = "๐Ÿ˜Š ๊ธ์ •" if pred == 1 else "๐Ÿ˜ž ๋ถ€์ •"
    confidence = probs[pred].item()
    
    result = f"**{label}** (ํ™•์‹ ๋„: {confidence*100:.1f}%)"
    
    prob_dict = {
        "๐Ÿ˜ž ๋ถ€์ •": float(probs[0]),
        "๐Ÿ˜Š ๊ธ์ •": float(probs[1])
    }
    
    return result, prob_dict

# Gradio UI: one review textbox in, a markdown verdict plus a
# two-class probability bar out.
review_input = gr.Textbox(
    label="์˜ํ™” ๋ฆฌ๋ทฐ",
    placeholder="์˜ํ™”์— ๋Œ€ํ•œ ๋ฆฌ๋ทฐ๋ฅผ ์ž…๋ ฅํ•˜์„ธ์š”...",
    lines=3,
)
result_components = [
    gr.Markdown(label="๋ถ„์„ ๊ฒฐ๊ณผ"),
    gr.Label(label="๊ฐ์ • ํ™•๋ฅ ", num_top_classes=2),
]
sample_reviews = [
    ["์ •๋ง ์žฌ๋ฏธ์žˆ๋Š” ์˜ํ™”์˜€์–ด์š”! ๊ฐ•๋ ฅ ์ถ”์ฒœํ•ฉ๋‹ˆ๋‹ค."],
    ["์‹œ๊ฐ„ ๋‚ญ๋น„์˜€์Šต๋‹ˆ๋‹ค. ๋ณ„๋กœ์˜€์–ด์š”."],
    ["๋ฐฐ์šฐ๋“ค์˜ ์—ฐ๊ธฐ๊ฐ€ ํ›Œ๋ฅญํ–ˆ์Šต๋‹ˆ๋‹ค."],
    ["์Šคํ† ๋ฆฌ๊ฐ€ ์ง€๋ฃจํ•˜๊ณ  ์žฌ๋ฏธ์—†์—ˆ์–ด์š”."],
]

demo = gr.Interface(
    fn=analyze_sentiment,
    inputs=review_input,
    outputs=result_components,
    title="์˜ํ™” ๋ฆฌ๋ทฐ ๊ฐ์ • ๋ถ„์„",
    description="LoRA๋กœ ํŒŒ์ธํŠœ๋‹๋œ NSMC ๊ฐ์ • ๋ถ„์„ ๋ชจ๋ธ์ž…๋‹ˆ๋‹ค.",
    examples=sample_reviews,
    theme="soft",
    # NOTE(review): allow_flagging was deprecated in Gradio 4.x in favor of
    # flagging_mode — confirm which Gradio version this project pins.
    allow_flagging="never",
)

# Launch the web UI.
# NOTE(review): share=True requests a publicly reachable gradio.live link —
# anyone with the URL can hit the model; confirm that exposure is intended.
demo.launch(share=True, debug=True)