Spaces:
Sleeping
# ==========================================
# Sentiment-analysis Gradio app built on a Hugging Face model
# ==========================================
import gradio as gr
import torch
from transformers import AutoTokenizer, AutoModelForSequenceClassification
from peft import PeftModel

# --- Model loading ---------------------------------------------------------
print("λͺ¨λΈ λ‘λ μ€...")

BASE_MODEL = "klue/bert-base"
LORA_MODEL = "jhj0423/nsmc-sentiment-lora"  # LoRA adapter repo (fine-tuned on NSMC)

# Tokenizer is taken from the adapter repo so it matches the fine-tune.
tokenizer = AutoTokenizer.from_pretrained(LORA_MODEL)

# Base classifier head with 2 labels (0 = negative, 1 = positive),
# then the LoRA adapter weights are attached on top.
base_model = AutoModelForSequenceClassification.from_pretrained(
    BASE_MODEL,
    num_labels=2,
)
model = PeftModel.from_pretrained(base_model, LORA_MODEL)
model.eval()  # inference only — disable dropout etc.

# Prefer GPU when available; the tokenized inputs are moved to the
# same device inside analyze_sentiment.
device = "cuda" if torch.cuda.is_available() else "cpu"
model.to(device)
print(f"μλ£! (Device: {device})")
| # κ°μ λΆμ ν¨μ | |
def analyze_sentiment(text):
    """Classify a movie review as positive or negative.

    Args:
        text: Review text entered by the user.

    Returns:
        Tuple of (markdown summary string, {label: probability} dict
        suitable for a gr.Label component).
    """
    # Guard clause: nothing to analyze.
    if not text.strip():
        return "ν μ€νΈλ₯Ό μ λ ₯ν΄μ£ΌμΈμ", {}

    # Tokenize and move the tensors to the model's device.
    encoded = tokenizer(
        text,
        return_tensors="pt",
        truncation=True,
        max_length=128,
        padding=True,
    ).to(device)

    # Forward pass without gradient tracking.
    with torch.no_grad():
        logits = model(**encoded).logits
    probabilities = logits.softmax(dim=-1)[0]

    # Highest-probability class: index 1 = positive, index 0 = negative.
    predicted = probabilities.argmax().item()
    label = "π κΈμ " if predicted == 1 else "π λΆμ "
    confidence = probabilities[predicted].item()

    summary = f"**{label}** (νμ λ: {confidence*100:.1f}%)"
    class_probs = {
        "π λΆμ ": float(probabilities[0]),
        "π κΈμ ": float(probabilities[1]),
    }
    return summary, class_probs
# --- Gradio UI -------------------------------------------------------------
# Components are built up-front and handed to gr.Interface.
review_input = gr.Textbox(
    label="μν 리뷰",
    placeholder="μνμ λν 리뷰λ₯Ό μ λ ₯νμΈμ...",
    lines=3,
)
result_output = gr.Markdown(label="λΆμ κ²°κ³Ό")
probability_output = gr.Label(label="κ°μ νλ₯ ", num_top_classes=2)

demo = gr.Interface(
    fn=analyze_sentiment,
    inputs=review_input,
    outputs=[result_output, probability_output],
    title="μν 리뷰 κ°μ λΆμ",
    description="LoRAλ‘ νμΈνλλ NSMC κ°μ λΆμ λͺ¨λΈμ λλ€.",
    examples=[
        ["μ λ§ μ¬λ―Έμλ μνμμ΄μ! κ°λ ₯ μΆμ²ν©λλ€."],
        ["μκ° λλΉμμ΅λλ€. λ³λ‘μμ΄μ."],
        ["λ°°μ°λ€μ μ°κΈ°κ° νλ₯νμ΅λλ€."],
        ["μ€ν λ¦¬κ° μ§λ£¨νκ³ μ¬λ―Έμμμ΄μ."],
    ],
    theme="soft",
    # NOTE(review): allow_flagging is deprecated in Gradio 5 (renamed
    # flagging_mode) — confirm the Gradio version pinned by this Space.
    allow_flagging="never",
)

# share=True exposes a public link; debug=True surfaces errors in the console.
demo.launch(share=True, debug=True)