import gradio as gr
from transformers import AutoTokenizer, AutoModelForSeq2SeqLM
import torch

# Load the model and tokenizer
model_name = "gj5520/kkachi60_en2ko"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSeq2SeqLM.from_pretrained(model_name)

# Use the GPU if available and switch to inference mode
device = "cuda" if torch.cuda.is_available() else "cpu"
model = model.to(device).eval()

# Translation function
def translate(text: str):
    prefix = "translate English to Korean: "
    inputs = tokenizer(prefix + text,
                       return_tensors="pt",
                       truncation=True,
                       padding="longest").to(device)
    outputs = model.generate(
        **inputs,
        max_new_tokens=100,
        num_beams=5,
        no_repeat_ngram_size=3,
        early_stopping=True,
    )
    return tokenizer.decode(outputs[0], skip_special_tokens=True)

# Define the Gradio interface
interface = gr.Interface(
    fn=translate,
    inputs=gr.Textbox(
        lines=3,
        placeholder="Enter English text here..."
    ),
    outputs="text",
    title="English → Korean Translator (kkachi60_en2ko)",
    description="Real-time translation using the Hugging Face kkachi60_en2ko model"
)
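
# Optional local sanity check (illustrative addition, not part of the original
# app): uncomment to confirm the model, tokenizer, and device setup produce a
# Korean translation end to end before deploying to Spaces.
# print(translate("The weather is nice today."))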

# Runs automatically on Spaces
if __name__ == "__main__":
    interface.launch()