import gradio as gr
import torch
from transformers import AutoTokenizer, AutoModelForSeq2SeqLM

# Load the model and tokenizer once at startup (not per-request).
model_name = "gj5520/kkachi60_en2ko"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSeq2SeqLM.from_pretrained(model_name)

# Prefer GPU when available; eval() disables dropout for inference.
device = "cuda" if torch.cuda.is_available() else "cpu"
model = model.to(device).eval()


def translate(text: str) -> str:
    """Translate English *text* to Korean with the kkachi60_en2ko model.

    Args:
        text: English source text (may be multi-line).

    Returns:
        The decoded Korean translation, or an empty string when the
        input is empty/whitespace (avoids running beam search for nothing).
    """
    if not text or not text.strip():
        return ""
    # Task prefix expected by the seq2seq checkpoint (T5-style conditioning).
    prefix = "translate English to Korean: "
    inputs = tokenizer(
        prefix + text,
        return_tensors="pt",
        truncation=True,
        padding="longest",
    ).to(device)
    # inference_mode: no autograd graph is built, saving memory and time.
    with torch.inference_mode():
        outputs = model.generate(
            **inputs,
            max_new_tokens=100,
            num_beams=5,
            no_repeat_ngram_size=3,
            early_stopping=True,
        )
    return tokenizer.decode(outputs[0], skip_special_tokens=True)


# Gradio interface definition.
interface = gr.Interface(
    fn=translate,
    inputs=gr.Textbox(
        lines=3,
        placeholder="Enter English text here...",
    ),
    outputs="text",
    title="영어→한국어 번역기 (kkachi60_en2ko)",
    description="Hugging Face kkachi60_en2ko 모델을 사용한 실시간 번역",
)

# Auto-launch when run as a script (e.g. on Hugging Face Spaces).
if __name__ == "__main__":
    interface.launch()