# Myllm / app.py
from fastapi import FastAPI, Form
from transformers import AutoTokenizer, AutoModelForCausalLM
import torch

app = FastAPI()

# Example model; the official repo for this distill lives under the
# deepseek-ai organization on the Hugging Face Hub.
MODEL_NAME = "deepseek-ai/DeepSeek-R1-Distill-Qwen-32B"

tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME)
# Load in float16 and move to GPU when available, so the model sits on
# the same device the tokenized inputs are sent to below.
device = "cuda" if torch.cuda.is_available() else "cpu"
model = AutoModelForCausalLM.from_pretrained(MODEL_NAME, torch_dtype=torch.float16).to(device)
# Possibly load quantized or INT8 version
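# A minimal sketch of that option, assuming the bitsandbytes package is
# installed: replace the float16 load above with a 4-bit quantized load
# to cut GPU memory use (device placement is then handled by device_map):
#
#   from transformers import BitsAndBytesConfig
#   bnb_config = BitsAndBytesConfig(load_in_4bit=True,
#                                   bnb_4bit_compute_dtype=torch.float16)
#   model = AutoModelForCausalLM.from_pretrained(
#       MODEL_NAME, quantization_config=bnb_config, device_map="auto")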
@app.post("/chat")
async def chat(prompt: str = Form(...)):
    # Tokenize the prompt and move it to the same device as the model.
    inputs = tokenizer(prompt, return_tensors="pt").to(device)
    outputs = model.generate(**inputs, max_new_tokens=200)
    reply = tokenizer.decode(outputs[0], skip_special_tokens=True)
    return {"reply": reply}
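
# Example request, assuming the app is run with `uvicorn app:app` and that
# python-multipart (required by FastAPI's Form parsing) is installed:
#   curl -X POST -d "prompt=Hello" http://localhost:8000/chat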