# Load model directly
from transformers import AutoTokenizer, AutoModelForCausalLM
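# Download (or load from the local Hugging Face cache) the tokenizer and weights.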
tokenizer = AutoTokenizer.from_pretrained("rubricreward/mR3-Qwen3-14B-en-prompt-en-thinking")
model = AutoModelForCausalLM.from_pretrained("rubricreward/mR3-Qwen3-14B-en-prompt-en-thinking")
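# Note: this is a 14B-parameter model, so loading full-precision weights on CPU
# needs a lot of RAM; on a GPU, consider also passing torch_dtype="auto" and
# device_map="auto" (requires the `accelerate` package) to reduce memory use.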
# Build the chat as a list of {"role": ..., "content": ...} message dicts.
messages = [
    {"role": "user", "content": "Who are you?"},
]
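# apply_chat_template formats the messages with the model's chat template,
# appends the assistant generation prompt, tokenizes, and returns PyTorch
# tensors; .to(model.device) moves the inputs onto the model's device.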
inputs = tokenizer.apply_chat_template(
    messages,
    add_generation_prompt=True,
    tokenize=True,
    return_dict=True,
    return_tensors="pt",
).to(model.device)
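# Generate up to 40 new tokens, using the model's default generation settings.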
outputs = model.generate(**inputs, max_new_tokens=40)
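# Slice off the prompt tokens so only the newly generated reply is decoded;
# optionally pass skip_special_tokens=True to decode() to hide special tokens.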
print(tokenizer.decode(outputs[0][inputs["input_ids"].shape[-1]:]))