# Hugging Face Spaces app (the Space status page showed: "Runtime error").
| import gradio as gr | |
| from transformers import AutoTokenizer, AutoModelForCausalLM | |
| from peft import PeftModel | |
| import torch | |
# --- Load tokenizer and model for CPU ---
# Base model: Qwen3-1.7B (unsloth mirror); adapter: Azerbaijani LoRA fine-tune.
tokenizer = AutoTokenizer.from_pretrained("unsloth/Qwen3-1.7B")
base_model = AutoModelForCausalLM.from_pretrained(
    "unsloth/Qwen3-1.7B",
    # NOTE(review): `dtype=` is only accepted by recent transformers releases
    # (older ones spell it `torch_dtype=`) — confirm the pinned transformers
    # version supports it; a TypeError here at import time would explain the
    # Space's startup "Runtime error".
    dtype=torch.float32,        # full precision — CPU has no fp16 kernels
    device_map={"": "cpu"},     # force every module onto the CPU
)
# Attach the Azerbaijani LoRA adapter on top of the frozen base weights.
model = PeftModel.from_pretrained(base_model, "khazarai/Qwen3-ALP-AZ").to("cpu")
# --- Chatbot logic ---
def generate_response(user_input, chat_history):
    """Generate an assistant reply for *user_input* and append it to the chat.

    Parameters
    ----------
    user_input : str
        Raw text from the textbox; blank/whitespace-only input is ignored.
    chat_history : list[dict]
        Running conversation in model format: {"role": ..., "content": ...}.

    Returns
    -------
    tuple
        (list of (user, bot) tuples for the gr.Chatbot display,
         updated model-format history for gr.State).
    """

    def _to_pairs(history):
        # Pair each user turn with the assistant turn that follows it —
        # the (user, bot) tuple format the gr.Chatbot component expects.
        return [
            (m["content"], history[i + 1]["content"])
            for i, m in enumerate(history[:-1])
            if m["role"] == "user"
        ]

    if not user_input.strip():
        # BUG FIX: previously returned the dict-format history as the Chatbot
        # value; the component is wired in tuple mode and needs pairs.
        return _to_pairs(chat_history), chat_history

    chat_history.append({"role": "user", "content": user_input})
    prompt = tokenizer.apply_chat_template(
        chat_history,
        tokenize=False,
        add_generation_prompt=True,
        enable_thinking=False,  # suppress Qwen3 "thinking" preamble
    )
    inputs = tokenizer(prompt, return_tensors="pt").to("cpu")
    output_tokens = model.generate(
        **inputs,
        max_new_tokens=1024,
        temperature=0.7,
        top_p=0.8,
        top_k=20,
        do_sample=True,
    )
    # BUG FIX: decode only the newly generated tokens instead of decoding the
    # whole sequence and splitting on user_input — the old approach truncated
    # the reply whenever the model echoed the user's text in its answer.
    new_tokens = output_tokens[0][inputs["input_ids"].shape[1]:]
    response = tokenizer.decode(new_tokens, skip_special_tokens=True).strip()
    chat_history.append({"role": "assistant", "content": response})
    return _to_pairs(chat_history), chat_history
# --- Advanced UI Design ---
# Soft purple theme; custom CSS + header banner injected as raw HTML.
with gr.Blocks(theme=gr.themes.Soft(primary_hue="purple", secondary_hue="slate")) as demo:
    # NOTE: emoji in the labels below appear mojibake'd in this source
    # (e.g. "π§"); reproduced byte-for-byte since they are runtime strings.
    gr.HTML("""
<style>
body {background: radial-gradient(circle at top, #E9D5FF 0%, #F5F3FF 100%);}
.gradio-container {font-family: 'Inter', sans-serif;}
.chat-header {
text-align: center;
background: linear-gradient(90deg, #C084FC, #A855F7);
color: white;
padding: 20px 10px;
border-radius: 18px;
margin-bottom: 20px;
box-shadow: 0px 4px 20px rgba(168,85,247,0.3);
}
.chat-header h1 {
font-size: 2.4em;
font-weight: 800;
margin-bottom: 0px;
}
.chat-header p {
margin-top: 5px;
color: #F3E8FF;
font-weight: 500;
}
.send-btn {
background: linear-gradient(90deg, #C084FC, #A855F7);
color: white !important;
transition: all 0.25s ease-in-out;
}
.send-btn:hover {
transform: scale(1.05);
box-shadow: 0 0 12px rgba(192,132,252,0.5);
}
.textbox {
backdrop-filter: blur(12px);
background-color: rgba(255,255,255,0.6);
border-radius: 16px !important;
}
.footer {
text-align: center;
margin-top: 25px;
color: #6B7280;
font-size: 0.9em;
}
</style>
<div class="chat-header">
<h1> π§ Azerbaijani Chatbot </h1>
</div>
""")
    with gr.Row():
        with gr.Column(scale=6):
            # Tuple-mode chat display: each entry is a (user, bot) pair.
            chatbot = gr.Chatbot(
                label="π¬ Chat-az",
                height=600,
                # NOTE(review): bubble_full_width is deprecated/removed in
                # newer gradio releases — confirm the pinned gradio version.
                bubble_full_width=True,
                show_copy_button=True,
                avatar_images=(
                    "https://cdn-icons-png.flaticon.com/512/1077/1077012.png",  # user
                    "https://cdn-icons-png.flaticon.com/512/4140/4140048.png",  # bot
                ),
            )
            user_input = gr.Textbox(
                placeholder="Ask about..",
                label="Type your question",
                lines=3,
                elem_classes=["textbox"],
                autofocus=True,
            )
            with gr.Row():
                send_btn = gr.Button("π Send", variant="primary", elem_classes=["send-btn"])
                clear_btn = gr.Button("π§Ή Clear Chat")
    # Model-format conversation history: list of {"role", "content"} dicts.
    state = gr.State([])
    # Both the button and Enter-in-textbox trigger generation.
    send_btn.click(generate_response, [user_input, state], [chatbot, state])
    user_input.submit(generate_response, [user_input, state], [chatbot, state])
    # Reset both the display and the stored history.
    clear_btn.click(lambda: ([], []), None, [chatbot, state])
# share=True requests a public gradio.live tunnel (no-op on HF Spaces).
demo.launch(share=True)