mohamedyd committed
Commit 8dfd732 · verified · 1 Parent(s): d4df029

Update app.py

Files changed (1)
  1. app.py +35 -2
app.py CHANGED
@@ -1,4 +1,37 @@
  import streamlit as st
+ from transformers import AutoModelForCausalLM, AutoTokenizer
+ from peft import PeftModel

- x = st.slider('Select a value')
- st.write(x, 'squared is', x * x)
+ # Load the base model and the fine-tuned model
+ @st.cache_resource
+ def load_model():
+     base_model = AutoModelForCausalLM.from_pretrained("Qwen/Qwen2.5-Coder-3B-Instruct")
+     model = PeftModel.from_pretrained(base_model, "mohamedyd/Natural-Coder-3B-Instruct-V1")
+     tokenizer = AutoTokenizer.from_pretrained("Qwen/Qwen2.5-Coder-3B-Instruct")
+     return model, tokenizer
+
+ model, tokenizer = load_model()
+
+ # Streamlit app
+ st.title("Natural-Coder-3B-Instruct-V1 Model Interaction")
+
+ # Text input for user prompt
+ user_input = st.text_area("Enter your prompt here:", height=150)
+
+ # Button to generate response
+ if st.button("Generate Response"):
+     if user_input:
+         # Tokenize the input
+         inputs = tokenizer(user_input, return_tensors="pt")
+
+         # Generate response
+         outputs = model.generate(**inputs, max_length=512, num_return_sequences=1)
+
+         # Decode the output
+         response = tokenizer.decode(outputs[0], skip_special_tokens=True)
+
+         # Display the response
+         st.write("Model Response:")
+         st.write(response)
+     else:
+         st.write("Please enter a prompt to generate a response.")