Aranwer commited on
Commit
882c653
·
verified ·
1 Parent(s): fef001b

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +26 -242
app.py CHANGED
@@ -1,246 +1,30 @@
1
import os
import gradio as gr
from typing import List
import pyttsx3
import speech_recognition as sr
from groq import Groq
from mistralai import Client  # Mistral API client

# SECURITY FIX: never hard-code API keys in source control — the previous
# revision committed live Mistral/Groq credentials (which must now be rotated).
# Read them from the environment instead (set MISTRAL_API_KEY / GROQ_API_KEY
# before launching, e.g. via Hugging Face Space secrets).
MISTRAL_API_KEY = os.environ.get("MISTRAL_API_KEY")
GROQ_API_KEY = os.environ.get("GROQ_API_KEY")
if not MISTRAL_API_KEY or not GROQ_API_KEY:
    raise RuntimeError(
        "MISTRAL_API_KEY and GROQ_API_KEY must be set in the environment."
    )

# Initialize API clients used by the game logic below.
mistral_client = Client(api_key=MISTRAL_API_KEY)
groq_client = Groq(api_key=GROQ_API_KEY)

# Text-to-speech engine and speech recognizer for the optional audio mode.
engine = pyttsx3.init()
recognizer = sr.Recognizer()
21
- class KasotiGame:
22
- def __init__(self):
23
- self.reset_game()
24
-
25
- def reset_game(self):
26
- self.questions_asked = []
27
- self.answers_given = []
28
- self.current_question = ""
29
- self.game_over = False
30
- self.guess_attempted = False
31
- self.current_language = "English"
32
- self.audio_mode = False
33
-
34
- def generate_question(self) -> str:
35
- if len(self.questions_asked) >= 20:
36
- self.game_over = True
37
- return "I've reached the maximum number of questions. Game over!"
38
-
39
- if not self.questions_asked:
40
- question = "Is it a living being?"
41
- else:
42
- prompt = f"""
43
- We're playing a guessing game where you try to guess what I'm thinking of by asking yes/no questions.
44
- Here is the history of questions and answers so far:
45
- {self._format_qa_history()}
46
- Based on this, generate the next yes/no question that narrows possibilities. Only output the question itself.
47
- """
48
- chat_response = mistral_client.chat_complete(
49
- model="mistral-small-latest",
50
- messages=[{"role": "user", "content": prompt}],
51
- temperature=1.5,
52
- top_p=1,
53
- max_tokens=0,
54
- n=1
55
- )
56
- question = chat_response["prediction"]["content"].strip()
57
- question = question.split("\n")[0]
58
- if not question.endswith("?"):
59
- question += "?"
60
-
61
- self.current_question = question
62
- self.questions_asked.append(question)
63
- return question
64
-
65
- def process_answer(self, answer: str) -> str:
66
- if self.game_over:
67
- return "The game is already over. Please start a new game."
68
-
69
- answer = answer.lower().strip()
70
- if answer not in ["yes", "no", "y", "n"]:
71
- return "Please answer with 'yes' or 'no'."
72
-
73
- clean_answer = "yes" if answer in ["yes", "y"] else "no"
74
- self.answers_given.append(clean_answer)
75
-
76
- if len(self.questions_asked) >= 5 and len(self.questions_asked) % 3 == 0:
77
- return self.make_guess()
78
-
79
- return self.generate_question()
80
-
81
- def make_guess(self) -> str:
82
- prompt = f"""
83
- We're playing a guessing game. Here’s the history:
84
- {self._format_qa_history()}
85
- Make your best guess. Respond like: "I think it's [your guess]." or say "I'm not sure yet."
86
- """
87
- chat_response = mistral_client.chat_complete(
88
- model="mistral-small-latest",
89
- messages=[{"role": "user", "content": prompt}],
90
- temperature=1.5,
91
- top_p=1,
92
- max_tokens=0,
93
- n=1
94
- )
95
- guess = chat_response["prediction"]["content"].strip()
96
- self.guess_attempted = True
97
-
98
- if "I think it's" in guess or "I'm not sure yet" in guess:
99
- return guess
100
- return f"I think it's {guess}"
101
-
102
- def get_suggestion(self) -> str:
103
- if not self.current_question:
104
- return "No current question to provide suggestions for."
105
-
106
- prompt = f"""
107
- A player is asked: "{self.current_question}"
108
- They are unsure how to answer. Give a few hints about what typically leads to "yes" or "no" responses.
109
- Be brief and helpful.
110
- """
111
- chat_response = groq_client.chat.completions.create(
112
- model="mixtral-8x7b-32768",
113
- messages=[{"role": "user", "content": prompt}]
114
- )
115
- return chat_response.choices[0].message.content
116
-
117
- def _format_qa_history(self) -> str:
118
- return "\n".join(f"Q: {q}\nA: {a}" for q, a in zip(self.questions_asked, self.answers_given))
119
-
120
- def speak(self, text: str):
121
- """Convert text to speech."""
122
- if self.current_language == "Urdu":
123
- try:
124
- engine.setProperty('voice', 'ur')
125
- except:
126
- pass
127
- else:
128
- engine.setProperty('voice', 'en')
129
- engine.say(text)
130
- engine.runAndWait()
131
-
132
- def listen(self) -> str:
133
- """Capture and recognize speech input."""
134
- with sr.Microphone() as source:
135
- print("Listening...")
136
- audio = recognizer.listen(source)
137
- try:
138
- language_code = "ur-PK" if self.current_language == "Urdu" else "en-US"
139
- text = recognizer.recognize_google(audio, language=language_code)
140
- return text.lower()
141
- except sr.UnknownValueError:
142
- return "Could not understand audio"
143
- except sr.RequestError:
144
- return "API unavailable"
145
-
146
# Module-level game instance shared by all UI callbacks.
game = KasotiGame()

def play_kasoti(answer: str, mode: str, language: str, use_audio: bool):
    """Gradio callback: drive one turn of the game.

    Returns a 4-tuple: (game output, cleared answer box, history text,
    suggestion text) matching the four output widgets.
    """
    global game

    # Starting a new round: reset state and ask the opening question.
    if mode == "new":
        game.reset_game()
        game.audio_mode = use_audio
        game.current_language = language
        opening = game.generate_question()
        if use_audio:
            game.speak(opening)
        return opening, "", "\n".join(game.questions_asked), ""

    # An unsure player gets a hint for the pending question instead of a turn.
    if answer.lower() in ["not sure", "unsure", "i'm not sure"]:
        hint = game.get_suggestion()
        return game.current_question, "", "\n".join(game.questions_asked), hint

    reply = game.process_answer(answer)
    if game.audio_mode:
        game.speak(reply)

    transcript = "\n".join(
        f"Q: {q}\nA: {a}" for q, a in zip(game.questions_asked, game.answers_given)
    )
    return reply, "", transcript, ""

def toggle_audio(audio_mode: bool):
    """Gradio callback: persist the audio-mode checkbox onto the game state."""
    game.audio_mode = audio_mode
    return "Audio mode: ON" if audio_mode else "Audio mode: OFF"
178
-
179
# Gradio UI
with gr.Blocks(title="Kasoti Game") as demo:
    gr.Markdown("# 🎮 Kasoti - The Guessing Game")
    gr.Markdown("Think of a famous person, place, or object. I'll try to guess it by asking yes/no questions!")

    with gr.Row():
        with gr.Column():
            # Left column: player inputs and settings.
            language = gr.Dropdown(
                choices=["English", "Urdu"],
                value="English",
                label="Select Language"
            )
            audio_mode = gr.Checkbox(
                label="Enable Audio Mode",
                value=False
            )
            mode = gr.Radio(
                choices=["continue", "new"],
                value="new",
                label="Game Mode",
                visible=True
            )
            answer = gr.Textbox(
                label="Your Answer (yes/no)",
                placeholder="Type 'yes', 'no', or 'not sure'"
            )
            submit_btn = gr.Button("Submit")

            audio_status = gr.Textbox(
                label="Audio Status",
                interactive=False
            )

        with gr.Column():
            # Right column: read-only game state displays.
            output = gr.Textbox(
                label="Game Output",
                interactive=False
            )
            history = gr.Textbox(
                label="Game History",
                interactive=False,
                lines=10
            )
            suggestion = gr.Textbox(
                label="Suggestion (if unsure)",
                interactive=False
            )

    # Events
    # Keep game.audio_mode in sync with the checkbox.
    audio_mode.change(
        fn=toggle_audio,
        inputs=audio_mode,
        outputs=audio_status
    )

    # Main turn handler; the second output clears the answer textbox.
    submit_btn.click(
        fn=play_kasoti,
        inputs=[answer, mode, language, audio_mode],
        outputs=[output, answer, history, suggestion]
    )

    # Blank all display widgets when the page first loads.
    demo.load(
        fn=lambda: ("", "", "", ""),
        outputs=[output, answer, history, suggestion]
    )

if __name__ == "__main__":
    demo.launch()
 
 
1
from transformers import AutoModelForCausalLM, AutoTokenizer

# Load the Mistral model and tokenizer from the Hugging Face Hub.
# BUG FIX: "mistralai/mistral-7b" is not a valid Hub repo id; the base 7B
# checkpoint is published as "mistralai/Mistral-7B-v0.1".
# NOTE(review): this repo is gated — the runtime needs an HF token with
# access, or swap in an open causal-LM checkpoint.
model_name = "mistralai/Mistral-7B-v0.1"  # Change this to the desired model name
model = AutoModelForCausalLM.from_pretrained(model_name)
tokenizer = AutoTokenizer.from_pretrained(model_name)
7
 
8
# Function to generate text
def generate_text(prompt: str, max_length: int = 50) -> str:
    """Generate a continuation of *prompt* with the loaded causal LM.

    Args:
        prompt: Seed text to continue.
        max_length: Maximum total sequence length (prompt + completion),
            in tokens.

    Returns:
        The decoded text (prompt included) with special tokens stripped.
    """
    # Tokenize input prompt; keep the attention mask so generate() does not
    # have to infer it (avoids a transformers warning and wrong padding).
    inputs = tokenizer(prompt, return_tensors="pt")

    # Generate text.
    outputs = model.generate(
        input_ids=inputs['input_ids'],
        attention_mask=inputs['attention_mask'],
        max_length=max_length,          # Max tokens in output (incl. prompt)
        num_return_sequences=1,         # Number of sequences to generate
        # BUG FIX: temperature/top_p/top_k are silently IGNORED under the
        # default greedy decoding — sampling must be enabled explicitly.
        do_sample=True,
        temperature=0.7,                # Adjust for randomness (higher = more random)
        top_p=0.9,                      # Top-p (nucleus) sampling for diversity
        top_k=50                        # Top-k sampling for diversity
    )

    # Decode the first (and only) generated sequence and return it.
    generated_text = tokenizer.decode(outputs[0], skip_special_tokens=True)
    return generated_text
26
+
27
# Example usage.
# IMPROVEMENT: guard with __main__ so merely importing this module does not
# trigger an expensive model generation as a side effect.
if __name__ == "__main__":
    prompt = "The future of artificial intelligence is"
    generated_output = generate_text(prompt)
    print("Generated Text:", generated_output)