Spaces:
Runtime error
Runtime error
Update lunacode.py
Browse files — lunacode.py (+2 −21)
lunacode.py
CHANGED
|
@@ -4,9 +4,6 @@ from bs4 import BeautifulSoup
|
|
| 4 |
from duckduckgo_search import DDGS
|
| 5 |
from transformers import AutoModelForCausalLM, AutoTokenizer
|
| 6 |
import wikipedia
|
| 7 |
-
from gtts import gTTS
|
| 8 |
-
import os
|
| 9 |
-
import uuid
|
| 10 |
|
| 11 |
model_path = "cosmosai471/Luna-v2"
|
| 12 |
tokenizer = AutoTokenizer.from_pretrained(model_path)
|
|
@@ -15,25 +12,16 @@ model = AutoModelForCausalLM.from_pretrained(
|
|
| 15 |
).to("cuda" if torch.cuda.is_available() else "cpu")
|
| 16 |
|
| 17 |
def detect_mode(query):
    """Classify a user query as "code", "creative", or "general".

    Non-string input falls back to "general". Matching is a
    case-insensitive substring test, and code keywords take
    precedence over creative ones.
    """
    if not isinstance(query, str):
        return "general"

    lowered = query.lower()
    keyword_modes = [
        (["code", "program", "python", "javascript", "function", "script", "build", "html", "css"], "code"),
        (["story", "write a story", "poem", "creative", "imagine", "novel", "dialogue"], "creative"),
    ]
    # First keyword group with a hit decides the mode.
    for keywords, mode in keyword_modes:
        if any(kw in lowered for kw in keywords):
            return mode
    return "general"
|
| 30 |
|
| 31 |
-
def text_to_speech_luna(text):
    """Synthesize *text* to English speech and return the saved MP3 path.

    Each call writes a uniquely named file under /tmp so concurrent
    replies never clobber one another's audio.
    """
    out_path = f"/tmp/luna_reply_{uuid.uuid4().hex}.mp3"
    gTTS(text=text, lang="en").save(out_path)
    return out_path
|
| 36 |
-
|
| 37 |
def get_generation_params(query):
|
| 38 |
mode = detect_mode(query)
|
| 39 |
if mode == "code":
|
|
@@ -101,13 +89,6 @@ def scrape_first_good_content(urls):
|
|
| 101 |
continue
|
| 102 |
return None, None
|
| 103 |
|
| 104 |
-
|
| 105 |
-
# NOTE(review): this is an exact duplicate of the text_to_speech_luna
# defined earlier in this file; Python keeps this later definition, which
# silently shadows the first. One of the two copies should be removed.
def text_to_speech_luna(text):
    """Synthesize *text* to English speech via gTTS and return the MP3 path."""
    tts = gTTS(text=text, lang="en")
    # Unique filename per call so parallel replies don't overwrite each other.
    filename = f"/tmp/luna_reply_{uuid.uuid4().hex}.mp3"
    tts.save(filename)
    return filename
|
| 110 |
-
|
| 111 |
def smart_luna_answer(user_question, max_tokens=512):
|
| 112 |
temperature, top_p = get_generation_params(user_question)
|
| 113 |
|
|
|
|
| 4 |
from duckduckgo_search import DDGS
|
| 5 |
from transformers import AutoModelForCausalLM, AutoTokenizer
|
| 6 |
import wikipedia
|
|
|
|
|
|
|
|
|
|
| 7 |
|
| 8 |
model_path = "cosmosai471/Luna-v2"
|
| 9 |
tokenizer = AutoTokenizer.from_pretrained(model_path)
|
|
|
|
| 12 |
).to("cuda" if torch.cuda.is_available() else "cpu")
|
| 13 |
|
| 14 |
def detect_mode(query):
    """Classify a user query as "code", "creative", or "general".

    Matching is a case-insensitive substring test over keyword lists;
    code keywords take precedence over creative ones.

    Non-string input (e.g. None) returns "general" instead of raising
    AttributeError on ``.lower()`` — this guard existed in the previous
    revision and was lost in the last edit.
    """
    if not isinstance(query, str):
        return "general"
    # Lowercase once up front; the original re-evaluated query.lower()
    # inside each generator, once per keyword tested.
    query = query.lower()
    code_keywords = ["code", "program", "python", "javascript", "function", "script", "build", "html", "css"]
    creative_keywords = ["story", "write a story", "poem", "creative", "imagine", "novel", "dialogue"]

    if any(kw in query for kw in code_keywords):
        return "code"
    if any(kw in query for kw in creative_keywords):
        return "creative"
    return "general"
|
| 24 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 25 |
def get_generation_params(query):
|
| 26 |
mode = detect_mode(query)
|
| 27 |
if mode == "code":
|
|
|
|
| 89 |
continue
|
| 90 |
return None, None
|
| 91 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 92 |
def smart_luna_answer(user_question, max_tokens=512):
|
| 93 |
temperature, top_p = get_generation_params(user_question)
|
| 94 |
|