Update app.py
app.py CHANGED
@@ -12,8 +12,9 @@ from langchain_community.embeddings import OpenAIEmbeddings
 from langchain_community.vectorstores import Chroma
 from langchain.text_splitter import RecursiveCharacterTextSplitter
 from langchain.chains import RetrievalQA
+from langchain import LLMChain, PromptTemplate
 from langchain.agents import AgentExecutor, Tool
-from langchain.
+from langchain.llms import OpenAI
 from PIL import Image
 from decord import VideoReader, cpu
 from tavily import TavilyClient
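The two added imports pull in LangChain's LLMChain, PromptTemplate, and OpenAI wrappers. A minimal sketch of the pattern they enable, separate from app.py: the model name below is a placeholder for the repo's MODEL constant, and an OPENAI_API_KEY is assumed to be set in the environment.

# Illustrative sketch only; mirrors the imports added above.
# "gpt-3.5-turbo-instruct" is a stand-in for app.py's MODEL constant.
from langchain import LLMChain, PromptTemplate
from langchain.llms import OpenAI

prompt = PromptTemplate(input_variables=["query"], template="{query}")
llm = OpenAI(model="gpt-3.5-turbo-instruct")  # reads OPENAI_API_KEY from the environment
chain = LLMChain(llm=llm, prompt=prompt)
print(chain.run("Summarize what handle_input does."))

Note that langchain.llms.OpenAI targets the completions endpoint; a chat model would normally be wrapped with langchain.chat_models.ChatOpenAI instead.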
@@ -135,15 +136,15 @@ def handle_input(user_prompt, image=None, video=None, audio=None, doc=None, webs
 )
 )
 
+# Create an LLMChain using a prompt template and the model
+prompt_template = PromptTemplate(input_variables=["query"], template="{query}")
+llm = OpenAI(model=MODEL)
+llm_chain = LLMChain(llm=llm, prompt=prompt_template)
+
 def llm_function(query):
-
-        model=MODEL,
-        messages=[{"role": "user", "content": query}]
-    )
-    return response.choices[0].message.content
+    return llm_chain.run(query)
 
-
-agent_executor = AgentExecutor.from_agent_and_tools(agent=agent, tools=tools, verbose=True)
+agent_executor = AgentExecutor.from_llm_and_tools(llm=llm_function, tools=tools, verbose=True)
 
 if image:
     image = Image.open(image).convert('RGB')
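For reference, the removed executor line used AgentExecutor.from_agent_and_tools, which expects an agent object built beforehand rather than a bare LLM. A rough sketch of that conventional wiring; the echo Tool and model name are placeholders rather than app.py's actual tools and MODEL, and ZeroShotAgent is chosen purely for illustration.

# Sketch of the wiring implied by the removed from_agent_and_tools call.
# The echo Tool and model name are placeholders, not app.py's tools/MODEL.
from langchain.agents import AgentExecutor, Tool, ZeroShotAgent
from langchain.llms import OpenAI

llm = OpenAI(model="gpt-3.5-turbo-instruct")
tools = [Tool(name="echo", func=lambda q: q, description="Returns the input unchanged.")]

agent = ZeroShotAgent.from_llm_and_tools(llm=llm, tools=tools)
agent_executor = AgentExecutor.from_agent_and_tools(agent=agent, tools=tools, verbose=True)
print(agent_executor.run("Echo back the word 'ready'."))

The replacement line instead passes llm_function, a plain Python function, as llm; whether from_llm_and_tools accepts that depends on the installed LangChain version, so it is worth verifying against the pinned release.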
@@ -207,11 +208,11 @@ def main_interface(user_prompt, image=None, audio=None, doc=None, voice_only=Fal
 response = handle_input(user_prompt, image=image, audio=audio, doc=doc, websearch=websearch)
 
 if voice_only:
-
-    return
+    audio_output = play_voice_output(response)
+    return "Response generated.", audio_output
 else:
     return response, None
 
-# Launch the
+# Launch the UI
 demo = create_ui()
-demo.launch(
+demo.launch()
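In the voice-only path, the handler now synthesizes speech with play_voice_output and returns a short status string together with the audio, matching the (text, audio) shape of the non-voice return. create_ui() and demo.launch() suggest a Gradio app; below is a hypothetical sketch of how such a pair can drive two output components. It is not app.py's actual create_ui, and the respond stub stands in for main_interface.

# Hypothetical Gradio wiring, not app.py's create_ui(): shows how a (text, audio_path)
# pair like main_interface's return value maps onto two output components.
import gradio as gr

def respond(prompt, voice_only=False):
    # Stand-in for main_interface: returns (status_text, audio_path_or_None).
    return ("Response generated." if voice_only else f"Echo: {prompt}"), None

demo = gr.Interface(
    fn=respond,
    inputs=[gr.Textbox(label="Prompt"), gr.Checkbox(label="Voice only")],
    outputs=[gr.Textbox(label="Response"), gr.Audio(label="Spoken response")],
)
demo.launch()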