Spaces:
Sleeping
Sleeping
from langchain_core.runnables import RunnablePassthrough
from langchain_core.output_parsers import StrOutputParser
from langchain_community.chat_models import ChatOllama
from langchain_core.prompts import ChatPromptTemplate
from langchain_pinecone import PineconeVectorStore
from langchain_community.embeddings import SentenceTransformerEmbeddings
def make_chain_llm(retriever, llm):
    """Build a RAG (retrieval-augmented generation) chain.

    Wires the given retriever and chat model into an LCEL pipeline:
    retrieve -> format documents -> fill prompt -> LLM -> plain string.

    Args:
        retriever: A LangChain retriever (runnable) that maps a question
            string to a list of documents with a ``page_content`` attribute.
        llm: A LangChain chat model (e.g. ChatOllama) invoked with the
            filled prompt.

    Returns:
        A runnable chain; invoking it with a question string returns the
        model's answer as a plain string.
    """

    def format_docs(docs):
        # Merge the retrieved documents into one text block, separated by
        # blank lines, for injection into the prompt's {context} slot.
        return "\n\n".join(doc.page_content for doc in docs)

    # NOTE(review): the Korean text in the literals below looks mojibake'd
    # by an upstream encoding round-trip; it is a runtime prompt string, so
    # it is reproduced byte-for-byte here — confirm the intended wording
    # against the original source. Roughly it instructs: answer only from
    # the provided context, and answer in Korean.
    template = (
        "\"```\" Below is an instruction that describes a task. Write a response that appropriately completes the request."
        "์ ์ํ๋ context์์๋ง ๋๋ตํ๊ณ  context์ ์๋ ๋ด์ฉ์ ์์ฑํ์ง๋ง"
        "make answer in korean. ํ๊ตญ์ด๋ก ๋๋ตํ์ธ์"
        "\n\nContext:\n{context}\n;"
        "Question: {question}"
        "\n\nAnswer:"
    )
    prompt = ChatPromptTemplate.from_template(template)

    # LCEL pipeline: the retriever output is formatted into {context};
    # the raw question passes through unchanged into {question}.
    rag_chain = (
        {"context": retriever | format_docs, "question": RunnablePassthrough()}
        | prompt
        | llm
        | StrOutputParser()
    )
    return rag_chain