diff --git a/Dockerfile b/Dockerfile new file mode 100644 index 0000000000000000000000000000000000000000..111a8d0bf1db7cfa05e3cc9a3fccd88a7e48350b --- /dev/null +++ b/Dockerfile @@ -0,0 +1,15 @@ +FROM python:3.10-slim + +RUN useradd -m -u 1000 user +RUN apt-get update +RUN apt-get install poppler-utils -y + +WORKDIR /app + +COPY --chown=user ./requirements.txt requirements.txt + +RUN pip install --no-cache-dir --upgrade -r requirements.txt + +COPY --chown=user . /app + +CMD ["uvicorn", "app:app", "--host", "0.0.0.0", "--port", "7860"] diff --git a/apis/V1/configs/__pycache__/database_config.cpython-310.pyc b/apis/V1/configs/__pycache__/database_config.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..5038e32083164a0400e0912da772a3052c5f6549 Binary files /dev/null and b/apis/V1/configs/__pycache__/database_config.cpython-310.pyc differ diff --git a/apis/V1/configs/__pycache__/llm_config.cpython-310.pyc b/apis/V1/configs/__pycache__/llm_config.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..1bbb0764ba4e359448c4d202f2743fa26b9b068f Binary files /dev/null and b/apis/V1/configs/__pycache__/llm_config.cpython-310.pyc differ diff --git a/apis/V1/configs/database_config.py b/apis/V1/configs/database_config.py new file mode 100644 index 0000000000000000000000000000000000000000..0078ed56f40fcb1f984a44f4ee16a01c75d6483d --- /dev/null +++ b/apis/V1/configs/database_config.py @@ -0,0 +1,16 @@ +import os +from dotenv import load_dotenv + +load_dotenv() +from pymongo.mongo_client import MongoClient +from pymongo.server_api import ServerApi + +# Create a new client and connect to the server +client = MongoClient(os.getenv("MONGODB_URL"), server_api=ServerApi("1")) +db = client.bandict_db +collection_name = db["user"] +try: + client.admin.command("ping") + print("Pinged your deployment. 
You successfully connected to MongoDB!") +except Exception as e: + print(e) diff --git a/apis/V1/configs/drant_config.py b/apis/V1/configs/drant_config.py new file mode 100644 index 0000000000000000000000000000000000000000..e7aea0879d7d8b11b3057fc00637a1b3023bfb5d --- /dev/null +++ b/apis/V1/configs/drant_config.py @@ -0,0 +1,34 @@ +from langchain_google_genai import GoogleGenerativeAI, GoogleGenerativeAIEmbeddings +from langchain_community.document_loaders import TextLoader +from langchain_qdrant import Qdrant +from langchain_text_splitters import CharacterTextSplitter +import qdrant_client +from .vector_embedding_config import embeddings +import os + +url = os.getenv("QDRANT_URL") +qdrant_api_key = os.getenv("QDRANT_API_KEY") +client = qdrant_client.QdrantClient(url, api_key=qdrant_api_key) + +# collection_config = qdrant_client.http.models.VectorParams( +# size=768, # 768 for instructor-xl, 1536 for OpenAI +# distance=qdrant_client.http.models.Distance.COSINE, +# ) +# client.recreate_collection( +# collection_name="BANDict", +# vectors_config=collection_config, +# ) +# vectorstore = Qdrant( +# client=client, +# collection_name="BANDict", +# embeddings=embeddings, +# ) + +# loader = TextLoader("./note.txt") +# documents = loader.load() +# text_splitter = CharacterTextSplitter( +# chunk_size=100, +# chunk_overlap=20, +# ) +# docs = text_splitter.split_documents(documents) +# vectorstore.add_documents(docs) diff --git a/apis/V1/configs/llm_config.py b/apis/V1/configs/llm_config.py new file mode 100644 index 0000000000000000000000000000000000000000..e7a69d6c98abee491ef56fc14d03418fb850bf4d --- /dev/null +++ b/apis/V1/configs/llm_config.py @@ -0,0 +1,19 @@ +from langchain_google_genai import GoogleGenerativeAI +import os +from langchain.globals import set_llm_cache +from langchain.cache import InMemoryCache + +GOOGLE_API_KEY = os.getenv("GOOGLE_API_KEY") +# llm = GoogleGenerativeAI( +# model="models/gemini-pro", +# temperature=0, +# verbose=True, +# 
google_api_key=GOOGLE_API_KEY, +# ) +llm = GoogleGenerativeAI( + model="gemini-1.5-flash", + temperature=0, + verbose=True, + google_api_key=GOOGLE_API_KEY, +) +set_llm_cache(InMemoryCache()) diff --git a/apis/V1/configs/vector_embedding_config.py b/apis/V1/configs/vector_embedding_config.py new file mode 100644 index 0000000000000000000000000000000000000000..77e7689b8ef08a878a4d937a4f7c82a95311c3bb --- /dev/null +++ b/apis/V1/configs/vector_embedding_config.py @@ -0,0 +1,8 @@ +from langchain_google_genai import GoogleGenerativeAIEmbeddings +import os + +GOOGLE_API_KEY = os.getenv("GOOGLE_API_KEY") + +embeddings = GoogleGenerativeAIEmbeddings( + model="models/embedding-001", google_api_key=GOOGLE_API_KEY +) diff --git a/apis/V1/controllers/__pycache__/auth_controller.cpython-310.pyc b/apis/V1/controllers/__pycache__/auth_controller.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..0867f88a525fc4e921e86d966a78c949b9658e7a Binary files /dev/null and b/apis/V1/controllers/__pycache__/auth_controller.cpython-310.pyc differ diff --git a/apis/V1/controllers/__pycache__/word_controller.cpython-310.pyc b/apis/V1/controllers/__pycache__/word_controller.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..2bee335c0fadaf69ad8db7263ff9d883fcdb8b14 Binary files /dev/null and b/apis/V1/controllers/__pycache__/word_controller.cpython-310.pyc differ diff --git a/apis/V1/controllers/auth_controller.py b/apis/V1/controllers/auth_controller.py new file mode 100644 index 0000000000000000000000000000000000000000..39f00c6c11669e872312b82a4a4254c69f7589e4 --- /dev/null +++ b/apis/V1/controllers/auth_controller.py @@ -0,0 +1,35 @@ +from fastapi import HTTPException, status +from ..models.users import User, UserSchema +from ..configs.database_config import db +from ..interfaces.auth_interface import Credential +from ..providers import jwt as jwt_provider +from ..middlewares.auth_middleware import get_current_user +import 
jwt + + +collection_name = db["user"] + + +def login_control(token): + if not token: + raise HTTPException( + status_code=status.HTTP_401_UNAUTHORIZED, + detail="Authorization Token is required", + ) + decoded_token = jwt.decode(token, options={"verify_signature": False}) + user = { + "name": decoded_token["name"], + "email": decoded_token["email"], + "picture": decoded_token["picture"], + } + user = User(**user) + + existing_user = UserSchema.find_by_email(user.email) + if not existing_user: + user_id = UserSchema(**user.dict()).create() + else: + user_id = existing_user["_id"] + + token = jwt_provider.encrypt({"id": str(user_id)}) + + return token diff --git a/apis/V1/controllers/word_controller.py b/apis/V1/controllers/word_controller.py new file mode 100644 index 0000000000000000000000000000000000000000..f5d39f226758b8538cecefdaeac0f016a809762f --- /dev/null +++ b/apis/V1/controllers/word_controller.py @@ -0,0 +1,68 @@ +from fastapi import APIRouter, Depends, HTTPException +from typing import Annotated +from ..middlewares.auth_middleware import get_current_user +from ..models.users import User +from ..models.words import WordSchema +from ..configs.database_config import db +from ..utils.response_fmt import jsonResponseFmt +from bson import ObjectId +import random + +router = APIRouter(prefix="/word", tags=["Words"]) +collection = db["word"] + +user_dependency = Annotated[User, Depends(get_current_user)] + + +def list_word_controlller(user): + user_id = user.get("id") + try: + print("user_id", user_id) + words = collection.find({"user_id": user_id}) + words = WordSchema.read_all_words_by_user_id(user_id) + return words + + except Exception as e: + raise HTTPException(status_code=500, detail=str(e)) + + +def add_word_controller(user, word): + user_id = user.get("id") + try: + existing_word = WordSchema.check_existing_word(word.word, user_id) + if existing_word: + return jsonResponseFmt(None, msg="Existed", code=400) + random.shuffle(word.options) + 
WordSchema(**word.dict()).create(user_id) + return jsonResponseFmt(None, code=201) + except Exception as e: + return jsonResponseFmt(None, msg=str(e), code=500) + + +def update_word_controller(user, word_id, word_data): + user_id = user.get("id") + try: + print("user", user) + print("word_id", word_id) + print("word_data", word_data) + + word_data.user_id = user_id + print("word_data", word_data.dict()) + WordSchema(**word_data.dict()).update(str(word_id)) + return jsonResponseFmt(None, code=200) + except Exception as e: + return jsonResponseFmt(None, msg=str(e), code=500) + + +def delete_word_controller(user, word_id): + user_id = user.get("id") + try: + existing_word = collection.find_one( + {"_id": ObjectId(word_id), "user_id": user_id} + ) + if not existing_word: + return jsonResponseFmt(None, msg="Word not found", code=404) + collection.delete_one({"_id": ObjectId(word_id)}) + return jsonResponseFmt(None, code=200) + except Exception as e: + return jsonResponseFmt(None, msg=str(e), code=500) diff --git a/apis/V1/interfaces/__pycache__/auth_interface.cpython-310.pyc b/apis/V1/interfaces/__pycache__/auth_interface.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..8107bba24fc12245c0a90d41cefe66ac98710f90 Binary files /dev/null and b/apis/V1/interfaces/__pycache__/auth_interface.cpython-310.pyc differ diff --git a/apis/V1/interfaces/__pycache__/llm_interface.cpython-310.pyc b/apis/V1/interfaces/__pycache__/llm_interface.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..1ad86e3976dd71b5e4ef07f19ee6eb0e1a684083 Binary files /dev/null and b/apis/V1/interfaces/__pycache__/llm_interface.cpython-310.pyc differ diff --git a/apis/V1/interfaces/auth_interface.py b/apis/V1/interfaces/auth_interface.py new file mode 100644 index 0000000000000000000000000000000000000000..5c556248c28c253562fcdf72e7537cc6b4d48f92 --- /dev/null +++ b/apis/V1/interfaces/auth_interface.py @@ -0,0 +1,18 @@ +from pydantic import 
BaseModel, Field + + +class Credential(BaseModel): + credential: str = Field(..., example="F9P/3?@q2!vq") + + +class _LoginResponseInterface(BaseModel): + token: str = Field(..., title="JWT Token") + + +class LoginResponseInterface(BaseModel): + msg: str = Field(..., title="Message") + data: _LoginResponseInterface = Field(..., title="User Data") + + +class AuthInterface(BaseModel): + gtoken: str = Field(..., title="Google Access-Token") diff --git a/apis/V1/interfaces/llm_interface.py b/apis/V1/interfaces/llm_interface.py new file mode 100644 index 0000000000000000000000000000000000000000..88d244267acab2085954bf4deccb13d3ede6d742 --- /dev/null +++ b/apis/V1/interfaces/llm_interface.py @@ -0,0 +1,21 @@ +from pydantic import BaseModel, Field + + +class TranslateOutput(BaseModel): + answer: str = Field(description="translated word") + word: str = Field(description="word to be translated") + + +class GrammarlyOutput(BaseModel): + corrected_sentence: str = Field(description="corrected sentence") + incorrect: list = Field(description="list of incorrect words or phrases") + correct: list = Field(description="list of correct words or phrases") + + +class ClassifyDocumentOutput(BaseModel): + type: str = Field(description="document type RnD or Business") + + +class ClassifyAndSummarizeOutput(BaseModel): + summary: str = Field(description="summary of the document") + type: str = Field(description="document type RnD or Business") diff --git a/apis/V1/middlewares/__pycache__/auth_middleware.cpython-310.pyc b/apis/V1/middlewares/__pycache__/auth_middleware.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..31afda634af4f9deab3c282edfe880b61c27c781 Binary files /dev/null and b/apis/V1/middlewares/__pycache__/auth_middleware.cpython-310.pyc differ diff --git a/apis/V1/middlewares/auth_middleware.py b/apis/V1/middlewares/auth_middleware.py new file mode 100644 index 0000000000000000000000000000000000000000..bd3d76adc544e73355664ab03e56becd5abe7fa7 
--- /dev/null +++ b/apis/V1/middlewares/auth_middleware.py @@ -0,0 +1,41 @@ +from typing import Annotated +from fastapi import Depends +from fastapi.security import HTTPAuthorizationCredentials, HTTPBearer +from ..schemas.user_schema import getUser +from ..providers import jwt +from ..configs.database_config import db +from bson import ObjectId +from jose import JWTError +from ..utils.response_fmt import jsonResponseFmt + +security = HTTPBearer() + +collection = db["user"] + + +# Get the auth token from the request header, +# parse token to get user data, and return the user data. +def get_current_user( + credentials: Annotated[HTTPAuthorizationCredentials, Depends(security)] +): + try: + # Get token + token = credentials.credentials + # If Authorization is not provided, return Un-authorized. + if not token: + return jsonResponseFmt(None, msg="Authentication failed", code=401) + + # Decrypted token to get user data. + payload = jwt.decrypt(token) + print("payload", payload) + user_id: str = payload["id"] + if not user_id: + return jsonResponseFmt(None, msg="Authentication failed", code=401) + user = collection.find_one({"_id": ObjectId(user_id)}) + # If user is not found, return Un-authorized. 
+ if not user: + return jsonResponseFmt(None, msg="Authentication failed", code=401) + + return getUser(user) + except JWTError: + return jsonResponseFmt(None, msg="Authentication failed", code=401) diff --git a/apis/V1/models/__pycache__/users.cpython-310.pyc b/apis/V1/models/__pycache__/users.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..175f9090b23e20b0adf36f156a037ace0056b433 Binary files /dev/null and b/apis/V1/models/__pycache__/users.cpython-310.pyc differ diff --git a/apis/V1/models/__pycache__/words.cpython-310.pyc b/apis/V1/models/__pycache__/words.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..0d08b1fe6020dd5bfbddb73aec15dec859af5f4e Binary files /dev/null and b/apis/V1/models/__pycache__/words.cpython-310.pyc differ diff --git a/apis/V1/models/users.py b/apis/V1/models/users.py new file mode 100644 index 0000000000000000000000000000000000000000..ea0c6cdaa246e9e4f0327fcff8642db502c96a00 --- /dev/null +++ b/apis/V1/models/users.py @@ -0,0 +1,84 @@ +from pydantic import BaseModel, Field, EmailStr +from typing import Dict, AnyStr +from ..configs.database_config import db +from ..utils.utils import get_current_time +from bson import ObjectId + + +collection = db["user"] + + +class User(BaseModel): + name: str = Field("", title="User Name") + email: EmailStr = Field("", title="User Email") + picture: str = Field("", title="User Picture") + + class Config: + schema_extra = { + "example": { + "name": "John Doe", + "email": "johnUS192@gmail.com", + "picture": "https://example.com/picture.jpg", + } + } + + +class UserSchema: + def __init__( + self, + id: AnyStr = None, + name: AnyStr = "", + email: AnyStr = "", + picture: AnyStr = "", + created_at=get_current_time(), + ): + self.id = id + self.name = name + self.email = email + self.picture = picture + self.created_at = created_at + + def to_dict(self): + data_dict = { + "name": self.name, + "email": self.email, + "picture": 
self.picture, + "created_at": self.created_at, + } + if self.id is not None: + data_dict["_id"] = self.id + return data_dict + + @staticmethod + def from_dict(data: Dict): + return UserSchema( + id=data.get("_id"), + name=data.get("name"), + email=data.get("email"), + picture=data.get("picture"), + created_at=data.get("created_at"), + ) + + @staticmethod + def find_all(): + data = collection.find() + return [UserSchema.from_dict(user).to_dict for user in data] + + @staticmethod + def find_by_id(id): + data = collection.find_one({"_id": ObjectId(id)}) + if data is None: + return None + return UserSchema.from_dict(data).to_dict() + + @staticmethod + def find_by_email(email): + data = collection.find_one({"email": email}) + if data is None: + return None + return UserSchema.from_dict(data).to_dict() + + def create(self): + user = collection.insert_one(self.to_dict()) + user_id = str(user.inserted_id) + return user_id diff --git a/apis/V1/models/words.py b/apis/V1/models/words.py new file mode 100644 index 0000000000000000000000000000000000000000..32f46ab0fbd84a8ce4fb228825297b5fe41711f8 --- /dev/null +++ b/apis/V1/models/words.py @@ -0,0 +1,96 @@ +from pydantic import BaseModel, Field +from typing import Optional, AnyStr, List, Dict +from ..utils.utils import get_current_time +from ..configs.database_config import db +from bson import ObjectId + +collection = db["word"] + + +class Word(BaseModel): + id: Optional[str] = Field(title="ID") + word: str = Field(title="Word") + answer: str = Field(title="Meaning") + options: list = Field(title="Options") + user_id: Optional[str] = Field(title="User ID") + + class Config: + schema_extra = { + "example": { + "id": "666460100c23ec4225cb2bc3", + "word": "Apple", + "answer": "táo", + "options": ["dừa", "thanh long", "ổi", "táo"], + "user_id": "6661455703d07f73ba", + } + } + + +class WordSchema: + def __init__( + self, + id: AnyStr = None, + word: AnyStr = "", + answer: AnyStr = "", + options: List = [AnyStr], + user_id: AnyStr = 
"", + created_at=get_current_time(), + ): + self.id = id + self.word = word + self.answer = answer + self.options = options + self.user_id = user_id + self.created_at = created_at + + def to_dict(self): + data_dict = { + "word": self.word, + "answer": self.answer, + "options": self.options, + "user_id": self.user_id, + "created_at": self.created_at, + } + if self.id is not None: + data_dict["_id"] = str(self.id) + return data_dict + + @staticmethod + def from_dict(data: Dict): + return WordSchema( + id=data.get("_id"), + word=data.get("word"), + answer=data.get("answer"), + options=data.get("options"), + user_id=data.get("user_id"), + created_at=data.get("created_at"), + ) + + def create(self, user_id: str): + word_dict = self.to_dict() + word_dict["user_id"] = user_id + collection.insert_one(word_dict) + + @staticmethod + def read_all_words_by_user_id(user_id: str): + data = collection.find({"user_id": user_id}) + return [WordSchema.from_dict(d).to_dict() for d in data] + + @staticmethod + def read_word_by_id(word_id: str, user_id: str): + data = collection.find_one({"_id": ObjectId(word_id), "user_id": user_id}) + return WordSchema.from_dict(data).to_dict() + + @staticmethod + def check_existing_word(word: str, user_id: str): + return collection.find_one({"word": word, "user_id": user_id}) + + def update(self, word_id: str): + collection.update_one( + {"_id": ObjectId(word_id)}, + {"$set": self.to_dict()}, + ) + + @staticmethod + def delete(word_id: str): + collection.delete_one({"_id": ObjectId(word_id)}) diff --git a/apis/V1/prompts/__pycache__/document_type_clf.cpython-310.pyc b/apis/V1/prompts/__pycache__/document_type_clf.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..0885b709d0881d55ef3a16450a5bbabfc622840a Binary files /dev/null and b/apis/V1/prompts/__pycache__/document_type_clf.cpython-310.pyc differ diff --git a/apis/V1/prompts/__pycache__/grammarly.cpython-310.pyc 
b/apis/V1/prompts/__pycache__/grammarly.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a25c1bc83b480b536d3c303bf9df736708186f28 Binary files /dev/null and b/apis/V1/prompts/__pycache__/grammarly.cpython-310.pyc differ diff --git a/apis/V1/prompts/__pycache__/summarize_doc.cpython-310.pyc b/apis/V1/prompts/__pycache__/summarize_doc.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..9d56e86fc9dd645f118720ef0c6a043ea5baf292 Binary files /dev/null and b/apis/V1/prompts/__pycache__/summarize_doc.cpython-310.pyc differ diff --git a/apis/V1/prompts/__pycache__/translate_abstract_BIZ.cpython-310.pyc b/apis/V1/prompts/__pycache__/translate_abstract_BIZ.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..54d3d2f96a001254721102da6b584d5baa9f5b84 Binary files /dev/null and b/apis/V1/prompts/__pycache__/translate_abstract_BIZ.cpython-310.pyc differ diff --git a/apis/V1/prompts/__pycache__/translate_abstract_RnD.cpython-310.pyc b/apis/V1/prompts/__pycache__/translate_abstract_RnD.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..534fd1636ff8711f1a88c4b7cd8d0f61be22e9fd Binary files /dev/null and b/apis/V1/prompts/__pycache__/translate_abstract_RnD.cpython-310.pyc differ diff --git a/apis/V1/prompts/__pycache__/translate_detail.cpython-310.pyc b/apis/V1/prompts/__pycache__/translate_detail.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c1d551835a21d10a4b221a667e732733a7937328 Binary files /dev/null and b/apis/V1/prompts/__pycache__/translate_detail.cpython-310.pyc differ diff --git a/apis/V1/prompts/__pycache__/translate_test.cpython-310.pyc b/apis/V1/prompts/__pycache__/translate_test.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a58ea1cc8dd4091644bb2e95ce467d4be4db00cd Binary files /dev/null and b/apis/V1/prompts/__pycache__/translate_test.cpython-310.pyc differ 
diff --git a/apis/V1/prompts/document_type_clf.py b/apis/V1/prompts/document_type_clf.py new file mode 100644 index 0000000000000000000000000000000000000000..ebc2736f632a519b65c3bad597e2a06954fa3ae0 --- /dev/null +++ b/apis/V1/prompts/document_type_clf.py @@ -0,0 +1,78 @@ +from langchain.prompts import PromptTemplate, ChatPromptTemplate +from langchain_core.output_parsers import JsonOutputParser +from langchain_core.messages import HumanMessage, SystemMessage +from ..configs.llm_config import llm +from ..interfaces.llm_interface import ClassifyDocumentOutput + +# System message for initial context and instructions +system_instructions = SystemMessage( + content="""You are an expert in classifying document types. Identify the type of document based on short content as Business or RnD.""" +) + +# Example system message to clarify format with examples +system_examples = SystemMessage( + content=""" +Examples: + +1. +Document: +"A central goal of machine learning is the development of systems that can solve many problems in as many data domains as possible. Current architectures, however, cannot be applied beyond a small set of stereotyped settings, as they bake in domain & task assumptions or scale poorly to large inputs or outputs. In this work, we propose Perceiver IO, a general-purpose architecture that handles data from arbitrary settings while scaling linearly with the size of inputs and outputs." +Type: RnD + +2. +Document: +"The global economy is projected to grow at a slower pace this year, as rising inflation and supply chain disruptions continue to impact markets worldwide. Central banks are adjusting their monetary policies in response to these challenges." +Type: Business + +3. +Document: +"The company’s latest financial report shows a significant increase in net income, driven by cost reduction strategies and increased sales in emerging markets. Investors are optimistic about the future performance given these strong quarterly results." 
+Type: Business + +4. +Document: +"Our research introduces a novel approach to quantum computing, focusing on error correction and scalability. The proposed methods significantly improve the stability of qubits, which is crucial for the practical implementation of quantum algorithms." +Type: RnD + +5. +Document: +"An analysis of the stock market reveals that technology stocks have outperformed other sectors over the past quarter. This trend is attributed to the rapid digital transformation across industries and the increasing demand for tech solutions." +Type: Business + +6. +Document: +"The startup has launched an innovative mobile app designed to streamline project management for remote teams. The app includes features such as real-time collaboration, task tracking, and performance analytics, making it a comprehensive tool for businesses." +Type: Business +""" +) + +# Function to create the prompt template dynamically based on input +def create_prompt(document): + user_message = HumanMessage( + content=f""" +Document: {document} +Output format: +"type": type of document, + +If the document is Business, please type "Business" else type "RnD". 
+{{format_instructions}} +""" + ) + # Combine all messages into a conversation + messages = [system_instructions, system_examples, user_message] + chat_prompt = ChatPromptTemplate(messages=messages) + + return chat_prompt.format() + +# Use the JSON output parser +parser = JsonOutputParser(pydantic_object=ClassifyDocumentOutput) + +# Create the prompt template +prompt = PromptTemplate( + input_variables=["document"], + template=create_prompt("{document}"), + partial_variables={"format_instructions": parser.get_format_instructions()}, +) + +# Chain the prompt with the LLM and parser +chain_type_classify = prompt | llm | parser diff --git a/apis/V1/prompts/grammarly.py b/apis/V1/prompts/grammarly.py new file mode 100644 index 0000000000000000000000000000000000000000..e382e6a29e2c409ef58c549c02e570acae6537ca --- /dev/null +++ b/apis/V1/prompts/grammarly.py @@ -0,0 +1,82 @@ +from langchain.prompts import PromptTemplate, ChatPromptTemplate +from langchain_core.output_parsers import JsonOutputParser +from langchain_core.messages import AIMessage, HumanMessage, SystemMessage +from ..configs.llm_config import llm +from ..interfaces.llm_interface import GrammarlyOutput + +# System message for initial context and instructions +system_instructions = SystemMessage( + content="""You are an expert in checking grammar errors. Identify the errors and provide corrections.""" +) + +# Example system message to clarify format with examples +system_examples = SystemMessage( + content=""" +Examples: + +1. +Sentence: "She don't know nothing about the new project, and there is many details that needs to be explained." +Incorrect: [don't know nothing, is, needs] +Correct: [doesn't know anything, are, need] +Corrected sentence: "She doesn't know anything about the new project, and there are many details that need to be explained." + +2. +Sentence: "He go to the store every day, but he never buy nothing." 
+Incorrect: [go, buy nothing] +Correct: [goes, buys anything] +Corrected sentence: "He goes to the store every day, but he never buys anything." + +3. +Sentence: "She quickly ran to the store, bought some groceries, and returning home." +Incorrect: [returning] +Correct: [returned] +Corrected sentence: "She quickly ran to the store, bought some groceries, and returned home." + +4. +Sentence: "Each of the students have completed their assignments, but the teacher is not satisfied with their works." +Incorrect: [have, works] +Correct: [has, work] +Corrected sentence: "Each of the students has completed their assignments, but the teacher is not satisfied with their work." + +5. +Sentence: "If he was more careful, he will not make so many mistakes, which is causing problems for the team." +Incorrect: [was, will, is] +Correct: [were, would, are] +Corrected sentence: "If he were more careful, he would not make so many mistakes, which are causing problems for the team." +""" +) + + +# Function to create the prompt template dynamically based on input +def create_prompt(sentence): + user_message = HumanMessage( + content=f""" +My sentence: {sentence} +Output format: +"sentence": my sentence, +"incorrect": [list of incorrect words or phrases], +"correct": [list of correct words or phrases], +"corrected_sentence": corrected sentence + +If no corrections are needed, return the original sentence as the corrected sentence and empty lists for incorrect and correct. 
+{{format_instructions}} +""" + ) + # Combine all messages into a conversation + messages = [system_instructions, system_examples, user_message] + chat_prompt = ChatPromptTemplate(messages=messages) + return chat_prompt.format() + + +# Use the JSON output parser +parser = JsonOutputParser(pydantic_object=GrammarlyOutput) + +# Create the prompt template +prompt = PromptTemplate( + input_variables=["sentence"], + template=create_prompt("{sentence}"), + partial_variables={"format_instructions": parser.get_format_instructions()}, +) + +# Chain the prompt with the LLM and parser +chain_grammarly = prompt | llm | parser diff --git a/apis/V1/prompts/summarize_doc.py b/apis/V1/prompts/summarize_doc.py new file mode 100644 index 0000000000000000000000000000000000000000..c96531cfa2b785a00d7eebe45b9c804f86733562 --- /dev/null +++ b/apis/V1/prompts/summarize_doc.py @@ -0,0 +1,19 @@ +from ..configs.llm_config import llm +from langchain_core.output_parsers import JsonOutputParser + +from langchain_core.prompts import PromptTemplate +from ..interfaces.llm_interface import ClassifyAndSummarizeOutput + +# Define prompt +prompt_template = """Write a concise summary of the following: +"{text}" and classify the document type as Business or RnD. +Return the answer as: \n{format_instructions}. 
+""" +parser = JsonOutputParser(pydantic_object=ClassifyAndSummarizeOutput) +prompt = PromptTemplate.from_template( + template=prompt_template, + partial_variables={"format_instructions": parser.get_format_instructions()}, +) + +# Define LLM chain +chain_summarize = prompt | llm | parser diff --git a/apis/V1/prompts/translate.py b/apis/V1/prompts/translate.py new file mode 100644 index 0000000000000000000000000000000000000000..95a573a6773ecbdd759864fe075cfec283b35513 --- /dev/null +++ b/apis/V1/prompts/translate.py @@ -0,0 +1,60 @@ +from langchain.prompts import PromptTemplate, ChatPromptTemplate +from langchain_core.output_parsers import JsonOutputParser +from langchain_core.messages import AIMessage, HumanMessage, SystemMessage +from ..configs.llm_config import llm +from ..interfaces.llm_interface import TranslateOutput + +# System message for initial context and instructions +system_instructions = SystemMessage( + content="""You are a master in translation. You are given a sentence and a word. + Translate the word to Vietnamese based on the context of the sentence. """ +) + +# Example system message to clarify format with examples +system_examples = SystemMessage( + content=""" +Example 1: + My sentence: "outperforms a Transformer-based BERT baseline on the GLUE language benchmark" + My word: "outperforms" + Answer: "vượt trội" +Example 2: + My sentence: "I love my dog so much, I was interested in him the first time I saw him" + My word: "so much" + Answer: "rất nhiều" +""" +) + + +# Function to create the prompt template dynamically based on input +def create_prompt(sentence, word): + user_message = HumanMessage( + content=f""" +My sentence: {sentence} +My word: {word} + +If this word is not in dictionary, return "Không tìm thấy từ này trong từ điển". +Else, Translate the word to Vietnamese based on the context of the sentence. Return the answer as the translated word. +I want output to have the format: "answer": translated word, "options": list of options. 
+Options is a list of words consisting of the answer and 3 random words which can be used to create flashcards for learning. +""" + ) + ai_response = AIMessage(content="Answer: [Your AI will provide the answer here]") + # Combine all messages into a conversation + messages = [system_instructions, system_examples, user_message, ai_response] + chat_prompt = ChatPromptTemplate(messages=messages) + + return chat_prompt.format() + + +# Use the JSON output parser +parser = JsonOutputParser(pydantic_object=TranslateOutput) + +# Create the prompt template +prompt = PromptTemplate( + input_variables=["sentence", "word"], + template=create_prompt("{sentence}", "{word}"), + partial_variables={"format_instructions": parser.get_format_instructions()}, +) + +# Chain the prompt with the LLM and parser +chain = prompt | llm | parser diff --git a/apis/V1/prompts/translate_abstract_BIZ.py b/apis/V1/prompts/translate_abstract_BIZ.py new file mode 100644 index 0000000000000000000000000000000000000000..8d6cb88ce7d650a47430e73fd359100f23bc2484 --- /dev/null +++ b/apis/V1/prompts/translate_abstract_BIZ.py @@ -0,0 +1,74 @@ +from langchain.prompts import PromptTemplate, ChatPromptTemplate +from langchain_core.output_parsers import JsonOutputParser +from langchain_core.messages import HumanMessage, SystemMessage +from ..configs.llm_config import llm +from ..interfaces.llm_interface import TranslateOutput + +# System message for initial context and instructions +system_instructions = SystemMessage( + content="""You are an expert in translating English into Vietnamese for Finance and Economics topics. Translate the given word or phrase based on the context of the abstract and sentence. +1. Read the abstract to understand the context. +2. Translate the given word or phrase into Vietnamese using the context. +3. If the word or phrase is not in the dictionary, return "Không tìm thấy từ này trong từ điển". +4. If the word or phrase is an acronym, expand it and provide the translation. 
+""" +) + +# Example system message to clarify format with examples +system_examples = SystemMessage( + content=""" +**Examples:** + +Abstract: "The efficient market hypothesis (EMH) suggests that financial markets are informationally efficient, meaning that asset prices fully reflect all available information." +Sentence: "The efficient market hypothesis (EMH) suggests that financial markets are informationally efficient." +Word: "efficient" +Answer: "hiệu quả" + +Abstract: "Quantitative easing (QE) is a monetary policy whereby a central bank purchases government securities or other securities from the market in order to increase the money supply and encourage lending and investment." +Sentence: "Quantitative easing (QE) is a monetary policy whereby a central bank purchases government securities." +Word: "Quantitative easing" +Answer: "Nới lỏng định lượng" + +Abstract: "Inflation is the rate at which the general level of prices for goods and services rises, eroding purchasing power." +Sentence: "Inflation is the rate at which the general level of prices for goods and services rises." +Word: "Inflation" +Answer: "lạm phát" + +Abstract: "GDP, or Gross Domestic Product, measures the total value of all goods and services produced within a country in a given period." +Sentence: "GDP, or Gross Domestic Product, measures the total value of all goods and services produced within a country." +Word: "GDP" +Answer: "Tổng sản phẩm quốc nội" +""" +) + +# Function to create the prompt template dynamically based on input +def create_prompt(abstract, sentence, word): + user_message = HumanMessage( + content=f""" +Abstract: {abstract} +Sentence: {sentence} +Word: {word} + +Translate the word to Vietnamese based on the context of the sentence and abstract. +If the word is not in the dictionary, return "Không tìm thấy từ này trong từ điển". +If it is an acronym, expand and translate it. 
+{{format_instructions}} +""" + ) + messages = [system_instructions, system_examples, user_message] + chat_prompt = ChatPromptTemplate(messages=messages) + + return chat_prompt.format() + +# Use the JSON output parser +parser = JsonOutputParser(pydantic_object=TranslateOutput) + +# Create the prompt template +prompt = PromptTemplate( + input_variables=["abstract", "sentence", "word"], + template=create_prompt("{abstract}", "{sentence}", "{word}"), + partial_variables={"format_instructions": parser.get_format_instructions()}, +) + +# Chain the prompt with the LLM and parser +chain_BIZ = prompt | llm | parser diff --git a/apis/V1/prompts/translate_abstract_RnD.py b/apis/V1/prompts/translate_abstract_RnD.py new file mode 100644 index 0000000000000000000000000000000000000000..460a5eee4cf589d441175081f1522ada6079e270 --- /dev/null +++ b/apis/V1/prompts/translate_abstract_RnD.py @@ -0,0 +1,71 @@ +from langchain.prompts import PromptTemplate, ChatPromptTemplate +from langchain_core.output_parsers import JsonOutputParser +from langchain_core.messages import HumanMessage, SystemMessage +from ..configs.llm_config import llm +from ..interfaces.llm_interface import TranslateOutput + +system_instructions = SystemMessage( + content="""You are an expert in translating English into Vietnamese for R&D topics. Translate the given word or phrase based on the context of the abstract and sentence. +1. Read the abstract to understand the context. +2. Translate the given word or phrase into Vietnamese using the context. +3. If the word or phrase is not in the dictionary, return "Không tìm thấy từ này trong từ điển". +4. If the word or phrase is an acronym, expand it and provide the translation. +""" +) + +system_examples = SystemMessage( + content=""" +**Examples:** + +Abstract: "In recent years, there has been significant progress in the development of Transformer-based models for natural language processing tasks. 
"""Context-aware translation chain specialized for R&D (research paper) abstracts."""
from langchain.prompts import PromptTemplate, ChatPromptTemplate
from langchain_core.output_parsers import JsonOutputParser
from langchain_core.messages import HumanMessage, SystemMessage
from ..configs.llm_config import llm
from ..interfaces.llm_interface import TranslateOutput

# Role/task framing: domain-expert translator for R&D topics.
system_instructions = SystemMessage(
    content="""You are an expert in translating English into Vietnamese for R&D topics. Translate the given word or phrase based on the context of the abstract and sentence.
1. Read the abstract to understand the context.
2. Translate the given word or phrase into Vietnamese using the context.
3. If the word or phrase is not in the dictionary, return "Không tìm thấy từ này trong từ điển".
4. If the word or phrase is an acronym, expand it and provide the translation.
"""
)

# Few-shot examples demonstrating the expected input/answer format.
system_examples = SystemMessage(
    content="""
**Examples:**

Abstract: "In recent years, there has been significant progress in the development of Transformer-based models for natural language processing tasks. These models, such as BERT, have set new benchmarks in various language understanding tasks."
Sentence: "outperforms a Transformer-based BERT baseline on the GLUE language benchmark"
Word: "outperforms"
Answer: "vượt trội"

Abstract: "This paper explores the use of reinforcement learning in optimizing neural network architectures. We demonstrate that our approach achieves state-of-the-art results on several benchmark datasets."
Sentence: "Our approach achieves state-of-the-art results on several benchmark datasets."
Word: "approach"
Answer: "phương pháp"

Abstract: "The integration of machine learning techniques in medical diagnosis has shown promising results. Our study focuses on the application of deep learning to detect early signs of diseases."
Sentence: "The integration of machine learning techniques in medical diagnosis has shown promising results."
Word: "integration"
Answer: "tích hợp"

Abstract: "Transformer-based models like BERT have transformed NLP tasks by achieving new state-of-the-art results."
Sentence: "Transformer-based models like BERT have transformed NLP tasks."
Word: "BERT"
Answer: "Bidirectional Encoder Representations from Transformers"
"""
)


def create_prompt(abstract, sentence, word):
    """Render the system/example/request transcript as a single prompt string."""
    request = HumanMessage(
        content=f"""
Abstract: {abstract}
Sentence: {sentence}
Word: {word}

Translate the word to Vietnamese based on the context of the sentence and abstract.
If the word is not in the dictionary, return "Không tìm thấy từ này trong từ điển".
If it is an acronym, expand and translate it.
{{format_instructions}}
"""
    )
    transcript = ChatPromptTemplate(
        messages=[system_instructions, system_examples, request]
    )
    return transcript.format()


# Parse the model output into the TranslateOutput JSON shape.
parser = JsonOutputParser(pydantic_object=TranslateOutput)

# NOTE(review): the transcript is rendered once at import time with literal
# "{abstract}"/"{sentence}"/"{word}" placeholders filled per call.
prompt = PromptTemplate(
    input_variables=["abstract", "sentence", "word"],
    template=create_prompt("{abstract}", "{sentence}", "{word}"),
    partial_variables={"format_instructions": parser.get_format_instructions()},
)

# Runnable pipeline: prompt -> LLM -> JSON parser.
chain_RnD = prompt | llm | parser
"""Gemini-vision helper that explains a word using a user's PDF as visual context."""
import os
from langchain_core.prompts import PromptTemplate
import google.generativeai as genai
from pdf2image import convert_from_path
from langchain_core.output_parsers import JsonOutputParser

# Short explanation prompt; double braces render as literal JSON braces.
t_short = """
Requirement:
Give me information word: "{word}". Based on the information I provided. Return the information in the following Format and Example
Format:
{{
"word": the word you are looking for,
"explanation": the explanation of the word based on the context, topic or field of the document (vietnamese)
}}
Example:
{{
"word": loss function,
"explanation": Hàm mất mát là một hàm số đo lường sự khác biệt giữa giá trị dự đoán và giá trị thực tế của một mô hình học máy. Hàm mất mát càng nhỏ thì mô hình càng tốt.
}}
"""
# Module-level JSON parser shared by AskImage responses.
a = JsonOutputParser()


class AskImage:
    """Wraps a Gemini multimodal model; feeds it rendered PDF pages as context."""

    def __init__(self) -> None:
        # Requires GOOGLE_API_KEY in the environment (KeyError if missing).
        genai.configure(api_key=os.environ["GOOGLE_API_KEY"])
        self.model = genai.GenerativeModel("gemini-1.5-flash")
        self.prompt = PromptTemplate.from_template(t_short)
        # Rendered PDF page images; populated by explain_word.
        self.context = []

    def explain_word(self, word: str, path_context: str):
        """Explain *word* using the PDF at *path_context*; returns parsed JSON."""
        # Re-render the PDF into images on every call (pages become the context).
        self.context = convert_from_path(path_context)
        rendered = self.prompt.format(word=word)
        request_parts = [rendered] + self.context
        reply = self.model.generate_content(request_parts)
        return a.parse(reply.text)

    def ask(self, question: str):
        """Free-form follow-up question against the previously loaded context."""
        return self.model.generate_content([question] + self.context)
+""" +) + +system_examples = SystemMessage( + content=""" +**Examples:** + +Abstract: "In recent years, there has been significant progress in the development of Transformer-based models for natural language processing tasks. These models, such as BERT, have set new benchmarks in various language understanding tasks." +Sentence: "outperforms a Transformer-based BERT baseline on the GLUE language benchmark" +Word: "outperforms" +Answer: "vượt trội" + +Abstract: "This paper explores the use of reinforcement learning in optimizing neural network architectures. We demonstrate that our approach achieves state-of-the-art results on several benchmark datasets." +Sentence: "Our approach achieves state-of-the-art results on several benchmark datasets." +Word: "approach" +Answer: "phương pháp" + +Abstract: "The integration of machine learning techniques in medical diagnosis has shown promising results. Our study focuses on the application of deep learning to detect early signs of diseases." +Sentence: "The integration of machine learning techniques in medical diagnosis has shown promising results." +Word: "integration" +Answer: "tích hợp" + +Abstract: "Transformer-based models like BERT have transformed NLP tasks by achieving new state-of-the-art results." +Sentence: "Transformer-based models like BERT have transformed NLP tasks." +Word: "BERT" +Answer: "Bidirectional Encoder Representations from Transformers" +""" +) + +def create_prompt(abstract, sentence, word): + user_message = HumanMessage( + content=f""" +Abstract: {abstract} +Sentence: {sentence} +Word: {word} + +Translate the word to Vietnamese based on the context of the sentence and abstract. +If the word is not in the dictionary, return "Không tìm thấy từ này trong từ điển". +If it is an acronym, expand and translate it. +Return the answer as: "answer": Vietnamese word, "word": input word (English). 
+""" + ) + messages = [system_instructions, system_examples, user_message] + chat_prompt = ChatPromptTemplate(messages=messages) + + return chat_prompt.format() + +parser = JsonOutputParser(pydantic_object=TranslateOutput) + +prompt = PromptTemplate( + input_variables=["abstract", "sentence", "word"], + template=create_prompt("{abstract}", "{sentence}", "{word}"), + partial_variables={"format_instructions": parser.get_format_instructions()}, +) + +chain_test_RnD = prompt | llm | parser diff --git a/apis/V1/providers/__init__.py b/apis/V1/providers/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..c943df1c710de8b9705009d6f9e42c783ec7acb9 --- /dev/null +++ b/apis/V1/providers/__init__.py @@ -0,0 +1,5 @@ +from .jwt_provider import JWTProvider +from .llm_provider import chain + +jwt = JWTProvider() +llm = chain diff --git a/apis/V1/providers/__pycache__/__init__.cpython-310.pyc b/apis/V1/providers/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..451d9ba6e2fe1dd995146461fb1f0cffe8eba265 Binary files /dev/null and b/apis/V1/providers/__pycache__/__init__.cpython-310.pyc differ diff --git a/apis/V1/providers/__pycache__/jwt_provider.cpython-310.pyc b/apis/V1/providers/__pycache__/jwt_provider.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e5d04203f182f0850ec4a6c883b009fdeaa2454a Binary files /dev/null and b/apis/V1/providers/__pycache__/jwt_provider.cpython-310.pyc differ diff --git a/apis/V1/providers/__pycache__/llm_provider.cpython-310.pyc b/apis/V1/providers/__pycache__/llm_provider.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a4780b0bfcef51812ac4133ac4f461913ca65650 Binary files /dev/null and b/apis/V1/providers/__pycache__/llm_provider.cpython-310.pyc differ diff --git a/apis/V1/providers/jwt_provider.py b/apis/V1/providers/jwt_provider.py new file mode 100644 index 
"""JWT encode/decode provider used by the auth middleware."""
from typing import AnyStr, Dict
import os
from fastapi import HTTPException, status
from jose import jwt, JWTError


class JWTProvider:
    """
    Perform JWT Encryption and Decryption.

    The signing secret is resolved when an instance is constructed rather than
    in a default-argument expression, so environment changes made after this
    module is imported are still honored.
    """

    def __init__(self, secret: AnyStr = None, algorithm: AnyStr = "HS256"):
        # BUG FIX: the original default was `os.environ.get("JWT_SECRET")`,
        # which is evaluated once at import time — a JWT_SECRET exported after
        # import was silently ignored and the secret stayed None.
        self.secret = secret if secret is not None else os.environ.get("JWT_SECRET")
        self.algorithm = algorithm

    def encrypt(self, data: Dict) -> AnyStr:
        """
        Encrypt (sign) *data* into a JWT string with the configured secret.
        """
        return jwt.encode(data, self.secret, algorithm=self.algorithm)

    def decrypt(self, token: AnyStr) -> Dict:
        """
        Decrypt (verify and decode) *token*.

        Raises HTTP 401 instead of letting the jose error escape, so FastAPI
        handlers can rely on a consistent failure shape.
        """
        try:
            return jwt.decode(token, self.secret, algorithms=[self.algorithm])
        except JWTError as e:
            raise HTTPException(
                status_code=status.HTTP_401_UNAUTHORIZED,
                detail=f"Could not validate credentials. {str(e)}",
            ) from e
""" +) + +# Example system message to clarify format with examples +system_examples = SystemMessage( + content=""" +Example 1: + My sentence: "outperforms a Transformer-based BERT baseline on the GLUE language benchmark" + My word: "outperforms" + Answer: "vượt trội" +Example 2: + My sentence: "I love my dog so much, I was interested in him the first time I saw him" + My word: "so much" + Answer: "rất nhiều" +""" +) + + +# Function to create the prompt template dynamically based on input +def create_prompt(sentence, word): + user_message = HumanMessage( + content=f""" +My sentence: {sentence} +My word: {word} + +If this word is not in dictionary, return "Không tìm thấy từ này trong từ điển". +Else, Translate the word to Vietnamese based on the context of the sentence. Return the answer as the translated word. +I want output to have the format: "word": input word ,"answer": translated word, "options": list of options. +Options is a list of words consisting of the answer and 3 random words which can be used to create flashcards for learning. 
+""" + ) + ai_response = AIMessage(content="Answer: [Your AI will provide the answer here]") + # Combine all messages into a conversation + messages = [system_instructions, system_examples, user_message, ai_response] + chat_prompt = ChatPromptTemplate(messages=messages) + + return chat_prompt.format() + + +# Use the JSON output parser +parser = JsonOutputParser(pydantic_object=TranslateOutput) + +# Create the prompt template +prompt = PromptTemplate( + input_variables=["sentence", "word"], + template=create_prompt("{sentence}", "{word}"), + partial_variables={"format_instructions": parser.get_format_instructions()}, +) + +# Chain the prompt with the LLM and parser +chain = prompt | llm | parser diff --git a/apis/V1/routes/__pycache__/auth.cpython-310.pyc b/apis/V1/routes/__pycache__/auth.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a158cf8068139a7b0ac67da4c60f656193cf02c7 Binary files /dev/null and b/apis/V1/routes/__pycache__/auth.cpython-310.pyc differ diff --git a/apis/V1/routes/__pycache__/upload.cpython-310.pyc b/apis/V1/routes/__pycache__/upload.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..7a9388c5839e4112452b3a6153a4248307faef0f Binary files /dev/null and b/apis/V1/routes/__pycache__/upload.cpython-310.pyc differ diff --git a/apis/V1/routes/__pycache__/word.cpython-310.pyc b/apis/V1/routes/__pycache__/word.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..741eb059438b655962de9fcd1d59afd10c5936e8 Binary files /dev/null and b/apis/V1/routes/__pycache__/word.cpython-310.pyc differ diff --git a/apis/V1/routes/auth.py b/apis/V1/routes/auth.py new file mode 100644 index 0000000000000000000000000000000000000000..b70b4c05d1a4e01c3378c45e2a9636c841b09072 --- /dev/null +++ b/apis/V1/routes/auth.py @@ -0,0 +1,50 @@ +from fastapi import APIRouter, status, Response, Depends +from typing import Annotated +from ..models.users import User +from 
"""Authentication routes: credential login, current-user lookup, and logout."""
from fastapi import APIRouter, status, Response, Depends
from typing import Annotated
from ..models.users import User, UserSchema
from ..controllers.auth_controller import login_control
from ..configs.database_config import db
from ..interfaces.auth_interface import _LoginResponseInterface, Credential
from ..utils.response_fmt import jsonResponseFmt
from ..middlewares.auth_middleware import get_current_user
from ..utils.utils import get_current_time

# Import-time heartbeat kept from the original module.
print(get_current_time())

router = APIRouter(prefix="/auth", tags=["Authentications"])

collection_name = db["user"]

# Resolved per-request by FastAPI; None when authentication fails.
user_dependency = Annotated[User, Depends(get_current_user)]


@router.post(
    "/login", status_code=status.HTTP_200_OK, response_model=_LoginResponseInterface
)
async def login(credential: Credential):
    """Exchange an incoming credential for an application token."""
    token = login_control(credential.credential)
    print("token", token)  # debug trace kept from the original
    return jsonResponseFmt({"token": token})


@router.get("")
async def get_me(user: user_dependency):
    """Return the authenticated user's profile, or 401 when unauthenticated."""
    if user is None:
        return jsonResponseFmt(None, msg="Authentication failed", code=401)
    return jsonResponseFmt(user)


@router.get("/logout", status_code=status.HTTP_200_OK)
async def logout(user: user_dependency, response: Response):
    """Clear the auth cookie for the authenticated user."""
    if user is None:
        return jsonResponseFmt(None, msg="Authentication failed", code=401)
    response.delete_cookie("token")
    return jsonResponseFmt(None)


@router.post("/findall")
async def find_all(user_data: User):
    """Persist a user document.

    NOTE(review): this endpoint performs no authentication check — confirm it
    is intentionally open (it looks like a development/test route).
    """
    created = UserSchema(**user_data.dict()).create()
    print("alo", created)  # debug trace kept from the original
    return jsonResponseFmt(created)
"""PDF upload routes: fetch-by-URL, direct upload, and contextual word explanation."""
from fastapi import Depends, UploadFile, status, Query
import requests
from fastapi import APIRouter
from ..prompts.translate_detail import AskImage
from typing import Annotated
from ..models.users import User
from ..middlewares.auth_middleware import get_current_user
from ..utils.response_fmt import jsonResponseFmt
import os

router = APIRouter(prefix="/upload", tags=["Upload"])

# Resolved per-request by FastAPI; None when authentication fails.
user_dependency = Annotated[User, Depends(get_current_user)]

# Shared Gemini vision client; explain_word reloads the user's stored PDF.
AI = AskImage()


def _pdf_path(user_id: str) -> str:
    """Filesystem slot where the user's uploaded PDF is stored."""
    return f"storage//{user_id}.pdf"


@router.get("/pdf_url", status_code=status.HTTP_200_OK)
async def upload_pdf_URL(user: user_dependency, pdf_url: str = Query(min_length=3)):
    """Download the PDF at *pdf_url* into the user's storage slot."""
    if user is None:
        return jsonResponseFmt(None, msg="Authentication failed", code=401)
    try:
        user_id = user["id"]
        response = requests.get(pdf_url)
        if response.status_code == 200:
            os.makedirs("storage", exist_ok=True)
            with open(_pdf_path(user_id), "wb") as f:
                f.write(response.content)
            return jsonResponseFmt(None, msg="Success", code=200)
        # BUG FIX: the original fell through and implicitly returned None
        # (an empty 200 body) when the remote answered with a non-200 status.
        return jsonResponseFmt(None, msg="Error", code=400)
    except Exception:
        # BUG FIX: was a bare `except:`, which also swallowed SystemExit and
        # KeyboardInterrupt; narrowed to Exception.
        return jsonResponseFmt(None, msg="Error", code=400)


@router.post("/pdf", status_code=status.HTTP_200_OK)
async def upload_pdf(pdf_file: UploadFile, user: user_dependency):
    """Store a directly uploaded PDF for the current user."""
    if user is None:
        return jsonResponseFmt(None, msg="Authentication failed", code=401)
    try:
        user_id = user["id"]
        contents = await pdf_file.read()
        os.makedirs("storage", exist_ok=True)
        with open(_pdf_path(user_id), "wb") as f:
            f.write(contents)
        return jsonResponseFmt(None, msg="Success", code=200)
    except Exception:
        return jsonResponseFmt(None, msg="Error", code=400)


@router.get("/explain_word", status_code=status.HTTP_200_OK)
async def explain_word(user: user_dependency, word: str = Query(min_length=2)):
    """Explain *word* in the context of the user's stored PDF.

    BUG FIX: the original named this handler ``upload_pdf`` as well, silently
    shadowing the POST /pdf handler's function at module level; renamed (the
    route path and behavior are unchanged).
    """
    if user is None:
        return jsonResponseFmt(None, msg="Authentication failed", code=401)
    user_id = user["id"]
    try:
        res = AI.explain_word(word, _pdf_path(user_id))
    except Exception as e:
        print(e)
        return jsonResponseFmt(None, msg="Error", code=400)
    return res
"""Vocabulary CRUD routes for the authenticated user."""
from fastapi import APIRouter, status, Path, Depends
from typing import Annotated
from ..middlewares.auth_middleware import get_current_user
from ..models.words import Word, WordSchema
from ..models.users import User
from ..configs.database_config import db
from ..utils.response_fmt import jsonResponseFmt
from ..controllers.word_controller import (
    list_word_controlller,
    add_word_controller,
    update_word_controller,
    delete_word_controller,
)

router = APIRouter(prefix="/word", tags=["Words"])
collection = db["word"]

# Resolved per-request by FastAPI; None when authentication fails.
user_dependency = Annotated[User, Depends(get_current_user)]


def _unauthorized():
    """Shared 401 payload for requests whose user dependency resolved to None."""
    return jsonResponseFmt(None, msg="Authentication failed", code=401)


@router.get("/listWord", status_code=status.HTTP_200_OK)
async def list_words(user: user_dependency):
    """List every saved word for the current user."""
    if user is None:
        return _unauthorized()
    return jsonResponseFmt(list_word_controlller(user))


@router.post("/addWord", status_code=status.HTTP_201_CREATED)
async def add_word(word: Word, user: user_dependency):
    """Save a new word for the current user."""
    print("word", word)  # debug trace kept from the original
    if user is None:
        return _unauthorized()
    return add_word_controller(user, word)


@router.put("/updateWord/{word_id}", status_code=status.HTTP_200_OK)
async def update_word(
    user: user_dependency, word_data: Word, word_id: str = Path(min_length=1)
):
    """Update an existing word by id."""
    if user is None:
        return _unauthorized()
    return update_word_controller(user, word_id, word_data)


@router.delete("/deleteWord/{word_id}", status_code=status.HTTP_200_OK)
async def delete_word(user: user_dependency, word_id: str = Path(min_length=1)):
    """Delete a word by id."""
    if user is None:
        return _unauthorized()
    return delete_word_controller(user, word_id)


@router.post("/test")
async def test(word: Word):
    """Dev-only smoke endpoint with a hard-coded user id (no auth check)."""
    user_id = "6661455a0b293703d07f73ba"
    print(WordSchema(**word.dict()).add_word(user_id))
"""Serializers that turn Mongo user documents into JSON-safe dicts."""


def getUser(user) -> dict:
    """Serialize one user document; stringifies the ObjectId under "id"."""
    doc_id = user["_id"]
    return {
        "id": str(doc_id),
        "name": user["name"],
        "email": user["email"],
        "picture": user["picture"],
    }


def list_serial(users) -> list:
    """Serialize an iterable of user documents."""
    return list(map(getUser, users))
import datetime


def get_current_time() -> str:
    """
    Get the current time in the string format.

    Returns the local wall-clock time as an ISO-8601 string.
    """
    now = datetime.datetime.now()
    return now.isoformat()
"""Application factory: builds the FastAPI app and mounts the LangServe chains."""
from fastapi import FastAPI
from fastapi.middleware.cors import CORSMiddleware
from .V1.providers.llm_provider import chain
from .V1.prompts.grammarly import chain_grammarly
from .V1.prompts.translate_abstract_RnD import chain_RnD
from .V1.prompts.translate_abstract_BIZ import chain_BIZ
from .V1.prompts.document_type_clf import chain_type_classify
from .V1.prompts.translate_test import chain_test_RnD
from .V1.prompts.summarize_doc import chain_summarize
from langserve import add_routes


def create_app():
    """Create and configure the FastAPI application (CORS + LangServe routes)."""
    app = FastAPI(
        title="BANDic+", description="API for BANDic+", version="1.0.0", docs_url="/"
    )

    # Wide-open CORS, kept as in the original configuration.
    app.add_middleware(
        CORSMiddleware,
        allow_origins=["*"],
        allow_credentials=True,
        allow_methods=["*"],
        allow_headers=["*"],
    )

    # (runnable, mount path, enabled endpoints) — registered in this order.
    langserve_routes = [
        (chain_summarize, "/summary", ["invoke"]),
        (chain_test_RnD, "/translate_test", ["invoke"]),
        (chain_RnD, "/translate_RnD", ["invoke"]),
        (chain_BIZ, "/translate_BIZ", ["invoke"]),
        (chain_type_classify, "/classify_document_type", ["invoke"]),
        (chain_grammarly, "/grammarly", ["stream"]),
    ]
    for runnable, path, endpoints in langserve_routes:
        add_routes(app, runnable, path=path, enabled_endpoints=endpoints)

    return app
uvicorn.run(app,host="0.0.0.0", port=3002) diff --git a/requirements.txt b/requirements.txt new file mode 100644 index 0000000000000000000000000000000000000000..64b7ab939011662caa7bf4b736e274dadf0861f1 --- /dev/null +++ b/requirements.txt @@ -0,0 +1,18 @@ +fastapi==0.111.0 +langchain==0.2.3 +langchain_community==0.2.4 +langchain_core==0.2.5 +langchain_google_genai==1.0.6 +langchain_qdrant==0.1.0 +langchain_text_splitters==0.2.1 +langserve==0.2.2 +pydantic==1.10.13 +PyJWT==2.8.0 +pymongo==4.7.3 +python-dotenv==1.0.1 +python_jose==3.3.0 +qdrant_client==1.9.1 +uvicorn==0.30.1 +sse_starlette +pdf2image +google-generativeai \ No newline at end of file