wfmedeiros committed on
Commit 5752c15 · verified · 1 Parent(s): 0833fb8

Create settings.yaml

Files changed (1)
  1. settings.yaml +101 -0
settings.yaml ADDED
@@ -0,0 +1,101 @@
+server:
+  port: 8001
+  cors:
+    enabled: true
+    allow_origins: ["*"]
+    allow_methods: ["*"]
+    allow_headers: ["*"]
+  auth:
+    enabled: false
+    secret: "Basic c2VjcmV0OmtleQ=="
+data:
+  local_ingestion:
+    enabled: false
+    allow_ingest_from: ["*"]
+  local_data_folder: local_data/private_gpt
+ui:
+  enabled: true
+  path: /
+  default_chat_system_prompt: >
+    Você é um assistente de IA especializado em educação, agindo como um professor experiente e criativo. Sua principal função é criar aulas, planos de aula e planos de ensino detalhados para facilitar o trabalho de outros professores. Responda sempre em Português do Brasil.
+  default_query_system_prompt: >
+    You can only answer questions about the provided context. If you know the answer but it is not based in the provided context, don't provide the answer, just state the answer is not in the context provided.
+  default_summarization_system_prompt: >
+    Provide a comprehensive summary of the provided context information. The summary should cover all the key points and main ideas presented in the original text, while also condensing the information into a concise and easy-to-understand format. Please ensure that the summary includes relevant details and examples that support the main ideas, while avoiding any unnecessary information or repetition.
+  delete_file_button_enabled: true
+  delete_all_files_button_enabled: true
+llm:
+  mode: llamacpp
+  prompt_style: "llama2"
+  max_new_tokens: 512
+  context_window: 3900
+  temperature: 0.1
+rag:
+  similarity_top_k: 2
+  rerank:
+    enabled: false
+    model: cross-encoder/ms-marco-MiniLM-L-2-v2
+    top_n: 1
+summarize:
+  use_async: true
+clickhouse:
+  host: localhost
+  port: 8443
+  username: admin
+  password: clickhouse
+  database: embeddings
+llamacpp:
+  llm_hf_repo_id: "TheBloke/Mistral-7B-Instruct-v0.2-GGUF"
+  llm_hf_model_file: "mistral-7b-instruct-v0.2.Q4_K_M.gguf"
+embedding:
+  mode: huggingface
+  ingest_mode: simple
+  embed_dim: 1024  # dimension of BAAI/bge-large-en-v1.5 embeddings
+huggingface:
+  embedding_hf_model_name: "BAAI/bge-large-en-v1.5"
+  access_token: ${HF_TOKEN:}
+  trust_remote_code: true
+vectorstore:
+  database: qdrant
+nodestore:
+  database: simple
+milvus:
+  uri: local_data/private_gpt/milvus/milvus_local.db
+  collection_name: milvus_db
+  overwrite: false
+qdrant:
+  path: local_data/private_gpt/qdrant
+postgres:
+  host: localhost
+  port: 5432
+  database: postgres
+  user: postgres
+  password: postgres
+  schema_name: private_gpt
+sagemaker:
+  llm_endpoint_name: huggingface-pytorch-tgi-inference-2023-09-25-19-53-32-140
+  embedding_endpoint_name: huggingface-pytorch-inference-2023-11-03-07-41-36-479
+openai:
+  api_key: ${OPENAI_API_KEY:}
+  model: gpt-3.5-turbo
+  embedding_api_key: ${OPENAI_API_KEY:}
+ollama:
+  llm_model: llama3.1
+  embedding_model: nomic-embed-text
+  api_base: http://localhost:11434
+  embedding_api_base: http://localhost:11434
+  keep_alive: 5m
+  request_timeout: 120.0
+  autopull_models: true
+azopenai:
+  api_key: ${AZ_OPENAI_API_KEY:}
+  azure_endpoint: ${AZ_OPENAI_ENDPOINT:}
+  embedding_deployment_name: ${AZ_OPENAI_EMBEDDING_DEPLOYMENT_NAME:}
+  llm_deployment_name: ${AZ_OPENAI_LLM_DEPLOYMENT_NAME:}
+  api_version: "2023-05-15"
+  embedding_model: text-embedding-ada-002
+  llm_model: gpt-35-turbo
+gemini:
+  api_key: ${GOOGLE_API_KEY:}
+  model: models/gemini-pro
+  embedding_model: models/embedding-001
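
Values such as ${HF_TOKEN:} and ${OPENAI_API_KEY:} are environment-variable placeholders with an empty-string default, so secrets stay out of the committed file. A minimal sketch of resolving that placeholder style when reading this file (assumes PyYAML is installed; it mirrors the placeholder convention only, not the application's actual settings loader):

    import os
    import re

    import yaml

    # Matches ${VAR_NAME:default}; group 1 is the variable, group 2 the default.
    PLACEHOLDER = re.compile(r"\$\{(\w+):([^}]*)\}")


    def expand_env(value):
        """Recursively replace ${VAR:default} with the env value, or the default if unset."""
        if isinstance(value, str):
            return PLACEHOLDER.sub(
                lambda m: os.environ.get(m.group(1), m.group(2)), value
            )
        if isinstance(value, dict):
            return {k: expand_env(v) for k, v in value.items()}
        if isinstance(value, list):
            return [expand_env(v) for v in value]
        return value


    with open("settings.yaml") as f:
        settings = expand_env(yaml.safe_load(f))

    print(settings["server"]["port"])               # 8001
    print(settings["huggingface"]["access_token"])  # value of HF_TOKEN, or "" when unset

With HF_TOKEN left unset, access_token resolves to an empty string, which a loader can treat as "no token provided"; exporting HF_TOKEN before startup injects the real credential without editing this file.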