Siyuan0730 committed
Commit c1726fc · 1 Parent(s): 45de1d9

Reorganized the code logic after adding session state

Files changed (1)
  1. app.py +99 -76
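
The change to app.py centers on one pattern: build the course artifacts (embeddings, FAISS index, course outline, lesson content) once, park them in st.session_state, and redraw them from that cache on every Streamlit rerun instead of recomputing. As a point of reference before the diff, here is a minimal, self-contained sketch of that caching pattern; build_course(), REQUIRED_KEYS, and the widget labels are illustrative stand-ins, not code taken from this commit.

# sketch_session_cache.py - minimal sketch of the "compute once, replay on rerun"
# pattern this commit moves toward; names here are hypothetical, not OmniTutor's API.
import streamlit as st

REQUIRED_KEYS = ("course_outline_list", "course_content_list")

def build_course(num_lessons: int) -> tuple[list[str], list[str]]:
    # hypothetical stand-in for the expensive initialize_app() pipeline
    outline = [f"Lesson {i + 1} outline" for i in range(num_lessons)]
    content = [f"Content for lesson {i + 1}" for i in range(num_lessons)]
    return outline, content

num_lessons = st.slider("Number of lessons", 2, 10, 5)

if st.button("submit"):
    # Test every key explicitly: a chained check such as
    # `if "a" and "b" not in st.session_state:` only evaluates the last key.
    if any(key not in st.session_state for key in REQUIRED_KEYS):
        (st.session_state.course_outline_list,
         st.session_state.course_content_list) = build_course(num_lessons)

# On every rerun, redraw from the cached results instead of recomputing them.
if all(key in st.session_state for key in REQUIRED_KEYS):
    with st.expander("Check the course outline"):
        st.write("\n\n".join(st.session_state.course_outline_list))
    for i, lesson in enumerate(st.session_state.course_content_list, start=1):
        with st.expander(f"Learn the lesson {i}"):
            st.markdown(lesson)

The explicit any()/all() membership checks are this sketch's own choice; they make the compute-once, replay-on-rerun intent easy to follow.
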
app.py CHANGED
@@ -10,7 +10,6 @@ from nltk.corpus import stopwords
 from nltk.stem import WordNetLemmatizer
 from collections import Counter
 import nltk
-import time

 openai.api_key = st.secrets["OPENAI_API_KEY"]

@@ -229,26 +228,59 @@ def initialize_app(added_files, num_lessons, language):
     course_outline_list = courseOutlineGenerating(temp_file_paths, num_lessons, language)
     outline_generating_state.text("Generating Course Outline...Done")

+    outline_presenting = st.empty()
+    course_outline_string = ''
+    lessons_count = 0
+    for outline in course_outline_list:
+        lessons_count += 1
+        course_outline_string += f"{lessons_count}." + outline[0]
+        course_outline_string += '\n\n' + outline[1] + '\n\n'
+    with outline_presenting.expander("Check the course outline", expanded=False):
+        st.write(course_outline_string)
+
+    content_presenting = st.empty()
+    content_generating_state = st.empty()
+    count_generating_content = 0
+    course_content_list = []
+    for lesson in course_outline_list:
+        count_generating_content += 1
+        content_generating_state = st.text(f"Writing content for lesson {count_generating_content}...")
+        retrievedChunksList = searchVDB(lesson, st.session_state.embeddings_df, st.session_state.faiss_index)
+        courseContent = generateCourse(lesson, retrievedChunksList, language)
+        course_content_list.append(courseContent)
+        content_generating_state.text(f"Writing content for lesson {count_generating_content}...Done")
+        with content_presenting.expander(f"Learn the lesson {count_generating_content} ", expanded=False):
+            st.markdown(courseContent)
+
     file_proc_state.empty()
     vdb_state.empty()
     outline_generating_state.empty()
-
-    return embeddings_df, faiss_index, course_outline_list
+    outline_presenting.empty()
+    content_presenting.empty()
+    content_generating_state.empty()
+
+    return embeddings_df, faiss_index, course_outline_list, course_content_list
+
+def regenerate_outline(course_outline_list):
+    course_outline_string = ''
+    lessons_count = 0
+    for outline in course_outline_list:
+        lessons_count += 1
+        course_outline_string += f"{lessons_count}." + outline[0]
+        course_outline_string += '\n\n' + outline[1] + '\n\n'
+    with st.expander("Check the course outline", expanded=False):
+        st.write(course_outline_string)
+
+def regenerate_content(course_content_list):
+    count_generating_content = 0
+    for content in course_content_list:
+        count_generating_content += 1
+        with st.expander(f"Learn the lesson {count_generating_content} ", expanded=False):
+            st.markdown(content)

 def app():
     st.title("OmniTutor v0.0.2")

-    if "openai_model" not in st.session_state:
-        st.session_state["openai_model"] = "gpt-3.5-turbo"
-    # Initialize chat history
-    if "messages" not in st.session_state:
-        st.session_state.messages = []
-
-    # Display chat messages from history on app rerun - this part does not need session, just rely on rerun
-    for message in st.session_state.messages:
-        with st.chat_message(message["role"]):
-            st.markdown(message["content"])
-
     with st.sidebar:
         st.image("https://siyuan-harry.oss-cn-beijing.aliyuncs.com/oss://siyuan-harry/20231021212525.png")
         added_files = st.file_uploader('Upload .md file', type=['.md'], accept_multiple_files=True)
@@ -259,75 +291,66 @@ def app():
         language = 'Chinese'
         btn = st.button('submit')

-    col1, col2 = st.columns([0.6,0.4])
-
-    user_question = st.chat_input("Enter your questions when learning...")
-
     if btn:
-
-        if "embeddings_df" and "faiss_index" and "course_outline_list" not in st.session_state:
-            st.session_state.embeddings_df, st.session_state.faiss_index, st.session_state.course_outline_list = initialize_app(added_files, num_lessons, language)
+
+        if "embeddings_df" and "faiss_index" and "course_outline_list" and "course_content_list" not in st.session_state:
+            st.session_state.embeddings_df,
+            st.session_state.faiss_index,
+            st.session_state.course_outline_list,
+            st.session_state.course_content_list = initialize_app(added_files, num_lessons, language)
         #embeddings_df, faiss_index, course_outline_list = initialize_app(added_files, num_lessons, language)
+
+        col1, col2 = st.columns([0.6,0.4])

         with col1:
-            st.text("Processing file...Done")
-            st.text("Constructing vector database from provided materials...Done")
-            st.text("Generating Course Outline...Done")
-
             # print out the course outline
-            course_outline_string = ''
-            lessons_count = 0
-            for outline in st.session_state.course_outline_list:
-                lessons_count += 1
-                course_outline_string += f"{lessons_count}." + outline[0]
-                course_outline_string += '\n' + outline[1] + '\n\n'
-                #time.sleep(1)
-            with st.expander("Check the course outline", expanded=False):
-                st.write(course_outline_string)
-
-            count_generating_content = 0
-            for lesson in st.session_state.course_outline_list:
-                count_generating_content += 1
-                content_generating_state = st.text(f"Writing content for lesson {count_generating_content}...")
-                retrievedChunksList = searchVDB(lesson, st.session_state.embeddings_df, st.session_state.faiss_index)
-                courseContent = generateCourse(lesson, retrievedChunksList, language)
-                content_generating_state.text(f"Writing content for lesson {count_generating_content}...Done")
-                #st.text_area("Course Content", value=courseContent)
-                with st.expander(f"Learn the lesson {count_generating_content} ", expanded=False):
-                    st.markdown(courseContent)
-
-        with col2:
-            st.caption(''':blue[AI Assistant]: Ask this TA any questions related to this course and get direct answers. :sunglasses:''')
-            # Set a default model
-
-            with st.chat_message("assistant"):
-                st.write("Hello👋, how can I help you today? 😄")
-
-            # session.state here keeps the basic info and settings of this chat session
-            if user_question:
-                retrieved_chunks_for_user = searchVDB(user_question, st.session_state.embeddings_df, st.session_state.faiss_index)
-                #retrieved_chunks_for_user = []
-                prompt = decorate_user_question(user_question, retrieved_chunks_for_user)
-                st.session_state.messages.append({"role": "user", "content": prompt})
-                with st.chat_message("user"):
-                    st.markdown(user_question)
-                # Display assistant response in chat message container
-                with st.chat_message("assistant"):
-                    message_placeholder = st.empty()
-                    full_response = ""
-                    for response in openai.ChatCompletion.create(
-                        model=st.session_state["openai_model"],
-                        messages=[{"role": m["role"], "content": m["content"]} for m in st.session_state.messages],
-                        stream=True,
-                    ):
-                        full_response += response.choices[0].delta.get("content", "")
-                        message_placeholder.markdown(full_response + "▌")
-                    message_placeholder.markdown(full_response)
-                st.session_state.messages.append({"role": "assistant", "content": full_response})
-
+            regenerate_outline(st.session_state.course_outline_list)
+            # print out the course content
+            regenerate_content(st.session_state.course_content_list)

+        with col2:

-
+            st.caption(''':blue[AI Assistant]: Ask this TA any questions related to this course and get direct answers. :sunglasses:''')
+            # Set a default model
+
+            with st.chat_message("assistant"):
+                st.write("Hello👋, how can I help you today? 😄")
+
+            if "openai_model" not in st.session_state:
+                st.session_state["openai_model"] = "gpt-3.5-turbo"
+            # Initialize chat history
+            if "messages" not in st.session_state:
+                st.session_state.messages = []
+
+            # Display chat messages from history on app rerun - this part does not need session, just rely on rerun
+            for message in st.session_state.messages:
+                with st.chat_message(message["role"]):
+                    st.markdown(message["content"][0])
+
+            user_question = st.chat_input("Enter your questions when learning...")
+
+            # session.state here keeps the basic info and settings of this chat session
+            if user_question:
+                retrieved_chunks_for_user = searchVDB(user_question, st.session_state.embeddings_df, st.session_state.faiss_index)
+                #retrieved_chunks_for_user = []
+                prompt = decorate_user_question(user_question, retrieved_chunks_for_user)
+                st.session_state.messages.append({"role": "user", "content": [user_question, prompt]})
+                with st.chat_message("user"):
+                    st.markdown(user_question)
+                # Display assistant response in chat message container
+                with st.chat_message("assistant"):
+                    message_placeholder = st.empty()
+                    full_response = ""
+                    for response in openai.ChatCompletion.create(
+                        model=st.session_state["openai_model"],
+                        messages=[{"role": m["role"], "content": m["content"][1]} for m in st.session_state.messages],
+                        stream=True,
+                    ):
+                        full_response += response.choices[0].delta.get("content", "")
+                        message_placeholder.markdown(full_response + "▌")
+                    message_placeholder.markdown(full_response)
+                st.session_state.messages.append({"role": "assistant", "content": full_response})
+


 if __name__ == "__main__":
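
For context on the col2 changes above: each chat history entry now stores a two-element content list, [user_question, prompt], so the UI can replay the raw question (element 0) while the model receives the retrieval-augmented prompt (element 1). The standalone sketch below walks through that flow with the same pre-1.0 openai SDK (openai.ChatCompletion) used in app.py; decorate_user_question() is simplified here, retrieval is omitted, and storing the assistant reply in the same two-element shape is a choice made for this sketch rather than something taken from the commit.

# sketch_chat_stream.py - standalone sketch of the col2 chat flow: display text vs. API prompt
import openai
import streamlit as st

openai.api_key = st.secrets["OPENAI_API_KEY"]

def decorate_user_question(question: str, chunks: list[str]) -> str:
    # hypothetical prompt builder: prepend retrieved context to the user's question
    context = "\n".join(chunks)
    return f"Context:\n{context}\n\nQuestion: {question}"

if "messages" not in st.session_state:
    st.session_state.messages = []

# replay history from the display half of each entry
for message in st.session_state.messages:
    with st.chat_message(message["role"]):
        st.markdown(message["content"][0])

if user_question := st.chat_input("Enter your questions when learning..."):
    prompt = decorate_user_question(user_question, chunks=[])  # retrieval omitted in this sketch
    st.session_state.messages.append({"role": "user", "content": [user_question, prompt]})
    with st.chat_message("user"):
        st.markdown(user_question)
    with st.chat_message("assistant"):
        placeholder = st.empty()
        full_response = ""
        for chunk in openai.ChatCompletion.create(
            model="gpt-3.5-turbo",
            # send the API half of each entry to the model
            messages=[{"role": m["role"], "content": m["content"][1]} for m in st.session_state.messages],
            stream=True,
        ):
            full_response += chunk.choices[0].delta.get("content", "")
            placeholder.markdown(full_response + "▌")
        placeholder.markdown(full_response)
    # store the reply in the same [display, api] shape so later indexing stays consistent
    st.session_state.messages.append({"role": "assistant", "content": [full_response, full_response]})

Keeping every entry in the same [display, api] shape means the m["content"][1] lookup in the streaming call works for user and assistant turns alike.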