Spaces:
Runtime error
Runtime error
Commit
·
8153b2e
1
Parent(s):
b4d29e1
Update interface.py
Browse files- interface.py +7 -8
interface.py
CHANGED
|
@@ -32,7 +32,7 @@ class ChatInterface:
|
|
| 32 |
Supports both regular image files and DICOM medical imaging files.
|
| 33 |
"""
|
| 34 |
|
| 35 |
-
def __init__(self, agent, tools_dict
|
| 36 |
"""
|
| 37 |
Initialize the chat interface.
|
| 38 |
|
|
@@ -48,7 +48,6 @@ class ChatInterface:
|
|
| 48 |
# Separate storage for original and display paths
|
| 49 |
self.original_file_path = None # For LLM (.dcm or other)
|
| 50 |
self.display_file_path = None # For UI (always viewable format)
|
| 51 |
-
self.session_details = session_details
|
| 52 |
|
| 53 |
def handle_upload(self, file_path: str) -> str:
|
| 54 |
"""
|
|
@@ -104,7 +103,7 @@ class ChatInterface:
|
|
| 104 |
return history, gr.Textbox(value=message, interactive=False)
|
| 105 |
|
| 106 |
async def process_message(
|
| 107 |
-
self, message: str, display_image: Optional[str], chat_history: List[ChatMessage]
|
| 108 |
) -> AsyncGenerator[Tuple[List[ChatMessage], Optional[str], str], None]:
|
| 109 |
"""
|
| 110 |
Process a message and generate responses.
|
|
@@ -202,7 +201,7 @@ class ChatInterface:
|
|
| 202 |
yield chat_history, self.display_file_path
|
| 203 |
|
| 204 |
finally:
|
| 205 |
-
store_chat_history(
|
| 206 |
|
| 207 |
def store_chat_history(username, session_id, chat_history):
|
| 208 |
"""
|
|
@@ -415,7 +414,7 @@ def create_demo(agent, tools_dict):
|
|
| 415 |
bot_msg = chat_msg.then(
|
| 416 |
interface.process_message,
|
| 417 |
inputs=[txt, image_display, chatbot],
|
| 418 |
-
outputs=[chatbot, image_display, txt],
|
| 419 |
)
|
| 420 |
bot_msg.then(lambda: gr.Textbox(interactive=True), None, [txt])
|
| 421 |
|
|
@@ -426,7 +425,7 @@ def create_demo(agent, tools_dict):
|
|
| 426 |
).then(
|
| 427 |
interface.process_message,
|
| 428 |
inputs=[txt, image_display, chatbot],
|
| 429 |
-
outputs=[chatbot, image_display, txt],
|
| 430 |
).then(lambda: gr.Textbox(interactive=True), None, [txt])
|
| 431 |
|
| 432 |
analyze2_btn.click(
|
|
@@ -436,7 +435,7 @@ def create_demo(agent, tools_dict):
|
|
| 436 |
).then(
|
| 437 |
interface.process_message,
|
| 438 |
inputs=[txt, image_display, chatbot],
|
| 439 |
-
outputs=[chatbot, image_display, txt],
|
| 440 |
).then(lambda: gr.Textbox(interactive=True), None, [txt])
|
| 441 |
|
| 442 |
segment_btn.click(
|
|
@@ -445,7 +444,7 @@ def create_demo(agent, tools_dict):
|
|
| 445 |
interface.add_message, inputs=[txt, image_display, chatbot], outputs=[chatbot, txt]
|
| 446 |
).then(
|
| 447 |
interface.process_message,
|
| 448 |
-
inputs=[txt, image_display, chatbot],
|
| 449 |
outputs=[chatbot, image_display, txt],
|
| 450 |
).then(lambda: gr.Textbox(interactive=True), None, [txt])
|
| 451 |
|
|
|
|
| 32 |
Supports both regular image files and DICOM medical imaging files.
|
| 33 |
"""
|
| 34 |
|
| 35 |
+
def __init__(self, agent, tools_dict):
|
| 36 |
"""
|
| 37 |
Initialize the chat interface.
|
| 38 |
|
|
|
|
| 48 |
# Separate storage for original and display paths
|
| 49 |
self.original_file_path = None # For LLM (.dcm or other)
|
| 50 |
self.display_file_path = None # For UI (always viewable format)
|
|
|
|
| 51 |
|
| 52 |
def handle_upload(self, file_path: str) -> str:
|
| 53 |
"""
|
|
|
|
| 103 |
return history, gr.Textbox(value=message, interactive=False)
|
| 104 |
|
| 105 |
async def process_message(
|
| 106 |
+
self, message: str, display_image: Optional[str], session_details: dict, chat_history: List[ChatMessage]
|
| 107 |
) -> AsyncGenerator[Tuple[List[ChatMessage], Optional[str], str], None]:
|
| 108 |
"""
|
| 109 |
Process a message and generate responses.
|
|
|
|
| 201 |
yield chat_history, self.display_file_path
|
| 202 |
|
| 203 |
finally:
|
| 204 |
+
store_chat_history(session_details['username'], session_details['session_id'], chat_history)
|
| 205 |
|
| 206 |
def store_chat_history(username, session_id, chat_history):
|
| 207 |
"""
|
|
|
|
| 414 |
bot_msg = chat_msg.then(
|
| 415 |
interface.process_message,
|
| 416 |
inputs=[txt, image_display, chatbot],
|
| 417 |
+
outputs=[chatbot, image_display, session_details, txt],
|
| 418 |
)
|
| 419 |
bot_msg.then(lambda: gr.Textbox(interactive=True), None, [txt])
|
| 420 |
|
|
|
|
| 425 |
).then(
|
| 426 |
interface.process_message,
|
| 427 |
inputs=[txt, image_display, chatbot],
|
| 428 |
+
outputs=[chatbot, image_display, session_details, txt],
|
| 429 |
).then(lambda: gr.Textbox(interactive=True), None, [txt])
|
| 430 |
|
| 431 |
analyze2_btn.click(
|
|
|
|
| 435 |
).then(
|
| 436 |
interface.process_message,
|
| 437 |
inputs=[txt, image_display, chatbot],
|
| 438 |
+
outputs=[chatbot, image_display, session_details, txt],
|
| 439 |
).then(lambda: gr.Textbox(interactive=True), None, [txt])
|
| 440 |
|
| 441 |
segment_btn.click(
|
|
|
|
| 444 |
interface.add_message, inputs=[txt, image_display, chatbot], outputs=[chatbot, txt]
|
| 445 |
).then(
|
| 446 |
interface.process_message,
|
| 447 |
+
inputs=[txt, image_display, session_details, chatbot],
|
| 448 |
outputs=[chatbot, image_display, txt],
|
| 449 |
).then(lambda: gr.Textbox(interactive=True), None, [txt])
|
| 450 |
|