Hugging Face Space metadata (status: Sleeping)
# Standard library
import os
import json

# Third-party
import gradio as gr
from llama_index.core import Settings
from llama_index.embeddings.huggingface import HuggingFaceEmbedding

# Local
from parse_tabular import create_symptom_index

# Configure the embedding model once, globally, so every llama_index
# component picks up the same local sentence-transformers model.
Settings.embed_model = HuggingFaceEmbedding(
    model_name="sentence-transformers/all-MiniLM-L6-v2"
)

# Build the symptom -> ICD-10 index a single time at startup; queries
# below reuse this module-level instance.
symptom_index = create_symptom_index()
# --- System prompt ---
# Per-turn instructions for the assistant: either ask exactly one focused
# clarifying question, or finish with a JSON verdict containing
# "diagnoses" and "confidences" lists. (The literal text below is sent to
# the model at runtime — keep it verbatim.)
SYSTEM_PROMPT = """
You are a medical assistant helping a user narrow down to the most likely ICD-10 code.
At each turn, EITHER ask one focused clarifying question (e.g. "Is your cough dry or productive?")
or, if you have enough info, output a final JSON with fields:
{"diagnoses":[…], "confidences":[…]}.
"""
def process_speech(new_transcript, history):
    """Append the user's input and the index's ICD-10 lookup to the chat.

    Args:
        new_transcript: Payload from the streaming microphone component.
            NOTE(review): the Microphone is configured with type="filepath",
            so this value may be an audio file *path* rather than transcribed
            text, yet it is queried as text below — confirm whether a
            speech-to-text step is missing upstream.
        history: List of Gradio "messages"-style dicts ({"role", "content"}),
            or None before the first message.

    Returns:
        The updated history list; returned unchanged when the input is empty.
    """
    # Ignore empty stream chunks.
    if not new_transcript:
        return history
    # Gradio may hand us None for an uninitialized Chatbot — the original
    # code crashed on .append() here.
    if history is None:
        history = []

    # The user turn is recorded on both the success and error paths, so add
    # it once up front (net history is identical to appending inside each
    # branch, since the query below does not touch `history`).
    history.append({"role": "user", "content": new_transcript})
    try:
        response = symptom_index.as_query_engine().query(new_transcript)
        formatted_response = {
            # Crude parse: text before the first colon is taken as the code.
            "diagnoses": [str(response).split(":")[0]],
            # Placeholder score — the query engine exposes no confidence.
            "confidences": [0.8],
            # TODO(review): follow-up is hard-coded regardless of symptoms.
            "follow_up": "Is the cough productive or dry?",
        }
        history.append({
            "role": "assistant",
            "content": json.dumps(formatted_response, indent=2),
        })
    except Exception as e:
        # Surface the failure in-chat instead of killing the stream handler.
        error_response = {"error": str(e), "status": "error"}
        history.append({
            "role": "assistant",
            "content": json.dumps(error_response, indent=2),
        })
    return history
# --- Gradio interface ---
with gr.Blocks() as demo:
    gr.Markdown("# Symptom to ICD-10 Code Lookup")

    # Conversation pane using the dict-based "messages" history format.
    chatbot = gr.Chatbot(
        label="Conversation",
        type="messages",
        height=400,
    )

    # NOTE(review): type="filepath" yields the recorded audio file's path,
    # but process_speech queries its input as text — confirm whether a
    # transcription step is intended between these two components.
    microphone = gr.Microphone(
        streaming=True,
        type="filepath",
    )

    # Pipe live microphone updates (plus current history) into the handler
    # and render its return value back into the chat.
    microphone.stream(
        fn=process_speech,
        inputs=[microphone, chatbot],
        outputs=chatbot,
        show_progress="hidden",
    )
if __name__ == "__main__":
    # Bind to all interfaces on the Space's conventional port 7860;
    # mcp_server=True additionally exposes the app as an MCP server.
    demo.launch(
        server_name="0.0.0.0",
        server_port=7860,
        mcp_server=True,
    )