Spaces:
Running
on
Zero
Running
on
Zero
Y Phung Nguyen
committed on
Commit
·
4f99918
1
Parent(s):
2c2c542
Upd models loader #5
Browse files
config.py
CHANGED
|
@@ -54,7 +54,8 @@ DESCRIPTION = """
|
|
| 54 |
<p>📄 <strong>Document RAG:</strong> Answer based on uploaded medical documents</p>
|
| 55 |
<p>🔍 <strong>Web Search:</strong> Fetch knowledge from reliable online medical resources</p>
|
| 56 |
<p>🌍 <strong>Multi-language:</strong> Automatic translation for non-English queries</p>
|
| 57 |
-
<p>
|
|
|
|
| 58 |
</center>
|
| 59 |
"""
|
| 60 |
CSS = """
|
|
|
|
| 54 |
<p>📄 <strong>Document RAG:</strong> Answer based on uploaded medical documents</p>
|
| 55 |
<p>🔍 <strong>Web Search:</strong> Fetch knowledge from reliable online medical resources</p>
|
| 56 |
<p>🌍 <strong>Multi-language:</strong> Automatic translation for non-English queries</p>
|
| 57 |
+
<p>Tips: Customise configurations, system prompt to see where the magic happens!</p>
|
| 58 |
+
<p>Note: Case GPU aborted errors, please select another model or try again later!</p>
|
| 59 |
</center>
|
| 60 |
"""
|
| 61 |
CSS = """
|
ui.py
CHANGED
|
@@ -356,7 +356,7 @@ def create_demo():
|
|
| 356 |
if config.global_whisper_model is not None:
|
| 357 |
status_lines.append("✅ ASR (Whisper): loaded and ready")
|
| 358 |
else:
|
| 359 |
-
status_lines.append("
|
| 360 |
else:
|
| 361 |
status_lines.append("❌ ASR: library not available")
|
| 362 |
|
|
@@ -394,9 +394,9 @@ def create_demo():
|
|
| 394 |
# ASR (Whisper) model status
|
| 395 |
if WHISPER_AVAILABLE:
|
| 396 |
if config.global_whisper_model is not None:
|
| 397 |
-
status_lines.append("✅ ASR (Whisper
|
| 398 |
else:
|
| 399 |
-
status_lines.append("
|
| 400 |
else:
|
| 401 |
status_lines.append("❌ ASR: library not available")
|
| 402 |
|
|
@@ -566,10 +566,44 @@ def create_demo():
|
|
| 566 |
outputs=[model_status, submit_button, message_input]
|
| 567 |
)
|
| 568 |
|
| 569 |
-
#
|
| 570 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 571 |
demo.load(
|
| 572 |
-
fn=
|
| 573 |
inputs=None,
|
| 574 |
outputs=[model_status]
|
| 575 |
)
|
|
|
|
| 356 |
if config.global_whisper_model is not None:
|
| 357 |
status_lines.append("✅ ASR (Whisper): loaded and ready")
|
| 358 |
else:
|
| 359 |
+
status_lines.append("⏳ ASR (Whisper): will load on first use")
|
| 360 |
else:
|
| 361 |
status_lines.append("❌ ASR: library not available")
|
| 362 |
|
|
|
|
| 394 |
# ASR (Whisper) model status
|
| 395 |
if WHISPER_AVAILABLE:
|
| 396 |
if config.global_whisper_model is not None:
|
| 397 |
+
status_lines.append("✅ ASR (Whisper): loaded and ready")
|
| 398 |
else:
|
| 399 |
+
status_lines.append("⏳ ASR (Whisper): will load on first use")
|
| 400 |
else:
|
| 401 |
status_lines.append("❌ ASR: library not available")
|
| 402 |
|
|
|
|
| 566 |
outputs=[model_status, submit_button, message_input]
|
| 567 |
)
|
| 568 |
|
| 569 |
+
# GPU-decorated function to load Whisper ASR model on-demand
|
| 570 |
+
@spaces.GPU(max_duration=120)
|
| 571 |
+
def load_whisper_model_on_demand():
|
| 572 |
+
"""Load Whisper ASR model when needed"""
|
| 573 |
+
try:
|
| 574 |
+
if WHISPER_AVAILABLE and config.global_whisper_model is None:
|
| 575 |
+
logger.info("[ASR] Loading Whisper model on-demand...")
|
| 576 |
+
initialize_whisper_model()
|
| 577 |
+
if config.global_whisper_model is not None:
|
| 578 |
+
logger.info("[ASR] ✅ Whisper model loaded successfully!")
|
| 579 |
+
return "✅ ASR (Whisper): loaded"
|
| 580 |
+
else:
|
| 581 |
+
logger.warning("[ASR] ⚠️ Whisper model failed to load")
|
| 582 |
+
return "⚠️ ASR (Whisper): failed to load"
|
| 583 |
+
elif config.global_whisper_model is not None:
|
| 584 |
+
return "✅ ASR (Whisper): already loaded"
|
| 585 |
+
else:
|
| 586 |
+
return "❌ ASR: library not available"
|
| 587 |
+
except Exception as e:
|
| 588 |
+
logger.error(f"[ASR] Error loading Whisper model: {e}")
|
| 589 |
+
return f"❌ ASR: error - {str(e)[:100]}"
|
| 590 |
+
|
| 591 |
+
# Load models on startup - medical model loads via stream_chat, Whisper loads on-demand
|
| 592 |
+
# Note: We skip startup loading to avoid GPU conflicts, models load when first needed
|
| 593 |
+
def update_startup_status():
|
| 594 |
+
"""Update status display on startup without loading models"""
|
| 595 |
+
try:
|
| 596 |
+
result = check_model_status(DEFAULT_MEDICAL_MODEL)
|
| 597 |
+
if result and isinstance(result, tuple) and len(result) == 2:
|
| 598 |
+
return result[0]
|
| 599 |
+
else:
|
| 600 |
+
return "⚠️ Checking model status..."
|
| 601 |
+
except Exception as e:
|
| 602 |
+
logger.error(f"Error in update_startup_status: {e}")
|
| 603 |
+
return f"⚠️ Error: {str(e)[:100]}"
|
| 604 |
+
|
| 605 |
demo.load(
|
| 606 |
+
fn=update_startup_status,
|
| 607 |
inputs=None,
|
| 608 |
outputs=[model_status]
|
| 609 |
)
|
voice.py
CHANGED
|
@@ -81,11 +81,22 @@ def transcribe_audio_whisper(audio_path: str) -> str:
|
|
| 81 |
try:
|
| 82 |
logger.info(f"[ASR] Starting Whisper transcription for: {audio_path}")
|
| 83 |
if config.global_whisper_model is None:
|
| 84 |
-
logger.info("[ASR] Whisper model not loaded, initializing...")
|
| 85 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 86 |
|
| 87 |
if config.global_whisper_model is None:
|
| 88 |
-
logger.error("[ASR]
|
| 89 |
return ""
|
| 90 |
|
| 91 |
# Extract processor and model from stored dict
|
|
|
|
| 81 |
try:
|
| 82 |
logger.info(f"[ASR] Starting Whisper transcription for: {audio_path}")
|
| 83 |
if config.global_whisper_model is None:
|
| 84 |
+
logger.info("[ASR] Whisper model not loaded, initializing now (on-demand)...")
|
| 85 |
+
try:
|
| 86 |
+
initialize_whisper_model()
|
| 87 |
+
if config.global_whisper_model is None:
|
| 88 |
+
logger.error("[ASR] Failed to initialize Whisper model - check logs for errors")
|
| 89 |
+
return ""
|
| 90 |
+
else:
|
| 91 |
+
logger.info("[ASR] ✅ Whisper model loaded successfully on-demand!")
|
| 92 |
+
except Exception as e:
|
| 93 |
+
logger.error(f"[ASR] Error initializing Whisper model: {e}")
|
| 94 |
+
import traceback
|
| 95 |
+
logger.debug(f"[ASR] Full traceback: {traceback.format_exc()}")
|
| 96 |
+
return ""
|
| 97 |
|
| 98 |
if config.global_whisper_model is None:
|
| 99 |
+
logger.error("[ASR] Whisper model is still None after initialization attempt")
|
| 100 |
return ""
|
| 101 |
|
| 102 |
# Extract processor and model from stored dict
|