RioShiina committed on
Commit
23bb9cf
·
verified ·
1 Parent(s): 839b683

UX Improvement: Show progress when clicking 'Pre-download LoRAs'.

Browse files
Files changed (1) hide show
  1. app.py +21 -4
app.py CHANGED
@@ -480,8 +480,10 @@ with gr.Blocks(css="#col-container {margin: 0 auto; max-width: 1024px;}") as dem
480
  gr.Markdown("### Negative Prompt"); info_neg_prompt_output = gr.Textbox(lines=3, interactive=False, show_label=False)
481
  gr.Markdown("### Other Parameters"); info_params_output = gr.Textbox(lines=5, interactive=False, show_label=False)
482
 
483
- gr.Markdown("<div style='text-align: center; margin-top: 20px;'>Made by <a href='https://civitai.com/user/RioShiina'>RioShiina</a> with ❤</div>")
484
-
 
 
485
  def add_lora_row(current_count):
486
  current_count = int(current_count)
487
  if current_count < MAX_LORAS:
@@ -490,10 +492,24 @@ with gr.Blocks(css="#col-container {margin: 0 auto; max-width: 1024px;}") as dem
490
  return updates
491
  return {lora_count_state: current_count}
492
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
493
  add_lora_button.click(fn=add_lora_row, inputs=[lora_count_state], outputs=[lora_count_state, add_lora_button] + lora_rows)
494
 
495
- predownload_lora_button.click(fn=pre_download_loras, inputs=[civitai_api_key, *all_lora_inputs], outputs=[predownload_status])
496
-
497
  run_button.click(fn=infer,
498
  inputs=[base_model_name, prompt, negative_prompt, seed, batch_size, width, height, guidance_scale, num_inference_steps, sampler, schedule_type, civitai_api_key, zero_gpu_duration, *all_lora_inputs],
499
  outputs=[result])
@@ -503,4 +519,5 @@ with gr.Blocks(css="#col-container {margin: 0 auto; max-width: 1024px;}") as dem
503
  txt2img_outputs = [base_model_name, prompt, negative_prompt, seed, batch_size, zero_gpu_duration, width, height, guidance_scale, num_inference_steps, sampler, schedule_type, *all_lora_inputs, tabs]
504
  send_to_txt2img_button.click(fn=send_info_to_txt2img, inputs=[info_image_input], outputs=txt2img_outputs)
505
 
 
506
  demo.queue().launch()
 
480
  gr.Markdown("### Negative Prompt"); info_neg_prompt_output = gr.Textbox(lines=3, interactive=False, show_label=False)
481
  gr.Markdown("### Other Parameters"); info_params_output = gr.Textbox(lines=5, interactive=False, show_label=False)
482
 
483
+ gr.Markdown("<div style='text-align: center; margin-top: 20px;'>Made by <a href='https://civitai.com/user/RioShiina'>RioShiina</a> with ❤️</div>")
484
+
485
+ # --- Event Handlers ---
486
+
487
  def add_lora_row(current_count):
488
  current_count = int(current_count)
489
  if current_count < MAX_LORAS:
 
492
  return updates
493
  return {lora_count_state: current_count}
494
 
495
+ def start_lora_predownload():
496
+ """This function provides immediate feedback to the user."""
497
+ return "⏳ Downloading... please wait. This may take a moment."
498
+
499
+ # --- Chain events for immediate feedback ---
500
+ predownload_click_event = predownload_lora_button.click(
501
+ fn=start_lora_predownload,
502
+ inputs=None,
503
+ outputs=[predownload_status],
504
+ queue=False
505
+ ).then(
506
+ fn=pre_download_loras,
507
+ inputs=[civitai_api_key, *all_lora_inputs],
508
+ outputs=[predownload_status]
509
+ )
510
+
511
  add_lora_button.click(fn=add_lora_row, inputs=[lora_count_state], outputs=[lora_count_state, add_lora_button] + lora_rows)
512
 
 
 
513
  run_button.click(fn=infer,
514
  inputs=[base_model_name, prompt, negative_prompt, seed, batch_size, width, height, guidance_scale, num_inference_steps, sampler, schedule_type, civitai_api_key, zero_gpu_duration, *all_lora_inputs],
515
  outputs=[result])
 
519
  txt2img_outputs = [base_model_name, prompt, negative_prompt, seed, batch_size, zero_gpu_duration, width, height, guidance_scale, num_inference_steps, sampler, schedule_type, *all_lora_inputs, tabs]
520
  send_to_txt2img_button.click(fn=send_info_to_txt2img, inputs=[info_image_input], outputs=txt2img_outputs)
521
 
522
+
523
  demo.queue().launch()