Spaces:
Running
on
Zero
Running
on
Zero
Upload folder using huggingface_hub
Browse files- README.md +9 -13
- app.py +129 -0
- chain_injectors/__init__.py +0 -0
- chain_injectors/conditioning_injector.py +81 -0
- chain_injectors/controlnet_injector.py +49 -0
- chain_injectors/ipadapter_injector.py +106 -0
- comfy_integration/__init__.py +0 -0
- comfy_integration/nodes.py +39 -0
- comfy_integration/setup.py +73 -0
- core/__init__.py +0 -0
- core/generation_logic.py +25 -0
- core/model_manager.py +142 -0
- core/pipelines/__init__.py +0 -0
- core/pipelines/base_pipeline.py +53 -0
- core/pipelines/controlnet_preprocessor.py +143 -0
- core/pipelines/sd_image_pipeline.py +434 -0
- core/pipelines/workflow_recipes/_partials/_base_sampler.yaml +23 -0
- core/pipelines/workflow_recipes/_partials/conditioning/sdxl.yaml +50 -0
- core/pipelines/workflow_recipes/_partials/input/hires_fix.yaml +26 -0
- core/pipelines/workflow_recipes/_partials/input/img2img.yaml +19 -0
- core/pipelines/workflow_recipes/_partials/input/inpaint.yaml +25 -0
- core/pipelines/workflow_recipes/_partials/input/outpaint.yaml +38 -0
- core/pipelines/workflow_recipes/_partials/input/txt2img.yaml +8 -0
- core/pipelines/workflow_recipes/sd_unified_recipe.yaml +10 -0
- core/settings.py +117 -0
- core/shared_state.py +1 -0
- core/workflow_assembler.py +179 -0
- requirements.txt +52 -0
- scripts/__init__.py +0 -0
- scripts/build_sage_attention.py +99 -0
- ui/__init__.py +0 -0
- ui/events.py +771 -0
- ui/layout.py +114 -0
- ui/shared/hires_fix_ui.py +74 -0
- ui/shared/img2img_ui.py +57 -0
- ui/shared/inpaint_ui.py +81 -0
- ui/shared/outpaint_ui.py +68 -0
- ui/shared/txt2img_ui.py +38 -0
- ui/shared/ui_components.py +338 -0
- utils/__init__.py +0 -0
- utils/app_utils.py +601 -0
- yaml/constants.yaml +32 -0
- yaml/controlnet_models.yaml +71 -0
- yaml/file_list.yaml +177 -0
- yaml/injectors.yaml +12 -0
- yaml/ipadapter.yaml +33 -0
- yaml/model_list.yaml +17 -0
README.md
CHANGED
|
@@ -1,13 +1,9 @@
|
|
| 1 |
-
---
|
| 2 |
-
title: ImageGen
|
| 3 |
-
emoji:
|
| 4 |
-
colorFrom: purple
|
| 5 |
-
colorTo:
|
| 6 |
-
sdk: gradio
|
| 7 |
-
|
| 8 |
-
|
| 9 |
-
|
| 10 |
-
license: gpl-3.0
|
| 11 |
-
---
|
| 12 |
-
|
| 13 |
-
Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
|
|
|
|
| 1 |
+
---
|
| 2 |
+
title: ImageGen - Illustrious
|
| 3 |
+
emoji: 🖼
|
| 4 |
+
colorFrom: purple
|
| 5 |
+
colorTo: red
|
| 6 |
+
sdk: gradio
|
| 7 |
+
app_file: app.py
|
| 8 |
+
short_description: Multi-task image generator with dynamic, chainable workflows
|
| 9 |
+
---
|
|
|
|
|
|
|
|
|
|
|
|
app.py
ADDED
|
@@ -0,0 +1,129 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""Hugging Face Spaces entry point.

Bootstraps the environment (sys.path, optional SageAttention build),
initializes the vendored ComfyUI tree, validates model download URLs,
and finally launches the Gradio UI.

NOTE(review): reconstructed from a rendered diff; original indentation was
lost, so nesting of a few statements is a best guess — verify against the
repository.
"""
import spaces
import os
import sys
import requests
import site

# Make the project root importable regardless of how the Space launches us.
APP_DIR = os.path.dirname(os.path.abspath(__file__))
if APP_DIR not in sys.path:
    sys.path.insert(0, APP_DIR)
    print(f"✅ Added project root '{APP_DIR}' to sys.path.")

# Module-level guard so the runtime patch is applied at most once per process.
SAGE_PATCH_APPLIED = False

def apply_sage_attention_patch():
    """Force ComfyUI to use SageAttention if the package is importable.

    Monkey-patches ``comfy.model_management`` feature probes. Returns a
    human-readable status string (also printed) for logging by the caller.
    Safe to call repeatedly; a module flag makes it idempotent.
    """
    global SAGE_PATCH_APPLIED
    if SAGE_PATCH_APPLIED:
        return "SageAttention patch already applied."

    try:
        # Both imports must succeed before we flip the feature switches.
        from comfy import model_management
        import sageattention

        print("--- [Runtime Patch] sageattention package found. Applying patch... ---")
        # Override the probes rather than config: ComfyUI consults these
        # lambdas at attention-dispatch time.
        model_management.sage_attention_enabled = lambda: True
        model_management.pytorch_attention_enabled = lambda: False

        SAGE_PATCH_APPLIED = True
        return "✅ Successfully enabled SageAttention."
    except ImportError:
        # Missing package is a soft failure — default attention still works.
        SAGE_PATCH_APPLIED = False
        msg = "--- [Runtime Patch] ⚠️ sageattention package not found. Continuing with default attention. ---"
        print(msg)
        return msg
    except Exception as e:
        SAGE_PATCH_APPLIED = False
        msg = f"--- [Runtime Patch] ❌ An error occurred while applying SageAttention patch: {e} ---"
        print(msg)
        return msg

@spaces.GPU
def dummy_gpu_for_startup():
    """Run once on a GPU worker so ZeroGPU spin-up and the patch are exercised.

    The @spaces.GPU decorator makes this execute on GPU hardware; the patch
    must be applied there (not in the CPU launcher process) to take effect.
    """
    print("--- [GPU Startup] Dummy function for startup check initiated. ---")
    patch_result = apply_sage_attention_patch()
    print(f"--- [GPU Startup] {patch_result} ---")
    print("--- [GPU Startup] Startup check passed. ---")
    return "Startup check passed."

def main():
    """Full application bootstrap: build deps, set up ComfyUI, launch Gradio.

    Imports are deliberately deferred and ordered: several project modules
    import ComfyUI internals that only exist after initialize_comfyui() has
    merged the cloned tree into APP_DIR.
    """
    from utils.app_utils import print_welcome_message
    from scripts import build_sage_attention

    print_welcome_message()

    # Best-effort: a failed SageAttention build must not kill the app.
    print("--- [Setup] Attempting to build and install SageAttention... ---")
    try:
        build_sage_attention.install_sage_attention()
        print("--- [Setup] ✅ SageAttention installation process finished. ---")
    except Exception as e:
        print(f"--- [Setup] ❌ SageAttention installation failed: {e}. Continuing with default attention. ---")

    # Re-run site initialization so packages installed above become importable
    # without restarting the interpreter.
    print("--- [Setup] Reloading site-packages to detect newly installed packages... ---")
    try:
        site.main()
        print("--- [Setup] ✅ Site-packages reloaded. ---")
    except Exception as e:
        print(f"--- [Setup] ⚠️ Warning: Could not fully reload site-packages: {e} ---")

    from comfy_integration import setup as setup_comfyui
    from utils.app_utils import (
        build_preprocessor_model_map,
        build_preprocessor_parameter_map,
        load_ipadapter_presets
    )
    from core import shared_state
    from core.settings import ALL_MODEL_MAP, ALL_FILE_DOWNLOAD_MAP

    def check_all_model_urls_on_startup():
        """HEAD-check every model URL once; flag dead links in shared state."""
        print("--- [Setup] Checking all model URL validity (one-time check) ---")
        for display_name, model_info in ALL_MODEL_MAP.items():
            repo_id, filename, _, _ = model_info
            if not repo_id: continue

            download_info = ALL_FILE_DOWNLOAD_MAP.get(filename, {})
            repo_file_path = download_info.get('repository_file_path', filename)
            url = f"https://huggingface.co/{repo_id}/resolve/main/{repo_file_path}"

            try:
                # HEAD with redirects: resolves HF's CDN hop without a download.
                response = requests.head(url, timeout=5, allow_redirects=True)
                if response.status_code >= 400:
                    print(f"❌ Invalid URL for '{display_name}': {url} (Status: {response.status_code})")
                    shared_state.INVALID_MODEL_URLS[display_name] = True
            except requests.RequestException as e:
                # Network failure is treated the same as a bad URL: mark and move on.
                print(f"❌ URL check failed for '{display_name}': {e}")
                shared_state.INVALID_MODEL_URLS[display_name] = True
        print("--- [Setup] ✅ Finished checking model URLs. ---")

    print("--- Starting Application Setup ---")

    # Must run before any module that imports ComfyUI internals.
    setup_comfyui.initialize_comfyui()

    check_all_model_urls_on_startup()

    print("--- Building ControlNet preprocessor maps ---")
    from core.generation_logic import build_reverse_map
    build_reverse_map()
    build_preprocessor_model_map()
    build_preprocessor_parameter_map()
    print("--- ✅ ControlNet preprocessor setup complete. ---")

    print("--- Loading IPAdapter presets ---")
    load_ipadapter_presets()
    print("--- ✅ IPAdapter setup complete. ---")

    print("--- Environment configured. Proceeding with module imports. ---")
    from ui.layout import build_ui
    from ui.events import attach_event_handlers

    print(f"✅ Working directory is stable: {os.getcwd()}")

    demo = build_ui(attach_event_handlers)

    print("--- Launching Gradio Interface ---")
    # 0.0.0.0:7860 is the standard bind for a Hugging Face Space container.
    demo.queue().launch(server_name="0.0.0.0", server_port=7860)


if __name__ == "__main__":
    main()
|
chain_injectors/__init__.py
ADDED
|
File without changes
|
chain_injectors/conditioning_injector.py
ADDED
|
@@ -0,0 +1,81 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
def inject(assembler, chain_definition, chain_items):
    """Splice regional-prompt (area) conditioning into a workflow.

    For every item in *chain_items* that carries a non-empty prompt, builds a
    CLIPTextEncode -> ConditioningSetArea pair, chains the results together
    with ConditioningCombine nodes, and finally combines them with the
    KSampler's original 'positive' conditioning, rewiring that input.

    Args:
        assembler: workflow assembler exposing ``node_map``, ``workflow``,
            ``_get_unique_id()`` and ``_get_node_template(name)``.
        chain_definition: recipe dict; reads 'ksampler_node' (default
            'ksampler') and 'clip_source' in "node_name:output_index" form.
        chain_items: list of dicts with 'prompt' and optional 'width',
            'height', 'x', 'y', 'strength' keys.

    All failure modes warn and return without touching the workflow.
    """
    if not chain_items:
        return

    ksampler_name = chain_definition.get('ksampler_node', 'ksampler')

    target_node_id = None
    target_input_name = None

    if ksampler_name in assembler.node_map:
        ksampler_id = assembler.node_map[ksampler_name]
        if 'positive' in assembler.workflow[ksampler_id]['inputs']:
            target_node_id = ksampler_id
            target_input_name = 'positive'
            print(f"Conditioning injector targeting KSampler node '{ksampler_name}'.")
    else:
        print(f"Warning: KSampler node '{ksampler_name}' for Conditioning chain not found. Skipping.")
        return

    if not target_node_id:
        print("Warning: Conditioning chain could not find a valid injection point (KSampler may be missing 'positive' input). Skipping.")
        return

    clip_source_str = chain_definition.get('clip_source')
    if not clip_source_str:
        print("Warning: 'clip_source' definition missing in the recipe for the Conditioning chain. Skipping.")
        return
    # FIX: the original called clip_source_str.split(':') unconditionally and
    # raised ValueError on malformed recipe values; validate and warn-skip
    # instead, matching every other failure path in this injector.
    clip_node_name, sep, clip_idx_str = clip_source_str.partition(':')
    if not sep or not clip_idx_str.isdigit():
        print(f"Warning: Malformed 'clip_source' '{clip_source_str}' (expected 'node_name:output_index'). Skipping.")
        return
    if clip_node_name not in assembler.node_map:
        print(f"Warning: CLIP source node '{clip_node_name}' for Conditioning chain not found. Skipping.")
        return
    clip_connection = [assembler.node_map[clip_node_name], int(clip_idx_str)]

    # Remember the original link so it can be combined back in at the end.
    original_positive_connection = assembler.workflow[target_node_id]['inputs'][target_input_name]

    area_conditioning_outputs = []

    # One text-encode + area node per regional prompt; blank prompts are skipped.
    for item_data in chain_items:
        prompt = item_data.get('prompt', '')
        if not prompt or not prompt.strip():
            continue

        text_encode_id = assembler._get_unique_id()
        text_encode_node = assembler._get_node_template("CLIPTextEncode")
        text_encode_node['inputs']['text'] = prompt
        text_encode_node['inputs']['clip'] = clip_connection
        assembler.workflow[text_encode_id] = text_encode_node

        set_area_id = assembler._get_unique_id()
        set_area_node = assembler._get_node_template("ConditioningSetArea")
        set_area_node['inputs']['width'] = item_data.get('width', 1024)
        set_area_node['inputs']['height'] = item_data.get('height', 1024)
        set_area_node['inputs']['x'] = item_data.get('x', 0)
        set_area_node['inputs']['y'] = item_data.get('y', 0)
        set_area_node['inputs']['strength'] = item_data.get('strength', 1.0)
        set_area_node['inputs']['conditioning'] = [text_encode_id, 0]
        assembler.workflow[set_area_id] = set_area_node

        area_conditioning_outputs.append([set_area_id, 0])

    # Every prompt may have been blank — nothing to inject.
    if not area_conditioning_outputs:
        return

    # ConditioningCombine is binary, so fold the area outputs pairwise.
    current_combined_conditioning = area_conditioning_outputs[0]
    for extra_output in area_conditioning_outputs[1:]:
        combine_id = assembler._get_unique_id()
        combine_node = assembler._get_node_template("ConditioningCombine")
        combine_node['inputs']['conditioning_1'] = current_combined_conditioning
        combine_node['inputs']['conditioning_2'] = extra_output
        assembler.workflow[combine_id] = combine_node
        current_combined_conditioning = [combine_id, 0]

    # Merge regional conditioning with the original global positive prompt.
    final_combine_id = assembler._get_unique_id()
    final_combine_node = assembler._get_node_template("ConditioningCombine")
    final_combine_node['inputs']['conditioning_1'] = original_positive_connection
    final_combine_node['inputs']['conditioning_2'] = current_combined_conditioning
    assembler.workflow[final_combine_id] = final_combine_node

    assembler.workflow[target_node_id]['inputs'][target_input_name] = [final_combine_id, 0]
    print(f"Conditioning injector applied. Redirected '{target_input_name}' input with {len(area_conditioning_outputs)} regional prompts.")
chain_injectors/controlnet_injector.py
ADDED
|
@@ -0,0 +1,49 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
def inject(assembler, chain_definition, chain_items):
    """Thread the KSampler's conditioning through a chain of ControlNets.

    For each entry in *chain_items*, adds a ControlNetLoader, a LoadImage,
    and an apply node (template taken from the recipe), daisy-chaining the
    positive/negative conditioning links so each ControlNet sees the output
    of the previous one. Finally rewires the KSampler's inputs to the end of
    the chain. Warns and returns untouched on any missing node/input.
    """
    if not chain_items:
        return

    sampler_key = chain_definition.get('ksampler_node', 'ksampler')
    if sampler_key not in assembler.node_map:
        print(f"Warning: Target node '{sampler_key}' for ControlNet chain not found. Skipping chain injection.")
        return

    sampler_id = assembler.node_map[sampler_key]
    sampler_inputs = assembler.workflow[sampler_id]['inputs']

    if not ('positive' in sampler_inputs and 'negative' in sampler_inputs):
        print(f"Warning: KSampler node '{sampler_key}' is missing 'positive' or 'negative' inputs. Skipping ControlNet chain.")
        return

    # Running links: start at the sampler's current conditioning sources and
    # advance to each apply node's outputs as the chain grows.
    pos_link = sampler_inputs['positive']
    neg_link = sampler_inputs['negative']

    for entry in chain_items:
        loader_id = assembler._get_unique_id()
        loader_node = assembler._get_node_template("ControlNetLoader")
        loader_node['inputs']['control_net_name'] = entry['control_net_name']
        assembler.workflow[loader_id] = loader_node

        image_id = assembler._get_unique_id()
        image_node = assembler._get_node_template("LoadImage")
        image_node['inputs']['image'] = entry['image']
        assembler.workflow[image_id] = image_node

        apply_id = assembler._get_unique_id()
        apply_node = assembler._get_node_template(chain_definition['template'])
        apply_node['inputs'].update({
            'strength': entry['strength'],
            'positive': pos_link,
            'negative': neg_link,
            'control_net': [loader_id, 0],
            'image': [image_id, 0],
        })
        assembler.workflow[apply_id] = apply_node

        # Apply node outputs: slot 0 = positive, slot 1 = negative.
        pos_link = [apply_id, 0]
        neg_link = [apply_id, 1]

    sampler_inputs['positive'] = pos_link
    sampler_inputs['negative'] = neg_link

    print(f"ControlNet injector applied. KSampler inputs redirected through {len(chain_items)} ControlNet nodes.")
|
chain_injectors/ipadapter_injector.py
ADDED
|
@@ -0,0 +1,106 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
def inject(assembler, chain_definition, chain_items):
    """Splice an IPAdapter reference-image chain into the workflow.

    Builds a LoadImage -> ImageScaleToTotalPixels -> unified loader ->
    IPAdapterEncoder sub-graph per reference image, merges the resulting
    positive/negative embeds with IPAdapterCombineEmbeds, and applies them
    to the model via a final loader + IPAdapterEmbeds node, rewiring the
    'model' input of the chain's end node.

    Args:
        assembler: workflow assembler exposing ``node_map``, ``workflow``,
            ``_get_unique_id()`` and ``_get_node_template(name)``.
        chain_definition: recipe dict; reads 'end' (name of the node whose
            'model' input is redirected).
        chain_items: per-image dicts ('image', 'preset', 'weight', optional
            'lora_strength'); an optional trailing dict flagged with
            'is_final_settings' carries chain-wide options.

    Warns and returns without modifying the workflow on any missing
    node/input.
    """
    if not chain_items:
        return

    # FIX: the original popped the settings entry off the caller's list,
    # mutating an input argument as a side effect. Work on a shallow copy.
    items = list(chain_items)

    final_settings = {}
    if isinstance(items[-1], dict) and items[-1].get('is_final_settings'):
        final_settings = items.pop()

    if not items:
        return

    end_node_name = chain_definition.get('end')
    if not end_node_name or end_node_name not in assembler.node_map:
        print(f"Warning: Target node '{end_node_name}' for IPAdapter chain not found. Skipping chain injection.")
        return

    end_node_id = assembler.node_map[end_node_name]

    if 'model' not in assembler.workflow[end_node_id]['inputs']:
        print(f"Warning: Target node '{end_node_name}' is missing 'model' input. Skipping IPAdapter chain.")
        return

    current_model_connection = assembler.workflow[end_node_id]['inputs']['model']

    # Scale reference images to roughly the base model's native resolution
    # (1.05 MP for SDXL, 0.39 MP otherwise — presumably SD1.5).
    model_type = final_settings.get('model_type', 'sdxl')
    megapixels = 1.05 if model_type == 'sdxl' else 0.39

    pos_embed_outputs = []
    neg_embed_outputs = []

    for item_data in items:
        # FaceID presets require the dedicated FaceID unified loader.
        loader_type = 'FaceID' if 'FACEID' in item_data.get('preset', '') else 'Unified'
        loader_template_name = ("IPAdapterUnifiedLoaderFaceID"
                                if loader_type == 'FaceID'
                                else "IPAdapterUnifiedLoader")

        image_loader_id = assembler._get_unique_id()
        image_loader_node = assembler._get_node_template("LoadImage")
        image_loader_node['inputs']['image'] = item_data['image']
        assembler.workflow[image_loader_id] = image_loader_node

        image_scaler_id = assembler._get_unique_id()
        image_scaler_node = assembler._get_node_template("ImageScaleToTotalPixels")
        image_scaler_node['inputs']['image'] = [image_loader_id, 0]
        image_scaler_node['inputs']['megapixels'] = megapixels
        image_scaler_node['inputs']['upscale_method'] = "lanczos"
        assembler.workflow[image_scaler_id] = image_scaler_node

        ipadapter_loader_id = assembler._get_unique_id()
        ipadapter_loader_node = assembler._get_node_template(loader_template_name)
        ipadapter_loader_node['inputs']['model'] = current_model_connection
        ipadapter_loader_node['inputs']['preset'] = item_data['preset']
        if loader_type == 'FaceID':
            ipadapter_loader_node['inputs']['lora_strength'] = item_data.get('lora_strength', 0.6)
        assembler.workflow[ipadapter_loader_id] = ipadapter_loader_node

        encoder_id = assembler._get_unique_id()
        encoder_node = assembler._get_node_template("IPAdapterEncoder")
        encoder_node['inputs']['weight'] = item_data['weight']
        # Loader output slot 1 is the ipadapter object (slot 0 is the model).
        encoder_node['inputs']['ipadapter'] = [ipadapter_loader_id, 1]
        encoder_node['inputs']['image'] = [image_scaler_id, 0]
        assembler.workflow[encoder_id] = encoder_node

        pos_embed_outputs.append([encoder_id, 0])
        neg_embed_outputs.append([encoder_id, 1])

    # Merge per-image embeds into a single positive / negative tensor each.
    pos_combiner_id = assembler._get_unique_id()
    pos_combiner_node = assembler._get_node_template("IPAdapterCombineEmbeds")
    pos_combiner_node['inputs']['method'] = final_settings.get('final_combine_method', 'concat')
    for i, conn in enumerate(pos_embed_outputs):
        pos_combiner_node['inputs'][f'embed{i+1}'] = conn
    assembler.workflow[pos_combiner_id] = pos_combiner_node

    neg_combiner_id = assembler._get_unique_id()
    neg_combiner_node = assembler._get_node_template("IPAdapterCombineEmbeds")
    neg_combiner_node['inputs']['method'] = final_settings.get('final_combine_method', 'concat')
    for i, conn in enumerate(neg_embed_outputs):
        neg_combiner_node['inputs'][f'embed{i+1}'] = conn
    assembler.workflow[neg_combiner_id] = neg_combiner_node

    # A final loader + IPAdapterEmbeds node applies the combined embeds.
    final_loader_type = 'FaceID' if 'FACEID' in final_settings.get('final_preset', '') else 'Unified'
    final_loader_template_name = ("IPAdapterUnifiedLoaderFaceID"
                                  if final_loader_type == 'FaceID'
                                  else "IPAdapterUnifiedLoader")

    final_loader_id = assembler._get_unique_id()
    final_loader_node = assembler._get_node_template(final_loader_template_name)
    final_loader_node['inputs']['model'] = current_model_connection
    final_loader_node['inputs']['preset'] = final_settings.get('final_preset', 'STANDARD (medium strength)')
    if final_loader_type == 'FaceID':
        final_loader_node['inputs']['lora_strength'] = final_settings.get('final_lora_strength', 0.6)
    assembler.workflow[final_loader_id] = final_loader_node

    apply_embeds_id = assembler._get_unique_id()
    apply_embeds_node = assembler._get_node_template("IPAdapterEmbeds")
    apply_embeds_node['inputs']['weight'] = final_settings.get('final_weight', 1.0)
    apply_embeds_node['inputs']['embeds_scaling'] = final_settings.get('final_embeds_scaling', 'V only')
    apply_embeds_node['inputs']['model'] = [final_loader_id, 0]
    apply_embeds_node['inputs']['ipadapter'] = [final_loader_id, 1]
    apply_embeds_node['inputs']['pos_embed'] = [pos_combiner_id, 0]
    apply_embeds_node['inputs']['neg_embed'] = [neg_combiner_id, 0]
    assembler.workflow[apply_embeds_id] = apply_embeds_node

    assembler.workflow[end_node_id]['inputs']['model'] = [apply_embeds_id, 0]
    print(f"IPAdapter injector applied. Redirected '{end_node_name}' model input through {len(items)} reference images.")
|
comfy_integration/__init__.py
ADDED
|
File without changes
|
comfy_integration/nodes.py
ADDED
|
@@ -0,0 +1,39 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""Bridge module exposing ComfyUI node classes to the rest of the app.

Importing this module has side effects: it spins up a minimal PromptServer
event loop, loads ComfyUI's extra/custom nodes, and binds frequently used
node classes to module-level names.

NOTE(review): reconstructed from a rendered diff; original indentation was
lost, so nesting is a best guess — verify against the repository.
"""
import asyncio
import execution
import server
from nodes import (
    init_extra_nodes, CheckpointLoaderSimple, EmptyLatentImage, KSampler,
    VAEDecode, SaveImage, NODE_CLASS_MAPPINGS, LoadImage, VAEEncode,
    VAEEncodeForInpaint, ImagePadForOutpaint, LatentUpscaleBy, RepeatLatentBatch
)


def import_custom_nodes() -> None:
    """Initialize ComfyUI's custom-node machinery headlessly.

    init_extra_nodes() expects a PromptServer instance to exist, so a
    throwaway server + queue is created on a fresh event loop before the
    coroutine is driven to completion.
    """
    loop = asyncio.new_event_loop()
    asyncio.set_event_loop(loop)
    server_instance = server.PromptServer(loop)
    execution.PromptQueue(server_instance)

    loop.run_until_complete(init_extra_nodes())

# Runs at import time: custom nodes must be registered before the lookups below.
import_custom_nodes()

# Classes only available via the registry after custom-node initialization.
CLIPTextEncode = NODE_CLASS_MAPPINGS['CLIPTextEncode']
CLIPTextEncodeSDXL = NODE_CLASS_MAPPINGS['CLIPTextEncodeSDXL']
LoraLoader = NODE_CLASS_MAPPINGS['LoraLoader']
CLIPSetLastLayer = NODE_CLASS_MAPPINGS['CLIPSetLastLayer']

# Pull the UI's sampler/scheduler option lists straight from KSampler's
# declared input types; fall back to a minimal hard-coded list on failure.
try:
    KSamplerNode = NODE_CLASS_MAPPINGS['KSampler']
    SAMPLER_CHOICES = KSamplerNode.INPUT_TYPES()["required"]["sampler_name"][0]
    SCHEDULER_CHOICES = KSamplerNode.INPUT_TYPES()["required"]["scheduler"][0]
except Exception:
    print("⚠️ Could not dynamically get sampler/scheduler choices, using fallback list.")
    SAMPLER_CHOICES = ['euler', 'dpmpp_2m_sde_gpu']
    SCHEDULER_CHOICES = ['normal', 'karras']

# Shared, stateless node instances reused across requests.
checkpointloadersimple = CheckpointLoaderSimple()
loraloader = LoraLoader()


print("✅ ComfyUI custom nodes and class mappings are ready.")
|
comfy_integration/setup.py
ADDED
|
@@ -0,0 +1,73 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import os
|
| 2 |
+
import sys
|
| 3 |
+
import shutil
|
| 4 |
+
|
| 5 |
+
from core.settings import *
|
| 6 |
+
|
| 7 |
+
def move_and_overwrite(src, dst):
    """Move *src* to *dst*, replacing whatever already exists at *dst*.

    Handles both files and directories; if *src* is neither (e.g. it does
    not exist), the call is a no-op. The existing destination is removed
    first — for directories this prevents shutil.move from nesting *src*
    inside *dst* instead of replacing it.
    """
    if os.path.isdir(src):
        if os.path.exists(dst):
            shutil.rmtree(dst)
        shutil.move(src, dst)
        return
    if os.path.isfile(src):
        if os.path.exists(dst):
            os.remove(dst)
        shutil.move(src, dst)
|
| 16 |
+
|
| 17 |
+
def initialize_comfyui():
    """Clone ComfyUI and required extensions, then prepare model directories.

    Merges a fresh ComfyUI checkout into the application root so ComfyUI's
    own modules (nodes, comfy, server, ...) become importable as top-level
    packages, clones the controlnet_aux and IPAdapter_plus custom-node
    repos, and creates every model directory the app downloads into.

    Must run before any module that imports ComfyUI internals.
    Relies on the directory constants star-imported from core.settings
    (CHECKPOINT_DIR, LORA_DIR, etc.).

    NOTE(review): reconstructed from a rendered diff; original indentation
    was lost, so nesting is a best guess — verify against the repository.
    """
    # Assumes app.py inserted the project root at sys.path[0] — TODO confirm
    # this holds for every launch path.
    APP_DIR = sys.path[0]
    COMFYUI_TEMP_DIR = "ComfyUI_temp"

    print("--- Cloning ComfyUI Repository ---")
    if not os.path.exists(COMFYUI_TEMP_DIR):
        # NOTE(review): os.system with an interpolated path; paths here are
        # constants, but subprocess.run([...]) would be the safer idiom.
        os.system(f"git clone https://github.com/comfyanonymous/ComfyUI {COMFYUI_TEMP_DIR}")
        print("✅ ComfyUI repository cloned.")
    else:
        print("✅ ComfyUI repository already exists.")

    # Move every top-level entry (except VCS metadata) into the app root,
    # overwriting whatever is already there.
    print(f"--- Merging ComfyUI from '{COMFYUI_TEMP_DIR}' to '{APP_DIR}' ---")
    for item in os.listdir(COMFYUI_TEMP_DIR):
        src_path = os.path.join(COMFYUI_TEMP_DIR, item)
        dst_path = os.path.join(APP_DIR, item)
        if item == '.git':
            continue
        move_and_overwrite(src_path, dst_path)

    try:
        shutil.rmtree(COMFYUI_TEMP_DIR)
        print("✅ ComfyUI merged and temporary directory removed.")
    except OSError as e:
        # Leftover temp dir is harmless; the clone-guard above skips re-cloning.
        print(f"⚠️ Could not remove temporary directory '{COMFYUI_TEMP_DIR}': {e}")


    print("--- Cloning third-party extensions for ComfyUI ---")
    controlnet_aux_path = os.path.join(APP_DIR, "custom_nodes", "comfyui_controlnet_aux")
    if not os.path.exists(controlnet_aux_path):
        os.system(f"git clone https://github.com/Fannovel16/comfyui_controlnet_aux.git {controlnet_aux_path}")
        print("✅ comfyui_controlnet_aux extension cloned.")
    else:
        print("✅ comfyui_controlnet_aux extension already exists.")

    ipadapter_plus_path = os.path.join(APP_DIR, "custom_nodes", "ComfyUI_IPAdapter_plus")
    if not os.path.exists(ipadapter_plus_path):
        os.system(f"git clone https://github.com/cubiq/ComfyUI_IPAdapter_plus.git {ipadapter_plus_path}")
        print("✅ ComfyUI_IPAdapter_plus extension cloned.")
    else:
        print("✅ ComfyUI_IPAdapter_plus extension already exists.")

    print(f"✅ Current working directory is: {os.getcwd()}")

    # Deferred import: only resolvable after the merge above put 'comfy' in place.
    import comfy.model_management
    print("--- Environment Ready ---")

    print("✅ ComfyUI initialized with default attention mechanism.")

    # Ensure every directory the downloader writes into exists up front.
    os.makedirs(os.path.join(APP_DIR, CHECKPOINT_DIR), exist_ok=True)
    os.makedirs(os.path.join(APP_DIR, LORA_DIR), exist_ok=True)
    os.makedirs(os.path.join(APP_DIR, EMBEDDING_DIR), exist_ok=True)
    os.makedirs(os.path.join(APP_DIR, CONTROLNET_DIR), exist_ok=True)
    os.makedirs(os.path.join(APP_DIR, DIFFUSION_MODELS_DIR), exist_ok=True)
    os.makedirs(os.path.join(APP_DIR, VAE_DIR), exist_ok=True)
    os.makedirs(os.path.join(APP_DIR, TEXT_ENCODERS_DIR), exist_ok=True)
    os.makedirs(os.path.join(APP_DIR, INPUT_DIR), exist_ok=True)
    print("✅ All required model directories are present.")
|
core/__init__.py
ADDED
|
File without changes
|
core/generation_logic.py
ADDED
|
@@ -0,0 +1,25 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""Entry points wiring the UI layer to the generation pipelines.

NOTE(review): reconstructed from a rendered diff; original indentation was
lost, so nesting is a best guess — verify against the repository.
"""
from typing import Any, Dict
import gradio as gr

from core.pipelines.controlnet_preprocessor import ControlNetPreprocessorPipeline
from core.pipelines.sd_image_pipeline import SdImagePipeline

# Module-level singletons shared across all requests.
controlnet_preprocessor_pipeline = ControlNetPreprocessorPipeline()
sd_image_pipeline = SdImagePipeline()


def build_reverse_map():
    """Populate the preprocessor module's display-name -> node-class map.

    Deferred imports: ComfyUI's `nodes` module only exists after
    initialize_comfyui() has run. The map is built once (guarded by the
    None check) and a legacy display-name alias is patched in if absent.
    """
    from nodes import NODE_DISPLAY_NAME_MAPPINGS
    import core.pipelines.controlnet_preprocessor as cn_module

    if cn_module.REVERSE_DISPLAY_NAME_MAP is None:
        cn_module.REVERSE_DISPLAY_NAME_MAP = {v: k for k, v in NODE_DISPLAY_NAME_MAPPINGS.items()}
        if "Semantic Segmentor (legacy, alias for UniFormer)" not in cn_module.REVERSE_DISPLAY_NAME_MAP:
            cn_module.REVERSE_DISPLAY_NAME_MAP["Semantic Segmentor (legacy, alias for UniFormer)"] = "SemSegPreprocessor"


def run_cn_preprocessor_entry(*args, **kwargs):
    """Thin pass-through so UI callbacks don't hold a pipeline reference."""
    return controlnet_preprocessor_pipeline.run(*args, **kwargs)


def generate_image_wrapper(ui_inputs: dict, progress=gr.Progress(track_tqdm=True)):
    """Run the SD image pipeline for one generation request.

    The gr.Progress default is the Gradio convention for progress tracking
    (injected per call by Gradio, so the shared default is intentional).
    """
    return sd_image_pipeline.run(ui_inputs=ui_inputs, progress=progress)
|
core/model_manager.py
ADDED
|
@@ -0,0 +1,142 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import gc
|
| 2 |
+
from typing import Dict, List, Any, Set
|
| 3 |
+
|
| 4 |
+
import torch
|
| 5 |
+
import gradio as gr
|
| 6 |
+
from comfy import model_management
|
| 7 |
+
|
| 8 |
+
from core.settings import ALL_MODEL_MAP, CHECKPOINT_DIR, LORA_DIR, DIFFUSION_MODELS_DIR, VAE_DIR, TEXT_ENCODERS_DIR
|
| 9 |
+
from comfy_integration.nodes import checkpointloadersimple, LoraLoader
|
| 10 |
+
from nodes import NODE_CLASS_MAPPINGS
|
| 11 |
+
from utils.app_utils import get_value_at_index, _ensure_model_downloaded
|
| 12 |
+
|
| 13 |
+
|
| 14 |
+
class ModelManager:
    """Process-wide singleton that owns the model lifecycle.

    Responsibilities: ensuring model files exist on disk, loading them to
    CPU/RAM (optionally merging LoRAs into checkpoints), handing checkpoint
    patchers to comfy's GPU scheduler, and unloading everything when the
    required set or the LoRA configuration changes.
    """

    _instance = None

    def __new__(cls, *args, **kwargs):
        if not cls._instance:
            # BUGFIX: forwarding *args/**kwargs to object.__new__ raises
            # TypeError on Python 3 when __new__ is overridden; pass cls only.
            cls._instance = super().__new__(cls)
        return cls._instance

    def __init__(self):
        # Guard so the singleton initializes exactly once even though
        # __init__ runs on every ModelManager() call.
        if hasattr(self, 'initialized'):
            return
        # Maps display name -> the loader node's output tuple (model, clip, vae, ...).
        self.loaded_models: Dict[str, Any] = {}
        self.initialized = True
        print("✅ ModelManager initialized.")

    def get_loaded_model_names(self) -> Set[str]:
        """Return the display names of all models currently resident in RAM."""
        return set(self.loaded_models.keys())

    def _load_single_model(self, display_name: str, progress) -> Any:
        """Download (if needed) and load one model to CPU/RAM.

        Returns the loader node's output tuple. LoRAs are a special case:
        only the filename is returned, since they are merged into a base
        checkpoint later. Raises ValueError for unconfigured model types.
        """
        print(f"--- [ModelManager] Loading model: '{display_name}' ---")

        filename = _ensure_model_downloaded(display_name, progress)

        _, _, model_type, _ = ALL_MODEL_MAP[display_name]

        # Factories keep loader-node construction lazy so only the loader
        # actually needed is instantiated (the old dict built all of them
        # eagerly on every call).
        loader_factories = {
            "SDXL": lambda: (checkpointloadersimple, "load_checkpoint", {"ckpt_name": filename}),
            "SD1.5": lambda: (checkpointloadersimple, "load_checkpoint", {"ckpt_name": filename}),
            "UNET": lambda: (NODE_CLASS_MAPPINGS["UNETLoader"](), "load_unet", {"unet_name": filename, "weight_dtype": "default"}),
            "VAE": lambda: (NODE_CLASS_MAPPINGS["VAELoader"](), "load_vae", {"vae_name": filename}),
            "TEXT_ENCODER": lambda: (NODE_CLASS_MAPPINGS["CLIPLoader"](), "load_clip", {"clip_name": filename, "type": "wan", "device": "default"}),
        }

        if model_type not in loader_factories:
            if model_type == "LORA":
                print(f"--- [ModelManager] ✅ '{display_name}' is a LoRA. It will be loaded dynamically. ---")
                return (filename,)
            raise ValueError(f"[ModelManager] No loader configured for model type '{model_type}'")

        loader_instance, method_name, kwargs = loader_factories[model_type]()

        load_method = getattr(loader_instance, method_name)
        loaded_tuple = load_method(**kwargs)

        print(f"--- [ModelManager] ✅ Successfully loaded '{display_name}' to CPU/RAM ---")
        return loaded_tuple

    def move_models_to_gpu(self, required_models: List[str]):
        """Hand the loaded checkpoint patchers to comfy's GPU scheduler."""
        print(f"--- [ModelManager] Moving models to GPU: {required_models} ---")
        models_to_load_gpu = []
        for name in required_models:
            if name in self.loaded_models:
                model_tuple = self.loaded_models[name]
                _, _, model_type, _ = ALL_MODEL_MAP[name]
                # Only full checkpoints carry a patcher at index 0 that
                # model_management can place on the GPU.
                if model_type in ["SDXL", "SD1.5"]:
                    models_to_load_gpu.append(get_value_at_index(model_tuple, 0))

        if models_to_load_gpu:
            model_management.load_models_gpu(models_to_load_gpu)
            print("--- [ModelManager] ✅ Models successfully moved to GPU. ---")
        else:
            print("--- [ModelManager] ⚠️ No checkpoint models found to move to GPU. ---")

    def ensure_models_downloaded(self, required_models: List[str], progress):
        """Download any missing model files to disk; raises gr.Error on failure."""
        print(f"--- [ModelManager] Ensuring models are downloaded: {required_models} ---")
        for i, display_name in enumerate(required_models):
            # `progress` may be None or a non-callable sentinel in some call paths.
            if progress and hasattr(progress, '__call__'):
                progress(i / len(required_models), desc=f"Checking file: {display_name}")
            try:
                _ensure_model_downloaded(display_name, progress)
            except Exception as e:
                raise gr.Error(f"Failed to download model '{display_name}'. Reason: {e}")
        print(f"--- [ModelManager] ✅ All required models are present on disk. ---")

    def load_managed_models(self, required_models: List[str], active_loras: List[Dict[str, Any]], progress) -> Dict[str, Any]:
        """Reconcile RAM state with `required_models`, merging LoRAs on CPU.

        LoRAs are baked into the checkpoint weights, so any active LoRA (or a
        model swap) forces a full unload/reload to start from clean weights.
        Returns a mapping of display name -> loaded tuple for the required set.
        """
        required_set = set(required_models)
        current_set = set(self.loaded_models.keys())

        # A LoRA being active at all invalidates cached (possibly already
        # patched) checkpoints; so does having stale models loaded.
        loras_changed = len(active_loras) > 0 or len(current_set - required_set) > 0

        models_to_unload = current_set - required_set
        if models_to_unload or loras_changed:
            if models_to_unload:
                print(f"--- [ModelManager] Models to unload: {models_to_unload} ---")
            if loras_changed and not models_to_unload:
                models_to_unload = current_set.intersection(required_set)
                print(f"--- [ModelManager] LoRA configuration changed. Reloading base model(s): {models_to_unload} ---")

            model_management.unload_all_models()
            self.loaded_models.clear()
            gc.collect()
            torch.cuda.empty_cache()
            print("--- [ModelManager] All models unloaded to free RAM. ---")

        models_to_load = required_set if (models_to_unload or loras_changed) else required_set - current_set

        if models_to_load:
            print(f"--- [ModelManager] Models to load: {models_to_load} ---")
            for i, display_name in enumerate(models_to_load):
                progress(i / len(models_to_load), desc=f"Loading model: {display_name}")
                try:
                    loaded_model_data = self._load_single_model(display_name, progress)

                    # Merge LoRAs into checkpoint models while still on CPU.
                    if active_loras and ALL_MODEL_MAP[display_name][2] in ["SDXL", "SD1.5"]:
                        print(f"--- [ModelManager] Applying {len(active_loras)} LoRAs on CPU... ---")
                        lora_loader = LoraLoader()
                        patched_model, patched_clip = loaded_model_data[0], loaded_model_data[1]

                        for lora_info in active_loras:
                            patched_model, patched_clip = lora_loader.load_lora(
                                model=patched_model,
                                clip=patched_clip,
                                lora_name=lora_info["lora_name"],
                                strength_model=lora_info["strength_model"],
                                strength_clip=lora_info["strength_clip"]
                            )

                        # Keep the VAE (index 2) from the original load.
                        loaded_model_data = (patched_model, patched_clip, loaded_model_data[2])
                        print(f"--- [ModelManager] ✅ All LoRAs merged into the model on CPU. ---")

                    self.loaded_models[display_name] = loaded_model_data
                except Exception as e:
                    raise gr.Error(f"Failed to load model or apply LoRA '{display_name}'. Reason: {e}")
        else:
            print(f"--- [ModelManager] All required models are already loaded. ---")

        return {name: self.loaded_models[name] for name in required_models}


model_manager = ModelManager()
|
core/pipelines/__init__.py
ADDED
|
File without changes
|
core/pipelines/base_pipeline.py
ADDED
|
@@ -0,0 +1,53 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from abc import ABC, abstractmethod
|
| 2 |
+
from typing import List, Any, Dict
|
| 3 |
+
import gradio as gr
|
| 4 |
+
import spaces
|
| 5 |
+
import tempfile
|
| 6 |
+
import imageio
|
| 7 |
+
import numpy as np
|
| 8 |
+
|
| 9 |
+
class BasePipeline(ABC):
    """Abstract base for all generation pipelines.

    Shared plumbing: access to the singleton ModelManager, ZeroGPU-decorated
    execution of a GPU-bound callable with a per-call duration, and CPU-side
    MP4 encoding of a frame tensor.
    """

    def __init__(self):
        # Imported locally to avoid a circular import at module load time.
        from core.model_manager import model_manager
        self.model_manager = model_manager

    @abstractmethod
    def get_required_models(self, **kwargs) -> List[str]:
        """Return display names of every model this run needs available."""
        pass

    @abstractmethod
    def run(self, *args, progress: gr.Progress, **kwargs) -> Any:
        """Execute the pipeline end to end and return its UI-facing result."""
        pass

    def _ensure_models_downloaded(self, progress: gr.Progress, **kwargs):
        """Ensures model files are downloaded before requesting GPU."""
        required_models = self.get_required_models(**kwargs)
        self.model_manager.ensure_models_downloaded(required_models, progress=progress)

    def _execute_gpu_logic(self, gpu_function: callable, duration: int, default_duration: int, task_name: str, *args, **kwargs):
        """Run `gpu_function` under a ZeroGPU lease of `duration` seconds.

        Falls back to `default_duration` when the user-supplied duration is
        None, non-positive, or not parseable as an int.
        """
        final_duration = default_duration
        try:
            if duration is not None and int(duration) > 0:
                final_duration = int(duration)
        except (ValueError, TypeError):
            print(f"Invalid ZeroGPU duration input for {task_name}. Using default {default_duration}s.")
            pass

        print(f"Requesting ZeroGPU for {task_name} with duration: {final_duration} seconds.")
        # The decorator is applied per call so each request carries its own duration.
        gpu_runner = spaces.GPU(duration=final_duration)(gpu_function)

        return gpu_runner(*args, **kwargs)

    def _encode_video_from_frames(self, frames_tensor_cpu: 'torch.Tensor', fps: int, progress: gr.Progress) -> str:
        """Encode a float frame tensor (values in [0, 1]) to an MP4 and return its path."""
        progress(0.9, desc="Encoding video on CPU...")
        frames_np = (frames_tensor_cpu.numpy() * 255.0).astype(np.uint8)

        # delete=False so the file outlives this scope and Gradio can serve it.
        with tempfile.NamedTemporaryFile(suffix=".mp4", delete=False) as temp_video_file:
            video_path = temp_video_file.name
            writer = imageio.get_writer(video_path, fps=fps, codec='libx264', quality=8)
            for frame in frames_np:
                writer.append_data(frame)
            writer.close()

        progress(1.0, desc="Done!")
        return video_path
|
core/pipelines/controlnet_preprocessor.py
ADDED
|
@@ -0,0 +1,143 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from typing import Dict, Any, List
|
| 2 |
+
import imageio
|
| 3 |
+
import tempfile
|
| 4 |
+
import numpy as np
|
| 5 |
+
import torch
|
| 6 |
+
import gradio as gr
|
| 7 |
+
from PIL import Image
|
| 8 |
+
import spaces
|
| 9 |
+
|
| 10 |
+
from .base_pipeline import BasePipeline
|
| 11 |
+
from comfy_integration.nodes import NODE_CLASS_MAPPINGS
|
| 12 |
+
from nodes import NODE_DISPLAY_NAME_MAPPINGS
|
| 13 |
+
from utils.app_utils import get_value_at_index
|
| 14 |
+
|
| 15 |
+
# Filled in lazily by `build_reverse_map()` at startup: maps a node's
# human-readable display name back to its registered class name.
REVERSE_DISPLAY_NAME_MAP = None
# Preprocessors cheap enough to run on CPU without requesting a ZeroGPU lease.
# (Names must match the display names, including the "Color Pallete" typo.)
CPU_ONLY_PREPROCESSORS = {
    "Binary Lines", "Canny Edge", "Color Pallete", "Fake Scribble Lines (aka scribble_hed)",
    "Image Intensity", "Image Luminance", "Inpaint Preprocessor", "PyraCanny", "Scribble Lines",
    "Scribble XDoG Lines", "Standard Lineart", "Content Shuffle", "Tile"
}
|
| 21 |
+
|
| 22 |
+
def run_node_by_function_name(node_instance: Any, **kwargs) -> Any:
    """Invoke a ComfyUI node through the method named by its FUNCTION attribute.

    ComfyUI nodes declare their entry point as a class-level FUNCTION string;
    this resolves and calls it with `kwargs`, raising AttributeError when the
    attribute is missing or does not name a callable method.
    """
    node_class = type(node_instance)
    function_name = getattr(node_class, 'FUNCTION', None)
    if not function_name:
        raise AttributeError(f"Node class '{node_class.__name__}' is missing the required 'FUNCTION' attribute.")
    execution_method = getattr(node_instance, function_name, None)
    if callable(execution_method):
        return execution_method(**kwargs)
    raise AttributeError(f"Method '{function_name}' not found or not callable on node '{node_class.__name__}'.")
|
| 31 |
+
|
| 32 |
+
class ControlNetPreprocessorPipeline(BasePipeline):
    """Runs a ControlNet preprocessor node over a single image or every frame of a video."""

    def get_required_models(self, **kwargs) -> List[str]:
        # Preprocessor checkpoints are resolved/downloaded by the nodes themselves.
        return []

    def _gpu_logic(
        self, pil_images: List[Image.Image], preprocessor_name: str, model_name: str,
        params: Dict[str, Any], progress=gr.Progress(track_tqdm=True)
    ) -> List[Image.Image]:
        """Apply the named preprocessor to each PIL frame; returns the processed frames.

        Despite the name this also serves the CPU path — `run` calls it directly
        (without the ZeroGPU decorator) for CPU-only preprocessors.
        """
        global REVERSE_DISPLAY_NAME_MAP
        if REVERSE_DISPLAY_NAME_MAP is None:
            raise RuntimeError("REVERSE_DISPLAY_NAME_MAP has not been initialized. `build_reverse_map` must be called on startup.")

        class_name = REVERSE_DISPLAY_NAME_MAP.get(preprocessor_name)
        if not class_name or class_name not in NODE_CLASS_MAPPINGS:
            raise ValueError(f"Preprocessor '{preprocessor_name}' not found.")

        preprocessor_instance = NODE_CLASS_MAPPINGS[class_name]()
        call_args = {**params, 'ckpt_name': model_name}

        processed_pil_images = []
        total_frames = len(pil_images)

        for i, frame_pil in enumerate(pil_images):
            progress(i / total_frames, desc=f"Processing frame {i+1}/{total_frames} with {preprocessor_name}...")

            # Tensor layout after unsqueeze is (1, H, W, C).
            frame_tensor = torch.from_numpy(np.array(frame_pil).astype(np.float32) / 255.0).unsqueeze(0)

            # BUGFIX: previously max(shape[2], shape[3]) == max(width, channels),
            # which silently ignored the height. Use the larger spatial dim (H, W).
            resolution_arg = {'resolution': max(frame_tensor.shape[1], frame_tensor.shape[2])}

            result_tuple = run_node_by_function_name(
                preprocessor_instance,
                image=frame_tensor,
                **resolution_arg,
                **call_args
            )

            processed_tensor = get_value_at_index(result_tuple, 0)
            processed_np = (processed_tensor.squeeze(0).cpu().numpy().clip(0, 1) * 255.0).astype(np.uint8)
            processed_pil_images.append(Image.fromarray(processed_np))

        return processed_pil_images

    def run(self, input_type, image_input, video_input, preprocessor_name, model_name, zero_gpu_duration, *args, progress=gr.Progress(track_tqdm=True)):
        """UI entry point.

        Decodes the input (image or video), re-associates the flat positional
        `*args` with the preprocessor's declared parameters, runs on CPU or
        under a ZeroGPU lease, and returns either a list of PIL images or a
        single-element list containing an encoded video path.
        """
        from utils import app_utils
        pil_images, is_video, fps = [], False, 30

        progress(0, desc="Reading input file...")
        if input_type == "Image":
            if image_input is None: raise gr.Error("Please provide an input image.")
            pil_images = [image_input]
        elif input_type == "Video":
            if video_input is None: raise gr.Error("Please provide an input video.")
            try:
                video_reader = imageio.get_reader(video_input)
                meta = video_reader.get_meta_data()
                fps = meta.get('fps', 30)
                pil_images = [Image.fromarray(frame) for frame in video_reader]
                is_video = True
                video_reader.close()
            except Exception as e: raise gr.Error(f"Failed to read video file: {e}")
        else:
            raise gr.Error("Invalid input type selected.")

        if not pil_images: raise gr.Error("Could not extract any frames from the input.")

        if app_utils.PREPROCESSOR_PARAMETER_MAP is None:
            raise RuntimeError("Preprocessor parameter map is not built. Check startup logs.")

        # The UI lays controls out as sliders, then dropdowns, then checkboxes;
        # `*args` arrives in that order, so rebuild the same ordering here to
        # re-associate values with parameter names.
        params_config = app_utils.PREPROCESSOR_PARAMETER_MAP.get(preprocessor_name, [])
        sliders_params = [p for p in params_config if p['type'] in ["INT", "FLOAT"]]
        dropdown_params = [p for p in params_config if isinstance(p['type'], list)]
        checkbox_params = [p for p in params_config if p['type'] == "BOOLEAN"]
        ordered_params_config = sliders_params + dropdown_params + checkbox_params
        param_names = [p['name'] for p in ordered_params_config]
        provided_params = {param_names[i]: args[i] for i in range(len(param_names))}

        if preprocessor_name not in CPU_ONLY_PREPROCESSORS:
            print(f"--- '{preprocessor_name}' requires GPU, requesting ZeroGPU. ---")
            try:
                processed_pil_images = self._execute_gpu_logic(
                    self._gpu_logic,
                    duration=zero_gpu_duration,
                    default_duration=60,
                    task_name=f"Preprocessor '{preprocessor_name}'",
                    pil_images=pil_images,
                    preprocessor_name=preprocessor_name,
                    model_name=model_name,
                    params=provided_params,
                    progress=progress
                )
            except Exception as e:
                import traceback; traceback.print_exc()
                raise gr.Error(f"Failed to run preprocessor '{preprocessor_name}' on GPU: {e}")
        else:
            print(f"--- Running '{preprocessor_name}' on CPU, no ZeroGPU requested. ---")
            try:
                processed_pil_images = self._gpu_logic(pil_images, preprocessor_name, model_name, provided_params, progress=progress)
            except Exception as e:
                import traceback; traceback.print_exc()
                raise gr.Error(f"Failed to run preprocessor '{preprocessor_name}' on CPU: {e}")

        if not processed_pil_images: raise gr.Error("Processing returned no frames.")

        progress(0.9, desc="Finalizing output...")
        if is_video:
            frames_np = [np.array(img) for img in processed_pil_images]
            frames_tensor = torch.from_numpy(np.stack(frames_np)).to(torch.float32) / 255.0
            video_path = self._encode_video_from_frames(frames_tensor, fps, progress)
            return [video_path]
        else:
            progress(1.0, desc="Done!")
            return processed_pil_images
|
core/pipelines/sd_image_pipeline.py
ADDED
|
@@ -0,0 +1,434 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import os
|
| 2 |
+
import random
|
| 3 |
+
import shutil
|
| 4 |
+
import torch
|
| 5 |
+
import gradio as gr
|
| 6 |
+
from PIL import Image, ImageChops
|
| 7 |
+
from typing import List, Dict, Any
|
| 8 |
+
from collections import defaultdict, deque
|
| 9 |
+
import numpy as np
|
| 10 |
+
|
| 11 |
+
from .base_pipeline import BasePipeline
|
| 12 |
+
from core.settings import *
|
| 13 |
+
from comfy_integration.nodes import *
|
| 14 |
+
from utils.app_utils import get_value_at_index, sanitize_prompt, get_lora_path, get_embedding_path, ensure_controlnet_model_downloaded, ensure_ipadapter_models_downloaded, sanitize_filename
|
| 15 |
+
from core.workflow_assembler import WorkflowAssembler
|
| 16 |
+
|
| 17 |
+
class SdImagePipeline(BasePipeline):
|
| 18 |
+
def get_required_models(self, model_display_name: str, **kwargs) -> List[str]:
|
| 19 |
+
return [model_display_name]
|
| 20 |
+
|
| 21 |
+
def _topological_sort(self, workflow: Dict[str, Any]) -> List[str]:
|
| 22 |
+
graph = defaultdict(list)
|
| 23 |
+
in_degree = {node_id: 0 for node_id in workflow}
|
| 24 |
+
|
| 25 |
+
for node_id, node_info in workflow.items():
|
| 26 |
+
for input_value in node_info.get('inputs', {}).values():
|
| 27 |
+
if isinstance(input_value, list) and len(input_value) == 2 and isinstance(input_value[0], str):
|
| 28 |
+
source_node_id = input_value[0]
|
| 29 |
+
if source_node_id in workflow:
|
| 30 |
+
graph[source_node_id].append(node_id)
|
| 31 |
+
in_degree[node_id] += 1
|
| 32 |
+
|
| 33 |
+
queue = deque([node_id for node_id, degree in in_degree.items() if degree == 0])
|
| 34 |
+
|
| 35 |
+
sorted_nodes = []
|
| 36 |
+
while queue:
|
| 37 |
+
current_node_id = queue.popleft()
|
| 38 |
+
sorted_nodes.append(current_node_id)
|
| 39 |
+
|
| 40 |
+
for neighbor_node_id in graph[current_node_id]:
|
| 41 |
+
in_degree[neighbor_node_id] -= 1
|
| 42 |
+
if in_degree[neighbor_node_id] == 0:
|
| 43 |
+
queue.append(neighbor_node_id)
|
| 44 |
+
|
| 45 |
+
if len(sorted_nodes) != len(workflow):
|
| 46 |
+
raise RuntimeError("Workflow contains a cycle and cannot be executed.")
|
| 47 |
+
|
| 48 |
+
return sorted_nodes
|
| 49 |
+
|
| 50 |
+
|
| 51 |
+
    def _execute_workflow(self, workflow: Dict[str, Any], initial_objects: Dict[str, Any]):
        """Execute a ComfyUI-style workflow graph in-process.

        `workflow` maps node-id -> {'class_type', 'inputs'}; an input value of
        the form [source_id, output_index] is a link to an upstream node's
        output. `initial_objects` pre-seeds outputs (e.g. the already-loaded
        checkpoint) so those nodes are skipped. Returns the image tensor
        routed into the final 'SaveImage' node instead of writing to disk.
        """
        with torch.no_grad():
            computed_outputs = initial_objects

            try:
                sorted_node_ids = self._topological_sort(workflow)
                print(f"--- [Workflow Executor] Execution order: {sorted_node_ids}")
            except RuntimeError as e:
                # Dump the full link structure so a cycle is diagnosable from logs.
                print("--- [Workflow Executor] ERROR: Failed to sort workflow. Dumping graph details. ---")
                for node_id, node_info in workflow.items():
                    print(f"  Node {node_id} ({node_info['class_type']}):")
                    for input_name, input_value in node_info['inputs'].items():
                        if isinstance(input_value, list) and len(input_value) == 2 and isinstance(input_value[0], str):
                            print(f"    - {input_name} <- [{input_value[0]}, {input_value[1]}]")
                raise e

            for node_id in sorted_node_ids:
                # Pre-seeded nodes (injected models) are not re-executed.
                if node_id in computed_outputs:
                    continue

                node_info = workflow[node_id]
                class_type = node_info['class_type']

                node_class = NODE_CLASS_MAPPINGS.get(class_type)
                if node_class is None:
                    raise RuntimeError(f"Could not find node class '{class_type}'. Is it imported in comfy_integration/nodes.py?")

                node_instance = node_class()

                # Resolve inputs: links pull from upstream outputs; literals pass through.
                kwargs = {}
                for param_name, param_value in node_info['inputs'].items():
                    if isinstance(param_value, list) and len(param_value) == 2 and isinstance(param_value[0], str):
                        source_node_id, output_index = param_value
                        if source_node_id not in computed_outputs:
                            raise RuntimeError(f"Workflow integrity error: Output of node {source_node_id} needed for {node_id} but not yet computed.")

                        source_output_tuple = computed_outputs[source_node_id]
                        kwargs[param_name] = get_value_at_index(source_output_tuple, output_index)
                    else:
                        kwargs[param_name] = param_value

                # ComfyUI nodes declare their entry point via the FUNCTION class attribute.
                function_name = getattr(node_class, 'FUNCTION')
                execution_method = getattr(node_instance, function_name)

                result = execution_method(**kwargs)
                computed_outputs[node_id] = result

            # Locate the terminal SaveImage node (last one in execution order).
            final_node_id = None
            for node_id in reversed(sorted_node_ids):
                if workflow[node_id]['class_type'] == 'SaveImage':
                    final_node_id = node_id
                    break

            if not final_node_id:
                raise RuntimeError("Workflow does not contain a 'SaveImage' node as the output.")

            # Return the tensor feeding SaveImage rather than executing the save.
            save_image_inputs = workflow[final_node_id]['inputs']
            image_source_node_id, image_source_index = save_image_inputs['images']

            return get_value_at_index(computed_outputs[image_source_node_id], image_source_index)
|
| 111 |
+
|
| 112 |
+
    def _gpu_logic(self, ui_inputs: Dict, loras_string: str, required_models_for_gpu: List[str], workflow: Dict[str, Any], assembler: 'WorkflowAssembler', progress=gr.Progress(track_tqdm=True)):
        """GPU-side half of `run`: move models to VRAM, execute the assembled
        workflow, and convert the decoded tensor batch into PIL images with an
        A1111-style 'parameters' metadata string attached to each.
        """
        model_display_name = ui_inputs['model_display_name']

        progress(0.1, desc="Moving models to GPU...")
        self.model_manager.move_models_to_gpu(required_models_for_gpu)

        progress(0.4, desc="Executing workflow...")

        loaded_model_tuple = self.model_manager.loaded_models[model_display_name]

        # The checkpoint is injected directly into the graph (pre-seeded output)
        # instead of letting the loader node run again; the assembler must have
        # registered a 'ckpt_loader' node for this to work.
        ckpt_loader_node_id = assembler.node_map.get("ckpt_loader")
        if not ckpt_loader_node_id:
            raise RuntimeError("Workflow is missing the 'ckpt_loader' node required for model injection.")

        initial_objects = {
            ckpt_loader_node_id: loaded_model_tuple
        }

        decoded_images_tensor = self._execute_workflow(workflow, initial_objects=initial_objects)

        output_images = []
        # NOTE(review): when seed == -1 a fresh random seed is drawn here for the
        # metadata string only — confirm it matches the seed the sampler actually
        # used inside the workflow.
        start_seed = ui_inputs['seed'] if ui_inputs['seed'] != -1 else random.randint(0, 2**64 - 1)
        for i in range(decoded_images_tensor.shape[0]):
            img_tensor = decoded_images_tensor[i]
            pil_image = Image.fromarray((img_tensor.cpu().numpy() * 255.0).astype("uint8"))
            current_seed = start_seed + i

            width_for_meta = ui_inputs.get('width', 'N/A')
            height_for_meta = ui_inputs.get('height', 'N/A')

            # Build an A1111-compatible parameters block for PNG metadata.
            params_string = f"{ui_inputs['positive_prompt']}\nNegative prompt: {ui_inputs['negative_prompt']}\n"
            params_string += f"Steps: {ui_inputs['num_inference_steps']}, Sampler: {ui_inputs['sampler']}, Scheduler: {ui_inputs['scheduler']}, CFG scale: {ui_inputs['guidance_scale']}, Seed: {current_seed}, Size: {width_for_meta}x{height_for_meta}, Base Model: {model_display_name}"
            if ui_inputs['task_type'] != 'txt2img': params_string += f", Denoise: {ui_inputs['denoise']}"
            if loras_string: params_string += f", {loras_string}"

            pil_image.info = {'parameters': params_string.strip()}
            output_images.append(pil_image)

        return output_images
|
| 151 |
+
|
| 152 |
+
    def run(self, ui_inputs: Dict, progress):
        """CPU-side orchestration of one image-generation request.

        Prepares everything that does not need a GPU: downloads/loads models,
        resolves LoRA/embedding/ControlNet/IP-Adapter/VAE inputs, writes
        temporary image files for the workflow's LoadImage nodes, assembles
        the ComfyUI workflow, then hands off to ``self._gpu_logic`` via
        ``_execute_gpu_logic``. Temp files are always cleaned up.

        Args:
            ui_inputs: flat dict of every Gradio input (mutated in place —
                normalized prompts, resolved file names and denoise are
                written back into it before GPU dispatch).
            progress: Gradio progress callback.

        Returns:
            The list of PIL images produced by ``_gpu_logic``.
        """
        progress(0, desc="Preparing models...")

        task_type = ui_inputs['task_type']

        # Normalize prompts before they are embedded in metadata/workflow.
        ui_inputs['positive_prompt'] = sanitize_prompt(ui_inputs.get('positive_prompt', ''))
        ui_inputs['negative_prompt'] = sanitize_prompt(ui_inputs.get('negative_prompt', ''))

        required_models = self.get_required_models(model_display_name=ui_inputs['model_display_name'])

        self.model_manager.ensure_models_downloaded(required_models, progress=progress)

        # --- LoRAs -------------------------------------------------------
        # lora_data is a flat list of (source, id, scale, file) tuples
        # interleaved stride-4; de-interleave by slicing.
        lora_data = ui_inputs.get('lora_data', [])
        active_loras_for_gpu, active_loras_for_meta = [], []
        sources, ids, scales, files = lora_data[0::4], lora_data[1::4], lora_data[2::4], lora_data[3::4]

        for i, (source, lora_id, scale, _) in enumerate(zip(sources, ids, scales, files)):
            if scale > 0 and lora_id and lora_id.strip():
                lora_filename = None
                if source == "File":
                    lora_filename = sanitize_filename(lora_id)
                elif source == "Civitai":
                    # Download (or reuse cached) LoRA from Civitai.
                    local_path, status = get_lora_path(source, lora_id, ui_inputs['civitai_api_key'], progress)
                    if local_path: lora_filename = os.path.basename(local_path)
                    else: raise gr.Error(f"Failed to prepare LoRA {lora_id}: {status}")

                if lora_filename:
                    active_loras_for_gpu.append({"lora_name": lora_filename, "strength_model": scale, "strength_clip": scale})
                    active_loras_for_meta.append(f"{source} {lora_id}:{scale}")

        progress(0.1, desc="Loading models into RAM...")
        self.model_manager.load_managed_models(required_models, active_loras=active_loras_for_gpu, progress=progress)

        # Per-task default denoise strength (txt2img and inpaint/outpaint use 1.0).
        ui_inputs['denoise'] = 1.0
        if task_type == 'img2img': ui_inputs['denoise'] = ui_inputs.get('img2img_denoise', 0.7)
        elif task_type == 'hires_fix': ui_inputs['denoise'] = ui_inputs.get('hires_denoise', 0.55)

        # Temp images written for LoadImage nodes; deleted in the finally block.
        temp_files_to_clean = []

        if not os.path.exists(INPUT_DIR): os.makedirs(INPUT_DIR)

        # --- Task-specific input image preparation -----------------------
        if task_type == 'img2img':
            input_image_pil = ui_inputs.get('img2img_image')
            if input_image_pil:
                temp_file_path = os.path.join(INPUT_DIR, f"temp_input_{random.randint(1000, 9999)}.png")
                input_image_pil.save(temp_file_path, "PNG")
                ui_inputs['input_image'] = os.path.basename(temp_file_path)
                temp_files_to_clean.append(temp_file_path)
                # Output dimensions follow the input image for img2img.
                ui_inputs['width'] = input_image_pil.width
                ui_inputs['height'] = input_image_pil.height

        elif task_type == 'inpaint':
            # Gradio ImageEditor returns {'background': PIL, 'layers': [PIL...]}.
            inpaint_dict = ui_inputs.get('inpaint_image_dict')
            if not inpaint_dict or not inpaint_dict.get('background') or not inpaint_dict.get('layers'):
                raise gr.Error("Inpainting requires an input image and a drawn mask.")

            background_img = inpaint_dict['background'].convert("RGBA")

            # Union of all drawn layers' alpha channels = the inpaint mask.
            composite_mask_pil = Image.new('L', background_img.size, 0)
            for layer in inpaint_dict['layers']:
                if layer:
                    layer_alpha = layer.split()[-1]
                    composite_mask_pil = ImageChops.lighter(composite_mask_pil, layer_alpha)

            # ComfyUI's LoadImage derives the mask from the *inverted* alpha
            # channel, so bake the inverted mask into the image's alpha.
            inverted_mask_alpha = Image.fromarray(255 - np.array(composite_mask_pil), mode='L')
            r, g, b, _ = background_img.split()
            composite_image_with_mask = Image.merge('RGBA', [r, g, b, inverted_mask_alpha])

            temp_file_path = os.path.join(INPUT_DIR, f"temp_inpaint_composite_{random.randint(1000, 9999)}.png")
            composite_image_with_mask.save(temp_file_path, "PNG")

            ui_inputs['inpaint_image'] = os.path.basename(temp_file_path)
            temp_files_to_clean.append(temp_file_path)
            # Mask now lives in the composite image's alpha; drop any stale key.
            ui_inputs.pop('inpaint_mask', None)

        elif task_type == 'outpaint':
            input_image_pil = ui_inputs.get('outpaint_image')
            if input_image_pil:
                temp_file_path = os.path.join(INPUT_DIR, f"temp_input_{random.randint(1000, 9999)}.png")
                input_image_pil.save(temp_file_path, "PNG")
                ui_inputs['input_image'] = os.path.basename(temp_file_path)
                temp_files_to_clean.append(temp_file_path)

        elif task_type == 'hires_fix':
            input_image_pil = ui_inputs.get('hires_image')
            if input_image_pil:
                temp_file_path = os.path.join(INPUT_DIR, f"temp_input_{random.randint(1000, 9999)}.png")
                input_image_pil.save(temp_file_path, "PNG")
                ui_inputs['input_image'] = os.path.basename(temp_file_path)
                temp_files_to_clean.append(temp_file_path)

        # --- Textual inversion embeddings --------------------------------
        # embedding_data is stride-3 interleaved: (source, id, file).
        embedding_data = ui_inputs.get('embedding_data', [])
        embedding_filenames = []
        if embedding_data:
            emb_sources, emb_ids, emb_files = embedding_data[0::3], embedding_data[1::3], embedding_data[2::3]
            for i, (source, emb_id, _) in enumerate(zip(emb_sources, emb_ids, emb_files)):
                if emb_id and emb_id.strip():
                    emb_filename = None
                    if source == "File":
                        emb_filename = sanitize_filename(emb_id)
                    elif source == "Civitai":
                        local_path, status = get_embedding_path(source, emb_id, ui_inputs['civitai_api_key'], progress)
                        if local_path: emb_filename = os.path.basename(local_path)
                        else: raise gr.Error(f"Failed to prepare Embedding {emb_id}: {status}")

                    if emb_filename:
                        embedding_filenames.append(emb_filename)

        if embedding_filenames:
            # ComfyUI activates embeddings via "embedding:<file>" prompt tokens.
            embedding_prompt_text = " ".join([f"embedding:{f}" for f in embedding_filenames])
            if ui_inputs['positive_prompt']:
                ui_inputs['positive_prompt'] = f"{ui_inputs['positive_prompt']}, {embedding_prompt_text}"
            else:
                ui_inputs['positive_prompt'] = embedding_prompt_text

        # --- ControlNets --------------------------------------------------
        # controlnet_data is stride-5 interleaved; only image/strength/filepath
        # are used here.
        controlnet_data = ui_inputs.get('controlnet_data', [])
        active_controlnets = []
        (cn_images, _, _, cn_strengths, cn_filepaths) = [controlnet_data[i::5] for i in range(5)]
        for i in range(len(cn_images)):
            if cn_images[i] and cn_strengths[i] > 0 and cn_filepaths[i] and cn_filepaths[i] != "None":
                ensure_controlnet_model_downloaded(cn_filepaths[i], progress)

                if not os.path.exists(INPUT_DIR): os.makedirs(INPUT_DIR)
                cn_temp_path = os.path.join(INPUT_DIR, f"temp_cn_{i}_{random.randint(1000, 9999)}.png")
                cn_images[i].save(cn_temp_path, "PNG")
                temp_files_to_clean.append(cn_temp_path)
                active_controlnets.append({
                    "image": os.path.basename(cn_temp_path), "strength": cn_strengths[i],
                    "start_percent": 0.0, "end_percent": 1.0, "control_net_name": cn_filepaths[i]
                })

        # --- IP-Adapters --------------------------------------------------
        # ipadapter_data layout: N per-unit triples laid out block-wise
        # (all images, then all weights, then all lora strengths), followed by
        # 5 trailing global settings.
        ipadapter_data = ui_inputs.get('ipadapter_data', [])
        active_ipadapters = []
        if ipadapter_data:
            num_ipa_units = (len(ipadapter_data) - 5) // 3
            final_preset, final_weight, final_lora_strength, final_embeds_scaling, final_combine_method = ipadapter_data[-5:]
            ipa_images, ipa_weights, ipa_lora_strengths = [ipadapter_data[i*num_ipa_units:(i+1)*num_ipa_units] for i in range(3)]

            all_presets_to_download = set()

            for i in range(num_ipa_units):
                if ipa_images[i] and ipa_weights[i] > 0 and final_preset:
                    all_presets_to_download.add(final_preset)

                    if not os.path.exists(INPUT_DIR): os.makedirs(INPUT_DIR)
                    ipa_temp_path = os.path.join(INPUT_DIR, f"temp_ipa_{i}_{random.randint(1000, 9999)}.png")
                    ipa_images[i].save(ipa_temp_path, "PNG")
                    temp_files_to_clean.append(ipa_temp_path)
                    active_ipadapters.append({
                        "image": os.path.basename(ipa_temp_path), "preset": final_preset,
                        "weight": ipa_weights[i], "lora_strength": ipa_lora_strengths[i]
                    })

            if active_ipadapters and final_preset:
                all_presets_to_download.add(final_preset)

            for preset in all_presets_to_download:
                ensure_ipadapter_models_downloaded(preset, progress)

            if active_ipadapters:
                # Sentinel entry carrying the shared settings for the chain.
                active_ipadapters.append({
                    'is_final_settings': True, 'model_type': 'sdxl', 'final_preset': final_preset,
                    'final_weight': final_weight, 'final_lora_strength': final_lora_strength,
                    'final_embeds_scaling': final_embeds_scaling, 'final_combine_method': final_combine_method
                })

        # --- Optional VAE override ---------------------------------------
        from utils.app_utils import get_vae_path
        vae_source = ui_inputs.get('vae_source')
        vae_id = ui_inputs.get('vae_id')
        # NOTE(review): vae_file is read but never used below — confirm intent.
        vae_file = ui_inputs.get('vae_file')
        vae_name_override = None

        if vae_source and vae_source != "None":
            if vae_source == "File":
                vae_name_override = sanitize_filename(vae_id)
            elif vae_source == "Civitai" and vae_id and vae_id.strip():
                local_path, status = get_vae_path(vae_source, vae_id, ui_inputs.get('civitai_api_key'), progress)
                if local_path: vae_name_override = os.path.basename(local_path)
                else: raise gr.Error(f"Failed to prepare VAE {vae_id}: {status}")

        if vae_name_override:
            ui_inputs['vae_name'] = vae_name_override

        # --- Regional conditioning ---------------------------------------
        # conditioning_data is block-wise: prompts, widths, heights, xs, ys,
        # strengths — each run of num_units values.
        conditioning_data = ui_inputs.get('conditioning_data', [])
        active_conditioning = []
        if conditioning_data:
            num_units = len(conditioning_data) // 6
            prompts = conditioning_data[0*num_units : 1*num_units]
            widths = conditioning_data[1*num_units : 2*num_units]
            heights = conditioning_data[2*num_units : 3*num_units]
            xs = conditioning_data[3*num_units : 4*num_units]
            ys = conditioning_data[4*num_units : 5*num_units]
            strengths = conditioning_data[5*num_units : 6*num_units]

            for i in range(num_units):
                if prompts[i] and prompts[i].strip():
                    active_conditioning.append({
                        "prompt": prompts[i],
                        "width": int(widths[i]),
                        "height": int(heights[i]),
                        "x": int(xs[i]),
                        "y": int(ys[i]),
                        "strength": float(strengths[i])
                    })

        loras_string = f"LoRAs: [{', '.join(active_loras_for_meta)}]" if active_loras_for_meta else ""

        progress(0.8, desc="Assembling workflow...")

        # Resolve a random seed on the CPU side so metadata is reproducible.
        # NOTE(review): range here is 2**32 while _gpu_logic's fallback uses
        # 2**64 — confirm whether that asymmetry is intentional.
        if ui_inputs.get('seed') == -1:
            ui_inputs['seed'] = random.randint(0, 2**32 - 1)

        dynamic_values = {'task_type': ui_inputs['task_type'], 'model_type': "sdxl"}

        recipe_path = os.path.join(os.path.dirname(__file__), "workflow_recipes", "sd_unified_recipe.yaml")
        assembler = WorkflowAssembler(recipe_path, dynamic_values=dynamic_values)

        # Flat input dict consumed by the recipe's ui_map entries and the
        # dynamic chain injectors.
        workflow_inputs = {
            "positive_prompt": ui_inputs['positive_prompt'], "negative_prompt": ui_inputs['negative_prompt'],
            "seed": ui_inputs['seed'], "steps": ui_inputs['num_inference_steps'], "cfg": ui_inputs['guidance_scale'],
            "sampler_name": ui_inputs['sampler'], "scheduler": ui_inputs['scheduler'],
            "batch_size": ui_inputs['batch_size'],
            "denoise": ui_inputs['denoise'],
            "input_image": ui_inputs.get('input_image'),
            "inpaint_image": ui_inputs.get('inpaint_image'),
            "inpaint_mask": ui_inputs.get('inpaint_mask'),
            "left": ui_inputs.get('outpaint_left'), "top": ui_inputs.get('outpaint_top'),
            "right": ui_inputs.get('outpaint_right'), "bottom": ui_inputs.get('outpaint_bottom'),
            "hires_upscaler": ui_inputs.get('hires_upscaler'), "hires_scale_by": ui_inputs.get('hires_scale_by'),
            "model_name": ALL_MODEL_MAP[ui_inputs['model_display_name']][1],
            "vae_name": ui_inputs.get('vae_name'),
            "controlnet_chain": active_controlnets,
            "ipadapter_chain": active_ipadapters,
            "conditioning_chain": active_conditioning,
        }

        # Only txt2img creates a latent from explicit dimensions; the other
        # tasks derive dimensions from the input image.
        if task_type == 'txt2img':
            workflow_inputs['width'] = ui_inputs['width']
            workflow_inputs['height'] = ui_inputs['height']

        workflow = assembler.assemble(workflow_inputs)

        # Post-assembly patch: swap the checkpoint's VAE for an explicit
        # VAELoader when the user supplied a VAE override.
        if workflow_inputs.get("vae_name"):
            print("--- [Workflow Patch] VAE override provided. Adding VAELoader and rewiring connections. ---")
            vae_loader_id = assembler._get_unique_id()
            vae_loader_node = assembler._get_node_template("VAELoader")
            vae_loader_node['inputs']['vae_name'] = workflow_inputs["vae_name"]
            workflow[vae_loader_id] = vae_loader_node

            vae_decode_id = assembler.node_map.get("vae_decode")
            if vae_decode_id and vae_decode_id in workflow:
                workflow[vae_decode_id]['inputs']['vae'] = [vae_loader_id, 0]
                print(f" - Rewired 'vae_decode' (ID: {vae_decode_id}) to use new VAELoader.")

            vae_encode_id = assembler.node_map.get("vae_encode")
            if vae_encode_id and vae_encode_id in workflow:
                workflow[vae_encode_id]['inputs']['vae'] = [vae_loader_id, 0]
                print(f" - Rewired 'vae_encode' (ID: {vae_encode_id}) to use new VAELoader.")
        else:
            print("--- [Workflow Info] No VAE override. Using VAE from checkpoint. ---")

        progress(1.0, desc="All models ready. Requesting GPU for generation...")

        # Hand off to the GPU phase; temp files are removed even on failure.
        try:
            results = self._execute_gpu_logic(
                self._gpu_logic,
                duration=ui_inputs['zero_gpu_duration'],
                default_duration=60,
                task_name=f"ImageGen ({task_type})",
                ui_inputs=ui_inputs,
                loras_string=loras_string,
                required_models_for_gpu=required_models,
                workflow=workflow,
                assembler=assembler,
                progress=progress
            )
        finally:
            for temp_file in temp_files_to_clean:
                if temp_file and os.path.exists(temp_file):
                    os.remove(temp_file)
                    print(f"✅ Cleaned up temp file: {temp_file}")

        return results
|
core/pipelines/workflow_recipes/_partials/_base_sampler.yaml
ADDED
|
@@ -0,0 +1,23 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Shared sampler backbone used by every recipe:
# KSampler -> VAEDecode -> SaveImage.
# The latent input ("latent_source") and the conditioning/model inputs are
# supplied by the other partials merged into the final recipe.
nodes:
  ksampler:
    class_type: KSampler

  vae_decode:
    class_type: VAEDecode
  save_image:
    class_type: SaveImage
    params: {}

connections:
  - from: "ksampler:0"
    to: "vae_decode:samples"
  - from: "vae_decode:0"
    to: "save_image:images"

# ui_map routes flat workflow-input keys onto node input slots.
ui_map:
  seed: "ksampler:seed"
  steps: "ksampler:steps"
  cfg: "ksampler:cfg"
  sampler_name: "ksampler:sampler_name"
  scheduler: "ksampler:scheduler"
  denoise: "ksampler:denoise"
|
core/pipelines/workflow_recipes/_partials/conditioning/sdxl.yaml
ADDED
|
@@ -0,0 +1,50 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# SDXL conditioning partial: checkpoint loader plus positive/negative prompt
# encoders, wired into the sampler defined in _base_sampler.yaml.
nodes:
  ckpt_loader:
    class_type: CheckpointLoaderSimple
    title: "Load SDXL Checkpoint"

  pos_prompt:
    class_type: CLIPTextEncode
    title: "Positive Prompt Encoder"

  neg_prompt:
    class_type: CLIPTextEncode
    title: "Negative Prompt Encoder"

connections:
  # Checkpoint outputs: 0 = MODEL, 1 = CLIP, 2 = VAE.
  - from: "ckpt_loader:0"
    to: "ksampler:model"
  - from: "ckpt_loader:1"
    to: "pos_prompt:clip"
  - from: "ckpt_loader:1"
    to: "neg_prompt:clip"

  # Decode final latents with the checkpoint's bundled VAE (may be rewired
  # at runtime when a VAE override is supplied).
  - from: "ckpt_loader:2"
    to: "vae_decode:vae"

  - from: "pos_prompt:0"
    to: "ksampler:positive"
  - from: "neg_prompt:0"
    to: "ksampler:negative"

ui_map:
  model_name: "ckpt_loader:ckpt_name"
  positive_prompt: "pos_prompt:text"
  negative_prompt: "neg_prompt:text"

# Attachment points for the dynamic injector chains (see yaml/injectors.yaml).
dynamic_controlnet_chains:
  controlnet_chain:
    template: "ControlNetApplyAdvanced"
    ksampler_node: "ksampler"

dynamic_ipadapter_chains:
  ipadapter_chain:
    end: "ksampler"
    final_preset: "{{ ipadapter_final_preset }}"
    final_weight: "{{ ipadapter_final_weight }}"
    final_embeds_scaling: "{{ ipadapter_embeds_scaling }}"

dynamic_conditioning_chains:
  conditioning_chain:
    ksampler_node: "ksampler"
    clip_source: "ckpt_loader:1"
|
core/pipelines/workflow_recipes/_partials/input/hires_fix.yaml
ADDED
|
@@ -0,0 +1,26 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# hires_fix input partial: load the previously generated image, re-encode it
# to latent space, upscale the latent, then repeat it for the batch.
nodes:
  input_image_loader:
    class_type: LoadImage

  vae_encode:
    class_type: VAEEncode

  latent_upscaler:
    class_type: LatentUpscaleBy

  # "latent_source" is the common name every input partial exposes; the main
  # recipe connects it to the sampler.
  latent_source:
    class_type: RepeatLatentBatch

connections:
  - from: "input_image_loader:0"
    to: "vae_encode:pixels"
  - from: "vae_encode:0"
    to: "latent_upscaler:samples"
  - from: "latent_upscaler:0"
    to: "latent_source:samples"

ui_map:
  input_image: "input_image_loader:image"
  hires_upscaler: "latent_upscaler:upscale_method"
  hires_scale_by: "latent_upscaler:scale_by"
  batch_size: "latent_source:amount"
|
core/pipelines/workflow_recipes/_partials/input/img2img.yaml
ADDED
|
@@ -0,0 +1,19 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# img2img input partial: load the source image, encode it to a latent, and
# repeat it for the requested batch size.
nodes:
  input_image_loader:
    class_type: LoadImage

  vae_encode:
    class_type: VAEEncode

  # "latent_source" is the common output name consumed by the main recipe.
  latent_source:
    class_type: RepeatLatentBatch

connections:
  - from: "input_image_loader:0"
    to: "vae_encode:pixels"
  - from: "vae_encode:0"
    to: "latent_source:samples"

ui_map:
  input_image: "input_image_loader:image"
  batch_size: "latent_source:amount"
|
core/pipelines/workflow_recipes/_partials/input/inpaint.yaml
ADDED
|
@@ -0,0 +1,25 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# inpaint input partial: the composite PNG carries the mask in its (inverted)
# alpha channel, so LoadImage yields both the image (slot 0) and mask (slot 1).
nodes:
  inpaint_loader:
    class_type: LoadImage
    title: "Load Inpaint Image+Mask"

  vae_encode:
    class_type: VAEEncodeForInpaint
    params:
      # Expand the mask slightly to blend the inpainted region's edges.
      grow_mask_by: 6

  latent_source:
    class_type: RepeatLatentBatch

connections:
  - from: "inpaint_loader:0"
    to: "vae_encode:pixels"
  - from: "inpaint_loader:1"
    to: "vae_encode:mask"

  - from: "vae_encode:0"
    to: "latent_source:samples"

ui_map:
  inpaint_image: "inpaint_loader:image"
  batch_size: "latent_source:amount"
|
core/pipelines/workflow_recipes/_partials/input/outpaint.yaml
ADDED
|
@@ -0,0 +1,38 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# outpaint input partial: pad the source image on the requested sides, then
# inpaint-encode the padded area (ImagePadForOutpaint also emits the mask).
nodes:
  input_image_loader:
    class_type: LoadImage

  pad_image:
    class_type: ImagePadForOutpaint
    params:
      # Soften the seam between the original and the padded region.
      feathering: 10

  vae_encode:
    class_type: VAEEncodeForInpaint
    params:
      grow_mask_by: 6

  latent_source:
    class_type: RepeatLatentBatch

connections:
  - from: "input_image_loader:0"
    to: "pad_image:image"

  # pad_image outputs: 0 = padded image, 1 = mask covering the new area.
  - from: "pad_image:0"
    to: "vae_encode:pixels"
  - from: "pad_image:1"
    to: "vae_encode:mask"

  - from: "vae_encode:0"
    to: "latent_source:samples"

ui_map:
  input_image: "input_image_loader:image"

  # Padding in pixels per side.
  left: "pad_image:left"
  top: "pad_image:top"
  right: "pad_image:right"
  bottom: "pad_image:bottom"

  batch_size: "latent_source:amount"
|
core/pipelines/workflow_recipes/_partials/input/txt2img.yaml
ADDED
|
@@ -0,0 +1,8 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# txt2img input partial: just an empty latent at the requested dimensions.
nodes:
  latent_source:
    class_type: EmptyLatentImage

ui_map:
  width: "latent_source:width"
  height: "latent_source:height"
  batch_size: "latent_source:batch_size"
|
core/pipelines/workflow_recipes/sd_unified_recipe.yaml
ADDED
|
@@ -0,0 +1,10 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Top-level recipe: merges the sampler backbone, one task-specific input
# partial ({{ task_type }} is substituted at load time), and SDXL
# conditioning, then adds the cross-partial connections.
imports:
  - "_partials/_base_sampler.yaml"
  - "_partials/input/{{ task_type }}.yaml"
  - "_partials/conditioning/sdxl.yaml"

connections:
  - from: "latent_source:0"
    to: "ksampler:latent_image"
  # Checkpoint VAE also drives vae_encode for the img2img-style inputs
  # (no-op for txt2img, which has no vae_encode node).
  - from: "ckpt_loader:2"
    to: "vae_encode:vae"
|
core/settings.py
ADDED
|
@@ -0,0 +1,117 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import yaml
import os
from collections import OrderedDict

# Local model directory layout (relative to the working directory), mirroring
# ComfyUI's folder conventions.
CHECKPOINT_DIR = "models/checkpoints"
LORA_DIR = "models/loras"
EMBEDDING_DIR = "models/embeddings"
CONTROLNET_DIR = "models/controlnet"
DIFFUSION_MODELS_DIR = "models/diffusion_models"
VAE_DIR = "models/vae"
TEXT_ENCODERS_DIR = "models/text_encoders"
INPUT_DIR = "input"
OUTPUT_DIR = "output"

# Absolute paths to the project's bundled YAML configuration files
# (project root = two levels above this module).
_PROJECT_ROOT = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
_MODEL_LIST_PATH = os.path.join(_PROJECT_ROOT, 'yaml', 'model_list.yaml')
_FILE_LIST_PATH = os.path.join(_PROJECT_ROOT, 'yaml', 'file_list.yaml')
_IPADAPTER_LIST_PATH = os.path.join(_PROJECT_ROOT, 'yaml', 'ipadapter.yaml')
_CONSTANTS_PATH = os.path.join(_PROJECT_ROOT, 'yaml', 'constants.yaml')
|
| 20 |
+
|
| 21 |
+
|
| 22 |
+
def load_constants_from_yaml(filepath=_CONSTANTS_PATH):
    """Load UI/runtime constants from a YAML file.

    Args:
        filepath: path to the constants YAML (defaults to yaml/constants.yaml).

    Returns:
        dict of constants. Empty dict when the file is missing *or empty*,
        so callers can always chain ``.get(...)`` safely.
    """
    if not os.path.exists(filepath):
        print(f"Warning: Constants file not found at {filepath}. Using fallback values.")
        return {}
    with open(filepath, 'r', encoding='utf-8') as f:
        # yaml.safe_load returns None for an empty document; normalize to {}
        # so the module-level `.get()` lookups cannot raise AttributeError.
        return yaml.safe_load(f) or {}
|
| 28 |
+
|
| 29 |
+
def load_file_download_map(filepath=_FILE_LIST_PATH):
    """Build a map of filename -> download info from yaml/file_list.yaml.

    Each entry under the top-level 'file' key is a category whose value is a
    list of file dicts; each dict gains a 'category' field and is indexed by
    its 'filename'.

    Args:
        filepath: path to the file list YAML.

    Returns:
        dict mapping filename to its download-info dict.

    Raises:
        FileNotFoundError: if the file list does not exist.
    """
    if not os.path.exists(filepath):
        raise FileNotFoundError(f"The file list (for downloads) was not found at: {filepath}")

    with open(filepath, 'r', encoding='utf-8') as f:
        # Guard against an empty YAML document (safe_load returns None),
        # which would crash the .get() below.
        file_list_data = yaml.safe_load(f) or {}

    download_info_map = {}
    for category, files in file_list_data.get('file', {}).items():
        if isinstance(files, list):
            for file_info in files:
                if 'filename' in file_info:
                    # Tag each entry with its category so downloaders know
                    # which target directory to use.
                    file_info['category'] = category
                    download_info_map[file_info['filename']] = file_info
    return download_info_map
|
| 44 |
+
|
| 45 |
+
|
| 46 |
+
def load_models_from_yaml(model_list_filepath=_MODEL_LIST_PATH, download_map=None):
    """Load the checkpoint model maps from yaml/model_list.yaml.

    Args:
        model_list_filepath: path to the model list YAML.
        download_map: filename -> download info (from load_file_download_map),
            used to resolve each model's repo_id. Required.

    Returns:
        dict with two OrderedDicts keyed by display name:
        'MODEL_MAP_CHECKPOINT' and 'ALL_MODEL_MAP'. Each value is a tuple
        (repo_id, filename, "SDXL", None).

    Raises:
        FileNotFoundError: if the model list does not exist.
        ValueError: if download_map is not provided.
    """
    if not os.path.exists(model_list_filepath):
        raise FileNotFoundError(f"The model list file was not found at: {model_list_filepath}")
    if download_map is None:
        raise ValueError("download_map must be provided to load_models_from_yaml")

    with open(model_list_filepath, 'r', encoding='utf-8') as f:
        # Guard against an empty YAML document (safe_load returns None),
        # which would crash the .items() iteration below.
        model_data = yaml.safe_load(f) or {}

    model_maps = {
        "MODEL_MAP_CHECKPOINT": OrderedDict(),
        "ALL_MODEL_MAP": OrderedDict(),
    }
    # Maps a YAML category to the target map; only checkpoints for now.
    category_map_names = {
        "Checkpoint": "MODEL_MAP_CHECKPOINT",
    }

    for category, models in model_data.items():
        if category in category_map_names:
            map_name = category_map_names[category]
            if not isinstance(models, list): continue
            for model in models:
                display_name = model['display_name']
                filename = model['path']

                # repo_id comes from the download map; missing entries get ''.
                download_info = download_map.get(filename, {})
                repo_id = download_info.get('repo_id', '')

                model_tuple = (
                    repo_id,
                    filename,
                    "SDXL",   # model type (index 2, consumed by MODEL_TYPE_MAP)
                    None
                )
                model_maps[map_name][display_name] = model_tuple
                model_maps["ALL_MODEL_MAP"][display_name] = model_tuple

    return model_maps
|
| 84 |
+
|
| 85 |
+
try:
    # Build the download map first; the model maps need it to resolve repo ids.
    ALL_FILE_DOWNLOAD_MAP = load_file_download_map()
    loaded_maps = load_models_from_yaml(download_map=ALL_FILE_DOWNLOAD_MAP)
    MODEL_MAP_CHECKPOINT = loaded_maps["MODEL_MAP_CHECKPOINT"]
    ALL_MODEL_MAP = loaded_maps["ALL_MODEL_MAP"]

    # display_name -> model type string (index 2 of each model tuple, "SDXL").
    MODEL_TYPE_MAP = {k: v[2] for k, v in ALL_MODEL_MAP.items()}

except Exception as e:
    # Fall back to empty maps so the module still imports; the UI simply
    # shows no models.
    print(f"FATAL: Could not load model configuration from YAML. Error: {e}")
    ALL_FILE_DOWNLOAD_MAP = {}
    MODEL_MAP_CHECKPOINT, ALL_MODEL_MAP = {}, {}
    MODEL_TYPE_MAP = {}


try:
    _constants = load_constants_from_yaml()
    # UI limits and choice lists, each with a hard-coded fallback.
    MAX_LORAS = _constants.get('MAX_LORAS', 5)
    MAX_EMBEDDINGS = _constants.get('MAX_EMBEDDINGS', 5)
    MAX_CONDITIONINGS = _constants.get('MAX_CONDITIONINGS', 10)
    MAX_CONTROLNETS = _constants.get('MAX_CONTROLNETS', 5)
    MAX_IPADAPTERS = _constants.get('MAX_IPADAPTERS', 5)
    LORA_SOURCE_CHOICES = _constants.get('LORA_SOURCE_CHOICES', ["Civitai", "Custom URL", "File"])
    RESOLUTION_MAP = _constants.get('RESOLUTION_MAP', {})
    SAMPLER_MAP = _constants.get('SAMPLER_MAP', {})
except Exception as e:
    # Same fallbacks as above, applied wholesale on any parse failure.
    print(f"FATAL: Could not load constants from YAML. Error: {e}")
    MAX_LORAS, MAX_EMBEDDINGS, MAX_CONDITIONINGS, MAX_CONTROLNETS, MAX_IPADAPTERS = 5, 5, 10, 5, 5
    LORA_SOURCE_CHOICES = ["Civitai", "Custom URL", "File"]
    RESOLUTION_MAP, SAMPLER_MAP = {}, {}


# Default negative prompt pre-filled in the UI.
DEFAULT_NEGATIVE_PROMPT = "monochrome, (low quality, worst quality:1.2), 3d, watermark, signature, ugly, poorly drawn,"
|
core/shared_state.py
ADDED
|
@@ -0,0 +1 @@
|
|
|
|
|
|
|
| 1 |
+
# Cross-module registry of model URLs/ids that failed to resolve or download,
# so repeated attempts can be short-circuited.
# NOTE(review): populated elsewhere — exact key/value schema not visible here.
INVALID_MODEL_URLS = {}
|
core/workflow_assembler.py
ADDED
|
@@ -0,0 +1,179 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import yaml
|
| 2 |
+
import os
|
| 3 |
+
import importlib
|
| 4 |
+
from copy import deepcopy
|
| 5 |
+
from comfy_integration.nodes import NODE_CLASS_MAPPINGS
|
| 6 |
+
|
| 7 |
+
class WorkflowAssembler:
    """Builds a ComfyUI API-format workflow dict from modular YAML recipes."""

    def __init__(self, recipe_path, dynamic_values=None):
        """Load a recipe (with its imported partials) ready for assembly.

        Args:
            recipe_path: path to the top-level recipe YAML; its directory
                becomes the base path for resolving partial imports.
            dynamic_values: optional mapping substituted into
                ``{{ placeholder }}`` tokens in the recipe text before parsing.
        """
        self.base_path = os.path.dirname(recipe_path)
        self.node_counter = 0   # monotonically increasing source of node ids
        self.workflow = {}      # node_id -> API-format node dict
        self.node_map = {}      # logical recipe node name -> node_id

        # Register the pluggable chain injectors first: recipe merging
        # depends on self.injector_order being populated.
        self._load_injector_config()

        self.recipe = self._load_and_merge_recipe(os.path.basename(recipe_path), dynamic_values or {})
|
| 17 |
+
|
| 18 |
+
    def _load_injector_config(self):
        """Load yaml/injectors.yaml and register the dynamic chain injectors.

        Populates:
            self.global_injectors: chain_type -> the module's ``inject`` callable.
            self.injector_order: order in which chains are applied (falls back
                to definition order when absent).

        Any failure is logged and leaves both empty, which disables dynamic
        chains rather than crashing assembly.
        """
        try:
            # injectors.yaml lives in <project_root>/yaml, two levels up.
            project_root = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
            injectors_path = os.path.join(project_root, 'yaml', 'injectors.yaml')

            with open(injectors_path, 'r', encoding='utf-8') as f:
                injector_config = yaml.safe_load(f)

            definitions = injector_config.get("injector_definitions", {})
            self.injector_order = injector_config.get("injector_order", [])
            self.global_injectors = {}

            # Import each configured module and register its `inject` entry point.
            for chain_type, config in definitions.items():
                module_path = config.get("module")
                if not module_path:
                    print(f"Warning: Injector '{chain_type}' in injectors.yaml is missing 'module' path.")
                    continue
                try:
                    module = importlib.import_module(module_path)
                    if hasattr(module, 'inject'):
                        self.global_injectors[chain_type] = module.inject
                        print(f"✅ Successfully registered global injector: {chain_type} from {module_path}")
                    else:
                        print(f"⚠️ Warning: Module '{module_path}' for injector '{chain_type}' does not have an 'inject' function.")
                except ImportError as e:
                    print(f"❌ Error importing module '{module_path}' for injector '{chain_type}': {e}")

            if not self.injector_order:
                print("⚠️ Warning: 'injector_order' is not defined in injectors.yaml. Using definition order.")
                self.injector_order = list(definitions.keys())

        except FileNotFoundError:
            print(f"❌ FATAL: Could not find injectors.yaml at {injectors_path}. Dynamic chains will not work.")
            self.injector_order = []
            self.global_injectors = {}
        except Exception as e:
            print(f"❌ FATAL: Could not load or parse injectors.yaml. Dynamic chains will not work. Error: {e}")
            self.injector_order = []
            self.global_injectors = {}
|
| 57 |
+
|
| 58 |
+
def _get_unique_id(self):
|
| 59 |
+
self.node_counter += 1
|
| 60 |
+
return str(self.node_counter)
|
| 61 |
+
|
| 62 |
+
def _get_node_template(self, class_type):
    """Build a prototype API-workflow entry for a registered ComfyUI node class.

    Returns a dict of the form
    ``{"inputs": {name: default, ...}, "class_type": ..., "_meta": {"title": ...}}``
    where defaults are taken from the node's ``INPUT_TYPES()`` declaration.

    Raises:
        ValueError: if ``class_type`` is not present in NODE_CLASS_MAPPINGS.
    """
    if class_type not in NODE_CLASS_MAPPINGS:
        raise ValueError(f"Node class '{class_type}' not found. Ensure it's correctly imported in comfy_integration/nodes.py.")

    node_class = NODE_CLASS_MAPPINGS[class_type]
    input_types = node_class.INPUT_TYPES()

    template = {
        "inputs": {},
        "class_type": class_type,
        # getattr with a default replaces the hasattr-then-access pattern;
        # the class_type doubles as the display title when NODE_NAME is absent.
        "_meta": {"title": getattr(node_class, 'NODE_NAME', class_type)}
    }

    # Optional inputs override required ones on a name collision (dict merge order).
    all_inputs = {**input_types.get('required', {}), **input_types.get('optional', {})}
    for name, details in all_inputs.items():
        # details is (TYPE,) or (TYPE, {config}); only the config dict can carry a default.
        config = details[1] if len(details) > 1 and isinstance(details[1], dict) else {}
        template["inputs"][name] = config.get("default")

    return template
def _load_and_merge_recipe(self, recipe_filename, dynamic_values, search_context_dir=None):
    """Recursively load a YAML workflow recipe, resolving imports and placeholders.

    ``{{ key }}`` placeholders (for keys in *dynamic_values* with non-None values)
    are substituted both in the recipe text and in its import paths. Imported
    partials are merged first, then the main recipe, so the main recipe's
    entries win on key collisions. Missing import partials are treated as
    optional and skipped with a warning; a missing top-level recipe raises
    FileNotFoundError.

    Improvement over the previous version: the placeholder-substitution loop
    and the recipe-merge logic were each duplicated; both are now factored
    into local helpers.
    """
    def _substitute(text):
        # Replace "{{ key }}" placeholders with their dynamic values.
        for key, value in dynamic_values.items():
            if value is not None:
                text = text.replace(f"{{{{ {key} }}}}", str(value))
        return text

    search_path = search_context_dir or self.base_path
    recipe_path = os.path.join(search_path, recipe_filename)

    if not os.path.exists(recipe_path):
        raise FileNotFoundError(f"Recipe file not found: {recipe_path}")

    with open(recipe_path, 'r', encoding='utf-8') as f:
        main_recipe = yaml.safe_load(_substitute(f.read()))

    merged = {'nodes': {}, 'connections': [], 'ui_map': {}}
    # Only 'dynamic_*' chain-type keys participate in the merge.
    dynamic_keys = [k for k in self.injector_order if k.startswith('dynamic_')]
    for key in dynamic_keys:
        merged[key] = {}

    def _merge(recipe):
        # Fold one (sub-)recipe into the accumulated result.
        merged['nodes'].update(recipe.get('nodes', {}))
        merged['connections'].extend(recipe.get('connections', []))
        merged['ui_map'].update(recipe.get('ui_map', {}))
        for key in dynamic_keys:
            if key in recipe:
                merged[key].update(recipe.get(key, {}))

    # Imports are resolved relative to the directory of the recipe that lists them.
    parent_recipe_dir = os.path.dirname(recipe_path)
    for import_template in main_recipe.get('imports', []):
        import_path = _substitute(import_template)
        try:
            imported = self._load_and_merge_recipe(import_path, dynamic_values, search_context_dir=parent_recipe_dir)
        except FileNotFoundError:
            print(f"Warning: Optional recipe partial '{import_path}' not found. Skipping.")
            continue
        _merge(imported)

    # The main recipe is merged last so its entries take precedence.
    _merge(main_recipe)

    return merged
def assemble(self, ui_values):
    """Materialize the merged recipe into a ComfyUI API workflow dict.

    Steps: instantiate every recipe node from its class template, apply
    recipe-level params, apply UI overrides via the recipe's ui_map, wire
    node-to-node connections, then run any registered dynamic injectors.
    Returns ``self.workflow`` (node-id -> node dict).
    """
    # 1) Instantiate every node from its class template.
    for name, details in self.recipe['nodes'].items():
        node = deepcopy(self._get_node_template(details['class_type']))
        node_id = self._get_unique_id()
        self.node_map[name] = node_id

        # Recipe-level parameter overrides (only for inputs the node declares).
        for param, value in details.get('params', {}).items():
            if param in node['inputs']:
                node['inputs'][param] = value

        self.workflow[node_id] = node

    # 2) Apply UI overrides; a ui_map entry maps a UI key to "node:param"
    #    (or a list of such targets).
    for ui_key, target in self.recipe.get('ui_map', {}).items():
        if ui_values.get(ui_key) is None:
            continue
        targets = target if isinstance(target, list) else [target]
        for spec in targets:
            node_name, param = spec.split(':')
            if node_name in self.node_map:
                self.workflow[self.node_map[node_name]]['inputs'][param] = ui_values[ui_key]

    # 3) Wire connections: "from" is "name:output_index", "to" is "name:input_name".
    for conn in self.recipe.get('connections', []):
        src_name, src_idx = conn['from'].split(':')
        dst_name, dst_input = conn['to'].split(':')
        src_id = self.node_map.get(src_name)
        dst_id = self.node_map.get(dst_name)
        if src_id and dst_id:
            self.workflow[dst_id]['inputs'][dst_input] = [src_id, int(src_idx)]

    # 4) Run dynamic injectors in the globally configured order, restricted
    #    to the chain types this recipe actually declares.
    print("--- [Assembler] Applying dynamic injectors ---")
    declared = {key for key in self.recipe if key.startswith('dynamic_')}
    for chain_type in self.injector_order:
        if chain_type not in declared:
            continue
        injector = self.global_injectors.get(chain_type)
        if not injector:
            continue
        for chain_key, chain_def in self.recipe.get(chain_type, {}).items():
            if ui_values.get(chain_key):
                print(f" -> Injecting '{chain_type}' for '{chain_key}'...")
                injector(self, chain_def, ui_values[chain_key])

    print("--- [Assembler] Finished applying injectors ---")

    return self.workflow
requirements.txt
ADDED
|
@@ -0,0 +1,52 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
addict
|
| 2 |
+
aiohttp>=3.11.8
|
| 3 |
+
alembic
|
| 4 |
+
albumentations
|
| 5 |
+
av>=14.2.0
|
| 6 |
+
comfyui-embedded-docs
|
| 7 |
+
comfyui-frontend-package
|
| 8 |
+
comfyui-workflow-templates
|
| 9 |
+
einops
|
| 10 |
+
filelock
|
| 11 |
+
ftfy
|
| 12 |
+
fvcore
|
| 13 |
+
gradio
|
| 14 |
+
huggingface-hub
|
| 15 |
+
imageio[ffmpeg]
|
| 16 |
+
importlib_metadata
|
| 17 |
+
kornia>=0.7.1
|
| 18 |
+
matplotlib
|
| 19 |
+
mediapipe
|
| 20 |
+
ninja
|
| 21 |
+
numpy>=1.25.0
|
| 22 |
+
omegaconf
|
| 23 |
+
opencv-python>=4.7.0.72
|
| 24 |
+
Pillow
|
| 25 |
+
psutil
|
| 26 |
+
pydantic~=2.0
|
| 27 |
+
pydantic-settings~=2.0
|
| 28 |
+
python-dateutil
|
| 29 |
+
PyYAML
|
| 30 |
+
requests
|
| 31 |
+
safetensors>=0.4.2
|
| 32 |
+
scikit-image
|
| 33 |
+
scikit-learn
|
| 34 |
+
scipy
|
| 35 |
+
sentencepiece
|
| 36 |
+
soundfile
|
| 37 |
+
spaces
|
| 38 |
+
spandrel
|
| 39 |
+
SQLAlchemy
|
| 40 |
+
svglib
|
| 41 |
+
tokenizers>=0.13.3
|
| 42 |
+
torch
|
| 43 |
+
torchaudio
|
| 44 |
+
torchsde
|
| 45 |
+
torchvision
|
| 46 |
+
tqdm
|
| 47 |
+
transformers>=4.37.2
|
| 48 |
+
trimesh[easy]
|
| 49 |
+
yacs
|
| 50 |
+
yapf
|
| 51 |
+
yarl>=1.18.0
|
| 52 |
+
onnxruntime-gpu
|
scripts/__init__.py
ADDED
|
File without changes
|
scripts/build_sage_attention.py
ADDED
|
@@ -0,0 +1,99 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import os
|
| 2 |
+
import subprocess
|
| 3 |
+
import sys
|
| 4 |
+
import textwrap
|
| 5 |
+
|
| 6 |
+
REPO_URL = "https://github.com/thu-ml/SageAttention.git"
|
| 7 |
+
REPO_DIR = "SageAttention"
|
| 8 |
+
|
| 9 |
+
def run_command(command, cwd=None, env=None):
    """Run *command*, capturing combined stdout/stderr as text.

    On failure the captured output is printed and a
    ``subprocess.CalledProcessError`` is raised with the output attached
    (previously the output was printed but discarded, so callers catching
    the exception could not inspect ``e.output``).

    Args:
        command: argv list for subprocess.run (shell=False by construction).
        cwd: optional working directory.
        env: optional environment mapping.
    """
    print(f"🚀 Running command: {' '.join(command)}")
    result = subprocess.run(
        command,
        cwd=cwd,
        env=env,
        stdout=subprocess.PIPE,
        stderr=subprocess.STDOUT,
        text=True
    )

    if result.returncode != 0:
        print(result.stdout)
        # Attach captured output so callers can read e.output from the exception.
        raise subprocess.CalledProcessError(result.returncode, command, output=result.stdout)
def patch_setup_py(setup_py_path):
    """Rewrite SageAttention's setup.py so it compiles without OpenMP flags.

    Removes '-fopenmp' and '-lgomp' from the CXX_FLAGS line in place. If the
    exact line is not found (upstream change), the file is rewritten unchanged
    and a warning is printed.
    """
    print(f"--- [SageAttention Build] Applying patches to {setup_py_path} ---")

    with open(setup_py_path, 'r', encoding='utf-8') as src:
        text = src.read()

    before = 'CXX_FLAGS = ["-g", "-O3", "-fopenmp", "-lgomp", "-std=c++17", "-DENABLE_BF16"]'
    after = 'CXX_FLAGS = ["-g", "-O3", "-std=c++17", "-DENABLE_BF16"]'

    if before not in text:
        print("⚠️ Patch 1/1: CXX_FLAGS line not found as expected. It might have been changed upstream. Skipping.")
    else:
        text = text.replace(before, after)
        print("🔧 Patch 1/1: Removed '-fopenmp' and '-lgomp' from CXX_FLAGS.")

    with open(setup_py_path, 'w', encoding='utf-8') as dst:
        dst.write(text)

    print("✅ Patches applied successfully.")
def install_sage_attention():
    """Clone, patch, compile and install SageAttention unless already present.

    The presence of the REPO_DIR checkout is used as the "already installed"
    marker; delete the directory to force a rebuild. Exits the process with
    status 1 on any failure (missing git, failed command, unexpected error).
    """
    print("--- [SageAttention Build] Checking environment ---")

    if os.path.isdir(REPO_DIR):
        print(f"✅ Directory '{REPO_DIR}' already exists, assuming SageAttention is installed. Skipping build.")
        return

    print(f"⏳ Directory '{REPO_DIR}' not found. Starting a fresh installation of SageAttention.")

    try:
        print("--- [SageAttention Build] Step 1/3: Cloning repository ---")
        run_command(["git", "clone", REPO_URL])
        print("✅ Repository cloned successfully.")

        print("--- [SageAttention Build] Step 2/3: Patching setup.py ---")
        patch_setup_py(os.path.join(REPO_DIR, "setup.py"))

        print("--- [SageAttention Build] Step 3/3: Compiling and installing ---")

        env = os.environ.copy()
        env.update({
            "TORCH_CUDA_ARCH_LIST": "9.0",
            "EXT_PARALLEL": "4",
            "NVCC_APPEND_FLAGS": "--threads 8",
            "MAX_JOBS": "32"
        })
        print("🔧 Setting build environment variables:")
        print(f" - TORCH_CUDA_ARCH_LIST='{env['TORCH_CUDA_ARCH_LIST']}'")
        print(f" - EXT_PARALLEL={env['EXT_PARALLEL']}")
        print(f" - NVCC_APPEND_FLAGS='{env['NVCC_APPEND_FLAGS']}'")
        print(f" - MAX_JOBS={env['MAX_JOBS']}")

        run_command([sys.executable, "setup.py", "install"], cwd=REPO_DIR, env=env)

        print("🎉 SageAttention compiled and installed successfully! ---")

    except FileNotFoundError:
        print("❌ ERROR: 'git' command not found. Please ensure Git is installed in your environment.")
        sys.exit(1)
    except subprocess.CalledProcessError as e:
        print(f"❌ Command failed with return code: {e.returncode}")
        print(f"❌ Command: {' '.join(e.cmd)}")
        print("❌ SageAttention installation failed. Please check the logs above for details.")
        sys.exit(1)
    except Exception as e:
        print(f"❌ An unknown error occurred: {e}")
        sys.exit(1)
if __name__ == "__main__":
    # An existing checkout means install_sage_attention() will skip the build.
    already_cloned = os.path.isdir(REPO_DIR)
    if already_cloned:
        print(f"Note: To force a rebuild, please delete the '{REPO_DIR}' directory first.")
    install_sage_attention()
|
ui/__init__.py
ADDED
|
File without changes
|
ui/events.py
ADDED
|
@@ -0,0 +1,771 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import gradio as gr
|
| 2 |
+
import yaml
|
| 3 |
+
import os
|
| 4 |
+
import shutil
|
| 5 |
+
from functools import lru_cache
|
| 6 |
+
from core.settings import *
|
| 7 |
+
from utils.app_utils import *
|
| 8 |
+
from core.generation_logic import *
|
| 9 |
+
from comfy_integration.nodes import SAMPLER_CHOICES, SCHEDULER_CHOICES
|
| 10 |
+
|
| 11 |
+
from core.pipelines.controlnet_preprocessor import CPU_ONLY_PREPROCESSORS
|
| 12 |
+
from utils.app_utils import PREPROCESSOR_MODEL_MAP, PREPROCESSOR_PARAMETER_MAP, save_uploaded_file_with_hash
|
| 13 |
+
from ui.shared.ui_components import RESOLUTION_MAP, MAX_CONTROLNETS, MAX_IPADAPTERS, MAX_EMBEDDINGS, MAX_CONDITIONINGS, MAX_LORAS
|
| 14 |
+
|
| 15 |
+
|
| 16 |
+
@lru_cache(maxsize=1)
def load_controlnet_config():
    """Load and cache the SDXL ControlNet model list from yaml/controlnet_models.yaml.

    Returns the list under config["ControlNet"]["SDXL"], or [] when the file
    is missing/unreadable (any error is logged, not raised).
    """
    project_root = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
    config_path = os.path.join(project_root, 'yaml', 'controlnet_models.yaml')
    try:
        print("--- Loading controlnet_models.yaml ---")
        with open(config_path, 'r', encoding='utf-8') as fh:
            data = yaml.safe_load(fh)
        print("--- ✅ controlnet_models.yaml loaded successfully ---")
        return data.get("ControlNet", {}).get("SDXL", [])
    except Exception as e:
        print(f"Error loading controlnet_models.yaml: {e}")
        return []
@lru_cache(maxsize=1)
def load_ipadapter_config():
    """Load and cache the IP-Adapter configuration from yaml/ipadapter.yaml.

    Returns the parsed YAML mapping, or {} when the file is missing/unreadable
    (any error is logged, not raised).
    """
    project_root = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
    config_path = os.path.join(project_root, 'yaml', 'ipadapter.yaml')
    try:
        print("--- Loading ipadapter.yaml ---")
        with open(config_path, 'r', encoding='utf-8') as fh:
            data = yaml.safe_load(fh)
        print("--- ✅ ipadapter.yaml loaded successfully ---")
        return data
    except Exception as e:
        print(f"Error loading ipadapter.yaml: {e}")
        return {}
def apply_data_to_ui(data, prefix, ui_components):
    """Translate parsed generation parameters into Gradio component updates.

    Maps values from *data* (output of parse_parameters) onto the components
    of the tab identified by *prefix*, validating sampler/scheduler/model
    names against the known choices, and switches the UI to that tab.
    Returns a {component: update} dict suitable as a Gradio event output.
    """
    sampler = data.get('sampler')
    if sampler not in SAMPLER_CHOICES:
        sampler = SAMPLER_CHOICES[0]
    scheduler = data.get('scheduler')
    if scheduler not in SCHEDULER_CHOICES:
        # Prefer 'normal' as the fallback when it is available.
        scheduler = 'normal' if 'normal' in SCHEDULER_CHOICES else SCHEDULER_CHOICES[0]

    updates = {}

    model_key = f'base_model_{prefix}'
    if model_key in ui_components:
        dropdown = ui_components[model_key]
        base_model = data.get('base_model')
        # Only select the model when it is actually known; otherwise no-op.
        if base_model and base_model in MODEL_MAP_CHECKPOINT:
            updates[dropdown] = base_model
        else:
            updates[dropdown] = gr.update()

    common_values = {
        f'prompt_{prefix}': data.get('prompt', ''),
        f'neg_prompt_{prefix}': data.get('negative_prompt', ''),
        f'seed_{prefix}': data.get('seed', -1),
        f'cfg_{prefix}': data.get('cfg_scale', 7.5),
        f'steps_{prefix}': data.get('steps', 28),
        f'sampler_{prefix}': sampler,
        f'scheduler_{prefix}': scheduler,
    }
    for comp_name, value in common_values.items():
        if comp_name in ui_components:
            updates[ui_components[comp_name]] = value

    # Only txt2img exposes free width/height controls.
    if prefix == 'txt2img':
        for dim, default in (('width', 1024), ('height', 1024)):
            comp_name = f'{dim}_{prefix}'
            if comp_name in ui_components:
                updates[ui_components[comp_name]] = data.get(dim, default)

    tab_index = {"txt2img": 0, "img2img": 1, "inpaint": 2, "outpaint": 3, "hires_fix": 4}.get(prefix, 0)

    # Switch to the main generation tab, then to the sub-tab matching `prefix`.
    updates[ui_components['tabs']] = gr.Tabs(selected=0)
    updates[ui_components['image_gen_tabs']] = gr.Tabs(selected=tab_index)

    return updates
def send_info_to_tab(image, prefix, ui_components):
    """Push a PNG's embedded 'parameters' metadata into the tab *prefix*.

    When the image carries no metadata, returns a no-op update for every
    component so the Gradio event is a harmless no-op. Otherwise parses the
    parameters, applies them to the target tab, and (for image-based tabs)
    also loads the image into that tab's input component.
    """
    params_text = image.info.get('parameters', '') if image else ''
    if not params_text:
        flat = []
        for entry in ui_components.values():
            flat.extend(entry if isinstance(entry, list) else [entry])
        return {comp: gr.update() for comp in flat}

    data = parse_parameters(params_text)

    # Tabs that accept the source image itself, keyed by prefix.
    image_targets = {
        "img2img": 'input_image_img2img',
        "inpaint": 'input_image_dict_inpaint',
        "outpaint": 'input_image_outpaint',
        "hires_fix": 'input_image_hires_fix'
    }

    updates = apply_data_to_ui(data, prefix, ui_components)

    target_key = image_targets.get(prefix)
    if target_key and target_key in ui_components:
        updates[ui_components[target_key]] = gr.update(value=image)

    return updates
def send_info_by_hash(image, ui_components):
    """Parse an image's 'parameters' metadata and apply it to the txt2img tab.

    Returns no-op updates for every component when the image has no metadata.
    """
    params_text = image.info.get('parameters', '') if image else ''
    if not params_text:
        flat = []
        for entry in ui_components.values():
            flat.extend(entry if isinstance(entry, list) else [entry])
        return {comp: gr.update() for comp in flat}

    return apply_data_to_ui(parse_parameters(params_text), "txt2img", ui_components)
def attach_event_handlers(ui_components, demo):
|
| 125 |
+
def update_cn_input_visibility(choice):
|
| 126 |
+
return {
|
| 127 |
+
ui_components["cn_image_input"]: gr.update(visible=choice == "Image"),
|
| 128 |
+
ui_components["cn_video_input"]: gr.update(visible=choice == "Video")
|
| 129 |
+
}
|
| 130 |
+
ui_components["cn_input_type"].change(
|
| 131 |
+
fn=update_cn_input_visibility,
|
| 132 |
+
inputs=[ui_components["cn_input_type"]],
|
| 133 |
+
outputs=[ui_components["cn_image_input"], ui_components["cn_video_input"]]
|
| 134 |
+
)
|
| 135 |
+
|
| 136 |
+
def update_preprocessor_models_dropdown(preprocessor_name):
|
| 137 |
+
models = PREPROCESSOR_MODEL_MAP.get(preprocessor_name)
|
| 138 |
+
if models:
|
| 139 |
+
model_filenames = [m[1] for m in models]
|
| 140 |
+
return gr.update(choices=model_filenames, value=model_filenames[0], visible=True)
|
| 141 |
+
else:
|
| 142 |
+
return gr.update(choices=[], value=None, visible=False)
|
| 143 |
+
|
| 144 |
+
def update_preprocessor_settings_ui(preprocessor_name):
|
| 145 |
+
from ui.layout import MAX_DYNAMIC_CONTROLS
|
| 146 |
+
params = PREPROCESSOR_PARAMETER_MAP.get(preprocessor_name, [])
|
| 147 |
+
|
| 148 |
+
slider_updates, dropdown_updates, checkbox_updates = [], [], []
|
| 149 |
+
|
| 150 |
+
s_idx, d_idx, c_idx = 0, 0, 0
|
| 151 |
+
|
| 152 |
+
for param in params:
|
| 153 |
+
if s_idx + d_idx + c_idx >= MAX_DYNAMIC_CONTROLS: break
|
| 154 |
+
|
| 155 |
+
name = param["name"]
|
| 156 |
+
ptype = param["type"]
|
| 157 |
+
config = param["config"]
|
| 158 |
+
label = name.replace('_', ' ').title()
|
| 159 |
+
|
| 160 |
+
if ptype == "INT" or ptype == "FLOAT":
|
| 161 |
+
if s_idx < MAX_DYNAMIC_CONTROLS:
|
| 162 |
+
slider_updates.append(gr.update(
|
| 163 |
+
label=label,
|
| 164 |
+
minimum=config.get('min', 0),
|
| 165 |
+
maximum=config.get('max', 255),
|
| 166 |
+
step=config.get('step', 0.1 if ptype == "FLOAT" else 1),
|
| 167 |
+
value=config.get('default', 0),
|
| 168 |
+
visible=True
|
| 169 |
+
))
|
| 170 |
+
s_idx += 1
|
| 171 |
+
elif isinstance(ptype, list):
|
| 172 |
+
if d_idx < MAX_DYNAMIC_CONTROLS:
|
| 173 |
+
dropdown_updates.append(gr.update(
|
| 174 |
+
label=label,
|
| 175 |
+
choices=ptype,
|
| 176 |
+
value=config.get('default', ptype[0] if ptype else None),
|
| 177 |
+
visible=True
|
| 178 |
+
))
|
| 179 |
+
d_idx += 1
|
| 180 |
+
elif ptype == "BOOLEAN":
|
| 181 |
+
if c_idx < MAX_DYNAMIC_CONTROLS:
|
| 182 |
+
checkbox_updates.append(gr.update(
|
| 183 |
+
label=label,
|
| 184 |
+
value=config.get('default', False),
|
| 185 |
+
visible=True
|
| 186 |
+
))
|
| 187 |
+
c_idx += 1
|
| 188 |
+
|
| 189 |
+
for _ in range(s_idx, MAX_DYNAMIC_CONTROLS): slider_updates.append(gr.update(visible=False))
|
| 190 |
+
for _ in range(d_idx, MAX_DYNAMIC_CONTROLS): dropdown_updates.append(gr.update(visible=False))
|
| 191 |
+
for _ in range(c_idx, MAX_DYNAMIC_CONTROLS): checkbox_updates.append(gr.update(visible=False))
|
| 192 |
+
|
| 193 |
+
return slider_updates + dropdown_updates + checkbox_updates
|
| 194 |
+
|
| 195 |
+
def update_run_button_for_cpu(preprocessor_name):
|
| 196 |
+
if preprocessor_name in CPU_ONLY_PREPROCESSORS:
|
| 197 |
+
return gr.update(value="Run Preprocessor CPU Only", variant="primary"), gr.update(visible=False)
|
| 198 |
+
else:
|
| 199 |
+
return gr.update(value="Run Preprocessor", variant="primary"), gr.update(visible=True)
|
| 200 |
+
|
| 201 |
+
ui_components["preprocessor_cn"].change(
|
| 202 |
+
fn=update_preprocessor_models_dropdown,
|
| 203 |
+
inputs=[ui_components["preprocessor_cn"]],
|
| 204 |
+
outputs=[ui_components["preprocessor_model_cn"]]
|
| 205 |
+
).then(
|
| 206 |
+
fn=update_preprocessor_settings_ui,
|
| 207 |
+
inputs=[ui_components["preprocessor_cn"]],
|
| 208 |
+
outputs=ui_components["cn_sliders"] + ui_components["cn_dropdowns"] + ui_components["cn_checkboxes"]
|
| 209 |
+
).then(
|
| 210 |
+
fn=update_run_button_for_cpu,
|
| 211 |
+
inputs=[ui_components["preprocessor_cn"]],
|
| 212 |
+
outputs=[ui_components["run_cn"], ui_components["zero_gpu_cn"]]
|
| 213 |
+
)
|
| 214 |
+
|
| 215 |
+
all_dynamic_inputs = (
|
| 216 |
+
ui_components["cn_sliders"] +
|
| 217 |
+
ui_components["cn_dropdowns"] +
|
| 218 |
+
ui_components["cn_checkboxes"]
|
| 219 |
+
)
|
| 220 |
+
|
| 221 |
+
ui_components["run_cn"].click(
|
| 222 |
+
fn=run_cn_preprocessor_entry,
|
| 223 |
+
inputs=[
|
| 224 |
+
ui_components["cn_input_type"],
|
| 225 |
+
ui_components["cn_image_input"],
|
| 226 |
+
ui_components["cn_video_input"],
|
| 227 |
+
ui_components["preprocessor_cn"],
|
| 228 |
+
ui_components["preprocessor_model_cn"],
|
| 229 |
+
ui_components["zero_gpu_cn"],
|
| 230 |
+
] + all_dynamic_inputs,
|
| 231 |
+
outputs=[ui_components["output_gallery_cn"]]
|
| 232 |
+
)
|
| 233 |
+
|
| 234 |
+
def create_lora_event_handlers(prefix):
|
| 235 |
+
lora_rows = ui_components[f'lora_rows_{prefix}']
|
| 236 |
+
lora_ids = ui_components[f'lora_ids_{prefix}']
|
| 237 |
+
lora_scales = ui_components[f'lora_scales_{prefix}']
|
| 238 |
+
lora_uploads = ui_components[f'lora_uploads_{prefix}']
|
| 239 |
+
count_state = ui_components[f'lora_count_state_{prefix}']
|
| 240 |
+
add_button = ui_components[f'add_lora_button_{prefix}']
|
| 241 |
+
del_button = ui_components[f'delete_lora_button_{prefix}']
|
| 242 |
+
|
| 243 |
+
def add_lora_row(c):
|
| 244 |
+
updates = {}
|
| 245 |
+
if c < MAX_LORAS:
|
| 246 |
+
c += 1
|
| 247 |
+
updates[lora_rows[c - 1]] = gr.update(visible=True)
|
| 248 |
+
|
| 249 |
+
updates[count_state] = c
|
| 250 |
+
updates[add_button] = gr.update(visible=c < MAX_LORAS)
|
| 251 |
+
updates[del_button] = gr.update(visible=c > 1)
|
| 252 |
+
return updates
|
| 253 |
+
|
| 254 |
+
def del_lora_row(c):
|
| 255 |
+
updates = {}
|
| 256 |
+
if c > 1:
|
| 257 |
+
updates[lora_rows[c - 1]] = gr.update(visible=False)
|
| 258 |
+
updates[lora_ids[c - 1]] = ""
|
| 259 |
+
updates[lora_scales[c - 1]] = 0.0
|
| 260 |
+
updates[lora_uploads[c - 1]] = None
|
| 261 |
+
c -= 1
|
| 262 |
+
|
| 263 |
+
updates[count_state] = c
|
| 264 |
+
updates[add_button] = gr.update(visible=True)
|
| 265 |
+
updates[del_button] = gr.update(visible=c > 1)
|
| 266 |
+
return updates
|
| 267 |
+
|
| 268 |
+
add_outputs = [count_state, add_button, del_button] + lora_rows
|
| 269 |
+
del_outputs = [count_state, add_button, del_button] + lora_rows + lora_ids + lora_scales + lora_uploads
|
| 270 |
+
|
| 271 |
+
add_button.click(add_lora_row, [count_state], add_outputs, show_progress=False)
|
| 272 |
+
del_button.click(del_lora_row, [count_state], del_outputs, show_progress=False)
|
| 273 |
+
|
| 274 |
+
def create_controlnet_event_handlers(prefix):
|
| 275 |
+
cn_rows = ui_components[f'controlnet_rows_{prefix}']
|
| 276 |
+
cn_types = ui_components[f'controlnet_types_{prefix}']
|
| 277 |
+
cn_series = ui_components[f'controlnet_series_{prefix}']
|
| 278 |
+
cn_filepaths = ui_components[f'controlnet_filepaths_{prefix}']
|
| 279 |
+
cn_images = ui_components[f'controlnet_images_{prefix}']
|
| 280 |
+
cn_strengths = ui_components[f'controlnet_strengths_{prefix}']
|
| 281 |
+
|
| 282 |
+
count_state = ui_components[f'controlnet_count_state_{prefix}']
|
| 283 |
+
add_button = ui_components[f'add_controlnet_button_{prefix}']
|
| 284 |
+
del_button = ui_components[f'delete_controlnet_button_{prefix}']
|
| 285 |
+
accordion = ui_components[f'controlnet_accordion_{prefix}']
|
| 286 |
+
|
| 287 |
+
def add_cn_row(c):
|
| 288 |
+
c += 1
|
| 289 |
+
updates = {
|
| 290 |
+
count_state: c,
|
| 291 |
+
cn_rows[c-1]: gr.update(visible=True),
|
| 292 |
+
add_button: gr.update(visible=c < MAX_CONTROLNETS),
|
| 293 |
+
del_button: gr.update(visible=True)
|
| 294 |
+
}
|
| 295 |
+
return updates
|
| 296 |
+
|
| 297 |
+
def del_cn_row(c):
|
| 298 |
+
c -= 1
|
| 299 |
+
updates = {
|
| 300 |
+
count_state: c,
|
| 301 |
+
cn_rows[c]: gr.update(visible=False),
|
| 302 |
+
cn_images[c]: None,
|
| 303 |
+
cn_strengths[c]: 1.0,
|
| 304 |
+
add_button: gr.update(visible=True),
|
| 305 |
+
del_button: gr.update(visible=c > 0)
|
| 306 |
+
}
|
| 307 |
+
return updates
|
| 308 |
+
|
| 309 |
+
add_outputs = [count_state, add_button, del_button] + cn_rows
|
| 310 |
+
del_outputs = [count_state, add_button, del_button] + cn_rows + cn_images + cn_strengths
|
| 311 |
+
add_button.click(fn=add_cn_row, inputs=[count_state], outputs=add_outputs, show_progress=False)
|
| 312 |
+
del_button.click(fn=del_cn_row, inputs=[count_state], outputs=del_outputs, show_progress=False)
|
| 313 |
+
|
| 314 |
+
def on_cn_type_change(selected_type):
    """On control-type change, repopulate the series dropdown and resolve
    the default series to a model filepath."""
    cn_config = load_controlnet_config()
    series_choices = []
    if selected_type:
        series_choices = sorted({
            entry.get("Series", "Default")
            for entry in cn_config
            if selected_type in entry.get("Type", [])
        })
    default_series = series_choices[0] if series_choices else None
    filepath = "None"
    if default_series:
        filepath = next(
            (entry.get("Filepath") for entry in cn_config
             if entry.get("Series") == default_series
             and selected_type in entry.get("Type", [])),
            "None",
        )
    return gr.update(choices=series_choices, value=default_series), filepath

def on_cn_series_change(selected_series, selected_type):
    """Resolve a (series, type) pair to the configured model filepath."""
    cn_config = load_controlnet_config()
    if not (selected_series and selected_type):
        return "None"
    return next(
        (entry.get("Filepath") for entry in cn_config
         if entry.get("Series") == selected_series
         and selected_type in entry.get("Type", [])),
        "None",
    )

# Bind each row's dropdowns: type drives series + filepath, series drives filepath.
for idx in range(MAX_CONTROLNETS):
    type_dd = cn_types[idx]
    series_dd = cn_series[idx]
    filepath_state = cn_filepaths[idx]
    type_dd.change(
        fn=on_cn_type_change,
        inputs=[type_dd],
        outputs=[series_dd, filepath_state],
        show_progress=False
    )
    series_dd.change(
        fn=on_cn_series_change,
        inputs=[series_dd, type_dd],
        outputs=[filepath_state],
        show_progress=False
    )
|
| 354 |
+
|
| 355 |
+
# Emitting no-op updates on expand forces Gradio to redraw the image
# widgets, which can otherwise render blank inside a collapsed accordion.
accordion.expand(
    fn=lambda *imgs: [gr.update() for _ in imgs],
    inputs=cn_images,
    outputs=cn_images,
    show_progress=False,
)
|
| 364 |
+
|
| 365 |
+
def create_ipadapter_event_handlers(prefix):
    """Wire the dynamic IP-Adapter rows for one tab (``prefix``).

    Covers add/remove of rows, FaceID-dependent visibility of the LoRA
    strength sliders, and the accordion-expand redraw workaround.
    """
    ipa_rows = ui_components[f'ipadapter_rows_{prefix}']
    ipa_lora_strengths = ui_components[f'ipadapter_lora_strengths_{prefix}']
    ipa_final_preset = ui_components[f'ipadapter_final_preset_{prefix}']
    ipa_final_lora_strength = ui_components[f'ipadapter_final_lora_strength_{prefix}']
    count_state = ui_components[f'ipadapter_count_state_{prefix}']
    add_button = ui_components[f'add_ipadapter_button_{prefix}']
    del_button = ui_components[f'delete_ipadapter_button_{prefix}']
    accordion = ui_components[f'ipadapter_accordion_{prefix}']

    def add_ipa_row(c):
        """Reveal the next IP-Adapter row; returns a partial-update dict."""
        # Clamp defensively against stale clicks past the row limit.
        c = min(c + 1, MAX_IPADAPTERS)
        return {
            count_state: c,
            ipa_rows[c - 1]: gr.update(visible=True),
            add_button: gr.update(visible=c < MAX_IPADAPTERS),
            del_button: gr.update(visible=True),
        }

    def del_ipa_row(c):
        """Hide the last visible IP-Adapter row."""
        if c <= 0:
            # Defensive: avoid a negative count / hiding ipa_rows[-1]
            # when a stale click arrives with no rows visible.
            return {
                count_state: 0,
                add_button: gr.update(visible=True),
                del_button: gr.update(visible=False),
            }
        c -= 1
        return {
            count_state: c,
            ipa_rows[c]: gr.update(visible=False),
            add_button: gr.update(visible=True),
            del_button: gr.update(visible=c > 0),
        }

    add_outputs = [count_state, add_button, del_button] + ipa_rows
    del_outputs = [count_state, add_button, del_button] + ipa_rows
    add_button.click(fn=add_ipa_row, inputs=[count_state], outputs=add_outputs, show_progress=False)
    del_button.click(fn=del_ipa_row, inputs=[count_state], outputs=del_outputs, show_progress=False)

    def on_preset_change(preset_value):
        """Show the LoRA-strength sliders only for FaceID presets (FaceID
        adapters ship a companion LoRA whose weight is tunable)."""
        config = load_ipadapter_config()
        faceid_presets = []
        if isinstance(config, list):
            # 'FACEID' always contains 'FACE', so one substring test
            # covers both spellings used in the preset names.
            faceid_presets = [
                p.get('preset_name', '') for p in config
                if 'FACE' in p.get('preset_name', '')
            ]
        is_visible = preset_value in faceid_presets
        # One update per per-row slider plus one for the shared final slider.
        return [gr.update(visible=is_visible)] * (MAX_IPADAPTERS + 1)

    all_lora_strength_sliders = [ipa_final_lora_strength] + ipa_lora_strengths
    ipa_final_preset.change(fn=on_preset_change, inputs=[ipa_final_preset], outputs=all_lora_strength_sliders, show_progress=False)

    # No-op updates on expand force Gradio to redraw images that can
    # otherwise render blank inside a previously collapsed accordion.
    accordion.expand(fn=lambda *imgs: [gr.update() for _ in imgs], inputs=ui_components[f'ipadapter_images_{prefix}'], outputs=ui_components[f'ipadapter_images_{prefix}'], show_progress=False)
|
| 414 |
+
|
| 415 |
+
|
| 416 |
+
def create_embedding_event_handlers(prefix):
    """Wire add/remove handling for the dynamic embedding rows of one tab."""
    rows = ui_components[f'embedding_rows_{prefix}']
    ids = ui_components[f'embeddings_ids_{prefix}']
    files = ui_components[f'embeddings_files_{prefix}']
    count_state = ui_components[f'embedding_count_state_{prefix}']
    add_button = ui_components[f'add_embedding_button_{prefix}']
    del_button = ui_components[f'delete_embedding_button_{prefix}']

    def add_row(c):
        """Reveal the next embedding row; returns a partial-update dict."""
        # Defensive clamp against stale clicks past the row limit.
        c = min(c + 1, MAX_EMBEDDINGS)
        return {
            count_state: c,
            rows[c - 1]: gr.update(visible=True),
            add_button: gr.update(visible=c < MAX_EMBEDDINGS),
            del_button: gr.update(visible=True)
        }

    def del_row(c):
        """Hide the last visible embedding row and clear its inputs."""
        if c <= 0:
            # Defensive: avoid a negative count / hiding rows[-1].
            return {
                count_state: 0,
                add_button: gr.update(visible=True),
                del_button: gr.update(visible=False)
            }
        c -= 1
        return {
            count_state: c,
            rows[c]: gr.update(visible=False),
            # Reset the hidden row's state so it is not reused by accident.
            ids[c]: "",
            files[c]: None,
            add_button: gr.update(visible=True),
            del_button: gr.update(visible=c > 0)
        }

    add_outputs = [count_state, add_button, del_button] + rows
    del_outputs = [count_state, add_button, del_button] + rows + ids + files
    add_button.click(fn=add_row, inputs=[count_state], outputs=add_outputs, show_progress=False)
    del_button.click(fn=del_row, inputs=[count_state], outputs=del_outputs, show_progress=False)
|
| 448 |
+
|
| 449 |
+
def create_conditioning_event_handlers(prefix):
    """Wire add/remove handling for the dynamic regional-conditioning rows."""
    rows = ui_components[f'conditioning_rows_{prefix}']
    prompts = ui_components[f'conditioning_prompts_{prefix}']
    count_state = ui_components[f'conditioning_count_state_{prefix}']
    add_button = ui_components[f'add_conditioning_button_{prefix}']
    del_button = ui_components[f'delete_conditioning_button_{prefix}']

    def add_row(c):
        """Reveal the next conditioning row; returns a partial-update dict."""
        # Defensive clamp against stale clicks past the row limit.
        c = min(c + 1, MAX_CONDITIONINGS)
        return {
            count_state: c,
            rows[c - 1]: gr.update(visible=True),
            add_button: gr.update(visible=c < MAX_CONDITIONINGS),
            del_button: gr.update(visible=True),
        }

    def del_row(c):
        """Hide the last visible conditioning row and clear its prompt."""
        if c <= 0:
            # Defensive: avoid a negative count / hiding rows[-1].
            return {
                count_state: 0,
                add_button: gr.update(visible=True),
                del_button: gr.update(visible=False),
            }
        c -= 1
        return {
            count_state: c,
            rows[c]: gr.update(visible=False),
            # Clear the hidden row's prompt so it is not reused by accident.
            prompts[c]: "",
            add_button: gr.update(visible=True),
            del_button: gr.update(visible=c > 0),
        }

    add_outputs = [count_state, add_button, del_button] + rows
    del_outputs = [count_state, add_button, del_button] + rows + prompts
    add_button.click(fn=add_row, inputs=[count_state], outputs=add_outputs, show_progress=False)
    del_button.click(fn=del_row, inputs=[count_state], outputs=del_outputs, show_progress=False)
|
| 479 |
+
|
| 480 |
+
def on_vae_upload(file_obj):
    """Persist an uploaded VAE under a content-hash name, fill the ID box,
    and switch the source selector to "File". Empty upload is a no-op."""
    if not file_obj:
        return gr.update(), gr.update(), None
    hashed_filename = save_uploaded_file_with_hash(file_obj, VAE_DIR)
    return hashed_filename, "File", file_obj
|
| 486 |
+
|
| 487 |
+
def on_lora_upload(file_obj):
    """Persist an uploaded LoRA under a content-hash name, fill the ID box,
    and switch the source selector to "File". Empty upload is a no-op."""
    if not file_obj:
        return gr.update(), gr.update()
    hashed_filename = save_uploaded_file_with_hash(file_obj, LORA_DIR)
    return hashed_filename, "File"
|
| 493 |
+
|
| 494 |
+
def on_embedding_upload(file_obj):
    """Persist an uploaded embedding under a content-hash name, fill the ID
    box, and switch the source selector to "File". Empty upload is a no-op."""
    if not file_obj:
        return gr.update(), gr.update(), None
    hashed_filename = save_uploaded_file_with_hash(file_obj, EMBEDDING_DIR)
    return hashed_filename, "File", file_obj
|
| 500 |
+
|
| 501 |
+
|
| 502 |
+
def create_run_event(prefix: str, task_type: str):
    """Bind the tab's Run button: collect every input component into a flat
    list, then reassemble the positional values into a kwargs dict for the
    generation wrapper. The flat-list order and the slicing order in
    ``create_ui_inputs_dict`` must stay in lockstep."""
    # Named inputs: kwarg name -> Gradio component for this tab.
    run_inputs_map = {
        'model_display_name': ui_components[f'base_model_{prefix}'],
        'positive_prompt': ui_components[f'prompt_{prefix}'],
        'negative_prompt': ui_components[f'neg_prompt_{prefix}'],
        'seed': ui_components[f'seed_{prefix}'],
        'batch_size': ui_components[f'batch_size_{prefix}'],
        'guidance_scale': ui_components[f'cfg_{prefix}'],
        'num_inference_steps': ui_components[f'steps_{prefix}'],
        'sampler': ui_components[f'sampler_{prefix}'],
        'scheduler': ui_components[f'scheduler_{prefix}'],
        'zero_gpu_duration': ui_components[f'zero_gpu_{prefix}'],
        # .get(): the API-key box may not exist on every tab.
        'civitai_api_key': ui_components.get(f'civitai_api_key_{prefix}'),
        'clip_skip': ui_components[f'clip_skip_{prefix}'],
        # Constant per tab; gr.State feeds it through the inputs list.
        'task_type': gr.State(task_type)
    }

    # img2img/inpaint take their output size from the input image instead.
    if task_type not in ['img2img', 'inpaint']:
        run_inputs_map.update({'width': ui_components[f'width_{prefix}'], 'height': ui_components[f'height_{prefix}']})

    # Extra per-task components, keyed by the kwarg name the backend expects.
    task_specific_map = {
        'img2img': {'img2img_image': f'input_image_{prefix}', 'img2img_denoise': f'denoise_{prefix}'},
        'inpaint': {'inpaint_image_dict': f'input_image_dict_{prefix}'},
        'outpaint': {'outpaint_image': f'input_image_{prefix}', 'outpaint_left': f'outpaint_left_{prefix}', 'outpaint_top': f'outpaint_top_{prefix}', 'outpaint_right': f'outpaint_right_{prefix}', 'outpaint_bottom': f'outpaint_bottom_{prefix}'},
        'hires_fix': {'hires_image': f'input_image_{prefix}', 'hires_upscaler': f'hires_upscaler_{prefix}', 'hires_scale_by': f'hires_scale_by_{prefix}', 'hires_denoise': f'denoise_{prefix}'}
    }
    if task_type in task_specific_map:
        for key, comp_name in task_specific_map[task_type].items():
            run_inputs_map[key] = ui_components[comp_name]

    # Variable-length component groups appended after the named inputs;
    # their values are re-sliced out of *args by position below.
    lora_data_components = ui_components.get(f'all_lora_components_flat_{prefix}', [])
    controlnet_data_components = ui_components.get(f'all_controlnet_components_flat_{prefix}', [])
    ipadapter_data_components = ui_components.get(f'all_ipadapter_components_flat_{prefix}', [])
    embedding_data_components = ui_components.get(f'all_embedding_components_flat_{prefix}', [])
    conditioning_data_components = ui_components.get(f'all_conditioning_components_flat_{prefix}', [])

    # Optional VAE override components (may be None on some tabs).
    run_inputs_map['vae_source'] = ui_components.get(f'vae_source_{prefix}')
    run_inputs_map['vae_id'] = ui_components.get(f'vae_id_{prefix}')
    run_inputs_map['vae_file'] = ui_components.get(f'vae_file_{prefix}')

    # None entries (components absent on this tab) are dropped from the flat
    # input list; create_ui_inputs_dict mirrors the same filtering below.
    input_keys = list(run_inputs_map.keys())
    input_list_flat = [v for v in run_inputs_map.values() if v is not None]
    input_list_flat += lora_data_components + controlnet_data_components + ipadapter_data_components + embedding_data_components + conditioning_data_components

    def create_ui_inputs_dict(*args):
        """Rebuild the kwargs dict from positional values: first the named
        inputs (insertion order, skipping absent components), then each
        variable-length group sliced off in the order appended above."""
        valid_keys = [k for k in input_keys if run_inputs_map[k] is not None]
        ui_dict = dict(zip(valid_keys, args[:len(valid_keys)]))
        arg_idx = len(valid_keys)

        ui_dict['lora_data'] = list(args[arg_idx : arg_idx + len(lora_data_components)])
        arg_idx += len(lora_data_components)
        ui_dict['controlnet_data'] = list(args[arg_idx : arg_idx + len(controlnet_data_components)])
        arg_idx += len(controlnet_data_components)
        ui_dict['ipadapter_data'] = list(args[arg_idx : arg_idx + len(ipadapter_data_components)])
        arg_idx += len(ipadapter_data_components)
        ui_dict['embedding_data'] = list(args[arg_idx : arg_idx + len(embedding_data_components)])
        arg_idx += len(embedding_data_components)
        ui_dict['conditioning_data'] = list(args[arg_idx : arg_idx + len(conditioning_data_components)])

        return ui_dict

    ui_components[f'run_{prefix}'].click(
        # Progress is bound as a default so Gradio injects tqdm tracking.
        fn=lambda *args, progress=gr.Progress(track_tqdm=True): generate_image_wrapper(create_ui_inputs_dict(*args), progress),
        inputs=input_list_flat,
        outputs=[ui_components[f'result_{prefix}']]
    )
|
| 568 |
+
|
| 569 |
+
|
| 570 |
+
# Wire every generation tab: optional feature sections (LoRA, ControlNet,
# IP-Adapter, embeddings, conditioning, VAE override) plus the Run button.
# Presence checks guard tabs that omit a given section.
for prefix, task_type in [
    ("txt2img", "txt2img"), ("img2img", "img2img"), ("inpaint", "inpaint"),
    ("outpaint", "outpaint"), ("hires_fix", "hires_fix"),
]:
    if f'add_lora_button_{prefix}' in ui_components:
        create_lora_event_handlers(prefix)
        lora_uploads = ui_components[f'lora_uploads_{prefix}']
        lora_ids = ui_components[f'lora_ids_{prefix}']
        lora_sources = ui_components[f'lora_sources_{prefix}']
        for i in range(MAX_LORAS):
            # A file upload fills the row's ID box and flips its source to "File".
            lora_uploads[i].upload(
                fn=on_lora_upload,
                inputs=[lora_uploads[i]],
                outputs=[lora_ids[i], lora_sources[i]],
                show_progress=False
            )
    if f'add_controlnet_button_{prefix}' in ui_components: create_controlnet_event_handlers(prefix)
    if f'add_ipadapter_button_{prefix}' in ui_components: create_ipadapter_event_handlers(prefix)
    if f'add_embedding_button_{prefix}' in ui_components:
        create_embedding_event_handlers(prefix)
        if f'embeddings_uploads_{prefix}' in ui_components:
            emb_uploads = ui_components[f'embeddings_uploads_{prefix}']
            emb_ids = ui_components[f'embeddings_ids_{prefix}']
            emb_sources = ui_components[f'embeddings_sources_{prefix}']
            emb_files = ui_components[f'embeddings_files_{prefix}']
            for i in range(MAX_EMBEDDINGS):
                emb_uploads[i].upload(
                    fn=on_embedding_upload,
                    inputs=[emb_uploads[i]],
                    outputs=[emb_ids[i], emb_sources[i], emb_files[i]],
                    show_progress=False
                )
    if f'add_conditioning_button_{prefix}' in ui_components: create_conditioning_event_handlers(prefix)
    if f'vae_source_{prefix}' in ui_components:
        upload_button = ui_components.get(f'vae_upload_button_{prefix}')
        if upload_button:
            upload_button.upload(
                fn=on_vae_upload,
                inputs=[upload_button],
                outputs=[
                    ui_components[f'vae_id_{prefix}'],
                    ui_components[f'vae_source_{prefix}'],
                    ui_components[f'vae_file_{prefix}']
                ]
            )

    # Every tab gets a Run binding regardless of which sections it has.
    create_run_event(prefix, task_type)
|
| 617 |
+
|
| 618 |
+
|
| 619 |
+
# PNG Info: extract prompt / negative prompt / parameter metadata from an
# uploaded image into the read-only output boxes.
ui_components["info_get_button"].click(
    get_png_info,
    [ui_components["info_image_input"]],
    [ui_components["info_prompt_output"], ui_components["info_neg_prompt_output"], ui_components["info_params_output"]]
)

# Flatten every registered component (list-valued entries included) so the
# "send to tab" handlers can return updates targeting any component.
flat_ui_list = [comp for comp_or_list in ui_components.values() for comp in (comp_or_list if isinstance(comp_or_list, list) else [comp_or_list])]

# Push the parsed metadata into the chosen tab's controls.
ui_components["send_to_txt2img_button"].click(lambda img: send_info_by_hash(img, ui_components), [ui_components["info_image_input"]], flat_ui_list)
ui_components["send_to_img2img_button"].click(lambda img: send_info_to_tab(img, "img2img", ui_components), [ui_components["info_image_input"]], flat_ui_list)
ui_components["send_to_inpaint_button"].click(lambda img: send_info_to_tab(img, "inpaint", ui_components), [ui_components["info_image_input"]], flat_ui_list)
ui_components["send_to_outpaint_button"].click(lambda img: send_info_to_tab(img, "outpaint", ui_components), [ui_components["info_image_input"]], flat_ui_list)
ui_components["send_to_hires_fix_button"].click(lambda img: send_info_to_tab(img, "hires_fix", ui_components), [ui_components["info_image_input"]], flat_ui_list)
|
| 632 |
+
|
| 633 |
+
def on_aspect_ratio_change(ratio_key, model_display_name):
    """Resolve an aspect-ratio preset to (width, height) for the selected
    model's family, defaulting to the SDXL table and 1024x1024."""
    family = MODEL_TYPE_MAP.get(model_display_name, 'sdxl').lower()
    resolutions = RESOLUTION_MAP.get(family, RESOLUTION_MAP.get("sdxl", {}))
    width, height = resolutions.get(ratio_key, (1024, 1024))
    return width, height

# Bind the preset dropdown on every tab that exposes one.
for tab_prefix in ["txt2img", "img2img", "inpaint", "outpaint", "hires_fix"]:
    if f'aspect_ratio_{tab_prefix}' not in ui_components:
        continue
    ratio_dd = ui_components[f'aspect_ratio_{tab_prefix}']
    ratio_dd.change(
        fn=on_aspect_ratio_change,
        inputs=[ratio_dd, ui_components[f'base_model_{tab_prefix}']],
        outputs=[ui_components[f'width_{tab_prefix}'], ui_components[f'height_{tab_prefix}']],
        show_progress=False,
    )
|
| 646 |
+
|
| 647 |
+
# Fullscreen toggle for the inpaint editor: hide the surrounding controls
# and enlarge the mask editor when "Fullscreen View" is selected.
if 'view_mode_inpaint' in ui_components:
    def toggle_inpaint_fullscreen_view(view_mode):
        """Return partial updates that show/hide the tab chrome and resize
        the image editor (800px fullscreen, 272px normal)."""
        is_fullscreen = (view_mode == "Fullscreen View")
        other_elements_visible = not is_fullscreen
        editor_height = 800 if is_fullscreen else 272
        return {
            ui_components['model_and_run_row_inpaint']: gr.update(visible=other_elements_visible),
            ui_components['prompts_column_inpaint']: gr.update(visible=other_elements_visible),
            ui_components['params_and_gallery_row_inpaint']: gr.update(visible=other_elements_visible),
            ui_components['accordion_wrapper_inpaint']: gr.update(visible=other_elements_visible),
            ui_components['input_image_dict_inpaint']: gr.update(height=editor_height),
        }

    # Must list every component the handler's update dict may key on.
    output_components = [
        ui_components['model_and_run_row_inpaint'], ui_components['prompts_column_inpaint'],
        ui_components['params_and_gallery_row_inpaint'], ui_components['accordion_wrapper_inpaint'],
        ui_components['input_image_dict_inpaint']
    ]
    ui_components['view_mode_inpaint'].change(fn=toggle_inpaint_fullscreen_view, inputs=[ui_components['view_mode_inpaint']], outputs=output_components, show_progress=False)
|
| 666 |
+
|
| 667 |
+
def initialize_all_cn_dropdowns():
    """Seed every tab's ControlNet type/series dropdowns (and filepath
    states) with the first available type and series from the config."""
    cn_config = load_controlnet_config()
    if not cn_config:
        return {}

    all_types = sorted({t for entry in cn_config for t in entry.get("Type", [])})
    default_type = all_types[0] if all_types else None

    series_choices = []
    if default_type:
        series_choices = sorted({
            entry.get("Series", "Default")
            for entry in cn_config
            if default_type in entry.get("Type", [])
        })
    default_series = series_choices[0] if series_choices else None

    filepath = "None"
    if default_series and default_type:
        filepath = next(
            (entry.get("Filepath") for entry in cn_config
             if entry.get("Series") == default_series
             and default_type in entry.get("Type", [])),
            "None",
        )

    updates = {}
    for prefix in ["txt2img", "img2img", "inpaint", "outpaint", "hires_fix"]:
        if f'controlnet_types_{prefix}' not in ui_components:
            continue
        for type_dd in ui_components[f'controlnet_types_{prefix}']:
            updates[type_dd] = gr.update(choices=all_types, value=default_type)
        for series_dd in ui_components[f'controlnet_series_{prefix}']:
            updates[series_dd] = gr.update(choices=series_choices, value=default_series)
        for filepath_state in ui_components[f'controlnet_filepaths_{prefix}']:
            updates[filepath_state] = filepath
    return updates
|
| 696 |
+
|
| 697 |
+
def initialize_all_ipa_dropdowns():
    """Populate every tab's IP-Adapter preset dropdown and set the initial
    visibility of the FaceID LoRA-strength sliders."""
    config = load_ipadapter_config()
    if not config or not isinstance(config, list):
        return {}

    unified_presets, faceid_presets = [], []
    for preset_info in config:
        name = preset_info.get("preset_name")
        if not name:
            continue
        # 'FACEID' contains 'FACE', so a single substring test covers both.
        bucket = faceid_presets if "FACE" in name else unified_presets
        bucket.append(name)

    # Unified presets listed first, FaceID presets after.
    all_presets = unified_presets + faceid_presets
    default_preset = all_presets[0] if all_presets else None
    lora_strength_update = gr.update(visible=default_preset in faceid_presets)

    updates = {}
    for prefix in ["txt2img", "img2img", "inpaint", "outpaint", "hires_fix"]:
        if f'ipadapter_final_preset_{prefix}' not in ui_components:
            continue
        for lora_strength_slider in ui_components[f'ipadapter_lora_strengths_{prefix}']:
            updates[lora_strength_slider] = lora_strength_update
        updates[ui_components[f'ipadapter_final_preset_{prefix}']] = gr.update(choices=all_presets, value=default_preset)
        updates[ui_components[f'ipadapter_final_lora_strength_{prefix}']] = lora_strength_update
    return updates
|
| 726 |
+
|
| 727 |
+
def run_on_load():
    """One-shot page-load initializer: seed the ControlNet and IP-Adapter
    dropdowns on every tab and the preprocessor tab's default controls.
    Returns a single dict of component -> update consumed by demo.load."""
    cn_updates = initialize_all_cn_dropdowns()
    ipa_updates = initialize_all_ipa_dropdowns()

    all_updates = {**cn_updates, **ipa_updates}

    # Preprocessor tab defaults to "Canny Edge"; derive its model dropdown,
    # dynamic settings widgets, and CPU/GPU run-button state from that.
    default_preprocessor = "Canny Edge"
    model_update = update_preprocessor_models_dropdown(default_preprocessor)
    all_updates[ui_components["preprocessor_model_cn"]] = model_update

    # settings_outputs is positionally aligned with sliders + dropdowns +
    # checkboxes, in that order — must match the concatenation below.
    settings_outputs = update_preprocessor_settings_ui(default_preprocessor)
    dynamic_outputs = ui_components["cn_sliders"] + ui_components["cn_dropdowns"] + ui_components["cn_checkboxes"]
    for i, comp in enumerate(dynamic_outputs):
        all_updates[comp] = settings_outputs[i]

    run_button_update, zero_gpu_update = update_run_button_for_cpu(default_preprocessor)
    all_updates[ui_components["run_cn"]] = run_button_update
    all_updates[ui_components["zero_gpu_cn"]] = zero_gpu_update

    return all_updates

# Collect every component run_on_load may return an update for; Gradio
# requires them all to be declared as outputs of the load event.
all_load_outputs = []
for prefix in ["txt2img", "img2img", "inpaint", "outpaint", "hires_fix"]:
    if f'controlnet_types_{prefix}' in ui_components:
        all_load_outputs.extend(ui_components[f'controlnet_types_{prefix}'])
        all_load_outputs.extend(ui_components[f'controlnet_series_{prefix}'])
        all_load_outputs.extend(ui_components[f'controlnet_filepaths_{prefix}'])
    if f'ipadapter_final_preset_{prefix}' in ui_components:
        all_load_outputs.extend(ui_components[f'ipadapter_lora_strengths_{prefix}'])
        all_load_outputs.append(ui_components[f'ipadapter_final_preset_{prefix}'])
        all_load_outputs.append(ui_components[f'ipadapter_final_lora_strength_{prefix}'])

all_load_outputs.extend([
    ui_components["preprocessor_model_cn"],
    *ui_components["cn_sliders"],
    *ui_components["cn_dropdowns"],
    *ui_components["cn_checkboxes"],
    ui_components["run_cn"],
    ui_components["zero_gpu_cn"]
])

demo.load(
    fn=run_on_load,
    outputs=all_load_outputs
)
|
ui/layout.py
ADDED
|
@@ -0,0 +1,114 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import os
|
| 2 |
+
import gradio as gr
|
| 3 |
+
from core.settings import *
|
| 4 |
+
|
| 5 |
+
from .shared import txt2img_ui, img2img_ui, inpaint_ui, outpaint_ui, hires_fix_ui
|
| 6 |
+
|
| 7 |
+
MAX_DYNAMIC_CONTROLS = 10
|
| 8 |
+
|
| 9 |
+
def get_preprocessor_choices():
    """Collect the display names of every registered ComfyUI aux node that
    acts as a ControlNet preprocessor, deduplicated and alphabetized."""
    from nodes import NODE_DISPLAY_NAME_MAPPINGS

    markers = ("Preprocessor", "Segmentor", "Estimator", "Detector")
    names = {
        display_name
        for class_name, display_name in NODE_DISPLAY_NAME_MAPPINGS.items()
        if any(marker in class_name for marker in markers)
    }
    return sorted(names)
|
| 18 |
+
|
| 19 |
+
|
| 20 |
+
def build_ui(event_handler_function):
    """Construct the full Gradio Blocks app and return the demo object.

    Every component is registered in ``ui_components`` (a flat name -> component
    dict, with list values for repeated rows) and handed to
    ``event_handler_function(ui_components, demo)`` for event wiring.
    """
    ui_components = {}

    with gr.Blocks(css="#col-container {margin: 0 auto; max-width: 1024px;}") as demo:
        gr.Markdown("# ImageGen - Illustrious")
        gr.Markdown(
            "This demo is a streamlined version of the [Comfy web UI](https://github.com/RioShiina47/comfy-webui)'s ImgGen functionality. "
            "Due to storage limitations on Spaces, this demo features the Illustrious series of models.\n"
            "Other versions are also available: "
            "[NoobAI](https://huggingface.co/spaces/RioShiina/ImageGen-NoobAI), "
            "[Pony](https://huggingface.co/spaces/RioShiina/ImageGen-Pony1), "
            "[SDXL](https://huggingface.co/spaces/RioShiina/ImageGen-SDXL), "
            "[SD1.5](https://huggingface.co/spaces/RioShiina/ImageGen-SD15)"
        )
        with gr.Tabs(elem_id="tabs_container") as tabs:
            # Main generation tab: one sub-tab per task type, each built by
            # its own module; all components merge into ui_components.
            with gr.TabItem("Illustrious", id=0):
                with gr.Tabs(elem_id="image_gen_tabs") as image_gen_tabs:
                    with gr.TabItem("Txt2Img", id=0):
                        ui_components.update(txt2img_ui.create_ui())

                    with gr.TabItem("Img2Img", id=1):
                        ui_components.update(img2img_ui.create_ui())

                    with gr.TabItem("Inpaint", id=2):
                        ui_components.update(inpaint_ui.create_ui())

                    with gr.TabItem("Outpaint", id=3):
                        ui_components.update(outpaint_ui.create_ui())

                    with gr.TabItem("Hires. Fix", id=4):
                        ui_components.update(hires_fix_ui.create_ui())

                ui_components['image_gen_tabs'] = image_gen_tabs

            # Standalone ControlNet preprocessor playground.
            with gr.TabItem("Controlnet Preprocessors", id=1):
                gr.Markdown("## ControlNet Auxiliary Preprocessors")
                gr.Markdown("Powered by [Fannovel16/comfyui_controlnet_aux](https://github.com/Fannovel16/comfyui_controlnet_aux).")
                gr.Markdown("Upload an image or video to process it with a ControlNet preprocessor.")
                with gr.Row():
                    with gr.Column(scale=1):
                        cn_input_type = gr.Radio(["Image", "Video"], label="Input Type", value="Image")
                        cn_image_input = gr.Image(type="pil", label="Input Image", visible=True, height=384)
                        cn_video_input = gr.Video(label="Input Video", visible=False)
                        preprocessor_cn = gr.Dropdown(label="Preprocessor", choices=get_preprocessor_choices(), value="Canny Edge")
                        preprocessor_model_cn = gr.Dropdown(label="Preprocessor Model", choices=[], value=None, visible=False)
                        with gr.Column() as preprocessor_settings_ui:
                            # Pre-allocated hidden widget pools; event handlers
                            # show/configure them per selected preprocessor.
                            cn_sliders, cn_dropdowns, cn_checkboxes = [], [], []
                            for i in range(MAX_DYNAMIC_CONTROLS):
                                cn_sliders.append(gr.Slider(visible=False, label=f"dyn_slider_{i}"))
                                cn_dropdowns.append(gr.Dropdown(visible=False, label=f"dyn_dropdown_{i}"))
                                cn_checkboxes.append(gr.Checkbox(visible=False, label=f"dyn_checkbox_{i}"))
                        run_cn = gr.Button("Run Preprocessor", variant="primary")
                    with gr.Column(scale=1):
                        output_gallery_cn = gr.Gallery(label="Output", show_label=False, object_fit="contain", height=512)
                        zero_gpu_cn = gr.Number(label="ZeroGPU Duration (s)", value=None, placeholder="Default: 60s, Max: 120s", info="Optional")
                ui_components.update({
                    "cn_input_type": cn_input_type, "cn_image_input": cn_image_input, "cn_video_input": cn_video_input,
                    "preprocessor_cn": preprocessor_cn, "preprocessor_model_cn": preprocessor_model_cn, "run_cn": run_cn,
                    "zero_gpu_cn": zero_gpu_cn, "output_gallery_cn": output_gallery_cn,
                    "preprocessor_settings_ui": preprocessor_settings_ui, "cn_sliders": cn_sliders,
                    "cn_dropdowns": cn_dropdowns, "cn_checkboxes": cn_checkboxes
                })

            # Metadata viewer with "send to tab" shortcuts.
            with gr.TabItem("PNG Info", id=2):
                with gr.Column():
                    info_image_input = gr.Image(type="pil", label="Upload Image", height=512)
                    with gr.Row():
                        info_get_button = gr.Button("Get Info")
                        send_to_txt2img_button = gr.Button("Send to Txt2Img", variant="primary")
                    with gr.Row():
                        send_to_img2img_button = gr.Button("Send to Img2Img")
                        send_to_inpaint_button = gr.Button("Send to Inpaint")
                        send_to_outpaint_button = gr.Button("Send to Outpaint")
                        send_to_hires_fix_button = gr.Button("Send to Hires. Fix")
                    gr.Markdown("### Positive Prompt"); info_prompt_output = gr.Textbox(lines=3, interactive=False, show_label=False)
                    gr.Markdown("### Negative Prompt"); info_neg_prompt_output = gr.Textbox(lines=3, interactive=False, show_label=False)
                    gr.Markdown("### Other Parameters"); info_params_output = gr.Textbox(lines=5, interactive=False, show_label=False)
                    ui_components.update({
                        "info_image_input": info_image_input, "info_get_button": info_get_button,
                        "send_to_txt2img_button": send_to_txt2img_button,
                        "send_to_img2img_button": send_to_img2img_button,
                        "send_to_inpaint_button": send_to_inpaint_button,
                        "send_to_outpaint_button": send_to_outpaint_button,
                        "send_to_hires_fix_button": send_to_hires_fix_button,
                        "info_prompt_output": info_prompt_output, "info_neg_prompt_output": info_neg_prompt_output,
                        "info_params_output": info_params_output
                    })

        ui_components["tabs"] = tabs

        gr.Markdown("<div style='text-align: center; margin-top: 20px;'>Made by RioShiina with ❤️<br><a href='https://github.com/RioShiina47' target='_blank'>GitHub</a> | <a href='https://huggingface.co/RioShiina' target='_blank'>Hugging Face</a> | <a href='https://civitai.com/user/RioShiina' target='_blank'>Civitai</a></div>")

        # Event wiring must run inside the Blocks context.
        event_handler_function(ui_components, demo)

    return demo
|
ui/shared/hires_fix_ui.py
ADDED
|
@@ -0,0 +1,74 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import gradio as gr
|
| 2 |
+
from core.settings import MODEL_MAP_CHECKPOINT
|
| 3 |
+
from comfy_integration.nodes import SAMPLER_CHOICES, SCHEDULER_CHOICES
|
| 4 |
+
from .ui_components import (
|
| 5 |
+
create_lora_settings_ui,
|
| 6 |
+
create_controlnet_ui, create_ipadapter_ui, create_embedding_ui,
|
| 7 |
+
create_conditioning_ui, create_vae_override_ui, create_api_key_ui
|
| 8 |
+
)
|
| 9 |
+
|
| 10 |
+
def create_ui():
    """Build the Hires. Fix tab.

    Returns a dict mapping ``"<name>_hires_fix"`` keys to the created Gradio
    components so the event wiring can look them up by name.
    """
    prefix = "hires_fix"
    key = lambda name: f"{name}_{prefix}"
    components = {}

    with gr.Column():
        # Top row: model selector plus the run button.
        with gr.Row():
            components[key('base_model')] = gr.Dropdown(
                label="Base Model",
                choices=list(MODEL_MAP_CHECKPOINT.keys()),
                value=list(MODEL_MAP_CHECKPOINT.keys())[0],
                scale=3,
            )
            with gr.Column(scale=1):
                components[key('run')] = gr.Button("Run Hires. Fix", variant="primary")

        # Source image on the left, prompts on the right.
        with gr.Row():
            with gr.Column(scale=1):
                components[key('input_image')] = gr.Image(type="pil", label="Input Image", height=255)
            with gr.Column(scale=2):
                components[key('prompt')] = gr.Text(label="Prompt", lines=3, placeholder="Describe the final image...")
                components[key('neg_prompt')] = gr.Text(label="Negative prompt", lines=3, value="monochrome, (low quality, worst quality:1.2), 3d, watermark, signature, ugly, poorly drawn,")

        # Sampling parameters on the left, result gallery on the right.
        with gr.Row():
            with gr.Column(scale=1):
                with gr.Row():
                    components[key('hires_upscaler')] = gr.Dropdown(
                        label="Upscaler",
                        choices=["nearest-exact", "bilinear", "area", "bicubic", "bislerp"],
                        value="nearest-exact",
                    )
                    components[key('hires_scale_by')] = gr.Slider(
                        label="Upscale by", minimum=1.0, maximum=4.0, step=0.1, value=1.5
                    )

                with gr.Row():
                    components[key('denoise')] = gr.Slider(label="Denoise Strength", minimum=0.0, maximum=1.0, step=0.01, value=0.55)

                with gr.Row():
                    components[key('sampler')] = gr.Dropdown(label="Sampler", choices=SAMPLER_CHOICES, value=SAMPLER_CHOICES[0])
                    components[key('scheduler')] = gr.Dropdown(label="Scheduler", choices=SCHEDULER_CHOICES, value='normal' if 'normal' in SCHEDULER_CHOICES else SCHEDULER_CHOICES[0])
                with gr.Row():
                    components[key('steps')] = gr.Slider(label="Steps", minimum=1, maximum=100, step=1, value=28)
                    components[key('cfg')] = gr.Slider(label="CFG Scale", minimum=1.0, maximum=20.0, step=0.1, value=7.5)
                with gr.Row():
                    components[key('seed')] = gr.Number(label="Seed (-1 for random)", value=-1, precision=0)
                    components[key('batch_size')] = gr.Slider(label="Batch Size", minimum=1, maximum=16, step=1, value=1)
                with gr.Row():
                    components[key('zero_gpu')] = gr.Number(label="ZeroGPU Duration (s)", value=None, placeholder="Default: 60s, Max: 120s", info="Optional: Set how long to reserve the GPU.")

                # Hidden values; the real output size presumably comes from the
                # input image times the upscale factor — confirm in ui.events.
                components[key('clip_skip')] = gr.State(value=1)
                components[key('width')] = gr.State(value=512)
                components[key('height')] = gr.State(value=512)

            with gr.Column(scale=1):
                components[key('result')] = gr.Gallery(label="Result", show_label=False, columns=1, object_fit="contain", height=610)

        # Shared accordion sections, rendered in the same order on every tab.
        for build_section in (
            create_api_key_ui,
            create_lora_settings_ui,
            create_controlnet_ui,
            create_ipadapter_ui,
            create_embedding_ui,
            create_conditioning_ui,
            create_vae_override_ui,
        ):
            components.update(build_section(prefix))

    return components
|
ui/shared/img2img_ui.py
ADDED
|
@@ -0,0 +1,57 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import gradio as gr
|
| 2 |
+
from core.settings import MODEL_MAP_CHECKPOINT
|
| 3 |
+
from comfy_integration.nodes import SAMPLER_CHOICES, SCHEDULER_CHOICES
|
| 4 |
+
from .ui_components import (
|
| 5 |
+
create_lora_settings_ui,
|
| 6 |
+
create_controlnet_ui, create_ipadapter_ui, create_embedding_ui,
|
| 7 |
+
create_conditioning_ui, create_vae_override_ui, create_api_key_ui
|
| 8 |
+
)
|
| 9 |
+
|
| 10 |
+
def create_ui():
    """Build the Img2Img tab.

    Returns a dict mapping ``"<name>_img2img"`` keys to the created Gradio
    components so the event wiring can look them up by name.
    """
    prefix = "img2img"
    key = lambda name: f"{name}_{prefix}"
    components = {}

    with gr.Column():
        # Top row: model selector plus the run button.
        with gr.Row():
            components[key('base_model')] = gr.Dropdown(label="Base Model", choices=list(MODEL_MAP_CHECKPOINT.keys()), value=list(MODEL_MAP_CHECKPOINT.keys())[0], scale=3)
            with gr.Column(scale=1):
                components[key('run')] = gr.Button("Run", variant="primary")

        # Source image on the left, prompts on the right.
        with gr.Row():
            with gr.Column(scale=1):
                components[key('input_image')] = gr.Image(type="pil", label="Input Image", height=255)

            with gr.Column(scale=2):
                components[key('prompt')] = gr.Text(label="Prompt", lines=3, placeholder="Enter your prompt")
                components[key('neg_prompt')] = gr.Text(label="Negative prompt", lines=3, value="monochrome, (low quality, worst quality:1.2), 3d, watermark, signature, ugly, poorly drawn,")

        # Sampling parameters on the left, result gallery on the right.
        with gr.Row():
            with gr.Column(scale=1):
                with gr.Row():
                    components[key('denoise')] = gr.Slider(label="Denoise Strength", minimum=0.0, maximum=1.0, step=0.01, value=0.7)

                with gr.Row():
                    components[key('sampler')] = gr.Dropdown(label="Sampler", choices=SAMPLER_CHOICES, value=SAMPLER_CHOICES[0])
                    components[key('scheduler')] = gr.Dropdown(label="Scheduler", choices=SCHEDULER_CHOICES, value='normal' if 'normal' in SCHEDULER_CHOICES else SCHEDULER_CHOICES[0])
                with gr.Row():
                    components[key('steps')] = gr.Slider(label="Steps", minimum=1, maximum=100, step=1, value=28)
                    components[key('cfg')] = gr.Slider(label="CFG Scale", minimum=1.0, maximum=20.0, step=0.1, value=7.5)
                with gr.Row():
                    components[key('seed')] = gr.Number(label="Seed (-1 for random)", value=-1, precision=0)
                    components[key('batch_size')] = gr.Slider(label="Batch Size", minimum=1, maximum=16, step=1, value=1)
                with gr.Row():
                    components[key('zero_gpu')] = gr.Number(label="ZeroGPU Duration (s)", value=None, placeholder="Default: 60s, Max: 120s", info="Optional: Set how long to reserve the GPU. Longer jobs may need more time.")

                # Hidden: img2img keeps the source resolution, clip-skip fixed at 1.
                components[key('clip_skip')] = gr.State(value=1)

            with gr.Column(scale=1):
                components[key('result')] = gr.Gallery(label="Result", show_label=False, columns=1, object_fit="contain", height=505)

        # Shared accordion sections, rendered in the same order on every tab.
        for build_section in (
            create_api_key_ui,
            create_lora_settings_ui,
            create_controlnet_ui,
            create_ipadapter_ui,
            create_embedding_ui,
            create_conditioning_ui,
            create_vae_override_ui,
        ):
            components.update(build_section(prefix))

    return components
|
ui/shared/inpaint_ui.py
ADDED
|
@@ -0,0 +1,81 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import gradio as gr
|
| 2 |
+
from core.settings import MODEL_MAP_CHECKPOINT
|
| 3 |
+
from .ui_components import (
|
| 4 |
+
create_base_parameter_ui, create_lora_settings_ui,
|
| 5 |
+
create_controlnet_ui, create_ipadapter_ui, create_embedding_ui,
|
| 6 |
+
create_conditioning_ui, create_vae_override_ui, create_api_key_ui
|
| 7 |
+
)
|
| 8 |
+
|
| 9 |
+
def create_ui():
    """Build the Inpaint tab.

    Returns a dict mapping ``"<name>_inpaint"`` keys to the created Gradio
    components. Layout containers (rows/columns/the accordion wrapper) are
    exposed under their own keys so the fullscreen view toggle in the event
    wiring can show/hide them.
    """
    # Hoisted from the middle of the layout code: imports have no UI side
    # effect, so keep them at the top of the function where they are visible.
    from comfy_integration.nodes import SAMPLER_CHOICES, SCHEDULER_CHOICES

    prefix = "inpaint"
    components = {}

    with gr.Column():
        # Top row: model selector plus the run button.
        with gr.Row() as model_and_run_row:
            components[f'base_model_{prefix}'] = gr.Dropdown(
                label="Base Model",
                choices=list(MODEL_MAP_CHECKPOINT.keys()),
                value=list(MODEL_MAP_CHECKPOINT.keys())[0],
                scale=3
            )
            with gr.Column(scale=1):
                components[f'run_{prefix}'] = gr.Button("Run Inpaint", variant="primary")

        components[f'model_and_run_row_{prefix}'] = model_and_run_row

        # Mask editor on the left, prompts on the right.
        with gr.Row() as main_content_row:
            with gr.Column(scale=1) as editor_column:
                components[f'view_mode_{prefix}'] = gr.Radio(
                    ["Normal View", "Fullscreen View"],
                    label="Editor View",
                    value="Normal View",
                    interactive=True
                )
                components[f'input_image_dict_{prefix}'] = gr.ImageEditor(
                    type="pil",
                    label="Image & Mask",
                    height=272
                )
            components[f'editor_column_{prefix}'] = editor_column

            with gr.Column(scale=2) as prompts_column:
                components[f'prompt_{prefix}'] = gr.Text(label="Prompt", lines=6, placeholder="Describe what to fill in the mask...")
                components[f'neg_prompt_{prefix}'] = gr.Text(label="Negative prompt", lines=6, value="monochrome, (low quality, worst quality:1.2), 3d, watermark, signature, ugly, poorly drawn,")
            components[f'prompts_column_{prefix}'] = prompts_column

        # Sampling parameters on the left, result gallery on the right.
        with gr.Row() as params_and_gallery_row:
            with gr.Column(scale=1):
                # NOTE: the unused `param_defaults` dict that used to live here
                # (a leftover from create_base_parameter_ui usage) was removed.
                with gr.Row():
                    components[f'sampler_{prefix}'] = gr.Dropdown(label="Sampler", choices=SAMPLER_CHOICES, value=SAMPLER_CHOICES[0])
                    components[f'scheduler_{prefix}'] = gr.Dropdown(label="Scheduler", choices=SCHEDULER_CHOICES, value='normal' if 'normal' in SCHEDULER_CHOICES else SCHEDULER_CHOICES[0])
                with gr.Row():
                    components[f'steps_{prefix}'] = gr.Slider(label="Steps", minimum=1, maximum=100, step=1, value=28)
                    components[f'cfg_{prefix}'] = gr.Slider(label="CFG Scale", minimum=1.0, maximum=20.0, step=0.1, value=7.5)
                with gr.Row():
                    components[f'seed_{prefix}'] = gr.Number(label="Seed (-1 for random)", value=-1, precision=0)
                    components[f'batch_size_{prefix}'] = gr.Slider(label="Batch Size", minimum=1, maximum=16, step=1, value=1)
                with gr.Row():
                    components[f'zero_gpu_{prefix}'] = gr.Number(label="ZeroGPU Duration (s)", value=None, placeholder="Default: 60s, Max: 120s", info="Optional: Set how long to reserve the GPU.")

                # Hidden values; the effective size presumably comes from the
                # edited image — confirm in the event handlers.
                components[f'clip_skip_{prefix}'] = gr.State(value=1)
                components[f'width_{prefix}'] = gr.State(value=512)
                components[f'height_{prefix}'] = gr.State(value=512)

            with gr.Column(scale=1):
                components[f'result_{prefix}'] = gr.Gallery(label="Result", show_label=False, columns=1, object_fit="contain", height=414)

        components[f'params_and_gallery_row_{prefix}'] = params_and_gallery_row

        # Shared accordion sections, wrapped so fullscreen view can hide them.
        with gr.Column() as accordion_wrapper:
            components.update(create_api_key_ui(prefix))
            components.update(create_lora_settings_ui(prefix))
            components.update(create_controlnet_ui(prefix))
            components.update(create_ipadapter_ui(prefix))
            components.update(create_embedding_ui(prefix))
            components.update(create_conditioning_ui(prefix))
            components.update(create_vae_override_ui(prefix))
        components[f'accordion_wrapper_{prefix}'] = accordion_wrapper

    return components
|
ui/shared/outpaint_ui.py
ADDED
|
@@ -0,0 +1,68 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import gradio as gr
|
| 2 |
+
from core.settings import MODEL_MAP_CHECKPOINT
|
| 3 |
+
from comfy_integration.nodes import SAMPLER_CHOICES, SCHEDULER_CHOICES
|
| 4 |
+
from .ui_components import (
|
| 5 |
+
create_lora_settings_ui,
|
| 6 |
+
create_controlnet_ui, create_ipadapter_ui, create_embedding_ui,
|
| 7 |
+
create_conditioning_ui, create_vae_override_ui, create_api_key_ui
|
| 8 |
+
)
|
| 9 |
+
|
| 10 |
+
def create_ui():
    """Build the Outpaint tab.

    Returns a dict mapping ``"<name>_outpaint"`` keys to the created Gradio
    components so the event wiring can look them up by name.
    """
    prefix = "outpaint"
    key = lambda name: f"{name}_{prefix}"
    components = {}

    with gr.Column():
        # Top row: model selector plus the run button.
        with gr.Row():
            components[key('base_model')] = gr.Dropdown(
                label="Base Model",
                choices=list(MODEL_MAP_CHECKPOINT.keys()),
                value=list(MODEL_MAP_CHECKPOINT.keys())[0],
                scale=3,
            )
            with gr.Column(scale=1):
                components[key('run')] = gr.Button("Run Outpaint", variant="primary")

        # Source image on the left, prompts on the right.
        with gr.Row():
            with gr.Column(scale=1):
                components[key('input_image')] = gr.Image(type="pil", label="Input Image", height=255)
            with gr.Column(scale=2):
                components[key('prompt')] = gr.Text(label="Prompt", lines=3, placeholder="Describe the content for the expanded areas...")
                components[key('neg_prompt')] = gr.Text(label="Negative prompt", lines=3, value="monochrome, (low quality, worst quality:1.2), 3d, watermark, signature, ugly, poorly drawn,")

        # Padding + sampling parameters on the left, result gallery on the right.
        with gr.Row():
            with gr.Column(scale=1):
                # Per-side canvas expansion in pixels (64-px increments).
                with gr.Row():
                    components[key('outpaint_left')] = gr.Slider(label="Pad Left", minimum=0, maximum=512, step=64, value=0)
                    components[key('outpaint_right')] = gr.Slider(label="Pad Right", minimum=0, maximum=512, step=64, value=256)
                with gr.Row():
                    components[key('outpaint_top')] = gr.Slider(label="Pad Top", minimum=0, maximum=512, step=64, value=0)
                    components[key('outpaint_bottom')] = gr.Slider(label="Pad Bottom", minimum=0, maximum=512, step=64, value=0)

                with gr.Row():
                    components[key('sampler')] = gr.Dropdown(label="Sampler", choices=SAMPLER_CHOICES, value=SAMPLER_CHOICES[0])
                    components[key('scheduler')] = gr.Dropdown(label="Scheduler", choices=SCHEDULER_CHOICES, value='normal' if 'normal' in SCHEDULER_CHOICES else SCHEDULER_CHOICES[0])
                with gr.Row():
                    components[key('steps')] = gr.Slider(label="Steps", minimum=1, maximum=100, step=1, value=28)
                    components[key('cfg')] = gr.Slider(label="CFG Scale", minimum=1.0, maximum=20.0, step=0.1, value=7.5)
                with gr.Row():
                    components[key('seed')] = gr.Number(label="Seed (-1 for random)", value=-1, precision=0)
                    components[key('batch_size')] = gr.Slider(label="Batch Size", minimum=1, maximum=16, step=1, value=1)
                with gr.Row():
                    components[key('zero_gpu')] = gr.Number(label="ZeroGPU Duration (s)", value=None, placeholder="Default: 60s, Max: 120s", info="Optional: Set how long to reserve the GPU.")

                # Hidden values; the effective size presumably comes from the
                # input image plus padding — confirm in the event handlers.
                components[key('clip_skip')] = gr.State(value=1)
                components[key('width')] = gr.State(value=512)
                components[key('height')] = gr.State(value=512)

            with gr.Column(scale=1):
                components[key('result')] = gr.Gallery(label="Result", show_label=False, columns=1, object_fit="contain", height=595)

        # Shared accordion sections, rendered in the same order on every tab.
        for build_section in (
            create_api_key_ui,
            create_lora_settings_ui,
            create_controlnet_ui,
            create_ipadapter_ui,
            create_embedding_ui,
            create_conditioning_ui,
            create_vae_override_ui,
        ):
            components.update(build_section(prefix))

    return components
|
ui/shared/txt2img_ui.py
ADDED
|
@@ -0,0 +1,38 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import gradio as gr
|
| 2 |
+
from core.settings import MODEL_MAP_CHECKPOINT
|
| 3 |
+
from .ui_components import (
|
| 4 |
+
create_base_parameter_ui, create_lora_settings_ui,
|
| 5 |
+
create_controlnet_ui, create_ipadapter_ui, create_embedding_ui,
|
| 6 |
+
create_conditioning_ui, create_vae_override_ui, create_api_key_ui
|
| 7 |
+
)
|
| 8 |
+
|
| 9 |
+
def create_ui():
    """Build the Txt2Img tab and return its components keyed by name."""
    prefix = "txt2img"
    key = lambda name: f"{name}_{prefix}"
    components = {}

    with gr.Column():
        # Top row: model selector plus the run button.
        with gr.Row():
            components[key('base_model')] = gr.Dropdown(label="Base Model", choices=list(MODEL_MAP_CHECKPOINT.keys()), value=list(MODEL_MAP_CHECKPOINT.keys())[0], scale=3)
            with gr.Column(scale=1):
                components[key('run')] = gr.Button("Run", variant="primary")

        components[key('prompt')] = gr.Text(label="Prompt", lines=3, placeholder="Enter your prompt")
        components[key('neg_prompt')] = gr.Text(label="Negative prompt", lines=3, value="monochrome, (low quality, worst quality:1.2), 3d, watermark, signature, ugly, poorly drawn,")

        # Shared parameter widgets on the left, result gallery on the right.
        with gr.Row():
            with gr.Column(scale=1):
                components.update(
                    create_base_parameter_ui(
                        prefix,
                        {'w': 1024, 'h': 1024, 'cs_vis': False, 'cs_val': 1},
                    )
                )
            with gr.Column(scale=1):
                components[key('result')] = gr.Gallery(label="Result", show_label=False, columns=2, object_fit="contain", height=627)

        # Shared accordion sections, rendered in the same order on every tab.
        for build_section in (
            create_api_key_ui,
            create_lora_settings_ui,
            create_controlnet_ui,
            create_ipadapter_ui,
            create_embedding_ui,
            create_conditioning_ui,
            create_vae_override_ui,
        ):
            components.update(build_section(prefix))

    return components
|
ui/shared/ui_components.py
ADDED
|
@@ -0,0 +1,338 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import gradio as gr
|
| 2 |
+
from comfy_integration.nodes import SAMPLER_CHOICES, SCHEDULER_CHOICES
|
| 3 |
+
from core.settings import (
|
| 4 |
+
MAX_LORAS, LORA_SOURCE_CHOICES, MAX_EMBEDDINGS, MAX_CONDITIONINGS,
|
| 5 |
+
MAX_CONTROLNETS, MAX_IPADAPTERS, RESOLUTION_MAP
|
| 6 |
+
)
|
| 7 |
+
import yaml
|
| 8 |
+
import os
|
| 9 |
+
from functools import lru_cache
|
| 10 |
+
|
| 11 |
+
@lru_cache(maxsize=1)
def get_ipadapter_presets_from_yaml():
    """Read IPAdapter preset names from ``yaml/ipadapter.yaml``.

    The result is cached for the process lifetime (the file is static at
    runtime). Returns the list of ``preset_name`` strings found in the YAML
    list, an empty list if the YAML root is not a list, or a single-entry
    fallback list if the file is missing/unreadable.
    """
    try:
        # Project root is three directories up from this module (ui/shared/).
        project_root = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
        ipadapter_list_path = os.path.join(project_root, 'yaml', 'ipadapter.yaml')
        with open(ipadapter_list_path, 'r', encoding='utf-8') as f:
            config = yaml.safe_load(f)
        if isinstance(config, list):
            # Keep only entries that actually declare a preset name.
            # (A dead `if "Composition" not in presets: pass` no-op was removed.)
            return [item.get('preset_name') for item in config if item.get('preset_name')]
        return []
    except Exception as e:
        # Best-effort: the UI can still render with a generic default preset.
        print(f"Warning: Could not load ipadapter.yaml for UI components: {e}")
        return ["STANDARD (medium strength)"]
|
| 27 |
+
|
| 28 |
+
|
| 29 |
+
def create_base_parameter_ui(prefix, defaults=None):
    """Build the common generation-parameter widgets for one tab.

    Args:
        prefix: Tab name suffixed onto every component key (e.g. "txt2img").
        defaults: Optional dict of per-tab defaults. Recognized keys:
            'w'/'h' (initial width/height, default 1024) and 'cs_val'
            (initial clip-skip value, default 1). Callers also pass
            'cs_vis', which is currently unused here.

    Returns:
        Dict mapping ``"<name>_<prefix>"`` to the created components.
    """
    if defaults is None:
        defaults = {}

    components = {}

    with gr.Row():
        components[f'aspect_ratio_{prefix}'] = gr.Dropdown(
            label="Aspect Ratio",
            choices=list(RESOLUTION_MAP['sdxl'].keys()),
            value="1:1 (Square)",
            interactive=True
        )
    with gr.Row():
        components[f'width_{prefix}'] = gr.Number(label="Width", value=defaults.get('w', 1024), interactive=True)
        components[f'height_{prefix}'] = gr.Number(label="Height", value=defaults.get('h', 1024), interactive=True)
    with gr.Row():
        components[f'sampler_{prefix}'] = gr.Dropdown(label="Sampler", choices=SAMPLER_CHOICES, value=SAMPLER_CHOICES[0])
        components[f'scheduler_{prefix}'] = gr.Dropdown(label="Scheduler", choices=SCHEDULER_CHOICES, value='normal' if 'normal' in SCHEDULER_CHOICES else SCHEDULER_CHOICES[0])
    with gr.Row():
        components[f'steps_{prefix}'] = gr.Slider(label="Steps", minimum=1, maximum=100, step=1, value=28)
        components[f'cfg_{prefix}'] = gr.Slider(label="CFG Scale", minimum=1.0, maximum=20.0, step=0.1, value=7.5)
    with gr.Row():
        components[f'seed_{prefix}'] = gr.Number(label="Seed (-1 for random)", value=-1, precision=0)
        components[f'batch_size_{prefix}'] = gr.Slider(label="Batch Size", minimum=1, maximum=16, step=1, value=1)
    with gr.Row():
        components[f'zero_gpu_{prefix}'] = gr.Number(label="ZeroGPU Duration (s)", value=None, placeholder="Default: 60s, Max: 120s", info="Optional: Set how long to reserve the GPU. Longer jobs may need more time.")

    # Honor the caller-supplied clip-skip default instead of hardcoding 1;
    # every current caller passes cs_val=1, so behavior is unchanged.
    components[f'clip_skip_{prefix}'] = gr.State(value=defaults.get('cs_val', 1))

    return components
|
| 60 |
+
|
| 61 |
+
|
| 62 |
+
def create_api_key_ui(prefix: str):
    """Build the collapsed "API Key Settings" accordion for one tab."""
    key = lambda name: f"{name}_{prefix}"
    components = {}

    with gr.Accordion("API Key Settings", open=False) as acc:
        components[key('api_key_accordion')] = acc
        gr.Markdown("💡 **Tip:** Enter API key (optional). An API key is required for resources that need a login to download. The key will be used for all Civitai downloads on this tab. You can also manually upload the corresponding files to avoid API Key leakage caused by potential vulnerabilities.")
        with gr.Row():
            components[key('civitai_api_key')] = gr.Textbox(
                label="Civitai API Key",
                type="password",
                placeholder="Enter your Civitai API key here (optional)",
            )

    return components
|
| 74 |
+
|
| 75 |
+
|
| 76 |
+
def create_lora_settings_ui(prefix: str):
    """Build the collapsible LoRA accordion for one tab.

    Creates up to ``MAX_LORAS`` rows (only the first visible) of
    source / id / scale / upload controls plus add/remove buttons.
    """
    key = lambda name: f"{name}_{prefix}"
    components = {}

    rows, sources, ids, scales, uploads = [], [], [], [], []

    with gr.Accordion("LoRA Settings", open=False) as accordion:
        components[key('lora_accordion')] = accordion
        gr.Markdown("💡 **Tip:** When downloading from Civitai, please use the **Version ID**, not the Model ID. You can find the Version ID in the URL (e.g., `civitai.com/models/123?modelVersionId=456`) or under the model's download button.")
        components[key('lora_count_state')] = gr.State(1)

        for slot in range(MAX_LORAS):
            # Only the first slot starts visible; the add/remove buttons
            # toggle the rest.
            with gr.Row(visible=slot == 0) as row:
                sources.append(gr.Dropdown(label=f"LoRA Source {slot+1}", choices=LORA_SOURCE_CHOICES, value=LORA_SOURCE_CHOICES[0], scale=1))
                ids.append(gr.Textbox(label="Civitai Version ID / File", placeholder="Civitai Version ID or Filename", scale=2, type="text"))
                scales.append(gr.Slider(label="Scale", minimum=-2.0, maximum=2.0, step=0.05, value=0.8, scale=1))
                uploads.append(gr.UploadButton(label="Upload", file_types=[".safetensors"], scale=1))
            rows.append(row)

        with gr.Row():
            components[key('add_lora_button')] = gr.Button("Add LoRA", variant="secondary")
            components[key('delete_lora_button')] = gr.Button("Remove LoRA", variant="secondary", visible=False)

    components[key('lora_rows')] = rows
    components[key('lora_sources')] = sources
    components[key('lora_ids')] = ids
    components[key('lora_scales')] = scales
    components[key('lora_uploads')] = uploads

    # Interleaved flat view used by the event wiring: one
    # (source, id, scale, upload) quadruple per LoRA slot.
    flat = []
    for quad in zip(sources, ids, scales, uploads):
        flat.extend(quad)
    components[key('all_lora_components_flat')] = flat

    return components
|
| 115 |
+
|
| 116 |
+
def create_controlnet_ui(prefix: str, max_units=MAX_CONTROLNETS):
    """Build the collapsible ControlNet accordion for one tab.

    Creates ``max_units`` rows (only the first visible) of
    image / type / series / strength controls plus add/delete buttons.
    """
    key = lambda name: f"{name}_{prefix}"
    components = {}

    unit_rows = []
    image_inputs = []
    series_dropdowns = []
    type_dropdowns = []
    strength_sliders = []
    filepath_states = []

    with gr.Accordion("ControlNet Settings", open=False) as accordion:
        components[key('controlnet_accordion')] = accordion

        for i in range(max_units):
            with gr.Row(visible=(i < 1)) as unit_row:
                with gr.Column(scale=1):
                    image_inputs.append(gr.Image(label=f"Control Image {i+1}", type="pil", sources=["upload"], height=256))
                with gr.Column(scale=2):
                    # Choices are populated dynamically by event handlers.
                    type_dropdowns.append(gr.Dropdown(label="Type", choices=[], interactive=True))
                    series_dropdowns.append(gr.Dropdown(label="Series", choices=[], interactive=True))
                    strength_sliders.append(gr.Slider(label="Strength", minimum=0.0, maximum=2.0, step=0.05, value=1.0, interactive=True))
                # Hidden per-unit state, presumably the resolved model path —
                # confirm against the event handlers.
                filepath_states.append(gr.State(None))
            unit_rows.append(unit_row)

        with gr.Row():
            components[key('add_controlnet_button')] = gr.Button("✚ Add ControlNet")
            components[key('delete_controlnet_button')] = gr.Button("➖ Delete ControlNet", visible=False)
        components[key('controlnet_count_state')] = gr.State(1)

    components[key('controlnet_rows')] = unit_rows
    components[key('controlnet_images')] = image_inputs
    components[key('controlnet_series')] = series_dropdowns
    components[key('controlnet_types')] = type_dropdowns
    components[key('controlnet_strengths')] = strength_sliders
    components[key('controlnet_filepaths')] = filepath_states

    # Interleaved flat layout expected by the event wiring:
    # (image, type, series, strength, filepath) per unit.
    flat = []
    for unit in zip(image_inputs, type_dropdowns, series_dropdowns, strength_sliders, filepath_states):
        flat.extend(unit)
    components[key('all_controlnet_components_flat')] = flat

    return components
|
| 157 |
+
|
| 158 |
+
def create_ipadapter_ui(prefix: str, max_units=MAX_IPADAPTERS):
    """Build the collapsible IPAdapter accordion for one tab.

    Exposes global preset / scaling / combine controls plus up to
    ``max_units`` per-image rows (only the first visible initially).
    """
    components = {}
    key = lambda name: f"{name}_{prefix}"

    sdxl_presets = get_ipadapter_presets_from_yaml()
    default_preset = sdxl_presets[0] if sdxl_presets else None

    with gr.Accordion("IPAdapter Settings", open=False) as accordion:
        components[key('ipadapter_accordion')] = accordion
        gr.Markdown("Powered by [cubiq/ComfyUI_IPAdapter_plus](https://github.com/cubiq/ComfyUI_IPAdapter_plus).")

        with gr.Row():
            components[key('ipadapter_final_preset')] = gr.Dropdown(
                label="Preset (for all images)",
                choices=sdxl_presets,
                value=default_preset,
                interactive=True
            )
            components[key('ipadapter_embeds_scaling')] = gr.Dropdown(
                label="Embeds Scaling",
                choices=['V only', 'K+V', 'K+V w/ C penalty', 'K+mean(V) w/ C penalty'],
                value='V only',
                interactive=True
            )

        with gr.Row():
            components[key('ipadapter_combine_method')] = gr.Dropdown(
                label="Combine Method",
                choices=["concat", "add", "subtract", "average", "norm average", "max", "min"],
                value="concat",
                interactive=True
            )
            components[key('ipadapter_final_weight')] = gr.Slider(label="Final Weight", minimum=0.0, maximum=2.0, step=0.05, value=1.0, interactive=True)
            # Hidden by default; presumably shown only when the selected
            # preset uses an accompanying LoRA — confirm in the event wiring.
            components[key('ipadapter_final_lora_strength')] = gr.Slider(label="Final LoRA Strength", minimum=0.0, maximum=2.0, step=0.05, value=0.6, interactive=True, visible=False)

        gr.Markdown("---")

        ipa_rows, images, weights, lora_strengths = [], [], [], []
        components.update({
            key('ipadapter_rows'): ipa_rows,
            key('ipadapter_images'): images,
            key('ipadapter_weights'): weights,
            key('ipadapter_lora_strengths'): lora_strengths
        })

        for i in range(max_units):
            with gr.Row(visible=(i < 1)) as row:
                with gr.Column(scale=1):
                    # FIX: pass `sources` as a list for consistency with
                    # create_controlnet_ui (was the bare string "upload").
                    images.append(gr.Image(label=f"IPAdapter Image {i+1}", type="pil", sources=["upload"], height=256))
                with gr.Column(scale=2):
                    weights.append(gr.Slider(label="Weight", minimum=0.0, maximum=2.0, step=0.05, value=1.0, interactive=True))
                    lora_strengths.append(gr.Slider(label="LoRA Strength", minimum=0.0, maximum=2.0, step=0.05, value=0.6, interactive=True, visible=False))
            ipa_rows.append(row)

        with gr.Row():
            components[key('add_ipadapter_button')] = gr.Button("✚ Add IPAdapter")
            components[key('delete_ipadapter_button')] = gr.Button("➖ Delete IPAdapter", visible=False)
        components[key('ipadapter_count_state')] = gr.State(1)

    # Grouped (not interleaved) flat layout expected by the event wiring:
    # all images, then all weights, then all LoRA strengths, then the
    # five global controls in this exact order.
    all_ipa_components_flat = images + weights + lora_strengths
    all_ipa_components_flat += [
        components[key('ipadapter_final_preset')],
        components[key('ipadapter_final_weight')],
        components[key('ipadapter_final_lora_strength')],
        components[key('ipadapter_embeds_scaling')],
        components[key('ipadapter_combine_method')],
    ]
    components[key('all_ipadapter_components_flat')] = all_ipa_components_flat

    return components
|
| 228 |
+
|
| 229 |
+
def create_embedding_ui(prefix: str):
    """Build the collapsible "Embedding Settings" section for one tab.

    All component keys are suffixed with ``_<prefix>`` so several tabs can
    host independent copies. Returns a dict of gradio components plus the
    helper lists/state used by the add/delete row handlers.
    """
    out = {}

    def k(name):
        return f"{name}_{prefix}"

    with gr.Accordion("Embedding Settings", open=False, visible=True) as acc:
        out[k('embedding_accordion')] = acc
        gr.Markdown("💡 **Tip:** Embeddings are automatically added to your prompt using `embedding:filename` syntax. When downloading from Civitai, please use the **Version ID**, not the Model ID. You can find the Version ID in the URL (e.g., `civitai.com/models/123?modelVersionId=456`) or under the model's download button. For instance, using the Version ID `456` from the example above would automatically append `embedding:civitai_456` to your positive prompt.")

        row_list, source_dds, id_boxes, file_states, upload_btns = [], [], [], [], []
        out.update({
            k('embedding_rows'): row_list,
            k('embeddings_sources'): source_dds,
            k('embeddings_ids'): id_boxes,
            k('embeddings_files'): file_states,
            k('embeddings_uploads'): upload_btns
        })

        # Only the first row starts visible; the rest are revealed by "Add".
        for idx in range(MAX_EMBEDDINGS):
            with gr.Row(visible=(idx < 1)) as row:
                source_dds.append(gr.Dropdown(label=f"Embedding Source {idx+1}", choices=LORA_SOURCE_CHOICES, value="Civitai", scale=1, interactive=True))
                id_boxes.append(gr.Textbox(label="Civitai Version ID / File", placeholder="Civitai Version ID or Filename", scale=3, interactive=True, type="text"))
                btn = gr.UploadButton("Upload", file_types=[".safetensors"], scale=1)
                file_states.append(gr.State(None))
                upload_btns.append(btn)
                row_list.append(row)

        with gr.Row():
            out[k('add_embedding_button')] = gr.Button("✚ Add Embedding")
            out[k('delete_embedding_button')] = gr.Button("➖ Delete Embedding", visible=False)
        out[k('embedding_count_state')] = gr.State(1)

        # Flat (source, id, file) triples per row, in row order — this is the
        # ordering the event handlers expect.
        flat = []
        for idx in range(MAX_EMBEDDINGS):
            flat.extend([source_dds[idx], id_boxes[idx], file_states[idx]])
        out[k('all_embedding_components_flat')] = flat

    return out
|
| 266 |
+
|
| 267 |
+
def create_conditioning_ui(prefix: str):
    """Build the "Conditioning Settings" (regional prompting) section.

    Each area row carries a prompt, rectangle geometry (X/Y/W/H) and a
    strength slider. Keys are suffixed with ``_<prefix>``; returns the
    component dict used by the event wiring.
    """
    out = {}

    def k(name):
        return f"{name}_{prefix}"

    with gr.Accordion("Conditioning Settings", open=False) as acc:
        out[k('conditioning_accordion')] = acc
        gr.Markdown("💡 **Tip:** Define rectangular areas and assign specific prompts to them. Coordinates (X, Y) start from the top-left corner.")

        wrappers, area_prompts, area_ws, area_hs, area_xs, area_ys, area_strengths = [], [], [], [], [], [], []
        out.update({
            k('conditioning_rows'): wrappers,
            k('conditioning_prompts'): area_prompts,
            k('conditioning_widths'): area_ws,
            k('conditioning_heights'): area_hs,
            k('conditioning_xs'): area_xs,
            k('conditioning_ys'): area_ys,
            k('conditioning_strengths'): area_strengths
        })

        # Only the first area starts visible; more appear via "Add Area".
        for idx in range(MAX_CONDITIONINGS):
            with gr.Column(visible=(idx < 1)) as wrapper:
                area_prompts.append(gr.Textbox(label=f"Area Prompt {idx+1}", lines=2, interactive=True))
                with gr.Row():
                    area_xs.append(gr.Number(label="X", value=0, interactive=True, step=8, scale=1))
                    area_ys.append(gr.Number(label="Y", value=0, interactive=True, step=8, scale=1))
                    area_ws.append(gr.Number(label="Width", value=512, interactive=True, step=8, scale=1))
                    area_hs.append(gr.Number(label="Height", value=512, interactive=True, step=8, scale=1))
                    area_strengths.append(gr.Slider(label="Strength", minimum=0.1, maximum=2.0, step=0.05, value=1.0, interactive=True, scale=2))
            wrappers.append(wrapper)

        with gr.Row():
            out[k('add_conditioning_button')] = gr.Button("✚ Add Area")
            out[k('delete_conditioning_button')] = gr.Button("➖ Delete Area", visible=False)
        out[k('conditioning_count_state')] = gr.State(1)

        # Grouped (not interleaved) ordering: all prompts, then widths,
        # heights, xs, ys, strengths — matched by the event handlers.
        out[k('all_conditioning_components_flat')] = (
            area_prompts + area_ws + area_hs + area_xs + area_ys + area_strengths
        )

    return out
|
| 306 |
+
|
| 307 |
+
def create_vae_override_ui(prefix: str):
    """Build the optional "VAE Settings (Override)" accordion.

    Offers a source dropdown ("None" disables the override), a Civitai
    version-ID / filename textbox, an upload button and a hidden file state.
    Keys are suffixed with ``_<prefix>``.
    """
    out = {}

    def k(name):
        return f"{name}_{prefix}"

    dropdown_choices = ["None"] + LORA_SOURCE_CHOICES

    with gr.Accordion("VAE Settings (Override)", open=False) as acc:
        out[k('vae_accordion')] = acc
        gr.Markdown("💡 **Tip:** When downloading from Civitai, please use the **Version ID**, not the Model ID. You can find the Version ID in the URL (e.g., `civitai.com/models/123?modelVersionId=456`) or under the model's download button.")
        with gr.Row():
            out[k('vae_source')] = gr.Dropdown(
                label="VAE Source",
                choices=dropdown_choices,
                value="None",
                scale=1,
                interactive=True
            )
            out[k('vae_id')] = gr.Textbox(
                label="Civitai Version ID / File",
                placeholder="Civitai Version ID or Filename",
                scale=3,
                interactive=True,
                type="text"
            )
            out[k('vae_upload_button')] = gr.UploadButton(
                "Upload",
                file_types=[".safetensors"],
                scale=1
            )
        out[k('vae_file')] = gr.State(None)

    return out
|
utils/__init__.py
ADDED
|
File without changes
|
utils/app_utils.py
ADDED
|
@@ -0,0 +1,601 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import os
|
| 2 |
+
import requests
|
| 3 |
+
import hashlib
|
| 4 |
+
import re
|
| 5 |
+
from typing import Sequence, Mapping, Any, Union, Set
|
| 6 |
+
from pathlib import Path
|
| 7 |
+
import shutil
|
| 8 |
+
|
| 9 |
+
import gradio as gr
|
| 10 |
+
from huggingface_hub import hf_hub_download, constants as hf_constants
|
| 11 |
+
import torch
|
| 12 |
+
import numpy as np
|
| 13 |
+
from PIL import Image, ImageChops
|
| 14 |
+
|
| 15 |
+
|
| 16 |
+
from core.settings import *
|
| 17 |
+
|
| 18 |
+
# Maximum total size (in GB) of the Hugging Face hub cache before
# enforce_disk_limit() starts evicting the oldest files.
DISK_LIMIT_GB = 120
# Root directory where ComfyUI expects its model subdirectories.
MODELS_ROOT_DIR = "ComfyUI/models"

# Lazily-populated module-level caches; filled on first use by
# build_preprocessor_model_map() / load_ipadapter_presets().
PREPROCESSOR_MODEL_MAP = None
PREPROCESSOR_PARAMETER_MAP = None
IPADAPTER_PRESETS = None
|
| 24 |
+
|
| 25 |
+
|
| 26 |
+
def save_uploaded_file_with_hash(file_obj: gr.File, target_dir: str) -> str:
    """Copy an uploaded file into `target_dir` under a content-hash name.

    The destination filename is ``<sha256>.<ext>``, so identical uploads
    deduplicate to the same file. Returns the hashed filename, or "" when
    nothing was uploaded.
    """
    if not file_obj:
        return ""

    source_path = file_obj.name

    # Hash in 64 KiB chunks so large uploads never load fully into memory.
    digest = hashlib.sha256()
    with open(source_path, 'rb') as handle:
        while block := handle.read(65536):
            digest.update(block)

    extension = os.path.splitext(source_path)[1].lower()
    hashed_filename = f"{digest.hexdigest()}{extension}"
    dest_path = os.path.join(target_dir, hashed_filename)

    os.makedirs(target_dir, exist_ok=True)
    if os.path.exists(dest_path):
        print(f"ℹ️ File already exists (deduplicated): {dest_path}")
    else:
        shutil.copy(source_path, dest_path)
        print(f"✅ Saved uploaded file as: {dest_path}")

    return hashed_filename
|
| 51 |
+
|
| 52 |
+
|
| 53 |
+
def bytes_to_gb(byte_size: int) -> float:
    """Convert a byte count to gibibytes, rounded to 2 decimals.

    None and 0 both map to 0.0 so callers can pass missing sizes directly.
    """
    if not byte_size:
        return 0.0
    return round(byte_size / 1024 ** 3, 2)
|
| 57 |
+
|
| 58 |
+
def get_directory_size(path: str) -> int:
    """Return the total size in bytes of regular files under `path`.

    Symlinks are skipped so cached files linked into place aren't double
    counted. A missing path or an access error yields a partial/zero total.
    """
    if not os.path.exists(path):
        return 0
    size = 0
    try:
        for root, _dirs, names in os.walk(path):
            for name in names:
                full = os.path.join(root, name)
                if os.path.isfile(full) and not os.path.islink(full):
                    size += os.path.getsize(full)
    except OSError as e:
        print(f"Warning: Could not access {path} to calculate size: {e}")
    return size
|
| 71 |
+
|
| 72 |
+
def enforce_disk_limit():
    """Keep the Hugging Face hub cache under DISK_LIMIT_GB.

    Scans the cache (ignoring in-flight ``.incomplete``/``.lock`` files),
    and when the total exceeds the limit deletes files oldest-first (by
    creation time) until usage is back under the limit. All failures are
    logged, never raised — this is best-effort housekeeping.
    """
    disk_limit_bytes = DISK_LIMIT_GB * (1024 ** 3)
    cache_dir = hf_constants.HF_HUB_CACHE

    if not os.path.exists(cache_dir):
        return

    print(f"--- [Storage Manager] Checking disk usage in '{cache_dir}' (Limit: {DISK_LIMIT_GB} GB) ---")

    try:
        all_files = []
        current_size_bytes = 0
        for dirpath, _, filenames in os.walk(cache_dir):
            for f in filenames:
                # Skip files a download may still be writing / holding.
                if f.endswith(".incomplete") or f.endswith(".lock"):
                    continue
                file_path = os.path.join(dirpath, f)
                if os.path.isfile(file_path) and not os.path.islink(file_path):
                    try:
                        file_size = os.path.getsize(file_path)
                        creation_time = os.path.getctime(file_path)
                        all_files.append((creation_time, file_path, file_size))
                        current_size_bytes += file_size
                    except OSError:
                        continue

        print(f"--- [Storage Manager] Current usage: {bytes_to_gb(current_size_bytes)} GB ---")

        if current_size_bytes > disk_limit_bytes:
            print(f"--- [Storage Manager] Usage exceeds limit. Starting cleanup... ---")
            # Iterate the sorted list instead of repeated list.pop(0)
            # (which is O(n) per eviction → O(n^2) overall).
            for _ctime, file_path, file_size in sorted(all_files, key=lambda entry: entry[0]):
                if current_size_bytes <= disk_limit_bytes:
                    break
                try:
                    os.remove(file_path)
                    current_size_bytes -= file_size
                    print(f"--- [Storage Manager] Deleted oldest file: {os.path.basename(file_path)} ({bytes_to_gb(file_size)} GB freed) ---")
                except OSError as e:
                    print(f"--- [Storage Manager] Error deleting file {file_path}: {e} ---")

            print(f"--- [Storage Manager] Cleanup finished. New usage: {bytes_to_gb(current_size_bytes)} GB ---")
        else:
            print("--- [Storage Manager] Disk usage is within the limit. No action needed. ---")

    except Exception as e:
        print(f"--- [Storage Manager] An unexpected error occurred: {e} ---")
|
| 119 |
+
|
| 120 |
+
|
| 121 |
+
def get_value_at_index(obj: Union[Sequence, Mapping], index: int) -> Any:
    """Safely fetch ``obj[index]``, falling back to ``obj["result"][index]``.

    ComfyUI node outputs are either plain sequences or mappings with a
    "result" key. Returns None when neither lookup succeeds instead of
    raising.
    """
    try:
        return obj[index]
    except (KeyError, IndexError):
        try:
            return obj["result"][index]
        except (KeyError, IndexError, TypeError):
            # TypeError: `obj` was a sequence, so obj["result"] is not a
            # valid lookup (previously an out-of-range index on a plain
            # list leaked an uncaught TypeError here).
            return None
|
| 129 |
+
|
| 130 |
+
def sanitize_prompt(prompt: str) -> str:
    """Strip non-printable characters from a prompt, keeping newlines/tabs.

    Non-string inputs return "".
    """
    if not isinstance(prompt, str):
        return ""
    kept = []
    for ch in prompt:
        if ch.isprintable() or ch in ('\n', '\t'):
            kept.append(ch)
    return "".join(kept)
|
| 134 |
+
|
| 135 |
+
def sanitize_id(input_id: str) -> str:
    """Reduce a user-supplied ID to ASCII digits only; non-strings yield ""."""
    if not isinstance(input_id, str):
        return ""
    return "".join(ch for ch in input_id if ch in "0123456789")
|
| 139 |
+
|
| 140 |
+
def sanitize_url(url: str) -> str:
    """Validate that `url` is a plausible http(s) URL.

    Returns the stripped URL; raises ValueError for non-strings or for any
    scheme other than http/https.
    """
    if not isinstance(url, str):
        raise ValueError("URL must be a string.")
    stripped = url.strip()
    if re.match(r'^https?://[^\s/$.?#].[^\s]*$', stripped) is None:
        raise ValueError("Invalid URL format or scheme. Only HTTP and HTTPS are allowed.")
    return stripped
|
| 147 |
+
|
| 148 |
+
def sanitize_filename(filename: str) -> str:
    """Make a user-supplied filename safe for local paths.

    Removes '..' traversal sequences, replaces every character outside
    word chars / '.' / '-' with '_', and strips leading slashes.
    Non-strings yield "".
    """
    if not isinstance(filename, str):
        return ""
    no_traversal = filename.replace('..', '')
    safe = re.sub(r'[^\w\.\-]', '_', no_traversal)
    return safe.lstrip('/\\')
|
| 154 |
+
|
| 155 |
+
|
| 156 |
+
def get_civitai_file_info(version_id: str) -> dict | None:
    """Fetch file metadata for a Civitai model version.

    Prefers the primary 'Model' file in a loadable format, falls back to
    the first listed file. Returns None on any network/parse failure or
    when no files are listed.
    """
    try:
        response = requests.get(
            f"https://civitai.com/api/v1/model-versions/{version_id}", timeout=10
        )
        response.raise_for_status()
        payload = response.json()

        files = payload.get('files', [])
        for entry in files:
            if entry.get('type') == 'Model' and entry['name'].endswith(('.safetensors', '.pt', '.bin')):
                return entry

        if files:
            return files[0]
    except Exception:
        # Best-effort lookup: callers treat None as "no download link".
        return None
|
| 171 |
+
|
| 172 |
+
|
| 173 |
+
def download_file(url: str, save_path: str, api_key: str = None, progress=None, desc: str = "") -> str:
    """Stream `url` to `save_path`, reporting progress; return a status string.

    Enforces the disk quota first, skips the download when the file already
    exists, and removes any partial file on failure so retries start clean.
    The returned string starts with "Successfully" only on success.
    """
    enforce_disk_limit()

    if os.path.exists(save_path):
        return f"File already exists: {os.path.basename(save_path)}"

    headers = {}
    if api_key and api_key.strip():
        headers['Authorization'] = f'Bearer {api_key}'

    try:
        if progress:
            progress(0, desc=desc)

        response = requests.get(url, stream=True, headers=headers, timeout=15)
        response.raise_for_status()
        expected = int(response.headers.get('content-length', 0))

        written = 0
        with open(save_path, "wb") as out:
            for chunk in response.iter_content(chunk_size=8192):
                out.write(chunk)
                if progress and expected > 0:
                    written += len(chunk)
                    progress(written / expected, desc=desc)
        return f"Successfully downloaded: {os.path.basename(save_path)}"
    except Exception as e:
        # Drop the partial file so a later attempt does not see a corrupt copy.
        if os.path.exists(save_path):
            os.remove(save_path)
        return f"Download failed for {os.path.basename(save_path)}: {e}"
|
| 200 |
+
|
| 201 |
+
|
| 202 |
+
def get_lora_path(source: str, id_or_url: str, civitai_key: str, progress) -> tuple[str | None, str]:
    """Resolve a LoRA reference to a local path, downloading if needed.

    Returns (path, status); path is None on any failure and the status
    string explains why.
    """
    if not id_or_url or not id_or_url.strip():
        return None, "No ID/URL provided."

    try:
        if source != "Civitai":
            return None, "Invalid source."
        version_id = sanitize_id(id_or_url)
        if not version_id:
            return None, "Invalid Civitai ID provided. Must be numeric."
        filename = sanitize_filename(f"civitai_{version_id}.safetensors")
        local_path = os.path.join(LORA_DIR, filename)
        file_info = get_civitai_file_info(version_id)
        api_key_to_use = civitai_key
        source_name = f"Civitai ID {version_id}"
    except ValueError as e:
        return None, f"Input validation failed: {e}"

    if os.path.exists(local_path):
        return local_path, "File already exists."

    if not file_info or not file_info.get('downloadUrl'):
        return None, f"Could not get download link for {source_name}."

    status = download_file(file_info['downloadUrl'], local_path, api_key_to_use, progress=progress, desc=f"Downloading {source_name}")

    if "Successfully" in status:
        return local_path, status
    return None, status
|
| 231 |
+
|
| 232 |
+
def get_embedding_path(source: str, id_or_url: str, civitai_key: str, progress) -> tuple[str | None, str]:
    """Resolve a textual-inversion embedding to a local path, downloading if needed.

    Keeps the remote extension for .pt/.bin embeddings, otherwise defaults
    to .safetensors. Returns (path, status); path is None on failure.
    """
    if not id_or_url or not id_or_url.strip():
        return None, "No ID/URL provided."

    try:
        if source != "Civitai":
            return None, "Invalid source."
        version_id = sanitize_id(id_or_url)
        if not version_id:
            return None, "Invalid Civitai ID. Must be numeric."

        file_info = get_civitai_file_info(version_id)
        file_ext = ".safetensors"
        if file_info and file_info['name'].lower().endswith(('.pt', '.bin')):
            file_ext = os.path.splitext(file_info['name'])[1]

        filename = sanitize_filename(f"civitai_{version_id}{file_ext}")
        local_path = os.path.join(EMBEDDING_DIR, filename)
        api_key_to_use = civitai_key
        source_name = f"Embedding Civitai ID {version_id}"
    except ValueError as e:
        return None, f"Input validation failed: {e}"

    if os.path.exists(local_path):
        return local_path, "File already exists."

    if not file_info or not file_info.get('downloadUrl'):
        return None, f"Could not get download link for {source_name}."

    status = download_file(file_info['downloadUrl'], local_path, api_key_to_use, progress=progress, desc=f"Downloading {source_name}")

    if "Successfully" in status:
        return local_path, status
    return None, status
|
| 267 |
+
|
| 268 |
+
def get_vae_path(source: str, id_or_url: str, civitai_key: str, progress) -> tuple[str | None, str]:
    """Resolve a VAE override to a local path, downloading if needed.

    Mirrors get_embedding_path but stores files under VAE_DIR. Returns
    (path, status); path is None on failure.
    """
    if not id_or_url or not id_or_url.strip():
        return None, "No ID/URL provided."

    try:
        if source != "Civitai":
            return None, "Invalid source."
        version_id = sanitize_id(id_or_url)
        if not version_id:
            return None, "Invalid Civitai ID. Must be numeric."

        file_info = get_civitai_file_info(version_id)
        file_ext = ".safetensors"
        if file_info and file_info['name'].lower().endswith(('.pt', '.bin')):
            file_ext = os.path.splitext(file_info['name'])[1]

        filename = sanitize_filename(f"civitai_{version_id}{file_ext}")
        local_path = os.path.join(VAE_DIR, filename)
        api_key_to_use = civitai_key
        source_name = f"VAE Civitai ID {version_id}"
    except ValueError as e:
        return None, f"Input validation failed: {e}"

    if os.path.exists(local_path):
        return local_path, "File already exists."

    if not file_info or not file_info.get('downloadUrl'):
        return None, f"Could not get download link for {source_name}."

    status = download_file(file_info['downloadUrl'], local_path, api_key_to_use, progress=progress, desc=f"Downloading {source_name}")

    if "Successfully" in status:
        return local_path, status
    return None, status
|
| 303 |
+
|
| 304 |
+
|
| 305 |
+
def _ensure_model_downloaded(display_name: str, progress=gr.Progress()):
    """Ensure the model behind `display_name` exists in ComfyUI's model dirs.

    Looks the model up in ALL_MODEL_MAP, places it in the directory matching
    its type (HF models are symlinked from the hub cache; Civitai models are
    downloaded directly), and returns the bare filename. Raises gr.Error on
    any failure, after removing partial files/symlinks.

    NOTE: `progress=gr.Progress()` is a shared mutable default kept as-is
    for interface compatibility with existing callers.
    """
    if display_name not in ALL_MODEL_MAP:
        raise ValueError(f"Model '{display_name}' not found in configuration.")

    _, repo_filename, model_type, _ = ALL_MODEL_MAP[display_name]

    # Each logical model type maps to a fixed ComfyUI subdirectory.
    type_to_dir_map = {
        "SDXL": CHECKPOINT_DIR, "SD1.5": CHECKPOINT_DIR, "UNET": DIFFUSION_MODELS_DIR,
        "VAE": VAE_DIR, "TEXT_ENCODER": TEXT_ENCODERS_DIR, "LORA": LORA_DIR,
        "IPADAPTER": os.path.join(os.path.dirname(LORA_DIR), "ipadapter"),
        "CLIP_VISION": os.path.join(os.path.dirname(LORA_DIR), "clip_vision")
    }
    dest_dir = type_to_dir_map.get(model_type)
    if not dest_dir:
        raise ValueError(f"Unknown model type '{model_type}' for '{display_name}'.")

    base_filename = os.path.basename(repo_filename)
    dest_path = os.path.join(dest_dir, base_filename)

    if os.path.lexists(dest_path):
        if not os.path.exists(dest_path):
            # lexists but not exists -> dangling symlink; remove and re-fetch.
            print(f"⚠️ Found and removed broken symlink: {dest_path}")
            os.remove(dest_path)
        else:
            return base_filename

    download_info = ALL_FILE_DOWNLOAD_MAP.get(base_filename)
    if not download_info:
        raise gr.Error(f"Model '{base_filename}' not found in file_list.yaml. Cannot download.")

    source = download_info.get("source")
    try:
        progress(0, desc=f"Downloading: {base_filename}")

        if source == "hf":
            repo_id = download_info.get("repo_id")
            hf_filename = download_info.get("repository_file_path", base_filename)
            if not repo_id:
                raise ValueError(f"repo_id is missing for HF model '{base_filename}'")

            cached_path = hf_hub_download(repo_id=repo_id, filename=hf_filename)
            os.makedirs(dest_dir, exist_ok=True)
            os.symlink(cached_path, dest_path)
            print(f"✅ Symlinked '{cached_path}' to '{dest_path}'")

        elif source == "civitai":
            model_version_id = download_info.get("model_version_id")
            if not model_version_id:
                raise ValueError(f"model_version_id is missing for Civitai model '{base_filename}'")

            file_info = get_civitai_file_info(model_version_id)
            if not file_info or not file_info.get('downloadUrl'):
                raise ConnectionError(f"Could not get download URL for Civitai model version ID {model_version_id}")

            status = download_file(
                file_info['downloadUrl'], dest_path, progress=progress, desc=f"Downloading: {base_filename}"
            )
            # BUGFIX: download_file reports errors as "Download failed ...";
            # the previous check for "Failed" (capital F) never matched, so
            # failed downloads were silently reported as success.
            if "failed" in status.lower():
                raise ConnectionError(status)
        else:
            raise NotImplementedError(f"Download source '{source}' is not implemented for '{base_filename}'")

        progress(1.0, desc=f"Downloaded: {base_filename}")

    except Exception as e:
        # Remove any partial file/symlink so the next attempt starts clean.
        if os.path.lexists(dest_path):
            try:
                os.remove(dest_path)
            except OSError:
                pass
        raise gr.Error(f"Failed to download and link '{display_name}': {e}")

    return base_filename
|
| 377 |
+
|
| 378 |
+
def ensure_controlnet_model_downloaded(filename: str, progress):
    """Ensure a ControlNet model file exists under CONTROLNET_DIR.

    No-op for empty/"None" filenames or files already present. HF sources
    are symlinked from the hub cache; Civitai sources are downloaded.
    Raises gr.Error on failure, after removing partial files/symlinks.
    """
    if not filename or filename == "None":
        return

    dest_path = os.path.join(CONTROLNET_DIR, filename)
    if os.path.exists(dest_path):
        return

    download_info = ALL_FILE_DOWNLOAD_MAP.get(filename)
    if not download_info:
        # BUGFIX: messages below previously contained a literal "(unknown)"
        # placeholder instead of the actual filename.
        raise gr.Error(f"ControlNet model '{filename}' not found in configuration (file_list.yaml). Cannot download.")

    source = download_info.get("source")

    try:
        if source == "hf":
            repo_id = download_info.get("repo_id")
            repo_filename = download_info.get("repository_file_path", filename)
            if not repo_id:
                raise ValueError("repo_id is missing for Hugging Face download.")

            progress(0, desc=f"Downloading CN: {filename}")
            cached_path = hf_hub_download(repo_id=repo_id, filename=repo_filename)
            os.makedirs(CONTROLNET_DIR, exist_ok=True)
            os.symlink(cached_path, dest_path)
            print(f"✅ Symlinked ControlNet '{cached_path}' to '{dest_path}'")
            progress(1.0, desc=f"Downloaded CN: {filename}")

        elif source == "civitai":
            model_version_id = download_info.get("model_version_id")
            if not model_version_id:
                raise ValueError("model_version_id is missing for Civitai download.")

            file_info = get_civitai_file_info(model_version_id)
            if not file_info or not file_info.get('downloadUrl'):
                raise ConnectionError(f"Could not get download URL for Civitai model version ID {model_version_id}")

            status = download_file(
                file_info['downloadUrl'],
                dest_path,
                progress=progress,
                desc=f"Downloading CN: {filename}"
            )
            # BUGFIX: download_file reports errors as "Download failed ...";
            # the previous check for "Failed" (capital F) never matched.
            if "failed" in status.lower():
                raise ConnectionError(status)
        else:
            raise NotImplementedError(f"Download source '{source}' is not implemented for ControlNets.")

    except Exception as e:
        # Remove any partial file/symlink so the next attempt starts clean.
        if os.path.lexists(dest_path):
            try:
                os.remove(dest_path)
            except OSError:
                pass
        raise gr.Error(f"Failed to download ControlNet model '{filename}': {e}")
|
| 433 |
+
|
| 434 |
+
def load_ipadapter_presets():
    """Load yaml/ipadapter.yaml into the module-level IPADAPTER_PRESETS dict.

    Idempotent: a second call returns immediately. On any parse/IO error the
    cache is set to {} so later lookups degrade gracefully instead of
    crashing the app.
    """
    global IPADAPTER_PRESETS
    if IPADAPTER_PRESETS is not None:
        return

    # Local import: this module otherwise relies on `from core.settings
    # import *` happening to re-export `yaml`, which is fragile.
    import yaml

    project_root = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
    ipadapter_list_path = os.path.join(project_root, 'yaml', 'ipadapter.yaml')

    try:
        with open(ipadapter_list_path, 'r', encoding='utf-8') as f:
            presets_list = yaml.safe_load(f)

        # Index presets by display name for O(1) lookup at generation time.
        IPADAPTER_PRESETS = {item['preset_name']: item for item in presets_list}
        print("✅ IPAdapter presets loaded successfully.")
    except Exception as e:
        print(f"❌ FATAL: Could not load or parse ipadapter.yaml. IPAdapter will not work. Error: {e}")
        IPADAPTER_PRESETS = {}
|
| 451 |
+
|
| 452 |
+
def ensure_ipadapter_models_downloaded(preset_name: str, progress):
    """Download every model file (vision / ipadapter / lora) a preset needs.

    Registers each file under a synthetic display name in ALL_MODEL_MAP and
    delegates to _ensure_model_downloaded. Per-file failures are logged and
    do not abort the remaining files.
    """
    if not preset_name:
        return

    if IPADAPTER_PRESETS is None:
        raise RuntimeError("IPAdapter presets have not been loaded. `load_ipadapter_presets` must be called on startup.")

    preset_info = IPADAPTER_PRESETS.get(preset_name)
    if not preset_info:
        print(f"⚠️ Warning: IPAdapter preset '{preset_name}' not found in configuration. Skipping download.")
        return

    # filename -> logical model type (drives the destination directory).
    model_files_to_check = {
        preset_info.get('vision_model'): 'CLIP_VISION',
        preset_info.get('ipadapter_model'): 'IPADAPTER',
        preset_info.get('lora_model'): 'LORA'
    }

    for filename, model_type in model_files_to_check.items():
        if not filename:
            continue

        # BUGFIX: the key must be unique per file — a constant "(unknown)"
        # placeholder made every asset collide on one ALL_MODEL_MAP entry,
        # so only the first registered file's mapping was ever used.
        temp_display_name = f"ipadapter_asset_{filename}"

        if temp_display_name not in ALL_MODEL_MAP:
            ALL_MODEL_MAP[temp_display_name] = (None, filename, model_type, None)

        try:
            _ensure_model_downloaded(temp_display_name, progress)
        except Exception as e:
            print(f"❌ Error ensuring download for IPAdapter asset '{filename}': {e}")
|
| 483 |
+
|
| 484 |
+
|
| 485 |
+
def parse_parameters(params_text: str) -> dict:
    """Parse an A1111-style PNG 'parameters' string into a dict.

    Layout assumed: line 1 = prompt, line 2 = optional "Negative prompt: ...",
    remaining lines = comma-separated "Key: value" pairs. Missing or
    malformed values fall back to sensible defaults instead of raising.
    """
    data = {}
    lines = params_text.strip().split('\n')
    data['prompt'] = lines[0]
    data['negative_prompt'] = (
        lines[1].replace("Negative prompt:", "").strip()
        if len(lines) > 1 and lines[1].startswith("Negative prompt:")
        else ""
    )

    params_line = '\n'.join(lines[2:])

    def find_param(key, default, cast_type=str):
        # Value runs up to the next comma / newline / end of string.
        match = re.search(fr"\b{key}: ([^,]+?)(,|$|\n)", params_line)
        if not match:
            return default
        try:
            return cast_type(match.group(1).strip())
        except (ValueError, TypeError):
            # BUGFIX: a malformed value (e.g. "Steps: abc") previously
            # raised out of the parser; fall back to the default instead.
            return default

    data['steps'] = find_param("Steps", 28, int)
    data['sampler'] = find_param("Sampler", "euler", str)
    data['scheduler'] = find_param("Scheduler", "normal", str)
    data['cfg_scale'] = find_param("CFG scale", 7.5, float)
    data['seed'] = find_param("Seed", -1, int)
    data['clip_skip'] = find_param("Clip skip", 1, int)
    data['base_model'] = find_param("Base Model", list(ALL_MODEL_MAP.keys())[0] if ALL_MODEL_MAP else "", str)
    data['model_hash'] = find_param("Model hash", None, str)

    size_match = re.search(r"Size: (\d+)x(\d+)", params_line)
    data['width'], data['height'] = (int(size_match.group(1)), int(size_match.group(2))) if size_match else (1024, 1024)

    return data
|
| 510 |
+
|
| 511 |
+
def get_png_info(image) -> tuple[str, str, str]:
    """Extract prompt, negative prompt and remaining parameters from an image.

    Reads the A1111-style 'parameters' text chunk stored in the image's
    metadata. Returns a (prompt, negative_prompt, other_params) triple;
    when no metadata is present the third element carries a notice string.
    """
    if not image:
        return "", "", "No metadata found in the image."
    params = image.info.get('parameters')
    if not params:
        return "", "", "No metadata found in the image."

    parsed = parse_parameters(params)
    # Everything past the first two lines is the comma-separated parameter
    # list; re-split it so each "key: value" pair lands on its own line.
    tail = '\n'.join(params.strip().split('\n')[2:])
    other_params_text = "\n".join(piece.strip() for piece in tail.split(','))

    return parsed.get('prompt', ''), parsed.get('negative_prompt', ''), other_params_text
|
| 520 |
+
|
| 521 |
+
|
| 522 |
+
def build_preprocessor_model_map():
    """Build and cache the map of preprocessor display names to model downloads.

    Scans the controlnet_aux node wrapper sources for their display names and
    `from_pretrained` model filenames, merging in a hand-maintained table for
    wrappers whose models cannot be discovered by regex. The result is cached
    in the module-level PREPROCESSOR_MODEL_MAP and returned.
    """
    global PREPROCESSOR_MODEL_MAP
    if PREPROCESSOR_MODEL_MAP is not None:
        return PREPROCESSOR_MODEL_MAP
    print("--- Building ControlNet Preprocessor model map ---")
    # (repo_id, filename) pairs for wrappers that don't use from_pretrained
    # in a regex-discoverable way; keyed by wrapper module stem.
    manual_map = {
        "dwpose": [("yzd-v/DWPose", "yolox_l.onnx"), ("yzd-v/DWPose", "dw-ll_ucoco_384.onnx"), ("hr16/UnJIT-DWPose", "dw-ll_ucoco.onnx"), ("hr16/DWPose-TorchScript-BatchSize5", "dw-ll_ucoco_384_bs5.torchscript.pt"), ("hr16/DWPose-TorchScript-BatchSize5", "rtmpose-m_ap10k_256_bs5.torchscript.pt"), ("hr16/yolo-nas-fp16", "yolo_nas_l_fp16.onnx"), ("hr16/yolo-nas-fp16", "yolo_nas_m_fp16.onnx"), ("hr16/yolo-nas-fp16", "yolo_nas_s_fp16.onnx")],
        "densepose": [("LayerNorm/DensePose-TorchScript-with-hint-image", "densepose_r50_fpn_dl.torchscript"), ("LayerNorm/DensePose-TorchScript-with-hint-image", "densepose_r101_fpn_dl.torchscript")]
    }
    collected = {}
    from nodes import NODE_DISPLAY_NAME_MAPPINGS
    wrappers_dir = Path("./custom_nodes/comfyui_controlnet_aux/node_wrappers/")
    if not wrappers_dir.exists():
        print("⚠️ ControlNet AUX wrappers directory not found. Cannot build model map.")
        PREPROCESSOR_MODEL_MAP = {}
        return PREPROCESSOR_MODEL_MAP
    for wrapper_file in wrappers_dir.glob("*.py"):
        if wrapper_file.name == "__init__.py":
            continue
        with open(wrapper_file, 'r', encoding='utf-8') as f:
            content = f.read()
        # First (class_name, display_name) entry of the wrapper's
        # NODE_DISPLAY_NAME_MAPPINGS literal.
        display_name_matches = re.findall(r'NODE_DISPLAY_NAME_MAPPINGS\s*=\s*{(?:.|\n)*?["\'](.*?)["\']\s*:\s*["\'](.*?)["\']', content)
        for _, display_name in display_name_matches:
            if display_name not in collected:
                collected[display_name] = []
            manual_key = wrapper_file.stem
            if manual_key in manual_map:
                collected[display_name].extend(manual_map[manual_key])
            # Filenames passed to from_pretrained(...) inside the wrapper.
            discovered = re.findall(r"from_pretrained\s*\(\s*(?:filename=)?\s*f?[\"']([^\"']+)[\"']", content)
            for model_filename in discovered:
                # Default repo unless the filename marks a known family.
                repo_id = "lllyasviel/Annotators"
                if "depth_anything" in model_filename and "v2" in model_filename:
                    repo_id = "LiheYoung/Depth-Anything-V2"
                elif "depth_anything" in model_filename:
                    repo_id = "LiheYoung/Depth-Anything"
                elif "diffusion_edge" in model_filename:
                    repo_id = "hr16/Diffusion-Edge"
                collected[display_name].append((repo_id, model_filename))
    # Deduplicate and keep only names that actually have models.
    final_map = {name: sorted(list(set(models))) for name, models in collected.items() if models}
    PREPROCESSOR_MODEL_MAP = final_map
    print("✅ ControlNet Preprocessor model map built.")
    return PREPROCESSOR_MODEL_MAP
|
| 555 |
+
|
| 556 |
+
def build_preprocessor_parameter_map():
    """Build and cache the map of preprocessor display names to UI parameters.

    Interrogates every registered controlnet_aux node class via its
    INPUT_TYPES() declaration, recording each tunable input's name, type and
    config dict. The result is stored in the module-level
    PREPROCESSOR_PARAMETER_MAP; subsequent calls are no-ops.
    """
    global PREPROCESSOR_PARAMETER_MAP
    if PREPROCESSOR_PARAMETER_MAP is not None:
        return
    print("--- Building ControlNet Preprocessor parameter map ---")
    parameter_map = {}
    from nodes import NODE_CLASS_MAPPINGS, NODE_DISPLAY_NAME_MAPPINGS
    for class_name, node_class in NODE_CLASS_MAPPINGS.items():
        if not hasattr(node_class, "INPUT_TYPES"):
            continue
        # Only inspect nodes shipped by the controlnet_aux package.
        if hasattr(node_class, '__module__') and 'comfyui_controlnet_aux.node_wrappers' not in node_class.__module__:
            continue
        display_name = NODE_DISPLAY_NAME_MAPPINGS.get(class_name)
        if not display_name:
            continue
        try:
            declared = node_class.INPUT_TYPES()
            combined_inputs = {**declared.get('required', {}), **declared.get('optional', {})}
            extracted = []
            for input_name, details in combined_inputs.items():
                # These inputs are wired by the pipeline, not user-tunable.
                if input_name in ['image', 'resolution', 'pose_kps']:
                    continue
                if not isinstance(details, (list, tuple)) or not details:
                    continue
                input_type = details[0]
                input_config = details[1] if len(details) > 1 and isinstance(details[1], dict) else {}
                extracted.append({"name": input_name, "type": input_type, "config": input_config})
            if extracted:
                parameter_map[display_name] = extracted
        except Exception as e:
            # Best-effort: a single misbehaving node must not kill the scan.
            print(f"⚠️ Could not parse parameters for {display_name}: {e}")
    PREPROCESSOR_PARAMETER_MAP = parameter_map
    print("✅ ControlNet Preprocessor parameter map built.")
|
| 583 |
+
|
| 584 |
+
def print_welcome_message():
    """Print the startup banner with author credit and GPL-3.0 licensing note."""
    author = "RioShiina"
    url = "https://huggingface.co/RioShiina"
    rule = "=" * 72

    banner = "".join([
        f"\n{rule}\n\n",
        " Thank you for using this project!\n\n",
        f" **Author:** {author}\n",
        f" **Find more from the author:** {url}\n\n",
        " This project is open-source under the GNU General Public License v3.0 (GPL-3.0).\n",
        " As it's built upon GPL-3.0 components (like ComfyUI), any modifications you\n",
        " distribute must also be open-sourced under the same license.\n\n",
        " Your respect for the principles of free software is greatly appreciated!\n\n",
        f"{rule}\n",
    ])

    print(banner)
|
yaml/constants.yaml
ADDED
|
@@ -0,0 +1,32 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
MAX_LORAS: 5
|
| 2 |
+
MAX_CONTROLNETS: 5
|
| 3 |
+
MAX_IPADAPTERS: 5
|
| 4 |
+
MAX_EMBEDDINGS: 5
|
| 5 |
+
MAX_CONDITIONINGS: 10
|
| 6 |
+
LORA_SOURCE_CHOICES: ["Civitai", "File"]
|
| 7 |
+
|
| 8 |
+
RESOLUTION_MAP:
|
| 9 |
+
sdxl:
|
| 10 |
+
"1:1 (Square)": [1024, 1024]
|
| 11 |
+
"16:9 (Landscape)": [1344, 768]
|
| 12 |
+
"9:16 (Portrait)": [768, 1344]
|
| 13 |
+
"4:3 (Classic)": [1152, 896]
|
| 14 |
+
"3:4 (Classic Portrait)": [896, 1152]
|
| 15 |
+
"3:2 (Photography)": [1216, 832]
|
| 16 |
+
"2:3 (Photography Portrait)": [832, 1216]
|
| 17 |
+
|
| 18 |
+
SAMPLER_MAP:
|
| 19 |
+
euler a: "euler_ancestral"
|
| 20 |
+
dpm++ 2s a: "dpmpp_2s_ancestral"
|
| 21 |
+
dpm++ 2m: "dpmpp_2m"
|
| 22 |
+
dpm++ sde: "dpmpp_sde"
|
| 23 |
+
dpm++ 2m sde: "dpmpp_2m_sde"
|
| 24 |
+
dpm++ 3m sde: "dpmpp_3m_sde"
|
| 25 |
+
ddim: "ddim"
|
| 26 |
+
uni_pc: "uni_pc"
|
| 27 |
+
euler_a: "euler_ancestral"
|
| 28 |
+
dpm++ 2s a karras: "dpmpp_2s_ancestral"
|
| 29 |
+
dpm++ 2m karras: "dpmpp_2m"
|
| 30 |
+
dpm++ sde karras: "dpmpp_sde"
|
| 31 |
+
dpm++ 2m sde karras: "dpmpp_2m_sde"
|
| 32 |
+
dpm++ 3m sde karras: "dpmpp_3m_sde"
|
yaml/controlnet_models.yaml
ADDED
|
@@ -0,0 +1,71 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
ControlNet:
|
| 2 |
+
SDXL:
|
| 3 |
+
- Filepath: "controlnet-union-sdxl-1.0_promax.safetensors"
|
| 4 |
+
Series: "xinsir Union"
|
| 5 |
+
Type: ["Tile Deblur", "Tile variation", "Tile Super Resolution", "Image Inpainting", "Image Outpainting", "OpenPose", "Depth", "Canny", "Lineart", "Anime Lineart", "Mlsd", "Scribble", "Hed", "Pidi(Softedge)", "Teed", "Segment", "Normal"]
|
| 6 |
+
- Filepath: "controlnet-tile-sdxl-1.0.safetensors"
|
| 7 |
+
Series: "xinsir"
|
| 8 |
+
Type: ["Tile"]
|
| 9 |
+
- Filepath: "controlnet-canny-sdxl-1.0_V2.safetensors"
|
| 10 |
+
Series: "xinsir"
|
| 11 |
+
Type: ["Canny"]
|
| 12 |
+
- Filepath: "controlnet-openpose-sdxl-1.0.safetensors"
|
| 13 |
+
Series: "xinsir"
|
| 14 |
+
Type: ["OpenPose"]
|
| 15 |
+
- Filepath: "controlnet-openpose-sdxl-1.0.safetensors"
|
| 16 |
+
Series: "xinsir"
|
| 17 |
+
Type: ["OpenPose(Twins)"]
|
| 18 |
+
- Filepath: "controlnet-depth-sdxl-1.0.safetensors"
|
| 19 |
+
Series: "xinsir"
|
| 20 |
+
Type: ["Depth"]
|
| 21 |
+
- Filepath: "controlnet-scribble-sdxl-1.0.safetensors"
|
| 22 |
+
Series: "xinsir"
|
| 23 |
+
Type: ["Scribble"]
|
| 24 |
+
- Filepath: "anime-painter.safetensors"
|
| 25 |
+
Series: "xinsir"
|
| 26 |
+
Type: ["Anime Painter"]
|
| 27 |
+
- Filepath: "noob_sdxl_controlnet_canny.fp16.safetensors"
|
| 28 |
+
Series: "NoobAI"
|
| 29 |
+
Type: ["Canny"]
|
| 30 |
+
- Filepath: "noob-sdxl-controlnet-depth.fp16.safetensors"
|
| 31 |
+
Series: "NoobAI"
|
| 32 |
+
Type: ["Depth"]
|
| 33 |
+
- Filepath: "noob-sdxl-controlnet-lineart_anime.fp16.safetensors"
|
| 34 |
+
Series: "NoobAI"
|
| 35 |
+
Type: ["Anime Lineart"]
|
| 36 |
+
- Filepath: "noob-sdxl-controlnet-lineart_realistic.fp16.safetensors"
|
| 37 |
+
Series: "NoobAI"
|
| 38 |
+
Type: ["Realistic Lineart"]
|
| 39 |
+
- Filepath: "noob-sdxl-controlnet-manga_line.fp16.safetensors"
|
| 40 |
+
Series: "NoobAI"
|
| 41 |
+
Type: ["Manga Lineart"]
|
| 42 |
+
- Filepath: "noob-sdxl-controlnet-normal.fp16.safetensors"
|
| 43 |
+
Series: "NoobAI"
|
| 44 |
+
Type: ["Normal"]
|
| 45 |
+
- Filepath: "noob-sdxl-controlnet-softedge_hed.fp16.safetensors"
|
| 46 |
+
Series: "NoobAI"
|
| 47 |
+
Type: ["SoftEdge (HED)"]
|
| 48 |
+
- Filepath: "noob-sdxl-controlnet-tile.fp16.safetensors"
|
| 49 |
+
Series: "NoobAI"
|
| 50 |
+
Type: ["Tile"]
|
| 51 |
+
- Filepath: "noobaiXLControlnet_epsBlur.safetensors"
|
| 52 |
+
Series: "NoobAI"
|
| 53 |
+
Type: ["Blur"]
|
| 54 |
+
- Filepath: "noobaiXLControlnet_epsDepthMidasV11.safetensors"
|
| 55 |
+
Series: "NoobAI"
|
| 56 |
+
Type: ["Depth (Midas)"]
|
| 57 |
+
- Filepath: "noobaiXLControlnet_epsNormalMidas.safetensors"
|
| 58 |
+
Series: "NoobAI"
|
| 59 |
+
Type: ["Normal (Midas)"]
|
| 60 |
+
- Filepath: "noobaiXLControlnet_epsScribbleHed.safetensors"
|
| 61 |
+
Series: "NoobAI"
|
| 62 |
+
Type: ["Scribble (HED)"]
|
| 63 |
+
- Filepath: "noobaiXLControlnet_epsScribblePidinet.safetensors"
|
| 64 |
+
Series: "NoobAI"
|
| 65 |
+
Type: ["Scribble (PiDiNet)"]
|
| 66 |
+
- Filepath: "NoobAI_Inpainting_ControlNet.safetensors"
|
| 67 |
+
Series: "NoobAI"
|
| 68 |
+
Type: ["Inpainting"]
|
| 69 |
+
- Filepath: "noob_openpose_pre.safetensors"
|
| 70 |
+
Series: "NoobAI"
|
| 71 |
+
Type: ["OpenPose"]
|
yaml/file_list.yaml
ADDED
|
@@ -0,0 +1,177 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
file:
|
| 2 |
+
checkpoints:
|
| 3 |
+
- filename: "waiNSFWIllustrious_v150.safetensors"
|
| 4 |
+
source: hf
|
| 5 |
+
repo_id: "mogaru99/waiNSFWIllustrious_v150"
|
| 6 |
+
repository_file_path: "waiNSFWIllustrious_v150.safetensors"
|
| 7 |
+
- filename: "waiIllustriousSDXL_v150.safetensors"
|
| 8 |
+
source: hf
|
| 9 |
+
repo_id: "elias1001/waiIllustriousSDXL_v150"
|
| 10 |
+
repository_file_path: "waiIllustriousSDXL_v150.safetensors"
|
| 11 |
+
- filename: "illustrious_pencil-XL-v5.0.0.safetensors"
|
| 12 |
+
source: hf
|
| 13 |
+
repo_id: "bluepen5805/illustrious_pencil-XL"
|
| 14 |
+
repository_file_path: "illustrious_pencil-XL-v5.0.0.safetensors"
|
| 15 |
+
- filename: "hassakuXLIllustrious_v30.safetensors"
|
| 16 |
+
source: hf
|
| 17 |
+
repo_id: "misri/hassakuXLIllustrious_v30"
|
| 18 |
+
repository_file_path: "hassakuXLIllustrious_v30.safetensors"
|
| 19 |
+
- filename: "Illustrious-XL-v2.0.safetensors"
|
| 20 |
+
source: hf
|
| 21 |
+
repo_id: "OnomaAIResearch/Illustrious-XL-v2.0"
|
| 22 |
+
repository_file_path: "Illustrious-XL-v2.0.safetensors"
|
| 23 |
+
- filename: "Illustrious-XL-v1.1.safetensors"
|
| 24 |
+
source: hf
|
| 25 |
+
repo_id: "OnomaAIResearch/Illustrious-XL-v1.1"
|
| 26 |
+
repository_file_path: "Illustrious-XL-v1.1.safetensors"
|
| 27 |
+
- filename: "Illustrious-XL-v1.0.safetensors"
|
| 28 |
+
source: hf
|
| 29 |
+
repo_id: "OnomaAIResearch/Illustrious-XL-v1.0"
|
| 30 |
+
repository_file_path: "Illustrious-XL-v1.0.safetensors"
|
| 31 |
+
- filename: "illustriousXL_v01.safetensors"
|
| 32 |
+
source: hf
|
| 33 |
+
repo_id: "AiAF/Illustrious-XL-v0.1.safetensors"
|
| 34 |
+
repository_file_path: "illustriousXL_v01.safetensors"
|
| 35 |
+
|
| 36 |
+
clip_vision:
|
| 37 |
+
- filename: "CLIP-ViT-bigG-14-laion2B-39B-b160k.safetensors"
|
| 38 |
+
source: "hf"
|
| 39 |
+
repo_id: "h94/IP-Adapter"
|
| 40 |
+
repository_file_path: "sdxl_models/image_encoder/model.safetensors"
|
| 41 |
+
- filename: "CLIP-ViT-H-14-laion2B-s32B-b79K.safetensors"
|
| 42 |
+
source: "hf"
|
| 43 |
+
repo_id: "h94/IP-Adapter"
|
| 44 |
+
repository_file_path: "models/image_encoder/model.safetensors"
|
| 45 |
+
|
| 46 |
+
controlnet:
|
| 47 |
+
- filename: "controlnet-union-sdxl-1.0_promax.safetensors"
|
| 48 |
+
source: "hf"
|
| 49 |
+
repo_id: "xinsir/controlnet-union-sdxl-1.0"
|
| 50 |
+
repository_file_path: "diffusion_pytorch_model_promax.safetensors"
|
| 51 |
+
- filename: "controlnet-tile-sdxl-1.0.safetensors"
|
| 52 |
+
source: "hf"
|
| 53 |
+
repo_id: "xinsir/controlnet-tile-sdxl-1.0"
|
| 54 |
+
repository_file_path: "diffusion_pytorch_model.safetensors"
|
| 55 |
+
- filename: "controlnet-canny-sdxl-1.0_V2.safetensors"
|
| 56 |
+
source: "hf"
|
| 57 |
+
repo_id: "xinsir/controlnet-canny-sdxl-1.0"
|
| 58 |
+
repository_file_path: "diffusion_pytorch_model_V2.safetensors"
|
| 59 |
+
- filename: "controlnet-openpose-sdxl-1.0.safetensors"
|
| 60 |
+
source: "hf"
|
| 61 |
+
repo_id: "xinsir/controlnet-openpose-sdxl-1.0"
|
| 62 |
+
repository_file_path: "diffusion_pytorch_model.safetensors"
|
| 63 |
+
- filename: "controlnet-openpose-sdxl-1.0.safetensors"
|
| 64 |
+
source: "hf"
|
| 65 |
+
repo_id: "xinsir/controlnet-openpose-sdxl-1.0_twins"
|
| 66 |
+
repository_file_path: "diffusion_pytorch_model_twins.safetensors"
|
| 67 |
+
- filename: "controlnet-depth-sdxl-1.0.safetensors"
|
| 68 |
+
source: "hf"
|
| 69 |
+
repo_id: "xinsir/controlnet-depth-sdxl-1.0"
|
| 70 |
+
repository_file_path: "diffusion_pytorch_model.safetensors"
|
| 71 |
+
- filename: "controlnet-scribble-sdxl-1.0.safetensors"
|
| 72 |
+
source: "hf"
|
| 73 |
+
repo_id: "xinsir/controlnet-scribble-sdxl-1.0"
|
| 74 |
+
repository_file_path: "diffusion_pytorch_model.safetensors"
|
| 75 |
+
- filename: "anime-painter.safetensors"
|
| 76 |
+
source: "hf"
|
| 77 |
+
repo_id: "xinsir/anime-painter"
|
| 78 |
+
repository_file_path: "diffusion_pytorch_model.safetensors"
|
| 79 |
+
- filename: "noob_sdxl_controlnet_canny.fp16.safetensors"
|
| 80 |
+
source: "hf"
|
| 81 |
+
repo_id: "Eugeoter/noob-sdxl-controlnet-canny"
|
| 82 |
+
repository_file_path: "noob_sdxl_controlnet_canny.fp16.safetensors"
|
| 83 |
+
- filename: "noob-sdxl-controlnet-depth.fp16.safetensors"
|
| 84 |
+
source: "hf"
|
| 85 |
+
repo_id: "Eugeoter/noob-sdxl-controlnet-depth"
|
| 86 |
+
repository_file_path: "diffusion_pytorch_model.fp16.safetensors"
|
| 87 |
+
- filename: "noob-sdxl-controlnet-lineart_anime.fp16.safetensors"
|
| 88 |
+
source: "hf"
|
| 89 |
+
repo_id: "Eugeoter/noob-sdxl-controlnet-lineart_anime"
|
| 90 |
+
repository_file_path: "diffusion_pytorch_model.fp16.safetensors"
|
| 91 |
+
- filename: "noob-sdxl-controlnet-lineart_realistic.fp16.safetensors"
|
| 92 |
+
source: "hf"
|
| 93 |
+
repo_id: "Eugeoter/noob-sdxl-controlnet-lineart_realistic"
|
| 94 |
+
repository_file_path: "diffusion_pytorch_model.fp16.safetensors"
|
| 95 |
+
- filename: "noob-sdxl-controlnet-manga_line.fp16.safetensors"
|
| 96 |
+
source: "hf"
|
| 97 |
+
repo_id: "Eugeoter/noob-sdxl-controlnet-manga_line"
|
| 98 |
+
repository_file_path: "diffusion_pytorch_model.fp16.safetensors"
|
| 99 |
+
- filename: "noob-sdxl-controlnet-normal.fp16.safetensors"
|
| 100 |
+
source: "hf"
|
| 101 |
+
repo_id: "Eugeoter/noob-sdxl-controlnet-normal"
|
| 102 |
+
repository_file_path: "diffusion_pytorch_model.fp16.safetensors"
|
| 103 |
+
- filename: "noob-sdxl-controlnet-softedge_hed.fp16.safetensors"
|
| 104 |
+
source: "hf"
|
| 105 |
+
repo_id: "Eugeoter/noob-sdxl-controlnet-softedge_hed"
|
| 106 |
+
repository_file_path: "diffusion_pytorch_model.fp16.safetensors"
|
| 107 |
+
- filename: "noob-sdxl-controlnet-tile.fp16.safetensors"
|
| 108 |
+
source: "hf"
|
| 109 |
+
repo_id: "Eugeoter/noob-sdxl-controlnet-tile"
|
| 110 |
+
repository_file_path: "diffusion_pytorch_model.fp16.safetensors"
|
| 111 |
+
- filename: "NoobAI_Inpainting_ControlNet.safetensors"
|
| 112 |
+
source: "hf"
|
| 113 |
+
repo_id: "Wenaka/NoobAI_XL_Inpainting_ControlNet_Full"
|
| 114 |
+
repository_file_path: "NoobAI_Inpainting_ControlNet.safetensors"
|
| 115 |
+
- filename: "noob_openpose_pre.safetensors"
|
| 116 |
+
source: "hf"
|
| 117 |
+
repo_id: "Laxhar/noob_openpose"
|
| 118 |
+
repository_file_path: "openpose_pre.safetensors"
|
| 119 |
+
- filename: "noobaiXLControlnet_epsBlur.safetensors"
|
| 120 |
+
source: "civitai"
|
| 121 |
+
model_version_id: "1731092"
|
| 122 |
+
- filename: "noobaiXLControlnet_epsDepthMidasV11.safetensors"
|
| 123 |
+
source: "civitai"
|
| 124 |
+
model_version_id: "1091944"
|
| 125 |
+
- filename: "noobaiXLControlnet_epsNormalMidas.safetensors"
|
| 126 |
+
source: "civitai"
|
| 127 |
+
model_version_id: "1044514"
|
| 128 |
+
- filename: "noobaiXLControlnet_epsScribbleHed.safetensors"
|
| 129 |
+
source: "civitai"
|
| 130 |
+
model_version_id: "1095278"
|
| 131 |
+
- filename: "noobaiXLControlnet_epsScribblePidinet.safetensors"
|
| 132 |
+
source: "civitai"
|
| 133 |
+
model_version_id: "1097585"
|
| 134 |
+
|
| 135 |
+
ipadapter:
|
| 136 |
+
- filename: "ip-adapter_sdxl_vit-h.safetensors"
|
| 137 |
+
source: "hf"
|
| 138 |
+
repo_id: "h94/IP-Adapter"
|
| 139 |
+
repository_file_path: "sdxl_models/ip-adapter_sdxl_vit-h.safetensors"
|
| 140 |
+
- filename: "ip-adapter_sdxl.safetensors"
|
| 141 |
+
source: "hf"
|
| 142 |
+
repo_id: "h94/IP-Adapter"
|
| 143 |
+
repository_file_path: "sdxl_models/ip-adapter_sdxl.safetensors"
|
| 144 |
+
- filename: "ip-adapter-plus_sdxl_vit-h.safetensors"
|
| 145 |
+
source: "hf"
|
| 146 |
+
repo_id: "h94/IP-Adapter"
|
| 147 |
+
repository_file_path: "sdxl_models/ip-adapter-plus_sdxl_vit-h.safetensors"
|
| 148 |
+
- filename: "ip-adapter-plus-face_sdxl_vit-h.safetensors"
|
| 149 |
+
source: "hf"
|
| 150 |
+
repo_id: "h94/IP-Adapter"
|
| 151 |
+
repository_file_path: "sdxl_models/ip-adapter-plus-face_sdxl_vit-h.safetensors"
|
| 152 |
+
- filename: "ip-adapter-faceid_sdxl.bin"
|
| 153 |
+
source: "hf"
|
| 154 |
+
repo_id: "h94/IP-Adapter-FaceID"
|
| 155 |
+
repository_file_path: "ip-adapter-faceid_sdxl.bin"
|
| 156 |
+
- filename: "ip-adapter-faceid-plusv2_sdxl.bin"
|
| 157 |
+
source: "hf"
|
| 158 |
+
repo_id: "h94/IP-Adapter-FaceID"
|
| 159 |
+
repository_file_path: "ip-adapter-faceid-plusv2_sdxl.bin"
|
| 160 |
+
- filename: "ip-adapter-faceid-portrait_sdxl.bin"
|
| 161 |
+
source: "hf"
|
| 162 |
+
repo_id: "h94/IP-Adapter-FaceID"
|
| 163 |
+
repository_file_path: "ip-adapter-faceid-portrait_sdxl.bin"
|
| 164 |
+
- filename: "ip-adapter-faceid-portrait_sdxl_unnorm.bin"
|
| 165 |
+
source: "hf"
|
| 166 |
+
repo_id: "h94/IP-Adapter-FaceID"
|
| 167 |
+
repository_file_path: "ip-adapter-faceid-portrait_sdxl_unnorm.bin"
|
| 168 |
+
|
| 169 |
+
loras:
|
| 170 |
+
- filename: "ip-adapter-faceid_sdxl_lora.safetensors"
|
| 171 |
+
source: "hf"
|
| 172 |
+
repo_id: "h94/IP-Adapter-FaceID"
|
| 173 |
+
repository_file_path: "ip-adapter-faceid_sdxl_lora.safetensors"
|
| 174 |
+
- filename: "ip-adapter-faceid-plusv2_sdxl_lora.safetensors"
|
| 175 |
+
source: "hf"
|
| 176 |
+
repo_id: "h94/IP-Adapter-FaceID"
|
| 177 |
+
repository_file_path: "ip-adapter-faceid-plusv2_sdxl_lora.safetensors"
|
yaml/injectors.yaml
ADDED
|
@@ -0,0 +1,12 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
injector_definitions:
|
| 2 |
+
dynamic_controlnet_chains:
|
| 3 |
+
module: "chain_injectors.controlnet_injector"
|
| 4 |
+
dynamic_ipadapter_chains:
|
| 5 |
+
module: "chain_injectors.ipadapter_injector"
|
| 6 |
+
dynamic_conditioning_chains:
|
| 7 |
+
module: "chain_injectors.conditioning_injector"
|
| 8 |
+
|
| 9 |
+
injector_order:
|
| 10 |
+
- dynamic_ipadapter_chains
|
| 11 |
+
- dynamic_conditioning_chains
|
| 12 |
+
- dynamic_controlnet_chains
|
yaml/ipadapter.yaml
ADDED
|
@@ -0,0 +1,33 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
- preset_name: "STANDARD (medium strength)"
|
| 2 |
+
vision_model: "CLIP-ViT-H-14-laion2B-s32B-b79K.safetensors"
|
| 3 |
+
ipadapter_model: "ip-adapter_sdxl_vit-h.safetensors"
|
| 4 |
+
|
| 5 |
+
- preset_name: "VIT-G (medium strength)"
|
| 6 |
+
vision_model: "CLIP-ViT-bigG-14-laion2B-39B-b160k.safetensors"
|
| 7 |
+
ipadapter_model: "ip-adapter_sdxl.safetensors"
|
| 8 |
+
|
| 9 |
+
- preset_name: "PLUS (high strength)"
|
| 10 |
+
vision_model: "CLIP-ViT-H-14-laion2B-s32B-b79K.safetensors"
|
| 11 |
+
ipadapter_model: "ip-adapter-plus_sdxl_vit-h.safetensors"
|
| 12 |
+
|
| 13 |
+
- preset_name: "PLUS FACE (portraits)"
|
| 14 |
+
vision_model: "CLIP-ViT-H-14-laion2B-s32B-b79K.safetensors"
|
| 15 |
+
ipadapter_model: "ip-adapter-plus-face_sdxl_vit-h.safetensors"
|
| 16 |
+
|
| 17 |
+
- preset_name: "FACEID"
|
| 18 |
+
vision_model: "CLIP-ViT-H-14-laion2B-s32B-b79K.safetensors"
|
| 19 |
+
ipadapter_model: "ip-adapter-faceid_sdxl.bin"
|
| 20 |
+
lora_model: "ip-adapter-faceid_sdxl_lora.safetensors"
|
| 21 |
+
|
| 22 |
+
- preset_name: "FACEID PLUS V2"
|
| 23 |
+
vision_model: "CLIP-ViT-H-14-laion2B-s32B-b79K.safetensors"
|
| 24 |
+
ipadapter_model: "ip-adapter-faceid-plusv2_sdxl.bin"
|
| 25 |
+
lora_model: "ip-adapter-faceid-plusv2_sdxl_lora.safetensors"
|
| 26 |
+
|
| 27 |
+
- preset_name: "FACEID PORTRAIT (style transfer)"
|
| 28 |
+
vision_model: "CLIP-ViT-H-14-laion2B-s32B-b79K.safetensors"
|
| 29 |
+
ipadapter_model: "ip-adapter-faceid-portrait_sdxl.bin"
|
| 30 |
+
|
| 31 |
+
- preset_name: "FACEID PORTRAIT UNNORM - SDXL only (strong)"
|
| 32 |
+
vision_model: "CLIP-ViT-H-14-laion2B-s32B-b79K.safetensors"
|
| 33 |
+
ipadapter_model: "ip-adapter-faceid-portrait_sdxl_unnorm.bin"
|
yaml/model_list.yaml
ADDED
|
@@ -0,0 +1,17 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
Checkpoint:
|
| 2 |
+
- display_name: "WAI0731/wai-nsfw-illustrious-sdxl-v150"
|
| 3 |
+
path: "waiNSFWIllustrious_v150.safetensors"
|
| 4 |
+
- display_name: "WAI0731/wai-illustrious-sdxl-v150"
|
| 5 |
+
path: "waiIllustriousSDXL_v150.safetensors"
|
| 6 |
+
- display_name: "blue_pen5805/illustrious_pencil-XL-v5.0.0"
|
| 7 |
+
path: "illustrious_pencil-XL-v5.0.0.safetensors"
|
| 8 |
+
- display_name: "Ikena/hassaku-xl-illustrious-v30"
|
| 9 |
+
path: "hassakuXLIllustrious_v30.safetensors"
|
| 10 |
+
- display_name: "OnomaAIResearch/Illustrious-XL-v2.0"
|
| 11 |
+
path: "Illustrious-XL-v2.0.safetensors"
|
| 12 |
+
- display_name: "OnomaAIResearch/Illustrious-XL-v1.1"
|
| 13 |
+
path: "Illustrious-XL-v1.1.safetensors"
|
| 14 |
+
- display_name: "OnomaAIResearch/Illustrious-XL-v1.0"
|
| 15 |
+
path: "Illustrious-XL-v1.0.safetensors"
|
| 16 |
+
- display_name: "aria1th261/Illustrious-XL-v0.1"
|
| 17 |
+
path: "illustriousXL_v01.safetensors"
|