Spaces:
Runtime error
Runtime error
yupeng.zhou
committed on
Commit
·
e578dd5
1
Parent(s):
a33dac7
fix
Browse files
app.py
CHANGED
|
@@ -40,7 +40,7 @@ global models_dict
|
|
| 40 |
use_va = True
|
| 41 |
models_dict = {
|
| 42 |
# "Juggernaut": "RunDiffusion/Juggernaut-XL-v8",
|
| 43 |
-
|
| 44 |
# "SDXL":"stabilityai/stable-diffusion-xl-base-1.0" ,
|
| 45 |
"Unstable": "stablediffusionapi/sdxl-unstable-diffusers-y"
|
| 46 |
}
|
|
@@ -431,14 +431,14 @@ global sd_model_path
|
|
| 431 |
sd_model_path = models_dict["Unstable"]#"SG161222/RealVisXL_V4.0"
|
| 432 |
use_safetensors= False
|
| 433 |
### LOAD Stable Diffusion Pipeline
|
| 434 |
-
pipe1 = StableDiffusionXLPipeline.from_pretrained(sd_model_path, torch_dtype=torch.float16, use_safetensors= use_safetensors)
|
| 435 |
-
pipe1 = pipe1.to("cpu")
|
| 436 |
-
pipe1.enable_freeu(s1=0.6, s2=0.4, b1=1.1, b2=1.2)
|
| 437 |
-
# pipe.scheduler = DDIMScheduler.from_config(pipe.scheduler.config)
|
| 438 |
-
pipe1.scheduler.set_timesteps(50)
|
| 439 |
###
|
| 440 |
pipe2 = PhotoMakerStableDiffusionXLPipeline.from_pretrained(
|
| 441 |
-
|
| 442 |
pipe2 = pipe2.to("cpu")
|
| 443 |
pipe2.load_photomaker_adapter(
|
| 444 |
os.path.dirname(photomaker_path),
|
|
@@ -450,6 +450,24 @@ pipe2 = pipe2.to("cpu")
|
|
| 450 |
pipe2.enable_freeu(s1=0.6, s2=0.4, b1=1.1, b2=1.2)
|
| 451 |
pipe2.fuse_lora()
|
| 452 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 453 |
######### Gradio Fuction #############
|
| 454 |
|
| 455 |
def swap_to_gallery(images):
|
|
@@ -489,8 +507,8 @@ def process_generation(_sd_type,_model_type,_upload_images, _num_steps,style_nam
|
|
| 489 |
raise gr.Error("Please add the triger word \" img \" behind the class word you want to customize, such as: man img or woman img")
|
| 490 |
if _upload_images is None and _model_type != "original":
|
| 491 |
raise gr.Error(f"Cannot find any input face image!")
|
| 492 |
-
if len(prompt_array.splitlines()) >
|
| 493 |
-
raise gr.Error(f"No more than
|
| 494 |
global sa32, sa64,id_length,total_length,attn_procs,unet,cur_model_type,device
|
| 495 |
global write
|
| 496 |
global cur_step,attn_count
|
|
@@ -501,13 +519,24 @@ def process_generation(_sd_type,_model_type,_upload_images, _num_steps,style_nam
|
|
| 501 |
global sd_model_path,models_dict
|
| 502 |
sd_model_path = models_dict[_sd_type]
|
| 503 |
use_safe_tensor = True
|
|
|
|
|
|
|
| 504 |
if _model_type == "original":
|
| 505 |
-
pipe =
|
|
|
|
|
|
|
|
|
|
|
|
|
| 506 |
set_attention_processor(pipe.unet,id_length_,is_ipadapter = False)
|
| 507 |
elif _model_type == "Photomaker":
|
| 508 |
-
|
| 509 |
-
|
| 510 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 511 |
else:
|
| 512 |
raise NotImplementedError("You should choice between original and Photomaker!",f"But you choice {_model_type}")
|
| 513 |
##### ########################
|
|
@@ -569,10 +598,7 @@ def process_generation(_sd_type,_model_type,_upload_images, _num_steps,style_nam
|
|
| 569 |
captions = [caption.split('#')[-1] if "#" in caption else caption for caption in captions]
|
| 570 |
from PIL import ImageFont
|
| 571 |
total_results = get_comic(id_images + real_images, _comic_type,captions= captions,font=ImageFont.truetype("./fonts/Inkfree.ttf", int(45))) + total_results
|
| 572 |
-
if _model_type == "
|
| 573 |
-
pipe = pipe1.to("cpu")
|
| 574 |
-
set_attention_processor(pipe.unet,id_length_,is_ipadapter = False)
|
| 575 |
-
elif _model_type == "Photomaker":
|
| 576 |
pipe = pipe2.to("cpu")
|
| 577 |
pipe.id_encoder.to("cpu")
|
| 578 |
set_attention_processor(pipe.unet,id_length_,is_ipadapter = False)
|
|
|
|
| 40 |
use_va = True
|
| 41 |
models_dict = {
|
| 42 |
# "Juggernaut": "RunDiffusion/Juggernaut-XL-v8",
|
| 43 |
+
"RealVision": "SG161222/RealVisXL_V4.0" ,
|
| 44 |
# "SDXL":"stabilityai/stable-diffusion-xl-base-1.0" ,
|
| 45 |
"Unstable": "stablediffusionapi/sdxl-unstable-diffusers-y"
|
| 46 |
}
|
|
|
|
| 431 |
sd_model_path = models_dict["Unstable"]#"SG161222/RealVisXL_V4.0"
|
| 432 |
use_safetensors= False
|
| 433 |
### LOAD Stable Diffusion Pipeline
|
| 434 |
+
# pipe1 = StableDiffusionXLPipeline.from_pretrained(sd_model_path, torch_dtype=torch.float16, use_safetensors= use_safetensors)
|
| 435 |
+
# pipe1 = pipe1.to("cpu")
|
| 436 |
+
# pipe1.enable_freeu(s1=0.6, s2=0.4, b1=1.1, b2=1.2)
|
| 437 |
+
# # pipe.scheduler = DDIMScheduler.from_config(pipe.scheduler.config)
|
| 438 |
+
# pipe1.scheduler.set_timesteps(50)
|
| 439 |
###
|
| 440 |
pipe2 = PhotoMakerStableDiffusionXLPipeline.from_pretrained(
|
| 441 |
+
models_dict["Unstable"], torch_dtype=torch.float16, use_safetensors=use_safetensors)
|
| 442 |
pipe2 = pipe2.to("cpu")
|
| 443 |
pipe2.load_photomaker_adapter(
|
| 444 |
os.path.dirname(photomaker_path),
|
|
|
|
| 450 |
pipe2.enable_freeu(s1=0.6, s2=0.4, b1=1.1, b2=1.2)
|
| 451 |
pipe2.fuse_lora()
|
| 452 |
|
| 453 |
+
pipe4 = PhotoMakerStableDiffusionXLPipeline.from_pretrained(
|
| 454 |
+
models_dict["RealVision"], torch_dtype=torch.float16, use_safetensors=use_safetensors)
|
| 455 |
+
pipe4 = pipe4.to("cpu")
|
| 456 |
+
pipe4.load_photomaker_adapter(
|
| 457 |
+
os.path.dirname(photomaker_path),
|
| 458 |
+
subfolder="",
|
| 459 |
+
weight_name=os.path.basename(photomaker_path),
|
| 460 |
+
trigger_word="img" # define the trigger word
|
| 461 |
+
)
|
| 462 |
+
pipe4 = pipe4.to("cpu")
|
| 463 |
+
pipe4.enable_freeu(s1=0.6, s2=0.4, b1=1.1, b2=1.2)
|
| 464 |
+
pipe4.fuse_lora()
|
| 465 |
+
|
| 466 |
+
# pipe3 = StableDiffusionXLPipeline.from_pretrained("SG161222/RealVisXL_V4.0", torch_dtype=torch.float16)
|
| 467 |
+
# pipe3 = pipe3.to("cpu")
|
| 468 |
+
# pipe3.enable_freeu(s1=0.6, s2=0.4, b1=1.1, b2=1.2)
|
| 469 |
+
# # pipe.scheduler = DDIMScheduler.from_config(pipe.scheduler.config)
|
| 470 |
+
# pipe3.scheduler.set_timesteps(50)
|
| 471 |
######### Gradio Fuction #############
|
| 472 |
|
| 473 |
def swap_to_gallery(images):
|
|
|
|
| 507 |
raise gr.Error("Please add the triger word \" img \" behind the class word you want to customize, such as: man img or woman img")
|
| 508 |
if _upload_images is None and _model_type != "original":
|
| 509 |
raise gr.Error(f"Cannot find any input face image!")
|
| 510 |
+
if len(prompt_array.splitlines()) > 6:
|
| 511 |
+
raise gr.Error(f"No more than 6 prompts in huggface demo for Speed! But found {len(prompt_array)} prompts!")
|
| 512 |
global sa32, sa64,id_length,total_length,attn_procs,unet,cur_model_type,device
|
| 513 |
global write
|
| 514 |
global cur_step,attn_count
|
|
|
|
| 519 |
global sd_model_path,models_dict
|
| 520 |
sd_model_path = models_dict[_sd_type]
|
| 521 |
use_safe_tensor = True
|
| 522 |
+
if style_name == "(No style)":
|
| 523 |
+
sd_model_path = models_dict["RealVision"]
|
| 524 |
if _model_type == "original":
|
| 525 |
+
pipe = StableDiffusionXLPipeline.from_pretrained("SG161222/RealVisXL_V4.0", torch_dtype=torch.float16)
|
| 526 |
+
pipe = pipe.to(device)
|
| 527 |
+
pipe.enable_freeu(s1=0.6, s2=0.4, b1=1.1, b2=1.2)
|
| 528 |
+
# pipe.scheduler = DDIMScheduler.from_config(pipe.scheduler.config)
|
| 529 |
+
pipe.scheduler.set_timesteps(50)
|
| 530 |
set_attention_processor(pipe.unet,id_length_,is_ipadapter = False)
|
| 531 |
elif _model_type == "Photomaker":
|
| 532 |
+
if _sd_type != "RealVision":
|
| 533 |
+
pipe = pipe2.to(device)
|
| 534 |
+
pipe.id_encoder.to(device)
|
| 535 |
+
set_attention_processor(pipe.unet,id_length_,is_ipadapter = False)
|
| 536 |
+
else:
|
| 537 |
+
pipe = pipe4.to(device)
|
| 538 |
+
pipe.id_encoder.to(device)
|
| 539 |
+
set_attention_processor(pipe.unet,id_length_,is_ipadapter = False)
|
| 540 |
else:
|
| 541 |
raise NotImplementedError("You should choice between original and Photomaker!",f"But you choice {_model_type}")
|
| 542 |
##### ########################
|
|
|
|
| 598 |
captions = [caption.split('#')[-1] if "#" in caption else caption for caption in captions]
|
| 599 |
from PIL import ImageFont
|
| 600 |
total_results = get_comic(id_images + real_images, _comic_type,captions= captions,font=ImageFont.truetype("./fonts/Inkfree.ttf", int(45))) + total_results
|
| 601 |
+
if _model_type == "Photomaker":
|
|
|
|
|
|
|
|
|
|
| 602 |
pipe = pipe2.to("cpu")
|
| 603 |
pipe.id_encoder.to("cpu")
|
| 604 |
set_attention_processor(pipe.unet,id_length_,is_ipadapter = False)
|