Update app.py
app.py CHANGED
@@ -88,7 +88,7 @@ upscaler = UpscaleWithModel.from_pretrained("Kim2091/ClearRealityV1").to(torch.d
 def scheduler_swap_callback(pipeline, step_index, timestep, callback_kwargs):
     # adjust the batch_size of prompt_embeds according to guidance_scale
     if step_index == int(pipeline.num_timesteps * 0.1):
-
+        # print("-- swapping scheduler --")
         # pipeline.scheduler = euler_scheduler
         torch.set_float32_matmul_precision("high")
         # pipe.vae = vae_b
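A note on the callback contract this hunk touches: diffusers invokes `callback_on_step_end` once per denoising step as `callback(pipeline, step_index, timestep, callback_kwargs)` and expects the `callback_kwargs` dict back. A minimal standalone sketch of a callback with this shape (not this repo's full version):

import torch

def scheduler_swap_callback(pipeline, step_index, timestep, callback_kwargs):
    # Runs once per step; 10% into sampling, relax matmul precision (TF32).
    if step_index == int(pipeline.num_timesteps * 0.1):
        torch.set_float32_matmul_precision("high")
    # diffusers expects the (possibly modified) kwargs dict to be returned.
    return callback_kwargs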
@@ -115,7 +115,7 @@ def scheduler_swap_callback(pipeline, step_index, timestep, callback_kwargs):
         # pipe.unet = unet_a
         torch.backends.cudnn.deterministic = False
         #pipe.unet.set_default_attn_processor()
-        print("-- swapping scheduler --")
+        #print("-- swapping scheduler --")
         # pipeline.scheduler = heun_scheduler
         #pipe.scheduler.set_timesteps(num_inference_steps*.70)
         # print(f"-- setting step {pipeline.num_timesteps * 0.9} --")
@@ -199,7 +199,7 @@ FTP_PASS = "GoogleBez12!"
 def scheduler_swap_callback(pipeline, step_index, timestep, callback_kwargs):
     # adjust the batch_size of prompt_embeds according to guidance_scale
     if step_index == int(pipeline.num_timesteps * 0.1):
-
+        # print("-- swapping scheduler --")
         # pipeline.scheduler = euler_scheduler
         torch.set_float32_matmul_precision("high")
         # pipe.vae = vae_b
@@ -225,7 +225,7 @@ def scheduler_swap_callback(pipeline, step_index, timestep, callback_kwargs):
         # pipe.vae = vae_a
         # pipe.unet = unet_a
         torch.backends.cudnn.deterministic = False
-
+        # print("-- swapping scheduler --")
         # pipeline.scheduler = heun_scheduler
         #pipe.scheduler.set_timesteps(num_inference_steps*.70)
         # print(f"-- setting step {pipeline.num_timesteps * 0.9} --")
@@ -275,6 +275,7 @@ def generate_30(
     num_inference_steps: int = 170,
     denoising_start: float = 0.0,
     denoising_end: float = 1.0,
+    strength: float = 0.3,
     use_resolution_binning: bool = True,
     progress=gr.Progress(track_tqdm=True) # Add progress as a keyword argument
 ):
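For reference on the new parameter: in diffusers' SDXL img2img pipeline, `strength` decides how much of the schedule actually runs when `denoising_start` is not given; when `denoising_start` is passed (as the later branches of this commit do), `strength` is documented to be ignored. A rough sketch of the step math, assuming diffusers' documented behavior:

def img2img_steps(num_inference_steps: int, strength: float) -> int:
    # e.g. 170 steps at strength 0.3 -> about 51 denoising steps actually run
    init_timestep = min(int(num_inference_steps * strength), num_inference_steps)
    return init_timestep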
@@ -285,6 +286,7 @@ def generate_30(
     #upload_to_ftp(filename)
     #uploadNote(prompt,num_inference_steps,guidance_scale,timestamp)
     sd_image_path = f"rv_L_{timestamp}.png"
+    # INITIAL GENERATE
     if denoising_start==0.0 and denoising_end!=1.0:
         generator = torch.Generator(device='cuda').manual_seed(seed)
         options = {
@@ -294,10 +296,9 @@ def generate_30(
             "width": width,
             "height": height,
             "guidance_scale": guidance_scale,
-            "num_inference_steps":
+            "num_inference_steps": num_inference_steps,
             "generator": generator,
             "output_type": "latent",
-            # "denoising_start": denoising_start, # for img2img
             "denoising_end": denoising_end,
             "callback_on_step_end": pyx.scheduler_swap_callback
         }
@@ -318,7 +319,7 @@ def generate_30(
         },
         sd_latent_path,
         )
-
+    # MIDDLE GENERATE
     if denoising_end!=1.0 and denoising_start!=0.0:
         latent_file = f'rv_L_{denoising_start}.pt'
         loaded_data = torch.load(latent_file)
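The MIDDLE and FINAL branches resume from latents the previous stage wrote to disk. A minimal sketch of that round-trip, assuming the saved object is a dict with a `latents` key (the key name is inferred from the surrounding code, not shown in this diff):

import torch

def save_stage_latents(latents: torch.Tensor, denoising_end: float) -> None:
    # Persist intermediate latents so a later call can pick up mid-schedule.
    torch.save({"latents": latents}, f"rv_L_{denoising_end}.pt")

def load_stage_latents(denoising_start: float, device: str = "cuda") -> torch.Tensor:
    loaded_data = torch.load(f"rv_L_{denoising_start}.pt")
    return loaded_data["latents"].to(device)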
@@ -333,30 +334,30 @@ def generate_30(
         generator = torch.Generator(device='cuda').manual_seed(seed)
         options = {
             "prompt": [prompt],
-
-
+            "image": loaded_latents, # for img2img
+            "strength": 0.3, # for img2img
             "negative_prompt": [negative_prompt],
             "negative_prompt_2": [neg_prompt_2],
             "width": width,
             "height": height,
             "guidance_scale": guidance_scale,
-            "num_inference_steps":
+            "num_inference_steps": num_inference_steps,
             "generator": generator,
             "latents": loaded_latents,
             "output_type": "latent",
-
+            "denoising_start": denoising_start, # for img2img
             "denoising_end": denoising_end,
             "callback_on_step_end": pyx.scheduler_swap_callback
         }
         if use_resolution_binning:
             options["use_resolution_binning"] = True
         batch_options = options.copy()
-
-
+        pipe2 = StableDiffusionXLImg2ImgPipeline.from_pipe(pipe)
+        rv_image = pipe2(**batch_options).images
         #indx=int(num_inference_steps*denoising_start)
         #pipe.scheduler.set_begin_index(indx)
         #pipe.scheduler._step_index = indx
-        rv_image = pipe(**batch_options).images
+        #rv_image = pipe(**batch_options).images
         sd_latent_path = f"rv_L_{denoising_end}.pt"
         # Encode the latents before saving
         #pipe.vae.to(torch.float32)
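Two things worth noting in this hunk: `StableDiffusionXLImg2ImgPipeline.from_pipe(pipe)` rebuilds an img2img pipeline around the components `pipe` already holds (no weights are reloaded), and the options dict hardcodes `"strength": 0.3` rather than using the new `strength` parameter. A minimal sketch of the `from_pipe` hand-off, assuming `pipe` is the already-loaded SDXL text2img pipeline:

from diffusers import StableDiffusionXLImg2ImgPipeline

def continue_denoising(pipe, loaded_latents, prompt, denoising_start, denoising_end):
    # Shares pipe's UNet/VAE/text encoders in place instead of reloading them.
    pipe2 = StableDiffusionXLImg2ImgPipeline.from_pipe(pipe)
    return pipe2(
        prompt=prompt,
        image=loaded_latents,             # 4-channel latents are accepted as input
        denoising_start=denoising_start,  # takes precedence over strength
        denoising_end=denoising_end,
        output_type="latent",
    ).images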
@@ -369,7 +370,7 @@ def generate_30(
         },
         sd_latent_path,
         )
-
+    # FINAL GENERATE
    if denoising_end==1.0 and denoising_start!=0.0:
         pyx.upload_to_ftp(filename)
         latent_file = f'rv_L_{denoising_start}.pt'
@@ -385,30 +386,30 @@ def generate_30(
         print("-- using latent file --")
         options = {
             "prompt": [prompt],
-
-
+            "image": loaded_latents, # for img2img
+            "strength": 0.3, # for img2img
             "negative_prompt": [negative_prompt],
             "negative_prompt_2": [neg_prompt_2],
             "width": width,
             "height": height,
             "guidance_scale": guidance_scale,
-            "num_inference_steps":
+            "num_inference_steps": num_inference_steps,
             "generator": generator,
             "latents": loaded_latents,
             "output_type": "pil",
-
+            "denoising_start": denoising_start, # for img2img
             "denoising_end": denoising_end,
             "callback_on_step_end": pyx.scheduler_swap_callback
         }
         if use_resolution_binning:
             options["use_resolution_binning"] = True
         batch_options = options.copy()
-
-
+        pipe2 = StableDiffusionXLImg2ImgPipeline.from_pipe(pipe)
+        rv_image = pipe2(**batch_options).images[0]
         #indx=int(num_inference_steps*denoising_start)
         #pipe.scheduler.set_begin_index(indx)
         #pipe.scheduler._step_index = indx
-        rv_image = pipe(**batch_options).images[0]
+        #rv_image = pipe(**batch_options).images[0]
         print("-- got image file --")
         rv_image.save(sd_image_path,optimize=False,compress_level=0)
         pyx.upload_to_ftp(sd_image_path)
@@ -419,7 +420,7 @@ def generate_30(
         downscale_path = f"rv_L_upscale_{timestamp}.png"
         downscale1.save(downscale_path,optimize=False,compress_level=0)
         pyx.upload_to_ftp(downscale_path)
-
+    # SINGLE RUN GENERATE
     if denoising_end==1.0 and denoising_start==0.0:
         generator = torch.Generator(device='cuda').manual_seed(seed)
         options = {
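With all four labeled branches in place, the two sliders effectively select a stage of one resumable generation. A small sketch of the dispatch they implement:

def pick_stage(denoising_start: float, denoising_end: float) -> str:
    # Mirrors the four if-branches labeled in this commit.
    if denoising_start == 0.0 and denoising_end != 1.0:
        return "INITIAL"     # text2img, save latents at denoising_end
    if denoising_start != 0.0 and denoising_end != 1.0:
        return "MIDDLE"      # load latents, denoise the middle span, save again
    if denoising_start != 0.0 and denoising_end == 1.0:
        return "FINAL"       # load latents, finish, decode to PIL, upload
    return "SINGLE RUN"      # start 0.0 and end 1.0: full pass in one call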
@@ -617,7 +618,7 @@ with gr.Blocks(theme=gr.themes.Origin(),css=css) as demo:
                     label="Guidance Scale",
                     minimum=0.1,
                     maximum=30,
-                    step=0.
+                    step=0.001,
                     value=3.8,
                 )
                 num_inference_steps = gr.Slider(
@@ -631,7 +632,7 @@ with gr.Blocks(theme=gr.themes.Origin(),css=css) as demo:
                     label="Denoising Start",
                     minimum=0.0,
                     maximum=0.9,
-                    step=0.
+                    step=0.05,
                     value=0.0,
                 )

@@ -639,9 +640,16 @@ with gr.Blocks(theme=gr.themes.Origin(),css=css) as demo:
                     label="Denoising End",
                     minimum=0.1,
                     maximum=1.0,
-                    step=0.
+                    step=0.05,
                     value=1.0,
                 )
+                strength = gr.Slider(
+                    label="Denoising Strength",
+                    minimum=0.1,
+                    maximum=1.0,
+                    step=0.01,
+                    value=0.3,
+                )


         gr.Examples(
@@ -674,6 +682,7 @@ with gr.Blocks(theme=gr.themes.Origin(),css=css) as demo:
             num_inference_steps,
             denoising_start,
             denoising_end,
+            strength,
         ],
         outputs=[result],
     )
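Gradio passes `inputs` to the handler positionally, so the new `strength` slider must occupy the same slot in this list as the `strength` parameter does in `generate_30`'s signature (right after `denoising_end`). A minimal self-contained sketch of the wiring, with placeholder components:

import gradio as gr

def generate(prompt, denoising_start, denoising_end, strength):
    return f"{prompt}: {denoising_start} -> {denoising_end} @ strength {strength}"

with gr.Blocks() as demo:
    prompt = gr.Textbox(label="Prompt")
    denoising_start = gr.Slider(minimum=0.0, maximum=0.9, step=0.05, value=0.0)
    denoising_end = gr.Slider(minimum=0.1, maximum=1.0, step=0.05, value=1.0)
    strength = gr.Slider(minimum=0.1, maximum=1.0, step=0.01, value=0.3)
    result = gr.Textbox(label="Result")
    run = gr.Button("Run")
    # The order of `inputs` must match the handler's positional parameters.
    run.click(generate,
              inputs=[prompt, denoising_start, denoising_end, strength],
              outputs=[result])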