Spaces: Running on Zero

Update app.py

app.py CHANGED
@@ -218,6 +218,8 @@ def generate_30(
     pipe.text_encoder_2=text_encoder_2
     seed = random.randint(0, MAX_SEED)
     generator = torch.Generator(device='cuda').manual_seed(seed)
+    torch.cuda.empty_cache()
+    torch.cuda.reset_peak_memory_stats()
     if latent_file is not None: # Check if a latent file is provided
         sd_image_a = Image.open(latent_file.name).convert('RGB')
         sd_image_a.resize((height,width), Image.LANCZOS)
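The two added calls above are plain torch.cuda API. A minimal, self-contained sketch (not from app.py) of the intended effect: empty_cache() returns cached allocator blocks to the driver, and reset_peak_memory_stats() restarts the peak counter so max_memory_allocated() reports only the next stage.

import torch

def report_peak(label: str) -> None:
    # Peak allocation since the last reset_peak_memory_stats() call.
    print(f"{label}: peak {torch.cuda.max_memory_allocated() / 1e9:.2f} GB")

if torch.cuda.is_available():
    x = torch.randn(4096, 4096, device='cuda')
    y = x @ x                                # some GPU work
    report_peak("matmul stage")
    del x, y
    torch.cuda.empty_cache()                 # hand cached blocks back to the CUDA driver
    torch.cuda.reset_peak_memory_stats()     # zero the peak counter for the next stage
    report_peak("after reset")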
@@ -269,6 +271,8 @@ def generate_30(
     upload_to_ftp(filename)
     uploadNote(prompt,num_inference_steps,guidance_scale,timestamp)
     torch.set_float32_matmul_precision("medium")
+    torch.cuda.empty_cache()
+    torch.cuda.reset_peak_memory_stats()
     with torch.no_grad():
         upscale = upscaler(sd_image, tiling=True, tile_width=256, tile_height=256)
         downscale1 = upscale.resize((upscale.width // 4, upscale.height // 4), Image.LANCZOS)
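torch.set_float32_matmul_precision("medium") in the context lines above is a process-wide switch; the accepted values are "highest", "high", and "medium", trading float32 matmul accuracy for speed. A short illustrative sketch (standard PyTorch only, not from app.py):

import torch

torch.set_float32_matmul_precision("medium")    # fastest, least precise float32 matmuls
device = 'cuda' if torch.cuda.is_available() else 'cpu'
a = torch.randn(1024, 1024, device=device)
b = torch.randn(1024, 1024, device=device)
c = a @ b                                       # computed under the relaxed setting
torch.set_float32_matmul_precision("highest")   # restore exact float32 matmuls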
@@ -309,25 +313,28 @@ def generate_60(
     pipe.text_encoder_2=text_encoder_2
     seed = random.randint(0, MAX_SEED)
     generator = torch.Generator(device='cuda').manual_seed(seed)
+    torch.cuda.empty_cache()
+    torch.cuda.reset_peak_memory_stats()
     if latent_file is not None: # Check if a latent file is provided
-        sd_image_a = Image.open(latent_file.name)
+        sd_image_a = Image.open(latent_file.name).convert('RGB')
+        sd_image_a.resize((height,width), Image.LANCZOS)
     if latent_file_2 is not None: # Check if a latent file is provided
-        sd_image_b = Image.open(latent_file_2.name)
+        sd_image_b = Image.open(latent_file_2.name).convert('RGB')
         sd_image_b.resize((height,width), Image.LANCZOS)
     else:
         sd_image_b = None
     if latent_file_3 is not None: # Check if a latent file is provided
-        sd_image_c = Image.open(latent_file_3.name)
+        sd_image_c = Image.open(latent_file_3.name).convert('RGB')
         sd_image_c.resize((height,width), Image.LANCZOS)
     else:
         sd_image_c = None
     if latent_file_4 is not None: # Check if a latent file is provided
-        sd_image_d = Image.open(latent_file_4.name)
+        sd_image_d = Image.open(latent_file_4.name).convert('RGB')
         sd_image_d.resize((height,width), Image.LANCZOS)
     else:
         sd_image_d = None
     if latent_file_5 is not None: # Check if a latent file is provided
-        sd_image_e = Image.open(latent_file_5.name)
+        sd_image_e = Image.open(latent_file_5.name).convert('RGB')
         sd_image_e.resize((height,width), Image.LANCZOS)
     else:
         sd_image_e = None
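The image-loading changes above lean on two PIL details: Image.resize() returns a new image rather than resizing in place, and its size argument is a (width, height) tuple. As written, the bare sd_image_*.resize((height,width), ...) lines appear to discard their result. A hypothetical helper (not in app.py) that makes both points explicit:

from PIL import Image

def load_reference(path: str, width: int, height: int) -> Image.Image:
    """Open a file, force RGB, and return a resized copy."""
    img = Image.open(path).convert('RGB')
    # resize() does not modify img; the returned copy must be captured.
    return img.resize((width, height), Image.LANCZOS)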
@@ -359,6 +366,8 @@ def generate_60(
     upload_to_ftp(filename)
     uploadNote(prompt,num_inference_steps,guidance_scale,timestamp)
     torch.set_float32_matmul_precision("medium")
+    torch.cuda.empty_cache()
+    torch.cuda.reset_peak_memory_stats()
     with torch.no_grad():
         upscale = upscaler(sd_image, tiling=True, tile_width=256, tile_height=256)
         downscale1 = upscale.resize((upscale.width // 4, upscale.height // 4), Image.LANCZOS)
@@ -399,25 +408,28 @@ def generate_90(
     pipe.text_encoder_2=text_encoder_2
     seed = random.randint(0, MAX_SEED)
     generator = torch.Generator(device='cuda').manual_seed(seed)
+    torch.cuda.empty_cache()
+    torch.cuda.reset_peak_memory_stats()
     if latent_file is not None: # Check if a latent file is provided
-        sd_image_a = Image.open(latent_file.name)
+        sd_image_a = Image.open(latent_file.name).convert('RGB')
+        sd_image_a.resize((height,width), Image.LANCZOS)
     if latent_file_2 is not None: # Check if a latent file is provided
-        sd_image_b = Image.open(latent_file_2.name)
+        sd_image_b = Image.open(latent_file_2.name).convert('RGB')
         sd_image_b.resize((height,width), Image.LANCZOS)
     else:
         sd_image_b = None
     if latent_file_3 is not None: # Check if a latent file is provided
-        sd_image_c = Image.open(latent_file_3.name)
+        sd_image_c = Image.open(latent_file_3.name).convert('RGB')
         sd_image_c.resize((height,width), Image.LANCZOS)
     else:
         sd_image_c = None
     if latent_file_4 is not None: # Check if a latent file is provided
-        sd_image_d = Image.open(latent_file_4.name)
+        sd_image_d = Image.open(latent_file_4.name).convert('RGB')
         sd_image_d.resize((height,width), Image.LANCZOS)
     else:
         sd_image_d = None
     if latent_file_5 is not None: # Check if a latent file is provided
-        sd_image_e = Image.open(latent_file_5.name)
+        sd_image_e = Image.open(latent_file_5.name).convert('RGB')
         sd_image_e.resize((height,width), Image.LANCZOS)
     else:
         sd_image_e = None
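The five near-identical if/else blocks above (mirrored in generate_60) follow one pattern; a hypothetical refactor sketch, assuming the uploaded file objects expose .name exactly as in the diff:

from PIL import Image

def load_optional_references(files, width, height):
    """Map each optional upload to a resized RGB PIL image, or None if absent."""
    images = []
    for f in files:
        if f is None:
            images.append(None)
            continue
        img = Image.open(f.name).convert('RGB')
        images.append(img.resize((width, height), Image.LANCZOS))
    return images

# Hypothetical call site:
# sd_image_a, sd_image_b, sd_image_c, sd_image_d, sd_image_e = load_optional_references(
#     [latent_file, latent_file_2, latent_file_3, latent_file_4, latent_file_5], width, height)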
@@ -425,7 +437,6 @@ def generate_90(
     filename= f'rv_IP_{timestamp}.png'
     print("-- using image file --")
     print('-- generating image --')
-    #with torch.no_grad():
     sd_image = ip_model.generate(
         pil_image_1=sd_image_a,
         pil_image_2=sd_image_b,
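The hunk above removes a commented-out with torch.no_grad(): in front of the IP-Adapter call, while the upscaling hunks keep such a guard. As a general PyTorch pattern (independent of ip_model's internals), disabling autograd around pure inference skips graph construction and saves memory:

import torch

@torch.inference_mode()          # no autograd graph, no gradient buffers
def run_inference(model, batch):
    return model(batch)

# Equivalent context-manager form, as used around the upscaler in the next hunk:
# with torch.no_grad():
#     out = model(batch)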
@@ -450,6 +461,8 @@ def generate_90(
     upload_to_ftp(filename)
     uploadNote(prompt,num_inference_steps,guidance_scale,timestamp)
     torch.set_float32_matmul_precision("medium")
+    torch.cuda.empty_cache()
+    torch.cuda.reset_peak_memory_stats()
     with torch.no_grad():
         upscale = upscaler(sd_image, tiling=True, tile_width=256, tile_height=256)
         downscale1 = upscale.resize((upscale.width // 4, upscale.height // 4), Image.LANCZOS)