Commit c8cb9bb · Parent: d8dc6b8
Update app.py

app.py CHANGED
@@ -1,5 +1,14 @@
 import gradio as gr
+import PIL
+from PIL import Image
+import numpy as np
+import os
+import uuid
+import torch
+from torch import autocast
+import cv2
 
+'''
 from io import BytesIO
 import requests
 import PIL
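Taken together, the new imports and the opening ''' show the direction of this commit: the Flask-era code below is being fenced off inside a module-level string while the app moves to Gradio, which decodes uploads itself. For orientation, here is a minimal sketch of how such a handler is typically exposed through Gradio (the stub, component labels, and wiring are illustrative assumptions, not part of this commit):

    import gradio as gr

    def model_process(init_image, mask):
        # Stub standing in for the real handler in app.py; Gradio hands
        # the decoded images straight to the function.
        return init_image

    demo = gr.Interface(
        fn=model_process,
        inputs=[gr.Image(label="image"), gr.Image(label="mask")],
        outputs=gr.Image(label="result"),
    )

    if __name__ == "__main__":
        demo.launch()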
@@ -98,11 +107,11 @@ def model_process(init_image, mask):
 
 def model_process(init_image, mask):
     global model
-
-    input = request.files
+
+    # input = request.files
     # RGB
-    origin_image_bytes = input["image"].read()
-
+    # origin_image_bytes = input["image"].read()
+
 
     print(f'liuyz_2_here_')
 
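Commenting out the request.files lines removes the only assignment to origin_image_bytes, yet get_image_ext(origin_image_bytes) is still called near the end of the function; if this region is ever pulled back out of the surrounding ''' string, that call would raise a NameError. A hedged way to bridge the gap, assuming init_image arrives from Gradio as a PIL image (the helper below is hypothetical):

    from io import BytesIO

    def pil_to_bytes(img, fmt="PNG"):
        # Re-encode the PIL image so byte-oriented helpers such as
        # get_image_ext() keep working without Flask's request.files.
        buf = BytesIO()
        img.save(buf, format=fmt)
        return buf.getvalue()

    # origin_image_bytes = pil_to_bytes(init_image)  # hypothetical replacement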
@@ -111,9 +120,9 @@ def model_process(init_image, mask):
     original_shape = init_image.shape
     interpolation = cv2.INTER_CUBIC
 
-
-    form = request.form
-
+
+    # form = request.form
+
     size_limit = 1080 # : Union[int, str] = form.get("sizeLimit", "1080")
     if size_limit == "Original":
         size_limit = max(image.shape)
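With form = request.form commented out, size_limit is pinned to the integer 1080 rather than read from form.get("sizeLimit", "1080"), so the == "Original" branch can no longer trigger. The resize step these lines feed presumably caps the longer image side; a sketch of that common pattern using the cv2.INTER_CUBIC interpolation selected above (an assumption about the omitted helper, not code from this commit):

    import cv2

    def resize_max_size(img, size_limit, interpolation=cv2.INTER_CUBIC):
        # Shrink so the longer side is at most size_limit; never upscale.
        h, w = img.shape[:2]
        scale = size_limit / max(h, w)
        if scale >= 1.0:
            return img
        new_size = (int(w * scale), int(h * scale))  # cv2 expects (width, height)
        return cv2.resize(img, new_size, interpolation=interpolation)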
@@ -173,16 +182,6 @@ def model_process(init_image, mask):
 
     ext = get_image_ext(origin_image_bytes)
     return ext
-    '''
-    response = make_response(
-        send_file(
-            io.BytesIO(numpy_to_bytes(res_np_img, ext)),
-            mimetype=f"image/{ext}",
-        )
-    )
-    response.headers["X-Seed"] = str(config.sd_seed)
-    return response
-    '''
 
 model = ModelManager(
     name='lama',
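The deleted block was the old Flask exit path: it wrapped the inpainted array in make_response(send_file(...)) and attached an X-Seed header. Under Gradio none of that plumbing is needed; the handler can return the image itself. A minimal sketch of the equivalent return, reusing res_np_img from the deleted code (the conversion helper is an assumption):

    from PIL import Image

    def to_output_image(res_np_img):
        # Gradio's Image output accepts a PIL image (or a numpy array)
        # directly; no response object or custom headers are required.
        return Image.fromarray(res_np_img)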
@@ -193,7 +192,7 @@ model = ModelManager(
     # sd_run_local=True,
     # callback=diffuser_callback,
 )
-
+'''
 
 '''
 pipe = DiffusionPipeline.from_pretrained("runwayml/stable-diffusion-inpainting", dtype=torch.float16, revision="fp16", use_auth_token=auth_token).to(device)
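One caveat if the commented-out diffusers line is ever revived: in the diffusers releases of this era, from_pretrained expects torch_dtype rather than dtype, so the half-precision request would not take effect as written. A hedged sketch of the corrected call (the model id and revision="fp16" come from the commented line; the device handling is assumed):

    import torch
    from diffusers import StableDiffusionInpaintPipeline

    device = "cuda" if torch.cuda.is_available() else "cpu"
    pipe = StableDiffusionInpaintPipeline.from_pretrained(
        "runwayml/stable-diffusion-inpainting",
        torch_dtype=torch.float16,  # 'torch_dtype', not 'dtype'
        revision="fp16",
        # use_auth_token=auth_token,  # as in the original line, for gated access
    ).to(device)
    # result = pipe(prompt=prompt, image=init_image, mask_image=mask).images[0]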