Update app.py
app.py CHANGED
@@ -15,7 +15,7 @@ from diffusers.utils import load_image
 DESCRIPTION = """
 # [Fluently Playground](https://huggingface.co/fluently)
 
-
+[🦾 New FluentlyXL Final!](https://huggingface.co/fluently/Fluently-XL-Final)
 """
 if not torch.cuda.is_available():
     DESCRIPTION += "\n<p>Running on CPU 🥶 This demo may not work on CPU.</p>"
@@ -29,13 +29,13 @@ device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
 
 
 if torch.cuda.is_available():
-
-        "fluently/Fluently-
+    pipe_xl_final = StableDiffusionXLPipeline.from_pretrained(
+        "fluently/Fluently-XL-Final",
         torch_dtype=torch.float16,
         use_safetensors=True,
     )
-
-
+    pipe_xl_final.scheduler = EulerAncestralDiscreteScheduler.from_config(pipe_xl_final.scheduler.config)
+    pipe_xl_final.to(device)
 
     pipe_anime = StableDiffusionPipeline.from_pretrained(
         "fluently/Fluently-anime",
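For reference, a minimal standalone sketch of the loading pattern this hunk adds. The model id, dtype, safetensors flag, and scheduler swap come from the diff; the imports and device line mirror the file's existing setup and are otherwise standard diffusers usage.

import torch
from diffusers import EulerAncestralDiscreteScheduler, StableDiffusionXLPipeline

device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")

# Load the SDXL checkpoint in half precision from its safetensors weights.
pipe_xl_final = StableDiffusionXLPipeline.from_pretrained(
    "fluently/Fluently-XL-Final",
    torch_dtype=torch.float16,
    use_safetensors=True,
)

# Replace the default scheduler with Euler Ancestral, reusing the saved
# scheduler config so the checkpoint's settings carry over.
pipe_xl_final.scheduler = EulerAncestralDiscreteScheduler.from_config(
    pipe_xl_final.scheduler.config
)
pipe_xl_final.to(device)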
@@ -131,14 +131,14 @@ def generate(
 
     if not use_negative_prompt:
         negative_prompt = ""  # type: ignore
-    if model == "Fluently
-        images =
+    if model == "Fluently XL Final":
+        images = pipe_xl_final(
             prompt=prompt,
             negative_prompt=negative_prompt,
             width=width,
             height=height,
             guidance_scale=guidance_scale,
-            num_inference_steps=
+            num_inference_steps=25,
             num_images_per_prompt=1,
             output_type="pil",
         ).images
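Pulled out of generate(), the new branch looks like this as a standalone call. The parameter names and the fixed 25 steps come from the diff; the prompt, sizes, and guidance value below are illustrative placeholders, and pipe_xl_final is assumed to be loaded as in the sketch above.

# Assumes pipe_xl_final was created as in the loading sketch above.
images = pipe_xl_final(
    prompt="an astronaut riding a horse",  # illustrative placeholder
    negative_prompt="lowres, blurry",      # illustrative placeholder
    width=1024,
    height=1024,
    guidance_scale=5.5,       # placeholder; the app passes the UI slider value
    num_inference_steps=25,   # fixed at 25 for this model in the diff
    num_images_per_prompt=1,
    output_type="pil",        # return PIL images rather than tensors
).images
images[0].save("out.png")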
@@ -253,7 +253,7 @@ with gr.Blocks(title="Fluently Playground", css=css) as demo:
     with gr.Row():
         model = gr.Radio(
             label="Model",
-            choices=["Fluently XL
+            choices=["Fluently XL Final", "Fluently XL v4", "Fluently XL v3 Lightning", "Fluently Anime", "Fluently Epic", "Fluently XL v3 inpaint", "Fluently v4 inpaint"],
             value="Fluently XL v3 Lightning",
             interactive=True,
         )
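As a self-contained sketch, the updated selector looks like this. Gradio passes the chosen label string into generate() as its model argument, which the new `if model == "Fluently XL Final":` branch above matches on; the choices and default value are taken verbatim from the diff.

import gradio as gr

with gr.Blocks(title="Fluently Playground") as demo:
    # One radio button per model; the selected string is fed to generate().
    model = gr.Radio(
        label="Model",
        choices=[
            "Fluently XL Final",
            "Fluently XL v4",
            "Fluently XL v3 Lightning",
            "Fluently Anime",
            "Fluently Epic",
            "Fluently XL v3 inpaint",
            "Fluently v4 inpaint",
        ],
        value="Fluently XL v3 Lightning",  # default stays on v3 Lightning
        interactive=True,
    )

demo.launch()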