Update app.py
app.py CHANGED
@@ -20,9 +20,7 @@ from diffusers import AutoencoderKL
 from torch import Tensor, nn
 from transformers import CLIPTextModel, CLIPTokenizer
 from transformers import T5EncoderModel, T5Tokenizer
-from
-# from torch.profiler import profile, record_function, ProfilerActivity
-# from optimum.quanto import freeze, qfloat8, quantize
+from optimum.quanto import freeze, qfloat8, quantize


 # ---------------- Encoders ----------------
@@ -67,8 +65,8 @@ device = "cuda"
 t5 = HFEmbedder("google/t5-v1_1-xxl", max_length=512, torch_dtype=torch.bfloat16).to(device)
 clip = HFEmbedder("openai/clip-vit-large-patch14", max_length=77, torch_dtype=torch.bfloat16).to(device)
 ae = AutoencoderKL.from_pretrained("black-forest-labs/FLUX.1-dev", subfolder="vae", torch_dtype=torch.bfloat16).to(device)
-
-
+quantize(t5, weights=qfloat8)
+freeze(t5)


 # ---------------- NF4 ----------------
@@ -746,6 +744,7 @@ sd = load_file(hf_hub_download(repo_id="lllyasviel/flux1-dev-bnb-nf4", filename=
 sd = {k.replace("model.diffusion_model.", ""): v for k, v in sd.items() if "model.diffusion_model" in k}
 model = Flux().to(dtype=torch.bfloat16, device="cuda")
 result = model.load_state_dict(sd)
+model_zero_init = False
 print(result)

 # model = Flux().to(dtype=torch.bfloat16, device="cuda")
@@ -764,8 +763,10 @@ def generate_image(
     device = "cuda" if torch.cuda.is_available() else "cpu"
     torch_device = torch.device(device)

-    global model
-
+    global model, model_zero_init
+    if not model_zero_init:
+        model = model.to(torch_device)
+        model_zero_init = True

     if do_img2img and init_image is not None:
         init_image = get_image(init_image)
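The first two hunks delete a dangling `from` (a syntax error as written), import `optimum.quanto` for real instead of leaving it commented out, and then quantize the T5 encoder's weights to float8 and freeze them. A minimal sketch of the same quantize/freeze pattern, assuming `optimum-quanto` is installed; `TinyEncoder` is a stand-in for illustration, not the Space's `HFEmbedder`:

```python
import torch
from torch import nn
from optimum.quanto import freeze, qfloat8, quantize

class TinyEncoder(nn.Module):
    """Stand-in for the T5 encoder wrapper used in the Space."""
    def __init__(self):
        super().__init__()
        self.proj = nn.Linear(64, 64)

    def forward(self, x):
        return self.proj(x)

model = TinyEncoder()
quantize(model, weights=qfloat8)  # replace Linear weights with float8 qtensors
freeze(model)                     # materialize quantized weights, drop fp originals

with torch.no_grad():
    y = model(torch.randn(1, 64))
```

Freezing after quantization is what actually releases the full-precision copies, which is the point of the change: keeping the 4.7B-parameter T5 encoder resident on the GPU at reduced memory cost.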
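In the third hunk's context, the NF4 checkpoint is downloaded from lllyasviel/flux1-dev-bnb-nf4 and its keys are remapped before `load_state_dict`, because the checkpoint stores the transformer weights under a `model.diffusion_model.` prefix while the `Flux` module expects unprefixed parameter names. A sketch of that step in isolation; the diff truncates the real filename, so the one below is a hypothetical placeholder:

```python
from huggingface_hub import hf_hub_download
from safetensors.torch import load_file

path = hf_hub_download(
    repo_id="lllyasviel/flux1-dev-bnb-nf4",
    filename="flux1-dev-bnb-nf4.safetensors",  # hypothetical placeholder name
)
sd = load_file(path)

# Keep only the diffusion-model weights and strip their prefix, e.g.
# "model.diffusion_model.double_blocks.0..." -> "double_blocks.0..."
sd = {
    k.replace("model.diffusion_model.", ""): v
    for k, v in sd.items()
    if "model.diffusion_model" in k
}
```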
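The last hunk replaces the bare `global model` declaration with a guarded one-time `.to(torch_device)`: the module-level `model_zero_init` flag makes the first `generate_image` call pay for the device transfer and lets every later call skip it. The same pattern in isolation, with stand-in names:

```python
import torch
from torch import nn

model = nn.Linear(8, 8)   # stand-in for the globally constructed Flux model
model_zero_init = False   # module-level flag, as in the commit

def generate_image(x: torch.Tensor) -> torch.Tensor:
    global model, model_zero_init
    device = "cuda" if torch.cuda.is_available() else "cpu"
    if not model_zero_init:
        # Move the model once; subsequent calls reuse the device-resident copy.
        model = model.to(torch.device(device))
        model_zero_init = True
    return model(x.to(device))
```

Rebinding `model` inside the function is why both names must be declared `global`; without the flag, every request would re-issue the (cheap but not free) `.to()` call on the full model.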