nsfwalex committed 2a1c625 (verified) · 1 Parent(s): 79ca56b

Update app.py

Files changed (1): app.py +3 -4
app.py CHANGED
@@ -8,7 +8,7 @@ import uuid
 from PIL import Image
 from huggingface_hub import snapshot_download
 from diffusers import AutoencoderKL
-from diffusers import StableDiffusionXLPipeline, EulerDiscreteScheduler, AutoPipelineForText2Image, DiffusionPipeline
+from diffusers import StableDiffusionXLPipeline
 from diffusers import EulerAncestralDiscreteScheduler, DPMSolverMultistepScheduler, DPMSolverSDEScheduler
 from diffusers.models.attention_processor import AttnProcessor2_0
 import torch
@@ -16,7 +16,6 @@ from typing import Tuple
 from datetime import datetime
 import requests
 import torch
-from diffusers import DiffusionPipeline
 import importlib
 import re
 from urllib.parse import urlparse
@@ -57,12 +56,12 @@ def load_pipeline_and_scheduler():
     ckpt_dir = snapshot_download(repo_id=cfg["model_id"])
 
     # Load the models
-    vae = AutoencoderKL.from_pretrained(os.path.join(ckpt_dir, "vae"), torch_dtype=torch.float16)
+    vae = AutoencoderKL.from_pretrained(os.path.join(ckpt_dir, "vae"), dtype=torch.float16)
 
     pipe = StableDiffusionXLPipeline.from_pretrained(
         ckpt_dir,
         vae=vae,
-        torch_dtype=torch.float16,
+        dtype=torch.float16,
         use_safetensors=True,
         #variant="fp16"
     )
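
Pulled together from the hunks above, here is a minimal, self-contained sketch of what the loading path looks like after this commit. The cfg placeholder, the example model id, and the return statement are illustrative assumptions only (cfg is defined elsewhere in app.py), and whether from_pretrained accepts the dtype keyword used here or the older torch_dtype depends on the installed diffusers version.

import os
import torch
from huggingface_hub import snapshot_download
from diffusers import AutoencoderKL, StableDiffusionXLPipeline

# Placeholder config; in app.py, cfg is defined elsewhere with the real repo id.
cfg = {"model_id": "stabilityai/stable-diffusion-xl-base-1.0"}

def load_pipeline_and_scheduler():
    # Download the full checkpoint repo from the Hub.
    ckpt_dir = snapshot_download(repo_id=cfg["model_id"])

    # Load the VAE and the SDXL pipeline in fp16. The commit renames
    # torch_dtype to dtype; keep torch_dtype if your diffusers version
    # does not accept dtype.
    vae = AutoencoderKL.from_pretrained(os.path.join(ckpt_dir, "vae"), dtype=torch.float16)

    pipe = StableDiffusionXLPipeline.from_pretrained(
        ckpt_dir,
        vae=vae,
        dtype=torch.float16,
        use_safetensors=True,
        # variant="fp16"
    )
    return pipe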