Walid-Ahmed committed on
Commit
9bc086f
·
verified ·
1 Parent(s): d41174a

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +65 -36
app.py CHANGED
@@ -1,35 +1,34 @@
1
  import torch
2
  import gradio as gr
3
- import spaces # Import spaces for ZeroGPU support
4
  from functools import lru_cache
5
- from diffusers import StableDiffusionXLPipeline # βœ… Correct pipeline for text-to-image
6
 
7
- # LoRA model path on Hugging Face Hub
8
- color_book_lora_path = "artificialguybr/ColoringBookRedmond-V2"
9
- color_book_trigger = ", ColoringBookAF, Coloring Book"
 
10
 
11
- # Load model on CPU initially
12
- @lru_cache(maxsize=1)
13
- def load_pipeline(use_lora: bool):
14
- """Load Stable Diffusion XL pipeline and LoRA weights (if selected)."""
15
-
16
- # βœ… Use StableDiffusionXLPipeline for text-to-image generation
17
- pipe = StableDiffusionXLPipeline.from_pretrained(
18
- "stabilityai/stable-diffusion-xl-base-1.0",
19
- torch_dtype=torch.float16 if torch.cuda.is_available() else torch.float32,
20
- use_safetensors=True
21
- )
22
 
23
- # Keep the model on CPU until GPU is requested
24
- pipe.to("cpu")
 
 
 
25
 
26
- # Load LoRA if selected
27
- if use_lora:
28
- pipe.load_lora_weights(color_book_lora_path)
29
 
30
- return pipe
31
 
32
- # Define styles
 
 
 
 
 
33
  styles = {
34
  "Neonpunk": {
35
  "prompt": "neonpunk style, cyberpunk, vaporwave, neon, vibrant, stunningly beautiful, crisp, "
@@ -54,25 +53,46 @@ styles = {
54
  }
55
  }
56
 
57
- @spaces.GPU # ZeroGPU: Allocate GPU only when generating images
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
58
  def generate_image(prompt: str, style_name: str, use_lora: bool):
59
  """Generate an image using Stable Diffusion XL with optional LoRA fine-tuning."""
60
-
61
- # Load the pipeline (cached)
62
- pipeline = load_pipeline(use_lora)
63
 
64
- # Move model to GPU only when needed
 
65
  pipeline.to("cuda")
66
 
67
- # Get the selected style details
68
  style_prompt = styles.get(style_name, {}).get("prompt", "")
69
  negative_prompt = styles.get(style_name, {}).get("negative_prompt", "")
70
 
71
- # Apply LoRA trigger phrase if enabled
72
  if use_lora:
73
  prompt += color_book_trigger
74
 
75
- # βœ… Ensure text-to-image pipeline is used correctly
76
  image = pipeline(
77
  prompt=prompt + " " + style_prompt,
78
  negative_prompt="blurred, ugly, watermark, low resolution, " + negative_prompt,
@@ -80,12 +100,15 @@ def generate_image(prompt: str, style_name: str, use_lora: bool):
80
  guidance_scale=9.0
81
  ).images[0]
82
 
83
- # Move model back to CPU to free GPU resources
84
  pipeline.to("cpu")
85
 
86
  return image
87
 
88
- # Gradio Interface for Hugging Face Spaces (ZeroGPU-compatible)
 
 
 
89
  interface = gr.Interface(
90
  fn=generate_image,
91
  inputs=[
@@ -95,10 +118,16 @@ interface = gr.Interface(
95
  ],
96
  outputs=gr.Image(label="Generated Image"),
97
  title="🎨 AI Coloring Book & Style Generator",
98
- description="Generate AI-powered art using Stable Diffusion XL on Hugging Face Spaces. "
99
- "Choose a style or enable a LoRA fine-tuned coloring book effect."
 
 
 
100
  )
101
 
102
- # Run Gradio app for Hugging Face Spaces
 
 
 
103
  if __name__ == "__main__":
104
  interface.launch()
 
import torch
import gradio as gr
import spaces
from functools import lru_cache
from diffusers import StableDiffusionXLPipeline

# ===============================
# 🩹 FIX for Gradio bug (bool schema issue)
# ===============================
import gradio_client.utils as gu

# Monkey patch for "TypeError: argument of type 'bool' is not iterable".
# JSON Schema allows a schema to be the bare literal True/False, but older
# gradio_client builds assume every schema is a dict and probe it with
# `"const" in schema`, which raises for bools.
if not hasattr(gu, "_patched_json_schema_to_python_type"):
    orig_get_type = gu.get_type

    def safe_get_type(schema):
        """Delegate to the original get_type, tolerating non-dict schemas.

        A bare `True`/`False` JSON schema means "anything validates" /
        "nothing validates"; the closest Python type name is "Any".
        (The previous patch returned str(schema), which injected the
        bogus type strings "True"/"False" into generated signatures.)
        """
        if not isinstance(schema, dict):
            return "Any"
        return orig_get_type(schema)

    gu.get_type = safe_get_type
    # Sentinel so re-running this module doesn't wrap the patch twice.
    gu._patched_json_schema_to_python_type = True


# ===============================
# 🎨 Model and Styles Configuration
# ===============================
# LoRA adapter that turns SDXL output into coloring-book line art, plus the
# trigger phrase its model card requires in the prompt.
color_book_lora_path = "artificialguybr/ColoringBookRedmond-V2"
color_book_trigger = ", ColoringBookAF, Coloring Book"
32
  styles = {
33
  "Neonpunk": {
34
  "prompt": "neonpunk style, cyberpunk, vaporwave, neon, vibrant, stunningly beautiful, crisp, "
 
53
  }
54
  }
55
 
56
+
57
+ # ===============================
58
+ # πŸš€ Pipeline Loader (with caching)
59
+ # ===============================
60
+ @lru_cache(maxsize=1)
61
+ def load_pipeline(use_lora: bool):
62
+ """Load Stable Diffusion XL pipeline and optionally apply LoRA weights."""
63
+ pipe = StableDiffusionXLPipeline.from_pretrained(
64
+ "stabilityai/stable-diffusion-xl-base-1.0",
65
+ torch_dtype=torch.float16 if torch.cuda.is_available() else torch.float32,
66
+ use_safetensors=True
67
+ )
68
+ pipe.to("cpu")
69
+
70
+ if use_lora:
71
+ pipe.load_lora_weights(color_book_lora_path)
72
+
73
+ return pipe
74
+
75
+
76
+ # ===============================
77
+ # 🎨 Image Generation Function
78
+ # ===============================
79
+ @spaces.GPU # ZeroGPU: allocate GPU only when generating
80
  def generate_image(prompt: str, style_name: str, use_lora: bool):
81
  """Generate an image using Stable Diffusion XL with optional LoRA fine-tuning."""
 
 
 
82
 
83
+ # Load cached pipeline
84
+ pipeline = load_pipeline(use_lora)
85
  pipeline.to("cuda")
86
 
87
+ # Retrieve style info
88
  style_prompt = styles.get(style_name, {}).get("prompt", "")
89
  negative_prompt = styles.get(style_name, {}).get("negative_prompt", "")
90
 
91
+ # Add LoRA trigger if needed
92
  if use_lora:
93
  prompt += color_book_trigger
94
 
95
+ # Generate image
96
  image = pipeline(
97
  prompt=prompt + " " + style_prompt,
98
  negative_prompt="blurred, ugly, watermark, low resolution, " + negative_prompt,
 
100
  guidance_scale=9.0
101
  ).images[0]
102
 
103
+ # Move model back to CPU to release GPU
104
  pipeline.to("cpu")
105
 
106
  return image
107
 
108
+
109
+ # ===============================
110
+ # 🌐 Gradio Interface (for Spaces)
111
+ # ===============================
112
  interface = gr.Interface(
113
  fn=generate_image,
114
  inputs=[
 
118
  ],
119
  outputs=gr.Image(label="Generated Image"),
120
  title="🎨 AI Coloring Book & Style Generator",
121
+ description=(
122
+ "Generate AI-powered art using Stable Diffusion XL on Hugging Face Spaces. "
123
+ "Choose a style or enable a LoRA fine-tuned coloring book effect. "
124
+ "This app dynamically allocates GPU (ZeroGPU) only during generation."
125
+ )
126
  )
127
 
# ===============================
# 🏁 Launch App
# ===============================
def _run() -> None:
    """Start the Gradio server; blocks until the app is stopped."""
    interface.launch()


# Launch only when executed as a script, not when imported.
if __name__ == "__main__":
    _run()