MJaheen committed
Commit 6287e96 · 1 Parent(s): 1992682

Fix author


Initial commit - v0.1

.gitignore ADDED
@@ -0,0 +1,37 @@
+ # Python
+ __pycache__/
+ *.py[cod]
+ *.so
+ *.egg
+ *.egg-info/
+ dist/
+ build/
+ venv/
+ env/
+
+ # Models
+ models/
+ *.pth
+ *.bin
+ *.safetensors
+
+ # Outputs
+ outputs/
+ cache/
+
+ # IDE
+ .vscode/
+ .idea/
+ .DS_Store
+
+ # Logs
+ *.log
+ logs/
+
+ # Environment
+ .env
+
+ extra/
+ old_files/
+ scripts/
+ tests/
requirements.txt ADDED
@@ -0,0 +1,9 @@
+
+ transformers==4.45.1
+ diffusers==0.31.0
+ accelerate==0.34.2
+ safetensors==0.4.4
+ peft>=0.11.0
+ torch
+ Pillow
+ streamlit
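After installing these pinned packages (for example with pip install -r requirements.txt into a fresh virtual environment), a quick import check confirms the stack resolves. This is an illustrative sketch, separate from the files added in this commit:

# Illustrative sanity check: the pinned libraries import and report their versions.
import torch
import transformers
import diffusers

print(transformers.__version__)  # expected: 4.45.1
print(diffusers.__version__)     # expected: 0.31.0
print(torch.__version__)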
src/__init__.py ADDED
File without changes
src/app.py ADDED
@@ -0,0 +1,225 @@
+ """Pepe the Frog Meme Generator - Main Application"""
+
+ import streamlit as st
+ from PIL import Image
+ import io
+ from datetime import datetime
+
+ # Import our modules
+ from model.generator import PepeGenerator
+ from model.config import ModelConfig
+ from utils.image_processor import ImageProcessor
+
+ # Page config
+ st.set_page_config(
+     page_title="🐸 Pepe Meme Generator",
+     page_icon="🐸",
+     layout="wide",
+ )
+
+ # Custom CSS
+ st.markdown("""
+     <style>
+     .stButton>button {
+         width: 100%;
+         background-color: #4CAF50;
+         color: white;
+         height: 3em;
+         border-radius: 10px;
+         font-weight: bold;
+     }
+     .stButton>button:hover {
+         background-color: #45a049;
+     }
+     </style>
+ """, unsafe_allow_html=True)
+
+
+ def init_session_state():
+     """Initialize session state"""
+     if 'generated_images' not in st.session_state:
+         st.session_state.generated_images = []
+     if 'generation_count' not in st.session_state:
+         st.session_state.generation_count = 0
+
+
+ @st.cache_resource
+ def load_generator():
+     """Load and cache the generator"""
+     return PepeGenerator()
+
+
+ def get_example_prompts():
+     """Return example prompts"""
+     return [
+         "pepe the frog as a wizard casting spells",
+         "pepe the frog coding on a laptop",
+         "pepe the frog drinking coffee",
+         "pepe the frog as a superhero",
+         "pepe the frog reading a book",
+     ]
+
+
+ def main():
+     """Main application"""
+     init_session_state()
+
+     # Header
+     st.title("🐸 Pepe the Frog Meme Generator")
+     st.markdown("Create custom Pepe memes using AI! Powered by Stable Diffusion.")
+
+     # Sidebar
+     st.sidebar.header("⚙️ Settings")
+
+     # Style selection
+     style_options = {
+         "Default": "default",
+         "😊 Happy": "happy",
+         "😢 Sad": "sad",
+         "😏 Smug": "smug",
+         "😠 Angry": "angry",
+         "🤔 Thinking": "thinking",
+         "😲 Surprised": "surprised",
+     }
+
+     selected_style = st.sidebar.selectbox(
+         "Choose Style",
+         list(style_options.keys())
+     )
+     style = style_options[selected_style]
+
+     # Advanced settings
+     with st.sidebar.expander("🔧 Advanced Settings"):
+         steps = st.slider("Steps", 20, 100, 50, 5)
+         guidance = st.slider("Guidance Scale", 1.0, 20.0, 7.5, 0.5)
+         use_seed = st.checkbox("Fixed Seed")
+         seed = st.number_input("Seed", 0, 999999, 42) if use_seed else None
+
+     # Text overlay settings
+     with st.sidebar.expander("💬 Add Text"):
+         add_text = st.checkbox("Add Meme Text")
+         top_text = st.text_input("Top Text") if add_text else ""
+         bottom_text = st.text_input("Bottom Text") if add_text else ""
+
+     # Main area
+     col1, col2 = st.columns([1, 1])
+
+     with col1:
+         st.subheader("✏️ Create Your Meme")
+
+         # Prompt input
+         prompt = st.text_area(
+             "Describe your meme",
+             height=100,
+             placeholder="e.g., pepe the frog celebrating victory"
+         )
+
+         # Examples
+         with st.expander("💡 Example Prompts"):
+             for example in get_example_prompts():
+                 st.write(f"• {example}")
+
+         # Generate button
+         col_btn1, col_btn2 = st.columns([3, 1])
+         with col_btn1:
+             generate = st.button("🎨 Generate Meme", type="primary")
+         with col_btn2:
+             num_vars = st.number_input("Variations", 1, 4, 1)
+
+     with col2:
+         st.subheader("🖼️ Generated Meme")
+         placeholder = st.empty()
+
+         if st.session_state.generated_images:
+             placeholder.image(
+                 st.session_state.generated_images[-1],
+                 use_container_width=True
+             )
+         else:
+             placeholder.info("Your meme will appear here...")
+
+     # Generate
+     if generate and prompt:
+         try:
+             generator = load_generator()
+
+             progress = st.progress(0)
+             status = st.empty()
+
+             for i in range(num_vars):
+                 status.text(f"Generating {i+1}/{num_vars}...")
+                 progress.progress((i + 1) / num_vars)
+
+                 # Generate
+                 image = generator.generate(
+                     prompt=prompt,
+                     style=style,
+                     num_inference_steps=steps,
+                     guidance_scale=guidance,
+                     seed=seed
+                 )
+
+                 # Add text if requested
+                 if add_text and (top_text or bottom_text):
+                     processor = ImageProcessor()
+                     image = processor.add_meme_text(image, top_text, bottom_text)
+
+                 st.session_state.generated_images.append(image)
+                 st.session_state.generation_count += 1
+
+             progress.empty()
+             status.empty()
+
+             st.success("✅ Meme generated!")
+
+             # Show result
+             if num_vars == 1:
+                 placeholder.image(image, use_container_width=True)
+
+                 # Download
+                 buf = io.BytesIO()
+                 image.save(buf, format="PNG")
+                 st.download_button(
+                     "⬇️ Download",
+                     buf.getvalue(),
+                     f"pepe_{datetime.now().strftime('%Y%m%d_%H%M%S')}.png",
+                     "image/png"
+                 )
+             else:
+                 st.subheader("All Variations")
+                 cols = st.columns(min(num_vars, 2))
+                 for idx, img in enumerate(st.session_state.generated_images[-num_vars:]):
+                     with cols[idx % 2]:
+                         st.image(img, use_container_width=True)
+
+         except Exception as e:
+             st.error(f"Error: {str(e)}")
+
+     elif generate and not prompt:
+         st.error("Please enter a prompt!")
+
+     # Gallery
+     if st.session_state.generated_images:
+         st.divider()
+         with st.expander(f"🖼️ Gallery ({len(st.session_state.generated_images)} images)"):
+             cols = st.columns(4)
+             for idx, img in enumerate(reversed(st.session_state.generated_images[-8:])):
+                 with cols[idx % 4]:
+                     st.image(img, use_container_width=True)
+
+     # Footer
+     st.divider()
+     col_a, col_b, col_c = st.columns(3)
+     with col_a:
+         st.metric("Total Generated", st.session_state.generation_count)
+     with col_b:
+         st.metric("In Gallery", len(st.session_state.generated_images))
+     with col_c:
+         if st.button("🗑️ Clear"):
+             st.session_state.generated_images = []
+             st.session_state.generation_count = 0
+             st.rerun()
+
+
+ if __name__ == "__main__":
+     main()
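Usage note (inferred from the import layout rather than documented in the commit): because app.py imports model.generator and utils.image_processor as top-level packages, the app presumably has to be launched from inside src/, e.g. with streamlit run app.py.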
src/model/__init__.py ADDED
File without changes
src/model/config.py ADDED
@@ -0,0 +1,35 @@
+ """Configuration management for the meme generator"""
+
+ from dataclasses import dataclass
+ from typing import Optional
+
+
+ @dataclass
+ class ModelConfig:
+     """Model configuration parameters"""
+
+     # Model paths
+     BASE_MODEL: str = "runwayml/stable-diffusion-v1-5"
+     LORA_PATH: str = "./models/pepe_lora"
+
+     # Default generation parameters
+     DEFAULT_STEPS: int = 50
+     DEFAULT_GUIDANCE: float = 7.5
+     DEFAULT_WIDTH: int = 512
+     DEFAULT_HEIGHT: int = 512
+
+     # Negative prompt
+     DEFAULT_NEGATIVE_PROMPT: str = (
+         "blurry, low quality, distorted, deformed, "
+         "ugly, bad anatomy, watermark, signature"
+     )
+
+     # Performance
+     ENABLE_ATTENTION_SLICING: bool = True
+     ENABLE_VAE_SLICING: bool = True
+
+     # Available styles
+     AVAILABLE_STYLES: tuple = (
+         "default", "happy", "sad", "smug",
+         "angry", "thinking", "surprised"
+     )
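Because ModelConfig is a dataclass with defaults, per-run overrides can be passed at construction time. A minimal sketch (the overridden values below are illustrative, not from the commit):

from model.config import ModelConfig

# Default configuration
cfg = ModelConfig()
print(cfg.BASE_MODEL)      # runwayml/stable-diffusion-v1-5
print(cfg.DEFAULT_STEPS)   # 50

# Override individual fields for a faster, lower-guidance run (illustrative values)
fast_cfg = ModelConfig(DEFAULT_STEPS=25, DEFAULT_GUIDANCE=6.0)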
src/model/generator.py ADDED
@@ -0,0 +1,141 @@
+ """Pepe Meme Generator - Core generation logic"""
+
+ from typing import Optional, List
+ import torch
+ from diffusers import StableDiffusionPipeline, DPMSolverMultistepScheduler
+ import streamlit as st
+ from PIL import Image
+ import logging
+
+ from .config import ModelConfig
+
+ logger = logging.getLogger(__name__)
+
+
+ class PepeGenerator:
+     """Main generator class for creating Pepe memes"""
+
+     def __init__(self, config: Optional[ModelConfig] = None):
+         """Initialize the generator"""
+         self.config = config or ModelConfig()
+         self.device = self._get_device()
+         self.pipe = self._load_model()
+         logger.info(f"PepeGenerator initialized on {self.device}")
+
+     @staticmethod
+     @st.cache_resource
+     def _load_model() -> StableDiffusionPipeline:
+         """Load and cache the Stable Diffusion model"""
+         logger.info("Loading Stable Diffusion model...")
+
+         pipe = StableDiffusionPipeline.from_pretrained(
+             ModelConfig.BASE_MODEL,
+             # fp16 weights only make sense on GPU; fall back to fp32 on CPU
+             torch_dtype=torch.float16 if torch.cuda.is_available() else torch.float32,
+             safety_checker=None,
+         )
+
+         # Optimize scheduler
+         pipe.scheduler = DPMSolverMultistepScheduler.from_config(
+             pipe.scheduler.config
+         )
+
+         # Enable memory optimizations
+         pipe.enable_attention_slicing()
+         pipe.enable_vae_slicing()
+
+         if torch.cuda.is_available():
+             pipe = pipe.to("cuda")
+             try:
+                 pipe.enable_xformers_memory_efficient_attention()
+             except Exception:
+                 # xformers is optional; keep the default attention if unavailable
+                 pass
+
+         logger.info("Model loaded successfully")
+         return pipe
+
+     @staticmethod
+     def _get_device() -> str:
+         """Determine the best available device"""
+         return "cuda" if torch.cuda.is_available() else "cpu"
+
+     def generate(
+         self,
+         prompt: str,
+         style: str = "default",
+         negative_prompt: Optional[str] = None,
+         num_inference_steps: int = 50,
+         guidance_scale: float = 7.5,
+         seed: Optional[int] = None,
+         width: int = 512,
+         height: int = 512,
+     ) -> Image.Image:
+         """Generate a single Pepe meme image"""
+
+         # Apply style preset
+         enhanced_prompt = self._apply_style_preset(prompt, style)
+
+         # Set default negative prompt
+         if negative_prompt is None:
+             negative_prompt = self.config.DEFAULT_NEGATIVE_PROMPT
+
+         # Set seed for reproducibility
+         generator = None
+         if seed is not None:
+             generator = torch.Generator(device=self.device).manual_seed(seed)
+
+         logger.info(f"Generating: {enhanced_prompt[:50]}...")
+
+         # Generate image
+         with torch.autocast(self.device):
+             output = self.pipe(
+                 prompt=enhanced_prompt,
+                 negative_prompt=negative_prompt,
+                 num_inference_steps=num_inference_steps,
+                 guidance_scale=guidance_scale,
+                 generator=generator,
+                 width=width,
+                 height=height,
+             )
+
+         return output.images[0]
+
+     def generate_batch(
+         self,
+         prompt: str,
+         num_images: int = 4,
+         **kwargs
+     ) -> List[Image.Image]:
+         """Generate multiple variations"""
+         images = []
+         for i in range(num_images):
+             # Draw a fresh random seed for each variation unless one was supplied
+             if 'seed' not in kwargs:
+                 kwargs['seed'] = torch.randint(0, 100000, (1,)).item()
+
+             image = self.generate(prompt, **kwargs)
+             images.append(image)
+
+             # Drop the seed so the next variation does not repeat the same image
+             if 'seed' in kwargs:
+                 del kwargs['seed']
+
+         return images
+
+     @staticmethod
+     def _apply_style_preset(prompt: str, style: str) -> str:
+         """Apply style-specific prompt enhancements"""
+         style_modifiers = {
+             "happy": "cheerful, smiling, joyful",
+             "sad": "melancholic, crying, emotional",
+             "smug": "confident, satisfied, smirking",
+             "angry": "frustrated, mad, intense",
+             "thinking": "contemplative, philosophical",
+             "surprised": "shocked, amazed, wide eyes",
+         }
+
+         base = f"pepe the frog, {prompt}"
+
+         if style in style_modifiers:
+             base = f"{base}, {style_modifiers[style]}"
+
+         base = f"{base}, high quality, detailed, meme art"
+
+         return base
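Outside the Streamlit app, the generator can also be driven from a plain script. A minimal sketch, assuming the working directory is src/ so the package imports resolve (the prompts and output file name are illustrative):

from model.generator import PepeGenerator

generator = PepeGenerator()

# Single image with a fixed seed for reproducibility
image = generator.generate(
    "pepe the frog coding on a laptop",
    style="happy",
    num_inference_steps=30,
    seed=42,
)
image.save("pepe_happy.png")

# Four randomly seeded variations of the same prompt
variations = generator.generate_batch("pepe the frog drinking coffee", num_images=4)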
src/utils/__init__.py ADDED
File without changes
src/utils/image_processor.py ADDED
@@ -0,0 +1,91 @@
+ """Image processing utilities"""
+
+ from PIL import Image, ImageDraw, ImageFont, ImageEnhance
+ from typing import Optional, Tuple
+ import logging
+
+ logger = logging.getLogger(__name__)
+
+
+ class ImageProcessor:
+     """Handles image post-processing"""
+
+     @staticmethod
+     def add_meme_text(
+         image: Image.Image,
+         top_text: str = "",
+         bottom_text: str = "",
+         font_size: int = 40,
+         font_path: Optional[str] = None,
+     ) -> Image.Image:
+         """Add classic meme text to image"""
+
+         img = image.copy()
+         draw = ImageDraw.Draw(img)
+
+         # Load font
+         try:
+             if font_path:
+                 font = ImageFont.truetype(font_path, font_size)
+             else:
+                 font = ImageFont.truetype("impact.ttf", font_size)
+         except OSError:
+             # Requested font not found; fall back to PIL's built-in default font
+             font = ImageFont.load_default()
+             logger.warning("Using default font")
+
+         # Add top text
+         if top_text:
+             ImageProcessor._draw_text_with_outline(
+                 draw, top_text.upper(), (img.width // 2, 30), font
+             )
+
+         # Add bottom text
+         if bottom_text:
+             ImageProcessor._draw_text_with_outline(
+                 draw, bottom_text.upper(), (img.width // 2, img.height - 50), font
+             )
+
+         return img
+
+     @staticmethod
+     def _draw_text_with_outline(
+         draw: ImageDraw.ImageDraw,
+         text: str,
+         position: Tuple[int, int],
+         font: ImageFont.FreeTypeFont,
+         outline_width: int = 3,
+     ):
+         """Draw text with black outline"""
+         x, y = position
+
+         # Draw outline
+         for adj_x in range(-outline_width, outline_width + 1):
+             for adj_y in range(-outline_width, outline_width + 1):
+                 draw.text(
+                     (x + adj_x, y + adj_y),
+                     text,
+                     font=font,
+                     fill="black",
+                     anchor="mm"
+                 )
+
+         # Draw main text
+         draw.text(position, text, font=font, fill="white", anchor="mm")
+
+     @staticmethod
+     def enhance_image(
+         image: Image.Image,
+         sharpness: float = 1.2,
+         contrast: float = 1.1,
+     ) -> Image.Image:
+         """Apply enhancement filters"""
+
+         # Sharpen
+         enhancer = ImageEnhance.Sharpness(image)
+         image = enhancer.enhance(sharpness)
+
+         # Adjust contrast
+         enhancer = ImageEnhance.Contrast(image)
+         image = enhancer.enhance(contrast)
+
+         return image
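The post-processing helpers are plain static methods, so they can be exercised on any PIL image. A minimal sketch (the input file name and caption text are illustrative):

from PIL import Image
from utils.image_processor import ImageProcessor

img = Image.open("pepe_happy.png")        # any previously generated image
img = ImageProcessor.enhance_image(img)   # mild sharpen and contrast boost
img = ImageProcessor.add_meme_text(img, top_text="ME", bottom_text="WRITING TESTS")
img.save("pepe_meme.png")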