import gc

import gradio as gr
import torch
from diffusers import (
    AutoPipelineForText2Image,
    FluxPipeline,
    LTXPipeline,
    TextToVideoZeroPipeline,
)
from diffusers.utils import export_to_video
from transformers import AutoModelForCausalLM, AutoTokenizer

# No need to log in again if HF_TOKEN is already set in the environment.


def load_image_model(model_choice):
    model_map = {
        "DreamShaper 8": "Lykon/dreamshaper-8",
        "NSFW RealVision": "SG161222/Realistic_Vision_V5.1_noVAE",
        "FLUX.1 (schnell)": "black-forest-labs/FLUX.1-schnell",
        "Dreamlike Photoreal": "dreamlike-art/dreamlike-photoreal-2.0",
        "Realistic Vision v2.0": "SG161222/Realistic_Vision_V2.0",
        "SDXL Lightning": "stablediffusionapi/sdxl-lightning",
    }
    model_id = model_map[model_choice]
    if model_choice == "FLUX.1 (schnell)":
        return FluxPipeline.from_pretrained(model_id, torch_dtype=torch.float32).to("cpu")
    # AutoPipelineForText2Image resolves the correct pipeline class per
    # checkpoint (SD 1.x vs. SDXL), so the SDXL Lightning entry loads too.
    return AutoPipelineForText2Image.from_pretrained(model_id, torch_dtype=torch.float32).to("cpu")


def load_video_model(model_choice):
    if model_choice == "Text2Video-Zero":
        return TextToVideoZeroPipeline.from_pretrained(
            "runwayml/stable-diffusion-v1-5", torch_dtype=torch.float32
        ).to("cpu")
    return LTXPipeline.from_pretrained("Lightricks/LTX-Video", torch_dtype=torch.float32).to("cpu")


def load_chat_model():
    tokenizer = AutoTokenizer.from_pretrained("lmsys/vicuna-7b-v1.5")
    model = AutoModelForCausalLM.from_pretrained(
        "lmsys/vicuna-7b-v1.5", torch_dtype=torch.float32
    ).to("cpu")
    return tokenizer, model


def gen_image(prompt, model_choice, style, resolution, guidance, steps, seed, num_images,
              progress=gr.Progress()):
    pipe = load_image_model(model_choice)
    full = f"{prompt}, in {style} style"
    height = width = 768 if resolution == "768x768" else 512
    torch.manual_seed(seed)  # seed once so a run is reproducible for a given seed
    images = []
    for _ in progress.tqdm(range(num_images), desc="Generating"):
        images.append(
            pipe(full, height=height, width=width, guidance_scale=guidance,
                 num_inference_steps=steps).images[0]
        )
    del pipe
    gc.collect()  # everything runs on CPU, so reclaim RAM rather than CUDA memory
    return images


def gen_video(prompt, model_choice, style, resolution, duration, fps, guidance, loop, steps,
              progress=gr.Progress()):
    pipe = load_video_model(model_choice)
    full = f"{prompt}, in {style} style"
    num_frames = int(duration * fps)
    if resolution == "768x768":
        width = height = 768
    elif resolution == "1024x576":
        width, height = 1024, 576
    else:
        width = height = 512
    if model_choice == "Text2Video-Zero":
        # t0/t1 must stay below num_inference_steps, so pass the step count through.
        result = pipe(prompt=full, video_length=num_frames, num_inference_steps=steps,
                      guidance_scale=guidance, t0=steps // 2, t1=steps // 2 + 1).images
        frames = [(r * 255).astype("uint8") for r in result]
    else:
        out = pipe(prompt=full, width=width, height=height, num_frames=num_frames,
                   num_inference_steps=steps, guidance_scale=guidance)
        frames = out.frames[0]
    del pipe
    gc.collect()
    # NOTE: `loop` is accepted from the UI but not yet applied to the output.
    return export_to_video(frames, fps=fps)


def chat_fn(user_msg, history, temp, top_p):
    tokenizer, chat_model = load_chat_model()
    # Vicuna v1.5 is tuned on a USER/ASSISTANT conversation template.
    full = "".join(f"USER: {u}\nASSISTANT: {a}\n" for u, a in history)
    full += f"USER: {user_msg}\nASSISTANT:"
    inputs = tokenizer(full, return_tensors="pt").to("cpu")
    # do_sample=True is required for temperature/top_p to take effect.
    outputs = chat_model.generate(**inputs, max_new_tokens=200, do_sample=True,
                                  temperature=temp, top_p=top_p)
    # Decode only the newly generated tokens, not the echoed prompt.
    reply = tokenizer.decode(outputs[0][inputs["input_ids"].shape[1]:],
                             skip_special_tokens=True).strip()
    history.append((user_msg, reply))
    del tokenizer, chat_model
    gc.collect()
    return "", history, history


with gr.Blocks(
    theme=gr.themes.Base(primary_hue="indigo", secondary_hue="fuchsia"),
    title="DreamDebris.Ai",
) as demo:
    gr.Markdown("## 🌌 DreamDebris.Ai — Free Image, Video & Chat Studio")

    with gr.Tab("🖼️ Image Generator"):
        with gr.Column():
            prompt = gr.Textbox(label="Prompt")
            model_choice = gr.Dropdown([
                "DreamShaper 8", "NSFW RealVision",
"FLUX.1 (schnell)", "Dreamlike Photoreal", "Realistic Vision v2.0", "SDXL Lightning" ], label="Model") style = gr.Dropdown(["Realistic", "Anime", "Fantasy", "Surreal", "Sketch", "Oil Painting"], label="Style") resolution = gr.Radio(["512x512", "768x768"], label="Resolution") with gr.Row(): with gr.Column(): guidance = gr.Slider(1, 20, value=7.5, label="Guidance") steps = gr.Slider(5, 100, value=30, step=5, label="Steps") with gr.Column(): seed = gr.Slider(0, 99999, value=42, step=1, label="Seed") num_images = gr.Slider(1, 4, value=1, step=1, label="Images") gen_i = gr.Button("Generate Image") out_i = gr.Gallery(label="Generated Images", columns=1, rows=4, object_fit="contain") gen_i.click(fn=gen_image, inputs=[prompt, model_choice, style, resolution, guidance, steps, seed, num_images], outputs=out_i) with gr.Tab("🎬 Video Generator"): with gr.Column(): vprompt = gr.Textbox(label="Prompt") vmodel = gr.Radio(["Text2Video-Zero", "LTX‑Video"], label="Model") vstyle = gr.Dropdown(["Dreamy", "Cartoon", "Sci‑fi", "Cinematic"], label="Style") vres = gr.Radio(["512x512", "768x768", "1024x576"], label="Resolution") with gr.Row(): with gr.Column(): vdur = gr.Slider(2, 20, value=8, step=1, label="Duration (s)") vfps = gr.Slider(4, 24, value=12, step=2, label="FPS") with gr.Column(): vguidance = gr.Slider(1, 20, value=8, step=0.5, label="Guidance") vloop = gr.Checkbox(label="Loop") vsteps = gr.Slider(5, 100, value=30, step=5, label="Steps") gen_v = gr.Button("Generate Video") out_v = gr.Video(label="Generated Video") gen_v.click(fn=gen_video, inputs=[vprompt, vmodel, vstyle, vres, vdur, vfps, vguidance, vloop, vsteps], outputs=out_v) with gr.Tab("💬 DreamBot Chat"): chatbot = gr.Chatbot(label="DreamBot") msg = gr.Textbox(placeholder="Talk to DreamBot...") temp = gr.Slider(0.1, 1.0, value=0.7, label="Temperature") top_p = gr.Slider(0.1, 1.0, value=0.9, label="Top-p") state = gr.State([]) msg.submit(chat_fn, [msg, state, temp, top_p], [msg, chatbot, state]) demo.launch()