import streamlit as st
import os
import time
import gc
import hashlib

import numpy as np
import torch
from PIL import Image


class ImageGenerator:
    def __init__(self):
        self.model = None
        self.processor = None
        self.target_size = (512, 512)
        self.inference_steps = 30
        self.guidance_scale = 8.5
        self.aspect_ratio = "1:1"
        self.image_cache = {}
        self.vram_optimization = False
    def set_vram_optimization(self, enabled):
        """Enable or disable VRAM optimization techniques."""
        self.vram_optimization = enabled

    def set_aspect_ratio(self, aspect_ratio):
        """Set the aspect ratio for image generation."""
        self.aspect_ratio = aspect_ratio

    def set_target_size(self, size):
        """Set the target size for generated images."""
        self.target_size = size

    def set_inference_steps(self, steps):
        """Set the number of inference steps for image generation."""
        self.inference_steps = steps
    def get_size_for_aspect_ratio(self, base_size, aspect_ratio=None):
        """Calculate image dimensions for an aspect ratio, keeping roughly the pixel count of base_size."""
        if aspect_ratio is None:
            aspect_ratio = self.aspect_ratio

        base_pixels = base_size[0] * base_size[1]

        def round_to_multiple_of_8(value):
            # Stable Diffusion's VAE downsamples by a factor of 8, so both
            # dimensions must be divisible by 8 (merely even is not enough).
            return max(8, int(round(value / 8)) * 8)

        if aspect_ratio == "1:1":
            side = round_to_multiple_of_8(np.sqrt(base_pixels))
            return (side, side)
        elif aspect_ratio == "16:9":
            width = round_to_multiple_of_8(np.sqrt(base_pixels * 16 / 9))
            height = round_to_multiple_of_8(width * 9 / 16)
            return (width, height)
        elif aspect_ratio == "9:16":
            height = round_to_multiple_of_8(np.sqrt(base_pixels * 16 / 9))
            width = round_to_multiple_of_8(height * 9 / 16)
            return (width, height)
        else:
            # Unknown ratio: fall back to the caller-supplied size unchanged.
            return base_size
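
    # Worked example (values follow from the rounding above, shown here as an
    # illustration): with base_size=(512, 512), "16:9" yields (680, 384) and
    # "9:16" yields (384, 680); both dimensions are multiples of 8.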

    def load_model(self):
        """Load the image generation model with memory optimizations where available."""
        if self.model is None:
            with st.spinner("Loading image generation model..."):
                try:
                    # Free as much memory as possible before loading the pipeline.
                    gc.collect()
                    if torch.cuda.is_available():
                        torch.cuda.empty_cache()

                    from diffusers import StableDiffusionPipeline, DPMSolverMultistepScheduler

                    model_id = "CompVis/stable-diffusion-v1-4"
                    device = "cuda" if torch.cuda.is_available() else "cpu"
                    # float16 halves memory use on GPU, but most CPUs cannot run
                    # float16 inference, so fall back to float32 there.
                    dtype = torch.float16 if device == "cuda" else torch.float32

                    self.model = StableDiffusionPipeline.from_pretrained(
                        model_id,
                        torch_dtype=dtype,
                        safety_checker=None,
                        use_safetensors=True
                    )

                    # DPM-Solver++ reaches good quality in fewer steps than the default scheduler.
                    self.model.scheduler = DPMSolverMultistepScheduler.from_config(
                        self.model.scheduler.config,
                        algorithm_type="dpmsolver++",
                        solver_order=2
                    )

                    if device == "cuda" and self.vram_optimization:
                        # Sequential CPU offload keeps only the active sub-module on
                        # the GPU. It requires a CUDA device and replaces the explicit
                        # .to(device) move below.
                        self.model.enable_sequential_cpu_offload()
                    else:
                        self.model = self.model.to(device)

                    # Attention slicing trades a little speed for a large memory saving.
                    self.model.enable_attention_slicing(slice_size=1)

                    # xformers is optional; use its memory-efficient attention if installed.
                    try:
                        self.model.enable_xformers_memory_efficient_attention()
                    except Exception:
                        pass

                    # Tiled VAE decoding reduces peak memory when decoding larger images.
                    if hasattr(self.model, "vae") and hasattr(self.model.vae, "enable_tiling"):
                        self.model.vae.enable_tiling()
                except Exception as e:
                    st.error(f"Error loading image generation model: {str(e)}. Please try again with VRAM optimization enabled.")
                    self.model = None
        return self.model

    def generate_image(self, prompt, negative_prompt="blurry, bad quality, distorted, disfigured, low resolution, worst quality, deformed, text, watermark, writing, letters, numbers"):
        """Generate an image from a text prompt with optimized settings."""
        # Cap the step count; DPM-Solver++ gives good results in 20-30 steps,
        # and fewer steps also reduces memory pressure in VRAM-constrained runs.
        inference_steps = self.inference_steps
        if self.vram_optimization:
            inference_steps = min(inference_steps, 20)
        else:
            inference_steps = min(inference_steps, 30)

        # Cache on the prompt plus every setting that affects the output.
        cache_key = f"{hashlib.md5(prompt.encode()).hexdigest()}_{self.target_size}_{inference_steps}_{self.guidance_scale}_{self.aspect_ratio}"
        if cache_key in self.image_cache:
            return self.image_cache[cache_key]

        os.makedirs("temp", exist_ok=True)

        try:
            model = self.load_model()
            if model is not None:
                # Build the final prompt: add style and composition cues, strip
                # phrasings that coax the model into rendering text, then trim.
                enhanced_prompt = self.enhance_prompt_for_aspect_ratio(prompt)
                enhanced_prompt = self.clean_prompt_for_image_generation(enhanced_prompt)
                simplified_prompt = self.simplify_prompt(enhanced_prompt)

                gc.collect()
                if torch.cuda.is_available():
                    torch.cuda.empty_cache()

                with torch.no_grad():
                    # Cap guidance at 7.5; higher values tend to oversaturate
                    # and introduce artifacts with SD v1 checkpoints.
                    guidance_scale = min(self.guidance_scale, 7.5)

                    image = model(
                        prompt=simplified_prompt,
                        negative_prompt=negative_prompt,
                        num_inference_steps=inference_steps,
                        guidance_scale=guidance_scale,
                        width=min(self.target_size[0], 512),   # capped to bound memory use
                        height=min(self.target_size[1], 512)
                    ).images[0]

                output_path = f"temp/image_{int(time.time() * 1000)}.jpg"
                image = image.convert("RGB")
                image.save(output_path, format="JPEG", quality=95)

                # Verify the file decodes as a valid image before returning it.
                try:
                    test_load = Image.open(output_path)
                    test_load.verify()
                    test_load.close()
                except Exception as e:
                    st.error(f"Image verification failed: {str(e)}. Using fallback.")
                    return self.create_fallback_image(prompt)

                gc.collect()
                if torch.cuda.is_available():
                    torch.cuda.empty_cache()

                self.image_cache[cache_key] = output_path
                return output_path
            else:
                st.warning("Retrying with reduced settings...")
                return self.retry_with_reduced_settings(prompt)
        except Exception as e:
            st.error(f"Error generating image: {str(e)}. Retrying with reduced settings.")
            return self.retry_with_reduced_settings(prompt)

    def retry_with_reduced_settings(self, prompt):
        """Retry image generation with reduced settings for better compatibility."""
        try:
            gc.collect()
            if torch.cuda.is_available():
                torch.cuda.empty_cache()

            from diffusers import StableDiffusionPipeline

            model_id = "CompVis/stable-diffusion-v1-4"
            device = "cuda" if torch.cuda.is_available() else "cpu"
            dtype = torch.float16 if device == "cuda" else torch.float32

            # Load a fresh pipeline with the default scheduler and conservative settings.
            pipe = StableDiffusionPipeline.from_pretrained(
                model_id,
                torch_dtype=dtype,
                safety_checker=None,
                use_safetensors=True
            )
            pipe = pipe.to(device)
            pipe.enable_attention_slicing(slice_size=1)

            simple_prompt = self.simplify_prompt(prompt)

            image = pipe(
                prompt=simple_prompt,
                num_inference_steps=20,
                guidance_scale=7.0,
                width=512,
                height=512
            ).images[0]

            os.makedirs("temp", exist_ok=True)
            output_path = f"temp/retry_image_{int(time.time() * 1000)}.jpg"
            image = image.convert("RGB")
            image.save(output_path, format="JPEG", quality=95)

            # Verify the file decodes as a valid image before returning it.
            try:
                test_load = Image.open(output_path)
                test_load.verify()
                test_load.close()
            except Exception as e:
                st.error(f"Image verification failed: {str(e)}. Using fallback.")
                return self.create_fallback_image(prompt)

            return output_path
        except Exception as e:
            st.error(f"Final attempt failed: {str(e)}. Using fallback image.")
            return self.create_fallback_image(prompt)

    def simplify_prompt(self, prompt):
        """Simplify a prompt to its core elements for better compatibility."""
        # Keep only the first sentence and cap its length; overly long prompts
        # are truncated by the text encoder anyway.
        simple = prompt.split('.')[0].strip()
        if len(simple) > 100:
            simple = simple[:100]
        return f"{simple}, high quality, detailed"

    def clean_prompt_for_image_generation(self, prompt):
        """Clean the prompt to avoid patterns that might cause text rendering in images."""
        import re

        # Strip phrasings that ask the model to draw literal text.
        cleaned = prompt
        for pattern in (
            r'text\s+that\s+says',
            r'with\s+text',
            r'showing\s+text',
            r'displaying\s+text',
            r'with\s+the\s+words',
        ):
            cleaned = re.sub(pattern, '', cleaned, flags=re.IGNORECASE)

        # Drop quoted strings, which the model often tries to render verbatim.
        cleaned = re.sub(r'["\'].*?["\']', '', cleaned)

        # Reinforce the no-text instruction in the positive prompt as well.
        cleaned += ", no text, no words, no writing, no letters, no numbers, no watermark"
        return cleaned

    def enhance_prompt_for_aspect_ratio(self, prompt):
        """Enhance the prompt based on the selected aspect ratio."""
        base_enhancement = "hyper realistic, photo realistic, ultra detailed, hyper detailed textures, 8K resolution"

        lighting_options = [
            "golden hour glow", "moody overcast", "dramatic lighting",
            "soft natural light", "cinematic lighting", "film noir shadows"
        ]
        camera_effects = [
            "shallow depth of field", "motion blur", "film grain",
            "professional photography", "award winning photograph"
        ]
        environmental_details = [
            "atmospheric", "detailed environment", "rich textures",
            "detailed background", "immersive scene"
        ]

        # Seed a private RNG from a stable digest of the prompt so the same
        # prompt always picks the same styling. The builtin hash() is salted
        # per process, so it would not be reproducible across runs.
        import random
        rng = random.Random(hashlib.md5(prompt.encode()).hexdigest())

        selected_lighting = rng.choice(lighting_options)
        selected_effect = rng.choice(camera_effects)
        selected_detail = rng.choice(environmental_details)

        if self.aspect_ratio == "16:9":
            aspect_enhancement = "cinematic wide shot, landscape composition, panoramic view"
        elif self.aspect_ratio == "9:16":
            aspect_enhancement = "vertical composition, portrait framing, tall perspective"
        else:
            aspect_enhancement = "balanced composition, centered framing, square format"

        return f"{prompt}, {base_enhancement}, {selected_lighting}, {selected_effect}, {selected_detail}, {aspect_enhancement}"

    def create_fallback_image(self, prompt):
        """Create a fallback image when model generation fails."""
        from PIL import ImageDraw, ImageFont

        width, height = self.target_size
        image = Image.new('RGB', (width, height), color=(240, 240, 240))
        draw = ImageDraw.Draw(image)

        # Paint a light vertical gradient one full row per call;
        # drawing point-by-point gives the same result far more slowly.
        for y in range(height):
            r = int(240 * (1 - y / height))
            g = int(240 * (1 - y / height))
            b = int(255 * (1 - y / height * 0.5))
            draw.line([(0, y), (width, y)], fill=(r, g, b))

        try:
            # Arial is not installed everywhere; fall back to PIL's built-in font.
            font = ImageFont.truetype("Arial", 20)
        except OSError:
            font = ImageFont.load_default()

        # Wrap the prompt into lines using a rough ~10 px-per-character estimate.
        words = prompt.split()
        lines = []
        current_line = []
        for word in words:
            test_line = ' '.join(current_line + [word])
            if len(test_line) * 10 < width - 40:
                current_line.append(word)
            else:
                lines.append(' '.join(current_line))
                current_line = [word]
        if current_line:
            lines.append(' '.join(current_line))

        # Render at most eight lines so the text stays inside the image.
        y_position = height // 4
        for line in lines[:8]:
            draw.text((20, y_position), line, fill=(0, 0, 0), font=font)
            y_position += 30

        os.makedirs("temp", exist_ok=True)
        output_path = f"temp/fallback_{int(time.time() * 1000)}.png"
        image.save(output_path)
        return output_path

    def clear_cache(self):
        """Clear the image cache."""
        self.image_cache = {}
        return True
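

# ---------------------------------------------------------------------------
# Minimal usage sketch. This block is illustrative, not part of the original
# module: the widget labels and page flow below are assumptions about how the
# class might be wired into a Streamlit app.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    st.title("Text-to-Image Generator")

    # Keep one generator across Streamlit reruns so the loaded model and the
    # image cache survive widget interactions.
    if "generator" not in st.session_state:
        st.session_state.generator = ImageGenerator()
    generator = st.session_state.generator

    prompt = st.text_input("Prompt", "a lighthouse on a cliff at sunset")
    aspect = st.selectbox("Aspect ratio", ["1:1", "16:9", "9:16"])
    low_vram = st.checkbox("VRAM optimization", value=False)

    if st.button("Generate"):
        generator.set_aspect_ratio(aspect)
        generator.set_vram_optimization(low_vram)
        generator.set_target_size(generator.get_size_for_aspect_ratio((512, 512), aspect))
        path = generator.generate_image(prompt)
        if path:
            st.image(path, caption=prompt)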