fixes update and time
app.py
CHANGED
@@ -3,7 +3,7 @@ import numpy as np
 import random
 import torch
 from diffusers import (
-    DiffusionPipeline, FluxPipeline, PixArtSigmaPipeline,
+    DiffusionPipeline, StableDiffusion3Pipeline, FluxPipeline, PixArtSigmaPipeline,
     AuraFlowPipeline, Kandinsky3Pipeline, HunyuanDiTPipeline,
     LuminaText2ImgPipeline
 )
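The added import is what the "Stable Diffusion 3.5" config in the next hunk depends on; StableDiffusion3Pipeline only exists in recent diffusers releases, so a guarded import (a sketch, not part of this commit) can fail fast with a clearer message on older installs:

# Sketch only, not part of this commit: fail fast with a clear message
# if the installed diffusers build predates StableDiffusion3Pipeline.
try:
    from diffusers import StableDiffusion3Pipeline
except ImportError as err:
    raise ImportError(
        "StableDiffusion3Pipeline needs a recent diffusers release; "
        "upgrade with: pip install -U diffusers"
    ) from err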
@@ -26,7 +26,7 @@ TORCH_DTYPE = torch.bfloat16 if torch.cuda.is_available() else torch.float32
 MODEL_CONFIGS = {
     "Stable Diffusion 3.5": {
         "repo_id": "stabilityai/stable-diffusion-3.5-large",
-        "pipeline_class":
+        "pipeline_class": StableDiffusion3Pipeline
     },
     "FLUX": {
         "repo_id": "black-forest-labs/FLUX.1-dev",
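For context, a minimal sketch of how an entry like the one above is typically consumed; load_pipeline is hypothetical (the app's real loader sits outside this diff), while MODEL_CONFIGS, TORCH_DTYPE and DEVICE are the app's own globals:

# Hypothetical loader showing how a MODEL_CONFIGS entry would be used;
# pipeline_class.from_pretrained and .to() are standard diffusers API.
def load_pipeline(model_name):
    config = MODEL_CONFIGS[model_name]
    pipe = config["pipeline_class"].from_pretrained(
        config["repo_id"],
        torch_dtype=TORCH_DTYPE,  # bfloat16 on CUDA, float32 otherwise
    )
    return pipe.to(DEVICE)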
@@ -162,7 +162,7 @@ def generate_image(
     seed = random.randint(0, MAX_SEED)

     generator = torch.Generator(DEVICE).manual_seed(seed)
-
+    print(f"Generating image with {model_name}...")
     progress(0.3, desc=f"Generating image with {model_name}...")

     # Generate image
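A sketch of the seed handling this hunk sits in, assuming the app's usual randomize_seed / MAX_SEED convention; pipe stands in for a loaded pipeline, the MAX_SEED value is an assumption, and only the print call is new in this commit:

import random
import torch

MAX_SEED = 2**32 - 1  # assumption; the app defines its own MAX_SEED
DEVICE = "cuda" if torch.cuda.is_available() else "cpu"

def run_seeded(pipe, prompt, seed, randomize_seed, guidance_scale, num_inference_steps):
    if randomize_seed:
        seed = random.randint(0, MAX_SEED)
    # a Generator pinned to a seed makes the run reproducible
    generator = torch.Generator(DEVICE).manual_seed(seed)
    image = pipe(
        prompt,
        generator=generator,
        guidance_scale=guidance_scale,
        num_inference_steps=num_inference_steps,
    ).images[0]
    return image, seed  # mirrors generate_image returning (image, used_seed)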
@@ -290,20 +290,22 @@ with gr.Blocks(css=css) as demo:
         return f"Current memory usage: System RAM: {memory_gb:.2f} GB"

     # Handle generation for each model
-    @spaces.GPU(duration=
+    @spaces.GPU(duration=600)
     def generate_all(prompt, negative_prompt, seed, randomize_seed, width, height, guidance_scale, num_inference_steps, progress=gr.Progress()):
         outputs = []
         for model_name in MODEL_CONFIGS.keys():
+            print(f"IMAGE GENERATING {model_name} ")
             try:
                 image, used_seed = generate_image(
                     model_name, prompt, negative_prompt, seed,
                     randomize_seed, width, height, guidance_scale,
                     num_inference_steps, progress
                 )
+                print(f"IMAGE GENERATED {model_name} {update_memory_usage()}")
                 outputs.extend([image, used_seed])

                 # Update memory usage after each model
-                memory_indicator.update(update_memory_usage())
+                #memory_indicator.update(update_memory_usage())

             except Exception as e:
                 outputs.extend([None, None])
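The duration=600 change follows the ZeroGPU pattern: @spaces.GPU requests a GPU for the decorated call and releases it afterwards, with duration setting the allocation window in seconds; a longer window matters here because the handler runs several large pipelines in sequence. A self-contained sketch (do_generate and load_pipeline are hypothetical):

import spaces

@spaces.GPU(duration=600)  # 600 s leaves room for several pipelines in a row
def do_generate(prompt):
    # CUDA is only guaranteed to be available inside the decorated call
    pipe = load_pipeline("Stable Diffusion 3.5")  # hypothetical loader sketched above
    return pipe(prompt).images[0]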
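Commenting out memory_indicator.update(...) matches how Gradio expects component updates to happen: calling .update() imperatively inside a handler does not push anything to the UI, and the instance method was removed in Gradio 4.x, where it raises an AttributeError at runtime (a plausible source of this Space's error status). The usual pattern, sketched here with a stand-in handler, is to return the new value and wire the component as an output:

import gradio as gr

def report_memory():
    # stand-in for the app's update_memory_usage(); returns the string to show
    return "Current memory usage: System RAM: 1.23 GB"

with gr.Blocks() as sketch:
    memory_indicator = gr.Markdown()
    refresh = gr.Button("Refresh")
    # Return the new value and route it through outputs= instead of calling
    # memory_indicator.update(...) inside the handler.
    refresh.click(report_memory, inputs=None, outputs=memory_indicator)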