Spaces: Running on Zero

Single image output #3
by multimodalart (HF Staff) - opened

app.py CHANGED
@@ -145,7 +145,7 @@ pipe = QwenImageEditPipeline.from_pretrained("Qwen/Qwen-Image-Edit", torch_dtype
 MAX_SEED = np.iinfo(np.int32).max
 
 # --- Main Inference Function (with hardcoded negative prompt) ---
-@spaces.GPU(duration=
+@spaces.GPU(duration=120)
 def infer(
     image,
     prompt,
@@ -154,7 +154,6 @@ def infer(
     true_guidance_scale=1.0,
     num_inference_steps=50,
     rewrite_prompt=True,
-    num_images_per_prompt=1,
     progress=gr.Progress(track_tqdm=True),
 ):
     """
@@ -177,17 +176,17 @@ def infer(
         print(f"Rewritten Prompt: {prompt}")
 
     # Generate the image
-
+    images = pipe(
         image,
         prompt=prompt,
         negative_prompt=negative_prompt,
         num_inference_steps=num_inference_steps,
         generator=generator,
         true_cfg_scale=true_guidance_scale,
-        num_images_per_prompt=
+        num_images_per_prompt=1
     ).images
-
-    return
+
+    return images[0], seed
 
 # --- Examples and UI Layout ---
 examples = []
@@ -208,8 +207,7 @@ with gr.Blocks(css=css) as demo:
         with gr.Column():
             input_image = gr.Image(label="Input Image", show_label=False, type="pil")
 
-
-            result = gr.Gallery(label="Result", show_label=False, type="pil")
+            result = gr.Image(label="Result", show_label=False, type="pil")
             with gr.Row():
                 prompt = gr.Text(
                     label="Prompt",
@@ -250,14 +248,6 @@ with gr.Blocks(css=css) as demo:
                     value=50,
                 )
 
-                num_images_per_prompt = gr.Slider(
-                    label="Number of images per prompt",
-                    minimum=1,
-                    maximum=4,
-                    step=1,
-                    value=1,
-                )
-
         rewrite_prompt = gr.Checkbox(label="Rewrite prompt", value=True)
 
         # gr.Examples(examples=examples, inputs=[prompt], outputs=[result, seed], fn=infer, cache_examples=False)
@@ -273,7 +263,6 @@ with gr.Blocks(css=css) as demo:
            true_guidance_scale,
            num_inference_steps,
            rewrite_prompt,
-            num_images_per_prompt,
        ],
        outputs=[result, seed],
    )
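
For context, a minimal sketch of the single-image path this diff settles on: the pipeline is still asked for num_images_per_prompt=1, and only the first image is returned. The wrapper name edit_single_image, the bfloat16 dtype (the from_pretrained(...) line is truncated in the hunk header), and the placeholder negative prompt are assumptions for illustration, not taken from the Space's app.py.

import torch
from PIL import Image
from diffusers import QwenImageEditPipeline

# Load the editing pipeline; bfloat16 is an assumed dtype, since the
# from_pretrained(...) line is truncated in the diff above.
pipe = QwenImageEditPipeline.from_pretrained(
    "Qwen/Qwen-Image-Edit", torch_dtype=torch.bfloat16
).to("cuda")


def edit_single_image(
    image: Image.Image,
    prompt: str,
    seed: int = 0,
    true_guidance_scale: float = 1.0,
    num_inference_steps: int = 50,
) -> Image.Image:
    """Run one edit and return a single PIL image (what gr.Image expects)."""
    generator = torch.Generator(device="cuda").manual_seed(seed)
    images = pipe(
        image,
        prompt=prompt,
        negative_prompt=" ",  # placeholder; the Space hardcodes its own negative prompt
        num_inference_steps=num_inference_steps,
        generator=generator,
        true_cfg_scale=true_guidance_scale,
        num_images_per_prompt=1,  # request exactly one image
    ).images
    return images[0]  # first (and only) image, not the full list

Returning a single image rather than a list is what lets result switch from gr.Gallery to gr.Image: a Gallery expects a list of images, while an Image component expects one PIL image.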