# -*- coding: utf-8 -*-
# ZenCtrl Inpainting Playground (Baseten backend)
import os, json, base64, requests
import replicate
from io import BytesIO
from PIL import Image, ImageDraw
import gradio as gr
# ────────── Secrets & endpoints ──────────
BASETEN_MODEL_URL = "https://app.baseten.co/models/YOUR_MODEL_ID/predict"
BASETEN_API_KEY = os.getenv("BASETEN_API_KEY")
REPLICATE_TOKEN = os.getenv("REPLICATE_API_TOKEN")
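# NOTE: "YOUR_MODEL_ID" above is a placeholder; point BASETEN_MODEL_URL at your own deployment.
# Assumed request/response contract (it mirrors the call in process_image_and_text below): the
# endpoint accepts a JSON payload of base64-encoded PNGs and either streams image bytes back
# (content-type "image/*") or returns JSON of the form {"image_url": "<downloadable URL>"}.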
from florence_sam.detect_and_segment import fill_detected_bboxes
# ────────── Globals ──────────
ADAPTER_NAME = "inpaint"
ADAPTER_SIZE = 1024
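# model_config is not referenced by the Baseten call in this file; it is kept as a record of the
# adapter settings the hosted model is assumed to run with.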
model_config = dict(union_cond_attn=True, add_cond_attn=False,
                    latent_lora=False, independent_condition=False)
css = "#col-container {margin:0 auto; max-width:960px;}"
# ────────── Background prompt via Replicate ──────────
def _gen_bg(prompt: str):
    # The replicate client reads REPLICATE_API_TOKEN from the environment on its own;
    # REPLICATE_TOKEN above is read but not passed explicitly.
    url = replicate.run(
        "google/imagen-4-fast",
        input={"prompt": prompt or "cinematic background", "aspect_ratio": "1:1"},
    )
    url = url[0] if isinstance(url, list) else url
    return Image.open(BytesIO(requests.get(url, timeout=120).content)).convert("RGB")
# ────────── Core generation ──────────
def process_image_and_text(subject_image, adapter_dict, prompt,
                           use_detect=False, detect_prompt="",
                           size=ADAPTER_SIZE, rank=10.0):
    seed, guidance_scale, steps = 42, 2.5, 28
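    # Both branches below produce an "adapter image" in which the region to edit is filled with
    # solid #00FF00 green: either the Florence-SAM detections (inflated by 15%) or the
    # user-drawn sketch mask, snapped to its bounding box.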
    if use_detect:
        base_img = adapter_dict["image"] if isinstance(adapter_dict, dict) else adapter_dict
        if base_img is None:
            raise gr.Error("Upload a background image first.")
        adapter_image, _ = fill_detected_bboxes(
            image=base_img, text=detect_prompt,
            inflate_pct=0.15, fill_color="#00FF00"
        )
    else:
        adapter_image = adapter_dict["image"] if isinstance(adapter_dict, dict) else adapter_dict
        if isinstance(adapter_dict, dict) and adapter_dict.get("mask") is not None:
            m = adapter_dict["mask"].convert("L").point(lambda p: 255 if p else 0)
            if bbox := m.getbbox():
                rect = Image.new("L", m.size, 0)
                ImageDraw.Draw(rect).rectangle(bbox, fill=255)
                m = rect
            green = Image.new("RGB", adapter_image.size, "#00FF00")
            adapter_image = Image.composite(green, adapter_image, m)
    def prep(img: Image.Image):
        # Center-crop to a square, then resize to the adapter resolution.
        w, h = img.size
        m = min(w, h)
        return img.crop(((w-m)//2, (h-m)//2, (w+m)//2, (h+m)//2)).resize((size, size), Image.LANCZOS)

    subj_proc = prep(subject_image)
    adap_proc = prep(adapter_image)

    def b64(img):
        # PNG-encode, then base64-encode for the JSON payload.
        buf = BytesIO()
        img.save(buf, format="PNG")
        return base64.b64encode(buf.getvalue()).decode()
    payload = {
        "prompt": prompt,
        "subject_image": b64(subj_proc),
        "adapter_image": b64(adap_proc),
        "height": size, "width": size,
        "steps": steps, "seed": seed,
        "guidance_scale": guidance_scale, "rank": rank,
    }
    headers = {"Content-Type": "application/json"}
    if BASETEN_API_KEY:
        headers["Authorization"] = f"Api-Key {BASETEN_API_KEY}"
    resp = requests.post(BASETEN_MODEL_URL, headers=headers, json=payload, timeout=120)
    resp.raise_for_status()
    # The backend may return the image directly or a URL pointing to it.
    if resp.headers.get("content-type", "").startswith("image/"):
        raw_img = Image.open(BytesIO(resp.content))
    else:
        url = resp.json().get("image_url")
        if not url:
            raise gr.Error("Baseten response missing image data.")
        raw_img = Image.open(BytesIO(requests.get(url, timeout=120).content))
    # Return a single-image gallery plus the raw PIL image for raw_state.
    return [raw_img], raw_img
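# A minimal headless sketch of calling the generator outside the UI, assuming BASETEN_MODEL_URL
# points at a live deployment and the example files exist:
#
#   subject = Image.open("examples/subject1.png")
#   background = Image.open("examples/bg1.png")   # plain image, no mask: used as-is
#   gallery_items, result = process_image_and_text(subject, background, "furniture")
#   result.save("output.png")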
# ────────── Header HTML ──────────
header_html = """
<h1>ZenCtrl Inpainting</h1>
<div align="center" style="line-height:1;">
<a href="https://discord.com/invite/b9RuYQ3F8k" target="_blank" style="margin:10px;">
<img src="https://img.shields.io/badge/Discord-Join-7289da.svg?logo=discord" alt="Discord">
</a>
<a href="https://fotographer.ai/zen-control" target="_blank" style="margin:10px;">
<img src="https://img.shields.io/badge/Website-Landing_Page-blue" alt="LP">
</a>
<a href="https://x.com/FotographerAI" target="_blank" style="margin:10px;">
<img src="https://img.shields.io/twitter/follow/FotographerAI?style=social" alt="X">
</a>
</div>
"""
# ────────── Gradio UI ──────────
with gr.Blocks(css=css, title="ZenCtrl Playground") as demo:
    raw_state = gr.State()
    gr.HTML(header_html)
    gr.Markdown("""
**Generate context-aware images of your subject with ZenCtrl’s inpainting playground.**
Upload a subject + optional mask, write a prompt, and hit **Generate**.
Open *Advanced Settings* to fetch an AI-generated background.
""")
    with gr.Row():
        with gr.Column(scale=2, elem_id="col-container"):
            subj_img = gr.Image(type="pil", label="Subject image")
            # Gradio 3.x sketch input: returns {"image": ..., "mask": ...}, which is what process_image_and_text expects.
            ref_img = gr.Image(type="pil", label="Background / Mask image", tool="sketch", brush_color="#00FF00", source="upload")
            use_detect_ck = gr.Checkbox(False, label="Detect with Florence-SAM")
            detect_box = gr.Textbox(label="Detection prompt", value="person, chair", visible=False)
            promptbox = gr.Textbox(label="Generation prompt", value="furniture", lines=2)
            run_btn = gr.Button("Generate", variant="primary")
            with gr.Accordion("Advanced Settings", open=False):
                bgprompt = gr.Textbox(label="Background Prompt", value="Scandinavian living room …")
                bg_btn = gr.Button("Generate BG")
        with gr.Column(scale=2):
            gallery = gr.Gallery(columns=[1], rows=[1], object_fit="contain", height="auto")
            bg_img = gr.Image(label="Background", visible=False)
    gr.Examples(
        examples=[
            ["examples/subject1.png", "examples/bg1.png", "Make the toy sit on a marble table"],
            ["examples/subject2.png", "examples/bg2.png", "Turn the flowers into sunflowers"],
            ["examples/subject3.png", "examples/bg3.png", "Make this monster ride a skateboard on the beach"],
            ["examples/subject4.png", "examples/bg4.png", "Make this cat happy"],
        ],
        inputs=[subj_img, ref_img, promptbox],
        outputs=[gallery],
        # Wrapper drops the raw-image return value so the output count matches `outputs`.
        fn=lambda subj, ref, prompt: process_image_and_text(subj, ref, prompt)[0],
        examples_per_page=4,
        label="Presets (Input · Background · Prompt)",
        cache_examples="lazy"
    )
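    # With cache_examples="lazy", a preset's output is generated through the Baseten endpoint the
    # first time it is clicked rather than at startup.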
    run_btn.click(
        process_image_and_text,
        inputs=[subj_img, ref_img, promptbox, use_detect_ck, detect_box],
        outputs=[gallery, raw_state]
    )
    bg_btn.click(_gen_bg, inputs=[bgprompt], outputs=[bg_img])
    use_detect_ck.change(lambda v: gr.update(visible=v), inputs=use_detect_ck, outputs=detect_box)
# ────────── Launch ──────────
demo.launch(show_api=False, share=True)