import gradio as gr
import numpy as np
import random
import spaces
import torch
import requests
from diffusers import AutoencoderTiny
from custom_pipeline import FluxWithCFGPipeline
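# Note: FluxWithCFGPipeline is defined locally in custom_pipeline.py (presumably a CFG-enabled Flux pipeline variant).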
# Torch Optimizations
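# TF32 matmuls (Ampere+ GPUs) and cuDNN autotuning cost a little precision and warm-up time in exchange for faster inference.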
torch.backends.cuda.matmul.allow_tf32 = True
torch.backends.cudnn.benchmark = True
# Constants
MAX_SEED = np.iinfo(np.int32).max
DEFAULT_WIDTH = 1024
DEFAULT_HEIGHT = 576
ASPECT_RATIOS = {
    "16:9": (1024, 576),
    "1:1": (1024, 1024),
    "9:16": (576, 1024),
}
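# FLUX.1-schnell is distilled for few-step sampling; 8 steps trades some speed for extra detail.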
INFERENCE_STEPS = 8
# Device and Model Setup
dtype = torch.float16
device = "cuda" if torch.cuda.is_available() else "cpu"
print("⏳ Loading Flux pipeline...")
pipe = FluxWithCFGPipeline.from_pretrained("black-forest-labs/FLUX.1-schnell", torch_dtype=dtype)
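# taef1 is the Tiny AutoEncoder for FLUX.1: a small distilled VAE that decodes latents much faster at a slight quality cost.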
pipe.vae = AutoencoderTiny.from_pretrained("madebyollin/taef1", torch_dtype=dtype)
pipe.to(device)
print("✅ Flux pipeline loaded.")
@spaces.GPU
def translate_albanian_to_english(text):
    """Translate Albanian text to English using the sepioo-facebook-translation API."""
    if not text.strip():
        return ""
    # Retry once on transient network errors before giving up.
    for attempt in range(2):
        try:
            response = requests.post(
                "https://hal1993-mdftranslation1234567890abcdef1234567890-fc073a6.hf.space/v1/translate",
                json={"from_language": "sq", "to_language": "en", "input_text": text},
                headers={"accept": "application/json", "Content-Type": "application/json"},
                timeout=5,
            )
            response.raise_for_status()
            translated = response.json().get("translate", "")
            print(f"Translation response: {translated}")
            return translated
        except Exception as e:
            print(f"Translation error (attempt {attempt + 1}): {e}")
            if attempt == 1:
                return f"Përkthimi dështoi: {str(e)}"
    return "Përkthimi dështoi"
@spaces.GPU
def generate_image_from_albanian(prompt_albanian: str, aspect_ratio: str = "1:1"):
    """Translate an Albanian prompt to English and generate an image."""
    if not prompt_albanian.strip():
        return None
    # Translate the Albanian prompt to English behind the scenes.
    prompt_english = translate_albanian_to_english(prompt_albanian)
    if prompt_english.startswith("Përkthimi dështoi"):
        return None
    if pipe is None:
        return None
    width, height = ASPECT_RATIOS.get(aspect_ratio, (DEFAULT_WIDTH, DEFAULT_HEIGHT))
    prompt_final = prompt_english + ", ultra realistic, sharp, 8k resolution"
    print(f"🎯 Prompt for generation: {prompt_final}")
    try:
        seed = random.randint(0, MAX_SEED)
        generator = torch.Generator(device=device).manual_seed(seed)
        with torch.inference_mode():
            images = pipe(
                prompt=prompt_final,
                width=width,
                height=height,
                num_inference_steps=INFERENCE_STEPS,
                generator=generator,
                output_type="pil",
                return_dict=False,
            )
        image = images[0][0]
        return image
    except Exception as e:
        print(f"Generation error: {e}")
        if torch.cuda.is_available():
            torch.cuda.empty_cache()
        return None
# UI Layout
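# The inline CSS below pads the top of the page, hides Gradio's Fullscreen and Share buttons, and enlarges the Download button.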
def create_demo():
    with gr.Blocks() as app:
        gr.HTML("""
        <style>
        body::before {
            content: "";
            display: block;
            height: 320px;
            background-color: var(--body-background-fill);
        }
        button[aria-label="Fullscreen"], button[aria-label="Fullscreen"]:hover {
            display: none !important;
            visibility: hidden !important;
            opacity: 0 !important;
            pointer-events: none !important;
        }
        button[aria-label="Share"], button[aria-label="Share"]:hover {
            display: none !important;
        }
        button[aria-label="Download"] {
            transform: scale(3);
            transform-origin: top right;
            margin: 0 !important;
            padding: 6px !important;
        }
        </style>
        """)
        gr.Markdown("# Krijo Imazhe")
        gr.Markdown("Gjenero imazhe të reja nga përshkrimi yt me fuqinë e inteligjencës artificiale.")
        with gr.Column():
            prompt_albanian = gr.Textbox(label="Përshkrimi", placeholder="Shkruani përshkrimin këtu", lines=3)
            aspect_ratio = gr.Radio(label="Raporti i fotos", choices=list(ASPECT_RATIOS.keys()), value="1:1")
            generate_btn = gr.Button("Gjenero")
        with gr.Row():
            output_image = gr.Image(label="Imazhi i Gjeneruar", interactive=False)
        generate_btn.click(
            fn=generate_image_from_albanian,
            inputs=[prompt_albanian, aspect_ratio],
            outputs=[output_image],
            show_progress="full",
        )
    return app
if __name__ == "__main__":
    print(f"Gradio version: {gr.__version__}")
    app = create_demo()
    app.launch()
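# Quick smoke test without the UI (assumes a CUDA GPU, a reachable translation endpoint,
# and an Albanian prompt of your own in place of the placeholder):
#   img = generate_image_from_albanian("<përshkrimi në shqip>", "16:9")
#   if img is not None:
#       img.save("test.png")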