import gradio as gr
import numpy as np
import random
import spaces
import torch
import requests
from diffusers import AutoencoderTiny
from custom_pipeline import FluxWithCFGPipeline

# Torch Optimizations
torch.backends.cuda.matmul.allow_tf32 = True
torch.backends.cudnn.benchmark = True

# Constants
MAX_SEED = np.iinfo(np.int32).max
DEFAULT_WIDTH = 1024
DEFAULT_HEIGHT = 576
ASPECT_RATIOS = {
    "16:9": (1024, 576),
    "1:1": (1024, 1024),
    "9:16": (576, 1024)
}
INFERENCE_STEPS = 8

# Device and Model Setup
dtype = torch.float16
device = "cuda" if torch.cuda.is_available() else "cpu"

print("⏳ Loading Flux pipeline...")
pipe = FluxWithCFGPipeline.from_pretrained("black-forest-labs/FLUX.1-schnell", torch_dtype=dtype)
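# Replace the default VAE with AutoencoderTiny (taef1) for much faster decoding at a small quality cost.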
pipe.vae = AutoencoderTiny.from_pretrained("madebyollin/taef1", torch_dtype=dtype)
pipe.to(device)
print("✅ Flux pipeline loaded.")

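# The translation endpoint below expects JSON of the form
#   {"from_language": "sq", "to_language": "en", "input_text": "..."}
# and (as the code assumes) responds with JSON containing a "translate" field holding the English text.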
# Plain HTTP call -- no GPU work happens here, so no @spaces.GPU decorator is needed.
def translate_albanian_to_english(text):
    """Translate Albanian to English using the sepioo-facebook-translation API (two attempts)."""
    if not text.strip():
        return ""
    for attempt in range(2):
        try:
            response = requests.post(
                "https://hal1993-mdftranslation1234567890abcdef1234567890-fc073a6.hf.space/v1/translate",
                json={"from_language": "sq", "to_language": "en", "input_text": text},
                headers={"accept": "application/json", "Content-Type": "application/json"},
                timeout=5
            )
            response.raise_for_status()
            translated = response.json().get("translate", "")
            print(f"Translation response: {translated}")
            return translated
        except Exception as e:
            print(f"Translation error (attempt {attempt + 1}): {e}")
            if attempt == 1:
                # "Përkthimi dështoi" = "Translation failed"; the caller checks this prefix.
                return f"Përkthimi dështoi: {e}"
    return "Përkthimi dështoi"  # defensive fallback; the loop above always returns

@spaces.GPU
def generate_image_from_albanian(prompt_albanian: str, aspect_ratio: str = "1:1"):
    """Translate Albanian prompt to English and generate image."""
    if not prompt_albanian.strip():
        return None

    # Translate the Albanian prompt to English before generation.
    prompt_english = translate_albanian_to_english(prompt_albanian)
    if prompt_english.startswith("Përkthimi dështoi"):  # translation failed
        return None

    if pipe is None:
        return None

    width, height = ASPECT_RATIOS.get(aspect_ratio, (DEFAULT_WIDTH, DEFAULT_HEIGHT))

    prompt_final = prompt_english + ", ultra realistic, sharp, 8k resolution"

    print(f"🎯 Prompt for generation: {prompt_final}")

    try:
        seed = random.randint(0, MAX_SEED)
        generator = torch.Generator(device=device).manual_seed(seed)
        with torch.inference_mode():
            images = pipe(
                prompt=prompt_final,
                width=width,
                height=height,
                num_inference_steps=INFERENCE_STEPS,
                generator=generator,
                output_type="pil",
                return_dict=False
            )
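        # With return_dict=False the pipeline returns a plain tuple; the first element holds the list of PIL images.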
        image = images[0][0]
        return image
    except Exception as e:
        print(f"Generation error: {e}")
        if torch.cuda.is_available():
            torch.cuda.empty_cache()  # release VRAM after a failed run
        return None

# UI Layout
def create_demo():
    with gr.Blocks() as app:
        gr.HTML("""
        <style>
        body::before {
            content: "";
            display: block;
            height: 320px;
            background-color: var(--body-background-fill);
        }
        button[aria-label="Fullscreen"], button[aria-label="Fullscreen"]:hover {
            display: none !important;
            visibility: hidden !important;
            opacity: 0 !important;
            pointer-events: none !important;
        }
        button[aria-label="Share"], button[aria-label="Share"]:hover {
            display: none !important;
        }
        button[aria-label="Download"] {
            transform: scale(3);
            transform-origin: top right;
            margin: 0 !important;
            padding: 6px !important;
        }
        </style>
        """)

        gr.Markdown("# Krijo Imazhe")
        gr.Markdown("Gjenero imazhe të reja nga përshkrimi yt me fuqinë e inteligjencës artificiale.")

        with gr.Column():
            prompt_albanian = gr.Textbox(label="Përshkrimi", placeholder="Shkruani përshkrimin këtu", lines=3)  # "Description" / "Write the description here"
            aspect_ratio = gr.Radio(label="Raporti i fotos", choices=list(ASPECT_RATIOS.keys()), value="1:1")  # "Photo aspect ratio"
            generate_btn = gr.Button("Gjenero")  # "Generate"
        
        with gr.Row():
            output_image = gr.Image(label="Imazhi i Gjeneruar", interactive=False)  # "Generated Image"

        generate_btn.click(
            fn=generate_image_from_albanian,
            inputs=[prompt_albanian, aspect_ratio],
            outputs=[output_image],
            show_progress="full"
        )

    return app

if __name__ == "__main__":
    print(f"Gradio version: {gr.__version__}")
    app = create_demo()
    app.launch()