fantaxy committed · Commit b870c2c · verified · 1 Parent(s): abb87aa

Update app.py

Files changed (1)
  app.py +44 -19
app.py CHANGED
@@ -1,8 +1,9 @@
 import spaces
 import time
 import os
+import tempfile
 
-# Prefer the CUDA provider in ONNX Runtime (applied to the insightface ORT sessions)
+# Try the ONNX Runtime CUDA provider (harmless even if it has no effect)
 os.environ.setdefault("INSIGHTFACE_ONNX_PROVIDERS", "CUDAExecutionProvider,CPUExecutionProvider")
 os.environ.setdefault("ORT_LOG_severity_level", "3")  # minimize ORT logging
 
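The environment variables above only matter if the installed onnxruntime build actually exposes the CUDA provider. A minimal startup check (not part of the commit) using the standard `onnxruntime.get_available_providers()` query:

```python
# Hypothetical startup check, not in app.py: confirm the requested ONNX Runtime
# providers exist in the installed build before assuming GPU execution.
import onnxruntime as ort

available = ort.get_available_providers()
requested = ["CUDAExecutionProvider", "CPUExecutionProvider"]
usable = [p for p in requested if p in available] or ["CPUExecutionProvider"]

print("available providers:", available)
print("will use:", usable)
```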
@@ -49,6 +50,14 @@ class FluxGenerator:
 flux_generator = FluxGenerator()
 
 
+def _save_pil(img: Image.Image, prefix: str = "out") -> str:
+    os.makedirs("/tmp", exist_ok=True)
+    ts = int(time.time() * 1000)
+    path = f"/tmp/{prefix}_{ts}.png"
+    img.save(path, format="PNG")
+    return path
+
+
 @spaces.GPU
 @torch.inference_mode()
 def generate_image(
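The new `_save_pil` helper writes to a hard-coded `/tmp`, while the commit also adds `import tempfile` at the top of the file. A hedged alternative sketch (hypothetical helper, not what the commit ships) that uses `tempfile` for unique, portable paths:

```python
import os
import tempfile

from PIL import Image


def _save_pil_tempfile(img: Image.Image, prefix: str = "out") -> str:
    # mkstemp creates a unique file and returns an open OS handle plus its path;
    # close the handle and let PIL write the PNG to that path.
    fd, path = tempfile.mkstemp(prefix=f"{prefix}_", suffix=".png")
    os.close(fd)
    img.save(path, format="PNG")
    return path
```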
@@ -97,8 +106,6 @@ def generate_image(
         id_embeddings = None
         uncond_id_embeddings = None
 
-    print(id_embeddings)
-
     # prepare input
     x = get_noise(
         1,
@@ -108,7 +115,6 @@ def generate_image(
         dtype=torch.bfloat16 if flux_generator.device.type == "cuda" else torch.float32,
         seed=opts.seed,
     )
-    print(x)
     timesteps = get_schedule(
         opts.num_steps,
         x.shape[-1] * x.shape[-2] // 4,
@@ -123,13 +129,11 @@ def generate_image(
     inp = prepare(t5=flux_generator.t5, clip=flux_generator.clip, img=x, prompt=opts.prompt)
     inp_neg = prepare(t5=flux_generator.t5, clip=flux_generator.clip, img=x, prompt=neg_prompt) if use_true_cfg else None
 
-    # offload TEs to CPU, load model to gpu
     if flux_generator.offload:
         flux_generator.t5, flux_generator.clip = flux_generator.t5.cpu(), flux_generator.clip.cpu()
         torch.cuda.empty_cache()
         flux_generator.model = flux_generator.model.to(flux_generator.device)
 
-    # denoise initial noise
     x = denoise(
         flux_generator.model,
         **inp,
@@ -146,13 +150,11 @@ def generate_image(
         neg_vec=inp_neg["vec"] if use_true_cfg else None,
     )
 
-    # offload model, load autoencoder to gpu
     if flux_generator.offload:
         flux_generator.model.cpu()
         torch.cuda.empty_cache()
         flux_generator.ae.decoder.to(x.device)
 
-    # decode latents to pixel space
     x = unpack(x.float(), opts.height, opts.width)
     with torch.autocast(
         device_type=flux_generator.device.type,
@@ -167,13 +169,16 @@ def generate_image(
     t1 = time.perf_counter()
     print(f"Done in {t1 - t0:.1f}s.")
 
-    # bring into PIL format
+    # tensor in [-1, 1] -> uint8 HWC
     x = x.clamp(-1, 1)
     x = rearrange(x[0], "c h w -> h w c")
     img = Image.fromarray((127.5 * (x + 1.0)).cpu().byte().numpy()).convert("RGB")
 
-    # Force-convert the debug images for the Gallery into a list of PIL images
-    debug = []
+    # Return the main image as a file path (avoids large base64 transfers)
+    out_path = _save_pil(img, "flux")
+
+    # Optionally downscale the debug gallery images and save them to files
+    debug_paths = []
     for it in (flux_generator.pulid_model.debug_img_list or []):
         try:
             if isinstance(it, Image.Image):
@@ -188,22 +193,41 @@ def generate_image(
                 if arr.dtype != np.uint8:
                     arr = np.clip(arr, 0, 255).astype(np.uint8)
                 pil = Image.fromarray(arr).convert("RGB")
-            debug.append(pil)
+            # Thumbnail to width 512
+            w, h = pil.size
+            if w > 512:
+                nh = int(h * (512 / w))
+                pil = pil.resize((512, nh), Image.BICUBIC)
+            debug_paths.append(_save_pil(pil, "debug"))
         except Exception:
             continue
 
-    return img, str(opts.seed), debug
+    return out_path, str(opts.seed), debug_paths
 
 
 def create_demo(args, model_name: str, device: str = "cuda" if torch.cuda.is_available() else "cpu", offload: bool = False):
-    with gr.Blocks(theme="soft") as demo:
+    custom_css = """
+    /* Force top padding so the HF toolbar/notch does not overlap the UI */
+    :root { scroll-padding-top: 72px; }
+    html, body { padding-top: env(safe-area-inset-top); }
+    .gradio-container { padding-top: 16px !important; overflow: visible !important; }
+    /* Keep the top badge area from being layered behind other elements */
+    #top-badges { position: relative; z-index: 3; }
+    /* Prevent top elements from being clipped on mobile */
+    @media (max-width: 640px) {
+      .gradio-container { padding-top: 20px !important; }
+    }
+    """
+
+    with gr.Blocks(theme="soft", css=custom_css) as demo:
+        # Spacer to reserve top margin (for environments where the toolbar covers content)
+        gr.HTML("<div style='height: 12px;'></div>")
         gr.HTML(
             """
-            <div class='container' style='display:flex; justify-content:center; gap:12px;'>
+            <div id="top-badges" class='container' style='display:flex; justify-content:center; gap:12px; margin-top:0;'>
             <a href="https://huggingface.co/spaces/openfree/Best-AI" target="_blank">
                 <img src="https://img.shields.io/static/v1?label=OpenFree&message=BEST%20AI%20Services&color=%230000ff&labelColor=%23000080&logo=huggingface&logoColor=%23ffa500&style=for-the-badge" alt="OpenFree badge">
             </a>
-
            <a href="https://discord.gg/openfreeai" target="_blank">
                 <img src="https://img.shields.io/static/v1?label=Discord&message=Openfree%20AI&color=%230000ff&labelColor=%23800080&logo=discord&logoColor=white&style=for-the-badge" alt="Discord badge">
             </a>
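The width-512 downscale added to the debug-gallery loop can also be written with PIL's built-in `Image.thumbnail`, which keeps the aspect ratio and never upscales. A small equivalent sketch (hypothetical helper, assuming the same 512-pixel width cap, not part of the commit):

```python
from PIL import Image


def _shrink_for_gallery(pil: Image.Image, max_width: int = 512) -> Image.Image:
    # thumbnail() mutates the image, so copy first; the very tall height bound
    # means only the width cap is effectively enforced, as in the diff.
    out = pil.copy()
    out.thumbnail((max_width, 10_000), Image.BICUBIC)
    return out
```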
@@ -239,10 +263,11 @@ def create_demo(args, model_name: str, device: str = "cuda" if torch.cuda.is_available() else "cpu", offload: bool = False):
                 generate_btn = gr.Button("Generate")
 
             with gr.Column():
-                output_image = gr.Image(label="Generated Image", type="pil", format="png")
+                # Send as a file path -> more reliable browser rendering
+                output_image = gr.Image(label="Generated Image", type="filepath", show_download_button=True)
                 seed_output = gr.Textbox(label="Used Seed")
                 intermediate_output = gr.Gallery(
-                    label="Output",
+                    label="Output (dev only)",
                     elem_id="gallery",
                     visible=args.dev,
                     allow_preview=True,
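With the output switched to `type="filepath"`, the click handler returns a path string for `gr.Image` and a list of paths for `gr.Gallery`, which is what the modified `generate_image` now produces. A minimal self-contained sketch of that wiring (hypothetical stand-alone demo, not the app itself):

```python
import tempfile

import gradio as gr
from PIL import Image


def fake_generate(seed: float):
    # Stand-in for generate_image(): render something, save it, return file paths.
    img = Image.new("RGB", (256, 256), color=(int(seed) % 256, 64, 128))
    with tempfile.NamedTemporaryFile(suffix=".png", delete=False) as f:
        img.save(f, format="PNG")
        path = f.name
    return path, str(int(seed)), [path]  # main image path, seed text, gallery paths


with gr.Blocks() as demo:
    seed = gr.Number(value=42, label="Seed")
    btn = gr.Button("Generate")
    out_img = gr.Image(label="Generated Image", type="filepath", show_download_button=True)
    seed_out = gr.Textbox(label="Used Seed")
    gallery = gr.Gallery(label="Output (dev only)")
    btn.click(fake_generate, inputs=[seed], outputs=[out_img, seed_out, gallery])

if __name__ == "__main__":
    demo.launch(ssr_mode=False)  # same launch flag as the commit
```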
@@ -326,6 +351,6 @@ if __name__ == "__main__":
         huggingface_hub.login(hf_token)
 
     demo = create_demo(args, args.name, args.device, args.offload)
-    # Disable SSR: reduces ClientDisconnect noise during uploads/tab switches and stabilizes rendering
+    # Disable SSR: stabilizes rendering
     demo.launch(ssr_mode=False)
 
 
 