Update app.py
app.py CHANGED
@@ -8,7 +8,7 @@ from tqdm import tqdm, trange
 from PIL import Image
 
 
-def random_clip(x, min=-1., max=1.):
+def random_clip(x, min=-1.3, max=1.3):
     if isinstance(x, np.ndarray):
         return np.clip(x, min, max)
     elif isinstance(x, torch.Tensor):
@@ -221,12 +221,14 @@ class DDIMSampler(Sampler):
                 axis=0)
         else:
             # take an evenly spaced subset of the steps
+            # print(noise_steps, 1, step)
             steps = np.linspace(noise_steps, 1, step)
+            # print("steps", len(steps))
+
             steps = np.floor(steps)
-            steps = np.concatenate((steps, steps[-1:]), axis=0)
+            # steps = np.concatenate((steps, steps[-1:]), axis=0)
 
         x_t = torch.tile(noised_latents, (batch_size, 1, 1, 1)).to(self.device)  # 32, 32
-        print("sample", steps)
         for i in trange(len(steps) - 1):
             x_t = self.sample(model, x_t, steps[i], steps[i + 1], eta)
 
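Note on this hunk: the step schedule is easy to sanity-check in isolation. Below is a minimal sketch, assuming illustrative values for noise_steps and step (only those two names come from the diff):

import numpy as np

noise_steps, step = 1000, 60  # illustrative values, not the Space's actual config

# `step` evenly spaced timesteps from noise_steps down to 1, floored to whole steps.
steps = np.floor(np.linspace(noise_steps, 1, step))

print(len(steps))           # 60 -> the sampling loop runs len(steps) - 1 = 59 transitions
print(steps[0], steps[-1])  # 1000.0 1.0

# The line the diff comments out appended a duplicate of the final step:
#     steps = np.concatenate((steps, steps[-1:]), axis=0)
# which produced one extra 1 -> 1 transition at the end of the sampling loop.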
@@ -499,8 +501,15 @@ def init_webui(unet, vae, normal_t):
                                     batch_size,
                                     step_value,
                                     eta=1.)
+        # print(step_value)
+        ss = 0
         for i in progress.tqdm(range(1, step_value + 1)):
-            output = next(looper)
+            try:
+                output = next(looper)
+                ss += 1
+            except StopIteration:
+                # print("StopIteration", ss)
+                break
 
         output = sampler.decode_img(vae, output)
         output = np.clip(output, 0, 255)
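Note on this hunk: the added try/except lets the progress loop stop cleanly when the sampler generator is exhausted before step_value iterations. A standalone sketch of the pattern, with a stand-in generator (fake_looper is invented for illustration; it is not the Space's sampler):

def fake_looper(n):
    # Stand-in for the sampler's generator: yields n intermediate results.
    for i in range(n):
        yield i

looper = fake_looper(3)
step_value = 5  # the UI requests more iterations than the generator will yield

output = None
for i in range(1, step_value + 1):
    try:
        output = next(looper)  # consume one sampling step
    except StopIteration:
        break                  # generator exhausted early; keep the last output

print(output)  # 2 -> the last value the generator produced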
@@ -534,11 +543,11 @@ def init_webui(unet, vae, normal_t):
                 # create the output component
                 output_images_u = gr.Gallery(show_label=False, height=400, columns=5)
                 gr.Examples(
-                    examples=[[
+                    examples=[[60, 4, "DDIM", 256, 255392]],  # 255392
                     inputs=[step_u, batch_size_u, sampler_name_u, img_size_u, ramdom_seed_u],
                     outputs=output_images_u,
                     fn=process_image_u,
-                    cache_examples=
+                    cache_examples=True,
                 )
         with gr.Tab(label="image to image"):
             with gr.Column():
@@ -566,7 +575,7 @@ def init_webui(unet, vae, normal_t):
                     inputs=[input_image, noise_step, step, batch_size, sampler_name, img_size, ramdom_seed],
                     outputs=output_images,
                     fn=process_image,
-                    cache_examples=
+                    cache_examples=True,
                 )
 
         start_button.click(process_image,
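Note on the two cache_examples changes: with cache_examples=True, Gradio runs the example rows through fn once when the app starts and serves the stored results on click instead of re-running the model. A minimal self-contained sketch (the double function and the Number components are invented for illustration; only gr.Examples and its keyword arguments mirror the diff):

import gradio as gr

def double(n):
    # Hypothetical stand-in for process_image_u / process_image.
    return n * 2

with gr.Blocks() as demo:
    num = gr.Number(label="n")
    out = gr.Number(label="2n")
    gr.Examples(
        examples=[[21]],      # one cached example row
        inputs=[num],
        outputs=[out],
        fn=double,
        cache_examples=True,  # run double(21) once at startup, then serve the cached result
    )

demo.launch()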