Spaces: Running on Zero

Commit: add examples
Files changed:
- .gitattributes +1 -0
- .gitignore +2 -1
- app.py +70 -65
- example/example_01.png +3 -0
- example/example_02.png +3 -0
- example/example_03.png +3 -0
- example/example_04.png +3 -0
.gitattributes CHANGED

@@ -33,3 +33,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
 *.zip filter=lfs diff=lfs merge=lfs -text
 *.zst filter=lfs diff=lfs merge=lfs -text
 *tfevents* filter=lfs diff=lfs merge=lfs -text
+*.png filter=lfs diff=lfs merge=lfs -text
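The added rule stores the new example PNGs in Git LFS rather than in the regular git object store; it is the same line that `git lfs track "*.png"` appends to .gitattributes.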
.gitignore CHANGED

@@ -15,4 +15,5 @@ debugs/
 models
 !*/models
 .ipynb_checkpoints
-checkpoints
+checkpoints
+gradio_cached_examples
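gradio_cached_examples is the directory where Gradio stores the outputs it precomputes when an Examples component is created with cache_examples=True (as app.py does below), so it is ignored to keep generated media out of the repo.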
app.py CHANGED

@@ -39,6 +39,58 @@ savedir = os.path.join(basedir, "samples/Gradio", datetime.now().strftime
 savedir_sample = os.path.join(savedir, "sample")
 os.makedirs(savedir, exist_ok=True)
 
+EXAMPLES = [ # prompt, first frame, width, height, center crop, seed
+    ["timelapse at the snow land with aurora in the sky.", "example/example_01.png"],
+    ["fireworks.", "example/example_02.png"],
+    ["clown fish swimming through the coral reef.", "example/example_03.png"],
+    ["melting ice cream dripping down the cone.", "example/example_04.png"],
+]
+
+EXAMPLES_HIDDEN = {
+    "timelapse at the snow land with aurora in the sky.": ["example/example_01.png", 256, 256, True, 21800],
+    "fireworks.": ["example/example_02.png", 256, 256, True, 21800],
+    "clown fish swimming through the coral reef.": ["example/example_03.png", 256, 256, True, 21800],
+    "melting ice cream dripping down the cone.": ["example/example_04.png", 256, 256, True, 21800]
+}
+
+def update_and_resize_image(input_image_path, height_slider, width_slider, center_crop):
+    if input_image_path.startswith("http://") or input_image_path.startswith("https://"):
+        pil_image = Image.open(requests.get(input_image_path, stream=True).raw).convert('RGB')
+    else:
+        pil_image = Image.open(input_image_path).convert('RGB')
+    original_width, original_height = pil_image.size
+
+    if center_crop:
+        crop_aspect_ratio = width_slider / height_slider
+        aspect_ratio = original_width / original_height
+        if aspect_ratio > crop_aspect_ratio:
+            new_width = int(crop_aspect_ratio * original_height)
+            left = (original_width - new_width) / 2
+            top = 0
+            right = left + new_width
+            bottom = original_height
+            pil_image = pil_image.crop((left, top, right, bottom))
+        elif aspect_ratio < crop_aspect_ratio:
+            new_height = int(original_width / crop_aspect_ratio)
+            top = (original_height - new_height) / 2
+            left = 0
+            right = original_width
+            bottom = top + new_height
+            pil_image = pil_image.crop((left, top, right, bottom))
+
+    pil_image = pil_image.resize((width_slider, height_slider))
+    return gr.Image(value=np.array(pil_image))
+
+
+def get_examples(prompt_textbox, input_image):
+    input_image_path = EXAMPLES_HIDDEN[prompt_textbox][0]
+    width_slider = EXAMPLES_HIDDEN[prompt_textbox][1]
+    height_slider = EXAMPLES_HIDDEN[prompt_textbox][2]
+    center_crop = EXAMPLES_HIDDEN[prompt_textbox][3]
+    seed_textbox = EXAMPLES_HIDDEN[prompt_textbox][4]
+    input_image = update_and_resize_image(input_image_path, height_slider, width_slider, center_crop)
+    return prompt_textbox, input_image, input_image_path, width_slider, height_slider, center_crop, seed_textbox
+
 # config models
 pipeline = ConditionalAnimationPipeline.from_pretrained("TIGER-Lab/ConsistI2V", torch_dtype=torch.float16)
 pipeline.to("cuda")
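Note on this hunk: the visible EXAMPLES rows carry only a prompt and a first-frame image; get_examples then uses the prompt as a key into EXAMPLES_HIDDEN to restore the matching width, height, center-crop flag, and seed. The center-crop arithmetic in update_and_resize_image is easiest to follow on a concrete input, so here is a minimal sketch of the same math on a hypothetical 1280x720 source and a 256x256 target (illustrative values only, not part of the app):

    from PIL import Image

    # Hypothetical 1280x720 source; a 256x256 target gives crop_aspect_ratio = 1.0.
    pil_image = Image.new("RGB", (1280, 720))
    width_slider, height_slider = 256, 256

    crop_aspect_ratio = width_slider / height_slider   # 1.0
    aspect_ratio = 1280 / 720                          # ~1.78, wider than the target

    # Source is wider than the target: keep the full height, take a centered
    # 720x720 crop, then resize down to the requested resolution.
    new_width = int(crop_aspect_ratio * 720)           # 720
    left = (1280 - new_width) / 2                      # 280.0
    pil_image = pil_image.crop((left, 0, left + new_width, 720))
    pil_image = pil_image.resize((width_slider, height_slider))
    print(pil_image.size)                              # (256, 256)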
@@ -89,6 +141,14 @@ def animate(
     frame_init_noise_level,
     seed_textbox
 ):
+    width_slider = int(width_slider)
+    height_slider = int(height_slider)
+    frame_stride = int(frame_stride)
+    sample_step_slider = int(sample_step_slider)
+    txt_cfg_scale_slider = float(txt_cfg_scale_slider)
+    img_cfg_scale_slider = float(img_cfg_scale_slider)
+    frame_init_noise_level = int(frame_init_noise_level)
+
     if pipeline is None:
         raise gr.Error(f"Please select a pretrained pipeline path.")
     if input_image_path == "":
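The casts at the top of animate read as a guard for the example path: values replayed from a cached example row are not guaranteed to arrive with the sliders' native types (presumably strings or floats read back from the cache), so the handler normalizes them before use. A toy illustration of the failure mode being prevented (hypothetical values, not the app's own data):

    # Hypothetical raw values as a cached example row might deliver them.
    raw_width, raw_steps, raw_noise_level = "256", 50.0, "250"

    # The same normalization animate() now performs up front:
    width_slider = int(raw_width)                  # "256" -> 256
    sample_step_slider = int(raw_steps)            # 50.0  -> 50
    frame_init_noise_level = int(raw_noise_level)  # "250" -> 250
    print(width_slider, sample_step_slider, frame_init_noise_level)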
@@ -190,44 +250,6 @@ def animate(
 
     return gr.Video(value=save_sample_path)
 
-
-# @spaces.GPU
-# def run_pipeline(
-#     pipeline,
-#     prompt_textbox,
-#     negative_prompt_textbox,
-#     first_frame,
-#     sample_step_slider,
-#     width_slider,
-#     height_slider,
-#     txt_cfg_scale_slider,
-#     img_cfg_scale_slider,
-#     frame_stride,
-#     use_frameinit,
-#     frame_init_noise_level,
-
-# ):
-#     first_frame = first_frame.to("cuda")
-#     sample = pipeline(
-#         prompt_textbox,
-#         negative_prompt = negative_prompt_textbox,
-#         first_frames = first_frame,
-#         num_inference_steps = sample_step_slider,
-#         guidance_scale_txt = txt_cfg_scale_slider,
-#         guidance_scale_img = img_cfg_scale_slider,
-#         width = width_slider,
-#         height = height_slider,
-#         video_length = 16,
-#         noise_sampling_method = "pyoco_mixed",
-#         noise_alpha = 1.0,
-#         frame_stride = frame_stride,
-#         use_frameinit = use_frameinit,
-#         frameinit_noise_level = frame_init_noise_level,
-#         camera_motion = None,
-#     ).videos
-#     return sample
-
-
 def ui():
     with gr.Blocks(css=css) as demo:
         gr.Markdown(
@@ -287,34 +309,16 @@ def ui():
                 input_image = gr.Image(label="Input Image", interactive=True)
                 input_image.upload(fn=update_textbox_and_save_image, inputs=[input_image, height_slider, width_slider, center_crop], outputs=[input_image_path, input_image])
                 result_video = gr.Video(label="Generated Animation", interactive=False, autoplay=True)
-
-        def update_and_resize_image(input_image_path, height_slider, width_slider, center_crop):
-            if input_image_path.startswith("http://") or input_image_path.startswith("https://"):
-                pil_image = Image.open(requests.get(input_image_path, stream=True).raw).convert('RGB')
-            else:
-                pil_image = Image.open(input_image_path).convert('RGB')
-            original_width, original_height = pil_image.size
-
-            if center_crop:
-                crop_aspect_ratio = width_slider / height_slider
-                aspect_ratio = original_width / original_height
-                if aspect_ratio > crop_aspect_ratio:
-                    new_width = int(crop_aspect_ratio * original_height)
-                    left = (original_width - new_width) / 2
-                    top = 0
-                    right = left + new_width
-                    bottom = original_height
-                    pil_image = pil_image.crop((left, top, right, bottom))
-                elif aspect_ratio < crop_aspect_ratio:
-                    new_height = int(original_width / crop_aspect_ratio)
-                    top = (original_height - new_height) / 2
-                    left = 0
-                    right = original_width
-                    bottom = top + new_height
-                    pil_image = pil_image.crop((left, top, right, bottom))
 
-
-
+                with gr.Row():
+                    batch_examples = gr.Examples(
+                        examples=EXAMPLES,
+                        fn=get_examples,
+                        cache_examples=True,
+                        examples_per_page=4,
+                        inputs=[prompt_textbox, input_image],
+                        outputs=[prompt_textbox, input_image, input_image_path, width_slider, height_slider, center_crop, seed_textbox],
+                    )
 
         preview_button.click(fn=update_and_resize_image, inputs=[input_image_path, height_slider, width_slider, center_crop], outputs=[input_image])
         input_image_path.submit(fn=update_and_resize_image, inputs=[input_image_path, height_slider, width_slider, center_crop], outputs=[input_image])
@@ -339,6 +343,7 @@ def ui():
             ],
             outputs=[result_video]
         )
+
 
         return demo
 
example/example_01.png ADDED (Git LFS)
example/example_02.png ADDED (Git LFS)
example/example_03.png ADDED (Git LFS)
example/example_04.png ADDED (Git LFS)