# --- Scrape artifacts from the hosting page (not part of the program) ---
# Spaces: Running | File size: 1,902 Bytes | commit 7957c02
import torch
import gradio as gr
from diffusers import DiffusionPipeline
# Automatically detect GPU or CPU.
device = "cuda" if torch.cuda.is_available() else "cpu"
# Half precision only pays off on GPU; CPU fp16 is slow/poorly supported.
dtype = torch.float16 if device == "cuda" else torch.float32
print(f"✅ Running on device: {device}")

# ⚙️ Model to load — replace with your own model if needed.
model_id = "akhaliq/veo3.1-fast"
# ⚙️ Try to load the model safely so the app can still start if loading fails.
# NOTE(review): the original wrapped this in `with gr.Progress(track_tqdm=True):`,
# but gr.Progress is not a context manager — it is meant to be injected as an
# event-handler parameter. Using it here raised immediately, so the model
# never loaded and `pipe` was always None. Load the pipeline directly.
try:
    pipe = DiffusionPipeline.from_pretrained(
        model_id,
        torch_dtype=dtype,
    ).to(device)
    print("✅ Model loaded successfully!")
except Exception as e:  # broad on purpose: any load failure degrades gracefully
    print("❌ Error loading model:", e)
    pipe = None
# 🎬 Main inference function
def generate_video(image, prompt):
    """Generate a video from an uploaded image and a text prompt.

    Args:
        image: PIL image from the Gradio Image component (or None).
        prompt: free-text description of the desired animation.

    Returns:
        The generated video on success, or a warning/error string that
        Gradio will surface to the user.
    """
    # Guard clauses: model unavailable or incomplete input.
    if pipe is None:
        return "⚠️ Model could not be loaded. Please try again later."
    # `not prompt` also covers a None prompt, which would otherwise crash on .strip().
    if image is None or not prompt or prompt.strip() == "":
        return "⚠️ Please upload an image and enter a description."
    try:
        # NOTE(review): `gr.Progress` is not a context manager; the original
        # `with gr.Progress(track_tqdm=True):` raised here and every call fell
        # into the except branch. Run the pipeline directly instead.
        result = pipe(image=image, prompt=prompt)
        # Diffusers pipeline outputs subclass OrderedDict, so .get() works —
        # TODO confirm "video" is the output key for this particular pipeline.
        video = result.get("video", None)
        if video:
            return video
        return "⚠️ No video was generated. Try a different prompt."
    except Exception as e:
        return f"❌ An error occurred while generating video: {str(e)}"
# 🖥️ Gradio interface
demo = gr.Interface(
    fn=generate_video,
    inputs=[
        gr.Image(type="pil", label="📸 Upload Image"),
        gr.Textbox(label="📝 Prompt", placeholder="Describe the animation you want..."),
    ],
    outputs=gr.Video(label="🎥 Generated Video"),
    title="✨ AI Image to Video Generator",
    description="Upload an image and let AI create an animated video for you. Works on CPU and GPU!",
    theme="gradio/soft",
    # NOTE(review): `allow_flagging` is deprecated in Gradio 4+ (renamed
    # `flagging_mode`) — kept here for compatibility with older versions.
    allow_flagging="never",
)

# 🚀 Launch only when run as a script (not on import).
if __name__ == "__main__":
    demo.launch()