Hugging Face Spaces — status: Running
import torch
import gradio as gr
from diffusers import DiffusionPipeline

# Automatically select GPU when available, otherwise fall back to CPU.
# float16 halves memory on CUDA; CPU kernels generally require float32.
device = "cuda" if torch.cuda.is_available() else "cpu"
dtype = torch.float16 if device == "cuda" else torch.float32
print(f"[info] Running on device: {device}")

# Model to load; replace with your own model if needed.
model_id = "akhaliq/veo3.1-fast"

# Load the pipeline defensively: if the download/load fails, leave `pipe`
# as None so the UI can still start and report the problem per request.
# BUGFIX: the original wrapped this in `with gr.Progress(track_tqdm=True):`.
# gr.Progress is not a context manager and is only usable inside a Gradio
# event handler; the `with` raised immediately, the exception was swallowed
# below, and `pipe` was therefore ALWAYS None — the model never loaded.
try:
    pipe = DiffusionPipeline.from_pretrained(
        model_id,
        torch_dtype=dtype,
    ).to(device)
    print("[ok] Model loaded successfully!")
except Exception as e:
    print("[error] Error loading model:", e)
    pipe = None
| # π¬ Define your main function | |
def generate_video(image, prompt):
    """Generate an animated video from an uploaded image guided by a text prompt.

    Args:
        image: PIL image from the gr.Image input (may be None).
        prompt: text description from the gr.Textbox input (may be None/empty).

    Returns:
        The generated video on success.

    Raises:
        gr.Error: on missing model, missing/empty inputs, pipeline failure,
            or when no video is produced. (The output component is gr.Video,
            which cannot render a plain warning string — the original code
            returned message strings here, which the UI could not display.)
    """
    if pipe is None:
        raise gr.Error("Model could not be loaded. Please try again later.")
    # `not prompt` also guards against prompt being None, which would make
    # the original `prompt.strip()` raise AttributeError.
    if image is None or not prompt or not prompt.strip():
        raise gr.Error("Please upload an image and enter a description.")
    try:
        # BUGFIX: removed `with gr.Progress(track_tqdm=True):` — gr.Progress
        # is not a context manager; used that way it raised immediately, so
        # every call ended in the except branch and no video was ever made.
        result = pipe(image=image, prompt=prompt)
    except Exception as e:
        raise gr.Error(f"An error occurred while generating video: {str(e)}") from e
    # Diffusers pipeline outputs subclass an ordered dict (BaseOutput), so
    # dict-style .get is valid — TODO confirm for this particular pipeline.
    video = result.get("video", None)
    if not video:
        raise gr.Error("No video was generated. Try a different prompt.")
    return video
| # π₯οΈ Gradio interface | |
# Gradio UI wiring: one image + one prompt in, one video out.
# BUGFIX: all labels/titles contained mojibake (UTF-8 emoji mis-decoded as
# latin-1, e.g. "πΈ", "π₯οΈ") that would be shown verbatim to users;
# replaced with clean, readable text.
demo = gr.Interface(
    fn=generate_video,
    inputs=[
        gr.Image(type="pil", label="Upload Image"),
        gr.Textbox(label="Prompt", placeholder="Describe the animation you want..."),
    ],
    outputs=gr.Video(label="Generated Video"),
    title="AI Image to Video Generator",
    description="Upload an image and let AI create an animated video for you. Works on CPU and GPU!",
    theme="gradio/soft",
    # NOTE(review): allow_flagging is deprecated in Gradio 4 (and removed in
    # 5) in favor of flagging_mode="never" — confirm the installed version.
    allow_flagging="never",
)
| # π Launch safely | |
# Launch the Gradio server only when executed as a script (not on import).
if __name__ == "__main__":
    demo.launch()