import torch
from diffusers import DiffusionPipeline, DPMSolverMultistepScheduler
from diffusers.utils import export_to_video
import gradio as gr

# Load the text-to-video DiffusionPipeline in half precision
pipe = DiffusionPipeline.from_pretrained(
    "damo-vilab/text-to-video-ms-1.7b", torch_dtype=torch.float16, variant="fp16"
)
pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
# Offload model components to CPU between forward passes to reduce GPU memory use
# (requires the `accelerate` package)
pipe.enable_model_cpu_offload()

def generate_video(prompt):
    # Run the pipeline; `.frames` contains the generated frames for each prompt in the batch
    video_frames = pipe(prompt, num_inference_steps=25).frames
    # Write the frames of the first (and only) video to a temporary .mp4 and return its path
    video_path = export_to_video(video_frames[0])
    return video_path

# Create the Gradio interface
interface = gr.Interface(
    fn=generate_video,
    inputs=gr.Textbox(label="Enter your prompt"),
    outputs=gr.Video(label="Generated Video"),
    title="Text-to-Video Generator",
    description="Enter a prompt to generate a video using diffusion models.",
)

# Launch the Gradio app
interface.launch()
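
Once the app is running, the same endpoint can also be called programmatically. The snippet below is a minimal sketch using gradio_client, run from a separate process; the local URL, example prompt, and the default api_name "/predict" are assumptions based on a stock gr.Interface launched on the default port, not something defined by this app itself.

from gradio_client import Client

# Sketch only: assumes the app above is already serving on Gradio's default local port.
client = Client("http://127.0.0.1:7860/")

# For a plain gr.Interface, the auto-generated endpoint is typically "/predict";
# the call returns the path of the generated video downloaded to the local machine.
result = client.predict("An astronaut riding a horse on Mars", api_name="/predict")
print("Generated video saved at:", result)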