File size: 1,743 Bytes
a7aea10 097a1ae a7aea10 097a1ae a7aea10 097a1ae a7aea10 |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 |
import gradio as gr
import torch
import yaml
import os
from tools.infer import main as run_inference
# Base locations for the model weights and the inference config.
MODEL_DIR = "./model_weights"
CONFIG_PATH = "./configs/voyager.yaml"

# Load the YAML configuration when present; default to an empty dict.
config = {}
if os.path.exists(CONFIG_PATH):
    with open(CONFIG_PATH, "r") as f:
        config = yaml.safe_load(f)
# Definir función de inferencia
def generate_scene(prompt, steps=20, seed=42):
"""
Genera una imagen o escena usando el modelo HunyuanWorld-Voyager.
"""
os.makedirs("outputs", exist_ok=True)
input_args = {
"config": CONFIG_PATH,
"ckpt": os.path.join(MODEL_DIR, "pytorch_model.bin"),
"prompt": prompt,
"steps": steps,
"seed": seed,
"out_dir": "outputs/"
}
# Ejecutar el script de inferencia del repo original
try:
run_inference(**input_args)
result_files = [f for f in os.listdir("outputs") if f.endswith((".png", ".jpg"))]
if result_files:
latest = os.path.join("outputs", result_files[-1])
return latest
else:
return "No se generó ninguna imagen."
except Exception as e:
return f"Error al generar: {str(e)}"
# Interfaz Gradio
# Gradio front-end: wires generate_scene to a simple text-to-image form.
_inputs = [
    gr.Textbox(label="Prompt de escena o descripción"),
    gr.Slider(1, 50, value=20, step=1, label="Steps de inferencia"),
    gr.Number(value=42, label="Seed (aleatorio)"),
]

demo = gr.Interface(
    fn=generate_scene,
    inputs=_inputs,
    outputs=gr.Image(label="Resultado"),
    title="🎨 HunyuanWorld-Voyager — Tencent",
    description="Generador de escenas 3D e imágenes a partir de texto usando el modelo HunyuanWorld-Voyager.",
)

if __name__ == "__main__":
    demo.launch()
|