Spaces:
Paused
Paused
Update api/ltx_server_refactored_complete.py
Browse files
api/ltx_server_refactored_complete.py
CHANGED
|
@@ -1,6 +1,7 @@
|
|
| 1 |
# FILE: api/ltx_server_refactored_complete.py
|
| 2 |
-
# DESCRIPTION: Final
|
| 3 |
-
#
|
|
|
|
| 4 |
|
| 5 |
import gc
|
| 6 |
import json
|
|
@@ -16,24 +17,33 @@ from typing import Dict, List, Optional, Tuple
|
|
| 16 |
import torch
|
| 17 |
import yaml
|
| 18 |
import numpy as np
|
|
|
|
| 19 |
|
| 20 |
# ==============================================================================
|
| 21 |
# --- SETUP E IMPORTAÇÕES DO PROJETO ---
|
| 22 |
# ==============================================================================
|
| 23 |
|
| 24 |
-
#
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 25 |
DEPS_DIR = Path("/data")
|
| 26 |
LTX_VIDEO_REPO_DIR = DEPS_DIR / "LTX-Video"
|
| 27 |
RESULTS_DIR = Path("/app/output")
|
| 28 |
DEFAULT_FPS = 24.0
|
| 29 |
FRAMES_ALIGNMENT = 8
|
|
|
|
| 30 |
|
| 31 |
# Garante que a biblioteca LTX-Video seja importável
|
| 32 |
def add_deps_to_path():
|
| 33 |
repo_path = str(LTX_VIDEO_REPO_DIR.resolve())
|
| 34 |
if repo_path not in sys.path:
|
| 35 |
sys.path.insert(0, repo_path)
|
| 36 |
-
# Usamos logging.info aqui, pois é uma informação importante de inicialização
|
| 37 |
logging.info(f"[ltx_server] LTX-Video repository added to sys.path: {repo_path}")
|
| 38 |
|
| 39 |
add_deps_to_path()
|
|
@@ -43,18 +53,13 @@ try:
|
|
| 43 |
from api.gpu_manager import gpu_manager
|
| 44 |
from managers.vae_manager import vae_manager_singleton
|
| 45 |
from tools.video_encode_tool import video_encode_tool_singleton
|
| 46 |
-
|
| 47 |
-
# Nosso módulo de utilitários LTX, que encapsula a complexidade
|
| 48 |
from api.ltx.ltx_utils import (
|
| 49 |
build_ltx_pipeline_on_cpu,
|
| 50 |
seed_everything,
|
| 51 |
load_image_to_tensor_with_resize_and_crop,
|
| 52 |
ConditioningItem,
|
| 53 |
)
|
| 54 |
-
|
| 55 |
-
# Nosso novo decorador de logging para depuração
|
| 56 |
from api.utils.debug_utils import log_function_io
|
| 57 |
-
|
| 58 |
except ImportError as e:
|
| 59 |
logging.critical(f"A crucial import from the local API/architecture failed. Error: {e}", exc_info=True)
|
| 60 |
sys.exit(1)
|
|
@@ -66,7 +71,7 @@ except ImportError as e:
|
|
| 66 |
@log_function_io
|
| 67 |
def calculate_padding(orig_h: int, orig_w: int, target_h: int, target_w: int) -> Tuple[int, int, int, int]:
|
| 68 |
"""Calculates symmetric padding required to meet target dimensions."""
|
| 69 |
-
pad_h = target_h -
|
| 70 |
pad_w = target_w - orig_w
|
| 71 |
pad_top = pad_h // 2
|
| 72 |
pad_bottom = pad_h - pad_top
|
|
@@ -87,7 +92,6 @@ class VideoService:
|
|
| 87 |
@log_function_io
|
| 88 |
def __init__(self):
|
| 89 |
t0 = time.perf_counter()
|
| 90 |
-
# Logging de alto nível para o usuário
|
| 91 |
logging.info("Initializing VideoService Orchestrator...")
|
| 92 |
RESULTS_DIR.mkdir(parents=True, exist_ok=True)
|
| 93 |
|
|
@@ -96,6 +100,8 @@ class VideoService:
|
|
| 96 |
logging.info(f"LTX allocated to devices: Main='{target_main_device_str}', VAE='{target_vae_device_str}'")
|
| 97 |
|
| 98 |
self.config = self._load_config()
|
|
|
|
|
|
|
| 99 |
self.pipeline, self.latent_upsampler = build_ltx_pipeline_on_cpu(self.config)
|
| 100 |
|
| 101 |
self.main_device = torch.device("cpu")
|
|
@@ -106,7 +112,6 @@ class VideoService:
|
|
| 106 |
vae_manager_singleton.attach_pipeline(self.pipeline, device=self.vae_device, autocast_dtype=self.runtime_autocast_dtype)
|
| 107 |
logging.info(f"VideoService ready. Startup time: {time.perf_counter()-t0:.2f}s")
|
| 108 |
|
| 109 |
-
@log_function_io
|
| 110 |
def _load_config(self) -> Dict:
|
| 111 |
"""Loads the YAML configuration file."""
|
| 112 |
config_path = LTX_VIDEO_REPO_DIR / "configs" / "ltxv-13b-0.9.8-distilled-fp8.yaml"
|
|
@@ -114,6 +119,38 @@ class VideoService:
|
|
| 114 |
with open(config_path, "r") as file:
|
| 115 |
return yaml.safe_load(file)
|
| 116 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 117 |
@log_function_io
|
| 118 |
def move_to_device(self, main_device_str: str, vae_device_str: str):
|
| 119 |
"""Moves pipeline components to their designated target devices."""
|
|
|
|
| 1 |
# FILE: api/ltx_server_refactored_complete.py
|
| 2 |
+
# DESCRIPTION: Final orchestrator for LTX-Video generation.
|
| 3 |
+
# Features path resolution for cached models, dedicated VAE device logic,
|
| 4 |
+
# delegation to utility modules, and advanced debug logging.
|
| 5 |
|
| 6 |
import gc
|
| 7 |
import json
|
|
|
|
| 17 |
import torch
|
| 18 |
import yaml
|
| 19 |
import numpy as np
|
| 20 |
+
from huggingface_hub import hf_hub_download
|
| 21 |
|
| 22 |
# ==============================================================================
|
| 23 |
# --- SETUP E IMPORTAÇÕES DO PROJETO ---
|
| 24 |
# ==============================================================================
|
| 25 |
|
| 26 |
+
# Logging configuration and warning suppression.
# (May be removed if logging is configured globally elsewhere.)
import warnings

warnings.filterwarnings("ignore")
# Silence the very chatty huggingface_hub progress/info messages.
logging.getLogger("huggingface_hub").setLevel(logging.ERROR)
# Log level is overridable via the ADUC_LOG_LEVEL environment variable (default INFO).
logging.basicConfig(
    level=os.environ.get("ADUC_LOG_LEVEL", "INFO").upper(),
    format='[%(levelname)s] [%(name)s] %(message)s',
)
|
| 33 |
+
|
| 34 |
+
# --- Configuration constants ---
DEPS_DIR = Path("/data")  # Base directory where external dependencies live
LTX_VIDEO_REPO_DIR = DEPS_DIR / "LTX-Video"  # Local clone of the LTX-Video repository (also holds the YAML configs)
RESULTS_DIR = Path("/app/output")  # Directory where generated outputs are written
DEFAULT_FPS = 24.0  # Default output frame rate
FRAMES_ALIGNMENT = 8  # Frame-count alignment granularity — presumably required by the model; confirm in generation code
LTX_REPO_ID = "Lightricks/LTX-Video"  # Hugging Face repository the model weights are downloaded from
|
| 41 |
|
| 42 |
# Make the vendored LTX-Video library importable.
def add_deps_to_path():
    """Prepend the resolved LTX-Video repository path to ``sys.path``.

    The path is inserted only once, so repeated calls are harmless.
    Logged at INFO level because it is important startup information.
    """
    resolved = str(LTX_VIDEO_REPO_DIR.resolve())
    if resolved not in sys.path:
        sys.path.insert(0, resolved)
    logging.info(f"[ltx_server] LTX-Video repository added to sys.path: {resolved}")

add_deps_to_path()
|
|
|
|
| 53 |
from api.gpu_manager import gpu_manager
|
| 54 |
from managers.vae_manager import vae_manager_singleton
|
| 55 |
from tools.video_encode_tool import video_encode_tool_singleton
|
|
|
|
|
|
|
| 56 |
from api.ltx.ltx_utils import (
|
| 57 |
build_ltx_pipeline_on_cpu,
|
| 58 |
seed_everything,
|
| 59 |
load_image_to_tensor_with_resize_and_crop,
|
| 60 |
ConditioningItem,
|
| 61 |
)
|
|
|
|
|
|
|
| 62 |
from api.utils.debug_utils import log_function_io
|
|
|
|
| 63 |
except ImportError as e:
|
| 64 |
logging.critical(f"A crucial import from the local API/architecture failed. Error: {e}", exc_info=True)
|
| 65 |
sys.exit(1)
|
|
|
|
| 71 |
@log_function_io
|
| 72 |
def calculate_padding(orig_h: int, orig_w: int, target_h: int, target_w: int) -> Tuple[int, int, int, int]:
|
| 73 |
"""Calculates symmetric padding required to meet target dimensions."""
|
| 74 |
+
pad_h = target_h - orig_h
|
| 75 |
pad_w = target_w - orig_w
|
| 76 |
pad_top = pad_h // 2
|
| 77 |
pad_bottom = pad_h - pad_top
|
|
|
|
| 92 |
@log_function_io
|
| 93 |
def __init__(self):
|
| 94 |
t0 = time.perf_counter()
|
|
|
|
| 95 |
logging.info("Initializing VideoService Orchestrator...")
|
| 96 |
RESULTS_DIR.mkdir(parents=True, exist_ok=True)
|
| 97 |
|
|
|
|
| 100 |
logging.info(f"LTX allocated to devices: Main='{target_main_device_str}', VAE='{target_vae_device_str}'")
|
| 101 |
|
| 102 |
self.config = self._load_config()
|
| 103 |
+
self._resolve_model_paths_from_cache() # Etapa crítica para encontrar os modelos
|
| 104 |
+
|
| 105 |
self.pipeline, self.latent_upsampler = build_ltx_pipeline_on_cpu(self.config)
|
| 106 |
|
| 107 |
self.main_device = torch.device("cpu")
|
|
|
|
| 112 |
vae_manager_singleton.attach_pipeline(self.pipeline, device=self.vae_device, autocast_dtype=self.runtime_autocast_dtype)
|
| 113 |
logging.info(f"VideoService ready. Startup time: {time.perf_counter()-t0:.2f}s")
|
| 114 |
|
|
|
|
| 115 |
def _load_config(self) -> Dict:
    """Load and parse the pipeline's YAML configuration file.

    Returns:
        The parsed configuration (typically a dict) from the distilled
        fp8 13b config shipped inside the LTX-Video repository.
    """
    cfg_file = LTX_VIDEO_REPO_DIR / "configs" / "ltxv-13b-0.9.8-distilled-fp8.yaml"
    return yaml.safe_load(cfg_file.read_text())
|
| 121 |
|
| 122 |
+
def _resolve_model_paths_from_cache(self):
    """Resolve model filenames in ``self.config`` to absolute cached paths.

    Uses ``hf_hub_download`` to locate (or fetch, if missing) each model file
    in the Hugging Face cache and rewrites the in-memory config entries with
    the absolute paths. This makes the app resilient to the cache's internal
    directory structure.

    Exits the process on failure, since the service cannot run without its
    model weights.
    """
    logging.info("Resolving model paths from Hugging Face cache...")
    # None makes hf_hub_download fall back to its default cache location.
    cache_dir = os.environ.get("HF_HOME")

    def _resolve(filename: str) -> str:
        # Single place for the download/lookup call, shared by both models below.
        return hf_hub_download(repo_id=LTX_REPO_ID, filename=filename, cache_dir=cache_dir)

    try:
        # Resolve the main checkpoint (always required).
        main_ckpt_path = _resolve(self.config["checkpoint_path"])
        self.config["checkpoint_path"] = main_ckpt_path
        logging.info(f" -> Main checkpoint resolved to: {main_ckpt_path}")

        # Resolve the spatial upsampler path, if one is configured.
        if self.config.get("spatial_upscaler_model_path"):
            upscaler_path = _resolve(self.config["spatial_upscaler_model_path"])
            self.config["spatial_upscaler_model_path"] = upscaler_path
            logging.info(f" -> Spatial upscaler resolved to: {upscaler_path}")
    except Exception as e:
        logging.critical(f"Failed to resolve model paths. Ensure setup.py ran correctly. Error: {e}", exc_info=True)
        sys.exit(1)
|
| 153 |
+
|
| 154 |
@log_function_io
|
| 155 |
def move_to_device(self, main_device_str: str, vae_device_str: str):
|
| 156 |
"""Moves pipeline components to their designated target devices."""
|