Spaces:
Paused
Paused
Update api/ltx_server_refactored.py
Browse files
api/ltx_server_refactored.py
CHANGED
|
@@ -200,11 +200,11 @@ class VideoService:
|
|
| 200 |
|
| 201 |
|
| 202 |
def _save_and_log_video(self, pixel_tensor, base_filename, fps, temp_dir, results_dir, used_seed, progress_callback=None):
|
| 203 |
-
output_path = os.path.join(temp_dir, f"{base_filename}_ …[removed line truncated in page capture; full replacement line appears in the second rendering of this hunk]
|
| 204 |
video_encode_tool_singleton.save_video_from_tensor(
|
| 205 |
pixel_tensor, output_path, fps=fps, progress_callback=progress_callback
|
| 206 |
)
|
| 207 |
-
final_path = os.path.join(results_dir, f"{base_filename}_ …[removed line truncated in page capture; full replacement line appears in the second rendering of this hunk]
|
| 208 |
shutil.move(output_path, final_path)
|
| 209 |
print(f"[DEBUG] Vídeo salvo em: {final_path}")
|
| 210 |
return final_path
|
|
@@ -278,6 +278,9 @@ class VideoService:
|
|
| 278 |
"""
|
| 279 |
print("\n" + "-"*20 + " INÍCIO: _generate_single_chunk_low " + "-"*20)
|
| 280 |
|
|
|
|
|
|
|
|
|
|
| 281 |
# --- NÓ 1.1: SETUP DE PARÂMETROS ---
|
| 282 |
height_padded = ((height - 1) // 8 + 1) * 8
|
| 283 |
width_padded = ((width - 1) // 8 + 1) * 8
|
|
@@ -330,7 +333,7 @@ class VideoService:
|
|
| 330 |
with torch.autocast(device_type="cuda", dtype=self.runtime_autocast_dtype, enabled=self.device.type == 'cuda'):
|
| 331 |
latents_bruto = self.pipeline(**first_pass_kwargs).images
|
| 332 |
latents_cpu_bruto = latents_bruto.detach().to("cpu")
|
| 333 |
-
tensor_path_cpu = os.path.join(results_dir, f" …[removed line truncated in page capture; the added replacement line reads f"latents_low_res.pt"]
|
| 334 |
torch.save(latents_cpu_bruto, tensor_path_cpu)
|
| 335 |
log_tensor_info(latents_bruto, f"Latente Bruto Gerado para: '{prompt[:40]}...'")
|
| 336 |
|
|
@@ -359,6 +362,7 @@ class VideoService:
|
|
| 359 |
print("====== INICIANDO GERAÇÃO NARRATIVA EM CHUNKS (LOW-RES) ======")
|
| 360 |
print("="*80)
|
| 361 |
|
|
|
|
| 362 |
used_seed = random.randint(0, 2**32 - 1) if seed is None else int(seed)
|
| 363 |
seed_everething(used_seed)
|
| 364 |
FPS = 24.0
|
|
@@ -435,7 +439,7 @@ class VideoService:
|
|
| 435 |
pixel_tensor = vae_manager_singleton.decode(final_latents.clone(), decode_timestep=float(self.config.get("decode_timestep", 0.05)))
|
| 436 |
video_path = self._save_and_log_video(pixel_tensor, "narrative_video", FPS, temp_dir, results_dir, used_seed)
|
| 437 |
latents_cpu = latents.detach().to("cpu")
|
| 438 |
-
tensor_path = os.path.join(results_dir, f" …[removed line truncated in page capture; the added replacement line reads f"latents_low_res.pt"]
|
| 439 |
torch.save(latents_cpu, tensor_path)
|
| 440 |
return video_path, tensor_path, used_seed
|
| 441 |
|
|
@@ -487,7 +491,7 @@ class VideoService:
|
|
| 487 |
pixel_tensor = vae_manager_singleton.decode(final_latents.clone(), decode_timestep=float(self.config.get("decode_timestep", 0.05)))
|
| 488 |
video_path = self._save_and_log_video(pixel_tensor, "single_video", FPS, temp_dir, results_dir, used_seed)
|
| 489 |
latents_cpu = latents.detach().to("cpu")
|
| 490 |
-
tensor_path = os.path.join(results_dir, f" …[removed line truncated in page capture; the added replacement line reads f"latents_single.pt"]
|
| 491 |
torch.save(latents_cpu, tensor_path)
|
| 492 |
return video_path, tensor_path, used_seed
|
| 493 |
except Exception as e:
|
|
|
|
| 200 |
|
| 201 |
|
| 202 |
def _save_and_log_video(self, pixel_tensor, base_filename, fps, temp_dir, results_dir, used_seed, progress_callback=None):
|
| 203 |
+
output_path = os.path.join(temp_dir, f"{base_filename}_.mp4")
|
| 204 |
video_encode_tool_singleton.save_video_from_tensor(
|
| 205 |
pixel_tensor, output_path, fps=fps, progress_callback=progress_callback
|
| 206 |
)
|
| 207 |
+
final_path = os.path.join(results_dir, f"{base_filename}_.mp4")
|
| 208 |
shutil.move(output_path, final_path)
|
| 209 |
print(f"[DEBUG] Vídeo salvo em: {final_path}")
|
| 210 |
return final_path
|
|
|
|
| 278 |
"""
|
| 279 |
print("\n" + "-"*20 + " INÍCIO: _generate_single_chunk_low " + "-"*20)
|
| 280 |
|
| 281 |
+
used_seed = random.randint(0, 2**32 - 1) if seed is None else int(seed)
|
| 282 |
+
seed_everething(used_seed)
|
| 283 |
+
|
| 284 |
# --- NÓ 1.1: SETUP DE PARÂMETROS ---
|
| 285 |
height_padded = ((height - 1) // 8 + 1) * 8
|
| 286 |
width_padded = ((width - 1) // 8 + 1) * 8
|
|
|
|
| 333 |
with torch.autocast(device_type="cuda", dtype=self.runtime_autocast_dtype, enabled=self.device.type == 'cuda'):
|
| 334 |
latents_bruto = self.pipeline(**first_pass_kwargs).images
|
| 335 |
latents_cpu_bruto = latents_bruto.detach().to("cpu")
|
| 336 |
+
tensor_path_cpu = os.path.join(results_dir, f"latents_low_res.pt")
|
| 337 |
torch.save(latents_cpu_bruto, tensor_path_cpu)
|
| 338 |
log_tensor_info(latents_bruto, f"Latente Bruto Gerado para: '{prompt[:40]}...'")
|
| 339 |
|
|
|
|
| 362 |
print("====== INICIANDO GERAÇÃO NARRATIVA EM CHUNKS (LOW-RES) ======")
|
| 363 |
print("="*80)
|
| 364 |
|
| 365 |
+
|
| 366 |
used_seed = random.randint(0, 2**32 - 1) if seed is None else int(seed)
|
| 367 |
seed_everething(used_seed)
|
| 368 |
FPS = 24.0
|
|
|
|
| 439 |
pixel_tensor = vae_manager_singleton.decode(final_latents.clone(), decode_timestep=float(self.config.get("decode_timestep", 0.05)))
|
| 440 |
video_path = self._save_and_log_video(pixel_tensor, "narrative_video", FPS, temp_dir, results_dir, used_seed)
|
| 441 |
latents_cpu = latents.detach().to("cpu")
|
| 442 |
+
tensor_path = os.path.join(results_dir, f"latents_low_res.pt")
|
| 443 |
torch.save(latents_cpu, tensor_path)
|
| 444 |
return video_path, tensor_path, used_seed
|
| 445 |
|
|
|
|
| 491 |
pixel_tensor = vae_manager_singleton.decode(final_latents.clone(), decode_timestep=float(self.config.get("decode_timestep", 0.05)))
|
| 492 |
video_path = self._save_and_log_video(pixel_tensor, "single_video", FPS, temp_dir, results_dir, used_seed)
|
| 493 |
latents_cpu = latents.detach().to("cpu")
|
| 494 |
+
tensor_path = os.path.join(results_dir, f"latents_single.pt")
|
| 495 |
torch.save(latents_cpu, tensor_path)
|
| 496 |
return video_path, tensor_path, used_seed
|
| 497 |
except Exception as e:
|