Eueuiaa committed
Commit 5b5f805 (verified) · Parent(s): 0d74cea

Update api/ltx_server_refactored.py

Files changed (1):
  1. api/ltx_server_refactored.py +27 -42
api/ltx_server_refactored.py CHANGED
@@ -225,7 +225,11 @@ class VideoService:
             conditioning_items.append(ConditioningItem(tensor, safe_frame, float(weight)))
         return conditioning_items
 
-    def generate_low(self, prompt, negative_prompt, height, width, duration, seed, conditioning_items=None):
+    def generate_low(
+        self, prompt, negative_prompt,
+        height, width, duration, seed,
+        conditioning_items=None
+    ):
         guidance_scale="4"
         used_seed = random.randint(0, 2**32 - 1) if seed is None else int(seed)
         seed_everething(used_seed)
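The hunk above only reflows the `generate_low` signature; parameter names and order are unchanged, so existing call sites keep working. Note that the untouched context line `guidance_scale="4"` assigns a string where a numeric scale is presumably expected downstream. A minimal caller sketch against the new signature (the `service` instance and argument values are assumptions, not part of the commit):

    # Hypothetical caller; `service` stands for a VideoService instance.
    tensor_path = service.generate_low(
        prompt="a red fox running through snow",
        negative_prompt="blurry, low quality",
        height=512, width=768,
        duration=3.0,        # seconds
        seed=None,           # None -> a random seed is drawn internally
        conditioning_items=None,
    )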
@@ -251,12 +255,12 @@ class VideoService:
         try:
             with torch.autocast(device_type="cuda", dtype=self.runtime_autocast_dtype, enabled=self.device == 'cuda'):
                 latents = self.pipeline(**first_pass_kwargs).images
-                pixel_tensor = vae_manager_singleton.decode(latents.clone(), decode_timestep=float(self.config.get("decode_timestep", 0.05)))
-                video_path = self._save_and_log_video(pixel_tensor, "low_res_video", FPS, temp_dir, results_dir, used_seed)
+                #pixel_tensor = vae_manager_singleton.decode(latents.clone(), decode_timestep=float(self.config.get("decode_timestep", 0.05)))
+                #video_path = self._save_and_log_video(pixel_tensor, "low_res_video", FPS, temp_dir, results_dir, used_seed)
                 latents_cpu = latents.detach().to("cpu")
                 tensor_path = os.path.join(results_dir, f"latents_low_res_{used_seed}.pt")
                 torch.save(latents_cpu, tensor_path)
-                return video_path, tensor_path, used_seed
+                return tensor_path
 
         except Exception as e:
             pass
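This hunk changes `generate_low`'s public contract: the VAE decode and video export are commented out, and the method returns a single `tensor_path` instead of the previous `(video_path, tensor_path, used_seed)` triple. Because the enclosing `except Exception as e: pass` swallows every error, the method now implicitly returns `None` on failure. A hedged caller sketch under those assumptions (`service` is an assumed instance):

    import torch

    tensor_path = service.generate_low(prompt, negative_prompt,
                                       height, width, duration, seed)
    if tensor_path is None:
        # The exception was swallowed inside generate_low; surface it here.
        raise RuntimeError("generate_low failed; check the service logs")
    latents = torch.load(tensor_path, map_location="cpu")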
@@ -333,36 +337,27 @@ class VideoService:
         torch.cuda.ipc_collect()
         self.finalize(keep_paths=[])
 
-
+
     def generate_narrative_low(
         self, prompt: str, negative_prompt,
         height, width, duration,
-        seed, initial_image_conditions=None, overlap_frames: int = 8,
+        seed, initial_image_conditions=None,
         ltx_configs_override: dict = None):
 
         print("\n" + "="*80)
         print("====== INICIANDO GERAÇÃO NARRATIVA EM CHUNKS (LOW-RES) ======")
         print("="*80)
-
-        used_seed = random.randint(0, 2**32 - 1) if seed is None else int(seed)
-        seed_everething(used_seed)
-        FPS = 24.0
 
         prompt_list = [p.strip() for p in prompt.splitlines() if p.strip()]
         num_chunks = len(prompt_list)
         if num_chunks == 0: raise ValueError("O prompt está vazio ou não contém linhas válidas.")
 
-        total_actual_frames = max(9, int(round((round(duration * FPS) - 1) / 8.0) * 8 + 1))
-        frames_per_chunk = max(9, total_actual_frames)
-        frames_per_chunk_last = max(9, total_actual_frames)
-        poda_latents_num = overlap_frames
-
+
         latentes_chunk_video = []
         condition_item_latent_overlap = None
-        temp_dir = tempfile.mkdtemp(prefix="ltxv_narrative_"); self._register_tmp_dir(temp_dir)
         results_dir = "/app/output"; os.makedirs(results_dir, exist_ok=True)
 
-        for i, chunk_prompt in enumerate(prompt_list):
+        for i, prompt in enumerate(prompt_list):
             print(f"\n--- Gerando Chunk Narrativo {i+1}/{num_chunks}: '{chunk_prompt}' ---")
 
             current_image_conditions = []
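Several bindings removed in this hunk are still used further down the function: `used_seed` (in the saved-tensor filename), `poda_latents_num` (the overlap pruning count, formerly derived from `overlap_frames`), and `chunk_prompt` (the loop variable is renamed to `prompt`, shadowing the function argument, while the very next line still reads `chunk_prompt`). A minimal sketch of the setup the remaining code still expects, assuming the previous default of an 8-frame overlap:

    import random

    # Hedged reconstruction; `seed_everething` is the repo helper, name kept as-is.
    used_seed = random.randint(0, 2**32 - 1) if seed is None else int(seed)
    seed_everething(used_seed)
    poda_latents_num = 8  # formerly `overlap_frames`, removed from the signature

    for i, chunk_prompt in enumerate(prompt_list):  # avoids shadowing `prompt`
        print(f"\n--- Gerando Chunk Narrativo {i+1}/{num_chunks}: '{chunk_prompt}' ---")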
@@ -377,36 +372,29 @@ class VideoService:
             current_image_conditions.append(cond_item_fraco)
 
             if ltx_configs_override is None: ltx_configs_override = {}
-            current_conditions = []
-            if current_image_conditions: current_conditions.extend(current_image_conditions)
-            if condition_item_latent_overlap: current_conditions.append(condition_item_latent_overlap)
-            ltx_configs_override["conditioning_items"] = current_conditions
 
-            num_frames_para_gerar = frames_per_chunk_last
+            eco_current_conditions_list = []
+            if current_image_conditions: eco_current_conditions_list.extend(current_image_conditions)
+            if eco_latents_condition_overlap: eco_current_conditions_list.append(eco_latents_condition_overlap)
+            #ltx_configs_override["conditioning_items"] = current_conditions
 
-            latentes_bruto = self._generate_single_chunk_low(
-                prompt=chunk_prompt, negative_prompt=negative_prompt, height=height, width=width,
-                num_frames=num_frames_para_gerar, seed=used_seed,
-                ltx_configs_override=ltx_configs_override
+            latentes_chunk = self.generate_low(
+                prompt, negative_prompt, height, width,
+                duration, seed, conditioning_items=eco_latents_condition_overlap
             )
-
-            if latentes_bruto is None:
-                print(f"ERRO FATAL: A geração do chunk {i+1} falhou. Abortando.")
-                self.finalize(keep_paths=[])
-                return None, None, None
-
-            if i > 0:
-                latentes_bruto = latentes_bruto[:, :, poda_latents_num:, :, :]
-
-            latentes_podado = latentes_bruto.clone().detach()
+
             if i < num_chunks - 1 and poda_latents_num > 0:
-                overlap_latents = latentes_bruto[:, :, -poda_latents_num:, :, :].clone()
+                overlap_latents = latentes_chunk[:, :, -poda_latents_num:, :, :].clone()
                 condition_item_latent_overlap = ConditioningItem(
                     media_item=overlap_latents, media_frame_number=0, conditioning_strength=1.0
                 )
-            latentes_chunk_video.append(latentes_podado)
 
-            final_latents_cpu = torch.cat(latentes_chunk_video, dim=2).cpu()
+            if i > 0:
+                latentes_chunk = latentes_chunk[:, :, poda_latents_num:, :, :].clone()
+
+            latentes_chunk_video_list.append(latentes_chunk)
+
+            final_latents_cpu = torch.cat(latentes_chunk_video_list, dim=2).cpu()
         log_tensor_info(final_latents_cpu, "Tensor de Latentes Final Concatenado (CPU)")
 
         tensor_path = os.path.join(results_dir, f"latents_narrative_{used_seed}.pt")
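As committed, the loop above has several unresolved references: `eco_latents_condition_overlap` is read but never assigned (the overlap item is stored in `condition_item_latent_overlap`), the assembled `eco_current_conditions_list` is never passed to the pipeline, `latentes_chunk_video_list` does not match the `latentes_chunk_video` initialized earlier, `torch.cat` runs inside the loop, and `generate_low` now returns a file path rather than a latent tensor, so the slicing would fail. A hedged reconstruction of what the loop appears to intend, assuming `generate_low` is adjusted to return the latent tensor:

    eco_latents_condition_overlap = None  # must exist before the first iteration
    latentes_chunk_video_list = []

    for i, chunk_prompt in enumerate(prompt_list):
        # current_image_conditions is rebuilt per chunk, as in the surrounding code.
        conditions = list(current_image_conditions)
        if eco_latents_condition_overlap is not None:
            conditions.append(eco_latents_condition_overlap)

        latentes_chunk = self.generate_low(
            chunk_prompt, negative_prompt, height, width,
            duration, seed, conditioning_items=conditions,  # the full list, not one item
        )

        # Keep this chunk's tail as the conditioning "echo" for the next chunk.
        if i < num_chunks - 1 and poda_latents_num > 0:
            overlap_latents = latentes_chunk[:, :, -poda_latents_num:, :, :].clone()
            eco_latents_condition_overlap = ConditioningItem(
                media_item=overlap_latents, media_frame_number=0, conditioning_strength=1.0
            )

        # Drop the frames re-generated for continuity with the previous chunk.
        if i > 0:
            latentes_chunk = latentes_chunk[:, :, poda_latents_num:, :, :].clone()

        latentes_chunk_video_list.append(latentes_chunk)

    # Concatenate once, after the loop, along the frame axis (dim=2).
    final_latents_cpu = torch.cat(latentes_chunk_video_list, dim=2).cpu()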
@@ -596,7 +584,6 @@ class VideoService:
         self._tmp_dirs = set()
         print(f"[DEBUG] VideoService pronto. boot_time={time.perf_counter()-t0:.3f}s")
 
-    # A função move_to_device que criamos antes é essencial aqui
     def move_to_device(self, device):
         """Move os modelos do pipeline para o dispositivo especificado."""
         print(f"[LTX] Movendo modelos para {device}...")
@@ -612,8 +599,6 @@ class VideoService:
         if torch.cuda.is_available():
             torch.cuda.empty_cache()
 
-
-# Instanciação limpa, sem usar `self` fora da classe.
 print("Criando instância do VideoService...")
 video_generation_service = VideoService()
 print("Instância do VideoService pronta.")
 