eeuuia committed on
Commit
761ae64
·
verified ·
1 Parent(s): e24c75c

Update api/ltx_server_refactored_complete.py

Browse files
api/ltx_server_refactored_complete.py CHANGED
@@ -243,6 +243,37 @@ class VideoService:
243
  # --- UNIDADES DE TRABALHO E HELPERS INTERNOS ---
244
  # ==========================================================================
245
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
246
  @log_function_io
247
  def _generate_single_chunk_low(self, **kwargs) -> Optional[torch.Tensor]:
248
  """[WORKER] Calls the pipeline to generate a single chunk of latents."""
@@ -260,11 +291,17 @@ class VideoService:
260
  "prompt": kwargs['prompt'], "negative_prompt": kwargs['negative_prompt'],
261
  "height": downscaled_height, "width": downscaled_width, "num_frames": kwargs['num_frames'],
262
  "frame_rate": DEFAULT_FPS, "generator": torch.Generator(device=self.main_device).manual_seed(kwargs['seed']),
263
- "output_type": "latent", "conditioning_items": kwargs['conditioning_items'], **first_pass_config
264
  }
 
 
 
265
 
266
  with torch.autocast(device_type=self.main_device.type, dtype=self.runtime_autocast_dtype, enabled="cuda" in self.main_device.type):
267
- latents_raw = self.pipeline(**pipeline_kwargs).images
 
 
 
268
 
269
  return latents_raw.to(self.main_device)
270
 
 
243
  # --- UNIDADES DE TRABALHO E HELPERS INTERNOS ---
244
  # ==========================================================================
245
 
246
+ # --- NOVA FUNÇÃO DE LOG DEDICADA ---
247
+ def _log_conditioning_items(self, items: List[ConditioningItem]):
248
+ """
249
+ Logs detailed information about a list of ConditioningItem objects.
250
+ This is a dedicated debug helper function.
251
+ """
252
+ # Só imprime o log se o nível de logging for INFO (ou mais detalhado)
253
+ if logging.getLogger().isEnabledFor(logging.INFO):
254
+ log_str = ["\n" + "="*25 + " INFO: Conditioning Items " + "="*25]
255
+ if not items:
256
+ log_str.append(" -> Lista de conditioning_items está vazia.")
257
+ else:
258
+ for i, item in enumerate(items):
259
+ if hasattr(item, 'media_item') and isinstance(item.media_item, torch.Tensor):
260
+ t = item.media_item
261
+ log_str.append(
262
+ f" -> Item [{i}]: "
263
+ f"Tensor(shape={list(t.shape)}, "
264
+ f"device='{t.device}', "
265
+ f"dtype={t.dtype}), "
266
+ f"Target Frame = {item.media_frame_number}, "
267
+ f"Strength = {item.conditioning_strength:.2f}"
268
+ )
269
+ else:
270
+ log_str.append(f" -> Item [{i}]: Não contém um tensor válido.")
271
+ log_str.append("="*75 + "\n")
272
+
273
+ # Usa o logger em nível INFO para imprimir a mensagem completa
274
+ logging.info("\n".join(log_str))
275
+
276
+
277
  @log_function_io
278
  def _generate_single_chunk_low(self, **kwargs) -> Optional[torch.Tensor]:
279
  """[WORKER] Calls the pipeline to generate a single chunk of latents."""
 
291
  "prompt": kwargs['prompt'], "negative_prompt": kwargs['negative_prompt'],
292
  "height": downscaled_height, "width": downscaled_width, "num_frames": kwargs['num_frames'],
293
  "frame_rate": DEFAULT_FPS, "generator": torch.Generator(device=self.main_device).manual_seed(kwargs['seed']),
294
+ "output_type": "latent", **first_pass_config
295
  }
296
+
297
+ logging.info(f"\n[Info] pipeline_kwargs:\n {pipeline_kwargs}\n\n")
298
+ self._log_conditioning_items(kwargs['conditioning_items'])
299
 
300
  with torch.autocast(device_type=self.main_device.type, dtype=self.runtime_autocast_dtype, enabled="cuda" in self.main_device.type):
301
+ latents_raw = self.pipeline(
302
+ "conditioning_items": kwargs['conditioning_items']
303
+ **pipeline_kwargs
304
+ ).images
305
 
306
  return latents_raw.to(self.main_device)
307