Eueuiaa committed (verified)
Commit df311a5 · Parent: f85c8b3

Update api/ltx_server_refactored.py

Files changed (1):
  api/ltx_server_refactored.py (+14 -10)
api/ltx_server_refactored.py CHANGED
@@ -246,7 +246,8 @@ class VideoService:
         first_pass_kwargs = {
             "prompt": prompt, "negative_prompt": negative_prompt, "height": downscaled_height, "width": downscaled_width,
             "num_frames": actual_num_frames, "frame_rate": int(FPS), "generator": torch.Generator(device=self.device).manual_seed(used_seed),
-            "output_type": "latent", "conditioning_items": conditioning_items, "guidance_scale": float(guidance_scale),
+            "output_type": "latent", "conditioning_items": conditioning_items,
+            #"guidance_scale": float(guidance_scale),
             **(self.config.get("first_pass", {}))
         }
         try:
@@ -295,10 +296,10 @@ class VideoService:
         print("[DEBUG] Sobrepondo configurações do LTX com valores da UI...")
         if "first_pass_num_inference_steps" in ltx_configs_override:
             first_pass_config["num_inference_steps"] = ltx_configs_override["first_pass_num_inference_steps"]
-        if "first_pass_guidance_scale" in ltx_configs_override:
-            max_val = max(first_pass_config.get("guidance_scale", [1]))
-            new_max_val = ltx_configs_override["first_pass_guidance_scale"]
-            first_pass_config["guidance_scale"] = [new_max_val if x==max_val else x for x in first_pass_config["guidance_scale"]]
+        #if "first_pass_guidance_scale" in ltx_configs_override:
+        #    max_val = max(first_pass_config.get("guidance_scale", [1]))
+        #    new_max_val = ltx_configs_override["first_pass_guidance_scale"]
+        #    first_pass_config["guidance_scale"] = [new_max_val if x==max_val else x for x in first_pass_config["guidance_scale"]]

         first_pass_kwargs = {
             "prompt": prompt, "negative_prompt": negative_prompt, "height": downscaled_height, "width": downscaled_width,
@@ -307,8 +308,8 @@ class VideoService:
             **first_pass_config
         }
         # Removed guidance_scale from here since it now lives inside first_pass_config
-        if "guidance_scale" in first_pass_kwargs:
-            del first_pass_kwargs['guidance_scale']
+        #if "guidance_scale" in first_pass_kwargs:
+        #    del first_pass_kwargs['guidance_scale']

         with torch.autocast(device_type="cuda", dtype=self.runtime_autocast_dtype, enabled=self.device.type == 'cuda'):
             latents_bruto = self.pipeline(**first_pass_kwargs).images
@@ -376,7 +377,9 @@ class VideoService:

         latentes_bruto = self._generate_single_chunk_low(
             prompt=chunk_prompt, negative_prompt=negative_prompt, height=height, width=width,
-            num_frames=num_frames_para_gerar, guidance_scale=guidance_scale, seed=used_seed + i,
+            num_frames=num_frames_para_gerar,
+            #guidance_scale=guidance_scale,
+            seed=used_seed + i,
             initial_latent_condition=condition_item_latent_overlap, image_conditions=current_image_conditions,
             ltx_configs_override=ltx_configs_override
         )
@@ -431,7 +434,7 @@ class VideoService:
             negative_prompt=negative_prompt,
             height=height, width=width,
             num_frames=total_actual_frames,
-            guidance_scale=guidance_scale,
+            #guidance_scale=guidance_scale,
             seed=used_seed,
             image_conditions=initial_image_conditions,
             ltx_configs_override=ltx_configs_override
@@ -476,7 +479,8 @@ class VideoService:
         second_pass_width = chunk.shape[4] * self.pipeline.vae_scale_factor
         second_pass_kwargs = {
             "prompt": prompt, "negative_prompt": negative_prompt, "height": second_pass_height, "width": second_pass_width,
-            "num_frames": chunk.shape[2], "latents": chunk, "guidance_scale": float(guidance_scale),
+            "num_frames": chunk.shape[2], "latents": chunk,
+            #"guidance_scale": float(guidance_scale),
             "output_type": "latent", "generator": torch.Generator(device=self.device).manual_seed(used_seed),
             **(self.config.get("second_pass", {}))
         }
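Why dropping the explicit "guidance_scale" key is safe follows from Python's dict-literal merge order, not from anything LTX-specific. A minimal sketch, not from the repo (the config shape is an assumption): keys unpacked later override earlier ones, so a config-supplied guidance_scale always won over the explicit scalar anyway, and removing the dead key additionally lets the pipeline default apply when the config omits it.

# Minimal sketch (hypothetical config shape): dict keys unpacked later
# override earlier ones, so the explicit scalar was already shadowed.
config = {"first_pass": {"guidance_scale": [1.0, 6.0, 8.0], "num_inference_steps": 8}}

kwargs_before = {"guidance_scale": 7.5, **config.get("first_pass", {})}
kwargs_after = {**config.get("first_pass", {})}

print(kwargs_before["guidance_scale"])  # [1.0, 6.0, 8.0] -- config already overrode the scalar
print(kwargs_after["guidance_scale"])   # [1.0, 6.0, 8.0] -- same result without the dead key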
 
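For reference, the UI override disabled in the second hunk rewrote only the largest entries of the per-step guidance_scale list. A standalone sketch of that now-commented-out logic, assuming guidance_scale is a per-step list as the old code implies:

# Sketch of the disabled override (list shape assumed): the UI value
# replaces every entry equal to the list's maximum, leaving the lower
# ramp values untouched.
first_pass_config = {"guidance_scale": [1.0, 3.0, 8.0, 8.0, 3.0]}
ltx_configs_override = {"first_pass_guidance_scale": 5.0}

max_val = max(first_pass_config.get("guidance_scale", [1]))
new_max_val = ltx_configs_override["first_pass_guidance_scale"]
first_pass_config["guidance_scale"] = [
    new_max_val if x == max_val else x for x in first_pass_config["guidance_scale"]
]
print(first_pass_config["guidance_scale"])  # [1.0, 3.0, 5.0, 5.0, 3.0]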