Skip to content

Commit 3e7eba5

Browse files
committed
removed extra debug from pipeline
1 parent 8861b92 commit 3e7eba5

3 files changed

Lines changed: 2 additions & 9 deletions

File tree

src/maxdiffusion/configs/base_wan_27b.yml

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -304,7 +304,7 @@ boundary_ratio: 0.875
304304

305305
# Based on 3.4. in https://arxiv.org/pdf/2305.08891.pdf
306306
guidance_rescale: 0.0
307-
num_inference_steps: 30
307+
num_inference_steps: 40
308308
fps: 24
309309
save_final_checkpoint: False
310310

src/maxdiffusion/configs/base_wan_i2v_27b.yml

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -300,7 +300,7 @@ boundary_ratio: 0.875
300300

301301
# Based on 3.4. in https://arxiv.org/pdf/2305.08891.pdf
302302
guidance_rescale: 0.0
303-
num_inference_steps: 50
303+
num_inference_steps: 40
304304
fps: 16
305305
save_final_checkpoint: False
306306

src/maxdiffusion/pipelines/ltx2/ltx2_pipeline.py

Lines changed: 0 additions & 7 deletions
Original file line numberDiff line numberDiff line change
@@ -633,9 +633,6 @@ def get_fp8_config(cls, config: HyperParameters):
633633
@classmethod
634634
def get_qt_provider(cls, config: HyperParameters) -> Optional[qwix.QtProvider]:
635635
"""Get quantization rules based on the config."""
636-
max_logging.log(
637-
f"DEBUG: use_qwix_quantization={getattr(config, 'use_qwix_quantization', None)}, quantization={getattr(config, 'quantization', None)}"
638-
)
639636
if not getattr(config, "use_qwix_quantization", False):
640637
return None
641638

@@ -652,17 +649,13 @@ def quantize_transformer(cls, config: HyperParameters, model: Any, pipeline: "LT
652649
"""Quantizes the transformer model."""
653650
q_rules = cls.get_qt_provider(config)
654651
if not q_rules:
655-
max_logging.log("DEBUG: Transformer is NOT being quantized. (q_rules is None)")
656652
return model
657-
max_logging.log("Quantizing transformer with Qwix.")
658653

659654
batch_size = config.global_batch_size_to_train_on
660655
model_inputs = get_dummy_ltx2_inputs(config, pipeline, batch_size)
661656

662657
with mesh:
663658
quantized_model = qwix.quantize_model(model, q_rules, *model_inputs)
664-
max_logging.log("DEBUG: Transformer WAS successfully quantized.")
665-
max_logging.log("Qwix Quantization complete.")
666659
return quantized_model
667660

668661
def _get_gemma_prompt_embeds(

0 commit comments

Comments (0)