Skip to content

Commit 2cab8c6

Browse files
committed
Fix
1 parent 266ae13 commit 2cab8c6

3 files changed

Lines changed: 6 additions & 6 deletions

File tree

src/maxdiffusion/configs/base_wan_lora_14b.yml

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -296,7 +296,7 @@ lora_rank: 64
296296
lora_config: {
297297
lora_model_name_or_path: ["lightx2v/Wan2.1-Distill-Loras"],
298298
weight_name: ["Wan2.1_t2v_14b_lora_rank64_lightx2v_4step.safetensors"],
299-
adapter_name: ["wan21-lora"],
299+
adapter_name: ["wan21-distill-lora"],
300300
scale: [1.0],
301301
from_pt: []
302302
}

src/maxdiffusion/configs/base_wan_lora_27b.yml

Lines changed: 4 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -305,10 +305,10 @@ lightning_ckpt: ""
305305
lora_rank: 64
306306
# Values are lists to support multiple LoRA loading during inference in the future.
307307
lora_config: {
308-
lora_model_name_or_path: ["lightx2v/Wan2.2-Lightning"],
309-
high_noise_weight_name: ["Wan2.2-T2V-A14B-4steps-lora-250928/high_noise_model.safetensors"],
310-
low_noise_weight_name: ["Wan2.2-T2V-A14B-4steps-lora-250928/low_noise_model.safetensors"],
311-
adapter_name: ["wan22-lightning-lora"],
308+
lora_model_name_or_path: ["lightx2v/Wan2.2-Distill-Loras"],
309+
high_noise_weight_name: ["wan2.2_t2v_A14b_high_noise_lora_rank64_lightx2v_4step_1217.safetensors"],
310+
low_noise_weight_name: ["wan2.2_t2v_A14b_low_noise_lora_rank64_lightx2v_4step_1217.safetensors"],
311+
adapter_name: ["wan22-distill-lora"],
312312
scale: [1.0],
313313
from_pt: []
314314
}

src/maxdiffusion/models/lora_nnx.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -349,7 +349,7 @@ def merge_lora_for_scanned(model: nnx.Module, state_dict: dict, scale: float, tr
349349
# Handle scanned Conv layers (ndim=5)
350350
elif isinstance(module, nnx.Conv) and module.kernel.ndim == 5:
351351
if module.kernel_size != (1, 1):
352-
max_logging.warn(f"Skipping merge for scanned Conv layer {nnx_path_str} with kernel size {module.kernel_size}, only 1x1 is supported for merging.")
352+
max_logging.log(f"Skipping merge for scanned Conv layer {nnx_path_str} with kernel size {module.kernel_size}, only 1x1 is supported for merging.")
353353
continue
354354

355355
lora_key_template = translate_fn(nnx_path_str) if translate_fn else None

0 commit comments

Comments (0)