Skip to content

Commit bee4f96

Browse files
committed
transformer fix
1 parent 27225eb commit bee4f96

1 file changed

Lines changed: 1 addition & 4 deletions

File tree

src/maxdiffusion/models/ltx2/transformer_ltx2.py

Lines changed: 1 addition & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -624,9 +624,6 @@ def __init__(
624624
qk_norm: str = "rms_norm_across_heads",
625625
flash_block_sizes: BlockSizes = None,
626626
flash_min_seq_length: int = 4096,
627-
video_gated_attn: bool = False,
628-
audio_gated_attn: bool = False,
629-
cross_attn_mod: bool = False,
630627
**kwargs,
631628
):
632629
self.in_channels = in_channels
@@ -676,7 +673,7 @@ def __init__(
676673
self.a2v_attention_kernel = a2v_attention_kernel
677674
self.v2a_attention_kernel = v2a_attention_kernel
678675
self.flash_min_seq_length = flash_min_seq_length
679-
self.video_gated_attn = video_gated_attn
676+
self.video_gated_attn = gated_attn
680677
self.audio_gated_attn = audio_gated_attn
681678
self.cross_attn_mod = cross_attn_mod
682679

0 commit comments

Comments (0)