Skip to content

Commit b90b1cf

Browse files
committed
fix
1 parent 6503c05 commit b90b1cf

1 file changed

Lines changed: 14 additions & 1 deletion

File tree

src/maxdiffusion/tests/ltx2_pipeline_test.py

Lines changed: 14 additions & 1 deletion
Original file line number | Diff line number | Diff line change
@@ -115,7 +115,20 @@ class MockConfig:
115115
rope_type = "interleaved"
116116
rope_embedding_dims = 16
117117
rope_use_scale = False
118-
shard_mode = "auto"
118+
model_name = "gemma3-4b" # Use a valid model name
119+
base_emb_dim = 16
120+
base_num_query_heads = 2
121+
head_dim = 8
122+
num_query_heads = 2
123+
num_kv_heads = 1
124+
dropout_rate = 0.0
125+
float32_qk_product = False
126+
float32_logits = False
127+
sliding_window_size = 128
128+
attn_logits_soft_cap = 50.0
129+
use_post_attn_norm = True
130+
attention = "dot_product" # attention_kernel
131+
quantization = "" # for configure_kv_quant
119132

120133
self.text_encoder = MaxTextGemma3FeatureExtractor(
121134
config=MockConfig(),

0 commit comments

Comments (0)