We read every piece of feedback, and take your input very seriously.
To see all available qualifiers, see our documentation.
1 parent 6503c05 · commit b90b1cf — Copy full SHA for b90b1cf
1 file changed
src/maxdiffusion/tests/ltx2_pipeline_test.py
@@ -115,7 +115,20 @@ class MockConfig:
115
rope_type = "interleaved"
116
rope_embedding_dims = 16
117
rope_use_scale = False
118
- shard_mode = "auto"
+ model_name = "gemma3-4b" # Use a valid model name
119
+ base_emb_dim = 16
120
+ base_num_query_heads = 2
121
+ head_dim = 8
122
+ num_query_heads = 2
123
+ num_kv_heads = 1
124
+ dropout_rate = 0.0
125
+ float32_qk_product = False
126
+ float32_logits = False
127
+ sliding_window_size = 128
128
+ attn_logits_soft_cap = 50.0
129
+ use_post_attn_norm = True
130
+ attention = "dot_product" # attention_kernel
131
+ quantization = "" # for configure_kv_quant
132
133
self.text_encoder = MaxTextGemma3FeatureExtractor(
134
config=MockConfig(),
0 commit comments