We read every piece of feedback, and take your input very seriously.
To see all available qualifiers, see our documentation.
1 parent a79a49c commit 436e7d1 — Copy full SHA for 436e7d1
1 file changed
src/maxdiffusion/max_utils.py
@@ -501,7 +501,7 @@ def get_flash_block_sizes(config):
501
"""Create custom flash attention BlockSizes."""
502
flash_block_sizes = None
503
if len(config.flash_block_sizes.keys()) > 0:
504
- attention_is_tokamax = "tokamax" in config.attention_kernel
+ attention_is_tokamax = "tokamax" in config.attention
505
user_block_sizes:Dict[str, int] = config.flash_block_sizes
506
if attention_is_tokamax:
507
max_logging.log("Tokamax kernel specified, Note: Tokamax only supports fused backward kernel."
0 commit comments