Skip to content

Commit 2e9f896

Browse files
committed
revert requirements
1 parent 544714b commit 2e9f896

2 files changed

Lines changed: 2 additions & 16 deletions

File tree

dependencies/requirements/generated_requirements/requirements.txt

Lines changed: 1 addition & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -2,7 +2,6 @@
22
# If you need to modify dependencies, please do so in the host requirements file and run seed-env again.
33

44
absl-py>=2.3.1
5-
accelerate>=1.13.0
65
aiofiles>=25.1.0
76
aiohappyeyeballs>=2.6.1
87
aiohttp>=3.13.3
@@ -68,7 +67,7 @@ hf-transfer>=0.1.9
6867
hf-xet>=1.4.2 ; platform_machine == 'aarch64' or platform_machine == 'amd64' or platform_machine == 'arm64' or platform_machine == 'x86_64'
6968
httpcore>=1.0.9
7069
httpx>=0.28.1
71-
huggingface-hub>=1.10.1
70+
huggingface-hub>=0.36.2
7271
humanize>=4.15.0
7372
hypothesis>=6.142.1
7473
idna>=3.11
@@ -81,7 +80,6 @@ isort>=8.0.1
8180
jaraco-functools>=4.4.0
8281
jax>=0.9.0
8382
jaxlib>=0.9.0
84-
jaxopt>=0.8.5
8583
jaxtyping>=0.3.9
8684
jinja2>=3.1.6
8785
keras>=3.13.1

src/maxdiffusion/generate_flux.py

Lines changed: 1 addition & 13 deletions
Original file line numberDiff line numberDiff line change
@@ -29,19 +29,7 @@
2929
from chex import Array
3030
from einops import rearrange
3131
from flax.linen import partitioning as nn_partitioning
32-
33-
try:
34-
from transformers import (CLIPTokenizer, FlaxCLIPTextModel, T5EncoderModel, FlaxT5EncoderModel, AutoTokenizer)
35-
except ImportError:
36-
# For transformers>=5.0, Flax models have different import paths
37-
from transformers import CLIPTokenizer, T5EncoderModel, AutoTokenizer
38-
39-
try:
40-
from transformers.models.clip.modeling_flax_clip import FlaxCLIPTextModel
41-
from transformers.models.t5.modeling_flax_t5 import FlaxT5EncoderModel
42-
except ImportError:
43-
FlaxCLIPTextModel = None
44-
FlaxT5EncoderModel = None
32+
from transformers import (CLIPTokenizer, FlaxCLIPTextModel, T5EncoderModel, FlaxT5EncoderModel, AutoTokenizer)
4533

4634
from maxdiffusion import FlaxAutoencoderKL, pyconfig, max_logging
4735
from maxdiffusion.models.flux.transformers.transformer_flux_flax import FluxTransformer2DModel

0 commit comments

Comments (0)