Skip to content

Commit d6c9842

Browse files
committed
Add qwen2 implementation
1 parent ca7e2df commit d6c9842

20 files changed

Lines changed: 894 additions & 39 deletions

File tree

README.md

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -107,6 +107,7 @@ MaxText aims to provide you with the best OSS models, whether as a reference imp
107107
* Gemma 2 (2B, 9B, 27B)
108108
* Gemma 1 (2B, 7B)
109109
* Alibaba
110+
* Qwen 2.5 (7B, 14B)
110111
* Qwen 3 MoE 2507 (235B, 480B)
111112
* Qwen 3 MoE (30B, 235B)
112113
* Qwen 3 Dense (0.6B, 1.7B, 4B, 8B, 14B, 32B)

docs/guides/checkpointing_solutions/convert_checkpoint.md

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -11,6 +11,7 @@ The following models are supported:
1111
| **Gemma2** | 2B, 9B, 27B |||||
1212
| **Gemma3** (Multimodal) | 4B, 12B, 27B |||||
1313
| **Llama3.1** | 8B, 70B, 405B |||||
14+
| **Qwen2.5** | 7B, 14B |||||
1415
| **Qwen3** | 0.6B, 4B, 8B, 14B, 32B |||||
1516
| **Qwen3 MoE** | 30B, 235B, 480B |||||
1617
| **Mixtral** | 8x7B, 8x22B |||||

src/maxtext/checkpoint_conversion/utils/hf_model_configs.py

Lines changed: 37 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -210,6 +210,41 @@
210210
query_pre_attn_scalar=144,
211211
)
212212

213+
qwen25_7b_config = transformers.Qwen2Config(
214+
vocab_size=152064,
215+
hidden_size=3584,
216+
intermediate_size=18944,
217+
num_hidden_layers=28,
218+
num_attention_heads=28,
219+
num_key_value_heads=4,
220+
hidden_act="silu",
221+
max_position_embeddings=32768,
222+
initializer_range=0.02,
223+
rms_norm_eps=1e-06,
224+
use_cache=True,
225+
rope_theta=1000000.0,
226+
tie_word_embeddings=False,
227+
torch_dtype="bfloat16",
228+
attention_bias=True,
229+
)
230+
231+
qwen25_14b_config = transformers.Qwen2Config(
232+
vocab_size=152064,
233+
hidden_size=5120,
234+
intermediate_size=13824,
235+
num_hidden_layers=48,
236+
num_attention_heads=40,
237+
num_key_value_heads=8,
238+
hidden_act="silu",
239+
max_position_embeddings=32768,
240+
rms_norm_eps=1e-06,
241+
rope_theta=1000000.0,
242+
tie_word_embeddings=False,
243+
torch_dtype="bfloat16",
244+
attention_bias=True,
245+
)
246+
247+
213248
qwen3_0_6b_config = transformers.Qwen3Config(
214249
vocab_size=151936,
215250
hidden_size=1024,
@@ -815,6 +850,8 @@
815850
"gemma3-4b": gemma3_4b_config,
816851
"gemma3-12b": gemma3_12b_config,
817852
"gemma3-27b": gemma3_27b_config,
853+
"qwen2.5-7b": qwen25_7b_config,
854+
"qwen2.5-14b": qwen25_14b_config,
818855
"qwen3-0.6b": qwen3_0_6b_config,
819856
"qwen3-4b": qwen3_4b_config,
820857
"qwen3-4b-thinking-2507": qwen3_4b_config,

src/maxtext/checkpoint_conversion/utils/hf_shape.py

Lines changed: 23 additions & 11 deletions
Original file line numberDiff line numberDiff line change
@@ -529,8 +529,8 @@ def GPT_OSS_HF_WEIGHTS_TO_SHAPE(config):
529529
return mapping
530530

531531

532-
def QWEN3_HF_WEIGHTS_TO_SHAPE(config):
533-
"""Returns mapping between HuggingFace Qwen3 weights path and the HuggingFace weights shape.
532+
def QWEN_HF_WEIGHTS_TO_SHAPE(config):
533+
"""Returns mapping between HuggingFace Qwen weights path and the HuggingFace weights shape.
534534
535535
To check this mapping, dump the huggingface model shapes:
536536
from transformers import AutoModelForCausalLM
@@ -555,6 +555,7 @@ def QWEN3_HF_WEIGHTS_TO_SHAPE(config):
555555
head_dim = config.get(
556556
"head_dim", config["hidden_size"] // config["num_attention_heads"]
557557
) # head_dim might not always be present
558+
attention_bias = config.get("attention_bias", False)
558559

559560
mapping = {
560561
"model.embed_tokens.weight": [config["vocab_size"], hidden_size],
@@ -580,6 +581,15 @@ def QWEN3_HF_WEIGHTS_TO_SHAPE(config):
580581
f"{layer_prefix}.self_attn.k_norm.weight": [head_dim],
581582
}
582583

584+
if attention_bias:
585+
layer_mapping.update(
586+
{
587+
f"{layer_prefix}.self_attn.q_proj.bias": [num_attention_heads * head_dim],
588+
f"{layer_prefix}.self_attn.k_proj.bias": [num_key_value_heads * head_dim],
589+
f"{layer_prefix}.self_attn.v_proj.bias": [num_key_value_heads * head_dim],
590+
}
591+
)
592+
583593
if num_experts > 1:
584594
# MoE MLP layers
585595
moe_ffn_intermediate_size = config.get("moe_intermediate_size")
@@ -756,18 +766,20 @@ def MIXTRAL_HF_WEIGHTS_TO_SHAPE(config):
756766
"gemma3-4b": GEMMA3_HF_WEIGHTS_TO_SHAPE,
757767
"gemma3-12b": GEMMA3_HF_WEIGHTS_TO_SHAPE,
758768
"gemma3-27b": GEMMA3_HF_WEIGHTS_TO_SHAPE,
759-
"qwen3-0.6b": QWEN3_HF_WEIGHTS_TO_SHAPE,
760-
"qwen3-4b": QWEN3_HF_WEIGHTS_TO_SHAPE,
761-
"qwen3-4b-thinking-2507": QWEN3_HF_WEIGHTS_TO_SHAPE,
762-
"qwen3-8b": QWEN3_HF_WEIGHTS_TO_SHAPE,
763-
"qwen3-14b": QWEN3_HF_WEIGHTS_TO_SHAPE,
764-
"qwen3-32b": QWEN3_HF_WEIGHTS_TO_SHAPE,
769+
"qwen2.5-7b": QWEN_HF_WEIGHTS_TO_SHAPE,
770+
"qwen2.5-14b": QWEN_HF_WEIGHTS_TO_SHAPE,
771+
"qwen3-0.6b": QWEN_HF_WEIGHTS_TO_SHAPE,
772+
"qwen3-4b": QWEN_HF_WEIGHTS_TO_SHAPE,
773+
"qwen3-4b-thinking-2507": QWEN_HF_WEIGHTS_TO_SHAPE,
774+
"qwen3-8b": QWEN_HF_WEIGHTS_TO_SHAPE,
775+
"qwen3-14b": QWEN_HF_WEIGHTS_TO_SHAPE,
776+
"qwen3-32b": QWEN_HF_WEIGHTS_TO_SHAPE,
765777
"llama3.1-8b": LLAMA31_HF_WEIGHTS_TO_SHAPE,
766778
"llama3.1-70b": LLAMA31_HF_WEIGHTS_TO_SHAPE,
767779
"llama3.1-405b": LLAMA31_HF_WEIGHTS_TO_SHAPE,
768-
"qwen3-30b-a3b": QWEN3_HF_WEIGHTS_TO_SHAPE,
769-
"qwen3-235b-a22b": QWEN3_HF_WEIGHTS_TO_SHAPE,
770-
"qwen3-480b-a35b": QWEN3_HF_WEIGHTS_TO_SHAPE,
780+
"qwen3-30b-a3b": QWEN_HF_WEIGHTS_TO_SHAPE,
781+
"qwen3-235b-a22b": QWEN_HF_WEIGHTS_TO_SHAPE,
782+
"qwen3-480b-a35b": QWEN_HF_WEIGHTS_TO_SHAPE,
771783
"deepseek3-671b": DEEPSEEK_HF_WEIGHTS_TO_SHAPE,
772784
"gpt-oss-20b": GPT_OSS_HF_WEIGHTS_TO_SHAPE,
773785
"gpt-oss-120b": GPT_OSS_HF_WEIGHTS_TO_SHAPE,

src/maxtext/checkpoint_conversion/utils/param_mapping.py

Lines changed: 65 additions & 25 deletions
Original file line numberDiff line numberDiff line change
@@ -587,11 +587,11 @@ def scale_query_layer(input_tensor, target_shape):
587587
return mapping
588588

589589

590-
def QWEN3_MAXTEXT_TO_HF_PARAM_MAPPING(config, maxtext_config, scan_layers=False):
591-
"""Returns mapping from MaxText to HuggingFace Qwen3 weight paths.
590+
def QWEN_MAXTEXT_TO_HF_PARAM_MAPPING(config, maxtext_config, scan_layers=False):
591+
"""Returns mapping from MaxText to HuggingFace Qwen weight paths.
592592
593593
This function generates a dictionary that maps parameter names from a MaxText
594-
Qwen3 checkpoint to their corresponding names in the Hugging Face format.
594+
Qwen checkpoint to their corresponding names in the Hugging Face format.
595595
It handles both dense and Mixture-of-Experts (MoE) model variants.
596596
597597
Args:
@@ -631,6 +631,15 @@ def QWEN3_MAXTEXT_TO_HF_PARAM_MAPPING(config, maxtext_config, scan_layers=False)
631631
"params-decoder-layers-self_attention-value-kernel": [
632632
f"model.layers.{i}.self_attn.v_proj.weight" for i in range(n_layers)
633633
],
634+
"params-decoder-layers-self_attention-query-bias": [
635+
f"model.layers.{i}.self_attn.q_proj.bias" for i in range(n_layers)
636+
],
637+
"params-decoder-layers-self_attention-key-bias": [
638+
f"model.layers.{i}.self_attn.k_proj.bias" for i in range(n_layers)
639+
],
640+
"params-decoder-layers-self_attention-value-bias": [
641+
f"model.layers.{i}.self_attn.v_proj.bias" for i in range(n_layers)
642+
],
634643
"params-decoder-layers-self_attention-out-kernel": [
635644
f"model.layers.{i}.self_attn.o_proj.weight" for i in range(n_layers)
636645
],
@@ -688,6 +697,9 @@ def QWEN3_MAXTEXT_TO_HF_PARAM_MAPPING(config, maxtext_config, scan_layers=False)
688697
f"params-decoder-layers_{i}-self_attention-key-kernel": f"model.layers.{i}.self_attn.k_proj.weight",
689698
f"params-decoder-layers_{i}-self_attention-value-kernel": f"model.layers.{i}.self_attn.v_proj.weight",
690699
f"params-decoder-layers_{i}-self_attention-out-kernel": f"model.layers.{i}.self_attn.o_proj.weight",
700+
f"params-decoder-layers_{i}-self_attention-query-bias": f"model.layers.{i}.self_attn.q_proj.bias",
701+
f"params-decoder-layers_{i}-self_attention-key-bias": f"model.layers.{i}.self_attn.k_proj.bias",
702+
f"params-decoder-layers_{i}-self_attention-value-bias": f"model.layers.{i}.self_attn.v_proj.bias",
691703
f"params-decoder-layers_{i}-self_attention-query_norm-scale": f"model.layers.{i}.self_attn.q_norm.weight",
692704
f"params-decoder-layers_{i}-self_attention-key_norm-scale": f"model.layers.{i}.self_attn.k_norm.weight",
693705
f"params-decoder-layers_{i}-post_self_attention_layer_norm-scale": f"model.layers.{i}.post_attention_layernorm.weight",
@@ -721,8 +733,8 @@ def QWEN3_MAXTEXT_TO_HF_PARAM_MAPPING(config, maxtext_config, scan_layers=False)
721733
return mapping
722734

723735

724-
def QWEN3_MAXTEXT_TO_HF_PARAM_HOOK_FN(config, maxtext_config, scan_layers=False, saving_to_hf=False):
725-
"""Creates parameter transformation functions for Qwen3.
736+
def QWEN_MAXTEXT_TO_HF_PARAM_HOOK_FN(config, maxtext_config, scan_layers=False, saving_to_hf=False):
737+
"""Creates parameter transformation functions for Qwen.
726738
727739
This function provides a dictionary of transformation functions (hooks) for
728740
converting Qwen3 model parameters between MaxText and Hugging Face formats.
@@ -766,6 +778,15 @@ def reshape_kernel(input_tensor, target_shape):
766778
else:
767779
return input_tensor.T.reshape(target_shape)
768780

781+
def reshape_bias(input_tensor, target_shape=None):
782+
"""Reshapes biases between MaxText 2D (heads, dim) and HF 1D (hidden)."""
783+
if saving_to_hf:
784+
# MaxText [heads, head_dim] -> HF [hidden_dim] (flatten)
785+
return input_tensor.reshape(target_shape)
786+
else:
787+
# HF [hidden_dim] -> MaxText [heads, head_dim]
788+
return input_tensor.reshape(target_shape)
789+
769790
mapping = {
770791
"params-token_embedder-embedding": pad_embedding_layer,
771792
"params-decoder-logits_dense-kernel": reshape_kernel,
@@ -780,6 +801,11 @@ def reshape_kernel(input_tensor, target_shape):
780801
"mlp-wi_1-kernel",
781802
"mlp-wo-kernel",
782803
]
804+
bias_hooks = [
805+
"self_attention-query-bias",
806+
"self_attention-key-bias",
807+
"self_attention-value-bias",
808+
]
783809
moe_kernel_hooks = [
784810
"moe_block-gate-kernel",
785811
"moe_block-wi_0-kernel",
@@ -793,13 +819,17 @@ def reshape_kernel(input_tensor, target_shape):
793819
if scan_layers:
794820
for key in kernel_hooks:
795821
mapping[f"params-decoder-layers-{key}"] = reshape_kernel
822+
for key in bias_hooks:
823+
mapping[f"params-decoder-layers-{key}"] = reshape_bias
796824
if num_experts > 1:
797825
for key in moe_kernel_hooks:
798826
mapping[f"params-decoder-layers-{key}"] = reshape_kernel
799827
else:
800828
for i in range(n_layers):
801829
for key in kernel_hooks:
802830
mapping[f"params-decoder-layers_{i}-{key}"] = reshape_kernel
831+
for key in bias_hooks:
832+
mapping[f"params-decoder-layers_{i}-{key}"] = reshape_bias
803833
if num_experts > 1:
804834
for key in moe_kernel_hooks:
805835
mapping[f"params-decoder-layers_{i}-{key}"] = reshape_kernel
@@ -1376,7 +1406,7 @@ def QWEN3_OMNI_MOE_MAXTEXT_TO_HF_PARAM_MAPPING(config, maxtext_config, scan_laye
13761406
# Text mapping with "thinker." prefix, reusing QWEN3-MOE mapping function
13771407
num_experts_text = config["thinker_config"]["text_config"].get("num_experts", 0)
13781408
n_layers_text = config["thinker_config"]["text_config"]["num_hidden_layers"]
1379-
text_mapping = QWEN3_MAXTEXT_TO_HF_PARAM_MAPPING(
1409+
text_mapping = QWEN_MAXTEXT_TO_HF_PARAM_MAPPING(
13801410
config={"num_hidden_layers": n_layers_text, "num_experts": num_experts_text},
13811411
maxtext_config=maxtext_config,
13821412
scan_layers=scan_layers,
@@ -1544,7 +1574,7 @@ def QWEN3_OMNI_MOE_MAXTEXT_TO_HF_PARAM_HOOK_FN(config, maxtext_config, scan_laye
15441574
# Text hooks, reusing QWEN3-MOE hook function
15451575
num_experts_text = config["thinker_config"]["text_config"].get("num_experts", 0)
15461576
n_layers_text = config["thinker_config"]["text_config"]["num_hidden_layers"]
1547-
text_hooks = QWEN3_MAXTEXT_TO_HF_PARAM_HOOK_FN(
1577+
text_hooks = QWEN_MAXTEXT_TO_HF_PARAM_HOOK_FN(
15481578
config={"num_hidden_layers": n_layers_text, "num_experts": num_experts_text},
15491579
maxtext_config=maxtext_config,
15501580
scan_layers=scan_layers,
@@ -2332,18 +2362,23 @@ def pad_hf_embedding_layer(input_tensor, target_shape):
23322362
"gemma3-4b": GEMMA3_MAXTEXT_TO_HF_PARAM_MAPPING,
23332363
"gemma3-12b": GEMMA3_MAXTEXT_TO_HF_PARAM_MAPPING,
23342364
"gemma3-27b": GEMMA3_MAXTEXT_TO_HF_PARAM_MAPPING,
2335-
"qwen3-0.6b": QWEN3_MAXTEXT_TO_HF_PARAM_MAPPING,
2336-
"qwen3-4b": QWEN3_MAXTEXT_TO_HF_PARAM_MAPPING,
2337-
"qwen3-4b-thinking-2507": QWEN3_MAXTEXT_TO_HF_PARAM_MAPPING,
2338-
"qwen3-8b": QWEN3_MAXTEXT_TO_HF_PARAM_MAPPING,
2339-
"qwen3-14b": QWEN3_MAXTEXT_TO_HF_PARAM_MAPPING,
2340-
"qwen3-32b": QWEN3_MAXTEXT_TO_HF_PARAM_MAPPING,
2365+
"qwen2.5-0.5b": QWEN_MAXTEXT_TO_HF_PARAM_MAPPING,
2366+
"qwen2.5-1.5b": QWEN_MAXTEXT_TO_HF_PARAM_MAPPING,
2367+
"qwen2.5-3b": QWEN_MAXTEXT_TO_HF_PARAM_MAPPING,
2368+
"qwen2.5-7b": QWEN_MAXTEXT_TO_HF_PARAM_MAPPING,
2369+
"qwen2.5-14b": QWEN_MAXTEXT_TO_HF_PARAM_MAPPING,
2370+
"qwen3-0.6b": QWEN_MAXTEXT_TO_HF_PARAM_MAPPING,
2371+
"qwen3-4b": QWEN_MAXTEXT_TO_HF_PARAM_MAPPING,
2372+
"qwen3-4b-thinking-2507": QWEN_MAXTEXT_TO_HF_PARAM_MAPPING,
2373+
"qwen3-8b": QWEN_MAXTEXT_TO_HF_PARAM_MAPPING,
2374+
"qwen3-14b": QWEN_MAXTEXT_TO_HF_PARAM_MAPPING,
2375+
"qwen3-32b": QWEN_MAXTEXT_TO_HF_PARAM_MAPPING,
23412376
"llama3.1-8b": LLAMA31_MAXTEXT_TO_HF_PARAM_MAPPING,
23422377
"llama3.1-70b": LLAMA31_MAXTEXT_TO_HF_PARAM_MAPPING,
23432378
"llama3.1-405b": LLAMA31_MAXTEXT_TO_HF_PARAM_MAPPING,
2344-
"qwen3-30b-a3b": QWEN3_MAXTEXT_TO_HF_PARAM_MAPPING,
2345-
"qwen3-235b-a22b": QWEN3_MAXTEXT_TO_HF_PARAM_MAPPING,
2346-
"qwen3-coder-480b-a35b": QWEN3_MAXTEXT_TO_HF_PARAM_MAPPING,
2379+
"qwen3-30b-a3b": QWEN_MAXTEXT_TO_HF_PARAM_MAPPING,
2380+
"qwen3-235b-a22b": QWEN_MAXTEXT_TO_HF_PARAM_MAPPING,
2381+
"qwen3-coder-480b-a35b": QWEN_MAXTEXT_TO_HF_PARAM_MAPPING,
23472382
"deepseek3-671b": DEEPSEEK_MAXTEXT_TO_HF_PARAM_MAPPING,
23482383
"gpt-oss-20b": GPT_OSS_MAXTEXT_TO_HF_PARAM_MAPPING,
23492384
"gpt-oss-120b": GPT_OSS_MAXTEXT_TO_HF_PARAM_MAPPING,
@@ -2364,18 +2399,23 @@ def pad_hf_embedding_layer(input_tensor, target_shape):
23642399
"gemma3-4b": GEMMA3_MAXTEXT_TO_HF_PARAM_HOOK_FN,
23652400
"gemma3-12b": GEMMA3_MAXTEXT_TO_HF_PARAM_HOOK_FN,
23662401
"gemma3-27b": GEMMA3_MAXTEXT_TO_HF_PARAM_HOOK_FN,
2367-
"qwen3-0.6b": QWEN3_MAXTEXT_TO_HF_PARAM_HOOK_FN,
2368-
"qwen3-4b": QWEN3_MAXTEXT_TO_HF_PARAM_HOOK_FN,
2369-
"qwen3-4b-thinking-2507": QWEN3_MAXTEXT_TO_HF_PARAM_HOOK_FN,
2370-
"qwen3-8b": QWEN3_MAXTEXT_TO_HF_PARAM_HOOK_FN,
2371-
"qwen3-14b": QWEN3_MAXTEXT_TO_HF_PARAM_HOOK_FN,
2372-
"qwen3-32b": QWEN3_MAXTEXT_TO_HF_PARAM_HOOK_FN,
2402+
"qwen2.5-0.5b": QWEN_MAXTEXT_TO_HF_PARAM_HOOK_FN,
2403+
"qwen2.5-1.5b": QWEN_MAXTEXT_TO_HF_PARAM_HOOK_FN,
2404+
"qwen2.5-3b": QWEN_MAXTEXT_TO_HF_PARAM_HOOK_FN,
2405+
"qwen2.5-7b": QWEN_MAXTEXT_TO_HF_PARAM_HOOK_FN,
2406+
"qwen2.5-14b": QWEN_MAXTEXT_TO_HF_PARAM_HOOK_FN,
2407+
"qwen3-0.6b": QWEN_MAXTEXT_TO_HF_PARAM_HOOK_FN,
2408+
"qwen3-4b": QWEN_MAXTEXT_TO_HF_PARAM_HOOK_FN,
2409+
"qwen3-4b-thinking-2507": QWEN_MAXTEXT_TO_HF_PARAM_HOOK_FN,
2410+
"qwen3-8b": QWEN_MAXTEXT_TO_HF_PARAM_HOOK_FN,
2411+
"qwen3-14b": QWEN_MAXTEXT_TO_HF_PARAM_HOOK_FN,
2412+
"qwen3-32b": QWEN_MAXTEXT_TO_HF_PARAM_HOOK_FN,
23732413
"llama3.1-8b": LLAMA31_MAXTEXT_TO_HF_PARAM_HOOK_FN,
23742414
"llama3.1-70b": LLAMA31_MAXTEXT_TO_HF_PARAM_HOOK_FN,
23752415
"llama3.1-405b": LLAMA31_MAXTEXT_TO_HF_PARAM_HOOK_FN,
2376-
"qwen3-30b-a3b": QWEN3_MAXTEXT_TO_HF_PARAM_HOOK_FN,
2377-
"qwen3-235b-a22b": QWEN3_MAXTEXT_TO_HF_PARAM_HOOK_FN,
2378-
"qwen3-coder-480b-a35b": QWEN3_MAXTEXT_TO_HF_PARAM_HOOK_FN,
2416+
"qwen3-30b-a3b": QWEN_MAXTEXT_TO_HF_PARAM_HOOK_FN,
2417+
"qwen3-235b-a22b": QWEN_MAXTEXT_TO_HF_PARAM_HOOK_FN,
2418+
"qwen3-coder-480b-a35b": QWEN_MAXTEXT_TO_HF_PARAM_HOOK_FN,
23792419
"deepseek3-671b": DEEPSEEK_MAXTEXT_TO_HF_PARAM_HOOK_FN,
23802420
"gpt-oss-20b": GPT_OSS_TO_HF_PARAM_HOOK_FN,
23812421
"gpt-oss-120b": GPT_OSS_TO_HF_PARAM_HOOK_FN,

src/maxtext/common/common_types.py

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -92,6 +92,7 @@ class DecoderBlockType(enum.Enum):
9292
GEMMA = "gemma"
9393
GEMMA2 = "gemma2"
9494
GEMMA3 = "gemma3"
95+
QWEN2 = "qwen2"
9596
QWEN3 = "qwen3"
9697
QWEN3_MOE = "qwen3_moe"
9798
QWEN3_NEXT = "qwen3_next"
Lines changed: 34 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,34 @@
1+
# Copyright 2023–2026 Google LLC
2+
#
3+
# Licensed under the Apache License, Version 2.0 (the "License");
4+
# you may not use this file except in compliance with the License.
5+
# You may obtain a copy of the License at
6+
#
7+
# https://www.apache.org/licenses/LICENSE-2.0
8+
#
9+
# Unless required by applicable law or agreed to in writing, software
10+
# distributed under the License is distributed on an "AS IS" BASIS,
11+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12+
# See the License for the specific language governing permissions and
13+
# limitations under the License.
14+
15+
# Qwen 2.5 14B Instruct Configuration
16+
# https://huggingface.co/Qwen/Qwen2.5-14B-Instruct
17+
18+
base_emb_dim: 5120
19+
base_num_query_heads: 40
20+
base_num_kv_heads: 8
21+
base_mlp_dim: 13824
22+
base_num_decoder_layers: 48
23+
head_dim: 128
24+
mlp_activations: ["silu", "linear"]
25+
vocab_size: 152064
26+
decoder_block: "qwen2"
27+
normalization_layer_epsilon: 1.0e-6
28+
rope_max_timescale: 1000000.0
29+
use_qk_norm: False
30+
# Bias for q, k, v proj.
31+
attention_bias: True
32+
logits_via_embedding: False
33+
normalize_embedding_logits: False
34+
tokenizer_type: "huggingface"
Lines changed: 34 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,34 @@
1+
# Copyright 2023–2026 Google LLC
2+
#
3+
# Licensed under the Apache License, Version 2.0 (the "License");
4+
# you may not use this file except in compliance with the License.
5+
# You may obtain a copy of the License at
6+
#
7+
# https://www.apache.org/licenses/LICENSE-2.0
8+
#
9+
# Unless required by applicable law or agreed to in writing, software
10+
# distributed under the License is distributed on an "AS IS" BASIS,
11+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12+
# See the License for the specific language governing permissions and
13+
# limitations under the License.
14+
15+
# Qwen 2.5 7B Instruct Configuration
16+
# https://huggingface.co/Qwen/Qwen2.5-7B-Instruct
17+
18+
base_emb_dim: 3584
19+
base_num_query_heads: 28
20+
base_num_kv_heads: 4
21+
base_mlp_dim: 18944
22+
base_num_decoder_layers: 28
23+
head_dim: 128
24+
mlp_activations: ["silu", "linear"]
25+
vocab_size: 152064
26+
decoder_block: "qwen2"
27+
normalization_layer_epsilon: 1.0e-6
28+
rope_max_timescale: 1000000.0
29+
use_qk_norm: False
30+
# Bias for q, k, v proj.
31+
attention_bias: True
32+
logits_via_embedding: False
33+
normalize_embedding_logits: False
34+
tokenizer_type: "huggingface"

0 commit comments

Comments
 (0)