[Bugfix] Fix prefix creation for Qwen3.5 (#34723)

Signed-off-by: mgoin <mgoin64@gmail.com>
Author: Michael Goin
Date: 2026-02-18 02:39:15 -05:00
Committed by: GitHub
Parent: a88b3be7c4
Commit: 909b147197

@@ -676,9 +676,10 @@ class Qwen3_5ForCausalLMBase(
         super().__init__()
         self.config = config
         self.scheduler_config = scheduler_config
-        self.model = Qwen3_5Model(
-            vllm_config=vllm_config, prefix=maybe_prefix(prefix, "model")
-        )
+        # Deal with the case where the prefix is already "language_model" since
+        # Qwen/Qwen3.5-397B-A17B has naming like: model.language_model.layers.0
+        model_prefix = prefix if "model" in prefix else "model"
+        self.model = Qwen3_5Model(vllm_config=vllm_config, prefix=model_prefix)
         if get_pp_group().is_last_rank:
             if config.tie_word_embeddings:
@@ -754,7 +755,7 @@ class Qwen3_5MoeForCausalLM(Qwen3_5ForCausalLMBase, QwenNextMixtureOfExperts):
     dummy_inputs=Qwen3VLDummyInputsBuilder,
 )
 class Qwen3_5ForConditionalGeneration(Qwen3VLForConditionalGeneration, IsHybrid):
-    def __init__(self, *, vllm_config: VllmConfig, prefix: str = ""):
+    def __init__(self, *, vllm_config: VllmConfig, prefix: str = "model"):
         # protocols have not __init__ method, so we need to use nn.Module.__init__
         nn.Module.__init__(self)
         config: Qwen3_5Config = vllm_config.model_config.hf_config
@@ -962,7 +963,7 @@ class Qwen3_5_MoeMixtureOfExperts(MixtureOfExperts):
 class Qwen3_5MoeForConditionalGeneration(
     Qwen3_5ForConditionalGeneration, Qwen3_5_MoeMixtureOfExperts
 ):
-    def __init__(self, *, vllm_config: VllmConfig, prefix: str = ""):
+    def __init__(self, *, vllm_config: VllmConfig, prefix: str = "model"):
         # protocols have not __init__ method, so we need to use nn.Module.__init__
         nn.Module.__init__(self)
         config: Qwen3_5MoeConfig = vllm_config.model_config.hf_config
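
For context, the following is a minimal, standalone sketch of why the prefix logic changed. It is an illustration, not vLLM code: maybe_prefix is re-implemented here with its assumed join-if-non-empty behavior, and the incoming prefix "model.language_model" stands in for what the conditional-generation wrapper is assumed to pass down now that its default prefix is "model" instead of "".

# Illustration only; maybe_prefix is re-implemented here with assumed behavior
# (join "prefix.name" when the prefix is non-empty, otherwise return the name).
def maybe_prefix(prefix: str, name: str) -> str:
    return f"{prefix}.{name}" if prefix else name


def old_model_prefix(prefix: str) -> str:
    # Before the fix: always append "model", so a wrapper that already passes
    # "model.language_model" produced "model.language_model.model".
    return maybe_prefix(prefix, "model")


def new_model_prefix(prefix: str) -> str:
    # After the fix: reuse the incoming prefix when it already contains "model",
    # so module names line up with checkpoint weights such as
    # model.language_model.layers.0 in Qwen/Qwen3.5-397B-A17B.
    return prefix if "model" in prefix else "model"


if __name__ == "__main__":
    # Text-only path: both versions resolve to the same prefix.
    assert old_model_prefix("") == new_model_prefix("") == "model"

    # Multimodal wrapper path (prefix assumed to arrive as "model.language_model").
    print(old_model_prefix("model.language_model"))  # model.language_model.model (mismatch)
    print(new_model_prefix("model.language_model"))  # model.language_model (matches checkpoint)

The substring check also covers a bare "language_model" prefix, since "model" is a substring of "language_model", which matches the case called out in the new code comment.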