Fix quantized Falcon-H1 model loading issues (#32728)

Signed-off-by: Shengliang Xu <shengliangx@nvidia.com>
Co-authored-by: Cyrus Leung <tlleungac@connect.ust.hk>
Authored by Shengliang Xu on 2026-02-02 22:31:27 -08:00; committed by GitHub
parent 4c4b6f7a97, commit f1cb9b5544


@@ -35,7 +35,10 @@ from vllm.model_executor.layers.vocab_parallel_embedding import (
     ParallelLMHead,
     VocabParallelEmbedding,
 )
-from vllm.model_executor.model_loader.weight_utils import default_weight_loader
+from vllm.model_executor.model_loader.weight_utils import (
+    default_weight_loader,
+    maybe_remap_kv_scale_name,
+)
 from vllm.sequence import IntermediateTensors
 from vllm.transformers_utils.config import set_default_rope_theta
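
This hunk pulls in maybe_remap_kv_scale_name from vLLM's weight_utils. The helper translates kv-cache scale names as stored in quantized checkpoints into the names the model actually registers, and returns None when no matching parameter exists. A minimal sketch of the assumed behavior; the suffix table below is illustrative only, the real implementation lives in vllm/model_executor/model_loader/weight_utils.py:

def remap_kv_scale_name_sketch(name: str, params_dict: dict) -> str | None:
    # Hypothetical mapping: checkpoints often store scales under the
    # projection name ("...k_proj.k_scale"), while vLLM registers them
    # on the inner Attention module ("...attn.k_scale").
    suffix_map = {
        ".k_proj.k_scale": ".attn.k_scale",
        ".v_proj.v_scale": ".attn.v_scale",
    }
    for old_suffix, new_suffix in suffix_map.items():
        if name.endswith(old_suffix):
            remapped = name.replace(old_suffix, new_suffix)
            # None signals "skip this weight" to the caller.
            return remapped if remapped in params_dict else None
    return name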
@@ -278,6 +281,7 @@ class FalconH1AttentionDecoderLayer(nn.Module):
             self.scaling,
             num_kv_heads=self.num_kv_heads,
             cache_config=cache_config,
+            quant_config=quant_config,
             prefix=f"{prefix}.attn",
         )
         self.key_multiplier = config.key_multiplier
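
Threading quant_config into the attention layer is the core of the fix on the attention path: vLLM's Attention consults the quantization config to decide whether to create kv-cache scale parameters, so without it a quantized checkpoint's k_scale/v_scale entries have no destination. A simplified, hypothetical decoder-layer skeleton showing the forwarding (argument names follow the diff; the surrounding module is an assumption, not Falcon-H1's real layer):

import torch.nn as nn
from vllm.attention import Attention

class AttentionLayerSketch(nn.Module):
    def __init__(self, config, cache_config=None, quant_config=None, prefix=""):
        super().__init__()
        head_dim = config.hidden_size // config.num_attention_heads
        self.attn = Attention(
            config.num_attention_heads,
            head_dim,
            head_dim**-0.5,
            num_kv_heads=config.num_key_value_heads,
            cache_config=cache_config,
            quant_config=quant_config,  # the one-line fix in this hunk
            prefix=f"{prefix}.attn",
        )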
@@ -360,7 +364,9 @@ class FalconH1ParallelHybrid(nn.Module):
         self.attention_in_multiplier = config.attention_in_multiplier
         self.attn_out_multiplier = config.attention_out_multiplier
-        self.feed_forward = FalconH1MLP(config, prefix=f"{prefix}.feed_forward")
+        self.feed_forward = FalconH1MLP(
+            config, quant_config=quant_config, prefix=f"{prefix}.feed_forward"
+        )
         self.input_layernorm = RMSNorm(config.hidden_size, eps=config.rms_norm_eps)
         self.pre_ff_layernorm = RMSNorm(config.hidden_size, eps=config.rms_norm_eps)
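
The MLP fix is analogous: FalconH1MLP previously built its parallel linear layers without a quantization config, so quantized weight formats and their scale parameters were never set up. A hedged sketch of an MLP that forwards quant_config; it omits Falcon-H1 specifics such as the multipliers seen elsewhere in this diff:

import torch.nn as nn
from vllm.model_executor.layers.linear import (
    MergedColumnParallelLinear,
    RowParallelLinear,
)

class MLPSketch(nn.Module):
    def __init__(self, config, quant_config=None, prefix=""):
        super().__init__()
        # With quant_config set, these layers create quantization-aware
        # weights and scales matching a quantized checkpoint.
        self.gate_up_proj = MergedColumnParallelLinear(
            config.hidden_size,
            [config.intermediate_size] * 2,
            bias=False,
            quant_config=quant_config,
            prefix=f"{prefix}.gate_up_proj",
        )
        self.down_proj = RowParallelLinear(
            config.intermediate_size,
            config.hidden_size,
            bias=False,
            quant_config=quant_config,
            prefix=f"{prefix}.down_proj",
        )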
@@ -647,6 +653,12 @@ class FalconH1ForCausalLM(
             if "mamba" in name:
                 name = name.replace("mamba", "mamba.mamba")
+            if "scale" in name:
+                # Remapping the name of kv-scale.
+                name = maybe_remap_kv_scale_name(name, params_dict)
+                if name is None:
+                    continue
             for param_name, weight_name, shard_id in stacked_params_mapping:
                 if weight_name not in name:
                     continue
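
The remapping runs before the stacked-parameter matching, so scale entries are renamed (or skipped) before any shard logic sees them. A hedged walk-through of the skip semantics; the standalone function below is an illustration, and params_dict stands in for dict(self.named_parameters()):

from vllm.model_executor.model_loader.weight_utils import (
    maybe_remap_kv_scale_name,
)

def resolve_scale_name(name: str, params_dict: dict) -> str | None:
    # Mirrors the new branch in load_weights.
    if "scale" in name:
        name = maybe_remap_kv_scale_name(name, params_dict)
        if name is None:
            # No matching parameter in this model: the loader's
            # `continue` drops the entry instead of raising a KeyError.
            return None
    return name

Returning None here corresponds to the loop's `continue`, which presumably is what let quantized Falcon-H1 checkpoints fail to load before this fix: scale entries that no registered parameter matched had no skip path.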