[Bugfix] Allow skipping MoE in NVFP4 (fix for MTP) (#25987)

Signed-off-by: Benjamin Chislett <bchislett@nvidia.com>

Author: Benjamin Chislett
Date: 2025-10-06 16:16:30 -04:00
Committed by: GitHub
Parent: f23b4c04fd
Commit: 2161efe978

5 changed files with 18 additions and 5 deletions


@@ -1055,11 +1055,13 @@ class DeepseekV2DecoderLayer(nn.Module):
         self,
         vllm_config: VllmConfig,
         prefix: str,
+        config: Optional[DeepseekV2Config] = None,
         topk_indices_buffer: Optional[torch.Tensor] = None,
     ) -> None:
         super().__init__()
-        config = vllm_config.model_config.hf_config
+        if config is None:
+            config = vllm_config.model_config.hf_config
         model_config = vllm_config.model_config
         cache_config = vllm_config.cache_config
         quant_config = vllm_config.quant_config
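Note (not part of the diff): the new optional `config` argument lets a caller construct a decoder layer from an explicit config instead of always reading `vllm_config.model_config.hf_config`. A minimal sketch of such a call, assuming a hypothetical `draft_config` supplied by an MTP draft module; `idx` and `topk_indices_buffer` are likewise illustrative names:

    layer = DeepseekV2DecoderLayer(
        vllm_config,
        prefix=f"{prefix}.layers.{idx}",          # hypothetical prefix
        config=draft_config,                      # hypothetical: explicit config
        topk_indices_buffer=topk_indices_buffer,  # passed by keyword (see below)
    )

When `config` is None, the layer falls back to `vllm_config.model_config.hf_config`, preserving the previous behavior.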
@@ -1200,7 +1202,7 @@ class DeepseekV2Model(nn.Module):
         self.start_layer, self.end_layer, self.layers = make_layers(
             config.num_hidden_layers,
             lambda prefix: DeepseekV2DecoderLayer(
-                vllm_config, prefix, topk_indices_buffer
+                vllm_config, prefix, topk_indices_buffer=topk_indices_buffer
             ),
             prefix=f"{prefix}.layers",
         )
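The keyword change above is load-bearing: with `config` now occupying the third positional slot of `__init__`, the old positional call would silently bind the buffer to the wrong parameter. A sketch of the pitfall, with the signature abbreviated:

    # def __init__(self, vllm_config, prefix, config=None, topk_indices_buffer=None)

    DeepseekV2DecoderLayer(vllm_config, prefix, topk_indices_buffer)
    # positional: the buffer binds to `config`, and topk_indices_buffer stays None

    DeepseekV2DecoderLayer(vllm_config, prefix, topk_indices_buffer=topk_indices_buffer)
    # keyword: `config` stays None (falling back to hf_config) and the buffer is wired correctly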