[Bugfix] Allow skipping MoE in NVFP4 (fix for MTP) (#25987)

Signed-off-by: Benjamin Chislett <bchislett@nvidia.com>
This commit is contained in:
Benjamin Chislett
2025-10-06 16:16:30 -04:00
committed by GitHub
parent f23b4c04fd
commit 2161efe978
5 changed files with 18 additions and 5 deletions

View File

@@ -48,7 +48,8 @@ class DeepSeekMultiTokenPredictorLayer(nn.Module):
def __init__(self, vllm_config: VllmConfig, prefix: str) -> None:
super().__init__()
-        config = vllm_config.model_config.hf_config
+        config = vllm_config.speculative_config.draft_model_config.hf_config
self.config = config
quant_config = vllm_config.quant_config
self.enorm = RMSNorm(config.hidden_size, eps=config.rms_norm_eps)
@@ -66,11 +67,15 @@ class DeepSeekMultiTokenPredictorLayer(nn.Module):
)
else:
topk_indices_buffer = None
self.shared_head = SharedHead(
config=config, prefix=prefix, quant_config=quant_config
)
self.mtp_block = DeepseekV2DecoderLayer(
-            vllm_config, prefix, topk_indices_buffer
+            vllm_config,
+            prefix,
+            config=self.config,
+            topk_indices_buffer=topk_indices_buffer,
)
def forward(