[MoE Refactor][5/N] Isolate zero expert to LongCatFlash (#28891)

Signed-off-by: baonudesifeizhai <85092850+baonudesifeizhai@users.noreply.github.com>
Signed-off-by: Dongjie Zou <85092850+baonudesifeizhai@users.noreply.github.com>
Signed-off-by: baonudesifeizhai <baonudesifeizhai@gmail.com>
Signed-off-by: Robert Shaw <robertgshaw2@gmail.com>
Co-authored-by: Robert Shaw <robshaw@redhat.com>
Co-authored-by: Robert Shaw <robertgshaw2@gmail.com>
This commit is contained in:
baonudesifeizhai
2025-12-20 13:22:04 -05:00
committed by GitHub
parent 560ae9638c
commit 54c8924384
19 changed files with 264 additions and 109 deletions

View File

@@ -896,7 +896,7 @@ class Mxfp4MoEMethod(FusedMoEMethodBase):
raise NotImplementedError("EPLB is not supported for mxfp4")
if self.mxfp4_backend == Mxfp4Backend.MARLIN:
-            topk_weights, topk_ids, _ = layer.select_experts(
+            topk_weights, topk_ids = layer.select_experts(
hidden_states=x,
router_logits=router_logits,
)
@@ -990,7 +990,7 @@ class Mxfp4MoEMethod(FusedMoEMethodBase):
):
from vllm.utils.flashinfer import flashinfer_cutlass_fused_moe
-            topk_weights, topk_ids, _ = layer.select_experts(
+            topk_weights, topk_ids = layer.select_experts(
hidden_states=x,
router_logits=router_logits,
)