[Bugfix] Fix torch.compile error for DP + MoE on CPU Backend (#31650)

Signed-off-by: kunzh <zhikun.wu@outlook.com>
This commit is contained in:
kzwrime
2026-01-06 20:06:20 +08:00
committed by GitHub
parent e0327c9db2
commit bf0f3a4638

View File

@@ -1899,11 +1899,11 @@ class FusedMoE(CustomOp):
     )
     post_quant_allgather = (
-        has_flashinfer_trtllm_fused_moe()
-        and self.quant_method is not None
+        self.quant_method is not None
         and self.dp_size > 1
         and self.use_ep
         and isinstance(self.quant_method, ModelOptNvFp4FusedMoE)
+        and has_flashinfer_trtllm_fused_moe()
     )
     if post_quant_allgather:
         hidden_states_to_dispatch, extra_tensors = (