[Bugfix] Fix torch.compile error for DP + MoE on CPU Backend (#31650)
Signed-off-by: kunzh <zhikun.wu@outlook.com>
This commit is contained in:
@@ -1899,11 +1899,11 @@ class FusedMoE(CustomOp):
             )

         post_quant_allgather = (
-            has_flashinfer_trtllm_fused_moe()
-            and self.quant_method is not None
+            self.quant_method is not None
             and self.dp_size > 1
             and self.use_ep
             and isinstance(self.quant_method, ModelOptNvFp4FusedMoE)
+            and has_flashinfer_trtllm_fused_moe()
         )
         if post_quant_allgather:
             hidden_states_to_dispatch, extra_tensors = (
Reference in New Issue
Block a user