[Bugfix] Fix EP precision for Qwen3.5 and Qwen3-Next (#39181)

Signed-off-by: Song Kai <songkai05@baidu.com>
This commit is contained in:
Kai Song
2026-04-09 05:47:48 +08:00
committed by GitHub
parent 3352bf8b03
commit f3c7941ec8
2 changed files with 4 additions and 0 deletions

View File

@@ -80,6 +80,7 @@ class Qwen2MoeMLP(nn.Module):
quant_config: QuantizationConfig | None = None,
reduce_results: bool = True,
expert_gate: torch.nn.Linear | None = None,
is_sequence_parallel: bool = False,
prefix: str = "",
) -> None:
super().__init__()
@@ -88,6 +89,7 @@ class Qwen2MoeMLP(nn.Module):
[intermediate_size] * 2,
bias=False,
quant_config=quant_config,
disable_tp=is_sequence_parallel,
prefix=f"{prefix}.gate_up_proj",
)
self.down_proj = RowParallelLinear(
@@ -96,6 +98,7 @@ class Qwen2MoeMLP(nn.Module):
bias=False,
quant_config=quant_config,
reduce_results=reduce_results,
disable_tp=is_sequence_parallel,
prefix=f"{prefix}.down_proj",
)
if hidden_act != "silu":

View File

@@ -140,6 +140,7 @@ class Qwen3NextSparseMoeBlock(nn.Module):
quant_config=quant_config,
reduce_results=False,
expert_gate=self.shared_expert_gate,
is_sequence_parallel=self.is_sequence_parallel,
prefix=f"{prefix}.shared_expert",
)
else: