[Bugfix] Define router_logits_dtype for remaining MoE models (#33737)

Signed-off-by: mgoin <mgoin64@gmail.com>
Authored by Michael Goin on 2026-02-04 00:24:14 -05:00; committed by GitHub
parent 2647163674
commit eb5ed20743
6 changed files with 9 additions and 4 deletions
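
For context: router_logits_dtype controls the precision in which the router (gate) logits are evaluated before expert selection, so that softmax/top-k routing stays numerically stable even when the rest of the model runs in bf16/fp16. The sketch below is illustrative only, assuming a FusedMoE-like layer; ToyRouter and its parameters are hypothetical and not vLLM's implementation.

# Illustrative sketch only -- not vLLM's FusedMoE.
import torch
import torch.nn as nn

class ToyRouter(nn.Module):
    def __init__(self, hidden_size: int, num_experts: int, top_k: int,
                 router_logits_dtype: torch.dtype = torch.float32):
        super().__init__()
        self.gate = nn.Linear(hidden_size, num_experts, bias=False)
        self.top_k = top_k
        self.router_logits_dtype = router_logits_dtype

    def forward(self, hidden_states: torch.Tensor):
        # Cast the gate output to the requested dtype (e.g. float32) before
        # softmax/top-k, so expert selection is not done in low precision.
        logits = self.gate(hidden_states).to(self.router_logits_dtype)
        weights = torch.softmax(logits, dim=-1)
        topk_weights, topk_ids = torch.topk(weights, self.top_k, dim=-1)
        return topk_weights, topk_ids

# Example usage: the module runs in bf16, but routing happens in float32.
router = ToyRouter(hidden_size=64, num_experts=8, top_k=2).to(torch.bfloat16)
x = torch.randn(2, 64, dtype=torch.bfloat16)
topk_weights, topk_ids = router(x)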

View File

@@ -142,6 +142,7 @@ class AfmoeMoE(nn.Module):
e_score_correction_bias=self.expert_bias,
enable_eplb=self.enable_eplb,
num_redundant_experts=self.n_redundant_experts,
+ router_logits_dtype=torch.float32,
)
def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:

View File

@@ -300,6 +300,7 @@ class BailingMoE(nn.Module):
num_expert_group=self.n_group,
topk_group=self.topk_group,
use_grouped_topk=self.use_grouped_topk,
+ router_logits_dtype=self.router_dtype,
)
def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
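
BailingMoE passes its own self.router_dtype here. As a rough, hypothetical illustration (not vLLM's actual code), a field like that is typically resolved from a config-specified name into a torch.dtype before being handed to FusedMoE:

# Hypothetical helper -- illustrates resolving a router dtype from a config string.
import torch

_ROUTER_DTYPES = {"fp32": torch.float32, "bf16": torch.bfloat16, "fp16": torch.float16}

def resolve_router_dtype(name: str | None, default: torch.dtype = torch.float32) -> torch.dtype:
    # Fall back to the default when the config does not name a dtype.
    if name is None:
        return default
    return _ROUTER_DTYPES[name]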

View File

@@ -71,7 +71,6 @@ class FlexOlmoMoE(nn.Module):
prefix=f"{prefix}.gate",
)
- # Gate always runs at half / full precision for now.
self.experts = FusedMoE(
num_experts=hf_config.num_experts,
top_k=hf_config.num_experts_per_tok,
@@ -82,6 +81,7 @@ class FlexOlmoMoE(nn.Module):
quant_config=None,
tp_size=tp_size,
prefix=f"{prefix}.experts",
+ router_logits_dtype=torch.float32,
)
self.top_k = hf_config.num_experts_per_tok

View File

@@ -236,9 +236,9 @@ class FlashMLP(nn.Module):
class LongcatRouter(nn.Module):
def __init__(
self,
- config,
- zero_expert_num=0,
- rounter_params_dtype=torch.bfloat16,
+ config: FlashConfig,
+ zero_expert_num: int,
+ rounter_params_dtype: torch.dtype,
prefix: str = "",
):
super().__init__()
@@ -309,6 +309,7 @@ class LongcatMoe(nn.Module):
prefix=f"{prefix}.experts",
enable_eplb=enable_eplb,
routed_scaling_factor=config.routed_scaling_factor,
+ router_logits_dtype=self.rounter_params_dtype,
)
def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:

View File

@@ -174,6 +174,7 @@ class MiMoV2MoE(nn.Module):
num_expert_group=config.n_group,
topk_group=config.topk_group,
scoring_func="sigmoid",
+ router_logits_dtype=self.gate_dtype,
)
def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:

View File

@@ -388,6 +388,7 @@ class FusedMoEBlock(nn.Module):
routed_scaling_factor=config.moe_router_scaling_factor,
enable_eplb=self.enable_eplb,
num_redundant_experts=self.n_redundant_experts,
+ router_logits_dtype=torch.float32,
)
def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: