[Model] Add MoE support for NemotronH (#25863)
Signed-off-by: Tomer Asida <57313761+tomeras91@users.noreply.github.com>
@@ -185,6 +185,15 @@ class NemotronHConfig(PretrainedConfig):
         mamba_proj_bias=False,
         mamba_chunk_size=256,
         rescale_prenorm_residual=True,
+        n_routed_experts=8,
+        n_shared_experts=1,
+        moe_intermediate_size=7688,
+        moe_shared_expert_intermediate_size=7688,
+        num_experts_per_tok=2,
+        routed_scaling_factor=1.0,
+        n_group=1,
+        topk_group=1,
+        norm_topk_prob=True,
         **kwargs,
     ):
         self.vocab_size = vocab_size
@@ -241,6 +250,15 @@ class NemotronHConfig(PretrainedConfig):
         self.mamba_proj_bias = mamba_proj_bias
         self.chunk_size = mamba_chunk_size
         self.rescale_prenorm_residual = rescale_prenorm_residual
+        self.n_routed_experts = n_routed_experts
+        self.n_shared_experts = n_shared_experts
+        self.moe_intermediate_size = moe_intermediate_size
+        self.moe_shared_expert_intermediate_size = moe_shared_expert_intermediate_size  # noqa: E501
+        self.num_experts_per_tok = num_experts_per_tok
+        self.routed_scaling_factor = routed_scaling_factor
+        self.n_group = n_group
+        self.topk_group = topk_group
+        self.norm_topk_prob = norm_topk_prob
 
         super().__init__(
             pad_token_id=pad_token_id,
@@ -258,5 +276,7 @@ class NemotronHConfig(PretrainedConfig):
             else "attention"
             if self.hybrid_override_pattern[i] == "*"
             else "mlp"
+            if self.hybrid_override_pattern[i] == "-"
+            else "moe"
             for i in range(self.num_hidden_layers)
         ]
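For context, the last hunk extends the per-layer block-type mapping so that pattern characters other than "*" (attention) and "-" (MLP) now fall through to "moe". Below is a minimal standalone sketch of that mapping, assuming the existing NemotronH convention that "M" marks a Mamba layer; the helper name and the "E" character used here for MoE layers are illustrative and not taken from the diff.

# Sketch of the layer-type mapping after this change (illustrative only;
# the attribute the comprehension is assigned to is not visible in the diff).
# Assumed pattern convention: "M" = Mamba, "*" = attention, "-" = MLP,
# anything else (e.g. a hypothetical "E") now resolves to "moe".
def layers_block_type(hybrid_override_pattern: str) -> list[str]:
    return [
        "mamba"
        if ch == "M"
        else "attention"
        if ch == "*"
        else "mlp"
        if ch == "-"
        else "moe"
        for ch in hybrid_override_pattern
    ]

print(layers_block_type("M*-ME-M*E"))
# ['mamba', 'attention', 'mlp', 'mamba', 'moe', 'mlp', 'mamba', 'attention', 'moe']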
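The new constructor arguments describe a routed-experts block with a shared expert (n_routed_experts, n_shared_experts, moe_intermediate_size, moe_shared_expert_intermediate_size) and a top-k router (num_experts_per_tok, n_group, topk_group, norm_topk_prob, routed_scaling_factor). The model's actual router is not part of this diff; the following is only a generic sketch of how such knobs are conventionally applied to router scores. Grouped selection (n_group, topk_group) is omitted because the new defaults of 1 make grouping a no-op.

import math

# Generic top-k routing sketch (not NemotronH's implementation): softmax the
# router logits, keep the top `num_experts_per_tok` experts, optionally
# renormalize the kept weights, then apply `routed_scaling_factor`.
def route(logits: list[float],
          num_experts_per_tok: int = 2,
          norm_topk_prob: bool = True,
          routed_scaling_factor: float = 1.0) -> dict[int, float]:
    m = max(logits)
    exps = [math.exp(x - m) for x in logits]
    total = sum(exps)
    probs = [e / total for e in exps]
    topk = sorted(range(len(probs)), key=probs.__getitem__, reverse=True)
    topk = topk[:num_experts_per_tok]
    weights = {i: probs[i] for i in topk}
    if norm_topk_prob:
        s = sum(weights.values())
        weights = {i: w / s for i, w in weights.items()}
    return {i: w * routed_scaling_factor for i, w in weights.items()}

# 8 routed experts (n_routed_experts=8), 2 active per token, as in the new defaults.
print(route([0.1, 2.0, -1.0, 0.3, 1.5, 0.0, -0.5, 0.7]))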