[Bugfix] Fix Basic Models Test (#34818)

Signed-off-by: Matthew Bonanni <mbonanni@redhat.com>
Co-authored-by: Robert Shaw <114415538+robertgshaw2-redhat@users.noreply.github.com>
Commit 662205d34e (parent 4fb8beefaa)
Author: Matthew Bonanni
Date: 2026-02-19 17:49:07 -05:00, committed by GitHub
14 changed files with 175 additions and 221 deletions


@@ -457,6 +457,9 @@ def dummy_hf_overrides(
     # Kimi uses `num_expert_group` instead of `n_group`.
     if n_group is None:
         n_group = getattr(text_config, "num_expert_group", None)
+    # InternS1Pro uses `router_n_groups` instead of `n_group`.
+    if n_group is None:
+        n_group = getattr(text_config, "router_n_groups", None)
     num_experts = n_group * 2 if n_group is not None else 2
     # we use three layers for Gemma-3n to check
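
The fallback chain in this hunk exists because different MoE model families expose the expert-group count under different config attribute names. Below is a minimal, self-contained sketch of that resolution logic; the helper name resolve_n_group and the SimpleNamespace config are illustrative, and only the attribute names come from the diff itself.

from types import SimpleNamespace

def resolve_n_group(text_config):
    # Try the common name first, then the model-specific aliases used by
    # Kimi (`num_expert_group`) and InternS1Pro (`router_n_groups`).
    for attr in ("n_group", "num_expert_group", "router_n_groups"):
        value = getattr(text_config, attr, None)
        if value is not None:
            return value
    return None

# Example: an InternS1Pro-style config that only defines `router_n_groups`.
cfg = SimpleNamespace(router_n_groups=4)
n_group = resolve_n_group(cfg)
num_experts = n_group * 2 if n_group is not None else 2  # -> 8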
@@ -486,12 +489,14 @@ def dummy_hf_overrides(
     # Only set MoE related config when the model has MoE layers.
     # Otherwise all models detected as MoE by _get_transformers_backend_cls.
     if model_arch_config.num_experts > 0:
+        orig_topk = getattr(text_config, "num_experts_per_tok", 2)
+        topk = min(orig_topk, 2)
         update_dict.update(
             {
                 "num_experts": num_experts,
-                "num_experts_per_tok": 2,
+                "num_experts_per_tok": topk,
                 # Kimi uses `num_experts_per_token`.
-                "num_experts_per_token": 2,
+                "num_experts_per_token": topk,
                 "num_local_experts": num_experts,
                 # Otherwise there will not be any expert layers
                 "first_k_dense_replace": 0,