# SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
"""Tests for the MoE grouped topk kernel
|
|
|
|
|
|
|
|
|
|
Run `pytest tests/kernels/moe/test_grouped_topk.py`.
|
|
|
|
|
"""
|
2025-10-05 15:06:22 +01:00
|
|
|
|
2025-08-25 11:47:52 -07:00
|
|
|
import pytest
|
|
|
|
|
import torch
|
|
|
|
|
|
2026-01-02 23:04:01 +08:00
|
|
|
from vllm.config import (
|
|
|
|
|
CompilationConfig,
|
|
|
|
|
VllmConfig,
|
|
|
|
|
get_cached_compilation_config,
|
|
|
|
|
set_current_vllm_config,
|
|
|
|
|
)
|
2026-01-18 11:40:49 -05:00
|
|
|
from vllm.model_executor.layers.fused_moe.router.grouped_topk_router import (
|
2025-12-17 17:43:00 +08:00
|
|
|
GroupedTopk,
|
2025-08-25 11:47:52 -07:00
|
|
|
fused_grouped_topk,
|
|
|
|
|
)
|
|
|
|
|
from vllm.platforms import current_platform
|
2026-01-05 10:34:04 +08:00
|
|
|
from vllm.utils.torch_utils import set_random_seed
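

# For orientation: grouped top-k routing (as used by DeepSeek-style MoE models)
# scores every expert, keeps only the `topk_group` highest-scoring expert
# groups, and then picks the final `topk` experts from the surviving groups.
# The helper below is a minimal illustrative sketch of that selection,
# assuming softmax scoring with no correction bias and no renormalization.
# It is a hypothetical reference (the name is ours) and is not called by the
# test, which exercises the real kernels instead.
def _reference_grouped_topk_sketch(
    gating_output: torch.Tensor,
    topk: int,
    num_expert_group: int,
    topk_group: int,
) -> tuple[torch.Tensor, torch.Tensor]:
    n_token, n_expert = gating_output.shape
    scores = gating_output.softmax(dim=-1, dtype=torch.float32)
    # Rank groups by their single best expert; keep the top `topk_group` groups.
    group_scores = scores.view(n_token, num_expert_group, -1).amax(dim=-1)
    group_idx = group_scores.topk(topk_group, dim=-1).indices
    group_mask = torch.zeros_like(group_scores).scatter_(1, group_idx, 1.0)
    # Zero the scores of experts in unselected groups, then take the final top-k.
    expert_mask = group_mask.repeat_interleave(n_expert // num_expert_group, dim=1)
    topk_weights, topk_ids = (scores * expert_mask).topk(topk, dim=-1)
    return topk_weights, topk_ids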


@pytest.mark.skipif(
    not current_platform.is_cuda(), reason="This test is skipped on non-CUDA platforms."
)
@pytest.mark.parametrize("n_token", [1, 33, 64])
@pytest.mark.parametrize("n_hidden", [1024, 2048])
@pytest.mark.parametrize("n_expert", [16])
@pytest.mark.parametrize("topk", [2])
@pytest.mark.parametrize("renormalize", [True, False])
@pytest.mark.parametrize("num_expert_group", [8])
@pytest.mark.parametrize("topk_group", [2])
@pytest.mark.parametrize("scoring_func", ["softmax", "sigmoid"])
@pytest.mark.parametrize("routed_scaling_factor", [1.0, 2.5])
@pytest.mark.parametrize("input_dtype", [torch.bfloat16, torch.float32])
@pytest.mark.parametrize("bias_dtype", [torch.float32])
def test_grouped_topk(
    monkeypatch: pytest.MonkeyPatch,
    n_token: int,
    n_hidden: int,
    n_expert: int,
    topk: int,
    renormalize: bool,
    num_expert_group: int,
    topk_group: int,
    scoring_func: str,
    routed_scaling_factor: float,
    input_dtype: torch.dtype,
    bias_dtype: torch.dtype,
):
    # Enable the grouped_topk custom op; the layer should then dispatch to
    # its CUDA forward (asserted below).
    vllm_config = VllmConfig(
        compilation_config=CompilationConfig(custom_ops=["all", "+grouped_topk"])
    )
    # Drop any cached compilation config so the override above takes effect.
    get_cached_compilation_config.cache_clear()

    set_random_seed(0)
    hidden_states = torch.randn((n_token, n_hidden), dtype=input_dtype, device="cuda")
    gating_output = torch.randn((n_token, n_expert), dtype=input_dtype, device="cuda")
    e_score_correction_bias = torch.randn((n_expert,), dtype=bias_dtype, device="cuda")
    with set_current_vllm_config(vllm_config), monkeypatch.context() as m:
        # Disable the fused path so GroupedTopk computes the baseline result.
        m.setenv("VLLM_USE_FUSED_MOE_GROUPED_TOPK", "0")
        grouped_topk = GroupedTopk(
            topk=topk,
            renormalize=renormalize,
            num_expert_group=num_expert_group,
            topk_group=topk_group,
            scoring_func=scoring_func,
            routed_scaling_factor=routed_scaling_factor,
        )
        assert grouped_topk._forward_method.__name__ == "forward_cuda"
        baseline_topk_weights, baseline_topk_ids = grouped_topk(
            hidden_states=hidden_states,
            gating_output=gating_output,
            e_score_correction_bias=e_score_correction_bias,
        )

        test_topk_weights, test_topk_ids = fused_grouped_topk(
            hidden_states=hidden_states,
            gating_output=gating_output,
            topk=topk,
            renormalize=renormalize,
            num_expert_group=num_expert_group,
            topk_group=topk_group,
            scoring_func=scoring_func,
            routed_scaling_factor=routed_scaling_factor,
            e_score_correction_bias=e_score_correction_bias,
        )

        # Renormalized weights should agree to a loose absolute tolerance;
        # the selected expert ids must match exactly in all cases.
        if renormalize:
            torch.testing.assert_close(
                baseline_topk_weights, test_topk_weights, atol=2e-2, rtol=0
            )
        torch.testing.assert_close(baseline_topk_ids, test_topk_ids, atol=0, rtol=0)
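

# Example: run only the sigmoid-scoring cases; pytest's `-k` expression
# substring-matches the generated parametrized test ids:
#   pytest tests/kernels/moe/test_grouped_topk.py -k sigmoid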