[UX] Add --moe-backend arg for explicit kernel selection (#33807)
Signed-off-by: mgoin <mgoin64@gmail.com>
Co-authored-by: Robert Shaw <114415538+robertgshaw2-redhat@users.noreply.github.com>
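The diff below migrates the MoE kernel-selection tests from per-test VLLM_* environment variables to the new --moe-backend engine argument. As a minimal sketch of the resulting pattern (can_initialize and HF_OVERRIDE_TEXT are helpers defined in the modified test module; the test name, model, and backend choice here are illustrative, not part of this commit):

    def test_moe_backend_example():
        # Hypothetical example: select the MoE kernel explicitly through the
        # CLI flag instead of env vars. can_initialize is assumed to start
        # the engine with the given extra CLI args and assert it initializes.
        can_initialize(
            "deepseek-ai/DeepSeek-V3.1",
            hf_overrides=HF_OVERRIDE_TEXT,
            extra_args=["--moe-backend=deep_gemm"],
        )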
@@ -85,34 +85,34 @@ def can_initialize(
     )
 )
 def test_llama4_fp8_tensor_moe_flashinfer_cutlass(monkeypatch: pytest.MonkeyPatch):
-    monkeypatch.setenv("VLLM_USE_FLASHINFER_MOE_FP8", "1")
-    monkeypatch.setenv("VLLM_FLASHINFER_MOE_BACKEND", "throughput")
     can_initialize(
-        "nvidia/Llama-4-Scout-17B-16E-Instruct-FP8", hf_overrides=HF_OVERRIDE_MM
+        "nvidia/Llama-4-Scout-17B-16E-Instruct-FP8",
+        hf_overrides=HF_OVERRIDE_MM,
+        extra_args=["--moe-backend=flashinfer_cutlass"],
     )


 def test_llama4_fp8_tensor_moe_flashinfer_trtllm(monkeypatch: pytest.MonkeyPatch):
-    monkeypatch.setenv("VLLM_USE_FLASHINFER_MOE_FP8", "1")
-    monkeypatch.setenv("VLLM_FLASHINFER_MOE_BACKEND", "latency")
     can_initialize(
-        "nvidia/Llama-4-Scout-17B-16E-Instruct-FP8", hf_overrides=HF_OVERRIDE_MM
+        "nvidia/Llama-4-Scout-17B-16E-Instruct-FP8",
+        hf_overrides=HF_OVERRIDE_MM,
+        extra_args=["--moe-backend=flashinfer_trtllm"],
     )


 def test_llama4_nvfp4_moe_flashinfer_cutlass(monkeypatch: pytest.MonkeyPatch):
-    monkeypatch.setenv("VLLM_USE_FLASHINFER_MOE_FP4", "1")
-    monkeypatch.setenv("VLLM_FLASHINFER_MOE_BACKEND", "throughput")
     can_initialize(
-        "nvidia/Llama-4-Scout-17B-16E-Instruct-FP4", hf_overrides=HF_OVERRIDE_MM
+        "nvidia/Llama-4-Scout-17B-16E-Instruct-FP4",
+        hf_overrides=HF_OVERRIDE_MM,
+        extra_args=["--moe-backend=flashinfer_cutlass"],
     )


 def test_llama4_nvfp4_moe_flashinfer_trtllm(monkeypatch: pytest.MonkeyPatch):
-    monkeypatch.setenv("VLLM_USE_FLASHINFER_MOE_FP4", "1")
-    monkeypatch.setenv("VLLM_FLASHINFER_MOE_BACKEND", "latency")
     can_initialize(
-        "nvidia/Llama-4-Scout-17B-16E-Instruct-FP4", hf_overrides=HF_OVERRIDE_MM
+        "nvidia/Llama-4-Scout-17B-16E-Instruct-FP4",
+        hf_overrides=HF_OVERRIDE_MM,
+        extra_args=["--moe-backend=flashinfer_trtllm"],
     )

@@ -120,8 +120,11 @@ def test_llama4_nvfp4_moe_flashinfer_trtllm(monkeypatch: pytest.MonkeyPatch):


 def test_deepseek_fp8_block_moe_deep_gemm(monkeypatch: pytest.MonkeyPatch):
-    monkeypatch.setenv("VLLM_USE_DEEP_GEMM", "1")
-    can_initialize("deepseek-ai/DeepSeek-V3.1", hf_overrides=HF_OVERRIDE_TEXT)
+    can_initialize(
+        "deepseek-ai/DeepSeek-V3.1",
+        hf_overrides=HF_OVERRIDE_TEXT,
+        extra_args=["--moe-backend=deep_gemm"],
+    )


 @pytest.mark.skip(
@@ -131,27 +134,35 @@ def test_deepseek_fp8_block_moe_deep_gemm(monkeypatch: pytest.MonkeyPatch):
     )
 )
 def test_deepseek_fp8_block_moe_flashinfer_cutlass(monkeypatch: pytest.MonkeyPatch):
-    monkeypatch.setenv("VLLM_USE_FLASHINFER_MOE_FP8", "1")
-    monkeypatch.setenv("VLLM_FLASHINFER_MOE_BACKEND", "throughput")
-    can_initialize("deepseek-ai/DeepSeek-V3.1", hf_overrides=HF_OVERRIDE_TEXT)
+    can_initialize(
+        "deepseek-ai/DeepSeek-V3.1",
+        hf_overrides=HF_OVERRIDE_TEXT,
+        extra_args=["--moe-backend=flashinfer_cutlass"],
+    )


 def test_deepseek_fp8_block_moe_flashinfer_trtllm(monkeypatch: pytest.MonkeyPatch):
-    monkeypatch.setenv("VLLM_USE_FLASHINFER_MOE_FP8", "1")
-    monkeypatch.setenv("VLLM_FLASHINFER_MOE_BACKEND", "latency")
-    can_initialize("deepseek-ai/DeepSeek-V3.1", hf_overrides=HF_OVERRIDE_TEXT)
+    can_initialize(
+        "deepseek-ai/DeepSeek-V3.1",
+        hf_overrides=HF_OVERRIDE_TEXT,
+        extra_args=["--moe-backend=flashinfer_trtllm"],
+    )


 def test_deepseek_nvfp4_moe_flashinfer_cutlass(monkeypatch: pytest.MonkeyPatch):
-    monkeypatch.setenv("VLLM_USE_FLASHINFER_MOE_FP4", "1")
-    monkeypatch.setenv("VLLM_FLASHINFER_MOE_BACKEND", "throughput")
-    can_initialize("nvidia/DeepSeek-R1-0528-FP4-v2", hf_overrides=HF_OVERRIDE_TEXT)
+    can_initialize(
+        "nvidia/DeepSeek-R1-0528-FP4-v2",
+        hf_overrides=HF_OVERRIDE_TEXT,
+        extra_args=["--moe-backend=flashinfer_cutlass"],
+    )


 def test_deepseek_nvfp4_moe_flashinfer_trtllm(monkeypatch: pytest.MonkeyPatch):
-    monkeypatch.setenv("VLLM_USE_FLASHINFER_MOE_FP4", "1")
-    monkeypatch.setenv("VLLM_FLASHINFER_MOE_BACKEND", "latency")
-    can_initialize("nvidia/DeepSeek-R1-0528-FP4-v2", hf_overrides=HF_OVERRIDE_TEXT)
+    can_initialize(
+        "nvidia/DeepSeek-R1-0528-FP4-v2",
+        hf_overrides=HF_OVERRIDE_TEXT,
+        extra_args=["--moe-backend=flashinfer_trtllm"],
+    )


 ## GPT-OSS ##
@@ -184,5 +195,8 @@ def test_gptoss_eager(monkeypatch: pytest.MonkeyPatch):


 def test_qwen3_next_bf16_moe_flashinfer_trtllm(monkeypatch: pytest.MonkeyPatch):
-    monkeypatch.setenv("VLLM_USE_FLASHINFER_MOE_FP16", "1")
-    can_initialize("Qwen/Qwen3-Next-80B-A3B-Instruct", hf_overrides=HF_OVERRIDE_TEXT)
+    can_initialize(
+        "Qwen/Qwen3-Next-80B-A3B-Instruct",
+        hf_overrides=HF_OVERRIDE_TEXT,
+        extra_args=["--moe-backend=flashinfer_trtllm"],
+    )
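For reference, the legacy env-var combinations correspond to the new flag values as follows, read off the test changes above. A sketch only: the mapping name is hypothetical and the list is not exhaustive.

    # Assumed correspondence between the legacy env vars (paired with the
    # VLLM_FLASHINFER_MOE_BACKEND mode where applicable, else None) and the
    # new --moe-backend values, as read off this commit's test changes:
    LEGACY_ENV_TO_MOE_BACKEND = {
        ("VLLM_USE_FLASHINFER_MOE_FP8", "throughput"): "flashinfer_cutlass",
        ("VLLM_USE_FLASHINFER_MOE_FP8", "latency"): "flashinfer_trtllm",
        ("VLLM_USE_FLASHINFER_MOE_FP4", "throughput"): "flashinfer_cutlass",
        ("VLLM_USE_FLASHINFER_MOE_FP4", "latency"): "flashinfer_trtllm",
        ("VLLM_USE_DEEP_GEMM", None): "deep_gemm",
        ("VLLM_USE_FLASHINFER_MOE_FP16", None): "flashinfer_trtllm",
    }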