[torch.compile] Unwrap fused_marlin_moe custom op (#26739)

Signed-off-by: Varun Sundar Rabindranath <vsundarr@redhat.com>
Co-authored-by: Varun Sundar Rabindranath <vsundarr@redhat.com>
This commit is contained in:
Varun Sundar Rabindranath
2025-10-13 22:22:16 -04:00
committed by GitHub
parent 8a0af6a561
commit 8ae169286f
10 changed files with 22 additions and 52 deletions

View File

@@ -26,6 +26,7 @@ from vllm.model_executor.layers.fused_moe.config import (
int4_w4a16_moe_quant_config,
int8_w8a16_moe_quant_config,
)
+from vllm.model_executor.layers.fused_moe.fused_marlin_moe import fused_marlin_moe
from vllm.model_executor.layers.fused_moe.fused_moe import (
fused_topk,
modular_triton_fused_moe,
@@ -724,7 +725,7 @@ def test_fused_marlin_moe(
with set_current_vllm_config(vllm_config):
torch_output = torch_moe(a, w_ref1, w_ref2, score, topk, expert_map=e_map)
-        marlin_output = torch.ops.vllm.fused_marlin_moe(
+        marlin_output = fused_marlin_moe(
a,
qweight1,
qweight2,
@@ -837,7 +838,7 @@ def test_fused_marlin_moe_with_bias(m):
with set_current_vllm_config(vllm_config):
torch_output = torch_moe(a, w_ref1, w_ref2, score, topk, b_bias1, b_bias2)
-        marlin_output = torch.ops.vllm.fused_marlin_moe(
+        marlin_output = fused_marlin_moe(
a,
qweight1,
qweight2,