[Refactor] Replace activation: str with MoEActivation enum (#33843)
Signed-off-by: mgoin <mgoin64@gmail.com>
Signed-off-by: Michael Goin <mgoin64@gmail.com>
@@ -15,6 +15,7 @@ from torch._prims_common import TensorLikeType
 from tests.kernels.quant_utils import native_w8a8_block_matmul
 from vllm.model_executor.custom_op import op_registry
 from vllm.model_executor.layers.activation import SiluAndMul
+from vllm.model_executor.layers.fused_moe.activation import MoEActivation
 from vllm.model_executor.layers.fused_moe.utils import moe_kernel_quantize_input
 from vllm.utils.torch_utils import make_tensor_with_pad
 from vllm.v1.attention.backend import AttentionType
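Note: the MoEActivation enum itself is not part of this diff. As a minimal sketch of the shape the call sites below rely on (the member set and the custom_op_name mapping are assumptions, not vLLM's actual definition):

from enum import Enum

class MoEActivation(Enum):
    # Hypothetical members; the diff only shows that SILU exists.
    SILU = "silu"
    GELU = "gelu"

    @property
    def custom_op_name(self) -> str:
        # Assumed mapping from enum member to op_registry key; SILU must
        # map to "silu_and_mul", the old string default this commit replaces.
        return {"silu": "silu_and_mul", "gelu": "gelu_and_mul"}[self.value]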
@@ -840,7 +841,7 @@ def torch_experts(
     per_act_token_quant=False,
     block_shape: list[int] | None = None,
     apply_router_weights_on_input: bool = False,
-    activation: str = "silu_and_mul",
+    activation: MoEActivation = MoEActivation.SILU,
 ) -> torch.Tensor:
     assert (
         global_num_experts == -1
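A caller migrating to the new signature only swaps the activation argument. In this sketch the positional arguments (a, w1, w2, topk_weight, topk_ids) are illustrative assumptions about the rest of torch_experts, which the hunk does not show:

# Before: torch_experts(a, w1, w2, topk_weight, topk_ids, activation="silu_and_mul")
out = torch_experts(
    a, w1, w2, topk_weight, topk_ids,
    activation=MoEActivation.SILU,  # enum member instead of a raw registry key
)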
@@ -883,7 +884,7 @@ def torch_experts(
 
     f32 = torch.float32
 
-    act = op_registry[activation]
+    act = op_registry[activation.custom_op_name]
 
     for i in range(num_experts):
         mask = topk_ids == i
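Under the enum sketched above, the new lookup resolves to the same registry entry as the old string key, so the default behavior is unchanged:

# Both forms hit the same op_registry entry for the default activation,
# assuming SILU.custom_op_name == "silu_and_mul" as sketched earlier.
act_old = op_registry["silu_and_mul"]
act_new = op_registry[MoEActivation.SILU.custom_op_name]
assert act_old is act_new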
@@ -973,7 +974,7 @@ def torch_moe(
     b_bias2: torch.Tensor | None = None,
     global_num_experts: int = -1,
     expert_map: torch.Tensor | None = None,
-    activation: str = "silu_and_mul",
+    activation: MoEActivation = MoEActivation.SILU,
 ) -> torch.Tensor:
     score = torch.softmax(score, dim=-1, dtype=torch.float32)
     topk_weight, topk_ids = torch.topk(score, topk)
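The routing code visible in this hunk is untouched by the refactor. As a standalone illustration of what the softmax-then-topk step computes (the logits here are made up):

import torch

score = torch.tensor([[0.1, 2.0, 0.5, 1.0]])  # router logits: 1 token, 4 experts
probs = torch.softmax(score, dim=-1, dtype=torch.float32)
topk_weight, topk_ids = torch.topk(probs, k=2)
# topk_ids == tensor([[1, 3]]): the two highest-probability experts for the token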