[Doc] Add developer guide for CustomOp (#30886)
Signed-off-by: shen-shanshan <467638484@qq.com>
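The hunks below add mkdocs snippet markers (`# --8<-- [start:<name>]` / `# --8<-- [end:<name>]`) around each `CustomOp` subclass so the new developer guide can embed those exact regions. The pattern being documented is the one visible in the context lines: register the op under a name with `@CustomOp.register(...)`, then provide per-backend `forward_*` implementations. A minimal sketch of that pattern, assuming the `CustomOp` base from `vllm.model_executor.custom_op` (the op name, class, and method bodies here are illustrative, not part of this diff):

```python
import torch
import torch.nn.functional as F

from vllm.model_executor.custom_op import CustomOp


@CustomOp.register("my_silu_and_mul")  # hypothetical name, for illustration only
class MySiluAndMul(CustomOp):
    """Gated SiLU: split the last dim in half and compute silu(a) * b."""

    def forward_native(self, x: torch.Tensor) -> torch.Tensor:
        # Pure-PyTorch reference path; always available.
        d = x.shape[-1] // 2
        return F.silu(x[..., :d]) * x[..., d:]

    def forward_cuda(self, x: torch.Tensor) -> torch.Tensor:
        # A platform-specific override would normally call a fused kernel;
        # falling back to the native path keeps this sketch runnable.
        return self.forward_native(x)
```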
@@ -22,6 +22,7 @@ from vllm.utils.collection_utils import LazyDict
logger = init_logger(__name__)


# --8<-- [start:fatrelu_and_mul]
@CustomOp.register("fatrelu_and_mul")
class FatreluAndMul(CustomOp):
    """An activation function for FATReLU.

@@ -35,6 +36,8 @@ class FatreluAndMul(CustomOp):
        return: (num_tokens, d) or (batch_size, seq_len, d)
    """

    # --8<-- [end:fatrelu_and_mul]

    def __init__(self, threshold: float = 0.0):
        super().__init__()
        self.threshold = threshold

@@ -58,6 +61,7 @@ class FatreluAndMul(CustomOp):
        return out


# --8<-- [start:silu_and_mul]
@CustomOp.register("silu_and_mul")
class SiluAndMul(CustomOp):
    """An activation function for SwiGLU.

@@ -69,6 +73,8 @@ class SiluAndMul(CustomOp):
        return: (num_tokens, d) or (batch_size, seq_len, d)
    """

    # --8<-- [end:silu_and_mul]

    def __init__(self):
        super().__init__()
        if current_platform.is_cuda_alike():

@@ -101,6 +107,7 @@ class SiluAndMul(CustomOp):
        return out
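The `SiluAndMul` docstring above fixes the shape contract the guide relies on: the input stacks the gate and up projections along the last dimension, and the op returns a tensor of half that width. A hedged usage sketch (sizes arbitrary; depending on the vLLM version, constructing layers outside an engine may require an active vLLM config context):

```python
import torch

from vllm.model_executor.layers.activation import SiluAndMul

# Per the docstring: (num_tokens, 2 * d) in, (num_tokens, d) out.
x = torch.randn(16, 2 * 128)
act = SiluAndMul()
out = act(x)
assert out.shape == (16, 128)
```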
# --8<-- [start:mul_and_silu]
@CustomOp.register("mul_and_silu")
class MulAndSilu(CustomOp):
    """An activation function for SwiGLU.

@@ -112,6 +119,8 @@ class MulAndSilu(CustomOp):
        return: (num_tokens, d) or (batch_size, seq_len, d)
    """

    # --8<-- [end:mul_and_silu]

    def __init__(self):
        super().__init__()
        if current_platform.is_cuda_alike():

@@ -139,6 +148,7 @@ class MulAndSilu(CustomOp):
    # def forward_xpu(self, x: torch.Tensor) -> torch.Tensor:


# --8<-- [start:gelu_and_mul_sparse]
@CustomOp.register("gelu_and_mul_sparse")
class GeluAndMulSparse(CustomOp):
    """An activation function for GeluAndMulSparse.

@@ -153,6 +163,8 @@ class GeluAndMulSparse(CustomOp):
        return: (num_tokens, d) or (batch_size, seq_len, d)
    """

    # --8<-- [end:gelu_and_mul_sparse]

    def __init__(self, activation_sparsity: float, approximate: str = "none"):
        super().__init__()
        # Gelu.

@@ -195,6 +207,7 @@ class GeluAndMulSparse(CustomOp):
        return self.forward_native(x)


# --8<-- [start:gelu_and_mul]
@CustomOp.register("gelu_and_mul")
class GeluAndMul(CustomOp):
    """An activation function for GeGLU.

@@ -206,6 +219,8 @@ class GeluAndMul(CustomOp):
        return: (batch_size, seq_len, d) or (num_tokens, d)
    """

    # --8<-- [end:gelu_and_mul]

    def __init__(self, approximate: str = "none"):
        super().__init__()
        self.approximate = approximate

@@ -257,9 +272,12 @@ class GeluAndMul(CustomOp):
        return f"approximate={repr(self.approximate)}"
# --8<-- [start:swigluoai_and_mul]
@CustomOp.register("swigluoai_and_mul")
class SwigluOAIAndMul(CustomOp):
    # https://github.com/huggingface/transformers/blob/v4.55.0/src/transformers/models/gpt_oss/modeling_gpt_oss.py#L106-L110
    # --8<-- [end:swigluoai_and_mul]

    def __init__(self, alpha: float = 1.702, limit: float = 7.0):
        super().__init__()
        self.alpha = alpha

@@ -286,8 +304,11 @@ class SwigluOAIAndMul(CustomOp):
        return f"alpha={repr(self.alpha)}, limit={repr(self.limit)}"


# --8<-- [start:gelu_new]
@CustomOp.register("gelu_new")
class NewGELU(CustomOp):
    # --8<-- [end:gelu_new]

    def __init__(self):
        super().__init__()
        if current_platform.is_cuda_alike() or current_platform.is_cpu():

@@ -311,8 +332,11 @@ class NewGELU(CustomOp):
        return self.op(x)


# --8<-- [start:gelu_fast]
@CustomOp.register("gelu_fast")
class FastGELU(CustomOp):
    # --8<-- [end:gelu_fast]

    def __init__(self):
        super().__init__()
        if current_platform.is_cuda_alike() or current_platform.is_cpu():

@@ -335,9 +359,12 @@ class FastGELU(CustomOp):
        return self.op(x)


# --8<-- [start:quick_gelu]
@CustomOp.register("quick_gelu")
class QuickGELU(CustomOp):
    # https://github.com/huggingface/transformers/blob/main/src/transformers/activations.py#L90
    # --8<-- [end:quick_gelu]

    def __init__(self):
        super().__init__()
        if current_platform.is_cuda_alike() or current_platform.is_cpu():

@@ -365,12 +392,15 @@ class QuickGELU(CustomOp):
    # def forward_xpu(self, x: torch.Tensor) -> torch.Tensor:


# --8<-- [start:relu2]
@CustomOp.register("relu2")
class ReLUSquaredActivation(CustomOp):
    """
    Applies the relu^2 activation introduced in https://arxiv.org/abs/2109.08668v2
    """

    # --8<-- [end:relu2]

    def forward_native(self, x: torch.Tensor) -> torch.Tensor:
        """PyTorch-native implementation equivalent to forward()."""
        return torch.square(F.relu(x))

@@ -380,6 +410,7 @@ class ReLUSquaredActivation(CustomOp):
        return self.forward_native(x)
# --8<-- [start:xielu]
@CustomOp.register("xielu")
class XIELU(CustomOp):
    """

@@ -388,6 +419,8 @@ class XIELU(CustomOp):
    Otherwise, we emit a single warning and use xIELU Python
    """

    # --8<-- [end:xielu]

    def __init__(
        self,
        alpha_p_init: float = 0.8,

@@ -105,10 +105,13 @@ class ConvLayerBase(CustomOp):
        return s


# --8<-- [start:conv2d]
@CustomOp.register("conv2d")
class Conv2dLayer(ConvLayerBase):
    """Conv layer with Conv2d."""

    # --8<-- [end:conv2d]

    num_dim = 2

    def _forward_mulmat(self, x: torch.Tensor) -> torch.Tensor:

@@ -204,10 +207,13 @@ class CausalConv2dLayer(Conv2dLayer):
        return x


# --8<-- [start:conv3d]
@CustomOp.register("conv3d")
class Conv3dLayer(ConvLayerBase):
    """Conv layer with Conv3d."""

    # --8<-- [end:conv3d]

    num_dim = 3

    def _forward_mulmat(self, x: torch.Tensor) -> torch.Tensor:
@@ -1459,10 +1459,13 @@ def grouped_topk(
    return topk_weights.to(torch.float32), topk_ids.to(torch.int32)


# --8<-- [start:grouped_topk]
@CustomOp.register("grouped_topk")
class GroupedTopk(CustomOp):
    """GroupedTopk used by the Deepseek-V2 and Deepseek-V3 model."""

    # --8<-- [end:grouped_topk]

    def __init__(
        self,
        topk: int,

@@ -21,8 +21,11 @@ from vllm.model_executor.layers.fused_moe.modular_kernel import (
logger = init_logger(__name__)


# --8<-- [start:modular_fused_moe]
@CustomOp.register("modular_fused_moe")
class FusedMoEModularMethod(FusedMoEMethodBase, CustomOp):
    # --8<-- [end:modular_fused_moe]

    def __init__(
        self, old_quant_method: FusedMoEMethodBase, experts: FusedMoEModularKernel
    ):

@@ -302,6 +302,7 @@ class FusedMoERouterImpl(FusedMoERouter):
        return self.layer._select_experts(hidden_states, router_logits)


# --8<-- [start:fused_moe]
@CustomOp.register("fused_moe")
class FusedMoE(CustomOp):
    """FusedMoE layer for MoE models.

@@ -326,6 +327,8 @@ class FusedMoE(CustomOp):
        router_logits_dtype: Data type for router logits buffers.
    """

    # --8<-- [end:fused_moe]

    def __init__(
        self,
        num_experts: int,  # Global number of experts

@@ -52,10 +52,13 @@ else:
logger = init_logger(__name__)


# --8<-- [start:unquantized_fused_moe]
@CustomOp.register("unquantized_fused_moe")
class UnquantizedFusedMoEMethod(FusedMoEMethodBase, CustomOp):
    """MoE method without quantization."""

    # --8<-- [end:unquantized_fused_moe]

    def __init__(self, moe: FusedMoEConfig):
        super().__init__(moe)
@@ -88,6 +88,7 @@ def dispatch_rocm_rmsnorm_func(
    return rms_norm


# --8<-- [start:rms_norm]
@CustomOp.register("rms_norm")
class RMSNorm(CustomOp):
    """Root mean square normalization.

@@ -96,6 +97,8 @@ class RMSNorm(CustomOp):
    Refer to https://arxiv.org/abs/1910.07467
    """

    # --8<-- [end:rms_norm]

    def __init__(
        self,
        hidden_size: int,

@@ -253,6 +256,7 @@ class RMSNorm(CustomOp):
        return s
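The `RMSNorm` docstring points at the original paper (https://arxiv.org/abs/1910.07467); the reference computation is compact enough to restate here. This is a framework-agnostic sketch of that formula, not vLLM's exact `forward_native`:

```python
import torch


def rms_norm_reference(
    x: torch.Tensor, weight: torch.Tensor, eps: float = 1e-6
) -> torch.Tensor:
    # Scale each vector by the reciprocal of its root-mean-square over the
    # last dimension, then apply the learned per-channel weight.
    variance = x.pow(2).mean(dim=-1, keepdim=True)
    return x * torch.rsqrt(variance + eps) * weight
```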
# --8<-- [start:gemma_rms_norm]
@CustomOp.register("gemma_rms_norm")
class GemmaRMSNorm(CustomOp):
    """RMS normalization for Gemma.

@@ -262,6 +266,8 @@ class GemmaRMSNorm(CustomOp):
        2. (x * w).to(orig_dtype) instead of x.to(orig_dtype) * w.
    """

    # --8<-- [end:gemma_rms_norm]

    def __init__(
        self,
        hidden_size: int,

@@ -321,6 +327,7 @@ class GemmaRMSNorm(CustomOp):
        return self.forward_native(x, residual)
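The `GemmaRMSNorm` note above is a numerics-ordering detail: the weight is applied while the activations are still in the higher-precision compute dtype, and only the product is cast back to the original dtype. A hedged illustration of the two orderings (all names here are illustrative):

```python
import torch

x = torch.randn(4, 8)                 # normalized activations, still fp32
w = torch.randn(8)                    # learned scale
orig_dtype = torch.bfloat16

standard = x.to(orig_dtype) * w.to(orig_dtype)  # cast first, then scale
gemma_style = (x * w).to(orig_dtype)            # scale in fp32, then cast
# The two can differ slightly because rounding happens at different points.
```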
# --8<-- [start:rms_norm_gated]
@CustomOp.register("rms_norm_gated")
class RMSNormGated(CustomOp):
    """RMS Normalization with optional gating.

@@ -331,6 +338,8 @@ class RMSNormGated(CustomOp):
    - Optional gating with SiLU activation
    """

    # --8<-- [end:rms_norm_gated]

    def __init__(
        self,
        hidden_size: int,

@@ -304,6 +304,7 @@ class LinearBase(CustomOp):
        param.tp_size = self.tp_size


# --8<-- [start:replicated_linear]
@CustomOp.register("replicated_linear")
class ReplicatedLinear(LinearBase):
    """Replicated linear layer.

@@ -321,6 +322,8 @@ class ReplicatedLinear(LinearBase):
        disable_tp: Take no effect for replicated linear layers.
    """

    # --8<-- [end:replicated_linear]

    def __init__(
        self,
        input_size: int,

@@ -421,6 +424,7 @@ class ReplicatedLinear(LinearBase):
        return s


# --8<-- [start:column_parallel_linear]
@CustomOp.register("column_parallel_linear")
class ColumnParallelLinear(LinearBase):
    """Linear layer with column parallelism.

@@ -448,6 +452,8 @@ class ColumnParallelLinear(LinearBase):
        disable_tp: If true, weights matrix won't be sharded through tp rank.
    """

    # --8<-- [end:column_parallel_linear]

    def __init__(
        self,
        input_size: int,

@@ -1289,6 +1295,7 @@ class QKVParallelLinear(ColumnParallelLinear):
        param_data.copy_(loaded_weight)


# --8<-- [start:row_parallel_linear]
@CustomOp.register("row_parallel_linear")
class RowParallelLinear(LinearBase):
    """Linear layer with row parallelism.

@@ -1323,6 +1330,8 @@ class RowParallelLinear(LinearBase):
        disable_tp: If true, weights matrix won't be sharded through tp rank.
    """

    # --8<-- [end:row_parallel_linear]

    def __init__(
        self,
        input_size: int,
@@ -13,6 +13,7 @@ from vllm.model_executor.layers.vocab_parallel_embedding import VocabParallelEmb
from vllm.platforms import current_platform


# --8<-- [start:logits_processor]
@CustomOp.register("logits_processor")
class LogitsProcessor(CustomOp):
    """Process logits and apply logits processors from sampling metadata.

@@ -23,6 +24,8 @@ class LogitsProcessor(CustomOp):
    3. Apply logits processors (if any).
    """

    # --8<-- [end:logits_processor]

    def __init__(
        self,
        vocab_size: int,

@@ -39,6 +39,7 @@ from vllm.v1.attention.backends.mamba1_attn import Mamba1AttentionMetadata

# Adapted from transformers.models.mamba.modeling_mamba.MambaMixer
# --8<-- [start:mamba_mixer]
@CustomOp.register("mamba_mixer")
class MambaMixer(MambaBase, CustomOp):
    """

@@ -51,6 +52,8 @@ class MambaMixer(MambaBase, CustomOp):
    **selective** state spaces)
    """

    # --8<-- [end:mamba_mixer]

    def __init__(
        self,
        hidden_size: int,

@@ -49,8 +49,11 @@ from vllm.v1.attention.backends.mamba2_attn import Mamba2AttentionMetadata

# Adapted from transformers.models.mamba2.modeling_mamba2.MambaRMSNormGated
# --8<-- [start:mixer2_gated_rms_norm]
@CustomOp.register("mixer2_gated_rms_norm")
class Mixer2RMSNormGated(CustomOp):
    # --8<-- [end:mixer2_gated_rms_norm]

    def __init__(
        self,
        full_hidden_size: int,

@@ -214,6 +217,7 @@ def mamba_v2_sharded_weight_loader(

# Adapted from transformers.models.mamba.modeling_mamba.MambaMixer
# --8<-- [start:mamba_mixer2]
@CustomOp.register("mamba_mixer2")
class MambaMixer2(MambaBase, CustomOp):
    """

@@ -226,6 +230,8 @@ class MambaMixer2(MambaBase, CustomOp):
    **selective** state spaces)
    """

    # --8<-- [end:mamba_mixer2]

    def __init__(
        self,
        hidden_size: int,

@@ -27,8 +27,11 @@ from vllm.utils.torch_utils import direct_register_custom_op
from vllm.v1.attention.backends.short_conv_attn import ShortConvAttentionMetadata


# --8<-- [start:short_conv]
@CustomOp.register("short_conv")
class ShortConv(MambaBase, CustomOp):
    # --8<-- [end:short_conv]

    def __init__(
        self,
        config,

@@ -29,6 +29,7 @@ class MLAModules:
    indexer_rotary_emb: torch.nn.Module | None = None


# --8<-- [start:multi_head_latent_attention]
@CustomOp.register("multi_head_latent_attention")
class MultiHeadLatentAttentionWrapper(CustomOp):
    """MLA layer registered as CustomOp to allow OOT backends to add

@@ -47,6 +48,8 @@ class MultiHeadLatentAttentionWrapper(CustomOp):
    3. Return the output tensor.
    """

    # --8<-- [end:multi_head_latent_attention]

    def __init__(
        self,
        hidden_size: int,
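The `MultiHeadLatentAttentionWrapper` docstring above spells out why these layers are registered as `CustomOp`s: out-of-tree (OOT) backends can plug in their own kernels. A hedged sketch of what a plugin-side override might look like, assuming `forward_oot` is the out-of-tree dispatch hook (treat that method name as an assumption; the class and kernel call below are hypothetical):

```python
import torch

from vllm.model_executor.layers.activation import SiluAndMul


class PluginSiluAndMul(SiluAndMul):
    """Hypothetical out-of-tree specialization of an in-tree CustomOp."""

    def forward_oot(self, x: torch.Tensor) -> torch.Tensor:
        # A real plugin would dispatch to its fused device kernel here;
        # falling back to the native path keeps the sketch self-contained.
        return self.forward_native(x)
```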
@@ -18,6 +18,7 @@ _FP8_MIN, _FP8_MAX = get_fp8_min_max()
|
||||
_FP8_MIN_SCALING_FACTOR = 1.0 / (_FP8_MAX * 512.0)
|
||||
|
||||
|
||||
# --8<-- [start:quant_fp8]
|
||||
@CustomOp.register("quant_fp8")
|
||||
class QuantFP8(CustomOp):
|
||||
"""
|
||||
@@ -25,6 +26,8 @@ class QuantFP8(CustomOp):
|
||||
This CustomOp supports both static and dynamic quantization.
|
||||
"""
|
||||
|
||||
# --8<-- [end:quant_fp8]
|
||||
|
||||
def __init__(
|
||||
self,
|
||||
static: bool,
|
||||
|
||||
@@ -10,10 +10,13 @@ from vllm.model_executor.custom_op import CustomOp
|
||||
from .common import ApplyRotaryEmb
|
||||
|
||||
|
||||
# --8<-- [start:rotary_embedding]
|
||||
@CustomOp.register("rotary_embedding")
|
||||
class RotaryEmbeddingBase(CustomOp):
|
||||
"""Original rotary positional embedding."""
|
||||
|
||||
# --8<-- [end:rotary_embedding]
|
||||
|
||||
def __init__(
|
||||
self,
|
||||
head_size: int,
|
||||
|
||||
@@ -118,8 +118,11 @@ direct_register_custom_op(
|
||||
)
|
||||
|
||||
|
||||
# --8<-- [start:apply_rotary_emb]
|
||||
@CustomOp.register("apply_rotary_emb")
|
||||
class ApplyRotaryEmb(CustomOp):
|
||||
# --8<-- [end:apply_rotary_emb]
|
||||
|
||||
def __init__(
|
||||
self,
|
||||
enforce_enable: bool = False,
|
||||
|
||||
@@ -9,10 +9,13 @@ from vllm.model_executor.custom_op import CustomOp
|
||||
from .common import rotate_gptj, rotate_neox
|
||||
|
||||
|
||||
# --8<-- [start:dual_chunk_rotary_embedding]
|
||||
@CustomOp.register("dual_chunk_rotary_embedding")
|
||||
class DualChunkRotaryEmbedding(CustomOp):
|
||||
"""Rotary positional embedding for Dual Chunk Attention."""
|
||||
|
||||
# --8<-- [end:dual_chunk_rotary_embedding]
|
||||
|
||||
def __init__(
|
||||
self,
|
||||
head_size: int,
|
||||
|
||||
@@ -181,6 +181,7 @@ def get_masked_input_and_mask(
|
||||
return input_, ~vocab_mask
|
||||
|
||||
|
||||
# --8<-- [start:vocab_parallel_embedding]
|
||||
@CustomOp.register("vocab_parallel_embedding")
|
||||
class VocabParallelEmbedding(CustomOp):
|
||||
"""Embedding parallelized in the vocabulary dimension.
|
||||
@@ -221,6 +222,8 @@ class VocabParallelEmbedding(CustomOp):
|
||||
prefix: full name of the layer in the state dict
|
||||
""" # noqa: E501
|
||||
|
||||
# --8<-- [end:vocab_parallel_embedding]
|
||||
|
||||
def __init__(
|
||||
self,
|
||||
num_embeddings: int,
|
||||
@@ -492,6 +495,7 @@ class VocabParallelEmbedding(CustomOp):
|
||||
return s
|
||||
|
||||
|
||||
# --8<-- [start:parallel_lm_head]
|
||||
@CustomOp.register("parallel_lm_head")
|
||||
class ParallelLMHead(VocabParallelEmbedding):
|
||||
"""Parallelized LM head.
|
||||
@@ -509,6 +513,8 @@ class ParallelLMHead(VocabParallelEmbedding):
|
||||
padding_size: padding size for the vocabulary.
|
||||
"""
|
||||
|
||||
# --8<-- [end:parallel_lm_head]
|
||||
|
||||
def __init__(
|
||||
self,
|
||||
num_embeddings: int,
|
||||
|
||||