[Refactor] Make FP8 Linear Ops use kernel abstraction (#27814)
Signed-off-by: vllmellm <vllm.ellm@embeddedllm.com>
@@ -5,6 +5,7 @@
import pytest
import torch

import vllm.config
import vllm.plugins
from vllm._aiter_ops import IS_AITER_FOUND, rocm_aiter_ops
from vllm.compilation.fusion import FUSED_OPS, FusedRMSQuantKey, RMSNormQuantFusionPass
@@ -20,8 +21,22 @@ from vllm.config import (
    VllmConfig,
)
from vllm.model_executor.layers.layernorm import RMSNorm
from vllm.model_executor.layers.quantization.utils.fp8_utils import (
    W8A8BlockFp8LinearOp,
)
from vllm.model_executor.layers.quantization.kernels.scaled_mm.cutlass import (
    CutlassFP8ScaledMMLinearKernel,
)
from vllm.model_executor.layers.quantization.kernels.scaled_mm.flashinfer import (
    FlashInferFP8ScaledMMLinearKernel,
)
from vllm.model_executor.layers.quantization.kernels.scaled_mm.pytorch import (
    ChannelWiseTorchFP8ScaledMMLinearKernel,
    PerTensorTorchFP8ScaledMMLinearKernel,
    RowWiseTorchFP8ScaledMMLinearKernel,
)
from vllm.model_executor.layers.quantization.kernels.scaled_mm.rocm import (
    ROCmFP8ScaledMMLinearKernel,
)
from vllm.model_executor.layers.quantization.kernels.scaled_mm.ScaledMMLinearKernel import (  # noqa: E501
    FP8ScaledMMLinearKernel,
)
from vllm.model_executor.layers.quantization.utils.quant_utils import (
    GroupShape,
@@ -29,15 +44,14 @@ from vllm.model_executor.layers.quantization.utils.quant_utils import (
    ScaleDesc,
)
from vllm.model_executor.layers.quantization.utils.w8a8_utils import (
    Fp8LinearOp,
    cutlass_block_fp8_supported,
    cutlass_fp8_supported,
    maybe_create_device_identity,
)
from vllm.platforms import current_platform
from vllm.utils.deep_gemm import is_deep_gemm_supported
from vllm.utils.deep_gemm import (
    is_deep_gemm_supported,
)

from ..utils import override_cutlass_fp8_supported
from ..utils import TestBlockFP8Layer, TestFP8Layer
from .backend import TestBackend

FP8_DTYPE = current_platform.fp8_dtype()
@@ -45,157 +59,195 @@ FP8_DTYPE = current_platform.fp8_dtype()
RMS_OP = torch.ops._C.rms_norm.default
RMS_ADD_OP = torch.ops._C.fused_add_rms_norm.default

# Kernel and group_shape combinations: (kernel, group_shape)
# CUDA kernels
CUDA_KERNEL_GROUPSHAPE_COMBINATIONS = [
    # FlashInferFP8ScaledMMLinearKernel supports per-tensor only
    (FlashInferFP8ScaledMMLinearKernel, GroupShape.PER_TENSOR),
    # CutlassFP8ScaledMMLinearKernel supports both per-tensor and per-token
    (CutlassFP8ScaledMMLinearKernel, GroupShape.PER_TOKEN),
    (CutlassFP8ScaledMMLinearKernel, GroupShape.PER_TENSOR),
    # PerTensorTorchFP8ScaledMMLinearKernel only supports per-tensor
    (PerTensorTorchFP8ScaledMMLinearKernel, GroupShape.PER_TENSOR),
    # ChannelWiseTorchFP8ScaledMMLinearKernel only supports per-token
    (ChannelWiseTorchFP8ScaledMMLinearKernel, GroupShape.PER_TOKEN),
    # Blockwise group shapes (no kernel abstraction)
    (None, GroupShape(1, 128)),
    (None, GroupShape(1, 64)),
]

# ROCm kernels
ROCM_KERNEL_GROUPSHAPE_COMBINATIONS = [
    # ROCmFP8ScaledMMLinearKernel supports per-tensor only
    (ROCmFP8ScaledMMLinearKernel, GroupShape.PER_TENSOR),
    # RowWiseTorchFP8ScaledMMLinearKernel only supports per-token
    (RowWiseTorchFP8ScaledMMLinearKernel, GroupShape.PER_TOKEN),
    # ChannelWiseTorchFP8ScaledMMLinearKernel only supports per-token
    (ChannelWiseTorchFP8ScaledMMLinearKernel, GroupShape.PER_TOKEN),
    # Blockwise group shapes (no kernel abstraction)
    (None, GroupShape(1, 128)),
    (None, GroupShape(1, 64)),
]

KERNEL_GROUPSHAPE_COMBINATIONS = (
    CUDA_KERNEL_GROUPSHAPE_COMBINATIONS
    if current_platform.is_cuda()
    else ROCM_KERNEL_GROUPSHAPE_COMBINATIONS
)
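
# Each entry unpacks in the test body as:
#   force_kernel, group_shape = kernel_groupshape
# A force_kernel of None exercises the blockwise path, which has no
# ScaledMM kernel abstraction.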

# For AITER tests each entry also carries a third element that toggles
# use_aiter_quant_op: (kernel, group_shape, use_aiter_quant_op)
AITER_KERNEL_GROUPSHAPE_COMBINATIONS = [
    # Per-tensor with ROCmFP8ScaledMMLinearKernel
    (ROCmFP8ScaledMMLinearKernel, GroupShape.PER_TENSOR, False),
    # Per-token with RowWiseTorchFP8ScaledMMLinearKernel
    (RowWiseTorchFP8ScaledMMLinearKernel, GroupShape.PER_TOKEN, True),
    (RowWiseTorchFP8ScaledMMLinearKernel, GroupShape.PER_TOKEN, False),
    # Per-token with ChannelWiseTorchFP8ScaledMMLinearKernel
    (ChannelWiseTorchFP8ScaledMMLinearKernel, GroupShape.PER_TOKEN, True),
    (ChannelWiseTorchFP8ScaledMMLinearKernel, GroupShape.PER_TOKEN, False),
    # Blockwise (no kernel abstraction)
    (None, GroupShape(1, 128), True),
]


class TestModel(torch.nn.Module):
    def __init__(
        self,
        hidden_size: int,
        eps: float,
        force_kernel: FP8ScaledMMLinearKernel | None,
        group_shape: GroupShape,
        use_aiter: bool = False,
        cuda_force_torch: bool = False,
        use_aiter_quant_op: bool = True,
        use_aiter_fusion: bool = False,
        use_aiter_quant: bool = False,
        *args,
        **kwargs,
    ):
        super().__init__(*args, **kwargs)
        self.use_aiter = use_aiter
        self.use_aiter_quant_op = use_aiter_quant_op
        self.cuda_force_torch = cuda_force_torch
        self.fp8_linear_layers: list[torch.nn.Module]
        self.group_shape = group_shape
        self.enable_quant_fp8_custom_op = None  # Will be set later if applicable

        self.use_aiter_quant_op = use_aiter_quant
        self.use_aiter_fusion = use_aiter_fusion
        self.norm = [RMSNorm(hidden_size, eps) for _ in range(4)]

        # Setup quantization scale descriptor
        static = group_shape == GroupShape.PER_TENSOR and not use_aiter
        quant_scale = ScaleDesc(torch.float32, static, group_shape)
        self.quant_key = QuantKey(dtype=FP8_DTYPE, scale=quant_scale, symmetric=True)

        # Setup scales
        if static:
            self.scale = [torch.rand(1, dtype=torch.float32) for _ in range(3)]
        else:
            self.scale = [None for _ in range(3)]

        # Setup weights
        self.w = [
            torch.rand(hidden_size, hidden_size).to(dtype=FP8_DTYPE) for _ in range(3)
        ]
        if not group_shape.is_per_group() or use_aiter:
            self.w = [self.w[0].t() for _ in range(3)]

        # Setup weight scales
        if group_shape.is_per_group():
            scale_size = (
                (hidden_size + 128 - 1) // 128
                if use_aiter
                else hidden_size // group_shape[1]
            )
            wscale_shape: tuple[int, ...] = (scale_size, scale_size)
        else:
            wscale_shape = (1,)
        self.wscale = [torch.rand(wscale_shape, dtype=torch.float32) for _ in range(3)]

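        # Four construction paths follow: AITER blockwise, plain blockwise,
        # AITER per-token, and the standard Fp8LinearOp (cutlass or torch).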
        # Setup FP8 linear operation
        is_per_group = group_shape.is_per_group()
        if is_per_group and use_aiter:
            self.fp8_linear = W8A8BlockFp8LinearOp(
                weight_group_shape=GroupShape(128, 128),
                act_quant_group_shape=group_shape,
                use_aiter_and_is_supported=use_aiter_quant_op,
            )
        # AITER blockwise doesn't use enable_quant_fp8_custom_op
        elif is_per_group:
            self.fp8_linear = W8A8BlockFp8LinearOp(
                weight_group_shape=GroupShape(group_shape[1], group_shape[1]),
                act_quant_group_shape=group_shape,
                cutlass_block_fp8_supported=cutlass_block_fp8_supported(),
                use_aiter_and_is_supported=False,
            )
            self.enable_quant_fp8_custom_op = self.fp8_linear.input_quant_op.enabled()
        elif use_aiter:
            self.fp8_linear = Fp8LinearOp(
                act_quant_static=False,
                act_quant_group_shape=group_shape,
            )
            self.fp8_linear.quant_fp8.use_aiter = use_aiter_quant_op
            self.enable_quant_fp8_custom_op = self.fp8_linear.quant_fp8.enabled()
        else:
            with override_cutlass_fp8_supported(not cuda_force_torch):
                self.fp8_linear = Fp8LinearOp(
                    act_quant_static=static,
                    act_quant_group_shape=group_shape,
                )
            self.enable_quant_fp8_custom_op = self.fp8_linear.quant_fp8.enabled()

        self.enable_rms_norm_custom_op = self.norm[0].enabled()

        # Determine if blockwise based on group_shape
        is_blockwise = group_shape.is_per_group()

        if is_blockwise:
            act_quant_scale_desc = ScaleDesc(torch.float32, False, group_shape)
            self.activation_quant_key = QuantKey(
                dtype=FP8_DTYPE, scale=act_quant_scale_desc, symmetric=True
            )
            self.fp8_linear_layers = [
                TestBlockFP8Layer(
                    weight_shape=(hidden_size, hidden_size),
                    group_shape=group_shape,
                    cutlass_block_fp8_supported=cutlass_block_fp8_supported(),
                    use_aiter_and_is_supported=use_aiter_quant,
                    transpose_weights=use_aiter_fusion,
                )
                for _ in range(3)
            ]

            self.enable_quant_fp8_custom_op = (
                False
                if use_aiter_quant
                else self.fp8_linear_layers[0].linear_op.input_quant_op.enabled()
            )

        else:
            is_static = group_shape == GroupShape.PER_TENSOR
            act_quant_scale_desc = ScaleDesc(torch.float32, is_static, group_shape)
            w_quant_scale_desc = ScaleDesc(torch.float32, True, group_shape)
            self.activation_quant_key = QuantKey(
                dtype=FP8_DTYPE, scale=act_quant_scale_desc, symmetric=True
            )
            self.weight_quant_key = QuantKey(
                dtype=FP8_DTYPE, scale=w_quant_scale_desc, symmetric=True
            )
            self.fp8_linear_layers = [
                TestFP8Layer(
                    weight_shape=(hidden_size, hidden_size),
                    activation_quant_key=self.activation_quant_key,
                    weight_quant_key=self.weight_quant_key,
                    force_kernel=force_kernel,
                )
                for _ in range(3)
            ]

            # Enable aiter quantization if requested
            for layer in self.fp8_linear_layers:
                layer.kernel.quant_fp8.use_aiter = use_aiter_quant

            self.enable_quant_fp8_custom_op = self.fp8_linear_layers[
                0
            ].is_quant_fp8_enabled()

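    # Chain RMSNorm + FP8 linear four times so the RMSNorm+quant pattern
    # appears both with and without the fused residual add.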
    def forward(self, x):
        # avoid having graph input be an arg to a pattern directly
        x = resid = torch.relu(x)
        y = self.norm[0](x)

        x2 = self.fp8_linear.apply(
            y, self.w[0], self.wscale[0], input_scale=self.scale[0]
        )
        x2 = self.fp8_linear_layers[0](y)
        # make sure resid is used for replacement to work
        y2, resid = self.norm[1](x2, resid)

        x3 = self.fp8_linear.apply(
            y2, self.w[1], self.wscale[1], input_scale=self.scale[1]
        )
        x3 = self.fp8_linear_layers[1](y2)

        y3, resid = self.norm[2](x3, resid)  # use resid here

        x4 = self.fp8_linear.apply(
            y3, self.w[2], self.wscale[2], input_scale=self.scale[2]
        )
        x4 = self.fp8_linear_layers[2](y3)

        y4, resid = self.norm[3](x4, resid)  # use resid here
        return y4

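    # Ops expected in the graph before the fusion pass runs; TestBackend
    # checks that the pass replaces them.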
    def ops_in_model_before(self):
        if (
            self.use_aiter
            and self.group_shape.is_per_group()
            and current_platform.is_fp8_fnuz()
        ):
            return [rocm_aiter_ops.get_group_quant_op()]
        if self.use_aiter and self.group_shape.is_per_group():
            return [torch.ops.vllm.triton_per_token_group_quant_fp8.default]
        if self.use_aiter and self.use_aiter_quant_op:
            return [rocm_aiter_ops.get_per_token_quant_op()]
        if self.use_aiter:
            return [QUANT_OPS[self.quant_key]]
        if self.enable_quant_fp8_custom_op:
            return [QUANT_OPS[self.quant_key]]
        return [torch.ops.aten.reciprocal]
        if self.group_shape.is_per_group():
            # Blockwise path
            if self.use_aiter_fusion and self.use_aiter_quant_op:
                return [rocm_aiter_ops.get_group_quant_op()]
            if self.use_aiter_fusion:
                return [torch.ops.vllm.triton_per_token_group_quant_fp8.default]
        else:
            if self.use_aiter_quant_op:
                return [rocm_aiter_ops.get_per_token_quant_op()]

        # Common path
        return (
            [QUANT_OPS[self.activation_quant_key]]
            if self.enable_quant_fp8_custom_op
            else [torch.ops.aten.reciprocal]
        )

    def ops_in_model_after(self):
        if self.use_aiter and self.group_shape.is_per_group():
            from vllm.compilation.rocm_aiter_fusion import (
                AiterFusedAddRMSFp8GroupQuantPattern,
                AiterRMSFp8GroupQuantPattern,
            )
        if self.use_aiter_fusion:
            if self.group_shape.is_per_group():
                # Blockwise aiter fusion
                from vllm.compilation.rocm_aiter_fusion import (
                    AiterFusedAddRMSFp8GroupQuantPattern,
                    AiterRMSFp8GroupQuantPattern,
                )

                return [
                    AiterFusedAddRMSFp8GroupQuantPattern.FUSED_OP,
                    AiterRMSFp8GroupQuantPattern.FUSED_OP,
                ]
        if self.use_aiter:
            from vllm.compilation.rocm_aiter_fusion import (
                AiterFusedAddRMSNormDynamicQuantPattern,
                AiterRMSNormDynamicQuantPattern,
            )
            return [
                AiterFusedAddRMSFp8GroupQuantPattern.FUSED_OP,
                AiterRMSFp8GroupQuantPattern.FUSED_OP,
            ]
            else:
                # Per-token aiter fusion
                from vllm.compilation.rocm_aiter_fusion import (
                    AiterFusedAddRMSNormDynamicQuantPattern,
                    AiterRMSNormDynamicQuantPattern,
                )

                return [
                    AiterFusedAddRMSNormDynamicQuantPattern.FUSED_OP,
                    AiterRMSNormDynamicQuantPattern.FUSED_OP,
                ]
        return [
            AiterFusedAddRMSNormDynamicQuantPattern.FUSED_OP,
            AiterRMSNormDynamicQuantPattern.FUSED_OP,
        ]

        # Regular fusion
        return [
            FUSED_OPS[FusedRMSQuantKey(self.quant_key, True)],
            FUSED_OPS[FusedRMSQuantKey(self.quant_key, False)],
            FUSED_OPS[FusedRMSQuantKey(self.activation_quant_key, True)],
            FUSED_OPS[FusedRMSQuantKey(self.activation_quant_key, False)],
        ]

    def ops_in_model_before_partial(self):
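        # Ops that may only be partially replaced by the fusion pass.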
@@ -206,14 +258,6 @@ class TestModel(torch.nn.Module):
        )


GROUP_SHAPES = [
    GroupShape.PER_TOKEN,
    GroupShape.PER_TENSOR,
    GroupShape(1, 128),
    GroupShape(1, 64),
]


def _run_fusion_test(
    model,
    fusion_pass,
@@ -259,14 +303,9 @@ def _run_fusion_test(
@pytest.mark.parametrize("hidden_size", [256])
@pytest.mark.parametrize("num_tokens", [257])
@pytest.mark.parametrize("eps", [1e-5, 1e-6])
@pytest.mark.parametrize("group_shape", GROUP_SHAPES)
@pytest.mark.parametrize("kernel_groupshape", KERNEL_GROUPSHAPE_COMBINATIONS)
@pytest.mark.parametrize("enable_rms_norm_custom_op", [True, False])
@pytest.mark.parametrize("enable_quant_fp8_custom_op", [True, False])
# cuda_force_torch is used to test the torch code path on platforms where
# cutlass_fp8_supported() == True.
@pytest.mark.parametrize(
    "cuda_force_torch", [True, False] if cutlass_fp8_supported() else [True]
)
@pytest.mark.skipif(
    not current_platform.is_cuda_alike(), reason="Only test on CUDA and ROCm"
)
@@ -275,11 +314,12 @@ def test_fusion_rmsnorm_quant(
    hidden_size,
    num_tokens,
    eps,
    group_shape,
    kernel_groupshape,
    enable_rms_norm_custom_op,
    enable_quant_fp8_custom_op,
    cuda_force_torch,
):
    force_kernel, group_shape = kernel_groupshape

    if not enable_quant_fp8_custom_op and group_shape.is_per_group():
        pytest.skip("Unsupported unwrapped quant fp8 op for blockwise quantization")

@@ -310,15 +350,16 @@ def test_fusion_rmsnorm_quant(
    torch.set_default_device("cuda")
    torch.set_default_dtype(dtype)
    torch.manual_seed(1)
    maybe_create_device_identity()

    fusion_pass = RMSNormQuantFusionPass(vllm_config)

    model = TestModel(
        hidden_size=hidden_size,
        eps=eps,
        force_kernel=force_kernel,
        group_shape=group_shape,
        use_aiter=False,
        cuda_force_torch=cuda_force_torch,
        use_aiter_fusion=False,
        use_aiter_quant=False,
    )

    backend, _ = _run_fusion_test(
@@ -339,19 +380,12 @@ def test_fusion_rmsnorm_quant(
    assert n_add_nodes(backend.graph_post_pass) == 2


GROUP_SHAPE_QUANT_OPS_MATCHS = [
    (GroupShape.PER_TOKEN, True),
    (GroupShape.PER_TOKEN, False),
    (GroupShape(1, 128), True),
]


@pytest.mark.parametrize("dtype", [torch.bfloat16])
@pytest.mark.parametrize("hidden_size", [256])
@pytest.mark.parametrize("num_tokens", [257])
@pytest.mark.parametrize("eps", [1e-5, 1e-6])
@pytest.mark.parametrize(
    "group_shape, use_aiter_quant_op", GROUP_SHAPE_QUANT_OPS_MATCHS
    "kernel_groupshape_quant", AITER_KERNEL_GROUPSHAPE_COMBINATIONS
)
@pytest.mark.skipif(
    (not current_platform.is_rocm() or not IS_AITER_FOUND),
@@ -362,10 +396,10 @@ def test_aiter_fusion_rmsnorm_quant(
    hidden_size: int,
    num_tokens: int,
    eps: float,
    group_shape: GroupShape,
    use_aiter_quant_op: bool,
    kernel_groupshape_quant: tuple,
    monkeypatch: pytest.MonkeyPatch,
):
    force_kernel, group_shape, use_aiter_quant_op = kernel_groupshape_quant
    vllm_config = VllmConfig(
        model_config=ModelConfig(dtype=dtype),
        compilation_config=CompilationConfig(
@@ -379,20 +413,22 @@ def test_aiter_fusion_rmsnorm_quant(
        from vllm.compilation.rocm_aiter_fusion import RocmAiterRMSNormFusionPass

        m.setenv("VLLM_ROCM_USE_AITER", "1")

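        # Re-read the AITER env flags now that VLLM_ROCM_USE_AITER is set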
        rocm_aiter_ops.refresh_env_variables()

        torch.set_default_device("cuda")
        torch.set_default_dtype(dtype)
        torch.manual_seed(1)
        maybe_create_device_identity()

        fusion_pass = RocmAiterRMSNormFusionPass(vllm_config)

        model = TestModel(
            hidden_size=hidden_size,
            eps=eps,
            force_kernel=force_kernel,
            group_shape=group_shape,
            use_aiter=True,
            use_aiter_quant_op=use_aiter_quant_op,
            use_aiter_fusion=True,  # Always use aiter fusion ops in aiter test
            use_aiter_quant=use_aiter_quant_op,  # Toggle aiter quantization
        )

        _run_fusion_test(