[Kernel][Misc] register ops to prevent graph breaks (#6917)
Co-authored-by: Sage Moore <sage@neuralmagic.com>
This commit is contained in:
@@ -2,6 +2,7 @@ import pytest
|
||||
import torch
|
||||
|
||||
from tests.kernels.quant_utils import ref_dynamic_per_token_quant
|
||||
from tests.kernels.utils import opcheck
|
||||
from vllm._custom_ops import scaled_int8_quant
|
||||
|
||||
DTYPES = [torch.half, torch.bfloat16, torch.float]
|
||||
@@ -12,6 +13,16 @@ SEEDS = [0]
|
||||
SCALE = [0.1, 0.5, 0.8, 1.2, 2.1]
|
||||
|
||||
|
||||
def opcheck_int8_quant(output, input, scale=None):
    """Run ``opcheck`` on the int8-quant custom op matching *scale*.

    When *scale* is given, the static-scale kernel
    (``torch.ops._C.static_scaled_int8_quant``) is checked with it.
    When *scale* is ``None``, a per-token fp32 scale buffer of shape
    ``(num_tokens, 1)`` is allocated on *input*'s device and the
    dynamic-scale kernel (``torch.ops._C.dynamic_scaled_int8_quant``)
    is checked instead.
    """
    if scale is None:
        # One scale slot per token: flatten all leading dims of `input`.
        num_tokens = input.numel() // input.shape[-1]
        scale = torch.empty((num_tokens, 1),
                            device=input.device,
                            dtype=torch.float32)
        opcheck(torch.ops._C.dynamic_scaled_int8_quant, (output, input, scale))
    else:
        opcheck(torch.ops._C.static_scaled_int8_quant, (output, input, scale))
|
||||
|
||||
|
||||
@pytest.mark.parametrize("num_tokens", NUM_TOKENS)
|
||||
@pytest.mark.parametrize("hidden_size", HIDDEN_SIZES)
|
||||
@pytest.mark.parametrize("dtype", DTYPES)
|
||||
@@ -34,6 +45,8 @@ def test_dynamic_scaled_int8_quant(num_tokens: int, hidden_size: int,
|
||||
ops_out, ref_out, atol=1,
|
||||
rtol=0.0) # big atol to account for rounding errors
|
||||
|
||||
opcheck_int8_quant(ops_out, x)
|
||||
|
||||
|
||||
@pytest.mark.parametrize("num_tokens", NUM_TOKENS)
|
||||
@pytest.mark.parametrize("hidden_size", HIDDEN_SIZES)
|
||||
@@ -58,3 +71,5 @@ def test_static_scaled_int8_quant(num_tokens: int, hidden_size: int,
|
||||
torch.testing.assert_close(
|
||||
out1, out2, atol=1,
|
||||
rtol=0.0) # big atol to account for rounding errors
|
||||
|
||||
opcheck_int8_quant(out2, x, scale)
|
||||
|
||||
Reference in New Issue
Block a user