Adds padding and perf improvements to wvSplitK_fp8 (#33527)

Signed-off-by: Hashem Hashemi <hashem.hashemi@amd.com>
This commit is contained in:
Hashem Hashemi
2026-02-05 14:16:02 -08:00
committed by GitHub
parent 42d5d705f9
commit d5c4800112
3 changed files with 169 additions and 229 deletions

View File

@@ -73,21 +73,40 @@ NKM_FACTORS_WVSPLITKRC = [
# Problem sizes (n, k, m) for the FP8 wvSplitK kernel tests: in the tests
# below, activations A have shape (n, k) and weights B have shape (m, k).
# The `+ 16` variants exercise dimensions just past a round size —
# presumably to hit the kernel's padding paths (TODO confirm against the
# kernel implementation).
# NOTE(review): this span is rendered from a diff hunk without +/- markers,
# so it may interleave pre- and post-change entries; verify against the
# checked-in test file before relying on the exact set of cases.
NKM_FACTORS_WVSPLITK_FP8 = [
# FP8-specific cases with K % 16 == 0
(1, 16, 16),
(1, 32, 16 + 16),
(1, 64, 64),
(1, 64, 64 + 16),
(1, 64 + 16, 64),
(1, 64 + 16, 64 + 16),
(4, 64, 64),
(4, 64, 64 + 16),
(4, 64 + 16, 64),
(4, 64 + 16, 64 + 16),
(2, 512, 512),
(3, 512, 512),
(3, 512, 512 + 16),
(4, 512, 512),
(3, 2048, 2048),
(3, 2048, 2048 + 16),
(4, 2048 + 16, 2048),
(4, 2048 + 16, 2048 + 16),
(4, 4096, 4096),
(4, 16400, 2048),
(4, 16400, 2048 + 16),
# Extended FP8 dimensions not covered by WVSPLITK
(1, 14336, 1024),
(2, 24576, 2048),
(4, 32768, 28672),
(4, 32768 * 2, 28672),
(4, 32768 * 2, 28672 + 16),
(4, 32768 * 2 + 16, 28672),
(4, 32768 * 2 + 16, 28672 + 16),
]
# Single fixed seed (fed to torch.manual_seed) keeps the random test
# inputs deterministic across runs.
SEEDS = [0]
def pad_weights_fp8(weight):
def pad_fp8(weight):
num_pad = 256 // weight.element_size()
import torch.nn.functional as F
@@ -195,72 +214,41 @@ def test_rocm_wvsplitk_bias2D_kernel(n, k, m, dtype, seed):
assert torch.allclose(out, ref_out, rtol=0.01)
@pytest.mark.parametrize("xnorm", [False, True])
@pytest.mark.parametrize("n,k,m", NKM_FACTORS_WVSPLITK_FP8)
@pytest.mark.parametrize("dtype", DTYPES)
@pytest.mark.parametrize("seed", SEEDS)
@pytest.mark.parametrize("padded", [False, True])
@pytest.mark.parametrize("padded_a", [False, True])
@pytest.mark.parametrize("padded_b", [False, True])
@pytest.mark.parametrize("biased", [False, True])
@pytest.mark.skipif(
not (current_platform.is_rocm() and current_platform.supports_fp8()),
reason="only test for rocm fp8",
)
def test_rocm_wvsplitk_fp8_kernel(n, k, m, dtype, seed, padded):
def test_rocm_wvsplitk_fp8_kernel(
xnorm, n, k, m, dtype, seed, padded_a, padded_b, biased
):
torch.manual_seed(seed)
A = torch.rand(n, k, device="cuda") - 0.5
B = torch.rand(m, k, device="cuda") - 0.5
xavier = math.sqrt(2 / k) if xnorm else 1 # normalize to avoid large deltas
A = (torch.rand(n, k, device="cuda") * 2 - 1) * xavier
B = (torch.rand(m, k, device="cuda") * 2 - 1) * xavier
A, scale_a = ref_dynamic_per_tensor_fp8_quant(A)
B, scale_b = ref_dynamic_per_tensor_fp8_quant(B)
if padded:
B = pad_weights_fp8(B)
if padded_b:
B = pad_fp8(B)
if padded_a:
A = pad_fp8(A)
ref_out = torch._scaled_mm(
A, B.t(), out_dtype=dtype, scale_a=scale_a, scale_b=scale_b
)
out = ops.wvSplitKQ(
B,
A,
dtype,
scale_a,
scale_b,
get_cu_count(),
)
assert torch.allclose(out, ref_out, rtol=0.01)
@pytest.mark.parametrize("n,k,m", NKM_FACTORS_WVSPLITK_FP8)
@pytest.mark.parametrize("dtype", DTYPES)
@pytest.mark.parametrize("seed", SEEDS)
@pytest.mark.parametrize("padded", [False, True])
@pytest.mark.skipif(
not (current_platform.is_rocm() and current_platform.supports_fp8()),
reason="only test for rocm fp8",
)
def test_rocm_wvsplitk_fp8_bias1D_kernel(n, k, m, dtype, seed, padded):
torch.manual_seed(seed)
xavier = math.sqrt(2 / k) # normalize to avoid large output-bias deltas
A = (torch.rand(n, k, device="cuda") - 0.5) * xavier
B = (torch.rand(m, k, device="cuda") - 0.5) * xavier
BIAS = torch.rand(m, dtype=dtype, device="cuda") - 0.5
A, scale_a = ref_dynamic_per_tensor_fp8_quant(A)
B, scale_b = ref_dynamic_per_tensor_fp8_quant(B)
if padded:
B = pad_weights_fp8(B)
BIAS = None if (not biased) else (torch.rand(m, dtype=dtype, device="cuda") * 2 - 1)
ref_out = torch._scaled_mm(
A, B.t(), out_dtype=dtype, scale_a=scale_a, scale_b=scale_b, bias=BIAS
)
out = ops.wvSplitKQ(
B,
A,
dtype,
scale_a,
scale_b,
get_cu_count(),
BIAS,
)
out = ops.wvSplitKQ(B, A, dtype, scale_a, scale_b, get_cu_count(), BIAS)
assert torch.allclose(out, ref_out, rtol=0.01)
if xnorm:
assert torch.allclose(out, ref_out, atol=1e-3, rtol=1e-8)
else:
assert torch.allclose(out, ref_out, 0.01)