[Feature] Batch Invariant: Support DeepGEMM and Blackwell (#27127)

Signed-off-by: yewentao256 <zhyanwentao@126.com>

Author: Wentao Ye
Date: 2025-10-18 09:28:05 -04:00
Committed by: GitHub
Parent: 1d165d6d85
Commit: 245e4f2c01

3 changed files with 71 additions and 21 deletions
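
The substance of both test-file diffs below is the same two-line change: the skip marker is renamed from hopper_only to skip_unsupported, and the exact-match capability check is relaxed to a minimum-capability check, so the tests run on Hopper and anything newer (such as Blackwell) instead of SM90 only. A minimal sketch of the difference between the two current_platform methods that appear in the diff (the variable names and printout are illustrative, not part of the commit):

    from vllm.platforms import current_platform

    # Exact match: True only on compute capability 9.0 (Hopper).
    # On Blackwell this returns False, so the old marker skipped the tests.
    exactly_hopper = current_platform.is_device_capability(90)

    # Minimum capability: True on SM90 and anything newer, Blackwell included.
    # This is what the renamed skip_unsupported marker now checks.
    at_least_hopper = current_platform.has_device_capability(90)

    print(exactly_hopper, at_least_hopper)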


@@ -10,9 +10,9 @@ import torch
 from vllm import LLM, SamplingParams
 from vllm.platforms import current_platform
 
-hopper_only = pytest.mark.skipif(
-    not (current_platform.is_cuda() and current_platform.is_device_capability(90)),
-    reason="Requires CUDA and Hopper (SM90)",
+skip_unsupported = pytest.mark.skipif(
+    not (current_platform.is_cuda() and current_platform.has_device_capability(90)),
+    reason="Requires CUDA and >= Hopper (SM90)",
 )
@@ -74,7 +74,7 @@ def _random_prompt(min_words: int = 1024, max_words: int = 1024 * 2) -> str:
     return base_prompt
 
 
-@hopper_only
+@skip_unsupported
 @pytest.mark.timeout(1000)
 def test_v1_generation_is_deterministic_across_batch_sizes_with_needle():
     """
@@ -219,7 +219,7 @@ def _extract_step_logprobs(request_output):
     return None, None
 
 
-@hopper_only
+@skip_unsupported
 @pytest.mark.parametrize("backend", ["FLASH_ATTN", "FLASHINFER"])
 @pytest.mark.forked
 def test_logprobs_bitwise_batch_invariance_bs1_vs_bsN(backend):
@@ -434,7 +434,7 @@ def test_logprobs_bitwise_batch_invariance_bs1_vs_bsN(backend):
         pytest.fail(msg)
 
 
-@hopper_only
+@skip_unsupported
 def test_simple_generation():
     """
     Simple test that runs the model with a basic prompt and prints the output.
@@ -480,7 +480,7 @@ def test_simple_generation():
         llm.shutdown()
 
 
-@hopper_only
+@skip_unsupported
 @pytest.mark.parametrize("backend", ["FLASH_ATTN", "FLASHINFER"])
 @pytest.mark.forked
 def test_logprobs_WITHOUT_batch_invariance_should_FAIL(backend):
@@ -707,7 +707,7 @@ def test_logprobs_WITHOUT_batch_invariance_should_FAIL(backend):
         os.environ["VLLM_BATCH_INVARIANT"] = old_value
 
 
-@hopper_only
+@skip_unsupported
 @pytest.mark.parametrize("backend", ["FLASH_ATTN"])
 @pytest.mark.forked
 def test_decode_logprobs_match_prefill_logprobs(backend):
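
The hunk above restores os.environ["VLLM_BATCH_INVARIANT"], the switch these tests flip to turn batch-invariant kernels on and off. A hedged sketch of the toggle (the env var name comes straight from the diff; that vLLM reads it at engine startup is an assumption):

    import os

    # With the flag set, per-token logprobs for a given prompt are expected
    # to be bitwise identical at batch size 1 and batch size N; the
    # ..._WITHOUT_batch_invariance_should_FAIL test above expects divergence
    # when the flag is off. Set it before constructing the LLM engine.
    os.environ["VLLM_BATCH_INVARIANT"] = "1"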


@@ -14,13 +14,13 @@ from vllm.model_executor.layers.batch_invariant import rms_norm as triton_rms_norm
 from vllm.model_executor.layers.layernorm import RMSNorm
 from vllm.platforms import current_platform
 
-hopper_only = pytest.mark.skipif(
-    not (current_platform.is_cuda() and current_platform.is_device_capability(90)),
-    reason="Requires CUDA and Hopper (SM90)",
+skip_unsupported = pytest.mark.skipif(
+    not (current_platform.is_cuda() and current_platform.has_device_capability(90)),
+    reason="Requires CUDA and >= Hopper (SM90)",
 )
 
 
-@hopper_only
+@skip_unsupported
 @pytest.mark.parametrize("batch_size", [1, 4, 16, 64])
 @pytest.mark.parametrize("hidden_size", [512, 2048, 4096, 8192])
 @pytest.mark.parametrize("dtype", [torch.float16, torch.bfloat16])
@@ -69,7 +69,7 @@ def test_rms_norm_batch_invariant_vs_standard(
     )
 
 
-@hopper_only
+@skip_unsupported
 @pytest.mark.parametrize("batch_size", [1, 16, 128])
 @pytest.mark.parametrize("seq_len", [1, 32, 512])
 @pytest.mark.parametrize("hidden_size", [2048, 4096])
@@ -111,7 +111,7 @@ def test_rms_norm_3d_input(batch_size: int, seq_len: int, hidden_size: int):
     )
 
 
-@hopper_only
+@skip_unsupported
 def test_rms_norm_numerical_stability():
     """
     Test RMS norm numerical stability with extreme values.
@@ -171,7 +171,7 @@ def test_rms_norm_numerical_stability():
     )
 
 
-@hopper_only
+@skip_unsupported
 def test_rms_norm_formula():
     """
     Test that RMS norm follows the correct mathematical formula.
@@ -204,7 +204,7 @@ def test_rms_norm_formula():
     )
 
 
-@hopper_only
+@skip_unsupported
 @pytest.mark.parametrize("hidden_size", [128, 1024, 4096, 16384])
 def test_rms_norm_different_hidden_sizes(hidden_size: int):
     """
@@ -242,7 +242,7 @@ def test_rms_norm_different_hidden_sizes(hidden_size: int):
     )
 
 
-@hopper_only
+@skip_unsupported
 def test_rms_norm_determinism():
     """
    Test that batch-invariant RMS norm produces deterministic results.