[Bugfix] Fix illegal memory access error with chunked prefill, prefix caching, block manager v2 and xformers enabled together (#9532)

Signed-off-by: sasha0552 <admin@sasha0552.org>
This commit is contained in:
sasha0552
2024-10-31 18:46:36 +00:00
committed by GitHub
parent 77f7ef2908
commit 55650c83a0
2 changed files with 34 additions and 3 deletions

View File

@@ -5,6 +5,7 @@ Run `pytest tests/prefix_caching/test_prefix_caching.py`.
import pytest
from tests.kernels.utils import override_backend_env_variable
from vllm import SamplingParams, TokensPrompt
from ..models.utils import check_outputs_equal
@@ -12,6 +13,14 @@ MODELS = [
"facebook/opt-125m",
]
# Token-ID prompts that share a long common prefix (588 zeros then 1332 ones),
# so successive requests hit the prefix cache; the final prompt diverges after
# the first 588 tokens. Used by test_unstable_prompt_sequence below.
_SHARED_PREFIX = [0] * 588 + [1] * 1332
UNSTABLE_PROMPT_SEQUENCE = [
    _SHARED_PREFIX + [2] * 30 + [3] * 1,
    _SHARED_PREFIX + [4] * 3 + [5] * 50,
    _SHARED_PREFIX + [2] * 30 + [6] * 95,
    _SHARED_PREFIX + [4] * 3 + [7] * 174,
    [0] * 588 + [8] * 1539,
]
@pytest.mark.parametrize("model", MODELS)
@pytest.mark.parametrize("backend", ["FLASH_ATTN", "FLASHINFER", "XFORMERS"])
@@ -57,3 +66,22 @@ def test_mixed_requests(
name_0="hf",
name_1="vllm",
)
@pytest.mark.parametrize("backend", ["FLASH_ATTN", "FLASHINFER", "XFORMERS"])
def test_unstable_prompt_sequence(
    vllm_runner,
    backend: str,
    monkeypatch,
) -> None:
    """Feed a sequence of long, prefix-sharing token-ID prompts through a
    model with chunked prefill and prefix caching both enabled.

    Regression test for the illegal-memory-access crash described in the
    commit title (chunked prefill + prefix caching + xformers); passing
    means generation completes without error for every prompt.
    """
    override_backend_env_variable(monkeypatch, backend)

    with vllm_runner(
            "Qwen/Qwen2.5-0.5B-Instruct",
            enable_chunked_prefill=True,
            enable_prefix_caching=True,
            max_model_len=4096,
    ) as vllm_model:
        for token_ids in UNSTABLE_PROMPT_SEQUENCE:
            prompt = TokensPrompt(prompt_token_ids=token_ids)
            vllm_model.generate(prompt, SamplingParams(max_tokens=1))