[Attention] Refactor CUDA attention backend selection logic (#24794)
Signed-off-by: Matthew Bonanni <mbonanni@redhat.com>
Signed-off-by: Matthew Bonanni <mbonanni001@gmail.com>
Co-authored-by: Luka Govedič <ProExpertProg@users.noreply.github.com>
@@ -12,7 +12,7 @@ from tests.v1.attention.utils import (
     create_standard_kv_cache_spec,
     try_get_attention_backend,
 )
-from vllm.attention.backends.registry import _Backend
+from vllm.attention.backends.registry import AttentionBackendEnum
 from vllm.config import (
     CacheConfig,
     DeviceConfig,
@@ -177,7 +177,9 @@ def test_mtp_propose(num_speculative_tokens, monkeypatch):
     sampling_metadata = mock.MagicMock()

     # Setup attention metadata
-    attn_metadata_builder_cls, _ = try_get_attention_backend(_Backend.FLASH_ATTN)
+    attn_metadata_builder_cls, _ = try_get_attention_backend(
+        AttentionBackendEnum.FLASH_ATTN
+    )

     attn_metadata_builder = attn_metadata_builder_cls(
         kv_cache_spec=create_standard_kv_cache_spec(proposer.vllm_config),
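For context, the diff replaces the private _Backend type with a public AttentionBackendEnum that callers pass to try_get_attention_backend. Below is a minimal sketch of this kind of enum-based backend selection. Only AttentionBackendEnum.FLASH_ATTN and try_get_attention_backend appear in the diff above; the other enum member, the select_backend helper, and the default fallback are illustrative assumptions, not vLLM's actual implementation.

import enum
from typing import Optional


class AttentionBackendEnum(enum.Enum):
    """Names the available attention implementations."""

    FLASH_ATTN = enum.auto()
    TRITON_ATTN = enum.auto()  # assumed member, for illustration only


def select_backend(
    preferred: Optional[AttentionBackendEnum],
) -> AttentionBackendEnum:
    """Return the preferred backend if given, else a default.

    Hypothetical helper: it shows why a public enum reads better than
    the previous private _Backend type, since callers can name the
    backend they want explicitly at the call site.
    """
    if preferred is not None:
        return preferred
    return AttentionBackendEnum.FLASH_ATTN  # assumed default


# Usage mirroring the pattern adopted in the diff above:
backend = select_backend(AttentionBackendEnum.FLASH_ATTN)
assert backend is AttentionBackendEnum.FLASH_ATTN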