[Attention] Implement universal BACKEND_MAP (#25900)

Signed-off-by: Matthew Bonanni <mbonanni@redhat.com>
Author: Matthew Bonanni
Date: 2025-10-08 15:00:25 -04:00
Committed by: GitHub
parent b25d7b5657
commit 76879cc160
12 changed files with 119 additions and 75 deletions


@@ -9,7 +9,7 @@ import torch
 from tests.v1.attention.utils import (
     create_standard_kv_cache_spec,
     create_vllm_config,
-    get_attention_backend,
+    try_get_attention_backend,
 )
 from vllm.attention.backends.registry import _Backend
 from vllm.config import ParallelConfig, SpeculativeConfig
@@ -63,7 +63,7 @@ def forward_attention(
     # Build common metadata.
     model_name = "meta-llama/Meta-Llama-3-8B"
-    builder_cls, impl_cls = get_attention_backend(backend)
+    builder_cls, impl_cls = try_get_attention_backend(backend)
     vllm_config = create_vllm_config(model_name=model_name, max_model_len=max(seq_lens))
     if spec_token_tree is not None:
         # Create speculative config if token tree is specified.
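
The test-facing change in these hunks is the rename from get_attention_backend to try_get_attention_backend, which suggests a lookup that degrades gracefully when a backend is not installed rather than failing outright. As a minimal sketch of how a universal BACKEND_MAP plus such a helper could be wired together (the map layout, the module-path strings, the pytest.skip behavior, and the get_builder_cls/get_impl_cls accessors are all assumptions for illustration, not vLLM's actual implementation):

# Rough sketch only: a lazy backend registry plus a "try" lookup that skips
# tests when a backend's dependencies are missing. All names below are
# illustrative assumptions, not vLLM's real code.
from enum import Enum, auto
from importlib import import_module

import pytest


class _Backend(Enum):
    # Illustrative backend identifiers.
    FLASH_ATTN = auto()
    FLASHINFER = auto()


# Assumed shape of a universal map: backend enum -> "module.path:ClassName".
# The paths below are placeholders, not real class locations.
BACKEND_MAP = {
    _Backend.FLASH_ATTN: "some_pkg.attention.flash_attn:FlashAttentionBackend",
    _Backend.FLASHINFER: "some_pkg.attention.flashinfer:FlashInferBackend",
}


def try_get_attention_backend(backend: _Backend):
    """Resolve a backend to (builder_cls, impl_cls), skipping the test if unavailable."""
    try:
        module_name, class_name = BACKEND_MAP[backend].split(":")
    except KeyError:
        raise ValueError(f"No entry in BACKEND_MAP for {backend!r}") from None
    try:
        backend_cls = getattr(import_module(module_name), class_name)
    except ImportError as exc:
        # Skip rather than fail when the optional backend cannot be imported.
        pytest.skip(f"{backend.name} is not available in this environment: {exc}")
    # Assumed accessors returning the metadata-builder and implementation classes.
    return backend_cls.get_builder_cls(), backend_cls.get_impl_cls()

Storing import paths as strings keeps the map itself importable even when optional backends are absent, which is one plausible reading of "universal" here; the real registry in vllm.attention.backends.registry may be organized differently.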