# SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project

import pytest
import torch

from vllm.attention.selector import _cached_get_attn_backend, get_attn_backend
from vllm.platforms.rocm import RocmPlatform


@pytest.fixture(autouse=True)
def clear_cache():
    """Clear lru cache to ensure each test case runs without caching."""
    _cached_get_attn_backend.cache_clear()


@pytest.mark.skip(reason="Skipped for now. Should be revisited.")
def test_selector(monkeypatch: pytest.MonkeyPatch):
    with monkeypatch.context() as m:
        m.setenv("VLLM_ATTENTION_BACKEND", "ROCM_ATTN")

        # Set the current platform to ROCm using monkeypatch
        monkeypatch.setattr("vllm.attention.selector.current_platform", RocmPlatform())

        # Test standard ROCm attention
        backend = get_attn_backend(16, torch.float16, torch.float16, 16, False)
        assert backend.get_name() in ("ROCM_FLASH", "TRITON_ATTN")

        # MLA tests for DeepSeek-related models
        # Change the attention backend to Triton MLA
        m.setenv("VLLM_ATTENTION_BACKEND", "TRITON_MLA")
        # head_size 576 = kv_lora_rank (512) + qk_rope_head_dim (64) in DeepSeek MLA
        backend = get_attn_backend(576, torch.bfloat16, "auto", 16, False, use_mla=True)
        assert backend.get_name() == "TRITON_MLA"

        # With the attention backend left unset and use_mla=True,
        # the Triton MLA backend should still be selected.
        m.setenv("VLLM_ATTENTION_BACKEND", "")
        backend = get_attn_backend(576, torch.bfloat16, "auto", 16, False, use_mla=True)
        assert backend.get_name() == "TRITON_MLA"

        # Change the attention backend to AITER MLA
        m.setenv("VLLM_ATTENTION_BACKEND", "ROCM_AITER_MLA")
        # Note: block_size=1 is used for the AITER MLA backend
        backend = get_attn_backend(576, torch.bfloat16, "auto", 1, False, use_mla=True)
        assert backend.get_name() == "ROCM_AITER_MLA"

        # With the attention backend left unset, use_mla=True,
        # and VLLM_ROCM_USE_AITER enabled, the selected backend
        # should be ROCM_AITER_MLA.
        m.setenv("VLLM_ATTENTION_BACKEND", "")
        m.setenv("VLLM_ROCM_USE_AITER", "1")
        backend = get_attn_backend(576, torch.bfloat16, "auto", 1, False, use_mla=True)
        assert backend.get_name() == "ROCM_AITER_MLA"