[Model] Support Mamba (#6484)
commit 7342a7d7f8
parent df3dcdf49d
committed by GitHub
@@ -20,22 +20,22 @@ def test_env(name: str, device: str, monkeypatch):
     if device == "cpu":
         with patch("vllm.attention.selector.is_cpu", return_value=True):
-            backend = which_attn_to_use(8, 16, 8, None, torch.float16,
-                                        torch.float16, 16)
+            backend = which_attn_to_use(16, None, torch.float16, torch.float16,
+                                        16, False)
         assert backend.name == "TORCH_SDPA"
     elif device == "hip":
         with patch("vllm.attention.selector.is_hip", return_value=True):
-            backend = which_attn_to_use(8, 16, 8, None, torch.float16,
-                                        torch.float16, 16)
+            backend = which_attn_to_use(16, None, torch.float16, torch.float16,
+                                        16, False)
         assert backend.name == "ROCM_FLASH"
     elif device == "openvino":
         with patch("vllm.attention.selector.is_openvino", return_value=True):
-            backend = which_attn_to_use(8, 16, 8, None, torch.float16,
-                                        torch.float16, 16)
+            backend = which_attn_to_use(16, None, torch.float16, torch.float16,
+                                        16, False)
         assert backend.name == "OPENVINO"
     else:
-        backend = which_attn_to_use(8, 16, 8, None, torch.float16,
-                                    torch.float16, 16)
+        backend = which_attn_to_use(16, None, torch.float16, torch.float16, 16,
+                                    False)
         assert backend.name == name
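The hunk above is purely a call-signature update: the leading num_heads and num_kv_heads arguments to which_attn_to_use are dropped, and a trailing boolean flag for attention-free models is appended. Below is a minimal sketch of the new call shape, assuming which_attn_to_use is importable from vllm.attention.selector (as the patched targets suggest); the parameter names in the comments are inferred from the call sites, not taken from this diff.

import torch

from vllm.attention.selector import which_attn_to_use

# Parameter names below are an assumption; only the positional values appear
# in the tests above.
backend = which_attn_to_use(
    16,             # head_size
    None,           # sliding_window
    torch.float16,  # dtype
    torch.float16,  # kv_cache_dtype
    16,             # block_size
    False,          # is_attention_free (new in this commit)
)
print(backend.name)  # e.g. "TORCH_SDPA" when the CPU path is selected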
@@ -46,32 +46,37 @@ def test_flash_attn(monkeypatch):
     # Unsupported CUDA arch
     with patch("torch.cuda.get_device_capability", return_value=(7, 5)):
-        backend = which_attn_to_use(8, 16, 8, None, torch.float16, None, 16)
+        backend = which_attn_to_use(16, None, torch.float16, None, 16, False)
         assert backend.name != STR_FLASH_ATTN_VAL

     # Unsupported data type
-    backend = which_attn_to_use(8, 16, 8, None, torch.float8_e4m3fn, None, 16)
+    backend = which_attn_to_use(16, None, torch.float8_e4m3fn, None, 16, False)
     assert backend.name != STR_FLASH_ATTN_VAL

     # Unsupported kv cache data type
-    backend = which_attn_to_use(8, 16, 8, None, torch.float16, "fp8", 16)
+    backend = which_attn_to_use(16, None, torch.float16, "fp8", 16, False)
     assert backend.name != STR_FLASH_ATTN_VAL

     # Unsupported block size
-    backend = which_attn_to_use(8, 16, 8, None, torch.float16, None, 8)
+    backend = which_attn_to_use(16, None, torch.float16, None, 8, False)
     assert backend.name != STR_FLASH_ATTN_VAL

     # Unsupported sliding window
-    backend = which_attn_to_use(8, 16, 8, 1, torch.float16, None, 16)
+    backend = which_attn_to_use(16, 1, torch.float16, None, 16, False)
     assert backend.name != STR_FLASH_ATTN_VAL

     # flash-attn is not installed
     with patch.dict('sys.modules', {'vllm_flash_attn': None}):
-        backend = which_attn_to_use(8, 16, 8, None, torch.float16, None, 16)
+        backend = which_attn_to_use(16, None, torch.float16, None, 16, False)
         assert backend.name != STR_FLASH_ATTN_VAL

     # Unsupported head size
-    backend = which_attn_to_use(8, 17, 8, None, torch.float16, None, 16)
+    backend = which_attn_to_use(17, None, torch.float16, None, 16, False)
     assert backend.name != STR_FLASH_ATTN_VAL
+
+    # Attention-free models should bypass env and use PlaceholderAttention
+    backend = which_attn_to_use(16, None, torch.float16, torch.float16, 16,
+                                True)
+    assert backend.name != STR_FLASH_ATTN_VAL
@@ -79,4 +84,4 @@ def test_invalid_env(monkeypatch):
     """Throw an exception if the backend name is invalid."""
     override_backend_env_variable(monkeypatch, STR_INVALID_VAL)
     with pytest.raises(ValueError):
-        which_attn_to_use(8, 16, 8, None, torch.float16, None, 16)
+        which_attn_to_use(16, None, torch.float16, None, 16, False)
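The block added at the end of test_flash_attn is the Mamba-relevant change: with the final argument set to True, the model is treated as attention-free, and the selector must not return the flash-attn backend even though the environment override requests it (per the test comment, a placeholder attention backend is used instead). A hedged sketch of that path follows; the exact backend name returned for attention-free models is not shown in this diff and is left unasserted here.

import torch

from vllm.attention.selector import which_attn_to_use

# is_attention_free=True: attention-free (e.g. Mamba-style) models should not
# land on flash-attn regardless of the backend requested via the env override.
backend = which_attn_to_use(16, None, torch.float16, torch.float16, 16, True)
assert backend.name != "FLASH_ATTN"  # stand-in for the test's STR_FLASH_ATTN_VAL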