[5/N][Attention] Finish eliminating vllm/attention folder (#32064)

Signed-off-by: Matthew Bonanni <mbonanni@redhat.com>
Author: Matthew Bonanni
Date:   2026-01-27 10:02:51 -05:00
Committed by: GitHub
parent 1f3a2c2944
commit a608b4c6c2
151 changed files with 585 additions and 527 deletions

@@ -5,7 +5,6 @@ import numpy as np
 import pytest
 import torch
-from vllm.attention.layer import Attention
 from vllm.config import (
     AttentionConfig,
     CacheConfig,
@@ -19,6 +18,7 @@ from vllm.distributed.parallel_state import (
     init_distributed_environment,
     initialize_model_parallel,
 )
+from vllm.model_executor.layers.attention import Attention
 from vllm.model_executor.layers.mamba.mamba_mixer2 import MambaMixer2
 from vllm.platforms import current_platform
 from vllm.sampling_params import SamplingParams
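
This hunk moves the `Attention` import from the removed `vllm/attention` folder to its new location under `vllm/model_executor/layers`. For downstream code that must run on both sides of this change, a minimal sketch of a version-tolerant import, assuming only the two paths shown in the diff; the try/except fallback is illustrative, not part of the commit:

try:
    # Post-commit location, after the vllm/attention folder was eliminated.
    from vllm.model_executor.layers.attention import Attention
except ImportError:
    # Pre-commit location, kept as a fallback for older vLLM releases.
    from vllm.attention.layer import Attention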