[5/N][Attention] Finish eliminating vllm/attention folder (#32064)
Signed-off-by: Matthew Bonanni <mbonanni@redhat.com>
@@ -11,7 +11,7 @@ from vllm.compilation.decorators import support_torch_compile
 from vllm.config import CacheConfig, ModelConfig, PoolerConfig, VllmConfig
 from vllm.distributed import get_tensor_model_parallel_world_size
 from vllm.model_executor.layers.activation import get_act_fn
-from vllm.model_executor.layers.attention.encoder_only_attention import (
+from vllm.model_executor.layers.attention import (
     EncoderOnlyAttention,
 )
 from vllm.model_executor.layers.linear import (
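For downstream code, only the import path changes. A minimal before/after sketch; that EncoderOnlyAttention is re-exported from the vllm.model_executor.layers.attention package itself is an assumption inferred from this hunk, not confirmed elsewhere in the commit:

    # Before this commit: imported from the dedicated submodule.
    # from vllm.model_executor.layers.attention.encoder_only_attention import (
    #     EncoderOnlyAttention,
    # )

    # After this commit: imported from the package, which is assumed
    # to re-export the class (based on the hunk above).
    from vllm.model_executor.layers.attention import EncoderOnlyAttention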