[5/N][Attention] Finish eliminating vllm/attention folder (#32064)
Signed-off-by: Matthew Bonanni <mbonanni@redhat.com>
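Based on the hunks below, callers stop importing from the old vllm/attention package: Attention and StaticSinkAttention now come from vllm.model_executor.layers.attention, and AttentionType moves to vllm.v1.attention.backend. A minimal sketch of the import migration for a model file, using only the paths visible in this diff (any other symbols exported by the new package are an assumption and not shown here):

# Old import paths removed by this series (per the diff below):
# from vllm.attention.layer import Attention, AttentionType
# from vllm.model_executor.layers.attention.static_sink_attention import (
#     StaticSinkAttention,
# )

# New consolidated paths used after this change:
from vllm.model_executor.layers.attention import Attention, StaticSinkAttention
from vllm.v1.attention.backend import AttentionType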
@@ -29,7 +29,6 @@ import torch
 from torch import nn
 from transformers import PretrainedConfig
 
-from vllm.attention.layer import Attention, AttentionType
 from vllm.compilation.decorators import support_torch_compile
 from vllm.config import CacheConfig, ParallelConfig, VllmConfig
 from vllm.distributed import (
@@ -41,7 +40,8 @@ from vllm.distributed import (
     tensor_model_parallel_all_gather,
 )
 from vllm.model_executor.layers.activation import SiluAndMul
-from vllm.model_executor.layers.attention.static_sink_attention import (
+from vllm.model_executor.layers.attention import (
+    Attention,
     StaticSinkAttention,
 )
 from vllm.model_executor.layers.fused_moe import SharedFusedMoE
@@ -84,6 +84,7 @@ from vllm.model_executor.utils import set_weight_attrs
 from vllm.platforms import current_platform
 from vllm.sequence import IntermediateTensors
 from vllm.transformers_utils.config import set_default_rope_theta
+from vllm.v1.attention.backend import AttentionType
 from vllm.v1.attention.backends.flash_attn_diffkv import FlashAttentionDiffKVBackend