Separate attention backends (#3005)

Author: Woosuk Kwon
Date: 2024-03-07 01:45:50 -08:00
Committed by: GitHub
Parent: cbf4c05b15
Commit: 2daf23ab0c
35 changed files with 561 additions and 271 deletions


@@ -43,7 +43,7 @@ from transformers import PretrainedConfig
 from vllm.model_executor.input_metadata import InputMetadata
 from vllm.model_executor.layers.activation import get_act_fn
-from vllm.model_executor.layers.attention import PagedAttention
+from vllm.model_executor.layers.attention import Attention
 from vllm.model_executor.layers.linear import (ColumnParallelLinear,
                                                LinearMethodBase,
                                                QKVParallelLinear,
@@ -108,7 +108,7 @@ class PhiAttention(nn.Module):
             max_position=max_position_embeddings,
             base=rope_theta,
         )
-        self.attn = PagedAttention(self.num_heads, self.head_size, scaling)
+        self.attn = Attention(self.num_heads, self.head_size, scaling)
 
     def forward(
         self,
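
Note on the change above: the model-facing layer is renamed from PagedAttention to Attention, so model code such as PhiAttention no longer names a specific attention implementation and the constructor arguments shown in the diff (num_heads, head_size, scaling) stay the same. The following is a minimal, illustrative sketch of the "facade plus swappable backend" shape this separation enables; it is not vLLM's actual implementation, and the NaiveSDPABackend class and the backend-selection comment are assumptions made only for this sketch.

# Illustrative sketch only: shows an Attention facade delegating to a
# swappable backend. Backend names and selection logic are assumptions,
# not vLLM code.
import math

import torch
import torch.nn as nn


class NaiveSDPABackend:
    """Fallback backend: plain scaled dot-product attention in PyTorch."""

    def forward(self, q, k, v, scale):
        # q, k, v: [batch, num_heads, seq_len, head_size]
        attn = torch.softmax(q @ k.transpose(-2, -1) * scale, dim=-1)
        return attn @ v


class Attention(nn.Module):
    """Model-facing attention layer that delegates to a backend."""

    def __init__(self, num_heads: int, head_size: int, scale: float):
        super().__init__()
        self.num_heads = num_heads
        self.head_size = head_size
        self.scale = scale
        # A real implementation would pick a backend (e.g. FlashAttention,
        # xFormers) based on hardware and availability; this sketch always
        # uses the naive backend so it runs anywhere.
        self.backend = NaiveSDPABackend()

    def forward(self, q, k, v):
        return self.backend.forward(q, k, v, self.scale)


if __name__ == "__main__":
    attn = Attention(num_heads=4, head_size=16, scale=1.0 / math.sqrt(16))
    q = torch.randn(1, 4, 8, 16)
    k = torch.randn(1, 4, 8, 16)
    v = torch.randn(1, 4, 8, 16)
    out = attn(q, k, v)
    print(out.shape)  # torch.Size([1, 4, 8, 16])

With this split, supporting another attention implementation means adding a backend behind the same Attention interface, while model files such as phi.py keep calling one layer and stay untouched.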