Separate attention backends (#3005)

This commit is contained in:
Woosuk Kwon
2024-03-07 01:45:50 -08:00
committed by GitHub
parent cbf4c05b15
commit 2daf23ab0c
35 changed files with 561 additions and 271 deletions

View File

@@ -26,7 +26,7 @@ from transformers import GPTBigCodeConfig
from vllm.model_executor.input_metadata import InputMetadata
from vllm.model_executor.layers.activation import get_act_fn
-from vllm.model_executor.layers.attention import PagedAttention
+from vllm.model_executor.layers.attention import Attention
from vllm.model_executor.layers.linear import (ColumnParallelLinear,
LinearMethodBase,
QKVParallelLinear,
@@ -85,10 +85,10 @@ class GPTBigCodeAttention(nn.Module):
bias=True,
linear_method=linear_method,
)
-        self.attn = PagedAttention(self.num_heads,
-                                   self.head_dim,
-                                   scale=self.scale,
-                                   num_kv_heads=self.num_kv_heads)
+        self.attn = Attention(self.num_heads,
+                              self.head_dim,
+                              scale=self.scale,
+                              num_kv_heads=self.num_kv_heads)
def forward(
self,