Separate attention backends (#3005)

This commit is contained in:
Woosuk Kwon
2024-03-07 01:45:50 -08:00
committed by GitHub
parent cbf4c05b15
commit 2daf23ab0c
35 changed files with 561 additions and 271 deletions

View File

@@ -12,7 +12,7 @@ from transformers import PretrainedConfig
from vllm.model_executor.input_metadata import InputMetadata
from vllm.model_executor.layers.activation import SiluAndMul
-from vllm.model_executor.layers.attention import PagedAttention
+from vllm.model_executor.layers.attention import Attention
from vllm.model_executor.layers.layernorm import RMSNorm
from vllm.model_executor.layers.linear import (LinearMethodBase,
MergedColumnParallelLinear,
@@ -104,7 +104,7 @@ class QWenAttention(nn.Module):
base=rope_theta,
rope_scaling=rope_scaling,
)
-        self.attn = PagedAttention(self.num_heads, self.head_dim, self.scaling)
+        self.attn = Attention(self.num_heads, self.head_dim, self.scaling)
def forward(
self,