[Misc][Refactor] Generalize linear_method to be quant_method (#4373)

Cody Yu authored on 2024-04-26 13:41:14 -07:00; committed by GitHub
parent 603ad84815
commit a62aaf1df5
45 changed files with 759 additions and 713 deletions

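Note on the pattern (commentary, not part of the diff): the change below is mechanical. Every Falcon module that used to accept linear_method: Optional[LinearMethodBase] now accepts quant_config: Optional[QuantizationConfig] and threads it into the parallel linear layers, which (per the PR title) resolve the appropriate quantize method internally. A minimal, self-contained sketch of that threading; ToyMLP is a hypothetical stand-in, not code from this commit:

    from typing import Optional

    import torch.nn as nn

    from vllm.model_executor.layers.linear import (ColumnParallelLinear,
                                                   RowParallelLinear)
    from vllm.model_executor.layers.quantization.base_config import (
        QuantizationConfig)


    class ToyMLP(nn.Module):
        """Hypothetical module mirroring the FalconMLP changes below."""

        def __init__(
            self,
            hidden_size: int,
            quant_config: Optional[QuantizationConfig] = None,
        ):
            super().__init__()
            # The config (or None for an unquantized model) is passed straight
            # to each parallel linear layer instead of a pre-built
            # LinearMethodBase object.
            self.dense_h_to_4h = ColumnParallelLinear(hidden_size,
                                                      4 * hidden_size,
                                                      quant_config=quant_config)
            self.dense_4h_to_h = RowParallelLinear(4 * hidden_size,
                                                   hidden_size,
                                                   quant_config=quant_config)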

@@ -32,10 +32,11 @@ from vllm.distributed import (get_tensor_model_parallel_rank,
                               tensor_model_parallel_all_reduce)
 from vllm.model_executor.layers.activation import get_act_fn
 from vllm.model_executor.layers.linear import (ColumnParallelLinear,
-                                               LinearMethodBase,
                                                QKVParallelLinear,
                                                RowParallelLinear)
 from vllm.model_executor.layers.logits_processor import LogitsProcessor
+from vllm.model_executor.layers.quantization.base_config import (
+    QuantizationConfig)
 from vllm.model_executor.layers.rotary_embedding import get_rope
 from vllm.model_executor.layers.sampler import Sampler
 from vllm.model_executor.layers.vocab_parallel_embedding import (
@@ -76,7 +77,7 @@ class FalconAttention(nn.Module):
     def __init__(
         self,
         config: FalconConfig,
-        linear_method: Optional[LinearMethodBase] = None,
+        quant_config: Optional[QuantizationConfig] = None,
     ):
         super().__init__()
@@ -115,7 +116,7 @@ class FalconAttention(nn.Module):
             self.total_num_kv_heads,
             bias=config.bias,
             skip_bias_add=True,
-            linear_method=linear_method,
+            quant_config=quant_config,
         )
         self.q_size = self.num_heads * self.head_dim
         self.kv_size = self.num_kv_heads * self.head_dim
@@ -129,7 +130,7 @@ class FalconAttention(nn.Module):
             self.hidden_size,
             bias=config.bias,
             skip_bias_add=True,
-            linear_method=linear_method,
+            quant_config=quant_config,
             reduce_results=self.reduce_row_parallel_results)
         self.use_rotary = config.rotary
@@ -192,7 +193,7 @@ class FalconMLP(nn.Module):
     def __init__(
         self,
         config: FalconConfig,
-        linear_method: Optional[LinearMethodBase] = None,
+        quant_config: Optional[QuantizationConfig] = None,
     ):
         super().__init__()
         hidden_size = config.hidden_size
@@ -201,8 +202,8 @@ class FalconMLP(nn.Module):
                                                  4 * hidden_size,
                                                  bias=config.bias,
                                                  skip_bias_add=True,
-                                                 linear_method=linear_method)
-        quant_config = getattr(linear_method, "quant_config", None)
+                                                 quant_config=quant_config)
+        quant_config = getattr(quant_config, "quant_config", None)
         self.act = get_act_fn("gelu", quant_config, 4 * hidden_size)
         self.reduce_row_parallel_results = not (config.new_decoder_architecture
                                                 or config.parallel_attn)
@@ -212,7 +213,7 @@ class FalconMLP(nn.Module):
             bias=config.bias,
             skip_bias_add=True,
             reduce_results=self.reduce_row_parallel_results,
-            linear_method=linear_method)
+            quant_config=quant_config)
 
     def forward(self, x: torch.Tensor) -> torch.Tensor:
         # NOTE(zhuohan): Following huggingface, we do not fuse bias add here.
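Aside on the FalconMLP hunks above (commentary, not part of the commit): previously the quantization config reached get_act_fn only indirectly, recovered via getattr(linear_method, "quant_config", None); with the constructor now holding a QuantizationConfig, it can be forwarded as-is. A small sketch under that assumption; build_falcon_act is a hypothetical helper used only for illustration:

    from typing import Optional

    from vllm.model_executor.layers.activation import get_act_fn
    from vllm.model_executor.layers.quantization.base_config import (
        QuantizationConfig)


    def build_falcon_act(hidden_size: int,
                         quant_config: Optional[QuantizationConfig] = None):
        # Same call shape as in the hunk above: get_act_fn uses the config to
        # decide whether the activation needs quantization-aware handling,
        # and falls back to the plain GELU otherwise.
        return get_act_fn("gelu", quant_config, 4 * hidden_size)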
@@ -229,13 +230,13 @@ class FalconDecoderLayer(nn.Module):
     def __init__(
         self,
         config: FalconConfig,
-        linear_method: Optional[LinearMethodBase] = None,
+        quant_config: Optional[QuantizationConfig] = None,
     ):
         super().__init__()
         hidden_size = config.hidden_size
         self.num_heads = config.num_attention_heads
-        self.self_attention = FalconAttention(config, linear_method)
-        self.mlp = FalconMLP(config, linear_method)
+        self.self_attention = FalconAttention(config, quant_config)
+        self.mlp = FalconMLP(config, quant_config)
         self.config = config
 
         if config.new_decoder_architecture:
@@ -311,7 +312,7 @@ class FalconModel(nn.Module):
     def __init__(
         self,
         config: FalconConfig,
-        linear_method: Optional[LinearMethodBase] = None,
+        quant_config: Optional[QuantizationConfig] = None,
     ):
         super().__init__()
         self.config = config
@@ -327,7 +328,7 @@ class FalconModel(nn.Module):
         # Transformer blocks
         self.h = nn.ModuleList([
-            FalconDecoderLayer(config, linear_method)
+            FalconDecoderLayer(config, quant_config)
             for _ in range(config.num_hidden_layers)
         ])
@@ -359,12 +360,12 @@ class FalconForCausalLM(nn.Module):
     def __init__(
         self,
         config: FalconConfig,
-        linear_method: Optional[LinearMethodBase] = None,
+        quant_config: Optional[QuantizationConfig] = None,
     ):
         super().__init__()
         self.config = config
-        self.linear_method = linear_method
-        self.transformer = FalconModel(config, linear_method)
+        self.quant_config = quant_config
+        self.transformer = FalconModel(config, quant_config)
         self.lm_head_weight = self.transformer.word_embeddings.weight
         self.logits_processor = LogitsProcessor(config.vocab_size)
         self.sampler = Sampler()
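Big-picture sketch of where the renamed argument ends up (illustrative only; every name below is a stand-in, not the vLLM API at this revision): the generalization means the QuantizationConfig, rather than the caller, decides which quantize method a layer gets, so the same mechanism can presumably cover more than just linear layers later. Self-contained toy:

    from typing import Optional

    import torch
    import torch.nn as nn
    import torch.nn.functional as F


    class ToyQuantMethod:
        """Stand-in for a generic quantize method: owns weights and apply()."""

        def create_weights(self, layer: nn.Module, in_features: int,
                           out_features: int) -> None:
            layer.weight = nn.Parameter(torch.empty(out_features, in_features))

        def apply(self, layer: nn.Module, x: torch.Tensor) -> torch.Tensor:
            return F.linear(x, layer.weight)


    class ToyQuantConfig:
        """Stand-in for QuantizationConfig: picks a method per layer."""

        def get_quant_method(self, layer: nn.Module) -> ToyQuantMethod:
            return ToyQuantMethod()


    class ToyLinear(nn.Module):
        def __init__(self, in_features: int, out_features: int,
                     quant_config: Optional[ToyQuantConfig] = None):
            super().__init__()
            # The layer asks the config for its method; None means unquantized.
            cfg = quant_config or ToyQuantConfig()
            self.quant_method = cfg.get_quant_method(self)
            self.quant_method.create_weights(self, in_features, out_features)

        def forward(self, x: torch.Tensor) -> torch.Tensor:
            return self.quant_method.apply(self, x)

Here ToyQuantConfig plays the role quant_config plays throughout the diff: it is handed to the layer constructor and consulted there, instead of the caller constructing a LinearMethodBase up front.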