[Kernel][Core] Add AWQ support to the Marlin kernel (#6612)
parent 25e778aa16
commit 396d92d5e0
@@ -1,54 +1,92 @@
 from typing import List, Optional, Tuple

 import numpy
 import torch

 from vllm import _custom_ops as ops
 from vllm.platforms import current_platform

+from .quant_utils import pack_cols, unpack_cols
+
 GPTQ_MARLIN_TILE = 16
 GPTQ_MARLIN_MIN_THREAD_N = 64
 GPTQ_MARLIN_MIN_THREAD_K = 128
 GPTQ_MARLIN_MAX_PARALLEL = 16

-GPTQ_MARLIN_SUPPORTED_NUM_BITS = [4, 8]
-GPTQ_MARLIN_SUPPORTED_GROUP_SIZES = [-1, 32, 64, 128]
-GPTQ_MARLIN_SUPPORTED_SYM = [True]
-GTPQ_MARLIN_UNSUPPORTED_GROUP_SIZE_ACT_ORDER = [-1]
+MARLIN_SUPPORTED_NUM_BITS = [4, 8]
+MARLIN_SUPPORTED_GROUP_SIZES = [-1, 32, 64, 128]


-def check_marlin_supported(num_bits: int, group_size: int, is_sym: bool,
-                           min_capability: int) -> bool:
-    # If the capability of the device is too low, cannot convert.
-    major, minor = current_platform.get_device_capability()
-    device_capability = major * 10 + minor
-    if device_capability < min_capability:
-        return False
-
-    return (device_capability >= min_capability
-            and num_bits in GPTQ_MARLIN_SUPPORTED_NUM_BITS
-            and group_size in GPTQ_MARLIN_SUPPORTED_GROUP_SIZES
-            and is_sym in GPTQ_MARLIN_SUPPORTED_SYM)
+def _check_marlin_supported(num_bits: int, group_size: int, is_sym: bool,
+                            min_capability: Optional[int],
+                            has_zp: bool) -> Tuple[bool, Optional[str]]:
+    if min_capability is not None:
+        major, minor = current_platform.get_device_capability()
+        device_capability = major * 10 + minor
+        if device_capability < min_capability:
+            return (False, "Marlin does not support device_capability = {}"
+                    ", the min_capability required is {}".format(
+                        device_capability, min_capability))
+
+    if num_bits not in MARLIN_SUPPORTED_NUM_BITS:
+        return (False, "Marlin does not support weight_bits = {}. "
+                "Only weight_bits = {} are supported.".format(
+                    num_bits, MARLIN_SUPPORTED_NUM_BITS))
+
+    if group_size not in MARLIN_SUPPORTED_GROUP_SIZES:
+        return (False, "Marlin does not support group_size = {}. Only "
+                "group_sizes = {} are supported.".format(
+                    group_size, MARLIN_SUPPORTED_GROUP_SIZES))
+
+    if not has_zp and not is_sym:
+        return (False,
+                "Marlin without zero_points must have symmetric quantization")
+
+    return True, None


-def verify_marlin_supported(num_bits: int, group_size: Optional[int],
-                            is_sym: bool) -> None:
-    if num_bits not in GPTQ_MARLIN_SUPPORTED_NUM_BITS:
-        raise ValueError(
-            f"Marlin does not support weight_bits = {num_bits}. "
-            f"Only weight_bits = {GPTQ_MARLIN_SUPPORTED_NUM_BITS} "
-            "are supported.")
-    if (group_size is None
-            or group_size not in GPTQ_MARLIN_SUPPORTED_GROUP_SIZES):
-        raise ValueError(
-            f"Marlin does not support group_size = {group_size}. "
-            f"Only group_sizes = {GPTQ_MARLIN_SUPPORTED_GROUP_SIZES} "
-            "are supported.")
-    if is_sym not in GPTQ_MARLIN_SUPPORTED_SYM:
-        raise ValueError(
-            f"Marlin does not support is_sym = is_sym. "
-            f"Only sym = {GPTQ_MARLIN_SUPPORTED_SYM} are supported.")
+def check_gptq_marlin_supported(num_bits: int, group_size: int, is_sym: bool,
+                                min_capability: int) -> bool:
+    cond, _ = _check_marlin_supported(num_bits,
+                                      group_size,
+                                      is_sym,
+                                      min_capability,
+                                      has_zp=False)
+    return cond
+
+
+def check_awq_marlin_supported(num_bits: int, group_size: int, has_zp: bool,
+                               min_capability: int) -> bool:
+    cond, _ = _check_marlin_supported(num_bits,
+                                      group_size,
+                                      False,
+                                      min_capability,
+                                      has_zp=has_zp)
+    return cond
+
+
+def verify_gptq_marlin_supported(num_bits: int, group_size: int,
+                                 is_sym: bool) -> None:
+    cond, err_msg = _check_marlin_supported(num_bits,
+                                            group_size,
+                                            is_sym,
+                                            min_capability=None,
+                                            has_zp=False)
+    if not cond:
+        assert err_msg is not None
+        raise ValueError("GPTQ" + err_msg)
+
+
+def verify_awq_marlin_supported(num_bits: int, group_size: int,
+                                has_zp: bool) -> None:
+    cond, err_msg = _check_marlin_supported(num_bits,
+                                            group_size,
+                                            False,
+                                            min_capability=None,
+                                            has_zp=has_zp)
+    if not cond:
+        assert err_msg is not None
+        raise ValueError("AWQ" + err_msg)


 def verify_marlin_supports_shape(output_size_per_partition: int,
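Note: the hunk above replaces a single boolean check_marlin_supported with a shared _check_marlin_supported helper returning an (ok, error_message) tuple; the check_* wrappers do quiet feature detection, while the verify_* wrappers raise a ValueError whose message is prefixed with the scheme name. A minimal usage sketch, assuming a 4-bit AWQ config on an sm80 GPU (the argument values are illustrative, not taken from the diff):

    # Quiet capability probe (e.g. when deciding whether to route an AWQ
    # checkpoint through Marlin): returns a bool, never raises.
    if check_awq_marlin_supported(num_bits=4, group_size=128, has_zp=True,
                                  min_capability=80):
        use_marlin = True

    # Hard validation at layer-construction time: raises with the reason,
    # e.g. "AWQMarlin does not support weight_bits = 3. ..." for a bad config.
    verify_awq_marlin_supported(num_bits=4, group_size=128, has_zp=True)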
@@ -138,6 +176,51 @@ def marlin_permute_scales(s: torch.Tensor, size_k: int, size_n: int,
     return s


+def marlin_zero_points(zp: torch.Tensor, size_k: int, size_n: int,
+                       num_bits: int) -> torch.Tensor:
+    # Permute zero-points in a similar way to scales, but do not use the
+    # "single" permutation, since zero-points are applied on every MMA
+    scale_perm, _ = get_scale_perms()
+    zp = zp.reshape((-1, len(scale_perm)))[:, scale_perm]
+
+    # Interleave column dim (for the dequantize code) and pack it to int32
+    if num_bits == 4:
+        interleave = numpy.array([0, 2, 4, 6, 1, 3, 5, 7])
+    elif num_bits == 8:
+        interleave = numpy.array([0, 2, 1, 3])
+    else:
+        raise Exception("num_bits must be 4 or 8, got {}".format(num_bits))
+
+    zp = zp.reshape((-1, len(interleave)))[:, interleave].ravel()
+    zp = zp.reshape((-1, size_n)).contiguous()
+    zp = pack_cols(zp, num_bits, size_k, size_n)
+
+    return zp
+
+
+def awq_to_marlin_zero_points(q_zp_packed: torch.Tensor, size_k: int,
+                              size_n: int, num_bits: int) -> torch.Tensor:
+    # AWQ zero-points are quantized and packed on the column dim.
+    # In addition, the values are permuted based on dequantizer.
+    # Here we undo both of these, and then apply marlin permutation
+    # and pack it back.
+    q_zp = unpack_cols(q_zp_packed, num_bits, size_k, size_n)
+
+    # Undo interleaving (use argsort(..) to get inverse perm)
+    if num_bits == 4:
+        undo_interleave = numpy.argsort(numpy.array([0, 2, 4, 6, 1, 3, 5, 7]))
+    elif num_bits == 8:
+        undo_interleave = numpy.argsort(numpy.array([0, 2, 1, 3]))
+    else:
+        raise Exception("num_bits must be 4 or 8, got {}".format(num_bits))
+
+    q_zp = q_zp.reshape((-1, len(undo_interleave)))[:, undo_interleave].ravel()
+    q_zp = q_zp.reshape((-1, size_n)).contiguous()
+
+    marlin_zp = marlin_zero_points(q_zp, size_k, size_n, num_bits)
+    return marlin_zp


 # Newly generated tensors need to replace existing tensors that are
 # already registered as parameters by vLLM (and won't be freed)
 def replace_tensor(layer: torch.nn.Module, name: str,
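Note: both zero-point helpers above hinge on the interleave permutation and its inverse obtained via numpy.argsort: awq_to_marlin_zero_points unpacks AWQ's column-packed layout, un-interleaves it, and then marlin_zero_points re-applies the Marlin scale permutation, interleave, and column packing. A self-contained sketch of the inverse-permutation property for the 4-bit case (numpy only; independent of the diff):

    import numpy

    interleave = numpy.array([0, 2, 4, 6, 1, 3, 5, 7])  # 4-bit dequant order
    undo = numpy.argsort(interleave)                    # -> [0, 4, 1, 5, 2, 6, 3, 7]

    row = numpy.arange(8)
    # Applying the interleave and then its argsort-inverse is the identity,
    # which is what makes the AWQ -> Marlin round-trip lossless.
    assert (row[interleave][undo] == row).all()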
@@ -149,23 +232,26 @@ def replace_tensor(layer: torch.nn.Module, name: str,
     del new_t


-def apply_marlin_linear(input: torch.Tensor,
-                        weight: torch.Tensor,
-                        weight_scale: torch.Tensor,
-                        g_idx: torch.Tensor,
-                        g_idx_sort_indices: torch.Tensor,
-                        workspace: torch.Tensor,
-                        num_bits: int,
-                        output_size_per_partition: int,
-                        input_size_per_partition: int,
-                        is_k_full: bool,
-                        bias: Optional[torch.Tensor] = None) -> torch.Tensor:
+def apply_gptq_marlin_linear(
+        input: torch.Tensor,
+        weight: torch.Tensor,
+        weight_scale: torch.Tensor,
+        weight_zp: torch.Tensor,
+        g_idx: torch.Tensor,
+        g_idx_sort_indices: torch.Tensor,
+        workspace: torch.Tensor,
+        num_bits: int,
+        output_size_per_partition: int,
+        input_size_per_partition: int,
+        is_k_full: bool,
+        bias: Optional[torch.Tensor] = None) -> torch.Tensor:
     reshaped_x = input.reshape(-1, input.shape[-1])
     out_shape = input.shape[:-1] + (output_size_per_partition, )

     output = ops.gptq_marlin_gemm(reshaped_x,
                                   weight,
                                   weight_scale,
+                                  weight_zp,
                                   g_idx,
                                   g_idx_sort_indices,
                                   workspace,
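Note: the only behavioral change in this hunk is the new weight_zp argument threaded through to ops.gptq_marlin_gemm (the GPTQ path passes has_zp=False below, so the kernel presumably skips it). The surrounding reshape bookkeeping is unchanged; a standalone sketch of it, with hypothetical shapes:

    import torch

    x = torch.randn(2, 16, 4096)                 # (batch, seq_len, hidden)
    output_size_per_partition = 11008            # hypothetical partition width

    reshaped_x = x.reshape(-1, x.shape[-1])      # (32, 4096): 2-D for the GEMM
    out_shape = x.shape[:-1] + (output_size_per_partition, )

    y = torch.empty(reshaped_x.shape[0],         # stand-in for the GEMM output
                    output_size_per_partition)
    assert y.reshape(out_shape).shape == (2, 16, 11008)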
@@ -173,7 +259,43 @@ def apply_marlin_linear(input: torch.Tensor,
                                   size_m=reshaped_x.shape[0],
                                   size_n=output_size_per_partition,
                                   size_k=input_size_per_partition,
-                                  is_k_full=is_k_full)
+                                  is_k_full=is_k_full,
+                                  has_zp=False)

     if bias is not None:
         output.add_(bias)  # In-place add

     return output.reshape(out_shape)


+def apply_awq_marlin_linear(
+        input: torch.Tensor,
+        weight: torch.Tensor,
+        weight_scale: torch.Tensor,
+        weight_zp: torch.Tensor,
+        g_idx: torch.Tensor,
+        g_idx_sort_indices: torch.Tensor,
+        workspace: torch.Tensor,
+        num_bits: int,
+        output_size_per_partition: int,
+        input_size_per_partition: int,
+        bias: Optional[torch.Tensor] = None) -> torch.Tensor:
+    reshaped_x = input.reshape(-1, input.shape[-1])
+    out_shape = input.shape[:-1] + (output_size_per_partition, )
+
+    output = ops.gptq_marlin_gemm(reshaped_x,
+                                  weight,
+                                  weight_scale,
+                                  weight_zp,
+                                  g_idx,
+                                  g_idx_sort_indices,
+                                  workspace,
+                                  num_bits,
+                                  size_m=reshaped_x.shape[0],
+                                  size_n=output_size_per_partition,
+                                  size_k=input_size_per_partition,
+                                  is_k_full=True,
+                                  has_zp=True)
+
+    if bias is not None:
+        output.add_(bias)  # In-place add
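Note: apply_awq_marlin_linear pins is_k_full=True and has_zp=True, since AWQ has no activation reordering (K is always full) and its zero-points are mandatory. A hedged sketch of how a caller in the same module might wire it up; the layer attribute names (layer.qweight and friends) are hypothetical, and the tensors must already be CUDA-resident and repacked into Marlin's format:

    from typing import Optional

    import torch

    def forward_awq_marlin(layer: torch.nn.Module, x: torch.Tensor,
                           bias: Optional[torch.Tensor] = None) -> torch.Tensor:
        return apply_awq_marlin_linear(
            input=x,
            weight=layer.qweight,        # repacked into Marlin's tile layout
            weight_scale=layer.scales,   # permuted with marlin_permute_scales
            weight_zp=layer.qzeros,      # converted via awq_to_marlin_zero_points
            g_idx=layer.g_idx,           # empty tensor: AWQ has no act-order
            g_idx_sort_indices=layer.g_idx_sort_indices,  # likewise empty
            workspace=layer.workspace,   # zeroed scratch for the kernel
            num_bits=4,
            output_size_per_partition=layer.output_size_per_partition,
            input_size_per_partition=layer.input_size_per_partition,
            bias=bias)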