from typing import Any, Dict, List, Optional, Tuple, Union

import torch
from torch.nn import Module
from torch.nn.parameter import Parameter

from vllm import _custom_ops as ops
from vllm.logger import init_logger
from vllm.model_executor.layers.linear import LinearBase, LinearMethodBase
from vllm.model_executor.layers.quantization.base_config import (
    QuantizationConfig, QuantizeMethodBase)
from vllm.model_executor.utils import set_weight_attrs
from vllm.utils import print_warning_once

ACTIVATION_SCHEMES = ["static", "dynamic"]
logger = init_logger(__name__)
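
# Typical usage (illustrative, not an API of this module): this quantization
# path is selected when a model is served with quantization="fp8", e.g.
#     from vllm import LLM
#     llm = LLM(model="facebook/opt-125m", quantization="fp8")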


def cutlass_fp8_supported() -> bool:
    capability = torch.cuda.get_device_capability()
    capability = capability[0] * 10 + capability[1]
    major, minor = torch.version.cuda.split(".")
    version = int(major) * 10 + int(minor)

    # CUTLASS FP8 kernels need at least
    #   CUDA 12.0 on SM90 systems (Hopper)
    #   CUDA 12.4 on SM89 systems (Lovelace)
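    # Worked examples of the check below (illustrative):
    #   H100 (SM90, capability 90) + CUDA 12.1 -> version 121 -> supported
    #   L40S (SM89, capability 89) + CUDA 12.2 -> version 122 -> unsupported,
    #   since Lovelace requires CUDA 12.4 or newer.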
    gpu_is_supported = False
    if capability >= 90:
        gpu_is_supported = version >= 120
    elif capability >= 89:
        gpu_is_supported = version >= 124
    return gpu_is_supported


class Fp8Config(QuantizationConfig):
    """Config class for FP8."""

    def __init__(
        self,
        is_checkpoint_fp8_serialized: bool = False,
        activation_scheme: str = "dynamic",
    ) -> None:
        self.is_checkpoint_fp8_serialized = is_checkpoint_fp8_serialized
        if is_checkpoint_fp8_serialized:
            logger.warning("Detected fp8 checkpoint. Please note that the "
                           "format is experimental and subject to change.")
        if activation_scheme not in ACTIVATION_SCHEMES:
            raise ValueError(
                f"Unsupported activation scheme {activation_scheme}")
        self.activation_scheme = activation_scheme

    @classmethod
    def get_name(cls) -> str:
        return "fp8"

    @classmethod
    def get_supported_act_dtypes(cls) -> List[torch.dtype]:
        return [torch.bfloat16, torch.half]

    @classmethod
    def get_min_capability(cls) -> int:
        return 89

    @classmethod
    def get_config_filenames(cls) -> List[str]:
        return []

    @classmethod
    def from_config(cls, config: Dict[str, Any]) -> "Fp8Config":
        quant_method = cls.get_from_keys(config, ["quant_method"])
        is_checkpoint_fp8_serialized = ("fp8" in quant_method)
        activation_scheme = cls.get_from_keys(config, ["activation_scheme"])
        return cls(is_checkpoint_fp8_serialized=is_checkpoint_fp8_serialized,
                   activation_scheme=activation_scheme)

    def get_quant_method(
            self, layer: torch.nn.Module) -> Optional["QuantizeMethodBase"]:
        from vllm.attention.layer import Attention  # Avoid circular import

        if isinstance(layer, LinearBase):
            return Fp8LinearMethod(self)
        if isinstance(layer, Attention):
            return Fp8KVCacheMethod(self)
        return None

    def get_scaled_act_names(self) -> List[str]:
        return []


class Fp8LinearMethod(LinearMethodBase):
    """Linear method for FP8.
    Supports loading FP8 checkpoints with static weight scale and
    dynamic/static activation scale.

    Also supports loading unquantized FP16/BF16 model checkpoints and
    quantizing them to FP8 with dynamic activation scaling. The weight
    scaling factor is initialized after the model weights are loaded.

    Limitations:
    1. Only supports per-tensor quantization due to torch._scaled_mm support.
    2. Only supports the float8_e4m3fn data type due to the limitation of
       torch._scaled_mm (https://github.com/pytorch/pytorch/blob/2e48b39603411a41c5025efbe52f89560b827825/aten/src/ATen/native/cuda/Blas.cpp#L854-L856)

    Args:
        quant_config: The quantization config.
    """

    def __init__(self, quant_config: Fp8Config):
        self.quant_config = quant_config
        self.cutlass_fp8_supported = cutlass_fp8_supported()

    def _create_scale_param(
        self,
        scale_name: str,
        layer: torch.nn.Module,
        output_partition_sizes: List[int],
        **extra_weight_attrs,
    ) -> None:
        scale = Parameter(torch.empty(len(output_partition_sizes),
                                      dtype=torch.float32),
                          requires_grad=False)
        layer.register_parameter(scale_name, scale)
        set_weight_attrs(
            scale, {
                **extra_weight_attrs,
                "fp8_scales_shard_indexer":
                self.scales_shard_indexer,
            })

    def create_weights(
        self,
        layer: torch.nn.Module,
        input_size_per_partition: int,
        output_partition_sizes: List[int],
        input_size: int,
        output_size: int,
        params_dtype: torch.dtype,
        **extra_weight_attrs,
    ):
        del input_size, output_size
        output_size_per_partition = sum(output_partition_sizes)

        layer.process_after_load = True
        layer.logical_widths = output_partition_sizes

        # WEIGHT
        weight_dtype = (torch.float8_e4m3fn
                        if self.quant_config.is_checkpoint_fp8_serialized else
                        params_dtype)
        weight = Parameter(torch.empty(output_size_per_partition,
                                       input_size_per_partition,
                                       dtype=weight_dtype),
                           requires_grad=False)
        layer.register_parameter("weight", weight)
        set_weight_attrs(weight, {
            **extra_weight_attrs,
            "input_dim": 1,
            "output_dim": 0,
        })

        # If the checkpoint is serialized fp8, create the scale parameters
        # that will be loaded from the checkpoint.
        # Otherwise, wait until process_weights_after_loading.
        if self.quant_config.is_checkpoint_fp8_serialized:
            # WEIGHT SCALE
            self._create_scale_param(
                scale_name="weight_scale",
                layer=layer,
                output_partition_sizes=output_partition_sizes,
                **extra_weight_attrs)

            # INPUT ACTIVATION SCALE
            if self.quant_config.activation_scheme == "static":
                self._create_scale_param(
                    scale_name="input_scale",
                    layer=layer,
                    output_partition_sizes=output_partition_sizes,
                    **extra_weight_attrs)

    def scales_shard_indexer(
            self, param: torch.Tensor, loaded_weight: torch.Tensor,
            shard_id: Union[str, int]) -> Tuple[torch.Tensor, torch.Tensor]:
        qkv_idxs = {"q": 0, "k": 1, "v": 2}

        if isinstance(shard_id, int):
            pass
        elif isinstance(shard_id, str):
            if shard_id not in qkv_idxs:
                raise ValueError(f"Unknown shard_id: {shard_id}")
            shard_id = qkv_idxs[shard_id]
        else:
            raise ValueError(
                f"Shard id must be int or str but got {type(shard_id)}")

        return param[shard_id], loaded_weight

    def process_weights_after_loading(self, layer: Module) -> None:
        if (not hasattr(layer, "process_after_load")
                or not layer.process_after_load):
            return

        # If checkpoint is fp/bf16 (not serialized fp8), quantize the weights.
        if not self.quant_config.is_checkpoint_fp8_serialized:
            qweight, weight_scale = ops.scaled_fp8_quant(layer.weight,
                                                         scale=None)
            layer.weight = Parameter(qweight.t(), requires_grad=False)
            layer.weight_scale = Parameter(weight_scale, requires_grad=False)
            layer.logical_widths = None
            layer.input_scale = None
            return

        # If checkpoint is fp8, requantize the separately quantized logical
        # weights into a single fp8 weight with a single weight scale.
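        # Illustrative example (hypothetical shapes): a fused QKV projection
        # is loaded as three logical shards with logical_widths = [q, k, v]
        # and per-shard scales [s_q, s_k, s_v]; each shard is dequantized with
        # its own scale and requantized with max(s_q, s_k, s_v), leaving one
        # weight tensor and one weight_scale for the whole layer.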
        else:
            # WEIGHT_SCALE / WEIGHT
            # Loop over logical weights, requantizing with single scale.
            max_w_scale = layer.weight_scale.max()
            start = 0
            for idx, logical_width in enumerate(layer.logical_widths):
                end = start + logical_width
                weight_dq = per_tensor_dequantize(layer.weight[start:end, :],
                                                  layer.weight_scale[idx])

                layer.weight[start:end, :] = per_tensor_quantize(
                    weight_dq, max_w_scale)
                start = end
            layer.weight_scale = Parameter(max_w_scale, requires_grad=False)

            # WEIGHT
            # Transpose weight for passing to torch._scaled_mm
            weight = layer.weight
            layer.weight = Parameter(weight.t(), requires_grad=False)

            # INPUT ACTIVATION SCALE
            # Dynamic: set to None (required input to ops.scaled_fp8_quant).
            # Static: set to max of the input_scales (since they are equal).
            if self.quant_config.activation_scheme == "dynamic":
                layer.input_scale = None
            elif self.quant_config.activation_scheme == "static":
                if not all_close_1d(layer.input_scale):
                    raise ValueError(
                        "All the input_scales for the logical weights of a "
                        f"layer must be equal. But got {layer.input_scale}")
                layer.input_scale = Parameter(layer.input_scale.max(),
                                              requires_grad=False)
            else:
                raise ValueError(
                    f"Unknown scheme {self.quant_config.activation_scheme}")

    def apply(self,
              layer: torch.nn.Module,
              x: torch.Tensor,
              bias: Optional[torch.Tensor] = None) -> torch.Tensor:

        # ops.scaled_fp8_quant supports both dynamic and static quant.
        #   If dynamic, layer.input_scale is None and x_scale computed from x.
        #   If static, layer.input_scale is scalar and x_scale is input_scale.
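        #
        # Conceptually (illustrative sketch of the per-tensor math, not the
        # exact kernel):
        #     x_scale = max(abs(x)) / fp8_max   (dynamic)  or  input_scale
        #     qinput  = clamp(x / x_scale, fp8_min, fp8_max) as float8_e4m3fn
        #     output  = (qinput @ weight) * x_scale * weight_scale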

        if bias is None and self.cutlass_fp8_supported:
            qinput, x_scale = ops.scaled_fp8_quant(x, layer.input_scale)

            # Fused GEMM_DQ
            output = ops.cutlass_scaled_mm(
                qinput,
                layer.weight,
                out_dtype=x.dtype,
                scale_a=x_scale,
                scale_b=layer.weight_scale,
            )

        else:
            qinput, x_scale = ops.scaled_fp8_quant(x,
                                                   layer.input_scale,
                                                   batch_dim_padding=17)

            # Fused GEMM_DQ -- note we padded the input above because
            # torch._scaled_mm is more performant for matrices with
            # batch dimension > 16. Note that this could change
            # in the future.
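            # Assumed behavior of batch_dim_padding=17 (illustrative): inputs
            # with fewer than 17 rows are padded up to 17 rows before the
            # GEMM, and the torch.narrow() on the return value below slices
            # the result back to the original x.shape[0] rows.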
            output, _ = torch._scaled_mm(
                qinput,
                layer.weight,
                out_dtype=x.dtype,
                scale_a=x_scale,
                scale_b=layer.weight_scale,
                bias=bias,
            )
        return torch.narrow(output, 0, 0, x.shape[0])


class Fp8KVCacheMethod(QuantizeMethodBase):
    """Supports loading kv-cache scaling factors from FP8 checkpoints.
    """

    def __init__(self, quant_config: Fp8Config):
        self.quant_config = quant_config

    def create_weights(self, layer: torch.nn.Module):
        """Create "weight" (aka kv_scale) for an attention layer.

        Args:
            layer: The layer that is using the QuantizeMethodBase factory.
        """
        # Initialize the KV cache scale to 1.0 as the default value.
        # If the kv_scale appears in the checkpoint, it will be
        # overwritten when loading weights.
        layer.kv_scale = Parameter(torch.tensor(1.0), requires_grad=False)

    def apply(self, layer: torch.nn.Module) -> torch.Tensor:
        raise RuntimeError("Fp8KVCacheMethod.apply should not be called.")

    def process_weights_after_loading(self, layer: Module) -> None:
        # If the kv-cache dtype is auto, we enforce the kv-scale to be 1.0
        # regardless of whether the kv-scale is available in the checkpoint.
        if layer.kv_cache_dtype != "auto":
            kv_scale = layer.kv_scale.to("cpu").tolist()
            if not isinstance(kv_scale, float):
                raise ValueError("Only support per-tensor scaling factor "
                                 "for fp8 KV cache")
            layer._kv_scale = kv_scale
            if layer._kv_scale == 1.0 and "e5m2" not in layer.kv_cache_dtype:
                print_warning_once(
                    "Using KV cache scaling factor 1.0 for fp8_e4m3. This may "
                    "cause accuracy issues. Please make sure kv-cache scaling "
                    "factor is available in the fp8 checkpoint.")
        del layer.kv_scale


def all_close_1d(x: torch.Tensor) -> bool:
    assert len(x.shape) == 1
    return all(torch.allclose(x[0], x[i]) for i in range(x.shape[0]))


def per_tensor_quantize(tensor: torch.Tensor,
                        inv_scale: Union[float, torch.Tensor]) -> torch.Tensor:
    finfo = torch.finfo(torch.float8_e4m3fn)
    qweight = (tensor / inv_scale).clamp(min=finfo.min, max=finfo.max)
    return qweight.to(torch.float8_e4m3fn)


def per_tensor_dequantize(
        tensor: torch.Tensor, inv_scale: Union[float,
                                               torch.Tensor]) -> torch.Tensor:
    fake_qweight = tensor.to(torch.float16)
    dq_weight = fake_qweight * inv_scale
    return dq_weight