Quantization: support FP4 quantized models on AMD CDNA2/CDNA3 GPUs (#22527)

Signed-off-by: feng <fengli1702@gmail.com>
Signed-off-by: Michael Goin <mgoin64@gmail.com>
Co-authored-by: Michael Goin <mgoin64@gmail.com>
Daifeng Li authored on 2025-08-23 10:53:21 +08:00 (committed by GitHub)
parent f6818a92cb
commit fa78de9dc3
8 changed files with 451 additions and 5 deletions
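
For context: this change registers a new "petit_nvfp4" quantization method so that NVFP4 (FP4) checkpoints can run on AMD CDNA2/CDNA3 (MI200/MI300-class) GPUs. A minimal usage sketch, assuming a ROCm build of vLLM that includes this commit; the checkpoint name below is a placeholder, not something named in the diff:

from vllm import LLM, SamplingParams

# Placeholder: any NVFP4-quantized checkpoint (e.g. one produced with
# NVIDIA ModelOpt). "petit_nvfp4" selects the FP4 path added by this commit.
llm = LLM(model="some-org/Llama-3-8B-NVFP4", quantization="petit_nvfp4")
outputs = llm.generate(["Hello, my name is"],
                       SamplingParams(max_tokens=32))
print(outputs[0].outputs[0].text)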

@@ -1119,9 +1119,20 @@ class ModelConfig:
     def _verify_quantization(self) -> None:
         supported_quantization = me_quant.QUANTIZATION_METHODS
         optimized_quantization_methods = [
-            "fp8", "modelopt", "gptq_marlin_24", "gptq_marlin", "awq_marlin",
-            "fbgemm_fp8", "compressed-tensors", "experts_int8", "quark",
-            "modelopt_fp4", "bitblas", "gptq_bitblas", "inc"
+            "fp8",
+            "modelopt",
+            "gptq_marlin_24",
+            "gptq_marlin",
+            "awq_marlin",
+            "fbgemm_fp8",
+            "compressed-tensors",
+            "experts_int8",
+            "quark",
+            "modelopt_fp4",
+            "bitblas",
+            "gptq_bitblas",
+            "inc",
+            "petit_nvfp4",
         ]
         if self.quantization is not None:
             self.quantization = cast(me_quant.QuantizationMethods,
@@ -1153,6 +1164,7 @@ class ModelConfig:
                 "moe_wna16",
                 "modelopt",
                 "modelopt_fp4",
+                "petit_nvfp4",
             ]
             quantization_methods = [
                 q for q in supported_quantization if q not in overrides
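
The two lists touched here play different roles in _verify_quantization. Membership in optimized_quantization_methods suppresses vLLM's "not fully optimized" performance warning, while overrides groups methods whose override_quantization_method hook may claim a checkpoint declared with a different quant_method, adjusting the order in which they are probed. A simplified sketch of the warning path, assuming the surrounding vLLM logic (the logger message is paraphrased, not quoted from this diff):

# With "petit_nvfp4" in optimized_quantization_methods, this warning is
# skipped for FP4 checkpoints served through the new method.
if self.quantization is not None and \
        self.quantization not in optimized_quantization_methods:
    logger.warning(
        "%s quantization is not fully optimized yet; it may run slower "
        "than the non-quantized model.", self.quantization)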