[Minor] Rename quantization nvfp4 to modelopt_fp4 (#18356)

Signed-off-by: mgoin <mgoin64@gmail.com>
This commit is contained in:
Michael Goin
2025-05-20 12:08:37 -04:00
committed by GitHub
parent 8f55962a7f
commit f4a8a37465
4 changed files with 7 additions and 7 deletions

View File

@@ -14,7 +14,7 @@ QuantizationMethods = Literal[
     "ptpc_fp8",
     "fbgemm_fp8",
     "modelopt",
-    "nvfp4",
+    "modelopt_fp4",
     "marlin",
     "bitblas",
     "gguf",
@@ -120,7 +120,7 @@ def get_quantization_config(quantization: str) -> type[QuantizationConfig]:
     "fp8": Fp8Config,
     "fbgemm_fp8": FBGEMMFp8Config,
     "modelopt": ModelOptFp8Config,
-    "nvfp4": ModelOptNvFp4Config,
+    "modelopt_fp4": ModelOptNvFp4Config,
     "marlin": MarlinConfig,
     "bitblas": BitBLASConfig,
     "gguf": GGUFConfig,

View File

@@ -192,7 +192,7 @@ class ModelOptNvFp4Config(QuantizationConfig):
     @classmethod
     def get_name(cls) -> QuantizationMethods:
-        return "nvfp4"
+        return "modelopt_fp4"

     @classmethod
     def get_supported_act_dtypes(cls) -> list[torch.dtype]: