[Misc] Improve error messages for unsupported types and parameters (#30593)

Signed-off-by: BlankR <hjyblanche@gmail.com>
Co-authored-by: Wentao Ye <44945378+yewentao256@users.noreply.github.com>
Commit 0790f07695 (parent 1f33e38e81)
Author: BlankR
Date: 2026-01-07 01:00:16 -08:00
Committed by: GitHub
11 changed files with 40 additions and 16 deletions

@@ -56,22 +56,22 @@ class AutoRoundConfig(QuantizationConfig):
         if weight_bits not in self.SUPPORTED_BITS:
             raise ValueError(
                 f"Unsupported weight_bits: {weight_bits}, "
-                f"currently only support {self.SUPPORTED_BITS}"
+                f"currently only support {self.SUPPORTED_BITS}."
             )
         if data_type not in self.SUPPORTED_DTYPES:
             raise ValueError(
-                f"Unsupported data_type: {data_type},"
-                f" currently only support {self.SUPPORTED_DTYPES}"
+                f"Unsupported data_type: {data_type}, "
+                f"currently only support {self.SUPPORTED_DTYPES}."
             )
         if packing_format not in self.SUPPORTED_FORMATS:
             raise ValueError(
                 f"Unsupported packing_format: {packing_format}, "
-                f"currently only support {self.SUPPORTED_FORMATS}"
+                f"currently only support {self.SUPPORTED_FORMATS}."
             )
         if backend not in self.SUPPORTED_BACKENDS:
             raise ValueError(
-                f"Unsupported backend: {backend}, "
-                f"currently only support {self.SUPPORTED_BACKENDS}"
+                f"Unsupported backend: {backend}, "
+                f"currently only support {self.SUPPORTED_BACKENDS}."
             )
         self.weight_bits = weight_bits
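For context, a minimal standalone sketch of the validation pattern used above: reject unsupported values eagerly and name the accepted set, ending the message with a period. The class and the SUPPORTED_* values below are illustrative stand-ins, not the real AutoRoundConfig:

```python
class DemoQuantConfig:
    """Toy stand-in for AutoRoundConfig; the supported sets are made up."""

    SUPPORTED_BITS = (2, 4, 8)
    SUPPORTED_DTYPES = ("int",)

    def __init__(self, weight_bits: int, data_type: str) -> None:
        # Validate eagerly and spell out the accepted values in the message.
        if weight_bits not in self.SUPPORTED_BITS:
            raise ValueError(
                f"Unsupported weight_bits: {weight_bits}, "
                f"currently only support {self.SUPPORTED_BITS}."
            )
        if data_type not in self.SUPPORTED_DTYPES:
            raise ValueError(
                f"Unsupported data_type: {data_type}, "
                f"currently only support {self.SUPPORTED_DTYPES}."
            )
        self.weight_bits = weight_bits
        self.data_type = data_type


# DemoQuantConfig(weight_bits=3, data_type="int") raises:
# ValueError: Unsupported weight_bits: 3, currently only support (2, 4, 8).
```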


@@ -158,7 +158,10 @@ class CompressedTensorsW8A8Fp8(CompressedTensorsScheme):
             input_scale = None
         else:
-            raise ValueError(f"Unknown quantization strategy {self.strategy}")
+            raise ValueError(
+                f"Unknown quantization strategy {self.strategy}: "
+                f"should be one of {list(QuantizationStrategy)}"
+            )
         # required by torch.compile to be torch.nn.Parameter
         layer.weight = Parameter(weight.data, requires_grad=False)
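This hunk follows the same idea but additionally lists the members of the strategy enum, since `list(Enum)` renders every member in the message. A rough self-contained sketch; QuantizationStrategyDemo is a hypothetical stand-in for the compressed-tensors QuantizationStrategy enum:

```python
from enum import Enum


class QuantizationStrategyDemo(str, Enum):
    """Hypothetical stand-in for the real QuantizationStrategy enum."""

    TENSOR = "tensor"
    CHANNEL = "channel"


def resolve_strategy(strategy: str) -> QuantizationStrategyDemo:
    try:
        return QuantizationStrategyDemo(strategy)
    except ValueError:
        # list(Enum) enumerates every member, so the error tells the reader
        # exactly which strategies are valid rather than just what failed.
        raise ValueError(
            f"Unknown quantization strategy {strategy}: "
            f"should be one of {list(QuantizationStrategyDemo)}"
        ) from None


# resolve_strategy("token") raises a ValueError naming TENSOR and CHANNEL.
```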


@@ -783,7 +783,10 @@ class Mxfp4MoEMethod(FusedMoEMethodBase):
             layer.w13_weight = w13_weight
             layer.w2_weight = w2_weight
         else:
-            raise ValueError(f"Unsupported backend: {self.mxfp4_backend}")
+            raise ValueError(
+                f"Unsupported mxfp4_backend: {self.mxfp4_backend}: "
+                f"should be one of: {list(Mxfp4Backend)}."
+            )
     def get_fused_moe_quant_config(
         self, layer: torch.nn.Module
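The Mxfp4MoEMethod change applies the same pattern to backend dispatch: the fallback branch names both the offending value and the full backend enum. A hedged sketch of that dispatch shape, using a made-up enum in place of the real Mxfp4Backend:

```python
from enum import Enum, auto


class DemoBackend(Enum):
    """Made-up backend enum standing in for Mxfp4Backend."""

    MARLIN = auto()
    TRITON = auto()


def dispatch(backend: DemoBackend) -> str:
    if backend is DemoBackend.MARLIN:
        return "marlin kernels"
    elif backend is DemoBackend.TRITON:
        return "triton kernels"
    else:
        # Reached only if a new member is added without a matching branch;
        # the error names the stray value and every supported member.
        raise ValueError(
            f"Unsupported backend: {backend}: "
            f"should be one of: {list(DemoBackend)}."
        )
```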