Improve the robustness of parsing VLMs config in AutoRound (#18894)

Signed-off-by: wenhuach21 <wenhua.cheng@intel.com>
This commit is contained in:
Wenhua Cheng
2025-05-30 10:24:47 +08:00
committed by GitHub
parent 3132290a14
commit 3de3eadf5b

View File

@@ -116,8 +116,9 @@ class AutoRoundConfig(QuantizationConfig):
quantized = True
if self.block_name_to_quantize:
-            quantized = any(name in layer_name
-                            for name in self.block_name_to_quantize)
+            quantized = any(
+                layer_name.startswith(name)
+                for name in self.block_name_to_quantize)
elif isinstance(layer, ParallelLMHead):
quantized = False