[Feature] models: pass layer prefix to replace_linear_class for per-layer quantization routing. Addresses #23239 (#23556)

Signed-off-by: Shrey Gupta <shreyg1303@gmail.com>
This commit is contained in:
Shrey Gupta
2025-08-28 08:42:44 +05:30
committed by GitHub
parent a69693e38f
commit 1b7b161a09
2 changed files with 18 additions and 8 deletions

View File

@@ -408,13 +408,17 @@ class DeepseekVLV2ForCausalLM(nn.Module, SupportsMultiModal, SupportsPP):
if isinstance(module, nn.Linear):
parent, attr_name = self._get_parent_and_attr(vit, name)
if isinstance(parent, timm.layers.Mlp) and attr_name == "fc1":
- new_linear = replace_linear_class(module, "colwise",
-     quant_config)
+ new_linear = replace_linear_class(module,
+     "colwise",
+     quant_config,
+     prefix=name)
setattr(parent, attr_name, new_linear)
elif isinstance(parent,
timm.layers.Mlp) and attr_name == "fc2":
- new_linear = replace_linear_class(module, "rowwise",
-     quant_config)
+ new_linear = replace_linear_class(module,
+     "rowwise",
+     quant_config,
+     prefix=name)
setattr(parent, attr_name, new_linear)
return vit