[Misc][Model][Refactor] Pass the prefix into Linear layers (#31669)

Signed-off-by: Wang Kunpeng <1289706727@qq.com>
This commit is contained in:
Wang Kunpeng
2026-01-06 04:03:18 +08:00
committed by GitHub
parent 02dbb933cb
commit 5708297e4e
17 changed files with 181 additions and 40 deletions

View File

@@ -86,7 +86,13 @@ class Zamba2LoRA(nn.Module):
B_class = MergedColumnParallelLinear
else:
B_class = ColumnParallelLinear
-        self.B = B_class(rank, output_dim, bias=False, quant_config=quant_config)
+        self.B = B_class(
+            rank,
+            output_dim,
+            bias=False,
+            quant_config=quant_config,
+            prefix=f"{prefix}.B",
+        )
def forward(
self,
@@ -346,6 +352,7 @@ class Zamba2MLP(nn.Module):
config.adapter_rank,
2 * [self.intermediate_size],
quant_config,
+            prefix=f"{prefix}.gate_up_proj_adapter_list.{block_idx}",
)
else:
gate_up_proj_adapter = nn.Identity()