[Model] Pass param prefix to LLMHead (#24862)

Signed-off-by: whx-sjtu <2952154980@qq.com>
Author: whx
Date: 2025-09-17 16:01:27 +08:00
Committed by: GitHub
Parent: 03191cd8f0
Commit: 4a9375fe9d
58 changed files with 102 additions and 31 deletions

vllm/model_executor/models/gpt_bigcode.py

@@ -302,7 +302,8 @@ class GPTBigCodeForCausalLM(nn.Module, SupportsLoRA, SupportsPP):
         self.lm_head = ParallelLMHead(
             self.transformer.vocab_size,
             self.transformer.embed_dim,
-            org_num_embeddings=self.config.vocab_size)
+            org_num_embeddings=self.config.vocab_size,
+            prefix=maybe_prefix(prefix, "lm_head"))
         self.unpadded_vocab_size = config.vocab_size
         if lora_config:
             self.unpadded_vocab_size += lora_config.lora_extra_vocab_size
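
For context, `maybe_prefix` is a small helper in vLLM's model utilities that joins a parent module's prefix with a child name, so the LM head ends up with its fully qualified parameter name (e.g. "lm_head", or "model.lm_head" under a wrapper module). Per-module logic, such as quantization configuration that treats `lm_head` specially, keys on that full name. A minimal sketch of the helper's behavior, assuming the definition that lives in `vllm/model_executor/models/utils.py` (the exact module path is my recollection and may differ):

    def maybe_prefix(prefix: str, name: str) -> str:
        """Join a module prefix and a child name with a dot.

        Returns the name unchanged when the prefix is empty:
            maybe_prefix("", "lm_head")       -> "lm_head"
            maybe_prefix("model", "lm_head")  -> "model.lm_head"
        """
        return name if not prefix else f"{prefix}.{name}"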