[Bugfix] support tie_word_embeddings for all models (#5724)
This commit is contained in:
@@ -252,6 +252,8 @@ class QWenLMHeadModel(nn.Module):
         self.lm_head = ParallelLMHead(config.vocab_size,
                                       config.hidden_size,
                                       quant_config=quant_config)
+        if self.config.tie_word_embeddings:
+            self.lm_head.weight = self.transformer.wte.weight
         self.logits_processor = LogitsProcessor(config.vocab_size)
         self.sampler = Sampler()
Reference in New Issue
Block a user