Fix a bug in tying OPT embeddings (#1)

Woosuk Kwon
2023-02-24 16:29:36 -08:00
committed by GitHub
parent c84c708a1d
commit cbf8779afa
2 changed files with 25 additions and 2 deletions


@@ -232,6 +232,28 @@ class OPTForCausalLM(OPTPreTrainedModel):
         # Initialize weights and apply final processing
         self.post_init()
 
+    # NOTE(woosuk): While the following methods are not called in the model code,
+    # they may be internally used by the transformers library.
+    # For example, tie_weights() does not work without these methods.
+    # Thus, do not delete these methods.
+    def get_input_embeddings(self):
+        return self.model.decoder.embed_tokens
+
+    def set_input_embeddings(self, value):
+        self.model.decoder.embed_tokens = value
+
+    def get_output_embeddings(self):
+        return self.lm_head
+
+    def set_output_embeddings(self, new_embeddings):
+        self.lm_head = new_embeddings
+
+    def set_decoder(self, decoder):
+        self.model.decoder = decoder
+
+    def get_decoder(self):
+        return self.model.decoder
+
     def forward(
         self,
         input_ids: torch.LongTensor,
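
For readers unfamiliar with the bug: transformers' tie_weights() discovers the embedding modules through get_input_embeddings()/get_output_embeddings() rather than touching attributes directly, so deleting these accessors silently breaks weight tying. Below is a minimal, self-contained sketch of that mechanism. TinyCausalLM is a hypothetical stand-in, not this repo's model, and its tie_weights() paraphrases the library's behavior rather than quoting its source.

import torch.nn as nn

class TinyCausalLM(nn.Module):
    """Hypothetical stand-in for OPTForCausalLM, used only to illustrate tying."""

    def __init__(self, vocab_size: int = 8, hidden_size: int = 4):
        super().__init__()
        self.embed_tokens = nn.Embedding(vocab_size, hidden_size)
        self.lm_head = nn.Linear(hidden_size, vocab_size, bias=False)

    # Accessors analogous to the ones added in this commit.
    def get_input_embeddings(self):
        return self.embed_tokens

    def get_output_embeddings(self):
        return self.lm_head

    def tie_weights(self):
        # Paraphrase of the library's logic: look the modules up through the
        # accessors, then point both at the same Parameter. If the lookup
        # yields nothing, no tying happens and lm_head silently keeps its own
        # independent weights, which is the failure mode this commit fixes.
        output_embeddings = self.get_output_embeddings()
        if output_embeddings is not None:
            output_embeddings.weight = self.get_input_embeddings().weight

model = TinyCausalLM()
model.tie_weights()
assert model.lm_head.weight is model.embed_tokens.weight  # embedding matrix is shared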