[Model] Add support for the multi-modal Llama 3.2 model (#8811)

Co-authored-by: simon-mo <xmo@berkeley.edu>
Co-authored-by: Chang Su <chang.s.su@oracle.com>
Co-authored-by: Simon Mo <simon.mo@hey.com>
Co-authored-by: Roger Wang <136131678+ywang96@users.noreply.github.com>
Co-authored-by: Roger Wang <ywang@roblox.com>
This commit is contained in:
@@ -576,7 +576,9 @@ class ModelConfig:
|
||||
@property
def is_encoder_decoder_model(self) -> bool:
    """Extract the HF encoder/decoder model flag.

    True when the top-level HF config declares ``is_encoder_decoder``,
    or when a nested ``text_config`` (multi-modal models such as
    Llama 3.2 vision) declares it.
    """
    # Top-level flag wins immediately.
    top_level_flag = getattr(self.hf_config, "is_encoder_decoder", False)
    if top_level_flag:
        return top_level_flag
    # Multi-modal configs nest the language-model settings under
    # `text_config`; fall back to its flag when present.
    text_config = getattr(self.hf_config, "text_config", None)
    if text_config is None:
        return False
    return getattr(text_config, "is_encoder_decoder", False)
|
||||
|
||||
@property
|
||||
def is_embedding_model(self) -> bool:
|
||||
|
||||
Reference in New Issue
Block a user