Support Pixtral-Large HF by using llava multimodal_projector_bias config (#12710)
Signed-off-by: mgoin <michael@neuralmagic.com>
@@ -75,19 +75,20 @@ class LlavaMultiModalProjector(nn.Module):
                  vision_hidden_size: int,
                  text_hidden_size: int,
                  projector_hidden_act: str,
+                 multimodal_projector_bias: bool,
                  quant_config: Optional[QuantizationConfig] = None,
                  prefix: str = ""):
         super().__init__()
 
         self.linear_1 = ColumnParallelLinear(vision_hidden_size,
                                              text_hidden_size,
-                                             bias=True,
+                                             bias=multimodal_projector_bias,
                                              quant_config=quant_config,
                                              prefix=f"{prefix}.linear_1")
         self.act = get_act_fn(projector_hidden_act)
         self.linear_2 = RowParallelLinear(text_hidden_size,
                                           text_hidden_size,
-                                          bias=True,
+                                          bias=multimodal_projector_bias,
                                           quant_config=quant_config,
                                           prefix=f"{prefix}.linear_2")
 
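For context, the hunk above makes the projector's bias configurable instead of hard-coded to True, since Pixtral-Large HF checkpoints ship bias-free projector weights. Below is a minimal sketch of what the projector computes after this change, in plain PyTorch; the class name LlavaProjectorSketch is hypothetical, and vLLM's real class instead uses ColumnParallelLinear/RowParallelLinear with quant_config and weight-loading prefixes.

import torch
import torch.nn as nn


class LlavaProjectorSketch(nn.Module):
    """Two-layer MLP mapping vision features into the text hidden size.

    Sketch only: no tensor parallelism or quantization, unlike vLLM's
    LlavaMultiModalProjector.
    """

    def __init__(self,
                 vision_hidden_size: int,
                 text_hidden_size: int,
                 projector_hidden_act: str = "gelu",
                 multimodal_projector_bias: bool = True):
        super().__init__()
        # The bias flag now comes from the model config rather than being
        # hard-coded to True; Pixtral-Large HF sets it to False.
        self.linear_1 = nn.Linear(vision_hidden_size,
                                  text_hidden_size,
                                  bias=multimodal_projector_bias)
        self.act = nn.GELU() if projector_hidden_act == "gelu" else nn.ReLU()
        self.linear_2 = nn.Linear(text_hidden_size,
                                  text_hidden_size,
                                  bias=multimodal_projector_bias)

    def forward(self, image_features: torch.Tensor) -> torch.Tensor:
        return self.linear_2(self.act(self.linear_1(image_features)))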
@@ -503,6 +504,7 @@ class LlavaForConditionalGeneration(nn.Module, SupportsMultiModal, SupportsPP):
             vision_hidden_size=config.vision_config.hidden_size,
             text_hidden_size=config.text_config.hidden_size,
             projector_hidden_act=config.projector_hidden_act,
+            multimodal_projector_bias=config.multimodal_projector_bias,
             quant_config=quant_config,
             prefix=maybe_prefix(prefix, "multi_modal_projector"))
 
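The second hunk wires the new constructor argument from the Hugging Face model config. A quick, hedged way to inspect the flag on a given checkpoint is sketched below; the model id is illustrative, and older llava-style configs may predate the field, hence the getattr default matching the previous hard-coded bias=True behavior.

from transformers import AutoConfig

config = AutoConfig.from_pretrained("mistral-community/pixtral-12b")
# Fall back to True (the old hard-coded behavior) when the config
# predates the multimodal_projector_bias field.
bias = getattr(config, "multimodal_projector_bias", True)
print(f"projector bias enabled: {bias}")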