Implicit language-model-only mode via limit-mm-per-prompt (#22299)
Signed-off-by: Roger Wang <hey@rogerw.me>
Signed-off-by: Andy Xie <andy.xning@gmail.com>
Signed-off-by: tjtanaa <tunjian.tan@embeddedllm.com>
Signed-off-by: Andrew Sansom <andrew@protopia.ai>
Signed-off-by: Zhiyu Cheng <zhiyuc@nvidia.com>
Signed-off-by: Shu Wang <shuw@nvidia.com>
Signed-off-by: Po-Han Huang <pohanh@nvidia.com>
Signed-off-by: Shu Wang. <shuw@nvidia.com>
Signed-off-by: XIn Li <xinli@nvidia.com>
Signed-off-by: Junhao Li <junhao@ubicloud.com>
Signed-off-by: chaunceyjiang <chaunceyjiang@gmail.com>
Signed-off-by: zRzRzRzRzRzRzR <2448370773@qq.com>
Signed-off-by: zitian.zhao <zitian.zhao@tencentmusic.com>
Signed-off-by: zitian zhao <zitian.zhao@tencentmusic.com>
Signed-off-by: DarkLight1337 <tlleungac@connect.ust.hk>
Signed-off-by: iAmir97 <Amir.balwel@embeddedllm.com>
Signed-off-by: iAmir97 <71513472+iAmir97@users.noreply.github.com>
Signed-off-by: Linkun <github@lkchen.net>
Co-authored-by: Ning Xie <andy.xning@gmail.com>
Co-authored-by: TJian <tunjian.tan@embeddedllm.com>
Co-authored-by: Andrew Sansom <andrew@protopia.ai>
Co-authored-by: Zhiyu <zhiyuc@nvidia.com>
Co-authored-by: Shu Wang <shuw@nvidia.com>
Co-authored-by: XIn Li <xinli@nvidia.com>
Co-authored-by: Junhao Li <streaver91@gmail.com>
Co-authored-by: Chauncey <chaunceyjiang@gmail.com>
Co-authored-by: Yuxuan Zhang <2448370773@qq.com>
Co-authored-by: ZiTian Zhao <zitian.zhao@tencentmusic.com>
Co-authored-by: Cyrus Leung <tlleungac@connect.ust.hk>
Co-authored-by: Po-Han Huang (NVIDIA) <53919306+nvpohanh@users.noreply.github.com>
Co-authored-by: iAmir97 <71513472+iAmir97@users.noreply.github.com>
Co-authored-by: iAmir97 <Amir.balwel@embeddedllm.com>
Co-authored-by: gemini-code-assist[bot] <176961590+gemini-code-assist[bot]@users.noreply.github.com>
Co-authored-by: Cyrus Leung <cyrus.tl.leung@gmail.com>
Co-authored-by: Hong Hanh <hanh.usth@gmail.com>
Co-authored-by: youkaichao <youkaichao@gmail.com>
Co-authored-by: lkchen <github@lkchen.net>
@@ -837,27 +837,35 @@ class Step3VLForConditionalGeneration(nn.Module, SupportsMultiModal,
         self.config = config
         self.multimodal_config = multimodal_config
 
-        self.vision_model = Step3VisionTransformer(config.vision_config,
-                                                   None,
-                                                   prefix=maybe_prefix(
-                                                       prefix, "vision_model"))
-        self.vit_downsampler = nn.Conv2d(
-            config.vision_config.hidden_size,
-            config.vision_config.output_hidden_size,
-            kernel_size=2,
-            stride=config.understand_projector_stride)
-        self.vit_downsampler2 = nn.Conv2d(
-            config.vision_config.output_hidden_size,
-            config.vision_config.output_hidden_size * 2,
-            kernel_size=3,
-            stride=2,
-            padding=1,
-        )
-        self.vit_large_projector = nn.Linear(
-            config.vision_config.output_hidden_size * 2,
-            config.hidden_size,
-            bias=config.projector_bias,
-        )
+        if multimodal_config.get_limit_per_prompt("image"):
+            self.vision_model = Step3VisionTransformer(config.vision_config,
+                                                       None,
+                                                       prefix=maybe_prefix(
+                                                           prefix,
+                                                           "vision_model"))
+            self.vit_downsampler = nn.Conv2d(
+                config.vision_config.hidden_size,
+                config.vision_config.output_hidden_size,
+                kernel_size=2,
+                stride=config.understand_projector_stride)
+            self.vit_downsampler2 = nn.Conv2d(
+                config.vision_config.output_hidden_size,
+                config.vision_config.output_hidden_size * 2,
+                kernel_size=3,
+                stride=2,
+                padding=1,
+            )
+            self.vit_large_projector = nn.Linear(
+                config.vision_config.output_hidden_size * 2,
+                config.hidden_size,
+                bias=config.projector_bias,
+            )
+        else:
+            self.vision_model = None
+            self.vit_downsampler = None
+            self.vit_downsampler2 = None
+            self.vit_large_projector = None
 
         self.language_model = init_vllm_registered_model(
             vllm_config=vllm_config,
             hf_config=config.text_config,
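From the user side, this gate is driven by the existing `limit-mm-per-prompt` option: setting the image limit to 0 now implicitly loads Step3 as a text-only model, skipping the vision tower, downsamplers, and projector entirely. A minimal usage sketch, assuming the offline `LLM` entrypoint (the model name is illustrative):

    from vllm import LLM

    # With {"image": 0}, get_limit_per_prompt("image") returns 0, so the
    # vision modules above are never constructed and their memory is
    # never allocated.
    llm = LLM(model="stepfun-ai/step3", limit_mm_per_prompt={"image": 0})
    outputs = llm.generate("Summarize what a unified diff is.")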
@@ -1046,7 +1054,15 @@ class Step3VLForConditionalGeneration(nn.Module, SupportsMultiModal,
         return self.language_model.sample(logits, sampling_metadata)
 
     def load_weights(self, weights: Iterable[tuple[str, torch.Tensor]]):
-        loader = AutoWeightsLoader(self)
+        skip_prefixes = []
+        if self.vision_model is None and self.vit_large_projector is None:
+            skip_prefixes = [
+                "vision_model.", "vit_downsampler.", "vit_downsampler2.",
+                "vit_large_projector."
+            ]
+
+        loader = AutoWeightsLoader(self, skip_prefixes=skip_prefixes)
         loaded_weights = loader.load_weights(weights,
                                              mapper=self.hf_to_vllm_mapper)
         return loaded_weights
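This second hunk keeps weight loading consistent with the conditional construction above: when the vision modules were never built, their checkpoint tensors must be dropped rather than matched against missing submodules. A minimal sketch of what prefix-based skipping does, not the actual `AutoWeightsLoader` internals:

    from collections.abc import Iterable

    import torch

    def skip_by_prefix(
        weights: Iterable[tuple[str, torch.Tensor]],
        skip_prefixes: list[str],
    ) -> Iterable[tuple[str, torch.Tensor]]:
        # Drop any (name, tensor) pair whose name starts with a skipped
        # prefix, e.g. "vision_model."; everything else passes through.
        for name, tensor in weights:
            if not any(name.startswith(p) for p in skip_prefixes):
                yield name, tensor

With `skip_prefixes=[]` the filter is a no-op, which is why the multimodal path is unaffected.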