Convert formatting to use ruff instead of yapf + isort (#26247)

Signed-off-by: Harry Mellor <19981378+hmellor@users.noreply.github.com>
Author: Harry Mellor
Date: 2025-10-05 15:06:22 +01:00 (committed by GitHub)
parent 17edd8a807
commit d6953beb91
1508 changed files with 115244 additions and 94146 deletions
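Note: the hunks below are one example of the mechanical reformatting this commit applies throughout. Wrapped imports and call arguments move from yapf's continuation lines aligned under the opening parenthesis to ruff's Black-style layout with one item per line, a trailing comma, and a dedented closing parenthesis; backslash line continuations are dropped; single-quoted strings become double-quoted; and spacing around operators such as ** is adjusted in some expressions. As an illustrative sketch (leading whitespace approximated, since the copied diff loses indentation), an import from this file that was previously wrapped as

    from .interfaces import (MultiModalEmbeddings, SupportsLoRA,
                             SupportsMultiModal, SupportsPP)

is now formatted as

    from .interfaces import (
        MultiModalEmbeddings,
        SupportsLoRA,
        SupportsMultiModal,
        SupportsPP,
    )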


@@ -15,9 +15,11 @@ import torch.nn as nn
from transformers import BatchFeature, InternVLProcessor, PretrainedConfig
from transformers.activations import ACT2FN
from transformers.models.got_ocr2.image_processing_got_ocr2_fast import (
GotOcr2ImageProcessorFast)
GotOcr2ImageProcessorFast,
)
from transformers.models.internvl.video_processing_internvl import (
InternVLVideoProcessor)
InternVLVideoProcessor,
)
from vllm.config import VllmConfig
from vllm.config.multimodal import BaseDummyOptions
@@ -25,38 +27,57 @@ from vllm.model_executor.layers.quantization import QuantizationConfig
from vllm.model_executor.models.interns1_vit import InternS1VisionModel
from vllm.model_executor.models.module_mapping import MultiModelKeys
from vllm.multimodal import MULTIMODAL_REGISTRY
from vllm.multimodal.inputs import (MultiModalDataDict, MultiModalFieldConfig,
MultiModalKwargsItems)
from vllm.multimodal.parse import (ImageEmbeddingItems, ImageProcessorItems,
ImageSize, MultiModalDataItems)
from vllm.multimodal.processing import (BaseMultiModalProcessor,
BaseProcessingInfo, PromptReplacement,
PromptUpdate, PromptUpdateDetails)
from vllm.multimodal.inputs import (
MultiModalDataDict,
MultiModalFieldConfig,
MultiModalKwargsItems,
)
from vllm.multimodal.parse import (
ImageEmbeddingItems,
ImageProcessorItems,
ImageSize,
MultiModalDataItems,
)
from vllm.multimodal.processing import (
BaseMultiModalProcessor,
BaseProcessingInfo,
PromptReplacement,
PromptUpdate,
PromptUpdateDetails,
)
from vllm.multimodal.profiling import BaseDummyInputsBuilder
from vllm.sequence import IntermediateTensors
from vllm.transformers_utils.processor import (
cached_video_processor_from_config)
from vllm.transformers_utils.processor import cached_video_processor_from_config
from vllm.utils.tensor_schema import TensorSchema, TensorShape
from .interfaces import (MultiModalEmbeddings, SupportsLoRA,
SupportsMultiModal, SupportsPP)
from .utils import (AutoWeightsLoader, WeightsMapper,
init_vllm_registered_model, maybe_prefix)
from .interfaces import (
MultiModalEmbeddings,
SupportsLoRA,
SupportsMultiModal,
SupportsPP,
)
from .utils import (
AutoWeightsLoader,
WeightsMapper,
init_vllm_registered_model,
maybe_prefix,
)
class InternS1MultiModalProjector(nn.Module):
def __init__(self, config):
super().__init__()
self.layer_norm = nn.LayerNorm(config.vision_config.hidden_size *
int(1 / config.downsample_ratio)**2)
self.layer_norm = nn.LayerNorm(
config.vision_config.hidden_size * int(1 / config.downsample_ratio) ** 2
)
self.linear_1 = nn.Linear(
config.vision_config.hidden_size *
int(1 / config.downsample_ratio)**2,
config.text_config.hidden_size)
config.vision_config.hidden_size * int(1 / config.downsample_ratio) ** 2,
config.text_config.hidden_size,
)
self.act = ACT2FN[config.projector_hidden_act]
self.linear_2 = nn.Linear(config.text_config.hidden_size,
config.text_config.hidden_size)
self.linear_2 = nn.Linear(
config.text_config.hidden_size, config.text_config.hidden_size
)
def forward(self, image_features):
hidden_states = self.layer_norm(image_features)
@@ -75,6 +96,7 @@ class InternS1ImagePixelInputs(TensorSchema):
- w: Width
- bn: Batch size * number of images
"""
type: Literal["pixel_values"] = "pixel_values"
pixel_values: Annotated[torch.Tensor, TensorShape("bnp", 3, "h", "w")]
num_patches: Annotated[torch.Tensor, TensorShape("bn")]
@@ -87,13 +109,14 @@ class InternS1ImageEmbeddingInputs(TensorSchema):
- tifs: Total image feature size
- hs: Hidden size (must match language model backbone)
"""
type: Literal["image_embeds"] = "image_embeds"
data: Annotated[Union[torch.Tensor, list[torch.Tensor]],
TensorShape("ni", "tifs", "hs")]
data: Annotated[
Union[torch.Tensor, list[torch.Tensor]], TensorShape("ni", "tifs", "hs")
]
InternS1ImageInputs = Union[InternS1ImagePixelInputs,
InternS1ImageEmbeddingInputs]
InternS1ImageInputs = Union[InternS1ImagePixelInputs, InternS1ImageEmbeddingInputs]
class InternS1VideoPixelInputs(TensorSchema):
@@ -105,6 +128,7 @@ class InternS1VideoPixelInputs(TensorSchema):
- h: Height
- w: Width
"""
type: Literal["pixel_values_videos"] = "pixel_values_videos"
pixel_values: Annotated[torch.Tensor, TensorShape("bnv", 3, "h", "w")]
num_patches: Annotated[torch.Tensor, TensorShape("bn")]
@@ -117,13 +141,14 @@ class InternS1VideoEmbeddingInputs(TensorSchema):
- tvfs: Total video feature size
- hs: Hidden size (must match language model backbone)
"""
type: Literal["video_embeds"] = "video_embeds"
data: Annotated[Union[torch.Tensor, list[torch.Tensor]],
TensorShape("nv", "tvfs", "hs")]
data: Annotated[
Union[torch.Tensor, list[torch.Tensor]], TensorShape("nv", "tvfs", "hs")
]
InternS1VideoInputs = Union[InternS1VideoPixelInputs,
InternS1VideoEmbeddingInputs]
InternS1VideoInputs = Union[InternS1VideoPixelInputs, InternS1VideoEmbeddingInputs]
def resolve_interns1_min_max_num(
@@ -145,10 +170,13 @@ def get_interns1_target_ratios(
min_num: int,
max_num: int,
) -> list[tuple[int, int]]:
target_ratios = {(i, j)
for n in range(min_num, max_num + 1)
for i in range(1, n + 1)
for j in range(1, n + 1) if min_num <= i * j <= max_num}
target_ratios = {
(i, j)
for n in range(min_num, max_num + 1)
for i in range(1, n + 1)
for j in range(1, n + 1)
if min_num <= i * j <= max_num
}
return sorted(target_ratios, key=lambda x: x[0] * x[1])
@@ -158,9 +186,8 @@ class InternS1ProcessingInfo(BaseProcessingInfo):
def get_hf_processor(self, **kwargs: object) -> InternVLProcessor:
hf_processor = self.ctx.get_hf_processor(InternVLProcessor, **kwargs)
hf_processor.video_processor = cached_video_processor_from_config(
self.ctx.model_config,
processor_cls=InternVLVideoProcessor,
**kwargs)
self.ctx.model_config, processor_cls=InternVLVideoProcessor, **kwargs
)
return hf_processor
def get_supported_mm_limits(self) -> Mapping[str, Optional[int]]:
@@ -171,18 +198,19 @@ class InternS1ProcessingInfo(BaseProcessingInfo):
*,
image_width: int,
image_height: int,
processor: Optional['GotOcr2ImageProcessorFast'] = None,
processor: Optional["GotOcr2ImageProcessorFast"] = None,
) -> int:
if processor is None:
processor = self.get_hf_processor().image_processor
if not isinstance(processor, GotOcr2ImageProcessorFast):
raise ValueError(f'GotOcr2ImageProcessorFast is expected but got '
f'{type(processor)}')
raise ValueError(
f"GotOcr2ImageProcessorFast is expected but got {type(processor)}"
)
num_image_patches = processor.get_number_of_image_patches(
image_height, image_width, images_kwargs=dict())
num_image_tokens = self.get_hf_processor(
).image_seq_length * num_image_patches
image_height, image_width, images_kwargs=dict()
)
num_image_tokens = self.get_hf_processor().image_seq_length * num_image_patches
return num_image_tokens
def resolve_target_ratios(self, use_thumbnail: Optional[bool] = None):
@@ -197,7 +225,8 @@ class InternS1ProcessingInfo(BaseProcessingInfo):
min_dynamic_patch,
max_dynamic_patch,
dynamic_image_size,
use_thumbnail=use_thumbnail)
use_thumbnail=use_thumbnail,
)
return get_interns1_target_ratios(min_num, max_num)
@@ -219,11 +248,11 @@ class InternS1ProcessingInfo(BaseProcessingInfo):
)
if feat_size > largest_feature_size:
largest_feature_size = feat_size
largest_feature_pinpoint = ImageSize(width=width,
height=height)
largest_feature_pinpoint = ImageSize(width=width, height=height)
assert not (largest_feature_size == 0 or largest_feature_pinpoint
is None), ("Cannot have a largest feature size of 0!")
assert not (largest_feature_size == 0 or largest_feature_pinpoint is None), (
"Cannot have a largest feature size of 0!"
)
return largest_feature_pinpoint
@@ -248,15 +277,13 @@ class InternS1ProcessingInfo(BaseProcessingInfo):
processor = self.get_hf_processor()
max_image_tokens = self.get_max_image_tokens() * max_images
max_total_frames = (seq_len -
max_image_tokens) // processor.image_seq_length
max_total_frames = (seq_len - max_image_tokens) // processor.image_seq_length
max_frames_per_video = max_total_frames // max(max_videos, 1)
return max(max_frames_per_video, 1)
class InternS1DummyInputsBuilder(BaseDummyInputsBuilder[InternS1ProcessingInfo]
):
class InternS1DummyInputsBuilder(BaseDummyInputsBuilder[InternS1ProcessingInfo]):
"""DummyInputsBuilder for InternS1-style models."""
def get_dummy_text(self, mm_counts: Mapping[str, int]) -> str:
@@ -273,10 +300,10 @@ class InternS1DummyInputsBuilder(BaseDummyInputsBuilder[InternS1ProcessingInfo]
mm_counts: Mapping[str, int],
mm_options: Optional[Mapping[str, BaseDummyOptions]] = None,
) -> MultiModalDataDict:
target_width, target_height = \
self.info.get_image_size_with_most_features()
target_num_frames = \
self.info.get_num_frames_with_most_features(seq_len, mm_counts)
target_width, target_height = self.info.get_image_size_with_most_features()
target_num_frames = self.info.get_num_frames_with_most_features(
seq_len, mm_counts
)
num_images = mm_counts.get("image", 0)
num_videos = mm_counts.get("video", 0)
@@ -287,23 +314,24 @@ class InternS1DummyInputsBuilder(BaseDummyInputsBuilder[InternS1ProcessingInfo]
video_overrides = mm_options.get("video") if mm_options else None
return {
"image":
self._get_dummy_images(width=target_width,
height=target_height,
num_images=num_images,
overrides=image_overrides),
"video":
self._get_dummy_videos(width=image_size_w,
height=image_size_h,
num_frames=target_num_frames,
num_videos=num_videos,
overrides=video_overrides),
"image": self._get_dummy_images(
width=target_width,
height=target_height,
num_images=num_images,
overrides=image_overrides,
),
"video": self._get_dummy_videos(
width=image_size_w,
height=image_size_h,
num_frames=target_num_frames,
num_videos=num_videos,
overrides=video_overrides,
),
}
class InternS1MultiModalProcessor(
BaseMultiModalProcessor[InternS1ProcessingInfo]):
""" Basic image-only MultiModalProcessor for InternS1-style models."""
class InternS1MultiModalProcessor(BaseMultiModalProcessor[InternS1ProcessingInfo]):
"""Basic image-only MultiModalProcessor for InternS1-style models."""
def _call_hf_processor(
self,
@@ -320,15 +348,14 @@ class InternS1MultiModalProcessor(
hf_processor = self.info.get_hf_processor(**mm_kwargs)
tokenizer = hf_processor.tokenizer
video_token_id = tokenizer.encode(hf_processor.video_token,
add_special_tokens=False)
video_token_id = tokenizer.encode(
hf_processor.video_token, add_special_tokens=False
)
assert len(video_token_id) == 1
video_token_id = video_token_id[0]
prompt = re.sub(hf_processor.image_token, "<image_placeholder>",
prompt)
prompt = re.sub(hf_processor.video_token, "<video_placeholder>",
prompt)
prompt = re.sub(hf_processor.image_token, "<image_placeholder>", prompt)
prompt = re.sub(hf_processor.video_token, "<video_placeholder>", prompt)
image_outputs = {}
if images:
@@ -340,13 +367,11 @@ class InternS1MultiModalProcessor(
mm_kwargs=mm_kwargs,
tok_kwargs=tok_kwargs,
)
image_pixel_values.append(
processed_outputs.pop("pixel_values"))
image_pixel_values.append(processed_outputs.pop("pixel_values"))
input_ids = processed_outputs.pop("input_ids")
image_placeholder = tokenizer.batch_decode(input_ids)[0]
prompt = prompt.replace("<image_placeholder>",
image_placeholder, 1)
prompt = prompt.replace("<image_placeholder>", image_placeholder, 1)
num_patches = [len(item) for item in image_pixel_values]
image_outputs = {
@@ -365,16 +390,13 @@ class InternS1MultiModalProcessor(
mm_kwargs=mm_kwargs,
tok_kwargs=tok_kwargs,
)
video_pixel_values.append(
processed_outputs.pop("pixel_values"))
video_pixel_values.append(processed_outputs.pop("pixel_values"))
input_ids = processed_outputs.pop("input_ids")
input_ids[input_ids ==
hf_processor.image_token_id] = video_token_id
input_ids[input_ids == hf_processor.image_token_id] = video_token_id
video_placeholder = tokenizer.batch_decode(input_ids)[0]
prompt = prompt.replace("<video_placeholder>",
video_placeholder, 1)
prompt = prompt.replace("<video_placeholder>", video_placeholder, 1)
num_frames = [len(item) for item in video_pixel_values]
video_outputs = {
@@ -383,10 +405,8 @@ class InternS1MultiModalProcessor(
"video_token_id": torch.tensor(video_token_id),
}
prompt = re.sub("<image_placeholder>", hf_processor.image_token,
prompt)
prompt = re.sub("<video_placeholder>", hf_processor.video_token,
prompt)
prompt = re.sub("<image_placeholder>", hf_processor.image_token, prompt)
prompt = re.sub("<video_placeholder>", hf_processor.video_token, prompt)
text_outputs = tokenizer(prompt, **tok_kwargs, return_tensors="pt")
return BatchFeature({**text_outputs, **image_outputs, **video_outputs})
@@ -396,7 +416,6 @@ class InternS1MultiModalProcessor(
hf_inputs: BatchFeature,
hf_processor_mm_kwargs: Mapping[str, object],
) -> Mapping[str, MultiModalFieldConfig]:
image_num_patches = hf_inputs.get("image_num_patches", torch.empty(0))
video_num_patches = hf_inputs.get("video_num_patches", torch.empty(0))
num_images = len(image_num_patches)
@@ -404,12 +423,14 @@ class InternS1MultiModalProcessor(
return dict(
pixel_values=MultiModalFieldConfig.flat_from_sizes(
"image", image_num_patches),
"image", image_num_patches
),
image_num_patches=MultiModalFieldConfig.batched("image"),
image_embeds=MultiModalFieldConfig.batched("image"),
image_token_id=MultiModalFieldConfig.shared("image", num_images),
pixel_values_videos=MultiModalFieldConfig.flat_from_sizes(
"video", video_num_patches),
"video", video_num_patches
),
video_num_patches=MultiModalFieldConfig.batched("video"),
video_token_id=MultiModalFieldConfig.shared("video", num_videos),
)
@@ -443,7 +464,8 @@ class InternS1MultiModalProcessor(
def get_replacement_interns1_image(item_idx: int):
images = mm_items.get_items(
"image", (ImageEmbeddingItems, ImageProcessorItems))
"image", (ImageEmbeddingItems, ImageProcessorItems)
)
if isinstance(images, ImageEmbeddingItems):
feature_size = images.get_feature_size(item_idx)
@@ -453,19 +475,16 @@ class InternS1MultiModalProcessor(
repl_features = img_context_token * feature_size
repl_full = start_image_token + repl_features + end_image_token
return PromptUpdateDetails.select_text(repl_full,
img_context_token)
return PromptUpdateDetails.select_text(repl_full, img_context_token)
def get_replacement_interns1_video(item_idx: int):
num_patches = video_num_patches[item_idx]
repl_features = video_token * hf_processor.image_seq_length
repl_features_with_sep = (start_image_token + repl_features +
end_image_token)
repl_features_with_sep = start_image_token + repl_features + end_image_token
# num_patches is equal to num_frames
repl_full = '\n'.join([
f'Frame{i+1}: {repl_features_with_sep}'
for i in range(num_patches)
])
repl_full = "\n".join(
[f"Frame{i + 1}: {repl_features_with_sep}" for i in range(num_patches)]
)
return PromptUpdateDetails.select_text(repl_full, video_token)
@@ -486,9 +505,11 @@ class InternS1MultiModalProcessor(
@MULTIMODAL_REGISTRY.register_processor(
InternS1MultiModalProcessor,
info=InternS1ProcessingInfo,
dummy_inputs=InternS1DummyInputsBuilder)
class InternS1ForConditionalGeneration(nn.Module, SupportsMultiModal,
SupportsPP, SupportsLoRA):
dummy_inputs=InternS1DummyInputsBuilder,
)
class InternS1ForConditionalGeneration(
nn.Module, SupportsMultiModal, SupportsPP, SupportsLoRA
):
merge_by_field_config = True
# To ensure correct weight loading and mapping.
@@ -498,14 +519,15 @@ class InternS1ForConditionalGeneration(nn.Module, SupportsMultiModal,
"model.language_model.": "language_model.model.",
"model.vision_tower.": "vision_tower.",
"model.multi_modal_projector.": "multi_modal_projector.",
})
}
)
@classmethod
def get_placeholder_str(cls, modality: str, i: int) -> Optional[str]:
# transformers InternVLProcessor uses <IMG_CONTEXT> as the separator
# refer to https://github.com/huggingface/transformers/blob/f90de364c2484c7c325bbe05befdcf487bd75b63/src/transformers/models/internvl/processing_internvl.py#L116
if modality.startswith("image"):
return '<IMG_CONTEXT>'
return "<IMG_CONTEXT>"
if modality.startswith("video"):
return "<video>"
@@ -524,7 +546,8 @@ class InternS1ForConditionalGeneration(nn.Module, SupportsMultiModal,
patch_size = config.vision_config.patch_size[0]
self.patch_size = patch_size
self.num_image_token = int(
(image_size // patch_size)**2 * (config.downsample_ratio**2))
(image_size // patch_size) ** 2 * (config.downsample_ratio**2)
)
self.downsample_ratio = config.downsample_ratio
self.llm_arch_name = config.text_config.architectures[0]
@@ -547,7 +570,8 @@ class InternS1ForConditionalGeneration(nn.Module, SupportsMultiModal,
self.visual_token_mask = None
self.make_empty_intermediate_tensors = (
self.language_model.make_empty_intermediate_tensors)
self.language_model.make_empty_intermediate_tensors
)
def _init_vision_model(
self,
@@ -573,8 +597,12 @@ class InternS1ForConditionalGeneration(nn.Module, SupportsMultiModal,
x = x.view(n, w, int(h * scale_factor), int(c / scale_factor))
# N, W, H * scale, C // scale --> N, H * scale, W, C // scale
x = x.permute(0, 2, 1, 3).contiguous()
x = x.view(n, int(h * scale_factor), int(w * scale_factor),
int(c / (scale_factor * scale_factor)))
x = x.view(
n,
int(h * scale_factor),
int(w * scale_factor),
int(c / (scale_factor * scale_factor)),
)
x = x.permute(0, 2, 1, 3).contiguous()
return x
@@ -582,18 +610,17 @@ class InternS1ForConditionalGeneration(nn.Module, SupportsMultiModal,
vit_embeds = self.vision_tower(pixel_values=pixel_values)
vit_embeds = vit_embeds[:, 1:, :]
h = w = int(vit_embeds.shape[1]**0.5)
h = w = int(vit_embeds.shape[1] ** 0.5)
vit_embeds = vit_embeds.reshape(vit_embeds.shape[0], h, w, -1)
vit_embeds = self.pixel_shuffle(vit_embeds,
scale_factor=self.downsample_ratio)
vit_embeds = vit_embeds.reshape(vit_embeds.shape[0], -1,
vit_embeds.shape[-1])
vit_embeds = self.pixel_shuffle(vit_embeds, scale_factor=self.downsample_ratio)
vit_embeds = vit_embeds.reshape(vit_embeds.shape[0], -1, vit_embeds.shape[-1])
vit_embeds = self.multi_modal_projector(vit_embeds)
return vit_embeds
def _parse_and_validate_image_input(
self, **kwargs: object) -> Optional[InternS1ImageInputs]:
self, **kwargs: object
) -> Optional[InternS1ImageInputs]:
pixel_values = kwargs.pop("pixel_values", None)
image_num_patches = kwargs.pop("image_num_patches", None)
image_embeds = kwargs.pop("image_embeds", None)
@@ -626,7 +653,8 @@ class InternS1ForConditionalGeneration(nn.Module, SupportsMultiModal,
raise AssertionError("This line should be unreachable.")
def _parse_and_validate_video_input(
self, **kwargs: object) -> Optional[InternS1VideoInputs]:
self, **kwargs: object
) -> Optional[InternS1VideoInputs]:
pixel_values_flat_video = kwargs.pop("pixel_values_videos", None)
video_num_patches = kwargs.pop("video_num_patches", None)
video_embeds = kwargs.pop("video_embeds", None)
@@ -662,8 +690,10 @@ class InternS1ForConditionalGeneration(nn.Module, SupportsMultiModal,
self,
image_input: Union[InternS1ImageInputs, InternS1VideoInputs],
) -> tuple[torch.Tensor, ...]:
if (image_input["type"] == "image_embeds"
or image_input["type"] == "video_embeds"):
if (
image_input["type"] == "image_embeds"
or image_input["type"] == "video_embeds"
):
return image_input["data"]
assert self.vision_tower is not None
@@ -674,14 +704,12 @@ class InternS1ForConditionalGeneration(nn.Module, SupportsMultiModal,
# Only one image in the current batch
if len(num_patches) == 1:
return (image_embeds.view(-1,
self.config.text_config.hidden_size), )
return (image_embeds.view(-1, self.config.text_config.hidden_size),)
# NOTE: Image embeddings are split into separate tensors for each image
# by the size of each embedding.
feature_size = image_embeds.shape[1]
image_embeds = image_embeds.view(-1,
self.config.text_config.hidden_size)
image_embeds = image_embeds.view(-1, self.config.text_config.hidden_size)
image_feature_sizes = [
num_patches * feature_size for num_patches in num_patches
]
@@ -693,14 +721,13 @@ class InternS1ForConditionalGeneration(nn.Module, SupportsMultiModal,
# Preserve the order of modalities if there are multiple of them
# from the order of kwargs.
for input_key in kwargs:
if input_key in ("pixel_values",
"image_embeds") and "images" not in modalities:
modalities["images"] = self._parse_and_validate_image_input(
**kwargs)
if input_key in (
"pixel_values_videos", ) and "videos" not in modalities:
modalities["videos"] = self._parse_and_validate_video_input(
**kwargs)
if (
input_key in ("pixel_values", "image_embeds")
and "images" not in modalities
):
modalities["images"] = self._parse_and_validate_image_input(**kwargs)
if input_key in ("pixel_values_videos",) and "videos" not in modalities:
modalities["videos"] = self._parse_and_validate_video_input(**kwargs)
return modalities
@@ -710,9 +737,7 @@ class InternS1ForConditionalGeneration(nn.Module, SupportsMultiModal,
def get_language_model(self) -> torch.nn.Module:
return self.language_model
def get_multimodal_embeddings(self,
**kwargs: object) -> MultiModalEmbeddings:
def get_multimodal_embeddings(self, **kwargs: object) -> MultiModalEmbeddings:
modalities = self._parse_and_validate_multimodal_inputs(**kwargs)
if not modalities:
return []
@@ -743,8 +768,7 @@ class InternS1ForConditionalGeneration(nn.Module, SupportsMultiModal,
is_multimodal: Optional[torch.Tensor] = None,
handle_oov_mm_token: bool = False,
) -> torch.Tensor:
if multimodal_embeddings is not None and len(
multimodal_embeddings) > 0:
if multimodal_embeddings is not None and len(multimodal_embeddings) > 0:
self._set_visual_token_mask(input_ids)
# This is to satisfy the type checker for each overload
@@ -766,7 +790,6 @@ class InternS1ForConditionalGeneration(nn.Module, SupportsMultiModal,
inputs_embeds: Optional[torch.Tensor] = None,
**kwargs: object,
) -> IntermediateTensors:
if intermediate_tensors is not None:
input_ids = None
inputs_embeds = None
@@ -787,8 +810,7 @@ class InternS1ForConditionalGeneration(nn.Module, SupportsMultiModal,
) -> Optional[torch.Tensor]:
return self.language_model.compute_logits(hidden_states)
def load_weights(self, weights: Iterable[tuple[str,
torch.Tensor]]) -> set[str]:
def load_weights(self, weights: Iterable[tuple[str, torch.Tensor]]) -> set[str]:
loader = AutoWeightsLoader(self)
return loader.load_weights(weights, mapper=self.hf_to_vllm_mapper)
@@ -799,4 +821,5 @@ class InternS1ForConditionalGeneration(nn.Module, SupportsMultiModal,
return MultiModelKeys.from_string_field(
language_model="language_model",
connector="multi_modal_projector",
tower_model="vision_tower")
tower_model="vision_tower",
)