Convert formatting to use ruff instead of yapf + isort (#26247)

Signed-off-by: Harry Mellor <19981378+hmellor@users.noreply.github.com>
Harry Mellor
2025-10-05 15:06:22 +01:00
committed by GitHub
parent 17edd8a807
commit d6953beb91
1508 changed files with 115244 additions and 94146 deletions
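
The change swaps two tools for one: ruff's formatter takes over from yapf, and ruff's isort-compatible lint rules ("I") take over from isort. As a rough sketch of what such a migration involves (the section names below are ruff's real configuration keys, but the specific rule selection and line length here are assumptions, not values read from this PR):

    # pyproject.toml -- hedged sketch of a yapf + isort -> ruff migration
    [tool.ruff]
    line-length = 88            # ruff's default; the project's actual limit may differ

    [tool.ruff.lint]
    select = ["E", "F", "I"]    # "I" enables isort-compatible import sorting

    [tool.ruff.format]
    # `ruff format` is the black-style formatter that replaces yapf
    docstring-code-format = true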


@@ -60,21 +60,34 @@ from vllm.model_executor.layers.fused_moe import FusedMoE
 from vllm.model_executor.layers.linear import ReplicatedLinear
 from vllm.model_executor.layers.logits_processor import LogitsProcessor
 from vllm.model_executor.layers.vocab_parallel_embedding import (
-    DEFAULT_VOCAB_PADDING_SIZE, ParallelLMHead)
+    DEFAULT_VOCAB_PADDING_SIZE,
+    ParallelLMHead,
+)
 from vllm.model_executor.model_loader.weight_utils import (
-    default_weight_loader, maybe_remap_kv_scale_name)
+    default_weight_loader,
+    maybe_remap_kv_scale_name,
+)
 from vllm.model_executor.models.deepseek_v2 import DeepseekV2Model
-from vllm.model_executor.models.interfaces import (SupportsMultiModal,
-                                                   SupportsPP)
+from vllm.model_executor.models.interfaces import SupportsMultiModal, SupportsPP
 from vllm.model_executor.models.moonvit import MoonVitPretrainedModel
 from vllm.multimodal import MULTIMODAL_REGISTRY
-from vllm.multimodal.inputs import (MultiModalDataDict, MultiModalFieldConfig,
-                                    MultiModalKwargsItems, NestedTensors)
-from vllm.multimodal.parse import (ImageEmbeddingItems, ImageProcessorItems,
-                                   MultiModalDataItems)
-from vllm.multimodal.processing import (BaseMultiModalProcessor,
-                                        BaseProcessingInfo, PromptReplacement,
-                                        PromptUpdate)
+from vllm.multimodal.inputs import (
+    MultiModalDataDict,
+    MultiModalFieldConfig,
+    MultiModalKwargsItems,
+    NestedTensors,
+)
+from vllm.multimodal.parse import (
+    ImageEmbeddingItems,
+    ImageProcessorItems,
+    MultiModalDataItems,
+)
+from vllm.multimodal.processing import (
+    BaseMultiModalProcessor,
+    BaseProcessingInfo,
+    PromptReplacement,
+    PromptUpdate,
+)
 from vllm.multimodal.profiling import BaseDummyInputsBuilder
 from vllm.sequence import IntermediateTensors
 from vllm.transformers_utils.configs import KimiVLConfig, MoonViTConfig
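
Nearly all of the import churn in this hunk follows one ruff-format rule, the "magic trailing comma": once a bracketed list is split across lines, every element gets its own line and a trailing comma, replacing yapf/isort's fill-to-the-column wrapping (and imports that now fit on one line are collapsed). A minimal before/after sketch with hypothetical names:

    # yapf + isort (old): wrap at the column limit, align under the paren
    from pkg.module import (first_item, second_item,
                            third_item)

    # ruff format (new): one item per line, closed with a trailing comma
    from pkg.module import (
        first_item,
        second_item,
        third_item,
    )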
@@ -93,33 +106,35 @@ class MaxImageTokenMeta:
 
 
 class KimiVLMultiModalProjector(nn.Module):
-
-    def __init__(self, config: KimiVLConfig, \
-                 use_data_parallel: bool = False, prefix: str = ""):
+    def __init__(
+        self, config: KimiVLConfig, use_data_parallel: bool = False, prefix: str = ""
+    ):
         super().__init__()
         self.use_data_parallel = use_data_parallel
 
-        self.hidden_size = (config.vision_config.hidden_size *
-                            config.vision_config.merge_kernel_size[0] *
-                            config.vision_config.merge_kernel_size[1])
+        self.hidden_size = (
+            config.vision_config.hidden_size
+            * config.vision_config.merge_kernel_size[0]
+            * config.vision_config.merge_kernel_size[1]
+        )
 
-        self.pre_norm = torch.nn.LayerNorm(config.vision_config.hidden_size,
-                                           eps=1e-5)
-        self.linear_1 = ReplicatedLinear(self.hidden_size,
-                                         self.hidden_size,
-                                         bias=True,
-                                         prefix=maybe_prefix(
-                                             prefix, "linear_1"))
-        self.linear_2 = ReplicatedLinear(self.hidden_size,
-                                         config.text_config.hidden_size,
-                                         bias=True,
-                                         prefix=maybe_prefix(
-                                             prefix, "linear_2"))
+        self.pre_norm = torch.nn.LayerNorm(config.vision_config.hidden_size, eps=1e-5)
+        self.linear_1 = ReplicatedLinear(
+            self.hidden_size,
+            self.hidden_size,
+            bias=True,
+            prefix=maybe_prefix(prefix, "linear_1"),
+        )
+        self.linear_2 = ReplicatedLinear(
+            self.hidden_size,
+            config.text_config.hidden_size,
+            bias=True,
+            prefix=maybe_prefix(prefix, "linear_2"),
+        )
         self.act = GELUActivation()
 
     def forward(self, image_features: torch.Tensor) -> torch.Tensor:
-        hidden_states = self.pre_norm(image_features).view(
-            -1, self.hidden_size)
+        hidden_states = self.pre_norm(image_features).view(-1, self.hidden_size)
         hidden_states, _ = self.linear_1(hidden_states)
         hidden_states = self.act(hidden_states)
         hidden_states, _ = self.linear_2(hidden_states)
@@ -134,6 +149,7 @@ class KimiVLImagePixelInputs(TensorSchema):
         - ps: Patch size
         - ni: Number of images
     """
+
     type: Literal["pixel_values"] = "pixel_values"
 
     pixel_values: Annotated[
@@ -150,7 +166,6 @@ KimiVLImageInputs = KimiVLImagePixelInputs
 
 
 class KimiVLProcessingInfo(BaseProcessingInfo):
-
     def get_hf_config(self):
         return self.ctx.get_hf_config(KimiVLConfig)
 
@@ -169,25 +184,25 @@ class KimiVLProcessingInfo(BaseProcessingInfo):
         in_token_limit = hf_processor.image_processor.in_token_limit
         height = image_height
         width = image_width
 
-        assert isinstance(height,
-                          int), f"height must be int, current height {height}"
-        assert isinstance(width,
-                          int), f"width must be int, current width {width}"
+        assert isinstance(height, int), f"height must be int, current height {height}"
+        assert isinstance(width, int), f"width must be int, current width {width}"
         assert kernel_size is not None, "kernel_size must be specified"
 
         if (width // patch_size) * (height // patch_size) > in_token_limit:
-            scale = math.sqrt(in_token_limit / ((width // patch_size) *
-                                                (height // patch_size)))
+            scale = math.sqrt(
+                in_token_limit / ((width // patch_size) * (height // patch_size))
+            )
             new_w, new_h = int(width * scale), int(height * scale)
             width, height = new_w, new_h
 
         kernel_height, kernel_width = kernel_size
-        pad_height = (kernel_height * patch_size - height %
-                      (kernel_height * patch_size)) % (kernel_height *
-                                                       patch_size)
-        pad_width = (kernel_width * patch_size - width %
-                     (kernel_width * patch_size)) % (kernel_width * patch_size)
+        pad_height = (
+            kernel_height * patch_size - height % (kernel_height * patch_size)
+        ) % (kernel_height * patch_size)
+        pad_width = (
+            kernel_width * patch_size - width % (kernel_width * patch_size)
+        ) % (kernel_width * patch_size)
 
         # Calculate new dimensions after padding and patching
         token_height = (height + pad_height) // (kernel_size[0] * patch_size)
@@ -200,7 +215,6 @@ class KimiVLProcessingInfo(BaseProcessingInfo):
 
 
 class KimiVLDummyInputsBuilder(BaseDummyInputsBuilder[KimiVLProcessingInfo]):
-
     def get_dummy_text(self, mm_counts: Mapping[str, int]) -> str:
         num_images = mm_counts.get("image", 0)
 
@@ -220,16 +234,16 @@ class KimiVLDummyInputsBuilder(BaseDummyInputsBuilder[KimiVLProcessingInfo]):
         image_overrides = mm_options.get("image") if mm_options else None
 
         return {
-            "image":
-            self._get_dummy_images(width=MaxImageTokenMeta.width,
-                                   height=MaxImageTokenMeta.height,
-                                   num_images=num_images,
-                                   overrides=image_overrides)
+            "image": self._get_dummy_images(
+                width=MaxImageTokenMeta.width,
+                height=MaxImageTokenMeta.height,
+                num_images=num_images,
+                overrides=image_overrides,
+            )
         }
 
 
 class KimiVLMultiModalProcessor(BaseMultiModalProcessor[KimiVLProcessingInfo]):
-
     def _get_mm_fields_config(
         self,
         hf_inputs: BatchFeature,
@@ -242,7 +256,8 @@ class KimiVLMultiModalProcessor(BaseMultiModalProcessor[KimiVLProcessingInfo]):
         # image_grid_hws is shapes for each subtensor in pixel_values
         return dict(
             pixel_values=MultiModalFieldConfig.flat_from_sizes(
-                "image", image_grid_sizes),
+                "image", image_grid_sizes
+            ),
             image_grid_hws=MultiModalFieldConfig.batched("image"),
         )
@@ -256,7 +271,8 @@ class KimiVLMultiModalProcessor(BaseMultiModalProcessor[KimiVLProcessingInfo]):
         def get_replacement(item_idx: int):
             images = mm_items.get_items(
-                "image", (ImageEmbeddingItems, ImageProcessorItems))
+                "image", (ImageEmbeddingItems, ImageProcessorItems)
+            )
 
             if isinstance(images, ImageEmbeddingItems):
                 num_image_tokens = images.get_feature_size(item_idx)
@@ -278,11 +294,12 @@ class KimiVLMultiModalProcessor(BaseMultiModalProcessor[KimiVLProcessingInfo]):
     ]
 
 
-@MULTIMODAL_REGISTRY.register_processor(KimiVLMultiModalProcessor,
-                                        info=KimiVLProcessingInfo,
-                                        dummy_inputs=KimiVLDummyInputsBuilder)
-class KimiVLForConditionalGeneration(nn.Module, SupportsMultiModal,
-                                     SupportsPP):
+@MULTIMODAL_REGISTRY.register_processor(
+    KimiVLMultiModalProcessor,
+    info=KimiVLProcessingInfo,
+    dummy_inputs=KimiVLDummyInputsBuilder,
+)
+class KimiVLForConditionalGeneration(nn.Module, SupportsMultiModal, SupportsPP):
     merge_by_field_config = True
     supports_encoder_tp_data = True
 
@@ -306,21 +323,27 @@ class KimiVLForConditionalGeneration(nn.Module, SupportsMultiModal,
         quant_config = vllm_config.quant_config
         assert isinstance(config.vision_config, MoonViTConfig)
 
-        self.use_data_parallel = model_config.multimodal_config.mm_encoder_tp_mode == "data"
+        self.use_data_parallel = (
+            model_config.multimodal_config.mm_encoder_tp_mode == "data"
+        )
         self.hidden_size = config.text_config.hidden_size
-        self.vision_tower = MoonVitPretrainedModel(config.vision_config,
-                                                   self.use_data_parallel,
-                                                   prefix=maybe_prefix(
-                                                       prefix, "vision_tower"))
+        self.vision_tower = MoonVitPretrainedModel(
+            config.vision_config,
+            self.use_data_parallel,
+            prefix=maybe_prefix(prefix, "vision_tower"),
+        )
         self.multi_modal_projector = KimiVLMultiModalProjector(
             config=config,
             use_data_parallel=self.use_data_parallel,
-            prefix=maybe_prefix(prefix, "multi_modal_projector"))
+            prefix=maybe_prefix(prefix, "multi_modal_projector"),
+        )
 
         self.quant_config = quant_config
 
         sub_vllm_config = copy.deepcopy(vllm_config)
-        sub_vllm_config.model_config.hf_config = sub_vllm_config.model_config.hf_config.text_config
+        sub_vllm_config.model_config.hf_config = (
+            sub_vllm_config.model_config.hf_config.text_config
+        )
         self.language_model = DeepseekV2Model(
             vllm_config=sub_vllm_config,
             prefix=maybe_prefix(prefix, "language_model"),
@@ -337,14 +360,17 @@ class KimiVLForConditionalGeneration(nn.Module, SupportsMultiModal,
         else:
             self.lm_head = PPMissingLayer()
         self.make_empty_intermediate_tensors = (
-            self.language_model.make_empty_intermediate_tensors)
+            self.language_model.make_empty_intermediate_tensors
+        )
         logit_scale = getattr(config, "logit_scale", 1.0)
-        self.logits_processor = LogitsProcessor(self.unpadded_vocab_size,
-                                                config.vocab_size, logit_scale)
+        self.logits_processor = LogitsProcessor(
+            self.unpadded_vocab_size, config.vocab_size, logit_scale
+        )
         self.media_placeholder: int = self.config.media_placeholder_token_id
 
     def _parse_and_validate_image_input(
-            self, **kwargs: object) -> Optional[KimiVLImageInputs]:
+        self, **kwargs: object
+    ) -> Optional[KimiVLImageInputs]:
         # image input type must be pixel values now
         pixel_values = kwargs.pop("pixel_values", None)
         image_grid_hws = kwargs.pop("image_grid_hws", None)
@@ -360,34 +386,32 @@ class KimiVLForConditionalGeneration(nn.Module, SupportsMultiModal,
     # perform vt on processored pixel_values
     @torch.inference_mode()
-    def _process_image_pixels(self,
-                              inputs: KimiVLImagePixelInputs) -> torch.Tensor:
+    def _process_image_pixels(self, inputs: KimiVLImagePixelInputs) -> torch.Tensor:
         assert self.vision_tower is not None
 
         pixel_values = inputs["pixel_values"]
         image_grid_hws = inputs["image_grid_hws"]
 
         if self.use_data_parallel:
-            return run_dp_sharded_mrope_vision_model(self.vision_tower,
-                                                     pixel_values,
-                                                     image_grid_hws.tolist(),
-                                                     rope_type="rope_2d")
+            return run_dp_sharded_mrope_vision_model(
+                self.vision_tower,
+                pixel_values,
+                image_grid_hws.tolist(),
+                rope_type="rope_2d",
+            )
         else:
             return self.vision_tower(pixel_values, image_grid_hws)
 
-    def _process_image_input(self,
-                             image_input: KimiVLImageInputs) -> torch.Tensor:
+    def _process_image_input(self, image_input: KimiVLImageInputs) -> torch.Tensor:
         assert image_input["type"] == "pixel_values"
         image_features = self._process_image_pixels(image_input)
         assert isinstance(image_features, (list, tuple))
         lengths = [x.shape[0] for x in image_features]
-        return self.multi_modal_projector(
-            torch.cat(image_features)).split(lengths)
+        return self.multi_modal_projector(torch.cat(image_features)).split(lengths)
 
     def get_language_model(self) -> torch.nn.Module:
         return self.language_model
 
-    def get_multimodal_embeddings(self,
-                                  **kwargs: object) -> Optional[NestedTensors]:
+    def get_multimodal_embeddings(self, **kwargs: object) -> Optional[NestedTensors]:
         # Validate the multimodal input keyword arguments
         image_input = self._parse_and_validate_image_input(**kwargs)
         if image_input is None:
@@ -417,8 +441,7 @@ class KimiVLForConditionalGeneration(nn.Module, SupportsMultiModal,
         return hidden_states
 
-    def compute_logits(self, hidden_states: torch.Tensor,
-                       **kwargs) -> torch.Tensor:
+    def compute_logits(self, hidden_states: torch.Tensor, **kwargs) -> torch.Tensor:
         logits = self.logits_processor(self.lm_head, hidden_states, **kwargs)
         return logits
 
@@ -447,7 +470,8 @@ class KimiVLForConditionalGeneration(nn.Module, SupportsMultiModal,
                 ckpt_gate_proj_name="gate_proj",
                 ckpt_down_proj_name="down_proj",
                 ckpt_up_proj_name="up_proj",
-                num_experts=config.n_routed_experts)
+                num_experts=config.n_routed_experts,
+            )
         else:
             expert_params_mapping = []
 
@@ -463,8 +487,7 @@ class KimiVLForConditionalGeneration(nn.Module, SupportsMultiModal,
             if spec_layer is not None:
                 continue  # skip spec decode layers for main model
 
-            if ("rotary_emb.cos_cached" in name
-                    or "rotary_emb.sin_cached" in name):
+            if "rotary_emb.cos_cached" in name or "rotary_emb.sin_cached" in name:
                 # Models trained using ColossalAI may include these tensors in
                 # the checkpoint. Skip them.
                 continue
@@ -478,8 +501,7 @@ class KimiVLForConditionalGeneration(nn.Module, SupportsMultiModal,
                 # not vision model for now.
                 use_default_weight_loading = True
             else:
-                for (param_name, weight_name,
-                     shard_id) in stacked_params_mapping:
+                for param_name, weight_name, shard_id in stacked_params_mapping:
                     if weight_name not in name:
                         continue
                     # We have mlp.experts[0].gate_proj in the checkpoint.
@@ -488,7 +510,7 @@ class KimiVLForConditionalGeneration(nn.Module, SupportsMultiModal,
                     # name will be updated to mlp.experts[0].gate_up_proj, which
                     # will then be updated below in expert_params_mapping
                     # for mlp.experts[0].gate_gate_up_proj, which breaks load.
-                    if (("mlp.experts." in name) and name not in params_dict):
+                    if ("mlp.experts." in name) and name not in params_dict:
                         continue
                     name = name.replace(weight_name, param_name)
                     # Skip loading extra bias for GPTQ models.
@@ -503,8 +525,12 @@ class KimiVLForConditionalGeneration(nn.Module, SupportsMultiModal,
                     weight_loader(param, loaded_weight, shard_id, **kwargs)
                     break
                 else:
-                    for idx, (param_name, weight_name, expert_id,
-                              shard_id) in enumerate(expert_params_mapping):
+                    for idx, (
+                        param_name,
+                        weight_name,
+                        expert_id,
+                        shard_id,
+                    ) in enumerate(expert_params_mapping):
                         if weight_name not in name:
                             continue
                         name = name.replace(weight_name, param_name)
@@ -514,12 +540,14 @@ class KimiVLForConditionalGeneration(nn.Module, SupportsMultiModal,
                         param = params_dict[name]
                         weight_loader = param.weight_loader
-                        weight_loader(param,
-                                      loaded_weight,
-                                      name,
-                                      expert_id=expert_id,
-                                      shard_id=shard_id,
-                                      **kwargs)
+                        weight_loader(
+                            param,
+                            loaded_weight,
+                            name,
+                            expert_id=expert_id,
+                            shard_id=shard_id,
+                            **kwargs,
+                        )
                         break
                 else:
                     use_default_weight_loading = True
@@ -536,18 +564,18 @@ class KimiVLForConditionalGeneration(nn.Module, SupportsMultiModal,
                     continue
                 param = params_dict[name]
-                weight_loader = getattr(param, "weight_loader",
-                                        default_weight_loader)
+                weight_loader = getattr(param, "weight_loader", default_weight_loader)
                 weight_loader(param, loaded_weight, **kwargs)
 
 
-def get_spec_layer_idx_from_weight_name(config: DeepseekV2Config,
-                                        weight_name: str) -> Optional[int]:
-    if hasattr(config,
-               "num_nextn_predict_layers") and (config.num_nextn_predict_layers
-                                                > 0):
+def get_spec_layer_idx_from_weight_name(
+    config: DeepseekV2Config, weight_name: str
+) -> Optional[int]:
+    if hasattr(config, "num_nextn_predict_layers") and (
+        config.num_nextn_predict_layers > 0
+    ):
         layer_idx = config.num_hidden_layers
         for i in range(config.num_nextn_predict_layers):
-            if weight_name.startswith(f"model.layers.{layer_idx+i}."):
+            if weight_name.startswith(f"model.layers.{layer_idx + i}."):
                 return layer_idx + i
     return None
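
Reproducing a sweep like this is typically a two-command affair; a sketch of the usual invocations (not necessarily the exact commands behind this commit):

    # Hedged sketch: sort imports, then reformat the whole tree
    ruff check --select I --fix .   # isort-compatible import ordering
    ruff format .                   # black-style reformatting (replaces yapf)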