[Misc] Reorganize inputs (#35182)

Signed-off-by: DarkLight1337 <tlleungac@connect.ust.hk>
This commit is contained in:
Cyrus Leung
2026-03-26 01:22:54 +08:00
committed by GitHub
parent 678b3c99e8
commit ba2f0acc2d
142 changed files with 1212 additions and 1342 deletions

View File

@@ -22,16 +22,14 @@ from typing import TYPE_CHECKING
import torch
from vllm.config.utils import getattr_iter
from vllm.inputs import MultiModalDataDict, MultiModalInput, mm_input
from vllm.logger import init_logger
from vllm.model_executor.models.interfaces import SupportsMRoPE, SupportsMultiModal
from vllm.multimodal import MultiModalKwargsItems
from vllm.multimodal.inputs import (
MultiModalDataDict,
MultiModalFeatureSpec,
MultiModalFieldConfig,
MultiModalInputs,
PlaceholderRange,
mm_inputs,
)
from vllm.multimodal.parse import (
ImageProcessorItems,
@@ -179,7 +177,7 @@ class MultiModalProcessor(BaseMultiModalProcessor[MultiModalProcessingInfo]):
self,
inputs: ProcessorInputs,
timing_ctx: TimingContext,
-) -> MultiModalInputs:
+) -> MultiModalInput:
"""
Process multi-modal inputs to be used in vLLM.
@@ -261,7 +259,7 @@ class MultiModalProcessor(BaseMultiModalProcessor[MultiModalProcessingInfo]):
with timing_ctx.record("get_mm_hashes"):
mm_hashes = inputs.get_mm_hashes(self.info.model_id)
-return mm_inputs(
+return mm_input(
prompt_token_ids=prompt_ids,
mm_kwargs=mm_kwargs,
mm_hashes=mm_hashes,