[Core] Rename input data types (#8688)
@@ -15,8 +15,8 @@ from typing_extensions import NotRequired
 from vllm.attention import AttentionMetadata
 from vllm.config import CacheConfig, MultiModalConfig
-from vllm.inputs import INPUT_REGISTRY, InputContext, LLMInputs
+from vllm.inputs import (INPUT_REGISTRY, DecoderOnlyInputs, InputContext,
+                         token_inputs)
 from vllm.logger import init_logger
 from vllm.model_executor.layers.activation import get_act_fn
 from vllm.model_executor.layers.quantization import QuantizationConfig
 from vllm.model_executor.layers.sampler import Sampler, SamplerOutput
@@ -37,8 +37,6 @@ from .siglip import (SiglipVisionModel, dummy_seq_data_for_siglip,
 from .utils import (AutoWeightsLoader, flatten_bn, init_vllm_registered_model,
                     merge_multimodal_embeddings)
 
 logger = init_logger(__name__)
 
-# Result in the max possible feature size (2x2 grid of 336x336px tiles)
-MAX_IMAGE_FEATURE_SIZE_HEIGHT = MAX_IMAGE_FEATURE_SIZE_WIDTH = 448
 
@@ -252,10 +250,10 @@ def dummy_data_for_llava_onevision(ctx: InputContext, seq_len: int,
 
 
 def input_processor_when_multimodal_input_image(ctx: InputContext,
-                                                llm_inputs: LLMInputs):
-    multi_modal_data = llm_inputs.get("multi_modal_data")
+                                                inputs: DecoderOnlyInputs):
+    multi_modal_data = inputs.get("multi_modal_data")
     if multi_modal_data is None or "image" not in multi_modal_data:
-        return llm_inputs
+        return inputs
 
     model_config = ctx.model_config
     hf_config = ctx.get_hf_config(LlavaOnevisionConfig)
@@ -290,7 +288,7 @@ def input_processor_when_multimodal_input_image(ctx: InputContext,
         return input_processor_for_clip(
             model_config,
             vision_config,
-            llm_inputs,
+            inputs,
             image_token_id=hf_config.image_token_index,
             image_feature_size_override=image_feature_size,
         )
@@ -298,7 +296,7 @@ def input_processor_when_multimodal_input_image(ctx: InputContext,
         return input_processor_for_siglip(
             model_config,
             vision_config,
-            llm_inputs,
+            inputs,
             image_token_id=hf_config.image_token_index,
             image_feature_size_override=image_feature_size,
         )
@@ -308,10 +306,10 @@ def input_processor_when_multimodal_input_image(ctx: InputContext,
 
 
 def input_processor_when_multimodal_input_video(ctx: InputContext,
-                                                llm_inputs: LLMInputs):
-    multi_modal_data = llm_inputs.get("multi_modal_data")
+                                                inputs: DecoderOnlyInputs):
+    multi_modal_data = inputs.get("multi_modal_data")
     if multi_modal_data is None or "video" not in multi_modal_data:
-        return llm_inputs
+        return inputs
     video_data = multi_modal_data["video"]
 
     model_config = ctx.model_config
@@ -326,15 +324,15 @@ def input_processor_when_multimodal_input_video(ctx: InputContext,
 
         new_prompt, new_token_ids = repeat_and_pad_placeholder_tokens(
             tokenizer,
-            llm_inputs.get("prompt"),
-            llm_inputs["prompt_token_ids"],
+            inputs.get("prompt"),
+            inputs["prompt_token_ids"],
             placeholder_token_id=hf_config.video_token_index,
             repeat_count=video_feature_size,
         )
 
-        return LLMInputs(prompt_token_ids=new_token_ids,
-                         prompt=new_prompt,
-                         multi_modal_data=multi_modal_data)
+        return token_inputs(prompt_token_ids=new_token_ids,
+                            prompt=new_prompt,
+                            multi_modal_data=multi_modal_data)
 
     elif is_list_of(video_data, np.ndarray):
         raise NotImplementedError(
@@ -345,15 +343,15 @@ def input_processor_when_multimodal_input_video(ctx: InputContext,
 
 
 def input_processor_for_llava_onevision(ctx: InputContext,
-                                        llm_inputs: LLMInputs):
-    multi_modal_data = llm_inputs.get("multi_modal_data")
+                                        inputs: DecoderOnlyInputs):
+    multi_modal_data = inputs.get("multi_modal_data")
     if multi_modal_data is None or ("video" not in multi_modal_data
                                     and "image" not in multi_modal_data):
-        return llm_inputs
+        return inputs
     if "image" in multi_modal_data:
-        return input_processor_when_multimodal_input_image(ctx, llm_inputs)
+        return input_processor_when_multimodal_input_image(ctx, inputs)
     if "video" in multi_modal_data:
-        return input_processor_when_multimodal_input_video(ctx, llm_inputs)
+        return input_processor_when_multimodal_input_video(ctx, inputs)
 
     msg = "Unsupported multi data type"
     raise NotImplementedError(msg)
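For callers, the rename is mechanical: input processors that previously took and returned the LLMInputs TypedDict now take DecoderOnlyInputs and build their result through the token_inputs factory. A minimal before/after sketch of that calling convention; the processor itself is hypothetical, and only the vllm.inputs names and the token_inputs keyword arguments are taken from this diff:

    from vllm.inputs import DecoderOnlyInputs, token_inputs

    def my_input_processor(ctx, inputs: DecoderOnlyInputs):
        # Requests without multi-modal data pass through unchanged,
        # mirroring the early returns in the processors above.
        if inputs.get("multi_modal_data") is None:
            return inputs
        # Before: return LLMInputs(prompt_token_ids=..., prompt=..., ...)
        # After: construct the same fields via the token_inputs factory.
        return token_inputs(prompt_token_ids=inputs["prompt_token_ids"],
                            prompt=inputs.get("prompt"),
                            multi_modal_data=inputs["multi_modal_data"])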