2025-02-02 14:58:18 -05:00
|
|
|
# SPDX-License-Identifier: Apache-2.0
|
2024-11-21 22:20:08 +08:00
|
|
|
"""Tests for InternVL's multimodal preprocessing kwargs."""
|
2025-02-04 16:44:52 +08:00
|
|
|
from typing import Optional
|
2024-11-21 22:20:08 +08:00
|
|
|
|
|
|
|
|
import pytest
|
|
|
|
|
|
2025-02-04 16:44:52 +08:00
|
|
|
from vllm.multimodal import MULTIMODAL_REGISTRY
|
|
|
|
|
from vllm.multimodal.utils import cached_get_tokenizer
|
2024-11-21 22:20:08 +08:00
|
|
|
|
2025-01-11 13:50:05 +08:00
|
|
|
from ....conftest import _ImageAssets
|
|
|
|
|
from ...utils import build_model_context
|
2024-11-21 22:20:08 +08:00
|
|
|
|
|
|
|
|
|
2025-02-04 16:44:52 +08:00
|
|
|
@pytest.mark.parametrize("model_id", ["OpenGVLab/InternVL2-2B"])
@pytest.mark.parametrize("max_dynamic_patch", [1, 4])
@pytest.mark.parametrize("dynamic_image_size", [True, False, None])
@pytest.mark.parametrize("num_imgs", [1, 2])
def test_processor_override(
    model_id: str,
    image_assets: _ImageAssets,
    max_dynamic_patch: int,
    dynamic_image_size: Optional[bool],
    num_imgs: int,
):
    """Verify that per-call ``mm_processor_kwargs`` override InternVL's
    image preprocessing.

    The test builds a model context with no baked-in processor kwargs,
    then passes ``max_dynamic_patch`` (and optionally
    ``dynamic_image_size``) to ``processor.apply`` and checks that both
    the number of ``<IMG_CONTEXT>`` placeholder tokens and the leading
    dimension of ``pixel_values_flat`` reflect the requested patching.

    Args:
        model_id: HF model id used for both model and tokenizer.
        image_assets: Test-image fixture; only the first asset is used.
        max_dynamic_patch: Requested maximum number of dynamic patches.
        dynamic_image_size: Tri-state override — ``None`` means "do not
            pass the kwarg at all" so the model default applies.
        num_imgs: Number of identical images attached to the prompt.
    """
    ctx = build_model_context(
        model_name=model_id,
        tokenizer_name=model_id,
        trust_remote_code=True,
        mm_processor_kwargs=None,
        limit_mm_per_prompt={"image": num_imgs},
    )
    tokenizer = cached_get_tokenizer(
        ctx.model_config.tokenizer,
        trust_remote_code=ctx.model_config.trust_remote_code,
    )
    processor = MULTIMODAL_REGISTRY.create_processor(
        ctx.model_config,
        tokenizer=tokenizer,
    )

    # Only forward dynamic_image_size when the parametrization asked for
    # an explicit value; None exercises the "kwarg absent" default path.
    mm_processor_kwargs = {
        "max_dynamic_patch": max_dynamic_patch,
    }
    if dynamic_image_size is not None:
        mm_processor_kwargs["dynamic_image_size"] = dynamic_image_size

    # Build the image str / prompt based on the number of images we pass
    prompt = "<image>" * num_imgs
    # 896x896 input so dynamic patching at 448 tiles is actually engaged.
    image = image_assets[0].pil_image.resize((448 * 2, 448 * 2))
    mm_data = {"image": [image] * num_imgs}

    # With dynamic patching enabled, InternVL adds one thumbnail patch on
    # top of the tiled patches whenever more than one patch is produced;
    # disabling dynamic_image_size collapses everything to a single patch.
    expected_num_patches = max_dynamic_patch + 1 if max_dynamic_patch > 1 else 1
    if dynamic_image_size is False:
        expected_num_patches = 1

    processed_inputs = processor.apply(prompt, mm_data, mm_processor_kwargs)

    # Ensure we have the right number of placeholders per num_crops size
    image_token_id = tokenizer.convert_tokens_to_ids("<IMG_CONTEXT>")
    img_tok_count = processed_inputs["prompt_token_ids"].count(image_token_id)
    pixel_shape = processed_inputs["mm_kwargs"]["pixel_values_flat"].shape

    # 256 context tokens per patch (InternVL2's tokens-per-patch count —
    # NOTE(review): constant taken from the original test; confirm against
    # the model config if it ever changes).
    assert img_tok_count == 256 * expected_num_patches * num_imgs
    assert pixel_shape[0] == expected_num_patches * num_imgs
|