# SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
|
|
|
import pytest
|
|
|
|
|
|
|
|
|
|
from ..conftest import IMAGE_ASSETS
|
|
|
|
|
|
|
|
|
|
# LLaVA-1.5 chat-template prompts, keyed by the name of the shared test
# image asset each one accompanies.
_PROMPT_BY_ASSET = {
    "stop_sign": "USER: <image>\nWhat's the content of the image?\nASSISTANT:",
    "cherry_blossom": "USER: <image>\nWhat is the season?\nASSISTANT:",
}

HF_IMAGE_PROMPTS = IMAGE_ASSETS.prompts(_PROMPT_BY_ASSET)
|
|
|
|
|
|
|
|
|
|
# Model under test: a single LLaVA-1.5 checkpoint is enough to exercise the
# context-length validation path (weights are loaded as dummies below).
models = ["llava-hf/llava-1.5-7b-hf"]
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
@pytest.mark.parametrize("model", models)
def test_context_length_too_short(vllm_runner, image_assets, model):
    """The engine must raise a ValueError when a multimodal prompt cannot
    fit within the configured maximum model length.

    LLaVA expands each image into 576 feature tokens, so a 128-token
    context window can never accommodate an image prompt; either engine
    construction or the first generation call is expected to fail with a
    "longer than the maximum model length" error.
    """
    pil_images = [asset.pil_image for asset in image_assets]

    with pytest.raises(ValueError, match="longer than the maximum model length"):
        # 128 is intentionally far below LLaVA's 576-token image feature size.
        vllm_model = vllm_runner(
            model,
            max_model_len=128,
            enforce_eager=True,
            load_format="dummy",  # skip real weight loading; config is enough
        )
        with vllm_model:
            vllm_model.generate_greedy(
                [HF_IMAGE_PROMPTS[0]],
                max_tokens=1,
                images=[pil_images[0]],
            )
|