# SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project

import pytest
import torch
from vllm import LLM


def test_empty_prompt():
    """Generating from an empty string prompt must raise a ValueError."""
    engine = LLM(model="openai-community/gpt2", enforce_eager=True)
    with pytest.raises(ValueError, match="decoder prompt cannot be empty"):
        engine.generate([""])


def test_out_of_vocab_token():
    """A prompt token id outside the model vocabulary must raise a ValueError."""
    engine = LLM(model="openai-community/gpt2", enforce_eager=True)
    # 999999 is well beyond GPT-2's vocabulary size (~50k tokens).
    bad_request = {"prompt_token_ids": [999999]}
    with pytest.raises(ValueError, match="out of vocabulary"):
        engine.generate(bad_request)


def test_require_mm_embeds():
    """Passing multimodal embedding tensors while the engine was built with
    enable_mm_embeds=False must raise a ValueError mentioning the flag."""
    engine = LLM(
        model="llava-hf/llava-1.5-7b-hf",
        enforce_eager=True,
        enable_mm_embeds=False,
    )
    # A minimal dummy image-embedding tensor; the shape only needs to be
    # tensor-like so the request reaches the enable_mm_embeds check.
    request = {
        "prompt": "<image>",
        "multi_modal_data": {"image": torch.empty(1, 1, 1)},
    }
    with pytest.raises(ValueError, match="--enable-mm-embeds"):
        engine.generate(request)