[Misc] Use VLLMValidationError in batch, pooling, and tokenize protocol validators (#36256)

Signed-off-by: umut-polat <52835619+umut-polat@users.noreply.github.com>
Author: Umut Polat
Date: 2026-03-17 16:52:30 +03:00
Committed by: GitHub
Parent: f340324335
Commit: 56cb1baa66

3 changed files with 13 additions and 8 deletions


@@ -54,6 +54,7 @@ from vllm.entrypoints.pooling.score.protocol import (
     ScoreResponse,
 )
 from vllm.entrypoints.utils import create_error_response
+from vllm.exceptions import VLLMValidationError
 from vllm.logger import init_logger
 from vllm.reasoning import ReasoningParserManager
 from vllm.utils import random_uuid
@@ -86,9 +87,10 @@ class BatchTranscriptionRequest(TranscriptionRequest):
     def validate_no_file(cls, data: Any):
         """Ensure file field is not provided in batch requests."""
         if isinstance(data, dict) and "file" in data:
-            raise ValueError(
+            raise VLLMValidationError(
                 "The 'file' field is not supported in batch requests. "
-                "Use 'file_url' instead."
+                "Use 'file_url' instead.",
+                parameter="file",
             )
         return data
@@ -116,9 +118,10 @@ class BatchTranslationRequest(TranslationRequest):
     def validate_no_file(cls, data: Any):
         """Ensure file field is not provided in batch requests."""
         if isinstance(data, dict) and "file" in data:
-            raise ValueError(
+            raise VLLMValidationError(
                 "The 'file' field is not supported in batch requests. "
-                "Use 'file_url' instead."
+                "Use 'file_url' instead.",
+                parameter="file",
            )
         return data
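
The diff only shows call sites for the new exception; a minimal sketch of a class compatible with both call patterns (with and without `parameter`) could look like the following. The real definition lives in vllm/exceptions.py and may carry more than this; subclassing ValueError is an assumption here, chosen so pydantic validators keep folding the error into their usual ValidationError.

```python
# Minimal sketch, not vllm's actual definition: an exception compatible
# with the call sites above. `parameter` names the offending request field
# so handlers can echo it back to the client; it is optional because the
# chat/tokenize validators below raise with a message only.
class VLLMValidationError(ValueError):
    def __init__(self, message: str, *, parameter: str | None = None) -> None:
        super().__init__(message)
        self.parameter = parameter
```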


@@ -11,6 +11,7 @@ from vllm.entrypoints.chat_utils import (
     ChatTemplateContentFormatOption,
 )
 from vllm.entrypoints.openai.engine.protocol import OpenAIBaseModel
+from vllm.exceptions import VLLMValidationError
 from vllm.renderers import ChatParams, merge_kwargs
 from vllm.utils import random_uuid
 from vllm.utils.serial_utils import EmbedDType, EncodingFormat, Endianness
@@ -147,9 +148,9 @@ class ChatRequestMixin(OpenAIBaseModel):
     @classmethod
     def check_generation_prompt(cls, data):
         if data.get("continue_final_message") and data.get("add_generation_prompt"):
-            raise ValueError(
+            raise VLLMValidationError(
                 "Cannot set both `continue_final_message` and "
-                "`add_generation_prompt` to True."
+                "`add_generation_prompt` to True.",
             )
         return data
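
Since `data` arrives as a plain dict, `check_generation_prompt` presumably runs as a pydantic `model_validator(mode="before")` whose decorator sits just above the shown hunk. A standalone sketch of how the rejection surfaces; the stand-in model is illustrative, and only the field names and the validator body come from the diff:

```python
# Illustrative stand-in for ChatRequestMixin; not vllm's actual model.
from pydantic import BaseModel, model_validator

from vllm.exceptions import VLLMValidationError


class SketchChatRequest(BaseModel):
    continue_final_message: bool = False
    add_generation_prompt: bool = False

    @model_validator(mode="before")
    @classmethod
    def check_generation_prompt(cls, data):
        if data.get("continue_final_message") and data.get("add_generation_prompt"):
            raise VLLMValidationError(
                "Cannot set both `continue_final_message` and "
                "`add_generation_prompt` to True.",
            )
        return data


# If VLLMValidationError subclasses ValueError, pydantic wraps it in a
# ValidationError at construction time; otherwise it propagates as-is:
# SketchChatRequest(continue_final_message=True, add_generation_prompt=True)
```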


@@ -17,6 +17,7 @@ from vllm.entrypoints.openai.chat_completion.protocol import (
 from vllm.entrypoints.openai.engine.protocol import (
     OpenAIBaseModel,
 )
+from vllm.exceptions import VLLMValidationError
 from vllm.renderers import ChatParams, TokenizeParams, merge_kwargs
@@ -120,9 +121,9 @@ class TokenizeChatRequest(OpenAIBaseModel):
     @classmethod
     def check_generation_prompt(cls, data):
         if data.get("continue_final_message") and data.get("add_generation_prompt"):
-            raise ValueError(
+            raise VLLMValidationError(
                 "Cannot set both `continue_final_message` and "
-                "`add_generation_prompt` to True."
+                "`add_generation_prompt` to True.",
             )
         return data
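
Downstream, the richer error type lets entrypoint handlers report which field was rejected. A hedged sketch of such a handler-side mapping; the payload shape follows the OpenAI-style error convention and the helper name is hypothetical, since this diff does not show how create_error_response consumes the exception:

```python
# Hypothetical helper, not part of this diff: turn a VLLMValidationError
# into an OpenAI-style error payload, echoing the offending parameter
# (e.g. "file" from the batch validators above) when it was set.
from http import HTTPStatus

from vllm.exceptions import VLLMValidationError


def build_error_payload(exc: VLLMValidationError) -> dict:
    error: dict = {
        "message": str(exc),
        "type": "invalid_request_error",
        "code": HTTPStatus.BAD_REQUEST.value,
    }
    if getattr(exc, "parameter", None) is not None:
        error["param"] = exc.parameter
    return {"error": error}
```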