[Misc] Reorganize inputs (#35182)

Signed-off-by: DarkLight1337 <tlleungac@connect.ust.hk>
This commit is contained in:
Cyrus Leung
2026-03-26 01:22:54 +08:00
committed by GitHub
parent 678b3c99e8
commit ba2f0acc2d
142 changed files with 1212 additions and 1342 deletions

View File

@@ -37,7 +37,7 @@ from vllm.entrypoints.openai.responses.serving import (
from vllm.entrypoints.openai.responses.streaming_events import (
StreamingState,
)
from vllm.inputs.data import TokensPrompt
from vllm.inputs import tokens_input
from vllm.outputs import CompletionOutput, RequestOutput
from vllm.sampling_params import SamplingParams
@@ -258,20 +258,20 @@ class TestValidateGeneratorInput:
"""Test _validate_generator_input with valid prompt length"""
# Create an engine prompt with valid length (less than max_model_len)
valid_prompt_token_ids = list(range(5)) # 5 tokens < 100 max_model_len
engine_prompt = TokensPrompt(prompt_token_ids=valid_prompt_token_ids)
engine_input = tokens_input(valid_prompt_token_ids)
# Call the method
result = serving_responses_instance._validate_generator_input(engine_prompt)
result = serving_responses_instance._validate_generator_input(engine_input)
# Should return None for valid input
assert result is None
# create an invalid engine prompt
invalid_prompt_token_ids = list(range(200)) # 200 tokens >= 100 max_model_len
engine_prompt = TokensPrompt(prompt_token_ids=invalid_prompt_token_ids)
engine_input = tokens_input(invalid_prompt_token_ids)
# Call the method
result = serving_responses_instance._validate_generator_input(engine_prompt)
result = serving_responses_instance._validate_generator_input(engine_input)
# Should return an ErrorResponse
assert result is not None