[CI/Build] Separate out flaky responses API tests (#32110)
Signed-off-by: DarkLight1337 <tlleungac@connect.ust.hk>
This commit is contained in:
0
tests/entrypoints/openai/responses/__init__.py
Normal file
0
tests/entrypoints/openai/responses/__init__.py
Normal file
89
tests/entrypoints/openai/responses/test_errors.py
Normal file
89
tests/entrypoints/openai/responses/test_errors.py
Normal file
@@ -0,0 +1,89 @@
|
||||
# SPDX-License-Identifier: Apache-2.0
|
||||
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
|
||||
|
||||
from http import HTTPStatus
|
||||
from unittest.mock import MagicMock
|
||||
|
||||
import pytest
|
||||
|
||||
from vllm.entrypoints.openai.protocol import ErrorResponse
|
||||
from vllm.entrypoints.openai.serving_engine import GenerationError, OpenAIServing
|
||||
|
||||
|
||||
@pytest.mark.asyncio
async def test_raise_if_error_raises_generation_error():
    """_raise_if_error must raise GenerationError only for an "error" reason."""
    # Minimal OpenAIServing wired up with mocks; only max_model_len is read.
    engine = MagicMock()
    engine.model_config = MagicMock()
    engine.model_config.max_model_len = 100
    serving = OpenAIServing(
        engine_client=engine,
        models=MagicMock(),
        request_logger=None,
    )

    # An "error" finish reason must surface as a GenerationError with a
    # generic message and a 500 status code.
    with pytest.raises(GenerationError) as raised:
        serving._raise_if_error("error", "test-request-id")
    assert str(raised.value) == "Internal server error"
    assert raised.value.status_code == HTTPStatus.INTERNAL_SERVER_ERROR

    # Any other finish reason (or none at all) must pass through silently.
    for finish_reason in ("stop", "length", None):
        serving._raise_if_error(finish_reason, "test-request-id")
|
||||
|
||||
|
||||
@pytest.mark.asyncio
async def test_convert_generation_error_to_response():
    """The converted ErrorResponse carries the error's type/message/code."""
    engine = MagicMock()
    engine.model_config = MagicMock()
    engine.model_config.max_model_len = 100
    serving = OpenAIServing(
        engine_client=engine,
        models=MagicMock(),
        request_logger=None,
    )

    response = serving._convert_generation_error_to_response(
        GenerationError("Internal server error")
    )

    assert isinstance(response, ErrorResponse)
    assert response.error.type == "InternalServerError"
    assert response.error.message == "Internal server error"
    assert response.error.code == HTTPStatus.INTERNAL_SERVER_ERROR
|
||||
|
||||
|
||||
@pytest.mark.asyncio
async def test_convert_generation_error_to_streaming_response():
    """The streaming conversion yields a JSON string embedding the error."""
    engine = MagicMock()
    engine.model_config = MagicMock()
    engine.model_config.max_model_len = 100
    serving = OpenAIServing(
        engine_client=engine,
        models=MagicMock(),
        request_logger=None,
    )

    error_json = serving._convert_generation_error_to_streaming_response(
        GenerationError("Internal server error")
    )

    # Streaming responses are serialized as plain strings containing both the
    # message and the error type.
    assert isinstance(error_json, str)
    assert "Internal server error" in error_json
    assert "InternalServerError" in error_json
|
||||
330
tests/entrypoints/openai/responses/test_function_call_parsing.py
Normal file
330
tests/entrypoints/openai/responses/test_function_call_parsing.py
Normal file
@@ -0,0 +1,330 @@
|
||||
# SPDX-License-Identifier: Apache-2.0
|
||||
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
|
||||
"""Test function call parsing in ResponsesRequest."""
|
||||
|
||||
import json
|
||||
|
||||
import pytest
|
||||
from openai.types.responses import ResponseFunctionToolCall
|
||||
|
||||
from vllm.entrypoints.openai.protocol import ResponsesRequest
|
||||
|
||||
|
||||
def test_function_call_dict_converted_to_object():
    """A raw function_call dict in `input` becomes a ResponseFunctionToolCall."""
    payload = {
        "model": "gpt-oss",
        "input": [
            {
                "type": "function_call",
                "call_id": "fc_123",
                "name": "get_weather",
                "arguments": '{"location": "Boston", "unit": "celsius"}',
            }
        ],
    }

    request = ResponsesRequest(**payload)

    # The validator must have replaced the dict with the typed object.
    assert len(request.input) == 1
    item = request.input[0]
    assert isinstance(item, ResponseFunctionToolCall)
    assert item.call_id == "fc_123"
    assert item.name == "get_weather"
    assert item.arguments == '{"location": "Boston", "unit": "celsius"}'
|
||||
|
||||
|
||||
def test_direct_function_call_object_preservation():
    """A ResponseFunctionToolCall instance passed directly is kept as-is."""
    function_call = ResponseFunctionToolCall(
        type="function_call",
        call_id="fc_456",
        name="get_stock_price",
        arguments='{"symbol": "AAPL"}',
    )

    request = ResponsesRequest(model="gpt-oss", input=[function_call])

    # The validator must not copy or re-parse an already-typed object:
    # identity (is) is asserted, not mere equality.
    assert len(request.input) == 1
    assert request.input[0] is function_call
|
||||
|
||||
|
||||
def test_mixed_input_types_with_function_calls():
    """Messages stay dicts while function_call items are parsed to objects."""
    request = ResponsesRequest(
        model="gpt-oss",
        input=[
            # Plain user message: should be validated but not converted here.
            {
                "type": "message",
                "role": "user",
                "content": [{"type": "input_text", "text": "What's the weather?"}],
            },
            # Two function calls: both should be parsed.
            {
                "type": "function_call",
                "call_id": "fc_789",
                "name": "check_weather",
                "arguments": '{"location": "NYC"}',
            },
            {
                "type": "function_call",
                "call_id": "fc_790",
                "name": "get_time",
                "arguments": "{}",
            },
        ],
    )

    assert len(request.input) == 3

    # The message passes through as a dict.
    assert request.input[0]["type"] == "message"

    # Both function_call entries are now typed objects, order preserved.
    for index, (call_id, name) in enumerate(
        [("fc_789", "check_weather"), ("fc_790", "get_time")], start=1
    ):
        item = request.input[index]
        assert isinstance(item, ResponseFunctionToolCall)
        assert item.call_id == call_id
        assert item.name == name
|
||||
|
||||
|
||||
def test_function_call_with_complex_arguments():
    """Deeply nested JSON arguments survive the round trip unchanged."""
    complex_args = {
        "query": "weather forecast",
        "filters": {
            "location": {"city": "San Francisco", "state": "CA"},
            "timeRange": {"start": "2024-01-01", "end": "2024-01-07"},
            "metrics": ["temperature", "humidity", "precipitation"],
        },
        "options": {"format": "detailed", "includeAlerts": True},
    }

    request = ResponsesRequest(
        model="gpt-oss",
        input=[
            {
                "type": "function_call",
                "call_id": "fc_complex",
                "name": "advanced_weather_query",
                "arguments": json.dumps(complex_args),
            }
        ],
    )

    assert len(request.input) == 1
    item = request.input[0]
    assert isinstance(item, ResponseFunctionToolCall)
    assert item.call_id == "fc_complex"
    assert item.name == "advanced_weather_query"

    # Round-trip the arguments to prove nothing was lost or re-ordered.
    assert json.loads(item.arguments) == complex_args
|
||||
|
||||
|
||||
def test_invalid_function_call_fallback():
    """Invalid function_call dicts are left for Pydantic itself to reject."""
    # 'call_id' is deliberately missing. The validator keeps the raw dict so
    # that Pydantic's own validation surfaces the structural error.
    payload = {
        "model": "gpt-oss",
        "input": [
            {"type": "function_call", "name": "incomplete_function", "arguments": "{}"}
        ],
    }

    with pytest.raises(ValueError):
        ResponsesRequest(**payload)
|
||||
|
||||
|
||||
def test_string_input_not_affected():
    """Plain-string input bypasses the list validator entirely."""
    request = ResponsesRequest(
        model="gpt-oss", input="This is a simple string input"
    )

    assert request.input == "This is a simple string input"
|
||||
|
||||
|
||||
def test_empty_list_input():
    """An empty input list is preserved, not rejected or normalized away."""
    request = ResponsesRequest(model="gpt-oss", input=[])

    assert request.input == []
|
||||
|
||||
|
||||
def test_function_call_output_not_affected():
    """function_call_output items are NOT converted by function_call parsing."""
    request = ResponsesRequest(
        model="gpt-oss",
        input=[
            {
                "type": "function_call_output",
                "call_id": "fc_output_123",
                "output": "The weather in Boston is 72°F and sunny.",
            }
        ],
    )

    # The output item must stay a plain dict with its fields untouched.
    assert len(request.input) == 1
    item = request.input[0]
    assert isinstance(item, dict)
    assert item["type"] == "function_call_output"
    assert item["call_id"] == "fc_output_123"
    assert item["output"] == "The weather in Boston is 72°F and sunny."
|
||||
|
||||
|
||||
def test_mixed_function_call_and_output():
    """function_call is parsed to an object; function_call_output stays a dict."""
    request = ResponsesRequest(
        model="gpt-oss",
        input=[
            {
                "type": "function_call",
                "call_id": "fc_call_456",
                "name": "get_weather",
                "arguments": '{"location": "NYC"}',
            },
            {
                "type": "function_call_output",
                "call_id": "fc_call_456",
                "output": "NYC weather is 68°F with light rain",
            },
        ],
    )

    assert len(request.input) == 2
    call, output = request.input

    # The call is converted to the typed object…
    assert isinstance(call, ResponseFunctionToolCall)
    assert call.call_id == "fc_call_456"
    assert call.name == "get_weather"

    # …while its output remains a raw dict.
    assert isinstance(output, dict)
    assert output["type"] == "function_call_output"
    assert output["call_id"] == "fc_call_456"
    assert output["output"] == "NYC weather is 68°F with light rain"
|
||||
|
||||
|
||||
def test_function_call_validation_failure_logs_debug(caplog):
    """Parsing failures for function_call items are logged at debug level."""
    from unittest.mock import patch

    payload = {
        "model": "gpt-oss",
        "input": [
            {
                "type": "function_call",
                "name": "incomplete_function",
                "arguments": "{}",  # call_id deliberately missing
            }
        ],
    }

    # Patch the protocol module's logger so we can observe the debug call
    # made while the invalid item is being handled.
    with patch("vllm.entrypoints.openai.protocol.logger") as mock_logger:
        with pytest.raises(ValueError):
            ResponsesRequest(**payload)

        mock_logger.debug.assert_called_once()
        message = mock_logger.debug.call_args[0][0]
        assert "Failed to parse function_call" in message
|
||||
|
||||
|
||||
def test_validator_handles_iterator_input():
    """Test that the validator can handle iterator input (Pydantic internal).

    Pydantic may hand the validator a ValidatorIterator rather than a list for
    complex nested structures (reasoning + function_call); the validator must
    materialize it instead of failing.

    Fix over the original: the whole body was wrapped in a broad
    ``try/except Exception`` that re-raised via ``pytest.fail``, which hides
    the real traceback. Pytest already reports any exception raised in a test,
    so the wrapper is removed and failures now point at the failing line.
    """
    input_items = [
        {
            "type": "message",
            "role": "user",
            "content": [{"type": "input_text", "text": "Test"}],
        },
        {
            "type": "reasoning",
            "id": "rs_1",
            "summary": [{"type": "summary_text", "text": "Test reasoning"}],
            "content": [{"type": "reasoning_text", "text": "Test content"}],
        },
        {
            "type": "function_call",
            "call_id": "call_1",
            "name": "test_function",
            "arguments": '{"test": "value"}',
            "id": "fc_1",
        },
    ]

    # Simulate Pydantic passing a ValidatorIterator by supplying a plain
    # iterator instead of a list.
    request = ResponsesRequest(model="test-model", input=iter(input_items))

    # The iterator must be materialized: all three items survive.
    assert len(request.input) == 3

    # The function_call entry must have been converted to the typed object.
    function_call_item = next(
        (item for item in request.input if isinstance(item, ResponseFunctionToolCall)),
        None,
    )
    assert function_call_item is not None
    assert function_call_item.call_id == "call_1"
    assert function_call_item.name == "test_function"
|
||||
|
||||
|
||||
def test_validator_handles_empty_iterator():
    """An empty iterator input normalizes to an empty list."""
    request = ResponsesRequest(model="test-model", input=iter([]))

    assert request.input == []
|
||||
1345
tests/entrypoints/openai/responses/test_harmony.py
Normal file
1345
tests/entrypoints/openai/responses/test_harmony.py
Normal file
File diff suppressed because it is too large
Load Diff
352
tests/entrypoints/openai/responses/test_mcp_tools.py
Normal file
352
tests/entrypoints/openai/responses/test_mcp_tools.py
Normal file
@@ -0,0 +1,352 @@
|
||||
# SPDX-License-Identifier: Apache-2.0
|
||||
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
|
||||
|
||||
|
||||
import pytest
|
||||
import pytest_asyncio
|
||||
from openai import OpenAI
|
||||
from openai_harmony import ToolDescription, ToolNamespaceConfig
|
||||
|
||||
from vllm.entrypoints.tool_server import MCPToolServer
|
||||
|
||||
from ....utils import RemoteOpenAIServer
|
||||
|
||||
MODEL_NAME = "openai/gpt-oss-20b"
|
||||
|
||||
|
||||
def test_get_tool_description():
    """Test MCPToolServer.get_tool_description filtering logic.

    Note: The wildcard "*" is normalized to None by
    _extract_allowed_tools_from_mcp_requests before reaching this layer,
    so we only test None and specific tool filtering here.
    See test_serving_responses.py for "*" normalization tests.
    """
    pytest.importorskip("mcp")

    server = MCPToolServer()
    tool1, tool2, tool3 = (
        ToolDescription.new(name=name, description=desc, parameters={"type": "object"})
        for name, desc in (("tool1", "First"), ("tool2", "Second"), ("tool3", "Third"))
    )
    server.harmony_tool_descriptions = {
        "test_server": ToolNamespaceConfig(
            name="test_server", description="test", tools=[tool1, tool2, tool3]
        )
    }

    # Unknown server label -> None.
    assert server.get_tool_description("nonexistent") is None

    # No filter: every tool comes back.
    result = server.get_tool_description("test_server", allowed_tools=None)
    assert len(result.tools) == 3

    # Subset filter preserves declaration order.
    result = server.get_tool_description(
        "test_server", allowed_tools=["tool1", "tool3"]
    )
    assert len(result.tools) == 2
    assert result.tools[0].name == "tool1"
    assert result.tools[1].name == "tool3"

    # Single-tool filter.
    result = server.get_tool_description(
        "test_server",
        allowed_tools=["tool2"],
    )
    assert len(result.tools) == 1
    assert result.tools[0].name == "tool2"

    # No matching tools -> None.
    result = server.get_tool_description("test_server", allowed_tools=["nonexistent"])
    assert result is None

    # Empty allow-list -> None.
    assert server.get_tool_description("test_server", allowed_tools=[]) is None
|
||||
|
||||
|
||||
class TestMCPEnabled:
    """Tests that require MCP tools to be enabled via environment variable."""

    @pytest.fixture(scope="class")
    def monkeypatch_class(self):
        # Class-scoped monkeypatch: pytest's built-in fixture is
        # function-scoped, so build one by hand and undo it at teardown.
        from _pytest.monkeypatch import MonkeyPatch

        mpatch = MonkeyPatch()
        yield mpatch
        mpatch.undo()

    @pytest.fixture(scope="class")
    def mcp_enabled_server(self, monkeypatch_class: pytest.MonkeyPatch):
        args = ["--enforce-eager", "--tool-server", "demo"]

        with monkeypatch_class.context() as m:
            m.setenv("VLLM_ENABLE_RESPONSES_API_STORE", "1")
            m.setenv("PYTHON_EXECUTION_BACKEND", "dangerously_use_uv")
            m.setenv(
                "VLLM_GPT_OSS_SYSTEM_TOOL_MCP_LABELS", "code_interpreter,container"
            )
            # Helps the model follow instructions better
            m.setenv("VLLM_GPT_OSS_HARMONY_SYSTEM_INSTRUCTIONS", "1")
            with RemoteOpenAIServer(MODEL_NAME, args) as remote_server:
                yield remote_server

    @pytest_asyncio.fixture
    async def mcp_enabled_client(self, mcp_enabled_server):
        async with mcp_enabled_server.get_async_client() as async_client:
            yield async_client

    @pytest.mark.asyncio
    @pytest.mark.parametrize("model_name", [MODEL_NAME])
    async def test_mcp_tool_env_flag_enabled(
        self, mcp_enabled_client: OpenAI, model_name: str
    ):
        response = await mcp_enabled_client.responses.create(
            model=model_name,
            input=(
                "Execute the following code: "
                "import random; print(random.randint(1, 1000000))"
            ),
            instructions=(
                "You must use the Python tool to execute code. "
                "Never simulate execution."
            ),
            tools=[
                {
                    "type": "mcp",
                    "server_label": "code_interpreter",
                    # URL unused for DemoToolServer
                    "server_url": "http://localhost:8888",
                }
            ],
            extra_body={"enable_response_messages": True},
        )
        assert response is not None
        assert response.status == "completed"

        # Scan the output messages for at least one Python tool call and one
        # Python tool response, both on the analysis channel.
        saw_tool_call = False
        saw_tool_response = False
        for message in response.output_messages:
            recipient = message.get("recipient")
            if recipient and recipient.startswith("python"):
                saw_tool_call = True
                assert message.get("channel") == "analysis", (
                    "Tool call should be on analysis channel"
                )
            author = message.get("author", {})
            if (
                author.get("role") == "tool"
                and author.get("name")
                and author.get("name").startswith("python")
            ):
                saw_tool_response = True
                assert message.get("channel") == "analysis", (
                    "Tool response should be on analysis channel"
                )

        assert saw_tool_call, "Should have found at least one Python tool call"
        assert saw_tool_response, (
            "Should have found at least one Python tool response"
        )
        for message in response.input_messages:
            assert message.get("author").get("role") != "developer", (
                "No developer messages should be present with valid mcp tool"
            )

    @pytest.mark.asyncio
    @pytest.mark.parametrize("model_name", [MODEL_NAME])
    async def test_mcp_tool_with_allowed_tools_star(
        self, mcp_enabled_client: OpenAI, model_name: str
    ):
        """Test MCP tool with allowed_tools=['*'] to select all available
        tools.

        This E2E test verifies that the "*" wildcard works end-to-end.
        See test_serving_responses.py for detailed unit tests of "*"
        normalization.
        """
        response = await mcp_enabled_client.responses.create(
            model=model_name,
            input=(
                "Execute the following code: "
                "import random; print(random.randint(1, 1000000))"
            ),
            instructions=(
                "You must use the Python tool to execute code. "
                "Never simulate execution."
            ),
            tools=[
                {
                    "type": "mcp",
                    "server_label": "code_interpreter",
                    "server_url": "http://localhost:8888",
                    # Using "*" to allow all tools from this MCP server
                    "allowed_tools": ["*"],
                }
            ],
            extra_body={"enable_response_messages": True},
        )
        assert response is not None
        assert response.status == "completed"

        # A single Python tool call is enough to prove the wildcard worked.
        saw_tool_call = any(
            (recipient := message.get("recipient")) and recipient.startswith("python")
            for message in response.output_messages
        )
        assert saw_tool_call, (
            "Should have found at least one Python tool call with '*'"
        )

    @pytest.mark.flaky(reruns=3)
    @pytest.mark.asyncio
    @pytest.mark.parametrize("model_name", [MODEL_NAME])
    async def test_mcp_tool_calling_streaming_types(
        self, mcp_enabled_client: OpenAI, model_name: str
    ):
        # Each ".done"-style event must close the matching ".added"-style
        # event that most recently opened it (stack discipline).
        pairs_of_event_types = {
            "response.completed": "response.created",
            "response.output_item.done": "response.output_item.added",
            "response.content_part.done": "response.content_part.added",
            "response.output_text.done": "response.output_text.delta",
            "response.reasoning_text.done": "response.reasoning_text.delta",
            "response.reasoning_part.done": "response.reasoning_part.added",
            "response.mcp_call_arguments.done": ("response.mcp_call_arguments.delta"),
            "response.mcp_call.completed": "response.mcp_call.in_progress",
        }

        tools = [
            {
                "type": "mcp",
                "server_label": "code_interpreter",
            }
        ]
        input_text = "What is 13 * 24? Use python to calculate the result."

        stream_response = await mcp_enabled_client.responses.create(
            model=model_name,
            input=input_text,
            tools=tools,
            stream=True,
            instructions=(
                "You must use the Python tool to execute code. "
                "Never simulate execution."
            ),
        )

        event_stack = []
        saw_mcp_type = False
        async for event in stream_response:
            if event.type == "response.created":
                event_stack.append(event.type)
            elif event.type == "response.completed":
                assert event_stack[-1] == pairs_of_event_types[event.type]
                event_stack.pop()
            elif (
                event.type.endswith("added")
                or event.type == "response.mcp_call.in_progress"
            ):
                event_stack.append(event.type)
            elif event.type.endswith("delta"):
                # Consecutive deltas of the same kind collapse to one entry.
                if event_stack[-1] == event.type:
                    continue
                event_stack.append(event.type)
            elif (
                event.type.endswith("done")
                or event.type == "response.mcp_call.completed"
            ):
                assert event_stack[-1] == pairs_of_event_types[event.type]
                if "mcp_call" in event.type:
                    saw_mcp_type = True
                event_stack.pop()

        # Every opened event must have been closed.
        assert len(event_stack) == 0
        assert saw_mcp_type, "Should have seen at least one mcp call"
|
||||
|
||||
|
||||
class TestMCPDisabled:
    """Tests that verify behavior when MCP tools are disabled."""

    @pytest.fixture(scope="class")
    def monkeypatch_class(self):
        # Class-scoped monkeypatch: pytest's built-in fixture is
        # function-scoped, so build one by hand and undo it at teardown.
        from _pytest.monkeypatch import MonkeyPatch

        mpatch = MonkeyPatch()
        yield mpatch
        mpatch.undo()

    @pytest.fixture(scope="class")
    def mcp_disabled_server(self, monkeypatch_class: pytest.MonkeyPatch):
        args = ["--enforce-eager", "--tool-server", "demo"]

        # Note: VLLM_GPT_OSS_SYSTEM_TOOL_MCP_LABELS is deliberately NOT set,
        # which is what disables the MCP tools.
        with monkeypatch_class.context() as m:
            m.setenv("VLLM_ENABLE_RESPONSES_API_STORE", "1")
            m.setenv("PYTHON_EXECUTION_BACKEND", "dangerously_use_uv")
            # Helps the model follow instructions better
            m.setenv("VLLM_GPT_OSS_HARMONY_SYSTEM_INSTRUCTIONS", "1")
            with RemoteOpenAIServer(MODEL_NAME, args) as remote_server:
                yield remote_server

    @pytest_asyncio.fixture
    async def mcp_disabled_client(self, mcp_disabled_server):
        async with mcp_disabled_server.get_async_client() as async_client:
            yield async_client

    @pytest.mark.asyncio
    @pytest.mark.parametrize("model_name", [MODEL_NAME])
    async def test_mcp_tool_env_flag_disabled(
        self, mcp_disabled_client: OpenAI, model_name: str
    ):
        response = await mcp_disabled_client.responses.create(
            model=model_name,
            input=(
                "Execute the following code if the tool is present: "
                "import random; print(random.randint(1, 1000000))"
            ),
            tools=[
                {
                    "type": "mcp",
                    "server_label": "code_interpreter",
                    # URL unused for DemoToolServer
                    "server_url": "http://localhost:8888",
                }
            ],
            extra_body={"enable_response_messages": True},
        )
        assert response is not None
        assert response.status == "completed"

        # With MCP disabled there must be no Python tool calls or responses.
        saw_tool_call = False
        saw_tool_response = False
        for message in response.output_messages:
            recipient = message.get("recipient")
            if recipient and recipient.startswith("python"):
                saw_tool_call = True
                assert message.get("channel") == "analysis", (
                    "Tool call should be on analysis channel"
                )
            author = message.get("author", {})
            if (
                author.get("role") == "tool"
                and author.get("name")
                and author.get("name").startswith("python")
            ):
                saw_tool_response = True
                assert message.get("channel") == "analysis", (
                    "Tool response should be on analysis channel"
                )

        assert not saw_tool_call, "Should not have a python call"
        assert not saw_tool_response, "Should not have a tool response"
        for message in response.input_messages:
            assert message.get("author").get("role") != "developer", (
                "No developer messages should be present without a valid tool"
            )
|
||||
201
tests/entrypoints/openai/responses/test_parsable_context.py
Normal file
201
tests/entrypoints/openai/responses/test_parsable_context.py
Normal file
@@ -0,0 +1,201 @@
|
||||
# SPDX-License-Identifier: Apache-2.0
|
||||
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
|
||||
|
||||
import importlib
|
||||
import json
|
||||
|
||||
import pytest
|
||||
import pytest_asyncio
|
||||
from openai import OpenAI
|
||||
|
||||
from ....utils import RemoteOpenAIServer
|
||||
|
||||
MODEL_NAME = "Qwen/Qwen3-8B"
|
||||
|
||||
|
||||
@pytest.fixture(scope="module")
def server():
    """Module-scoped RemoteOpenAIServer running Qwen3-8B with the
    experimental parser context and demo tool server enabled.

    Fix over the original: the file only does ``import importlib``, which does
    not guarantee that the ``importlib.util`` submodule is bound as an
    attribute — accessing ``importlib.util.find_spec`` can raise
    AttributeError depending on what else has been imported. Import the
    submodule explicitly here.
    """
    import importlib.util

    assert importlib.util.find_spec("gpt_oss") is not None, (
        "Harmony tests require gpt_oss package to be installed"
    )

    args = [
        "--reasoning-parser",
        "qwen3",
        "--max_model_len",
        "5000",
        "--structured-outputs-config.backend",
        "xgrammar",
        "--enable-auto-tool-choice",
        "--tool-call-parser",
        "hermes",
        "--tool-server",
        "demo",
    ]
    env_dict = dict(
        VLLM_ENABLE_RESPONSES_API_STORE="1",
        VLLM_USE_EXPERIMENTAL_PARSER_CONTEXT="1",
        PYTHON_EXECUTION_BACKEND="dangerously_use_uv",
    )

    with RemoteOpenAIServer(MODEL_NAME, args, env_dict=env_dict) as remote_server:
        yield remote_server
|
||||
|
||||
|
||||
@pytest_asyncio.fixture
async def client(server):
    """Async OpenAI client bound to the module-scoped server fixture."""
    async with server.get_async_client() as async_client:
        yield async_client
|
||||
|
||||
|
||||
@pytest.mark.asyncio
@pytest.mark.parametrize("model_name", [MODEL_NAME])
async def test_basic(client: OpenAI, model_name: str):
    """A plain single-turn request completes without truncation."""
    response = await client.responses.create(
        model=model_name,
        input="What is 13 * 24?",
    )
    assert response is not None
    print("response: ", response)  # surfaces the payload in CI logs on failure
    assert response.status == "completed"
    assert response.incomplete_details is None
|
||||
|
||||
|
||||
@pytest.mark.asyncio
@pytest.mark.parametrize("model_name", [MODEL_NAME])
async def test_reasoning_and_function_items(client: OpenAI, model_name: str):
    """A multi-turn history containing reasoning, a function call, and its
    output is accepted and produces reasoning plus a text message."""
    response = await client.responses.create(
        model=model_name,
        input=[
            {"type": "message", "content": "Hello.", "role": "user"},
            {
                "type": "reasoning",
                "id": "lol",
                "content": [
                    {
                        "type": "reasoning_text",
                        "text": "We need to respond: greeting.",
                    }
                ],
                "summary": [],
            },
            {
                "arguments": '{"location": "Paris", "unit": "celsius"}',
                "call_id": "call_5f7b38f3b81e4b8380fd0ba74f3ca3ab",
                "name": "get_weather",
                "type": "function_call",
                "id": "fc_4fe5d6fc5b6c4d6fa5f24cc80aa27f78",
                "status": "completed",
            },
            {
                "call_id": "call_5f7b38f3b81e4b8380fd0ba74f3ca3ab",
                "id": "fc_4fe5d6fc5b6c4d6fa5f24cc80aa27f78",
                "output": "The weather in Paris is 20 Celsius",
                "status": "completed",
                "type": "function_call_output",
            },
        ],
        temperature=0.0,
    )
    assert response is not None
    assert response.status == "completed"

    # Expect a reasoning item followed by a plain text message.
    assert response.output[0].type == "reasoning"
    assert response.output[1].type == "message"
    assert type(response.output[1].content[0].text) is str
|
||||
|
||||
|
||||
def get_horoscope(sign):
    """Canned horoscope stub used as the tool backing ``get_horoscope``."""
    prediction = "Next Tuesday you will befriend a baby otter."
    return f"{sign}: {prediction}"
|
||||
|
||||
|
||||
def call_function(name, args):
    """Dispatch a tool call by *name*; only ``get_horoscope`` is known.

    Raises ValueError for any other tool name.
    """
    if name != "get_horoscope":
        raise ValueError(f"Unknown function: {name}")
    return get_horoscope(**args)
|
||||
|
||||
|
||||
@pytest.mark.asyncio
@pytest.mark.parametrize("model_name", [MODEL_NAME])
async def test_function_call_first_turn(client: OpenAI, model_name: str):
    """First turn with a declared tool must yield reasoning + a function call."""
    horoscope_tool = {
        "type": "function",
        "name": "get_horoscope",
        "description": "Get today's horoscope for an astrological sign.",
        "parameters": {
            "type": "object",
            "properties": {
                "sign": {"type": "string"},
            },
            "required": ["sign"],
            "additionalProperties": False,
        },
        "strict": True,
    }

    response = await client.responses.create(
        model=model_name,
        input="What is the horoscope for Aquarius today?",
        tools=[horoscope_tool],
        temperature=0.0,
    )
    assert response is not None
    assert response.status == "completed"
    # Exactly two items: the reasoning trace, then the tool invocation.
    assert len(response.output) == 2
    assert response.output[0].type == "reasoning"
    assert response.output[1].type == "function_call"

    function_call = response.output[1]
    assert function_call.name == "get_horoscope"
    assert function_call.call_id is not None

    # The arguments must be valid JSON containing the required field.
    args = json.loads(function_call.arguments)
    assert "sign" in args
|
||||
|
||||
# the multi turn function call is tested above in
|
||||
# test_reasoning_and_function_items
|
||||
|
||||
|
||||
@pytest.mark.asyncio
@pytest.mark.parametrize("model_name", [MODEL_NAME])
async def test_mcp_tool_call(client: OpenAI, model_name: str):
    """End-to-end code-interpreter tool call, including raw message capture."""
    response = await client.responses.create(
        model=model_name,
        input="What is 13 * 24? Use python to calculate the result.",
        tools=[{"type": "code_interpreter", "container": {"type": "auto"}}],
        extra_body={"enable_response_messages": True},
        temperature=0.0,
    )

    assert response is not None
    assert response.status == "completed"

    # Expected item sequence: reasoning -> tool call -> reasoning -> message.
    output_items = response.output
    assert output_items[0].type == "reasoning"
    tool_call = output_items[1]
    assert tool_call.type == "mcp_call"
    assert type(tool_call.arguments) is str
    assert type(tool_call.output) is str
    assert output_items[2].type == "reasoning"
    # make sure the correct math is in the final output
    assert output_items[3].type == "message"
    assert "312" in output_items[3].content[0].text

    # test raw input_messages / output_messages
    assert len(response.input_messages) == 1
    assert len(response.output_messages) == 3
    assert "312" in response.output_messages[2]["message"]
|
||||
|
||||
|
||||
@pytest.mark.asyncio
@pytest.mark.parametrize("model_name", [MODEL_NAME])
async def test_max_tokens(client: OpenAI, model_name: str):
    """A tight max_output_tokens budget must produce an incomplete response."""
    request_kwargs = {
        "model": model_name,
        "input": "What is the first paragraph of Moby Dick?",
        "reasoning": {"effort": "low"},
        "max_output_tokens": 30,
    }
    response = await client.responses.create(**request_kwargs)
    assert response is not None
    # Truncation is surfaced via status + incomplete_details, not an error.
    assert response.status == "incomplete"
    assert response.incomplete_details.reason == "max_output_tokens"
|
||||
149
tests/entrypoints/openai/responses/test_simple.py
Normal file
149
tests/entrypoints/openai/responses/test_simple.py
Normal file
@@ -0,0 +1,149 @@
|
||||
# SPDX-License-Identifier: Apache-2.0
|
||||
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
|
||||
|
||||
|
||||
import pytest
|
||||
import pytest_asyncio
|
||||
from openai import OpenAI
|
||||
|
||||
from ....utils import RemoteOpenAIServer
|
||||
|
||||
MODEL_NAME = "Qwen/Qwen3-8B"
|
||||
|
||||
|
||||
@pytest.fixture(scope="module")
|
||||
def server():
|
||||
args = ["--reasoning-parser", "qwen3", "--max_model_len", "5000"]
|
||||
env_dict = dict(
|
||||
VLLM_ENABLE_RESPONSES_API_STORE="1",
|
||||
# uncomment for tool calling
|
||||
# PYTHON_EXECUTION_BACKEND="dangerously_use_uv",
|
||||
)
|
||||
|
||||
with RemoteOpenAIServer(MODEL_NAME, args, env_dict=env_dict) as remote_server:
|
||||
yield remote_server
|
||||
|
||||
|
||||
@pytest_asyncio.fixture
async def client(server):
    """Async OpenAI client bound to the module-scoped test server."""
    async with server.get_async_client() as c:
        yield c
|
||||
|
||||
|
||||
@pytest.mark.asyncio
@pytest.mark.parametrize("model_name", [MODEL_NAME])
async def test_basic(client: OpenAI, model_name: str):
    """Smoke test: a plain text prompt completes without truncation."""
    request_kwargs = {
        "model": model_name,
        "input": "What is 13 * 24?",
    }
    response = await client.responses.create(**request_kwargs)
    assert response is not None
    print("response: ", response)
    # A successful run finishes with no incomplete-details payload.
    assert response.status == "completed"
    assert response.incomplete_details is None
|
||||
|
||||
|
||||
@pytest.mark.asyncio
@pytest.mark.parametrize("model_name", [MODEL_NAME])
async def test_enable_response_messages(client: OpenAI, model_name: str):
    """With enable_response_messages, raw token-level messages come back."""
    response = await client.responses.create(
        model=model_name,
        input="Hello?",
        extra_body={"enable_response_messages": True},
    )
    assert response.status == "completed"

    # Raw prompt-side message: typed, non-trivial text, integer token ids.
    first_input = response.input_messages[0]
    assert first_input["type"] == "raw_message_tokens"
    assert type(first_input["message"]) is str
    assert len(first_input["message"]) > 10
    assert type(first_input["tokens"][0]) is int

    # Raw completion-side message mirrors the same structure.
    first_output = response.output_messages[0]
    assert type(first_output["message"]) is str
    assert len(first_output["message"]) > 10
    assert type(first_output["tokens"][0]) is int
|
||||
|
||||
|
||||
@pytest.mark.asyncio
@pytest.mark.parametrize("model_name", [MODEL_NAME])
async def test_reasoning_item(client: OpenAI, model_name: str):
    """Feeding a prior reasoning item back as input must still complete."""
    conversation = [
        {"type": "message", "content": "Hello.", "role": "user"},
        # Reasoning item from a previous turn, replayed verbatim.
        {
            "type": "reasoning",
            "id": "lol",
            "content": [
                {
                    "type": "reasoning_text",
                    "text": "We need to respond: greeting.",
                }
            ],
            "summary": [],
        },
    ]

    response = await client.responses.create(
        model=model_name,
        input=conversation,
        temperature=0.0,
    )
    assert response is not None
    assert response.status == "completed"
    # make sure we get a reasoning and text output
    assert response.output[0].type == "reasoning"
    assert response.output[1].type == "message"
    assert type(response.output[1].content[0].text) is str
|
||||
|
||||
|
||||
@pytest.mark.asyncio
@pytest.mark.parametrize("model_name", [MODEL_NAME])
async def test_streaming_output_consistency(client: OpenAI, model_name: str):
    """Test that streaming delta text matches the final response output_text.

    This test verifies that when using streaming mode:
    1. The concatenated text from all 'response.output_text.delta' events
    2. Matches the 'output_text' in the final 'response.completed' event
    """
    stream = await client.responses.create(
        model=model_name,
        input="Say hello in one sentence.",
        stream=True,
    )

    events = [event async for event in stream]
    assert len(events) > 0

    # Stitch every incremental text delta back together in order.
    delta_chunks = [
        e.delta for e in events if e.type == "response.output_text.delta"
    ]
    streaming_text = "".join(delta_chunks)

    # The stream must terminate with a completed-response event.
    final_event = events[-1]
    assert final_event.type == "response.completed"
    assert final_event.response.status == "completed"

    final_output_text = final_event.response.output_text

    # The final response must actually carry output items.
    assert len(final_event.response.output) > 0

    # Accumulated deltas and the final text must agree exactly.
    assert streaming_text == final_output_text, (
        f"Streaming text does not match final output_text.\n"
        f"Streaming: {streaming_text!r}\n"
        f"Final: {final_output_text!r}"
    )
|
||||
|
||||
|
||||
@pytest.mark.asyncio
@pytest.mark.parametrize("model_name", [MODEL_NAME])
async def test_max_tokens(client: OpenAI, model_name: str):
    """A tight max_output_tokens budget must produce an incomplete response."""
    request_kwargs = {
        "model": model_name,
        "input": "What is the first paragraph of Moby Dick?",
        "reasoning": {"effort": "low"},
        "max_output_tokens": 30,
    }
    response = await client.responses.create(**request_kwargs)
    assert response is not None
    # Truncation is surfaced via status + incomplete_details, not an error.
    assert response.status == "incomplete"
    assert response.incomplete_details.reason == "max_output_tokens"
|
||||
Reference in New Issue
Block a user