[CI/Build] Use Common Event Map Fixture in Harmony / MCP Server Tests (#32531)
Signed-off-by: Alex-Brooks <Alex.Brooks@ibm.com>
tests/entrypoints/openai/responses/conftest.py | 30 (new file)
@@ -0,0 +1,30 @@
+# SPDX-License-Identifier: Apache-2.0
+# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
+import pytest
+
+
+@pytest.fixture
+def pairs_of_event_types() -> dict[str, str]:
+    """Links the 'done' event type with the corresponding 'start' event type.
+
+    This mapping should link all done <-> start events; if tests mean to
+    restrict the allowed events, they should filter this fixture to avoid
+    copy + paste errors in the mappings or unexpected KeyErrors due to missing
+    events.
+    """
+    # fmt: off
+    event_pairs = {
+        "response.completed": "response.created",
+        "response.output_item.done": "response.output_item.added",
+        "response.content_part.done": "response.content_part.added",
+        "response.output_text.done": "response.output_text.delta",
+        "response.reasoning_text.done": "response.reasoning_text.delta",
+        "response.reasoning_part.done": "response.reasoning_part.added",
+        "response.mcp_call_arguments.done": "response.mcp_call_arguments.delta",
+        "response.mcp_call.completed": "response.mcp_call.in_progress",
+        "response.function_call_arguments.done": "response.function_call_arguments.delta", # noqa: E501
+        "response.code_interpreter_call_code.done": "response.code_interpreter_call_code.delta", # noqa: E501
+        "response.web_search_call.completed": "response.web_search_call.in_progress",
+    }
+    # fmt: on
+    return event_pairs
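The fixture's docstring tells narrower tests to filter this shared map rather than copy it. Below is a minimal sketch of how a test module might do that with a derived fixture; the fixture name text_only_event_pairs and the chosen subset of events are illustrative assumptions, not something introduced by this commit.

# Hypothetical test module (names are illustrative, not part of this change):
# restrict the shared fixture to text-only events instead of re-declaring a
# smaller mapping by hand.
import pytest


@pytest.fixture
def text_only_event_pairs(pairs_of_event_types: dict[str, str]) -> dict[str, str]:
    allowed_done_events = {
        "response.completed",
        "response.output_item.done",
        "response.content_part.done",
        "response.output_text.done",
    }
    # Fail loudly if the shared map ever drops one of the events we rely on,
    # rather than silently testing against a smaller set.
    missing = allowed_done_events - pairs_of_event_types.keys()
    assert not missing, f"events missing from the shared fixture: {missing}"
    return {
        done: start
        for done, start in pairs_of_event_types.items()
        if done in allowed_done_events
    }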
@@ -280,24 +280,13 @@ async def test_stateful_multi_turn(client: OpenAI, model_name: str):
 
 @pytest.mark.asyncio
 @pytest.mark.parametrize("model_name", [MODEL_NAME])
-async def test_streaming_types(client: OpenAI, model_name: str):
+async def test_streaming_types(
+    pairs_of_event_types: dict[str, str], client: OpenAI, model_name: str
+):
     prompts = [
         "tell me a story about a cat in 20 words",
     ]
 
-    # this links the "done" type with the "start" type
-    # so every "done" type should have a corresponding "start" type
-    # and every open block should be closed by the end of the stream
-    pairs_of_event_types = {
-        "response.completed": "response.created",
-        "response.output_item.done": "response.output_item.added",
-        "response.content_part.done": "response.content_part.added",
-        "response.output_text.done": "response.output_text.delta",
-        "response.web_search_call.done": "response.web_search_call.added",
-        "response.reasoning_text.done": "response.reasoning_text.delta",
-        "response.reasoning_part.done": "response.reasoning_part.added",
-    }
-
     for prompt in prompts:
         response = await client.responses.create(
             model=model_name,
@@ -329,19 +318,9 @@ async def test_streaming_types(client: OpenAI, model_name: str):
 
 @pytest.mark.asyncio
 @pytest.mark.parametrize("model_name", [MODEL_NAME])
-async def test_function_calling_with_streaming_types(client: OpenAI, model_name: str):
-    # this links the "done" type with the "start" type
-    # so every "done" type should have a corresponding "start" type
-    # and every open block should be closed by the end of the stream
-    pairs_of_event_types = {
-        "response.completed": "response.created",
-        "response.output_item.done": "response.output_item.added",
-        "response.output_text.done": "response.output_text.delta",
-        "response.reasoning_text.done": "response.reasoning_text.delta",
-        "response.reasoning_part.done": "response.reasoning_part.added",
-        "response.function_call_arguments.done": "response.function_call_arguments.delta", # noqa
-    }
-
+async def test_function_calling_with_streaming_types(
+    pairs_of_event_types: dict[str, str], client: OpenAI, model_name: str
+):
     tools = [GET_WEATHER_SCHEMA]
     input_list = [
         {
@@ -210,19 +210,11 @@ class TestMCPEnabled:
     @pytest.mark.asyncio
     @pytest.mark.parametrize("model_name", [MODEL_NAME])
     async def test_mcp_tool_calling_streaming_types(
-        self, mcp_enabled_client: OpenAI, model_name: str
+        self,
+        pairs_of_event_types: dict[str, str],
+        mcp_enabled_client: OpenAI,
+        model_name: str,
     ):
-        pairs_of_event_types = {
-            "response.completed": "response.created",
-            "response.output_item.done": "response.output_item.added",
-            "response.content_part.done": "response.content_part.added",
-            "response.output_text.done": "response.output_text.delta",
-            "response.reasoning_text.done": "response.reasoning_text.delta",
-            "response.reasoning_part.done": "response.reasoning_part.added",
-            "response.mcp_call_arguments.done": ("response.mcp_call_arguments.delta"),
-            "response.mcp_call.completed": "response.mcp_call.in_progress",
-        }
-
         tools = [
             {
                 "type": "mcp",
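For context on how such a mapping is consumed: the tests touched above stream a Responses API call and then check that every "done"-style event was opened by its paired "added"/"delta"/"in_progress" event. The sketch below is an assumed, simplified version of that check, not code from this PR; the helper name and the Counter-based bookkeeping are illustrative, and it only relies on each SDK stream event exposing a .type string.

# Assumed helper (not from this PR): validate that a streamed Responses call
# closes every block it opens, using the done -> start mapping fixture.
from collections import Counter
from typing import Any, AsyncIterable


async def assert_stream_is_balanced(
    stream: AsyncIterable[Any], pairs_of_event_types: dict[str, str]
) -> None:
    counts: Counter[str] = Counter()
    async for event in stream:
        # Each OpenAI SDK stream event carries its type as a string, e.g.
        # "response.output_text.delta".
        counts[event.type] += 1

    for done_type, start_type in pairs_of_event_types.items():
        if counts[done_type]:
            # Every "done" needs at least one matching opening event
            # earlier in the stream.
            assert counts[start_type] >= counts[done_type], (
                f"saw {done_type} without a matching {start_type}"
            )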