[Frontend] OpenAI Responses API supports Tool/Function calling - non-harmony (#26874)
Signed-off-by: chaunceyjiang <chaunceyjiang@gmail.com>
tests/v1/entrypoints/openai/serving_responses/conftest.py (new file, 41 lines)
@@ -0,0 +1,41 @@
# SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
import pytest
import pytest_asyncio

from tests.utils import RemoteOpenAIServer

# Use a small reasoning model to test the responses API.
MODEL_NAME = "Qwen/Qwen3-1.7B"


@pytest.fixture(scope="module")
def default_server_args():
    return [
        "--max-model-len",
        "8192",
        "--enforce-eager",  # For faster startup.
        "--enable-auto-tool-choice",
        "--structured-outputs-config.backend",
        "xgrammar",
        "--tool-call-parser",
        "hermes",
        "--reasoning-parser",
        "qwen3",
    ]


@pytest.fixture(scope="module")
def server_with_store(default_server_args):
    with RemoteOpenAIServer(
        MODEL_NAME,
        default_server_args,
        env_dict={"VLLM_ENABLE_RESPONSES_API_STORE": "1"},
    ) as remote_server:
        yield remote_server


@pytest_asyncio.fixture
async def client(server_with_store):
    async with server_with_store.get_async_client() as async_client:
        yield async_client
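The fixtures above wrap a `vllm serve` subprocess via `RemoteOpenAIServer`. Outside the test harness, the same endpoints can be exercised with a plain OpenAI client. A minimal sketch, assuming a server already running locally with the arguments above; the URL and placeholder API key are assumptions for a default local deployment, not part of this commit:

import asyncio

import openai

# Assumed local endpoint of a vLLM server launched with the fixture arguments.
client = openai.AsyncOpenAI(base_url="http://localhost:8000/v1", api_key="EMPTY")


async def main() -> None:
    # The Responses API mirrors the official OpenAI client surface.
    response = await client.responses.create(
        model="Qwen/Qwen3-1.7B",
        input="What is 13 * 24?",
    )
    print(response.output_text)


asyncio.run(main())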
tests/v1/entrypoints/openai/serving_responses/test_basic.py (new file, 93 lines)
@@ -0,0 +1,93 @@
# SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project

import openai  # use the official client for correctness check
import openai.types.responses as openai_responses_types
import pytest


@pytest.mark.asyncio
async def test_simple_input(client: openai.AsyncOpenAI):
    response = await client.responses.create(input="What is 13 * 24?")
    print(response)

    outputs = response.output
    # Whether the output contains the answer.
    assert outputs[-1].type == "message"
    assert "312" in outputs[-1].content[0].text

    # Whether the output contains the reasoning.
    assert outputs[0].type == "reasoning"
    assert outputs[0].content[0].text != ""


@pytest.mark.asyncio
async def test_instructions(client: openai.AsyncOpenAI):
    response = await client.responses.create(
        instructions="Finish the answer with QED.",
        input="What is 13 * 24?",
    )
    print(response)

    output_text = response.output[-1].content[0].text
    assert "312" in output_text
    assert "QED" in output_text


@pytest.mark.asyncio
async def test_chat(client: openai.AsyncOpenAI):
    response = await client.responses.create(
        input=[
            {"role": "system", "content": "Finish the answer with QED."},
            {"role": "user", "content": "What is 5 * 3?"},
            {"role": "assistant", "content": "15. QED."},
            {"role": "user", "content": "Multiply the result by 2."},
        ],
    )
    print(response)

    output_text = response.output[-1].content[0].text
    assert "30" in output_text
    assert "QED" in output_text


@pytest.mark.asyncio
async def test_chat_with_input_type(client: openai.AsyncOpenAI):
    response = await client.responses.create(
        input=[
            {
                "role": "user",
                "content": [{"type": "input_text", "text": "Hello!"}],
            },
        ],
    )
    print(response)
    assert response.status == "completed"


@pytest.mark.asyncio
async def test_logprobs(client: openai.AsyncOpenAI):
    response = await client.responses.create(
        include=["message.output_text.logprobs"],
        input="What is 13 * 24?",
        top_logprobs=5,
    )
    print(response)
    outputs = response.output
    assert outputs[-1].content[-1].logprobs
    assert len(outputs[-1].content[-1].logprobs[0].top_logprobs) == 5


@pytest.mark.asyncio
async def test_streaming(client: openai.AsyncOpenAI):
    stream = await client.responses.create(
        input="What is 13 * 24?",
        stream=True,
    )
    events = [event async for event in stream]
    assert isinstance(events[0], openai_responses_types.ResponseCreatedEvent)
    assert any(
        isinstance(event, openai_responses_types.ResponseTextDeltaEvent)
        for event in events
    )
    assert isinstance(events[-1], openai_responses_types.ResponseCompletedEvent)
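`test_streaming` buffers the whole event list before asserting; in an application the text deltas would normally be consumed incrementally. A minimal sketch of that pattern, using the same event types from the official `openai` package (the helper name is illustrative):

import openai
import openai.types.responses as openai_responses_types


async def stream_answer(client: openai.AsyncOpenAI) -> str:
    """Print text deltas as they arrive and return the accumulated answer."""
    stream = await client.responses.create(input="What is 13 * 24?", stream=True)
    chunks: list[str] = []
    async for event in stream:
        if isinstance(event, openai_responses_types.ResponseTextDeltaEvent):
            chunks.append(event.delta)
            print(event.delta, end="", flush=True)
    return "".join(chunks)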
@@ -0,0 +1,198 @@
# SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project

import json

import openai  # use the official client for correctness check
import pytest

MODEL_NAME = "Qwen/Qwen3-1.7B"
tools = [
    {
        "type": "function",
        "name": "get_current_weather",
        "description": "Get the current weather in a given location",
        "parameters": {
            "type": "object",
            "properties": {
                "city": {
                    "type": "string",
                    "description": "The city to find the weather for, e.g. 'Vienna'",
                    "default": "Vienna",
                },
                "country": {
                    "type": "string",
                    "description": "The country that the city is in, e.g. 'Austria'",
                },
                "unit": {
                    "type": "string",
                    "description": "The unit to fetch the temperature in",
                    "enum": ["celsius", "fahrenheit"],
                },
                "options": {
                    "$ref": "#/$defs/WeatherOptions",
                    "description": "Optional parameters for weather query",
                },
            },
            "required": ["country", "unit"],
            "$defs": {
                "WeatherOptions": {
                    "title": "WeatherOptions",
                    "type": "object",
                    "additionalProperties": False,
                    "properties": {
                        "unit": {
                            "type": "string",
                            "enum": ["celsius", "fahrenheit"],
                            "default": "celsius",
                            "description": "Temperature unit",
                            "title": "Temperature Unit",
                        },
                        "include_forecast": {
                            "type": "boolean",
                            "default": False,
                            "description": "Whether to include a 24-hour forecast",
                            "title": "Include Forecast",
                        },
                        "language": {
                            "type": "string",
                            "default": "zh-CN",
                            "description": "Language of the response",
                            "title": "Language",
                            "enum": ["zh-CN", "en-US", "ja-JP"],
                        },
                    },
                },
            },
        },
    },
    {
        "type": "function",
        "name": "get_forecast",
        "description": "Get the weather forecast for a given location",
        "parameters": {
            "type": "object",
            "properties": {
                "city": {
                    "type": "string",
                    "description": "The city to get the forecast for, e.g. 'Vienna'",
                    "default": "Vienna",
                },
                "country": {
                    "type": "string",
                    "description": "The country that the city is in, e.g. 'Austria'",
                },
                "days": {
                    "type": "integer",
                    "description": "Number of days to get the forecast for (1-7)",
                },
                "unit": {
                    "type": "string",
                    "description": "The unit to fetch the temperature in",
                    "enum": ["celsius", "fahrenheit"],
                },
            },
            "required": ["country", "days", "unit"],
        },
    },
]
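Note the tool schema shape used here: the Responses API declares function tools flat, with `type`, `name`, `description`, and `parameters` at the top level, whereas the Chat Completions API nests the same fields under a `function` key. A side-by-side sketch of the two shapes (abbreviated parameters for clarity):

# Responses API tool declaration (flat), as used in this file.
responses_tool = {
    "type": "function",
    "name": "get_current_weather",
    "description": "Get the current weather in a given location",
    "parameters": {"type": "object", "properties": {}},
}

# Equivalent Chat Completions declaration (nested under "function").
chat_completions_tool = {
    "type": "function",
    "function": {
        "name": "get_current_weather",
        "description": "Get the current weather in a given location",
        "parameters": {"type": "object", "properties": {}},
    },
}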
@pytest.mark.asyncio
@pytest.mark.parametrize("model_name", [MODEL_NAME])
@pytest.mark.parametrize("tool_choice", ["auto", "required"])
async def test_function_tool_use(
    client: openai.AsyncOpenAI, model_name: str, tool_choice: str
):
    prompt = [
        {
            "role": "user",
            "content": "Can you tell me what the current weather is in Berlin and the "
            "forecast for the next 5 days, in fahrenheit?",
        },
    ]
    response = await client.responses.create(
        model=model_name,
        input=prompt,
        tools=tools,
        tool_choice=tool_choice,
    )

    assert len(response.output) >= 1
    tool_call = None
    reasoning = None
    for out in response.output:
        if out.type == "function_call":
            tool_call = out
        if out.type == "reasoning":
            reasoning = out
    assert tool_call is not None
    assert tool_call.type == "function_call"
    assert json.loads(tool_call.arguments) is not None
    assert reasoning is not None
    assert reasoning.type == "reasoning"


@pytest.mark.asyncio
async def test_named_tool_use(client: openai.AsyncOpenAI):
    def get_weather(latitude: float, longitude: float) -> str:
        """
        Mock function to simulate getting weather data.
        In a real application, this would call an external weather API.
        """
        return f"Current temperature at ({latitude}, {longitude}) is 20°C."

    tools = [
        {
            "type": "function",
            "name": "get_weather",
            "description": (
                "Get current temperature for provided coordinates in celsius."
            ),
            "parameters": {
                "type": "object",
                "properties": {
                    "latitude": {"type": "number"},
                    "longitude": {"type": "number"},
                },
                "required": ["latitude", "longitude"],
                "additionalProperties": False,
            },
            "strict": True,
        }
    ]

    input_messages = [
        {"role": "user", "content": "What's the weather like in Paris today?"}
    ]

    response = await client.responses.create(
        model=MODEL_NAME,
        input=input_messages,
        tools=tools,
        tool_choice={"type": "function", "name": "get_weather"},
    )
    assert len(response.output) >= 1
    # Initialize before the loop so a missing function_call fails the assert
    # below instead of raising NameError.
    tool_call = None
    for out in response.output:
        if out.type == "function_call":
            tool_call = out
    assert tool_call is not None
    assert tool_call.type == "function_call"
    assert tool_call.name == "get_weather"
    args = json.loads(tool_call.arguments)
    assert args["latitude"] is not None
    assert args["longitude"] is not None
    # call the tool
    result = get_weather(args["latitude"], args["longitude"])
    input_messages.append(tool_call)  # append model's function call message
    input_messages.append(
        {  # append result message
            "type": "function_call_output",
            "call_id": tool_call.call_id,
            "output": str(result),
        }
    )
    # create a new response with the tool call result
    response_2 = await client.responses.create(model=MODEL_NAME, input=input_messages)
    # check the output
    assert len(response_2.output_text) > 0
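`test_named_tool_use` hard-codes a single round trip: one forced call, one `function_call_output`, one follow-up response. A generic agent loop over the same API would keep executing tool calls until the model emits a plain message; a minimal sketch, assuming a `handlers` dict mapping tool names to local Python callables (the helper and its parameters are illustrative, not part of the commit):

import json

import openai


async def run_tool_loop(
    client: openai.AsyncOpenAI, model, input_messages, tools, handlers, max_turns=5
):
    """Drive the model until it stops requesting tool calls."""
    for _ in range(max_turns):
        response = await client.responses.create(
            model=model, input=input_messages, tools=tools
        )
        calls = [o for o in response.output if o.type == "function_call"]
        if not calls:
            return response  # final answer, no more tool use
        for call in calls:
            result = handlers[call.name](**json.loads(call.arguments))
            input_messages.append(call)
            input_messages.append(
                {
                    "type": "function_call_output",
                    "call_id": call.call_id,
                    "output": str(result),
                }
            )
    raise RuntimeError("Tool loop did not converge")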
tests/v1/entrypoints/openai/serving_responses/test_image.py (new file, 171 lines)
@@ -0,0 +1,171 @@
# SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project

import json

import openai
import pytest
import pytest_asyncio

from tests.utils import RemoteOpenAIServer
from vllm.multimodal.utils import encode_image_base64

# Use a small vision model for testing
MODEL_NAME = "Qwen/Qwen2.5-VL-3B-Instruct"
MAXIMUM_IMAGES = 2
# Test different image extensions (JPG/PNG) and formats (gray/RGB/RGBA)
TEST_IMAGE_ASSETS = [
    "2560px-Gfp-wisconsin-madison-the-nature-boardwalk.jpg",  # "https://upload.wikimedia.org/wikipedia/commons/thumb/d/dd/Gfp-wisconsin-madison-the-nature-boardwalk.jpg/2560px-Gfp-wisconsin-madison-the-nature-boardwalk.jpg"
    "Grayscale_8bits_palette_sample_image.png",  # "https://upload.wikimedia.org/wikipedia/commons/f/fa/Grayscale_8bits_palette_sample_image.png",
    "1280px-Venn_diagram_rgb.svg.png",  # "https://upload.wikimedia.org/wikipedia/commons/thumb/9/91/Venn_diagram_rgb.svg/1280px-Venn_diagram_rgb.svg.png",
    "RGBA_comp.png",  # "https://upload.wikimedia.org/wikipedia/commons/0/0b/RGBA_comp.png",
]


@pytest.fixture(scope="module")
def default_image_server_args():
    return [
        "--enforce-eager",
        "--max-model-len",
        "6000",
        "--max-num-seqs",
        "128",
        "--limit-mm-per-prompt",
        json.dumps({"image": MAXIMUM_IMAGES}),
    ]


@pytest.fixture(scope="module")
def image_server(default_image_server_args):
    with RemoteOpenAIServer(
        MODEL_NAME,
        default_image_server_args,
        env_dict={"VLLM_ENABLE_RESPONSES_API_STORE": "1"},
    ) as remote_server:
        yield remote_server


@pytest_asyncio.fixture
async def client(image_server):
    async with image_server.get_async_client() as async_client:
        yield async_client


@pytest.fixture(scope="session")
def base64_encoded_image(local_asset_server) -> dict[str, str]:
    return {
        image_url: encode_image_base64(local_asset_server.get_image_asset(image_url))
        for image_url in TEST_IMAGE_ASSETS
    }


@pytest.mark.asyncio
@pytest.mark.parametrize("model_name", [MODEL_NAME])
@pytest.mark.parametrize("image_url", TEST_IMAGE_ASSETS, indirect=True)
async def test_single_chat_session_image(
    client: openai.AsyncOpenAI, model_name: str, image_url: str
):
    content_text = "What's in this image?"
    messages = [
        {
            "role": "user",
            "content": [
                {
                    "type": "input_image",
                    "image_url": image_url,
                    "detail": "auto",
                },
                {"type": "input_text", "text": content_text},
            ],
        }
    ]

    # test image url
    response = await client.responses.create(
        model=model_name,
        input=messages,
    )
    assert len(response.output_text) > 0


@pytest.mark.asyncio
@pytest.mark.parametrize("model_name", [MODEL_NAME])
@pytest.mark.parametrize("raw_image_url", TEST_IMAGE_ASSETS)
async def test_single_chat_session_image_base64encoded(
    client: openai.AsyncOpenAI,
    model_name: str,
    raw_image_url: str,
    base64_encoded_image: dict[str, str],
):
    content_text = "What's in this image?"
    messages = [
        {
            "role": "user",
            "content": [
                {
                    "type": "input_image",
                    "image_url": f"data:image/jpeg;base64,{base64_encoded_image[raw_image_url]}",  # noqa: E501
                    "detail": "auto",
                },
                {"type": "input_text", "text": content_text},
            ],
        }
    ]
    # test image base64
    response = await client.responses.create(
        model=model_name,
        input=messages,
    )
    assert len(response.output_text) > 0


@pytest.mark.asyncio
@pytest.mark.parametrize("model_name", [MODEL_NAME])
@pytest.mark.parametrize(
    "image_urls",
    [TEST_IMAGE_ASSETS[:i] for i in range(2, len(TEST_IMAGE_ASSETS))],
    indirect=True,
)
async def test_multi_image_input(
    client: openai.AsyncOpenAI, model_name: str, image_urls: list[str]
):
    messages = [
        {
            "role": "user",
            "content": [
                *(
                    {
                        "type": "input_image",
                        "image_url": image_url,
                        "detail": "auto",
                    }
                    for image_url in image_urls
                ),
                {"type": "input_text", "text": "What's in this image?"},
            ],
        }
    ]

    if len(image_urls) > MAXIMUM_IMAGES:
        with pytest.raises(openai.BadRequestError):  # test multi-image input
            await client.responses.create(
                model=model_name,
                input=messages,
            )
        # the server should still work afterwards
        response = await client.responses.create(
            model=model_name,
            input=[
                {
                    "role": "user",
                    "content": "What's the weather like in Paris today?",
                }
            ],
        )
        assert len(response.output_text) > 0
    else:
        response = await client.responses.create(
            model=model_name,
            input=messages,
        )
        assert len(response.output_text) > 0
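The base64 test builds `data:` URLs from pre-encoded test assets. For reference, the same encoding from an arbitrary local file looks like this (a hypothetical helper, not part of the commit; the MIME type should match the actual image format):

import base64
from pathlib import Path


def to_data_url(path: str, mime: str = "image/jpeg") -> str:
    """Encode a local image file as a data URL usable in an input_image item."""
    encoded = base64.b64encode(Path(path).read_bytes()).decode("utf-8")
    return f"data:{mime};base64,{encoded}"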
tests/v1/entrypoints/openai/serving_responses/test_stateful.py (new file, 139 lines)
@@ -0,0 +1,139 @@
# SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
import asyncio

import openai
import pytest


@pytest.mark.asyncio
async def test_store(client: openai.AsyncOpenAI):
    # By default, store is True.
    response = await client.responses.create(input="Hello!")
    assert response.status == "completed"

    # Retrieve the response.
    response = await client.responses.retrieve(response.id)
    assert response.status == "completed"

    # Test store=False.
    response = await client.responses.create(
        input="Hello!",
        store=False,
    )
    assert response.status == "completed"

    # The response should not be found.
    with pytest.raises(openai.NotFoundError, match="Response with id .* not found."):
        await client.responses.retrieve(response.id)


@pytest.mark.asyncio
async def test_background(client: openai.AsyncOpenAI):
    # NOTE: This query should be easy enough for the model to answer
    # within 10 seconds.
    response = await client.responses.create(
        input="Hello!",
        background=True,
    )
    assert response.status == "queued"

    max_retries = 10
    for _ in range(max_retries):
        await asyncio.sleep(1)
        response = await client.responses.retrieve(response.id)
        if response.status != "queued":
            break
    print(response)

    assert response.status == "completed"


@pytest.mark.asyncio
async def test_background_error(client: openai.AsyncOpenAI):
    with pytest.raises(
        openai.BadRequestError, match="background can only be used when `store` is true"
    ):
        _ = await client.responses.create(
            input="What is 13 * 24?",
            background=True,
            store=False,
        )


@pytest.mark.asyncio
async def test_background_cancel(client: openai.AsyncOpenAI):
    response = await client.responses.create(
        input="Write a long story about a cat.",
        background=True,
    )
    assert response.status == "queued"

    # Cancel the response before it is completed.
    # FIXME: This test can be flaky.
    await asyncio.sleep(0.5)
    response = await client.responses.cancel(response.id)
    assert response.status == "cancelled"

    # Make sure the response status remains unchanged.
    await asyncio.sleep(5)
    response = await client.responses.retrieve(response.id)
    assert response.status == "cancelled"


@pytest.mark.asyncio
async def test_cancel_completed(client: openai.AsyncOpenAI):
    response = await client.responses.create(input="Hello")
    assert response.status == "completed"

    with pytest.raises(
        openai.BadRequestError, match="Cannot cancel a synchronous response."
    ):
        await client.responses.cancel(response.id)


@pytest.mark.asyncio
async def test_previous_response_id(client: openai.AsyncOpenAI):
    response1 = await client.responses.create(
        instructions="You are tested on your ability to retrieve the correct "
        "information from the previous response.",
        input="Hello, my name is John.",
    )

    response2 = await client.responses.create(
        input="Actually, my name is not John. My real name is Mark.",
        previous_response_id=response1.id,
    )

    response3 = await client.responses.create(
        input="What is my real name again? Answer in one word.",
        previous_response_id=response2.id,
    )
    print(response3)
    assert "Mark" in response3.output[-1].content[0].text
    assert "John" not in response3.output[-1].content[0].text


@pytest.mark.asyncio
async def test_two_responses_with_same_prev_id(client: openai.AsyncOpenAI):
    response1 = await client.responses.create(
        instructions="You are tested on your ability to retrieve the correct "
        "information from the previous response.",
        input="Hello, my name is John.",
    )

    # Both response 2 and 3 use response 1 as the previous response.
    response2 = client.responses.create(
        input="Actually, my name is not John. My name is Mark.",
        previous_response_id=response1.id,
    )
    response3 = client.responses.create(
        input="What is my name again? Answer in one word.",
        previous_response_id=response1.id,
    )

    _ = await response2
    response3_result = await response3
    print(response3_result)
    assert "John" in response3_result.output[-1].content[0].text
    assert "Mark" not in response3_result.output[-1].content[0].text
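`test_background` polls with a fixed ten-iteration loop. A reusable variant with an explicit deadline might look like this (a sketch; the helper name and timeout are illustrative):

import asyncio

import openai


async def wait_for_background(
    client: openai.AsyncOpenAI, response_id: str, timeout: float = 30.0
):
    """Poll a background response until it leaves the queued/in_progress states."""
    deadline = asyncio.get_running_loop().time() + timeout
    while asyncio.get_running_loop().time() < deadline:
        response = await client.responses.retrieve(response_id)
        if response.status not in ("queued", "in_progress"):
            return response
        await asyncio.sleep(1.0)
    raise TimeoutError(f"Response {response_id} not finished after {timeout}s")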
@@ -0,0 +1,78 @@
# SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
import json

import openai
import pytest
from pydantic import BaseModel


@pytest.mark.asyncio
async def test_structured_output(client: openai.AsyncOpenAI):
    response = await client.responses.create(
        input=[
            {"role": "system", "content": "Extract the event information."},
            {
                "role": "user",
                "content": "Alice and Bob are going to a science fair on Friday.",
            },
        ],
        text={
            "format": {
                "type": "json_schema",
                "name": "calendar_event",
                "schema": {
                    "type": "object",
                    "properties": {
                        "event_name": {"type": "string"},
                        "date": {"type": "string"},
                        "participants": {"type": "array", "items": {"type": "string"}},
                    },
                    "required": ["event_name", "date", "participants"],
                    "additionalProperties": False,
                },
                "description": "A calendar event.",
                "strict": True,
            }
        },
    )
    print(response)

    # NOTE: The JSON schema is applied to the output text, not reasoning.
    output_text = response.output[-1].content[0].text
    event = json.loads(output_text)

    assert event["event_name"].lower() == "science fair"
    assert event["date"] == "Friday"
    participants = event["participants"]
    assert len(participants) == 2
    assert participants[0] == "Alice"
    assert participants[1] == "Bob"


@pytest.mark.asyncio
async def test_structured_output_with_parse(client: openai.AsyncOpenAI):
    class CalendarEvent(BaseModel):
        event_name: str
        date: str
        participants: list[str]

    response = await client.responses.parse(
        model=None,
        instructions="Extract the event information.",
        input="Alice and Bob are going to a science fair on Friday.",
        text_format=CalendarEvent,
    )
    print(response)

    # The output is successfully parsed.
    event = response.output_parsed
    assert event is not None

    # The output is correct.
    assert event.event_name.lower() == "science fair"
    assert event.date == "Friday"
    participants = event.participants
    assert len(participants) == 2
    assert participants[0] == "Alice"
    assert participants[1] == "Bob"
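The two tests constrain the output in equivalent ways: the first hand-writes the JSON schema, while `responses.parse` derives it from the pydantic model. The hand-written payload could also be generated from the model directly; a sketch assuming pydantic v2's `model_json_schema` (the explicit `additionalProperties` line is an assumption about what strict mode requires, since pydantic does not emit it by default):

from pydantic import BaseModel


class CalendarEvent(BaseModel):
    event_name: str
    date: str
    participants: list[str]


# Build the text.format payload from the model instead of writing it by hand.
schema = CalendarEvent.model_json_schema()
schema["additionalProperties"] = False  # strict mode expects this to be set
text_format = {
    "format": {
        "type": "json_schema",
        "name": "calendar_event",
        "schema": schema,
        "strict": True,
    }
}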