Convert formatting to use ruff instead of yapf + isort (#26247)

Signed-off-by: Harry Mellor <19981378+hmellor@users.noreply.github.com>
Author: Harry Mellor
Date: 2025-10-05 15:06:22 +01:00
Committed by: GitHub
Parent: 17edd8a807
Commit: d6953beb91
1508 changed files with 115244 additions and 94146 deletions

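For context on what this commit does mechanically: a yapf + isort to ruff migration typically swaps the formatter and import-sorter configuration in pyproject.toml for a ruff section, then reformats the whole tree in one sweep, which is what produces the large mechanical diff below. The settings and commands here are an illustrative sketch of that pattern, not copied from this commit's actual pyproject.toml changes:

    # Hypothetical pyproject.toml sketch -- assumed, not taken from this commit
    [tool.ruff]
    line-length = 88

    [tool.ruff.lint]
    extend-select = ["I"]  # the "I" rules give ruff isort-compatible import sorting

    # Typical one-shot reformat commands (assumed invocation):
    #   ruff check --fix .   # apply lint fixes, including import sorting (replaces isort)
    #   ruff format .        # Black-style formatting: double quotes, collapsed dicts (replaces yapf)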

@@ -17,6 +17,7 @@ MODEL_NAME = "openai/gpt-oss-20b"
 @pytest.fixture(scope="module")
 def monkeypatch_module():
     from _pytest.monkeypatch import MonkeyPatch
+
     mpatch = MonkeyPatch()
     yield mpatch
     mpatch.undo()
@@ -94,22 +95,10 @@ async def test_chat(client: OpenAI, model_name: str):
     response = await client.responses.create(
         model=model_name,
         input=[
-            {
-                "role": "system",
-                "content": "Respond in Korean."
-            },
-            {
-                "role": "user",
-                "content": "Hello!"
-            },
-            {
-                "role": "assistant",
-                "content": "Hello! How can I help you today?"
-            },
-            {
-                "role": "user",
-                "content": "What is 13 * 24? Explain your answer."
-            },
+            {"role": "system", "content": "Respond in Korean."},
+            {"role": "user", "content": "Hello!"},
+            {"role": "assistant", "content": "Hello! How can I help you today?"},
+            {"role": "user", "content": "What is 13 * 24? Explain your answer."},
         ],
     )
     assert response is not None
@@ -124,10 +113,7 @@ async def test_chat_with_input_type(client: OpenAI, model_name: str):
         input=[
             {
                 "role": "user",
-                "content": [{
-                    "type": "input_text",
-                    "text": "What is 13*24?"
-                }],
+                "content": [{"type": "input_text", "text": "What is 13*24?"}],
             },
         ],
     )
@@ -141,14 +127,10 @@ async def test_structured_output(client: OpenAI, model_name: str):
     response = await client.responses.create(
         model=model_name,
         input=[
-            {
-                "role": "system",
-                "content": "Extract the event information."
-            },
+            {"role": "system", "content": "Extract the event information."},
             {
                 "role": "user",
-                "content":
-                "Alice and Bob are going to a science fair on Friday.",
+                "content": "Alice and Bob are going to a science fair on Friday.",
             },
         ],
         text={
@@ -158,18 +140,9 @@ async def test_structured_output(client: OpenAI, model_name: str):
                 "schema": {
                     "type": "object",
                     "properties": {
-                        "name": {
-                            "type": "string"
-                        },
-                        "date": {
-                            "type": "string"
-                        },
-                        "participants": {
-                            "type": "array",
-                            "items": {
-                                "type": "string"
-                            }
-                        },
+                        "name": {"type": "string"},
+                        "date": {"type": "string"},
+                        "participants": {"type": "array", "items": {"type": "string"}},
                     },
                     "required": ["name", "date", "participants"],
                     "additionalProperties": False,
@@ -319,11 +292,10 @@ async def test_streaming_types(client: OpenAI, model_name: str):
 
     stack_of_event_types = []
     async for event in response:
-        if event.type == 'response.created':
+        if event.type == "response.created":
             stack_of_event_types.append(event.type)
-        elif event.type == 'response.completed':
-            assert stack_of_event_types[-1] == pairs_of_event_types[
-                event.type]
+        elif event.type == "response.completed":
+            assert stack_of_event_types[-1] == pairs_of_event_types[event.type]
             stack_of_event_types.pop()
         if event.type.endswith("added"):
             stack_of_event_types.append(event.type)
@@ -332,8 +304,7 @@ async def test_streaming_types(client: OpenAI, model_name: str):
                 continue
             stack_of_event_types.append(event.type)
         elif event.type.endswith("done"):
-            assert stack_of_event_types[-1] == pairs_of_event_types[
-                event.type]
+            assert stack_of_event_types[-1] == pairs_of_event_types[event.type]
             stack_of_event_types.pop()
 
     assert len(stack_of_event_types) == 0
@@ -381,11 +352,12 @@ async def test_streaming(client: OpenAI, model_name: str, background: bool):
 
         # test vllm custom types are in the response
         if event.type in [
-                "response.completed", "response.in_progress",
-                "response.created"
+            "response.completed",
+            "response.in_progress",
+            "response.created",
         ]:
-            assert 'input_messages' in event.response.model_extra
-            assert 'output_messages' in event.response.model_extra
+            assert "input_messages" in event.response.model_extra
+            assert "output_messages" in event.response.model_extra
 
         if current_event_mode != event.type:
             current_event_mode = event.type
@@ -396,21 +368,21 @@ async def test_streaming(client: OpenAI, model_name: str, background: bool):
         if event.type == "response.output_item.added":
             assert event.item.id != current_item_id
             current_item_id = event.item.id
         elif event.type in [
-                "response.output_text.delta",
-                "response.reasoning_text.delta"
+            "response.output_text.delta",
+            "response.reasoning_text.delta",
         ]:
             assert event.item_id == current_item_id
 
         # verify content_index_id is correct
         if event.type in [
-                "response.content_part.added",
-                "response.reasoning_part.added"
+            "response.content_part.added",
+            "response.reasoning_part.added",
         ]:
             assert event.content_index != current_content_index
             current_content_index = event.content_index
         elif event.type in [
-                "response.output_text.delta",
-                "response.reasoning_text.delta"
+            "response.output_text.delta",
+            "response.reasoning_text.delta",
         ]:
             assert event.content_index == current_content_index
@@ -420,8 +392,10 @@ async def test_streaming(client: OpenAI, model_name: str, background: bool):
             print(f"{event.delta}", end="", flush=True)
         elif "response.code_interpreter_call_code.done" in event.type:
             print(f"Code: {event.code}", end="", flush=True)
-        elif ("response.output_item.added" in event.type
-              and event.item.type == "web_search_call"):
+        elif (
+            "response.output_item.added" in event.type
+            and event.item.type == "web_search_call"
+        ):
             print(f"Web search: {event.item.action}", end="", flush=True)
         events.append(event)
 
@@ -432,9 +406,8 @@ async def test_streaming(client: OpenAI, model_name: str, background: bool):
     if background:
         starting_after = 5
         async with await client.responses.retrieve(
-                response_id=resp_id,
-                stream=True,
-                starting_after=starting_after) as stream:
+            response_id=resp_id, stream=True, starting_after=starting_after
+        ) as stream:
             counter = starting_after
             async for event in stream:
                 counter += 1
@@ -448,9 +421,7 @@ async def test_web_search(client: OpenAI, model_name: str):
     response = await client.responses.create(
         model=model_name,
         input="Who is the president of South Korea as of now?",
-        tools=[{
-            "type": "web_search_preview"
-        }],
+        tools=[{"type": "web_search_preview"}],
     )
     assert response is not None
     assert response.status == "completed"
@@ -465,16 +436,13 @@ async def test_code_interpreter(client: OpenAI, model_name: str):
         # TODO: Ideally should be able to set max tool calls
         # to prevent multi-turn, but it is not currently supported
         # would speed up the test
-        input=("What's the first 4 digits after the decimal point of "
-               "cube root of `19910212 * 20250910`? "
-               "Show only the digits. The python interpreter is not stateful "
-               "and you must print to see the output."),
-        tools=[{
-            "type": "code_interpreter",
-            "container": {
-                "type": "auto"
-            }
-        }],
+        input=(
+            "What's the first 4 digits after the decimal point of "
+            "cube root of `19910212 * 20250910`? "
+            "Show only the digits. The python interpreter is not stateful "
+            "and you must print to see the output."
+        ),
+        tools=[{"type": "code_interpreter", "container": {"type": "auto"}}],
     )
     assert response is not None
     assert response.status == "completed"
@@ -505,26 +473,23 @@ def call_function(name, args):
 @pytest.mark.asyncio
 @pytest.mark.parametrize("model_name", [MODEL_NAME])
 async def test_function_calling(client: OpenAI, model_name: str):
-    tools = [{
-        "type": "function",
-        "name": "get_weather",
-        "description":
-        "Get current temperature for provided coordinates in celsius.",  # noqa
-        "parameters": {
-            "type": "object",
-            "properties": {
-                "latitude": {
-                    "type": "number"
-                },
-                "longitude": {
-                    "type": "number"
-                },
-            },
-            "required": ["latitude", "longitude"],
-            "additionalProperties": False,
-        },
-        "strict": True,
-    }]
+    tools = [
+        {
+            "type": "function",
+            "name": "get_weather",
+            "description": "Get current temperature for provided coordinates in celsius.",  # noqa
+            "parameters": {
+                "type": "object",
+                "properties": {
+                    "latitude": {"type": "number"},
+                    "longitude": {"type": "number"},
+                },
+                "required": ["latitude", "longitude"],
+                "additionalProperties": False,
+            },
+            "strict": True,
+        }
+    ]
 
     response = await client.responses.create(
         model=model_name,
@@ -547,11 +512,13 @@ async def test_function_calling(client: OpenAI, model_name: str):
 
     response_2 = await client.responses.create(
         model=model_name,
-        input=[{
-            "type": "function_call_output",
-            "call_id": tool_call.call_id,
-            "output": str(result),
-        }],
+        input=[
+            {
+                "type": "function_call_output",
+                "call_id": tool_call.call_id,
+                "output": str(result),
+            }
+        ],
         tools=tools,
         previous_response_id=response.id,
     )
@@ -591,17 +558,12 @@ async def test_function_calling_multi_turn(client: OpenAI, model_name: str):
         {
             "type": "function",
             "name": "get_weather",
-            "description":
-            "Get current temperature for provided coordinates in celsius.",  # noqa
+            "description": "Get current temperature for provided coordinates in celsius.",  # noqa
             "parameters": {
                 "type": "object",
                 "properties": {
-                    "latitude": {
-                        "type": "number"
-                    },
-                    "longitude": {
-                        "type": "number"
-                    },
+                    "latitude": {"type": "number"},
+                    "longitude": {"type": "number"},
                 },
                 "required": ["latitude", "longitude"],
                 "additionalProperties": False,
@@ -612,8 +574,7 @@ async def test_function_calling_multi_turn(client: OpenAI, model_name: str):
 
     response = await client.responses.create(
         model=model_name,
-        input=
-        "Help me plan a trip to a random place. And tell me the weather there.",
+        input="Help me plan a trip to a random place. And tell me the weather there.",
         tools=tools,
     )
     assert response is not None
@@ -630,11 +591,13 @@ async def test_function_calling_multi_turn(client: OpenAI, model_name: str):
 
     response_2 = await client.responses.create(
         model=model_name,
-        input=[{
-            "type": "function_call_output",
-            "call_id": tool_call.call_id,
-            "output": str(result),
-        }],
+        input=[
+            {
+                "type": "function_call_output",
+                "call_id": tool_call.call_id,
+                "output": str(result),
+            }
+        ],
         tools=tools,
         previous_response_id=response.id,
     )
@@ -652,11 +615,13 @@ async def test_function_calling_multi_turn(client: OpenAI, model_name: str):
 
     response_3 = await client.responses.create(
         model=model_name,
-        input=[{
-            "type": "function_call_output",
-            "call_id": tool_call.call_id,
-            "output": str(result),
-        }],
+        input=[
+            {
+                "type": "function_call_output",
+                "call_id": tool_call.call_id,
+                "output": str(result),
+            }
+        ],
         tools=tools,
         previous_response_id=response_2.id,
     )
@@ -668,26 +633,23 @@ async def test_function_calling_multi_turn(client: OpenAI, model_name: str):
 @pytest.mark.asyncio
 @pytest.mark.parametrize("model_name", [MODEL_NAME])
 async def test_function_calling_required(client: OpenAI, model_name: str):
-    tools = [{
-        "type": "function",
-        "name": "get_weather",
-        "description":
-        "Get current temperature for provided coordinates in celsius.",  # noqa
-        "parameters": {
-            "type": "object",
-            "properties": {
-                "latitude": {
-                    "type": "number"
-                },
-                "longitude": {
-                    "type": "number"
-                },
-            },
-            "required": ["latitude", "longitude"],
-            "additionalProperties": False,
-        },
-        "strict": True,
-    }]
+    tools = [
+        {
+            "type": "function",
+            "name": "get_weather",
+            "description": "Get current temperature for provided coordinates in celsius.",  # noqa
+            "parameters": {
+                "type": "object",
+                "properties": {
+                    "latitude": {"type": "number"},
+                    "longitude": {"type": "number"},
+                },
+                "required": ["latitude", "longitude"],
+                "additionalProperties": False,
+            },
+            "strict": True,
+        }
+    ]
 
     with pytest.raises(BadRequestError):
         await client.responses.create(
@@ -717,31 +679,27 @@ async def test_system_message_with_tools(client: OpenAI, model_name: str):
 @pytest.mark.asyncio
 @pytest.mark.parametrize("model_name", [MODEL_NAME])
 async def test_function_calling_full_history(client: OpenAI, model_name: str):
-    tools = [{
-        "type": "function",
-        "name": "get_weather",
-        "description":
-        "Get current temperature for provided coordinates in celsius.",  # noqa
-        "parameters": {
-            "type": "object",
-            "properties": {
-                "latitude": {
-                    "type": "number"
-                },
-                "longitude": {
-                    "type": "number"
-                },
-            },
-            "required": ["latitude", "longitude"],
-            "additionalProperties": False,
-        },
-        "strict": True,
-    }]
+    tools = [
+        {
+            "type": "function",
+            "name": "get_weather",
+            "description": "Get current temperature for provided coordinates in celsius.",  # noqa
+            "parameters": {
+                "type": "object",
+                "properties": {
+                    "latitude": {"type": "number"},
+                    "longitude": {"type": "number"},
+                },
+                "required": ["latitude", "longitude"],
+                "additionalProperties": False,
+            },
+            "strict": True,
+        }
+    ]
 
-    input_messages = [{
-        "role": "user",
-        "content": "What's the weather like in Paris today?"
-    }]
+    input_messages = [
+        {"role": "user", "content": "What's the weather like in Paris today?"}
+    ]
 
     response = await client.responses.create(
         model=model_name,
@@ -758,8 +716,7 @@ async def test_function_calling_full_history(client: OpenAI, model_name: str):
     args = json.loads(tool_call.arguments)
 
     result = call_function(name, args)
-    input_messages.extend(
-        response.output)  # append model's function call message
+    input_messages.extend(response.output)  # append model's function call message
     input_messages.append(
         {  # append result message
             "type": "function_call_output",
@@ -780,12 +737,12 @@ async def test_function_calling_full_history(client: OpenAI, model_name: str):
 
 @pytest.mark.asyncio
 @pytest.mark.parametrize("model_name", [MODEL_NAME])
-async def test_output_messages_enabled(client: OpenAI, model_name: str,
-                                       server):
+async def test_output_messages_enabled(client: OpenAI, model_name: str, server):
     response = await client.responses.create(
         model=model_name,
         input="What is the capital of South Korea?",
-        extra_body={"enable_response_messages": True})
+        extra_body={"enable_response_messages": True},
+    )
     assert response is not None
     assert response.status == "completed"