Convert formatting to use ruff instead of yapf + isort (#26247)

Signed-off-by: Harry Mellor <19981378+hmellor@users.noreply.github.com>
Author:       Harry Mellor
Date:         2025-10-05 15:06:22 +01:00
Committed by: GitHub
Parent:       17edd8a807
Commit:       d6953beb91

1508 changed files with 115244 additions and 94146 deletions
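Note: this migration replaces two tools (yapf for formatting, isort for import sorting) with one. For orientation, a minimal pyproject.toml sketch of such a setup is shown below; the concrete options vLLM adopted are not part of this diff, so every value here is an assumption, not the project's actual configuration.

```toml
# Illustrative only: ruff acting as the single formatter and import sorter.
# vLLM's real settings may differ from these assumed values.
[tool.ruff]
line-length = 88  # black-compatible default (assumed)

[tool.ruff.lint]
# The "I" rules enable isort-style import sorting, replacing a separate isort run.
extend-select = ["I"]

[tool.ruff.format]
quote-style = "double"  # consistent with the double quotes in the hunks below
```

With a configuration like this, `ruff format` takes over from yapf and `ruff check --fix` applies the import sorting previously handled by isort; the black-style reflowing visible in every hunk below (4-space continuations, magic trailing commas, collapsed short literals) is the output of `ruff format`.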


@@ -23,9 +23,9 @@ def default_server_args():
 @pytest.fixture(scope="module")
 def server_with_store(default_server_args):
     with RemoteOpenAIServer(
-            MODEL_NAME,
-            default_server_args,
-            env_dict={"VLLM_ENABLE_RESPONSES_API_STORE": "1"},
+        MODEL_NAME,
+        default_server_args,
+        env_dict={"VLLM_ENABLE_RESPONSES_API_STORE": "1"},
     ) as remote_server:
         yield remote_server


@@ -36,24 +36,14 @@ async def test_instructions(client: openai.AsyncOpenAI):
 @pytest.mark.asyncio
 async def test_chat(client: openai.AsyncOpenAI):
-    response = await client.responses.create(input=[
-        {
-            "role": "system",
-            "content": "Finish the answer with QED."
-        },
-        {
-            "role": "user",
-            "content": "What is 5 * 3?"
-        },
-        {
-            "role": "assistant",
-            "content": "15. QED."
-        },
-        {
-            "role": "user",
-            "content": "Multiply the result by 2."
-        },
-    ], )
+    response = await client.responses.create(
+        input=[
+            {"role": "system", "content": "Finish the answer with QED."},
+            {"role": "user", "content": "What is 5 * 3?"},
+            {"role": "assistant", "content": "15. QED."},
+            {"role": "user", "content": "Multiply the result by 2."},
+        ],
+    )
     print(response)
     output_text = response.output[-1].content[0].text
@@ -63,15 +53,14 @@ async def test_chat(client: openai.AsyncOpenAI):
 @pytest.mark.asyncio
 async def test_chat_with_input_type(client: openai.AsyncOpenAI):
-    response = await client.responses.create(input=[
-        {
-            "role": "user",
-            "content": [{
-                "type": "input_text",
-                "text": "Hello!"
-            }],
-        },
-    ], )
+    response = await client.responses.create(
+        input=[
+            {
+                "role": "user",
+                "content": [{"type": "input_text", "text": "Hello!"}],
+            },
+        ],
+    )
     print(response)
     assert response.status == "completed"
@@ -99,6 +88,6 @@ async def test_streaming(client: openai.AsyncOpenAI):
     assert isinstance(events[0], openai_responses_types.ResponseCreatedEvent)
     assert any(
         isinstance(event, openai_responses_types.ResponseTextDeltaEvent)
-        for event in events)
-    assert isinstance(events[-1],
-                      openai_responses_types.ResponseCompletedEvent)
+        for event in events
+    )
+    assert isinstance(events[-1], openai_responses_types.ResponseCompletedEvent)


@@ -38,9 +38,9 @@ def default_image_server_args():
 @pytest.fixture(scope="module")
 def image_server(default_image_server_args):
     with RemoteOpenAIServer(
-            MODEL_NAME,
-            default_image_server_args,
-            env_dict={"VLLM_ENABLE_RESPONSES_API_STORE": "1"},
+        MODEL_NAME,
+        default_image_server_args,
+        env_dict={"VLLM_ENABLE_RESPONSES_API_STORE": "1"},
     ) as remote_server:
         yield remote_server
@@ -54,8 +54,7 @@ async def client(image_server):
 @pytest.fixture(scope="session")
 def base64_encoded_image(local_asset_server) -> dict[str, str]:
     return {
-        image_url:
-        encode_image_base64(local_asset_server.get_image_asset(image_url))
+        image_url: encode_image_base64(local_asset_server.get_image_asset(image_url))
         for image_url in TEST_IMAGE_ASSETS
     }
@@ -63,24 +62,23 @@ def base64_encoded_image(local_asset_server) -> dict[str, str]:
 @pytest.mark.asyncio
 @pytest.mark.parametrize("model_name", [MODEL_NAME])
 @pytest.mark.parametrize("image_url", TEST_IMAGE_ASSETS, indirect=True)
-async def test_single_chat_session_image(client: openai.AsyncOpenAI,
-                                         model_name: str, image_url: str):
+async def test_single_chat_session_image(
+    client: openai.AsyncOpenAI, model_name: str, image_url: str
+):
     content_text = "What's in this image?"
-    messages = [{
-        "role":
-        "user",
-        "content": [
-            {
-                "type": "input_image",
-                "image_url": image_url,
-                "detail": "auto",
-            },
-            {
-                "type": "input_text",
-                "text": content_text
-            },
-        ],
-    }]
+    messages = [
+        {
+            "role": "user",
+            "content": [
+                {
+                    "type": "input_image",
+                    "image_url": image_url,
+                    "detail": "auto",
+                },
+                {"type": "input_text", "text": content_text},
+            ],
+        }
+    ]
 
     # test image url
     response = await client.responses.create(
@@ -100,22 +98,19 @@ async def test_single_chat_session_image_base64encoded(
     base64_encoded_image: dict[str, str],
 ):
     content_text = "What's in this image?"
-    messages = [{
-        "role":
-        "user",
-        "content": [
-            {
-                "type": "input_image",
-                "image_url":
-                f"data:image/jpeg;base64,{base64_encoded_image[raw_image_url]}",
-                "detail": "auto",
-            },
-            {
-                "type": "input_text",
-                "text": content_text
-            },
-        ],
-    }]
+    messages = [
+        {
+            "role": "user",
+            "content": [
+                {
+                    "type": "input_image",
+                    "image_url": f"data:image/jpeg;base64,{base64_encoded_image[raw_image_url]}",
+                    "detail": "auto",
+                },
+                {"type": "input_text", "text": content_text},
+            ],
+        }
+    ]
 
     # test image base64
     response = await client.responses.create(
         model=model_name,
@@ -129,24 +124,27 @@ async def test_single_chat_session_image_base64encoded(
 @pytest.mark.parametrize(
     "image_urls",
     [TEST_IMAGE_ASSETS[:i] for i in range(2, len(TEST_IMAGE_ASSETS))],
-    indirect=True)
-async def test_multi_image_input(client: openai.AsyncOpenAI, model_name: str,
-                                 image_urls: list[str]):
-    messages = [{
-        "role":
-        "user",
-        "content": [
-            *({
-                "type": "input_image",
-                "image_url": image_url,
-                "detail": "auto",
-            } for image_url in image_urls),
-            {
-                "type": "input_text",
-                "text": "What's in this image?"
-            },
-        ],
-    }]
+    indirect=True,
+)
+async def test_multi_image_input(
+    client: openai.AsyncOpenAI, model_name: str, image_urls: list[str]
+):
+    messages = [
+        {
+            "role": "user",
+            "content": [
+                *(
+                    {
+                        "type": "input_image",
+                        "image_url": image_url,
+                        "detail": "auto",
+                    }
+                    for image_url in image_urls
+                ),
+                {"type": "input_text", "text": "What's in this image?"},
+            ],
+        }
+    ]
 
     if len(image_urls) > MAXIMUM_IMAGES:
         with pytest.raises(openai.BadRequestError):  # test multi-image input
@@ -157,10 +155,12 @@ async def test_multi_image_input(client: openai.AsyncOpenAI, model_name: str,
         # the server should still work afterwards
         response = await client.responses.create(
             model=model_name,
-            input=[{
-                "role": "user",
-                "content": "What's the weather like in Paris today?",
-            }],
+            input=[
+                {
+                    "role": "user",
+                    "content": "What's the weather like in Paris today?",
+                }
+            ],
         )
         assert len(response.output_text) > 0
     else:


@@ -24,8 +24,7 @@ async def test_store(client: openai.AsyncOpenAI):
     assert response.status == "completed"
 
     # The response should not be found.
-    with pytest.raises(openai.NotFoundError,
-                       match="Response with id .* not found."):
+    with pytest.raises(openai.NotFoundError, match="Response with id .* not found."):
         await client.responses.retrieve(response.id)
@@ -53,8 +52,8 @@ async def test_background(client: openai.AsyncOpenAI):
 @pytest.mark.asyncio
 async def test_background_error(client: openai.AsyncOpenAI):
     with pytest.raises(
-            openai.BadRequestError,
-            match="background can only be used when `store` is true"):
+        openai.BadRequestError, match="background can only be used when `store` is true"
+    ):
         _ = await client.responses.create(
             input="What is 13 * 24?",
             background=True,
@@ -87,8 +86,9 @@ async def test_cancel_completed(client: openai.AsyncOpenAI):
     response = await client.responses.create(input="Hello")
     assert response.status == "completed"
 
-    with pytest.raises(openai.BadRequestError,
-                       match="Cannot cancel a synchronous response."):
+    with pytest.raises(
+        openai.BadRequestError, match="Cannot cancel a synchronous response."
+    ):
         await client.responses.cancel(response.id)
@@ -97,7 +97,8 @@ async def test_previous_response_id(client: openai.AsyncOpenAI):
     response1 = await client.responses.create(
         instructions="You are tested on your ability to retrieve the correct "
         "information from the previous response.",
-        input="Hello, my name is John.")
+        input="Hello, my name is John.",
+    )
 
     response2 = await client.responses.create(
         input="Actually, my name is not John. My real name is Mark.",
@@ -118,7 +119,8 @@ async def test_two_responses_with_same_prev_id(client: openai.AsyncOpenAI):
     response1 = await client.responses.create(
         instructions="You are tested on your ability to retrieve the correct "
         "information from the previous response.",
-        input="Hello, my name is John.")
+        input="Hello, my name is John.",
+    )
 
     # Both response 2 and 3 use response 1 as the previous response.
     response2 = client.responses.create(


@@ -11,14 +11,10 @@ from pydantic import BaseModel
 async def test_structured_output(client: openai.AsyncOpenAI):
     response = await client.responses.create(
         input=[
-            {
-                "role": "system",
-                "content": "Extract the event information."
-            },
+            {"role": "system", "content": "Extract the event information."},
             {
                 "role": "user",
-                "content":
-                "Alice and Bob are going to a science fair on Friday.",
+                "content": "Alice and Bob are going to a science fair on Friday.",
             },
         ],
         text={
@@ -28,18 +24,9 @@ async def test_structured_output(client: openai.AsyncOpenAI):
                 "schema": {
                     "type": "object",
                     "properties": {
-                        "event_name": {
-                            "type": "string"
-                        },
-                        "date": {
-                            "type": "string"
-                        },
-                        "participants": {
-                            "type": "array",
-                            "items": {
-                                "type": "string"
-                            }
-                        },
+                        "event_name": {"type": "string"},
+                        "date": {"type": "string"},
+                        "participants": {"type": "array", "items": {"type": "string"}},
                     },
                     "required": ["event_name", "date", "participants"],
                     "additionalProperties": False,
@@ -65,7 +52,6 @@ async def test_structured_output(client: openai.AsyncOpenAI):
 @pytest.mark.asyncio
 async def test_structured_output_with_parse(client: openai.AsyncOpenAI):
     class CalendarEvent(BaseModel):
-
         event_name: str
         date: str


@@ -40,8 +40,7 @@ async def client(server):
     "model_name",
     [MODEL_NAME],
 )
-async def test_invalid_json_schema(client: openai.AsyncOpenAI,
-                                   model_name: str) -> None:
+async def test_invalid_json_schema(client: openai.AsyncOpenAI, model_name: str) -> None:
     invalid_json_schema = {
         "$defs": {
             "CarType": {
@@ -51,35 +50,29 @@ async def test_invalid_json_schema(client: openai.AsyncOpenAI,
             }
         },
         "properties": {
-            "brand": {
-                "title": "Brand",
-                "type": "string"
-            },
-            "model": {
-                "title": "Model",
-                "type": "string"
-            },
-            "car_type": {
-                "$ref": "#/$defs/CarType"
-            },
+            "brand": {"title": "Brand", "type": "string"},
+            "model": {"title": "Model", "type": "string"},
+            "car_type": {"$ref": "#/$defs/CarType"},
             "foo": "bar",
         },
         "required": ["brand", "model", "car_type"],
         "title": "CarDescription",
         "type": "object",
     }
-    prompt = ("Generate a JSON with the brand, model and car_type of"
-              "the most iconic car from the 90's")
+    prompt = (
+        "Generate a JSON with the brand, model and car_type of"
+        "the most iconic car from the 90's"
+    )
     with pytest.raises((openai.BadRequestError, openai.APIError)):
         await client.chat.completions.create(
             model=model_name,
-            messages=[{
-                "role": "user",
-                "content": prompt,
-            }],
-            extra_body={"structured_outputs": {
-                "json": invalid_json_schema
-            }},
+            messages=[
+                {
+                    "role": "user",
+                    "content": prompt,
+                }
+            ],
+            extra_body={"structured_outputs": {"json": invalid_json_schema}},
         )
@@ -89,23 +82,22 @@ async def test_invalid_json_schema(client: openai.AsyncOpenAI,
     [MODEL_NAME],
 )
 async def test_invalid_regex(client: openai.AsyncOpenAI, model_name: str):
-    prompt = ("Generate an email address for Alan Turing, who works in Enigma."
-              "End in .com and new line. Example result:"
-              "alan.turing@enigma.com\n")
+    prompt = (
+        "Generate an email address for Alan Turing, who works in Enigma."
+        "End in .com and new line. Example result:"
+        "alan.turing@enigma.com\n"
+    )
     with pytest.raises((openai.BadRequestError, openai.APIError)):
         await client.chat.completions.create(
             model=model_name,
-            messages=[{
-                "role": "user",
-                "content": prompt,
-            }],
-            extra_body={
-                "structured_outputs": {
-                    "regex": r"[.*"
-                },
-                "stop": ["\n"]
-            },
+            messages=[
+                {
+                    "role": "user",
+                    "content": prompt,
+                }
+            ],
+            extra_body={"structured_outputs": {"regex": r"[.*"}, "stop": ["\n"]},
         )
@@ -129,18 +121,20 @@ async def test_invalid_grammar(client: openai.AsyncOpenAI, model_name: str):
         number ::= "1 " | "2 "
     """
-    prompt = ("Generate an SQL query to show the 'username' and 'email'"
-              "from the 'users' table.")
+    prompt = (
+        "Generate an SQL query to show the 'username' and 'email'"
+        "from the 'users' table."
+    )
     with pytest.raises((openai.BadRequestError, openai.APIError)):
         await client.chat.completions.create(
             model=model_name,
-            messages=[{
-                "role": "user",
-                "content": prompt,
-            }],
-            extra_body={
-                "structured_outputs": {
-                    "grammar": invalid_simplified_sql_grammar
-                }
-            },
+            messages=[
+                {
+                    "role": "user",
+                    "content": prompt,
+                }
+            ],
+            extra_body={
+                "structured_outputs": {"grammar": invalid_simplified_sql_grammar}
+            },
         )


@@ -31,12 +31,13 @@ def default_server_args():
     ]
 
 
-@pytest.fixture(scope="module",
-                params=[["--no-enable-prefix-caching"],
-                        [
-                            "--no-enable-prefix-caching",
-                            "--disable-frontend-multiprocessing"
-                        ]])
+@pytest.fixture(
+    scope="module",
+    params=[
+        ["--no-enable-prefix-caching"],
+        ["--no-enable-prefix-caching", "--disable-frontend-multiprocessing"],
+    ],
+)
 def server(default_server_args, request):
     if request.param:
         default_server_args = default_server_args + request.param
@@ -55,12 +56,10 @@ async def client(server):
     "model_name",
     [MODEL_NAME],
 )
-async def test_single_completion(client: openai.AsyncOpenAI,
-                                 model_name: str) -> None:
-    completion = await client.completions.create(model=model_name,
-                                                 prompt="Hello, my name is",
-                                                 max_tokens=5,
-                                                 temperature=0.0)
+async def test_single_completion(client: openai.AsyncOpenAI, model_name: str) -> None:
+    completion = await client.completions.create(
+        model=model_name, prompt="Hello, my name is", max_tokens=5, temperature=0.0
+    )
 
     assert completion.id is not None
     assert completion.choices is not None and len(completion.choices) == 1
@@ -69,7 +68,8 @@ async def test_single_completion(client: openai.AsyncOpenAI,
     assert len(choice.text) >= 5
     assert choice.finish_reason == "length"
     assert completion.usage == openai.types.CompletionUsage(
-        completion_tokens=5, prompt_tokens=6, total_tokens=11)
+        completion_tokens=5, prompt_tokens=6, total_tokens=11
+    )
 
     # test using token IDs
     completion = await client.completions.create(
@@ -147,11 +147,12 @@ async def test_some_logprobs(client: openai.AsyncOpenAI, model_name: str):
     "model_name",
     [MODEL_NAME],
 )
-async def test_too_many_completion_logprobs(client: openai.AsyncOpenAI,
-                                            model_name: str) -> None:
+async def test_too_many_completion_logprobs(
+    client: openai.AsyncOpenAI, model_name: str
+) -> None:
     with pytest.raises(
-        (openai.BadRequestError, openai.APIError)):  # test using token IDs
+        (openai.BadRequestError, openai.APIError)
+    ):  # test using token IDs
         await client.completions.create(
             model=model_name,
             prompt=[0, 0, 0, 0, 0],
@@ -163,7 +164,8 @@ async def test_too_many_completion_logprobs(client: openai.AsyncOpenAI,
     )
     ...
     with pytest.raises(
-        (openai.BadRequestError, openai.APIError)):  # test using token IDs
+        (openai.BadRequestError, openai.APIError)
+    ):  # test using token IDs
         stream = await client.completions.create(
             model=model_name,
             prompt=[0, 0, 0, 0, 0],
@@ -188,13 +190,13 @@ async def test_too_many_completion_logprobs(client: openai.AsyncOpenAI,
 @pytest.mark.asyncio
-@pytest.mark.parametrize("model_name, prompt_logprobs", [(MODEL_NAME, -1),
-                                                         (MODEL_NAME, 0),
-                                                         (MODEL_NAME, 1),
-                                                         (MODEL_NAME, None)])
-async def test_prompt_logprobs_completion(client: openai.AsyncOpenAI,
-                                          model_name: str,
-                                          prompt_logprobs: Optional[int]):
+@pytest.mark.parametrize(
+    "model_name, prompt_logprobs",
+    [(MODEL_NAME, -1), (MODEL_NAME, 0), (MODEL_NAME, 1), (MODEL_NAME, None)],
+)
+async def test_prompt_logprobs_completion(
+    client: openai.AsyncOpenAI, model_name: str, prompt_logprobs: Optional[int]
+):
     params: dict = {
         "prompt": ["A robot may not injure another robot", "My name is"],
         "model": model_name,
@@ -223,8 +225,9 @@ async def test_prompt_logprobs_completion(client: openai.AsyncOpenAI,
     "model_name",
     [MODEL_NAME],
 )
-async def test_completion_streaming(client: openai.AsyncOpenAI,
-                                    model_name: str) -> None:
+async def test_completion_streaming(
+    client: openai.AsyncOpenAI, model_name: str
+) -> None:
     prompt = "What is an LLM?"
 
     single_completion = await client.completions.create(
@@ -234,11 +237,9 @@ async def test_completion_streaming(client: openai.AsyncOpenAI,
         temperature=0.0,
     )
     single_output = single_completion.choices[0].text
-    stream = await client.completions.create(model=model_name,
-                                             prompt=prompt,
-                                             max_tokens=5,
-                                             temperature=0.0,
-                                             stream=True)
+    stream = await client.completions.create(
+        model=model_name, prompt=prompt, max_tokens=5, temperature=0.0, stream=True
+    )
     chunks: list[str] = []
     finish_reason_count = 0
     async for chunk in stream:
@@ -257,8 +258,7 @@ async def test_completion_streaming(client: openai.AsyncOpenAI,
     "model_name",
     [MODEL_NAME],
 )
-async def test_parallel_no_streaming(client: openai.AsyncOpenAI,
-                                     model_name: str):
+async def test_parallel_no_streaming(client: openai.AsyncOpenAI, model_name: str):
     """Parallel sampling without streaming.
     A single request output contains a list of completions.
     """
@@ -268,27 +268,26 @@ async def test_parallel_no_streaming(client: openai.AsyncOpenAI,
     max_tokens = 50  # we want some to finish earlier than others
 
     # High temperature to maximize chance of unique completions.
-    completion = await client.completions.create(model=model_name,
-                                                 prompt=prompt,
-                                                 max_tokens=max_tokens,
-                                                 n=n,
-                                                 temperature=1.0,
-                                                 stream=False,
-                                                 logprobs=0,
-                                                 seed=42)
+    completion = await client.completions.create(
+        model=model_name,
+        prompt=prompt,
+        max_tokens=max_tokens,
+        n=n,
+        temperature=1.0,
+        stream=False,
+        logprobs=0,
+        seed=42,
+    )
 
     # Assert `n` completions
     num_completions = len(completion.choices)
-    assert num_completions == n, (
-        f"Num completions {num_completions} but expected {n}.")
+    assert num_completions == n, f"Num completions {num_completions} but expected {n}."
     completion_repeats: dict[str, int] = {}
     output_token_lengths = set()
     for idx, choice in enumerate(completion.choices):
         # Assert correct completion index & some finish reason.
-        assert choice.index == idx, (
-            f"Index {choice.index} but expected {idx}.")
-        assert choice.finish_reason is not None, (
-            "None finish_reason is invalid.")
+        assert choice.index == idx, f"Index {choice.index} but expected {idx}."
+        assert choice.finish_reason is not None, "None finish_reason is invalid."
         text = choice.text
         completion_repeats[text] = completion_repeats.get(text, 0) + 1
         output_token_lengths.add(len(choice.logprobs.tokens))
@@ -297,13 +296,10 @@ async def test_parallel_no_streaming(client: openai.AsyncOpenAI,
     # Assert `n` unique completions
     num_unique = len(completion_repeats)
    if num_unique != n:
-        repeats = {
-            txt: num
-            for (txt, num) in completion_repeats.items() if num > 1
-        }
+        repeats = {txt: num for (txt, num) in completion_repeats.items() if num > 1}
         raise AssertionError(
-            f"Expected {n} unique completions, got {num_unique};"
-            f" repeats: {repeats}.")
+            f"Expected {n} unique completions, got {num_unique}; repeats: {repeats}."
+        )
 
 
 @pytest.mark.asyncio
@@ -321,13 +317,15 @@ async def test_parallel_streaming(client: openai.AsyncOpenAI, model_name: str):
     n = 3
     max_tokens = 50  # we want some to finish earlier than others
 
-    stream = await client.completions.create(model=model_name,
-                                             prompt=prompt,
-                                             max_tokens=max_tokens,
-                                             n=n,
-                                             temperature=1.0,
-                                             stream=True,
-                                             seed=42)
+    stream = await client.completions.create(
+        model=model_name,
+        prompt=prompt,
+        max_tokens=max_tokens,
+        n=n,
+        temperature=1.0,
+        stream=True,
+        seed=42,
+    )
     chunks: list[list[str]] = [[] for _ in range(n)]
     finish_reason_count = 0
     async for chunk in stream:
@@ -338,7 +336,8 @@ async def test_parallel_streaming(client: openai.AsyncOpenAI, model_name: str):
             finish_reason_count += 1
 
     # Assert `n` completions with correct finish reasons
     assert finish_reason_count == n, (
-        f"Expected {n} completions with valid indices and finish_reason.")
+        f"Expected {n} completions with valid indices and finish_reason."
+    )
     completion_repeats: dict[str, int] = {}
     chunk_lengths = set()
     for chunk in chunks:
@@ -346,7 +345,8 @@ async def test_parallel_streaming(client: openai.AsyncOpenAI, model_name: str):
         # Assert correct number of completion tokens
         chunk_lengths.add(chunk_len)
         assert chunk_len <= max_tokens, (
-            f"max_tokens={max_tokens} but chunk len is {chunk_len}.")
+            f"max_tokens={max_tokens} but chunk len is {chunk_len}."
+        )
         text = "".join(chunk)
         completion_repeats[text] = completion_repeats.get(text, 0) + 1
         print(text)
@@ -355,12 +355,10 @@ async def test_parallel_streaming(client: openai.AsyncOpenAI, model_name: str):
     # Assert `n` unique completions
     num_unique = len(completion_repeats)
     if num_unique != n:
-        repeats = {
-            txt: num
-            for (txt, num) in completion_repeats.items() if num > 1
-        }
-        raise AssertionError(f"{num_unique} unique completions, expected {n};"
-                             f" repeats: {repeats}")
+        repeats = {txt: num for (txt, num) in completion_repeats.items() if num > 1}
+        raise AssertionError(
+            f"{num_unique} unique completions, expected {n}; repeats: {repeats}"
+        )
 
 
 @pytest.mark.asyncio
@@ -368,53 +366,55 @@ async def test_parallel_streaming(client: openai.AsyncOpenAI, model_name: str):
     "model_name",
     [MODEL_NAME],
 )
-async def test_completion_stream_options(client: openai.AsyncOpenAI,
-                                         model_name: str):
+async def test_completion_stream_options(client: openai.AsyncOpenAI, model_name: str):
     prompt = "What is the capital of France?"
 
     # Test stream=True, stream_options=
     #     {"include_usage": False, "continuous_usage_stats": False}
-    stream = await client.completions.create(model=model_name,
-                                             prompt=prompt,
-                                             max_tokens=5,
-                                             temperature=0.0,
-                                             stream=True,
-                                             stream_options={
-                                                 "include_usage": False,
-                                                 "continuous_usage_stats":
-                                                 False,
-                                             })
+    stream = await client.completions.create(
+        model=model_name,
+        prompt=prompt,
+        max_tokens=5,
+        temperature=0.0,
+        stream=True,
+        stream_options={
+            "include_usage": False,
+            "continuous_usage_stats": False,
+        },
+    )
     async for chunk in stream:
         assert chunk.usage is None
 
     # Test stream=True, stream_options=
     #     {"include_usage": False, "continuous_usage_stats": True}
-    stream = await client.completions.create(model=model_name,
-                                             prompt=prompt,
-                                             max_tokens=5,
-                                             temperature=0.0,
-                                             stream=True,
-                                             stream_options={
-                                                 "include_usage": False,
-                                                 "continuous_usage_stats":
-                                                 True,
-                                             })
+    stream = await client.completions.create(
+        model=model_name,
+        prompt=prompt,
+        max_tokens=5,
+        temperature=0.0,
+        stream=True,
+        stream_options={
+            "include_usage": False,
+            "continuous_usage_stats": True,
+        },
+    )
     async for chunk in stream:
         assert chunk.usage is None
 
     # Test stream=True, stream_options=
     #     {"include_usage": True, "continuous_usage_stats": False}
-    stream = await client.completions.create(model=model_name,
-                                             prompt=prompt,
-                                             max_tokens=5,
-                                             temperature=0.0,
-                                             stream=True,
-                                             stream_options={
-                                                 "include_usage": True,
-                                                 "continuous_usage_stats":
-                                                 False,
-                                             })
+    stream = await client.completions.create(
+        model=model_name,
+        prompt=prompt,
+        max_tokens=5,
+        temperature=0.0,
+        stream=True,
+        stream_options={
+            "include_usage": True,
+            "continuous_usage_stats": False,
+        },
+    )
     async for chunk in stream:
         if chunk.choices[0].finish_reason is None:
             assert chunk.usage is None
@@ -425,57 +425,63 @@ async def test_completion_stream_options(client: openai.AsyncOpenAI,
             assert final_chunk.usage.prompt_tokens > 0
             assert final_chunk.usage.completion_tokens > 0
             assert final_chunk.usage.total_tokens == (
-                final_chunk.usage.prompt_tokens +
-                final_chunk.usage.completion_tokens)
+                final_chunk.usage.prompt_tokens + final_chunk.usage.completion_tokens
+            )
             assert final_chunk.choices == []
 
     # Test stream=True, stream_options=
    #     {"include_usage": True, "continuous_usage_stats": True}
-    stream = await client.completions.create(model=model_name,
-                                             prompt=prompt,
-                                             max_tokens=5,
-                                             temperature=0.0,
-                                             stream=True,
-                                             stream_options={
-                                                 "include_usage": True,
-                                                 "continuous_usage_stats":
-                                                 True,
-                                             })
+    stream = await client.completions.create(
+        model=model_name,
+        prompt=prompt,
+        max_tokens=5,
+        temperature=0.0,
+        stream=True,
+        stream_options={
+            "include_usage": True,
+            "continuous_usage_stats": True,
+        },
+    )
     async for chunk in stream:
         assert chunk.usage is not None
         assert chunk.usage.prompt_tokens > 0
         assert chunk.usage.completion_tokens > 0
-        assert chunk.usage.total_tokens == (chunk.usage.prompt_tokens +
-                                            chunk.usage.completion_tokens)
+        assert chunk.usage.total_tokens == (
+            chunk.usage.prompt_tokens + chunk.usage.completion_tokens
+        )
         if chunk.choices[0].finish_reason is not None:
             final_chunk = await stream.__anext__()
             assert final_chunk.usage is not None
             assert final_chunk.usage.prompt_tokens > 0
             assert final_chunk.usage.completion_tokens > 0
             assert final_chunk.usage.total_tokens == (
-                final_chunk.usage.prompt_tokens +
-                final_chunk.usage.completion_tokens)
+                final_chunk.usage.prompt_tokens + final_chunk.usage.completion_tokens
+            )
             assert final_chunk.choices == []
 
     # Test stream=False, stream_options=
     #     {"include_usage": None}
     with pytest.raises(BadRequestError):
-        await client.completions.create(model=model_name,
-                                        prompt=prompt,
-                                        max_tokens=5,
-                                        temperature=0.0,
-                                        stream=False,
-                                        stream_options={"include_usage": None})
+        await client.completions.create(
+            model=model_name,
+            prompt=prompt,
+            max_tokens=5,
+            temperature=0.0,
+            stream=False,
+            stream_options={"include_usage": None},
+        )
 
     # Test stream=False, stream_options=
     #     {"include_usage": True}
     with pytest.raises(BadRequestError):
-        await client.completions.create(model=model_name,
-                                        prompt=prompt,
-                                        max_tokens=5,
-                                        temperature=0.0,
-                                        stream=False,
-                                        stream_options={"include_usage": True})
+        await client.completions.create(
+            model=model_name,
+            prompt=prompt,
+            max_tokens=5,
+            temperature=0.0,
+            stream=False,
+            stream_options={"include_usage": True},
+        )
 
     # Test stream=False, stream_options=
     #     {"continuous_usage_stats": None}
@@ -486,7 +492,8 @@ async def test_completion_stream_options(client: openai.AsyncOpenAI,
             max_tokens=5,
             temperature=0.0,
             stream=False,
-            stream_options={"continuous_usage_stats": None})
+            stream_options={"continuous_usage_stats": None},
+        )
 
     # Test stream=False, stream_options=
     #     {"continuous_usage_stats": True}
@@ -497,7 +504,8 @@ async def test_completion_stream_options(client: openai.AsyncOpenAI,
             max_tokens=5,
             temperature=0.0,
             stream=False,
-            stream_options={"continuous_usage_stats": True})
+            stream_options={"continuous_usage_stats": True},
+        )
 
 
 @pytest.mark.asyncio
@@ -528,15 +536,19 @@ async def test_batch_completions(client: openai.AsyncOpenAI, model_name: str):
         extra_body=dict(
             # NOTE: this has to be true for n > 1 in vLLM, but
             # not necessary for official client.
-            use_beam_search=True),
+            use_beam_search=True
+        ),
     )
     assert len(batch.choices) == 4
-    assert batch.choices[0].text != batch.choices[
-        1].text, "beam search should be different"
-    assert batch.choices[0].text == batch.choices[
-        2].text, "two copies of the same prompt should be the same"
-    assert batch.choices[1].text == batch.choices[
-        3].text, "two copies of the same prompt should be the same"
+    assert batch.choices[0].text != batch.choices[1].text, (
+        "beam search should be different"
+    )
+    assert batch.choices[0].text == batch.choices[2].text, (
+        "two copies of the same prompt should be the same"
+    )
+    assert batch.choices[1].text == batch.choices[3].text, (
+        "two copies of the same prompt should be the same"
+    )
 
     # test streaming
     batch = await client.completions.create(
@@ -560,31 +572,30 @@ async def test_batch_completions(client: openai.AsyncOpenAI, model_name: str):
     [MODEL_NAME],
 )
 @pytest.mark.parametrize("logprobs_arg", [1, 0])
-async def test_echo_logprob_completion(client: openai.AsyncOpenAI,
-                                       model_name: str, logprobs_arg: int):
+async def test_echo_logprob_completion(
+    client: openai.AsyncOpenAI, model_name: str, logprobs_arg: int
+):
     tokenizer = get_tokenizer(tokenizer_name=MODEL_NAME)
     # test using text and token IDs
     for prompt in ("Hello, my name is", [0, 0, 0, 0, 0]):
-        completion = await client.completions.create(model=model_name,
-                                                     prompt=prompt,
-                                                     max_tokens=5,
-                                                     temperature=0.0,
-                                                     echo=True,
-                                                     logprobs=logprobs_arg)
+        completion = await client.completions.create(
+            model=model_name,
+            prompt=prompt,
+            max_tokens=5,
+            temperature=0.0,
+            echo=True,
+            logprobs=logprobs_arg,
+        )
 
-        prompt_text = tokenizer.decode(prompt) if isinstance(prompt,
-                                                             list) else prompt
+        prompt_text = tokenizer.decode(prompt) if isinstance(prompt, list) else prompt
         assert re.search(r"^" + prompt_text, completion.choices[0].text)
         logprobs = completion.choices[0].logprobs
         assert logprobs is not None
         assert len(logprobs.text_offset) > 5
-        assert (len(logprobs.token_logprobs) > 5
-                and logprobs.token_logprobs[0] is None)
-        assert (len(logprobs.top_logprobs) > 5
-                and logprobs.top_logprobs[0] is None)
+        assert len(logprobs.token_logprobs) > 5 and logprobs.token_logprobs[0] is None
+        assert len(logprobs.top_logprobs) > 5 and logprobs.top_logprobs[0] is None
         for top_logprobs in logprobs.top_logprobs[1:]:
-            assert max(logprobs_arg,
-                       1) <= len(top_logprobs) <= logprobs_arg + 1
+            assert max(logprobs_arg, 1) <= len(top_logprobs) <= logprobs_arg + 1
         assert len(logprobs.tokens) > 5
@@ -593,8 +604,7 @@ async def test_echo_logprob_completion(client: openai.AsyncOpenAI,
     "model_name",
     [MODEL_NAME],
 )
-async def test_invalid_json_schema(client: openai.AsyncOpenAI,
-                                   model_name: str) -> None:
+async def test_invalid_json_schema(client: openai.AsyncOpenAI, model_name: str) -> None:
     invalid_json_schema = {
         "$defs": {
             "CarType": {
@@ -604,32 +614,24 @@ async def test_invalid_json_schema(client: openai.AsyncOpenAI,
             }
         },
         "properties": {
-            "brand": {
-                "title": "Brand",
-                "type": "string"
-            },
-            "model": {
-                "title": "Model",
-                "type": "string"
-            },
-            "car_type": {
-                "$ref": "#/$defs/CarType"
-            },
+            "brand": {"title": "Brand", "type": "string"},
+            "model": {"title": "Model", "type": "string"},
+            "car_type": {"$ref": "#/$defs/CarType"},
             "foo": "bar",
         },
         "required": ["brand", "model", "car_type"],
         "title": "CarDescription",
         "type": "object",
     }
-    prompt = ("Generate a JSON with the brand, model and car_type of"
-              "the most iconic car from the 90's")
+    prompt = (
+        "Generate a JSON with the brand, model and car_type of"
+        "the most iconic car from the 90's"
+    )
     with pytest.raises((openai.BadRequestError, openai.APIError)):
         await client.completions.create(
             model=model_name,
             prompt=prompt,
-            extra_body={"structured_outputs": {
-                "json": invalid_json_schema
-            }},
+            extra_body={"structured_outputs": {"json": invalid_json_schema}},
         )
@@ -639,20 +641,17 @@ async def test_invalid_json_schema(client: openai.AsyncOpenAI,
     [MODEL_NAME],
 )
 async def test_invalid_regex(client: openai.AsyncOpenAI, model_name: str):
-    prompt = ("Generate an email address for Alan Turing, who works in Enigma."
-              "End in .com and new line. Example result:"
-              "alan.turing@enigma.com\n")
+    prompt = (
+        "Generate an email address for Alan Turing, who works in Enigma."
+        "End in .com and new line. Example result:"
+        "alan.turing@enigma.com\n"
+    )
     with pytest.raises((openai.BadRequestError, openai.APIError)):
         await client.completions.create(
             model=model_name,
             prompt=prompt,
-            extra_body={
-                "structured_outputs": {
-                    "regex": r"[.*"
-                },
-                "stop": ["\n"]
-            },
+            extra_body={"structured_outputs": {"regex": r"[.*"}, "stop": ["\n"]},
         )
@@ -676,29 +675,29 @@ async def test_invalid_grammar(client: openai.AsyncOpenAI, model_name: str):
         number ::= "1 " | "2 "
     """
-    prompt = ("Generate an SQL query to show the 'username' and 'email'"
-              "from the 'users' table.")
+    prompt = (
+        "Generate an SQL query to show the 'username' and 'email'"
+        "from the 'users' table."
+    )
     with pytest.raises((openai.BadRequestError, openai.APIError)):
         await client.completions.create(
             model=model_name,
             prompt=prompt,
             extra_body={
-                "structured_outputs": {
-                    "grammar": invalid_simplified_sql_grammar
-                }
+                "structured_outputs": {"grammar": invalid_simplified_sql_grammar}
             },
         )
 
 
 @pytest.mark.asyncio
-async def test_completion_with_empty_prompt_embeds(
-        client: openai.AsyncOpenAI) -> None:
+async def test_completion_with_empty_prompt_embeds(client: openai.AsyncOpenAI) -> None:
     """Test completion with empty prompt embeds."""
     payload: dict[str, object] = {"prompt": "Hello", "prompt_embeds": []}
     headers: dict[str, str] = {"Content-Type": "application/json"}
     # base_url = http://localhost:8000/v1/completions
-    response = requests.post(f"{client.base_url}completions",
-                             headers=headers,
-                             json=payload)
+    response = requests.post(
+        f"{client.base_url}completions", headers=headers, json=payload
+    )
     assert response.status_code == 200, (
-        f"Expected status code 200, got {response.status_code}. ")
+        f"Expected status code 200, got {response.status_code}. "
+    )


@@ -37,9 +37,9 @@ def default_image_embeds_server_args() -> list[str]:
 @pytest.fixture(scope="module")
 def server_with_image_embeds(default_image_embeds_server_args):
-    with RemoteOpenAIServer(MODEL_NAME,
-                            default_image_embeds_server_args,
-                            max_wait_seconds=600) as remote_server:
+    with RemoteOpenAIServer(
+        MODEL_NAME, default_image_embeds_server_args, max_wait_seconds=600
+    ) as remote_server:
         yield remote_server
@@ -57,7 +57,7 @@ def encode_image_embedding_to_base64(image_embedding) -> str:
     torch.save(image_embedding, buffer)
     buffer.seek(0)
     binary_data = buffer.read()
-    base64_image_embedding = base64.b64encode(binary_data).decode('utf-8')
+    base64_image_embedding = base64.b64encode(binary_data).decode("utf-8")
     return base64_image_embedding
@@ -75,19 +75,13 @@ async def test_completions_with_image_embeds(
     base64_image_embedding = encode_image_embedding_to_base64(image_embeds)
     chat_completion = await client_with_image_embeds.chat.completions.create(
         messages=[
-            {
-                "role": "system",
-                "content": "You are a helpful assistant."
-            },
+            {"role": "system", "content": "You are a helpful assistant."},
             {
-                "role":
-                "user",
+                "role": "user",
                 "content": [
                     {
-                        "type":
-                        "text",
-                        "text":
-                        "Describe these images separately. For each image,"
+                        "type": "text",
+                        "text": "Describe these images separately. For each image,"
                         "reply with a short sentence (no more than 10 words).",
                     },
                     {


@@ -50,16 +50,13 @@ async def client(server):
     "model_name",
     [MODEL_NAME],
 )
-async def test_single_completion(client: openai.AsyncOpenAI,
-                                 server: RemoteOpenAIServer,
-                                 model_name: str) -> None:
+async def test_single_completion(
+    client: openai.AsyncOpenAI, server: RemoteOpenAIServer, model_name: str
+) -> None:
     async def make_request():
         completion = await client.completions.create(
-            model=model_name,
-            prompt="Hello, my name is",
-            max_tokens=10,
-            temperature=1.0)
+            model=model_name, prompt="Hello, my name is", max_tokens=10, temperature=1.0
+        )
 
         assert completion.id is not None
         assert completion.choices is not None and len(completion.choices) == 1
@@ -108,9 +105,9 @@ async def test_single_completion(client: openai.AsyncOpenAI,
     "model_name",
     [MODEL_NAME],
 )
-async def test_completion_streaming(client: openai.AsyncOpenAI,
-                                    server: RemoteOpenAIServer,
-                                    model_name: str) -> None:
+async def test_completion_streaming(
+    client: openai.AsyncOpenAI, server: RemoteOpenAIServer, model_name: str
+) -> None:
     prompt = "What is an LLM?"
 
     async def make_streaming_request():
@@ -124,11 +121,9 @@ async def test_completion_streaming(client: openai.AsyncOpenAI,
         single_output = single_completion.choices[0].text
 
         # Perform the streaming request
-        stream = await client.completions.create(model=model_name,
-                                                 prompt=prompt,
-                                                 max_tokens=5,
-                                                 temperature=0.0,
-                                                 stream=True)
+        stream = await client.completions.create(
+            model=model_name, prompt=prompt, max_tokens=5, temperature=0.0, stream=True
+        )
         chunks: list[str] = []
         finish_reason_count = 0
         last_chunk = None
@@ -139,16 +134,15 @@ async def test_completion_streaming(client: openai.AsyncOpenAI,
             last_chunk = chunk  # Keep track of the last chunk
 
         # finish reason should only return in the last block for OpenAI API
-        assert finish_reason_count == 1, (
-            "Finish reason should appear exactly once.")
-        assert last_chunk is not None, (
-            "Stream should have yielded at least one chunk.")
-        assert last_chunk.choices[
-            0].finish_reason == "length", "Finish reason should be 'length'."
+        assert finish_reason_count == 1, "Finish reason should appear exactly once."
+        assert last_chunk is not None, "Stream should have yielded at least one chunk."
+        assert last_chunk.choices[0].finish_reason == "length", (
+            "Finish reason should be 'length'."
+        )
 
         # Check that the combined text matches the non-streamed version.
-        assert "".join(
-            chunks
-        ) == single_output, "Streamed output should match non-streamed output."
+        assert "".join(chunks) == single_output, (
+            "Streamed output should match non-streamed output."
+        )
 
         return True  # Indicate success for this request
 
     # Test single request
@@ -162,9 +156,9 @@ async def test_completion_streaming(client: openai.AsyncOpenAI,
     tasks = [make_streaming_request() for _ in range(num_requests)]
     results = await asyncio.gather(*tasks)
 
-    assert len(
-        results
-    ) == num_requests, f"Expected {num_requests} results, got {len(results)}"
+    assert len(results) == num_requests, (
+        f"Expected {num_requests} results, got {len(results)}"
+    )
     assert all(results), "Not all streaming requests completed successfully."
 
     await asyncio.sleep(0.5)
@@ -172,9 +166,9 @@ async def test_completion_streaming(client: openai.AsyncOpenAI,
     tasks = [make_streaming_request() for _ in range(num_requests)]
     results = await asyncio.gather(*tasks)
 
-    assert len(
-        results
-    ) == num_requests, f"Expected {num_requests} results, got {len(results)}"
+    assert len(results) == num_requests, (
+        f"Expected {num_requests} results, got {len(results)}"
+    )
     assert all(results), "Not all streaming requests completed successfully."
 
     # Check request balancing via Prometheus metrics if DP_SIZE > 1