Convert formatting to use ruff instead of yapf + isort (#26247)
Signed-off-by: Harry Mellor <19981378+hmellor@users.noreply.github.com>
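Every hunk in the diff below follows the same mechanical pattern: yapf's hanging-indent layout is replaced by ruff's black-style formatting, and isort is dropped in favour of ruff's import handling. As a minimal illustrative sketch of that pattern (echoing the sample_json_schema hunk, not an exact excerpt from the PR):

    # yapf-era layout (before)
    properties = {
        "name": {
            "type": "string"
        },
        "age": {
            "type": "integer"
        },
    }

    # ruff format layout (after)
    properties = {
        "name": {"type": "string"},
        "age": {"type": "integer"},
    }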
@@ -26,8 +26,10 @@ def sample_token_ids():

@pytest.fixture
def sample_regex():
return (r"((25[0-5]|(2[0-4]|1\d|[1-9]|)\d)\.){3}"
r"(25[0-5]|(2[0-4]|1\d|[1-9]|)\d)")
return (
r"((25[0-5]|(2[0-4]|1\d|[1-9]|)\d)\.){3}"
r"(25[0-5]|(2[0-4]|1\d|[1-9]|)\d)"
)


# Note: Ensure this only uses attributes compatible with xgrammar
@@ -36,53 +38,44 @@ def sample_json_schema():
return {
"type": "object",
"properties": {
"name": {
"type": "string"
},
"age": {
"type": "integer"
},
"name": {"type": "string"},
"age": {"type": "integer"},
"skills": {
"type": "array",
"items": {
"type": "string",
}
},
},
"grade": {
"type": "string",
"pattern": "^[A-D]$" # Regex pattern
"pattern": "^[A-D]$", # Regex pattern
},
"email": {
"type": "string",
"pattern": "^[a-zA-Z0-9._%+-]+@[a-zA-Z0-9.-]+\\.[a-zA-Z]{2,}$"
"pattern": "^[a-zA-Z0-9._%+-]+@[a-zA-Z0-9.-]+\\.[a-zA-Z]{2,}$",
},
"work_history": {
"type": "array",
"items": {
"type": "object",
"properties": {
"company": {
"type": "string"
},
"company": {"type": "string"},
"duration": {
"type": "number",
"minimum": 0.0,
"maximum": 100.0, # Numeric range
},
"position": {
"type": "string"
}
"position": {"type": "string"},
},
"required": ["company", "duration", "position"],
"additionalProperties": False
"additionalProperties": False,
},
"minItems": 0,
"maxItems": 3
}
"maxItems": 3,
},
},
"required":
["name", "age", "skills", "grade", "email", "work_history"],
"additionalProperties": False
"required": ["name", "age", "skills", "grade", "email", "work_history"],
"additionalProperties": False,
}

@@ -94,67 +87,60 @@ def unsupported_json_schema():
"properties": {
"score": {
"type": "integer",
"multipleOf": 5 # Numeric multiple
"multipleOf": 5, # Numeric multiple
},
"tags": {
"type": "array",
"items": {
"type": "string",
"minLength": 10,
"maxLength": 20
}
}
"items": {"type": "string", "minLength": 10, "maxLength": 20},
},
},
"required": ["score", "tags"],
"additionalProperties": False
"additionalProperties": False,
}

@pytest.fixture
def sample_definition_json_schema():
return {
'$defs': {
'Step': {
'properties': {
'explanation': {
'title': 'Explanation',
'type': 'string'
},
'output': {
'title': 'Output',
'type': 'string'
}
"$defs": {
"Step": {
"properties": {
"explanation": {"title": "Explanation", "type": "string"},
"output": {"title": "Output", "type": "string"},
},
'required': ['explanation', 'output'],
'title': 'Step',
'type': 'object'
"required": ["explanation", "output"],
"title": "Step",
"type": "object",
}
},
'properties': {
'steps': {
'items': {
'$ref': '#/$defs/Step'
},
'title': 'Steps',
'type': 'array'
"properties": {
"steps": {
"items": {"$ref": "#/$defs/Step"},
"title": "Steps",
"type": "array",
},
'final_answer': {
'title': 'Final Answer',
'type': 'string'
}
"final_answer": {"title": "Final Answer", "type": "string"},
},
'required': ['steps', 'final_answer'],
'title': 'MathReasoning',
'type': 'object',
"additionalProperties": False
"required": ["steps", "final_answer"],
"title": "MathReasoning",
"type": "object",
"additionalProperties": False,
}

@pytest.fixture
def sample_structured_outputs_choices():
return [
"Python", "Java", "JavaScript", "C++", "C#", "PHP", "TypeScript",
"Ruby", "Swift", "Kotlin"
"Python",
"Java",
"JavaScript",
"C++",
"C#",
"PHP",
"TypeScript",
"Ruby",
"Swift",
"Kotlin",
]

@@ -172,11 +158,11 @@ number ::= "1" | "2"

@pytest.fixture
def sample_sql_lark():
return ("""
return """
start: select_statement
select_statement: "SELECT" column "from" table "where" condition
column: "col_1" | "col_2"
table: "table_1" | "table_2"
condition: column "=" number
number: "1" | "2"
""")
"""

@@ -22,8 +22,11 @@ from vllm.entrypoints.llm import LLM
from vllm.outputs import RequestOutput
from vllm.platforms import current_platform
from vllm.reasoning.abs_reasoning_parsers import ReasoningParserManager
from vllm.sampling_params import (GuidedDecodingParams, SamplingParams,
StructuredOutputsParams)
from vllm.sampling_params import (
GuidedDecodingParams,
SamplingParams,
StructuredOutputsParams,
)

if TYPE_CHECKING:
from vllm.config import TokenizerMode
@@ -44,22 +47,18 @@ EAGLE_SPEC_CONFIG = {
PARAMS_MODELS_BACKENDS_TOKENIZER_MODE = [
("mistralai/Ministral-8B-Instruct-2410", "xgrammar", "auto", None),
("mistralai/Ministral-8B-Instruct-2410", "guidance", "auto", None),
("mistralai/Ministral-8B-Instruct-2410", "lm-format-enforcer", "auto",
None),
("mistralai/Ministral-8B-Instruct-2410", "lm-format-enforcer", "auto", None),
("mistralai/Ministral-8B-Instruct-2410", "xgrammar", "mistral", None),
("Qwen/Qwen2.5-1.5B-Instruct", "xgrammar", "auto", None),
("Qwen/Qwen2.5-1.5B-Instruct", "lm-format-enforcer", "auto", None),
#FIXME: This tests are flaky on CI thus disabled. Tracking in Issue #24402
# FIXME: This tests are flaky on CI thus disabled. Tracking in Issue #24402
# ("mistralai/Ministral-8B-Instruct-2410", "outlines", "auto", None),
# ("mistralai/Ministral-8B-Instruct-2410", "outlines", "mistral", None),
#("Qwen/Qwen2.5-1.5B-Instruct", "guidance", "auto"),
("mistralai/Ministral-8B-Instruct-2410", "outlines", "auto",
NGRAM_SPEC_CONFIG),
("mistralai/Ministral-8B-Instruct-2410", "guidance", "auto",
NGRAM_SPEC_CONFIG),
# ("Qwen/Qwen2.5-1.5B-Instruct", "guidance", "auto"),
("mistralai/Ministral-8B-Instruct-2410", "outlines", "auto", NGRAM_SPEC_CONFIG),
("mistralai/Ministral-8B-Instruct-2410", "guidance", "auto", NGRAM_SPEC_CONFIG),
("Qwen/Qwen2.5-1.5B-Instruct", "xgrammar", "auto", NGRAM_SPEC_CONFIG),
("meta-llama/Meta-Llama-3.1-8B-Instruct", "xgrammar", "auto",
EAGLE_SPEC_CONFIG)
("meta-llama/Meta-Llama-3.1-8B-Instruct", "xgrammar", "auto", EAGLE_SPEC_CONFIG),
]

PARAMS_MODELS_TOKENIZER_MODE = [
@@ -82,19 +81,16 @@ class CarDescription(BaseModel):


def test_guided_decoding_deprecated():
with pytest.warns(DeprecationWarning,
match="GuidedDecodingParams is deprecated.*"):
with pytest.warns(DeprecationWarning, match="GuidedDecodingParams is deprecated.*"):
guided_decoding = GuidedDecodingParams(json_object=True)

structured_outputs = StructuredOutputsParams(json_object=True)
assert fields(guided_decoding) == fields(structured_outputs)

with pytest.warns(DeprecationWarning,
match="guided_decoding is deprecated.*"):
with pytest.warns(DeprecationWarning, match="guided_decoding is deprecated.*"):
sp1 = SamplingParams(guided_decoding=guided_decoding)

with pytest.warns(DeprecationWarning,
match="guided_decoding is deprecated.*"):
with pytest.warns(DeprecationWarning, match="guided_decoding is deprecated.*"):
sp2 = SamplingParams.from_optional(guided_decoding=guided_decoding)

assert sp1 == sp2
@@ -104,7 +100,8 @@ def test_guided_decoding_deprecated():
@pytest.mark.skip_global_cleanup
@pytest.mark.parametrize(
"model_name, backend, tokenizer_mode, speculative_config",
PARAMS_MODELS_BACKENDS_TOKENIZER_MODE)
PARAMS_MODELS_BACKENDS_TOKENIZER_MODE,
)
def test_structured_output(
monkeypatch: pytest.MonkeyPatch,
sample_json_schema: dict[str, Any],
@@ -125,15 +122,17 @@ def test_structured_output(

# Use a single LLM instance for several scenarios to
# speed up the test suite.
llm = LLM(model=model_name,
enforce_eager=True,
max_model_len=1024,
structured_outputs_config=dict(backend=backend,
disable_any_whitespace=backend
in {"xgrammar", "guidance"}),
seed=120,
tokenizer_mode=tokenizer_mode,
speculative_config=speculative_config)
llm = LLM(
model=model_name,
enforce_eager=True,
max_model_len=1024,
structured_outputs_config=dict(
backend=backend, disable_any_whitespace=backend in {"xgrammar", "guidance"}
),
seed=120,
tokenizer_mode=tokenizer_mode,
speculative_config=speculative_config,
)

#
# Test 1: Generate JSON output based on a provided schema
@@ -141,11 +140,14 @@ def test_structured_output(
sampling_params = SamplingParams(
temperature=1.0,
max_tokens=4096,
structured_outputs=StructuredOutputsParams(json=sample_json_schema))
structured_outputs=StructuredOutputsParams(json=sample_json_schema),
)

prompt = ("Give an example JSON for an employee profile that fits this "
"schema. Make the response as short as possible. Schema: "
f"{sample_json_schema}")
prompt = (
"Give an example JSON for an employee profile that fits this "
"schema. Make the response as short as possible. Schema: "
f"{sample_json_schema}"
)
outputs = llm.generate(
[prompt] * 2,
sampling_params=sampling_params,
@@ -161,7 +163,7 @@ def test_structured_output(

generated_text = output.outputs[0].text
assert generated_text is not None
if backend != 'lm-format-enforcer':
if backend != "lm-format-enforcer":
assert "\n" not in generated_text
print(f"Prompt: {prompt!r}, Generated text: {generated_text!r}")
try:
@@ -169,7 +171,8 @@ def test_structured_output(
except json.JSONDecodeError as e:
pytest.fail(
f"Invalid JSON from backend={backend}: {generated_text!r}\n"
f"Schema: {sample_json_schema}\nError: {e}")
f"Schema: {sample_json_schema}\nError: {e}"
)
jsonschema.validate(instance=output_json, schema=sample_json_schema)

#
@@ -180,14 +183,18 @@ def test_structured_output(
temperature=1.0,
max_tokens=4096,
n=2,
structured_outputs=StructuredOutputsParams(json_object=True))
structured_outputs=StructuredOutputsParams(json_object=True),
)

outputs = llm.generate(prompts=(
"Generate a JSON object with curly braces for a person with "
"name and age fields for John Smith who is 31 years old. "
"Make the response as short as possible."),
sampling_params=sampling_params,
use_tqdm=True)
outputs = llm.generate(
prompts=(
"Generate a JSON object with curly braces for a person with "
"name and age fields for John Smith who is 31 years old. "
"Make the response as short as possible."
),
sampling_params=sampling_params,
use_tqdm=True,
)

assert outputs is not None
for output in outputs:
@@ -209,25 +216,30 @@ def test_structured_output(
sampling_params = SamplingParams(
temperature=1.0,
max_tokens=4096,
structured_outputs=StructuredOutputsParams(
json=unsupported_json_schema))
structured_outputs=StructuredOutputsParams(json=unsupported_json_schema),
)
if backend.startswith("xgrammar"):
with pytest.raises(ValueError,
match="The provided JSON schema contains features "
"not supported by xgrammar."):

prompt = (f"Give an example JSON for an employee profile that "
f"fits this schema: {unsupported_json_schema}. "
f"Make the response as short as possible.")
with pytest.raises(
ValueError,
match="The provided JSON schema contains features "
"not supported by xgrammar.",
):
prompt = (
f"Give an example JSON for an employee profile that "
f"fits this schema: {unsupported_json_schema}. "
f"Make the response as short as possible."
)
llm.generate(
[prompt] * 2,
sampling_params=sampling_params,
use_tqdm=True,
)
else:
prompt = (f"Give an example JSON object for a grade that "
f"fits this schema: {unsupported_json_schema}. "
f"Make the response as short as possible.")
prompt = (
f"Give an example JSON object for a grade that "
f"fits this schema: {unsupported_json_schema}. "
f"Make the response as short as possible."
)
outputs = llm.generate(
prompt,
sampling_params=sampling_params,
@@ -253,12 +265,14 @@ def test_structured_output(
|
||||
temperature=0.8,
|
||||
top_p=0.95,
|
||||
max_tokens=1000,
|
||||
structured_outputs=StructuredOutputsParams(
|
||||
grammar=sample_sql_ebnf))
|
||||
structured_outputs=StructuredOutputsParams(grammar=sample_sql_ebnf),
|
||||
)
|
||||
outputs = llm.generate(
|
||||
("Generate a sql statement that selects col_1 from "
|
||||
"table_1 where it is equal to 1. Make the response as short as "
|
||||
"possible."),
|
||||
(
|
||||
"Generate a sql statement that selects col_1 from "
|
||||
"table_1 where it is equal to 1. Make the response as short as "
|
||||
"possible."
|
||||
),
|
||||
sampling_params=sampling_params,
|
||||
use_tqdm=True,
|
||||
)
|
||||
@@ -273,8 +287,7 @@ def test_structured_output(
|
||||
assert generated_text is not None
|
||||
|
||||
# remove spaces for comparison b/c we removed them in the grammar
|
||||
ground_truth = "SELECT col_1 from table_1 where col_1 = 1".replace(
|
||||
" ", "")
|
||||
ground_truth = "SELECT col_1 from table_1 where col_1 = 1".replace(" ", "")
|
||||
|
||||
assert generated_text.strip() == ground_truth
|
||||
|
||||
@@ -287,12 +300,14 @@ def test_structured_output(
|
||||
temperature=0.8,
|
||||
top_p=0.95,
|
||||
max_tokens=1000,
|
||||
structured_outputs=StructuredOutputsParams(
|
||||
grammar=sample_sql_lark))
|
||||
structured_outputs=StructuredOutputsParams(grammar=sample_sql_lark),
|
||||
)
|
||||
outputs = llm.generate(
|
||||
("Generate a sql statement that selects col_1 from "
|
||||
"table_1 where it is equal to 1. Make the response as short as "
|
||||
"possible."),
|
||||
(
|
||||
"Generate a sql statement that selects col_1 from "
|
||||
"table_1 where it is equal to 1. Make the response as short as "
|
||||
"possible."
|
||||
),
|
||||
sampling_params=sampling_params,
|
||||
use_tqdm=True,
|
||||
)
|
||||
@@ -308,12 +323,12 @@ def test_structured_output(
|
||||
|
||||
# use Lark to parse the output, and make sure it's a valid parse tree
|
||||
from lark import Lark
|
||||
|
||||
parser = Lark(sample_sql_lark)
|
||||
parser.parse(generated_text)
|
||||
|
||||
# remove spaces for comparison b/c we removed them in the grammar
|
||||
ground_truth = "SELECT col_1 from table_1 where col_1 = 1".replace(
|
||||
" ", "")
|
||||
ground_truth = "SELECT col_1 from table_1 where col_1 = 1".replace(" ", "")
|
||||
|
||||
assert generated_text.strip() == ground_truth
|
||||
|
||||
@@ -326,13 +341,15 @@ def test_structured_output(
|
||||
temperature=0.8,
|
||||
top_p=0.95,
|
||||
max_tokens=1000,
|
||||
structured_outputs=StructuredOutputsParams(
|
||||
grammar="not a grammar"))
|
||||
structured_outputs=StructuredOutputsParams(grammar="not a grammar"),
|
||||
)
|
||||
with pytest.raises(ValueError, match="Failed to convert the grammar "):
|
||||
llm.generate(
|
||||
("Generate a sql statement that selects col_1 from "
|
||||
"table_1 where it is equal to 1. Make the response as short "
|
||||
"as possible."),
|
||||
(
|
||||
"Generate a sql statement that selects col_1 from "
|
||||
"table_1 where it is equal to 1. Make the response as short "
|
||||
"as possible."
|
||||
),
|
||||
sampling_params=sampling_params,
|
||||
use_tqdm=True,
|
||||
)
|
||||
@@ -343,10 +360,13 @@ def test_structured_output(
|
||||
sampling_params = SamplingParams(
|
||||
temperature=0.8,
|
||||
top_p=0.95,
|
||||
structured_outputs=StructuredOutputsParams(regex=sample_regex))
|
||||
structured_outputs=StructuredOutputsParams(regex=sample_regex),
|
||||
)
|
||||
|
||||
prompt = (f"Give an example IPv4 address with this regex: {sample_regex}. "
|
||||
f"Make the response as short as possible.")
|
||||
prompt = (
|
||||
f"Give an example IPv4 address with this regex: {sample_regex}. "
|
||||
f"Make the response as short as possible."
|
||||
)
|
||||
outputs = llm.generate(
|
||||
[prompt] * 2,
|
||||
sampling_params=sampling_params,
|
||||
@@ -371,11 +391,15 @@ def test_structured_output(
|
||||
temperature=0.8,
|
||||
top_p=0.95,
|
||||
structured_outputs=StructuredOutputsParams(
|
||||
choice=sample_structured_outputs_choices))
|
||||
choice=sample_structured_outputs_choices
|
||||
),
|
||||
)
|
||||
|
||||
outputs = llm.generate(
|
||||
("The best language for type-safe systems programming is "
|
||||
"(Make the response as short as possible.) "),
|
||||
(
|
||||
"The best language for type-safe systems programming is "
|
||||
"(Make the response as short as possible.) "
|
||||
),
|
||||
sampling_params=sampling_params,
|
||||
use_tqdm=True,
|
||||
)
|
||||
@@ -397,12 +421,15 @@ def test_structured_output(
|
||||
sampling_params = SamplingParams(
|
||||
temperature=1.0,
|
||||
max_tokens=1000,
|
||||
structured_outputs=StructuredOutputsParams(json=json_schema))
|
||||
structured_outputs=StructuredOutputsParams(json=json_schema),
|
||||
)
|
||||
|
||||
outputs = llm.generate(
|
||||
("Generate a JSON with the brand, model and car_type of the most "
|
||||
"iconic car from the 90's. Make the response as short as "
|
||||
"possible."),
|
||||
(
|
||||
"Generate a JSON with the brand, model and car_type of the most "
|
||||
"iconic car from the 90's. Make the response as short as "
|
||||
"possible."
|
||||
),
|
||||
sampling_params=sampling_params,
|
||||
use_tqdm=True,
|
||||
)
|
||||
@@ -422,7 +449,8 @@ def test_structured_output(
|
||||
except json.JSONDecodeError as e:
|
||||
pytest.fail(
|
||||
f"Invalid JSON from backend={backend}: {generated_text!r}\n"
|
||||
f"Schema: {json_schema}\nError: {e}")
|
||||
f"Schema: {json_schema}\nError: {e}"
|
||||
)
|
||||
jsonschema.validate(instance=output_json, schema=json_schema)
|
||||
|
||||
#
|
||||
@@ -436,21 +464,24 @@ def test_structured_output(
|
||||
"description": {
|
||||
"type": "string",
|
||||
"maxLength": max_length,
|
||||
"minLength": min_length
|
||||
"minLength": min_length,
|
||||
}
|
||||
},
|
||||
"required": ["description"],
|
||||
"additionalProperties": False
|
||||
"additionalProperties": False,
|
||||
}
|
||||
|
||||
sampling_params = SamplingParams(
|
||||
temperature=1.0,
|
||||
max_tokens=4096,
|
||||
structured_outputs=StructuredOutputsParams(json=json_schema))
|
||||
structured_outputs=StructuredOutputsParams(json=json_schema),
|
||||
)
|
||||
|
||||
outputs = llm.generate(
|
||||
("Generate a description of a frog using 50 characters. "
|
||||
"Make the response as short as possible."),
|
||||
(
|
||||
"Generate a description of a frog using 50 characters. "
|
||||
"Make the response as short as possible."
|
||||
),
|
||||
sampling_params=sampling_params,
|
||||
use_tqdm=True,
|
||||
)
|
||||
@@ -470,7 +501,8 @@ def test_structured_output(
|
||||
except json.JSONDecodeError as e:
|
||||
pytest.fail(
|
||||
f"Invalid JSON from backend={backend}: {generated_text!r}\n"
|
||||
f"Schema: {json_schema}\nError: {e}")
|
||||
f"Schema: {json_schema}\nError: {e}"
|
||||
)
|
||||
jsonschema.validate(instance=output_json, schema=json_schema)
|
||||
|
||||
if backend not in ["outlines", "lm-format-enforcer"]:
|
||||
@@ -478,29 +510,28 @@ def test_structured_output(
|
||||
# Test 11: Generate structured output using structural_tag format
|
||||
#
|
||||
structural_tag_config = {
|
||||
"type":
|
||||
"structural_tag",
|
||||
"structures": [{
|
||||
"begin": "<function=get_weather>",
|
||||
"schema": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"city": {
|
||||
"type": "string"
|
||||
}
|
||||
"type": "structural_tag",
|
||||
"structures": [
|
||||
{
|
||||
"begin": "<function=get_weather>",
|
||||
"schema": {
|
||||
"type": "object",
|
||||
"properties": {"city": {"type": "string"}},
|
||||
"additionalProperties": False,
|
||||
},
|
||||
"additionalProperties": False
|
||||
},
|
||||
"end": "</function>"
|
||||
}],
|
||||
"triggers": ["<function="]
|
||||
"end": "</function>",
|
||||
}
|
||||
],
|
||||
"triggers": ["<function="],
|
||||
}
|
||||
|
||||
sampling_params = SamplingParams(
|
||||
temperature=0.0,
|
||||
max_tokens=4096,
|
||||
structured_outputs=StructuredOutputsParams(
|
||||
structural_tag=json.dumps(structural_tag_config)))
|
||||
structural_tag=json.dumps(structural_tag_config)
|
||||
),
|
||||
)
|
||||
|
||||
prompt = """
|
||||
You have access to the following function to retrieve the weather in a city:
|
||||
@@ -542,9 +573,7 @@ Make the response as short as possible.
|
||||
"""
|
||||
|
||||
# Change this once other backends support structural_tag
|
||||
outputs = llm.generate(prompt,
|
||||
sampling_params=sampling_params,
|
||||
use_tqdm=True)
|
||||
outputs = llm.generate(prompt, sampling_params=sampling_params, use_tqdm=True)
|
||||
assert outputs is not None
|
||||
|
||||
for output in outputs:
|
||||
@@ -554,12 +583,13 @@ Make the response as short as possible.
|
||||
assert generated_text is not None
|
||||
|
||||
# Search for function call pattern in the response
|
||||
function_call_pattern = r'<function=get_weather>(.*?)</function>'
|
||||
function_call_pattern = r"<function=get_weather>(.*?)</function>"
|
||||
matches = re.findall(function_call_pattern, generated_text)
|
||||
|
||||
if not matches:
|
||||
print(f"Warning: No function calls found in response: "
|
||||
f"{generated_text!r}")
|
||||
print(
|
||||
f"Warning: No function calls found in response: {generated_text!r}"
|
||||
)
|
||||
continue
|
||||
|
||||
# Take the first function call if multiple are found
|
||||
@@ -570,16 +600,22 @@ Make the response as short as possible.
|
||||
assert isinstance(json_content["city"], str)
|
||||
print(f"Found valid function call: {generated_text!r}")
|
||||
except (json.JSONDecodeError, AssertionError) as e:
|
||||
pytest.fail("Invalid function call format: "
|
||||
f"{generated_text!r}\nError: {str(e)}")
|
||||
pytest.fail(
|
||||
f"Invalid function call format: {generated_text!r}\nError: {str(e)}"
|
||||
)
|
||||
|
||||
|
||||
@pytest.mark.skip_global_cleanup
|
||||
@pytest.mark.parametrize(
|
||||
"model_name, backend, tokenizer_mode, reasoning_parser, speculative_config", # noqa: E501
|
||||
[
|
||||
("deepseek-ai/DeepSeek-R1-Distill-Qwen-1.5B", "xgrammar", "auto",
|
||||
"deepseek_r1", NGRAM_SPEC_CONFIG),
|
||||
(
|
||||
"deepseek-ai/DeepSeek-R1-Distill-Qwen-1.5B",
|
||||
"xgrammar",
|
||||
"auto",
|
||||
"deepseek_r1",
|
||||
NGRAM_SPEC_CONFIG,
|
||||
),
|
||||
("Qwen/Qwen3-1.7B", "xgrammar", "auto", "deepseek_r1", None),
|
||||
],
|
||||
)
|
||||
@@ -605,27 +641,25 @@ def test_structured_output_with_reasoning_matrices(
|
||||
enforce_eager=bool(not current_platform.is_tpu()),
|
||||
max_model_len=1024,
|
||||
max_num_seqs=16,
|
||||
structured_outputs_config=dict(backend=backend,
|
||||
disable_any_whitespace=backend
|
||||
in {"xgrammar", "guidance"},
|
||||
reasoning_parser=reasoning_parser),
|
||||
structured_outputs_config=dict(
|
||||
backend=backend,
|
||||
disable_any_whitespace=backend in {"xgrammar", "guidance"},
|
||||
reasoning_parser=reasoning_parser,
|
||||
),
|
||||
tokenizer_mode=tokenizer_mode,
|
||||
speculative_config=speculative_config,
|
||||
)
|
||||
tokenizer = llm.get_tokenizer()
|
||||
reasoner = ReasoningParserManager.get_reasoning_parser(reasoning_parser)(
|
||||
tokenizer=tokenizer)
|
||||
tokenizer=tokenizer
|
||||
)
|
||||
|
||||
reasoning_prompt = "Solve the following math problem step-by-step, then provide the final answer as JSON object with a single key 'result'. Make sure to correct your reasoning if there are any issue should it arise.\nProblem: What is 5 * 8 + 2?" # noqa: E501
|
||||
reasoning_schema = {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"result": {
|
||||
"type": "integer"
|
||||
}
|
||||
},
|
||||
"properties": {"result": {"type": "integer"}},
|
||||
"required": ["result"],
|
||||
"additionalProperties": False
|
||||
"additionalProperties": False,
|
||||
}
|
||||
if "Qwen3" in model_name:
|
||||
reasoning_prompt += "<think>\n"
|
||||
@@ -646,11 +680,8 @@ def test_structured_output_with_reasoning_matrices(
|
||||
assert output is not None and isinstance(output, RequestOutput)
|
||||
prompt = output.prompt
|
||||
generated_text = output.outputs[0].text
|
||||
reasoning_content, content = run_reasoning_extraction(
|
||||
reasoner, [generated_text])
|
||||
print(
|
||||
f"Prompt: {prompt!r}\nReasoning: {reasoning_content!r}\nContent: {content!r}"
|
||||
)
|
||||
reasoning_content, content = run_reasoning_extraction(reasoner, [generated_text])
|
||||
print(f"Prompt: {prompt!r}\nReasoning: {reasoning_content!r}\nContent: {content!r}")
|
||||
|
||||
assert content is not None and reasoning_content is not None
|
||||
output_json = json.loads(content)
|
||||
@@ -658,8 +689,7 @@ def test_structured_output_with_reasoning_matrices(
|
||||
|
||||
|
||||
@pytest.mark.skip_global_cleanup
|
||||
@pytest.mark.parametrize("model_name, tokenizer_mode",
|
||||
PARAMS_MODELS_TOKENIZER_MODE)
|
||||
@pytest.mark.parametrize("model_name, tokenizer_mode", PARAMS_MODELS_TOKENIZER_MODE)
|
||||
def test_structured_output_auto_mode(
|
||||
monkeypatch: pytest.MonkeyPatch,
|
||||
unsupported_json_schema: dict[str, Any],
|
||||
@@ -668,30 +698,32 @@ def test_structured_output_auto_mode(
|
||||
):
|
||||
monkeypatch.setenv("VLLM_USE_V1", "1")
|
||||
|
||||
llm = LLM(model=model_name,
|
||||
max_model_len=1024,
|
||||
structured_outputs_config=dict(backend="auto"),
|
||||
tokenizer_mode=tokenizer_mode)
|
||||
llm = LLM(
|
||||
model=model_name,
|
||||
max_model_len=1024,
|
||||
structured_outputs_config=dict(backend="auto"),
|
||||
tokenizer_mode=tokenizer_mode,
|
||||
)
|
||||
|
||||
sampling_params = SamplingParams(
|
||||
temperature=1.0,
|
||||
max_tokens=1000,
|
||||
structured_outputs=StructuredOutputsParams(
|
||||
json=unsupported_json_schema))
|
||||
structured_outputs=StructuredOutputsParams(json=unsupported_json_schema),
|
||||
)
|
||||
|
||||
prompts = (
|
||||
"Give an example JSON object for a grade "
|
||||
"that fits this schema: "
|
||||
f"{unsupported_json_schema}. Make the response as short as possible.")
|
||||
f"{unsupported_json_schema}. Make the response as short as possible."
|
||||
)
|
||||
# This would fail with the default of "xgrammar", but in "auto"
|
||||
# we will handle fallback automatically.
|
||||
outputs = llm.generate(prompts,
|
||||
sampling_params=sampling_params,
|
||||
use_tqdm=True)
|
||||
outputs = llm.generate(prompts, sampling_params=sampling_params, use_tqdm=True)
|
||||
# Make sure `auto` backend handling doesn't mess up sampling_params
|
||||
# and that we can reuse it without error.
|
||||
outputs.extend(
|
||||
llm.generate(prompts, sampling_params=sampling_params, use_tqdm=True))
|
||||
llm.generate(prompts, sampling_params=sampling_params, use_tqdm=True)
|
||||
)
|
||||
|
||||
assert outputs is not None
|
||||
for output in outputs:
|
||||
@@ -710,27 +742,24 @@ def test_structured_output_auto_mode(
|
||||
def test_guidance_no_additional_properties(monkeypatch: pytest.MonkeyPatch):
|
||||
monkeypatch.setenv("VLLM_USE_V1", "1")
|
||||
|
||||
llm = LLM(model="Qwen/Qwen2.5-1.5B-Instruct",
|
||||
max_model_len=1024,
|
||||
structured_outputs_config=dict(
|
||||
backend="guidance",
|
||||
disable_any_whitespace=True,
|
||||
disable_additional_properties=True))
|
||||
llm = LLM(
|
||||
model="Qwen/Qwen2.5-1.5B-Instruct",
|
||||
max_model_len=1024,
|
||||
structured_outputs_config=dict(
|
||||
backend="guidance",
|
||||
disable_any_whitespace=True,
|
||||
disable_additional_properties=True,
|
||||
),
|
||||
)
|
||||
|
||||
schema = {
|
||||
'type': 'object',
|
||||
'properties': {
|
||||
'a1': {
|
||||
'type': 'string'
|
||||
},
|
||||
'a2': {
|
||||
'type': 'string'
|
||||
},
|
||||
'a3': {
|
||||
'type': 'string'
|
||||
}
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"a1": {"type": "string"},
|
||||
"a2": {"type": "string"},
|
||||
"a3": {"type": "string"},
|
||||
},
|
||||
'required': ['a1', 'a2', 'a3'],
|
||||
"required": ["a1", "a2", "a3"],
|
||||
}
|
||||
|
||||
prompt = (
|
||||
@@ -738,18 +767,19 @@ def test_guidance_no_additional_properties(monkeypatch: pytest.MonkeyPatch):
|
||||
"helpful assistant.<|im_end|>\n<|im_start|>user\nPlease generate a "
|
||||
"large JSON object with key-value pairs a1=b1, a2=b2, ..., a20=b20. "
|
||||
"Make the response as short as possible."
|
||||
"<|im_end|>\n<|im_start|>assistant\n")
|
||||
"<|im_end|>\n<|im_start|>assistant\n"
|
||||
)
|
||||
|
||||
def generate_with_backend(backend):
|
||||
structured_outputs_params = StructuredOutputsParams(
|
||||
json=schema,
|
||||
backend=backend,
|
||||
disable_any_whitespace=True,
|
||||
disable_additional_properties=True)
|
||||
disable_additional_properties=True,
|
||||
)
|
||||
sampling_params = SamplingParams(
|
||||
temperature=0,
|
||||
max_tokens=256,
|
||||
structured_outputs=structured_outputs_params)
|
||||
temperature=0, max_tokens=256, structured_outputs=structured_outputs_params
|
||||
)
|
||||
|
||||
outputs = llm.generate(prompt, sampling_params=sampling_params)
|
||||
assert outputs is not None
|
||||
@@ -794,16 +824,18 @@ def test_structured_output_batched_with_non_structured_outputs_requests(
|
||||
structured_outputs_prompt = (
|
||||
"Give an example JSON for an employee profile that fits this "
|
||||
"schema. Make the response as short as possible. Schema: "
|
||||
f"{sample_json_schema}")
|
||||
f"{sample_json_schema}"
|
||||
)
|
||||
|
||||
non_structured_outputs_prompt = "The diameter of the Earth in kilometers is "
|
||||
|
||||
prompts = [structured_outputs_prompt, non_structured_outputs_prompt]
|
||||
sampling_params = [
|
||||
SamplingParams(temperature=1.0,
|
||||
max_tokens=400,
|
||||
structured_outputs=StructuredOutputsParams(
|
||||
json=sample_json_schema)),
|
||||
SamplingParams(
|
||||
temperature=1.0,
|
||||
max_tokens=400,
|
||||
structured_outputs=StructuredOutputsParams(json=sample_json_schema),
|
||||
),
|
||||
# No max tokens, temp=0 to assert on contents
|
||||
SamplingParams(
|
||||
seed=42,
|
||||
@@ -812,9 +844,9 @@ def test_structured_output_batched_with_non_structured_outputs_requests(
|
||||
),
|
||||
]
|
||||
|
||||
outputs = llm.generate(prompts=prompts,
|
||||
sampling_params=sampling_params,
|
||||
use_tqdm=True)
|
||||
outputs = llm.generate(
|
||||
prompts=prompts, sampling_params=sampling_params, use_tqdm=True
|
||||
)
|
||||
|
||||
assert outputs is not None
|
||||
|
||||
@@ -837,8 +869,7 @@ def test_structured_output_batched_with_non_structured_outputs_requests(
|
||||
# First prompt is structured outputs, expect valid JSON
|
||||
assert "\n" not in generated_text
|
||||
output_json = json.loads(generated_text)
|
||||
jsonschema.validate(instance=output_json,
|
||||
schema=sample_json_schema)
|
||||
jsonschema.validate(instance=output_json, schema=sample_json_schema)
|
||||
else:
|
||||
# Second prompt is not structured outputs, expect valid output
|
||||
# Cannot assert on exact output, but we can expect it to be factual
|
||||
|
||||
@@ -23,9 +23,9 @@ def default_server_args():
|
||||
@pytest.fixture(scope="module")
|
||||
def server_with_store(default_server_args):
|
||||
with RemoteOpenAIServer(
|
||||
MODEL_NAME,
|
||||
default_server_args,
|
||||
env_dict={"VLLM_ENABLE_RESPONSES_API_STORE": "1"},
|
||||
MODEL_NAME,
|
||||
default_server_args,
|
||||
env_dict={"VLLM_ENABLE_RESPONSES_API_STORE": "1"},
|
||||
) as remote_server:
|
||||
yield remote_server
|
||||
|
||||
|
||||
@@ -36,24 +36,14 @@ async def test_instructions(client: openai.AsyncOpenAI):
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_chat(client: openai.AsyncOpenAI):
|
||||
response = await client.responses.create(input=[
|
||||
{
|
||||
"role": "system",
|
||||
"content": "Finish the answer with QED."
|
||||
},
|
||||
{
|
||||
"role": "user",
|
||||
"content": "What is 5 * 3?"
|
||||
},
|
||||
{
|
||||
"role": "assistant",
|
||||
"content": "15. QED."
|
||||
},
|
||||
{
|
||||
"role": "user",
|
||||
"content": "Multiply the result by 2."
|
||||
},
|
||||
], )
|
||||
response = await client.responses.create(
|
||||
input=[
|
||||
{"role": "system", "content": "Finish the answer with QED."},
|
||||
{"role": "user", "content": "What is 5 * 3?"},
|
||||
{"role": "assistant", "content": "15. QED."},
|
||||
{"role": "user", "content": "Multiply the result by 2."},
|
||||
],
|
||||
)
|
||||
print(response)
|
||||
|
||||
output_text = response.output[-1].content[0].text
|
||||
@@ -63,15 +53,14 @@ async def test_chat(client: openai.AsyncOpenAI):
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_chat_with_input_type(client: openai.AsyncOpenAI):
|
||||
response = await client.responses.create(input=[
|
||||
{
|
||||
"role": "user",
|
||||
"content": [{
|
||||
"type": "input_text",
|
||||
"text": "Hello!"
|
||||
}],
|
||||
},
|
||||
], )
|
||||
response = await client.responses.create(
|
||||
input=[
|
||||
{
|
||||
"role": "user",
|
||||
"content": [{"type": "input_text", "text": "Hello!"}],
|
||||
},
|
||||
],
|
||||
)
|
||||
print(response)
|
||||
assert response.status == "completed"
|
||||
|
||||
@@ -99,6 +88,6 @@ async def test_streaming(client: openai.AsyncOpenAI):
|
||||
assert isinstance(events[0], openai_responses_types.ResponseCreatedEvent)
|
||||
assert any(
|
||||
isinstance(event, openai_responses_types.ResponseTextDeltaEvent)
|
||||
for event in events)
|
||||
assert isinstance(events[-1],
|
||||
openai_responses_types.ResponseCompletedEvent)
|
||||
for event in events
|
||||
)
|
||||
assert isinstance(events[-1], openai_responses_types.ResponseCompletedEvent)
|
||||
|
||||
@@ -38,9 +38,9 @@ def default_image_server_args():
|
||||
@pytest.fixture(scope="module")
|
||||
def image_server(default_image_server_args):
|
||||
with RemoteOpenAIServer(
|
||||
MODEL_NAME,
|
||||
default_image_server_args,
|
||||
env_dict={"VLLM_ENABLE_RESPONSES_API_STORE": "1"},
|
||||
MODEL_NAME,
|
||||
default_image_server_args,
|
||||
env_dict={"VLLM_ENABLE_RESPONSES_API_STORE": "1"},
|
||||
) as remote_server:
|
||||
yield remote_server
|
||||
|
||||
@@ -54,8 +54,7 @@ async def client(image_server):
|
||||
@pytest.fixture(scope="session")
|
||||
def base64_encoded_image(local_asset_server) -> dict[str, str]:
|
||||
return {
|
||||
image_url:
|
||||
encode_image_base64(local_asset_server.get_image_asset(image_url))
|
||||
image_url: encode_image_base64(local_asset_server.get_image_asset(image_url))
|
||||
for image_url in TEST_IMAGE_ASSETS
|
||||
}
|
||||
|
||||
@@ -63,24 +62,23 @@ def base64_encoded_image(local_asset_server) -> dict[str, str]:
|
||||
@pytest.mark.asyncio
|
||||
@pytest.mark.parametrize("model_name", [MODEL_NAME])
|
||||
@pytest.mark.parametrize("image_url", TEST_IMAGE_ASSETS, indirect=True)
|
||||
async def test_single_chat_session_image(client: openai.AsyncOpenAI,
|
||||
model_name: str, image_url: str):
|
||||
async def test_single_chat_session_image(
|
||||
client: openai.AsyncOpenAI, model_name: str, image_url: str
|
||||
):
|
||||
content_text = "What's in this image?"
|
||||
messages = [{
|
||||
"role":
|
||||
"user",
|
||||
"content": [
|
||||
{
|
||||
"type": "input_image",
|
||||
"image_url": image_url,
|
||||
"detail": "auto",
|
||||
},
|
||||
{
|
||||
"type": "input_text",
|
||||
"text": content_text
|
||||
},
|
||||
],
|
||||
}]
|
||||
messages = [
|
||||
{
|
||||
"role": "user",
|
||||
"content": [
|
||||
{
|
||||
"type": "input_image",
|
||||
"image_url": image_url,
|
||||
"detail": "auto",
|
||||
},
|
||||
{"type": "input_text", "text": content_text},
|
||||
],
|
||||
}
|
||||
]
|
||||
|
||||
# test image url
|
||||
response = await client.responses.create(
|
||||
@@ -100,22 +98,19 @@ async def test_single_chat_session_image_base64encoded(
|
||||
base64_encoded_image: dict[str, str],
|
||||
):
|
||||
content_text = "What's in this image?"
|
||||
messages = [{
|
||||
"role":
|
||||
"user",
|
||||
"content": [
|
||||
{
|
||||
"type": "input_image",
|
||||
"image_url":
|
||||
f"data:image/jpeg;base64,{base64_encoded_image[raw_image_url]}",
|
||||
"detail": "auto",
|
||||
},
|
||||
{
|
||||
"type": "input_text",
|
||||
"text": content_text
|
||||
},
|
||||
],
|
||||
}]
|
||||
messages = [
|
||||
{
|
||||
"role": "user",
|
||||
"content": [
|
||||
{
|
||||
"type": "input_image",
|
||||
"image_url": f"data:image/jpeg;base64,{base64_encoded_image[raw_image_url]}",
|
||||
"detail": "auto",
|
||||
},
|
||||
{"type": "input_text", "text": content_text},
|
||||
],
|
||||
}
|
||||
]
|
||||
# test image base64
|
||||
response = await client.responses.create(
|
||||
model=model_name,
|
||||
@@ -129,24 +124,27 @@ async def test_single_chat_session_image_base64encoded(
|
||||
@pytest.mark.parametrize(
|
||||
"image_urls",
|
||||
[TEST_IMAGE_ASSETS[:i] for i in range(2, len(TEST_IMAGE_ASSETS))],
|
||||
indirect=True)
|
||||
async def test_multi_image_input(client: openai.AsyncOpenAI, model_name: str,
|
||||
image_urls: list[str]):
|
||||
messages = [{
|
||||
"role":
|
||||
"user",
|
||||
"content": [
|
||||
*({
|
||||
"type": "input_image",
|
||||
"image_url": image_url,
|
||||
"detail": "auto",
|
||||
} for image_url in image_urls),
|
||||
{
|
||||
"type": "input_text",
|
||||
"text": "What's in this image?"
|
||||
},
|
||||
],
|
||||
}]
|
||||
indirect=True,
|
||||
)
|
||||
async def test_multi_image_input(
|
||||
client: openai.AsyncOpenAI, model_name: str, image_urls: list[str]
|
||||
):
|
||||
messages = [
|
||||
{
|
||||
"role": "user",
|
||||
"content": [
|
||||
*(
|
||||
{
|
||||
"type": "input_image",
|
||||
"image_url": image_url,
|
||||
"detail": "auto",
|
||||
}
|
||||
for image_url in image_urls
|
||||
),
|
||||
{"type": "input_text", "text": "What's in this image?"},
|
||||
],
|
||||
}
|
||||
]
|
||||
|
||||
if len(image_urls) > MAXIMUM_IMAGES:
|
||||
with pytest.raises(openai.BadRequestError): # test multi-image input
|
||||
@@ -157,10 +155,12 @@ async def test_multi_image_input(client: openai.AsyncOpenAI, model_name: str,
|
||||
# the server should still work afterwards
|
||||
response = await client.responses.create(
|
||||
model=model_name,
|
||||
input=[{
|
||||
"role": "user",
|
||||
"content": "What's the weather like in Paris today?",
|
||||
}],
|
||||
input=[
|
||||
{
|
||||
"role": "user",
|
||||
"content": "What's the weather like in Paris today?",
|
||||
}
|
||||
],
|
||||
)
|
||||
assert len(response.output_text) > 0
|
||||
else:
|
||||
|
||||
@@ -24,8 +24,7 @@ async def test_store(client: openai.AsyncOpenAI):
|
||||
assert response.status == "completed"
|
||||
|
||||
# The response should not be found.
|
||||
with pytest.raises(openai.NotFoundError,
|
||||
match="Response with id .* not found."):
|
||||
with pytest.raises(openai.NotFoundError, match="Response with id .* not found."):
|
||||
await client.responses.retrieve(response.id)
|
||||
|
||||
|
||||
@@ -53,8 +52,8 @@ async def test_background(client: openai.AsyncOpenAI):
|
||||
@pytest.mark.asyncio
|
||||
async def test_background_error(client: openai.AsyncOpenAI):
|
||||
with pytest.raises(
|
||||
openai.BadRequestError,
|
||||
match="background can only be used when `store` is true"):
|
||||
openai.BadRequestError, match="background can only be used when `store` is true"
|
||||
):
|
||||
_ = await client.responses.create(
|
||||
input="What is 13 * 24?",
|
||||
background=True,
|
||||
@@ -87,8 +86,9 @@ async def test_cancel_completed(client: openai.AsyncOpenAI):
|
||||
response = await client.responses.create(input="Hello")
|
||||
assert response.status == "completed"
|
||||
|
||||
with pytest.raises(openai.BadRequestError,
|
||||
match="Cannot cancel a synchronous response."):
|
||||
with pytest.raises(
|
||||
openai.BadRequestError, match="Cannot cancel a synchronous response."
|
||||
):
|
||||
await client.responses.cancel(response.id)
|
||||
|
||||
|
||||
@@ -97,7 +97,8 @@ async def test_previous_response_id(client: openai.AsyncOpenAI):
|
||||
response1 = await client.responses.create(
|
||||
instructions="You are tested on your ability to retrieve the correct "
|
||||
"information from the previous response.",
|
||||
input="Hello, my name is John.")
|
||||
input="Hello, my name is John.",
|
||||
)
|
||||
|
||||
response2 = await client.responses.create(
|
||||
input="Actually, my name is not John. My real name is Mark.",
|
||||
@@ -118,7 +119,8 @@ async def test_two_responses_with_same_prev_id(client: openai.AsyncOpenAI):
|
||||
response1 = await client.responses.create(
|
||||
instructions="You are tested on your ability to retrieve the correct "
|
||||
"information from the previous response.",
|
||||
input="Hello, my name is John.")
|
||||
input="Hello, my name is John.",
|
||||
)
|
||||
|
||||
# Both response 2 and 3 use response 1 as the previous response.
|
||||
response2 = client.responses.create(
|
||||
|
||||
@@ -11,14 +11,10 @@ from pydantic import BaseModel
|
||||
async def test_structured_output(client: openai.AsyncOpenAI):
|
||||
response = await client.responses.create(
|
||||
input=[
|
||||
{
|
||||
"role": "system",
|
||||
"content": "Extract the event information."
|
||||
},
|
||||
{"role": "system", "content": "Extract the event information."},
|
||||
{
|
||||
"role": "user",
|
||||
"content":
|
||||
"Alice and Bob are going to a science fair on Friday.",
|
||||
"content": "Alice and Bob are going to a science fair on Friday.",
|
||||
},
|
||||
],
|
||||
text={
|
||||
@@ -28,18 +24,9 @@ async def test_structured_output(client: openai.AsyncOpenAI):
|
||||
"schema": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"event_name": {
|
||||
"type": "string"
|
||||
},
|
||||
"date": {
|
||||
"type": "string"
|
||||
},
|
||||
"participants": {
|
||||
"type": "array",
|
||||
"items": {
|
||||
"type": "string"
|
||||
}
|
||||
},
|
||||
"event_name": {"type": "string"},
|
||||
"date": {"type": "string"},
|
||||
"participants": {"type": "array", "items": {"type": "string"}},
|
||||
},
|
||||
"required": ["event_name", "date", "participants"],
|
||||
"additionalProperties": False,
|
||||
@@ -65,7 +52,6 @@ async def test_structured_output(client: openai.AsyncOpenAI):
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_structured_output_with_parse(client: openai.AsyncOpenAI):
|
||||
|
||||
class CalendarEvent(BaseModel):
|
||||
event_name: str
|
||||
date: str
|
||||
|
||||
@@ -40,8 +40,7 @@ async def client(server):
|
||||
"model_name",
|
||||
[MODEL_NAME],
|
||||
)
|
||||
async def test_invalid_json_schema(client: openai.AsyncOpenAI,
|
||||
model_name: str) -> None:
|
||||
async def test_invalid_json_schema(client: openai.AsyncOpenAI, model_name: str) -> None:
|
||||
invalid_json_schema = {
|
||||
"$defs": {
|
||||
"CarType": {
|
||||
@@ -51,35 +50,29 @@ async def test_invalid_json_schema(client: openai.AsyncOpenAI,
|
||||
}
|
||||
},
|
||||
"properties": {
|
||||
"brand": {
|
||||
"title": "Brand",
|
||||
"type": "string"
|
||||
},
|
||||
"model": {
|
||||
"title": "Model",
|
||||
"type": "string"
|
||||
},
|
||||
"car_type": {
|
||||
"$ref": "#/$defs/CarType"
|
||||
},
|
||||
"brand": {"title": "Brand", "type": "string"},
|
||||
"model": {"title": "Model", "type": "string"},
|
||||
"car_type": {"$ref": "#/$defs/CarType"},
|
||||
"foo": "bar",
|
||||
},
|
||||
"required": ["brand", "model", "car_type"],
|
||||
"title": "CarDescription",
|
||||
"type": "object",
|
||||
}
|
||||
prompt = ("Generate a JSON with the brand, model and car_type of"
|
||||
"the most iconic car from the 90's")
|
||||
prompt = (
|
||||
"Generate a JSON with the brand, model and car_type of"
|
||||
"the most iconic car from the 90's"
|
||||
)
|
||||
with pytest.raises((openai.BadRequestError, openai.APIError)):
|
||||
await client.chat.completions.create(
|
||||
model=model_name,
|
||||
messages=[{
|
||||
"role": "user",
|
||||
"content": prompt,
|
||||
}],
|
||||
extra_body={"structured_outputs": {
|
||||
"json": invalid_json_schema
|
||||
}},
|
||||
messages=[
|
||||
{
|
||||
"role": "user",
|
||||
"content": prompt,
|
||||
}
|
||||
],
|
||||
extra_body={"structured_outputs": {"json": invalid_json_schema}},
|
||||
)
|
||||
|
||||
|
||||
@@ -89,23 +82,22 @@ async def test_invalid_json_schema(client: openai.AsyncOpenAI,
|
||||
[MODEL_NAME],
|
||||
)
|
||||
async def test_invalid_regex(client: openai.AsyncOpenAI, model_name: str):
|
||||
prompt = ("Generate an email address for Alan Turing, who works in Enigma."
|
||||
"End in .com and new line. Example result:"
|
||||
"alan.turing@enigma.com\n")
|
||||
prompt = (
|
||||
"Generate an email address for Alan Turing, who works in Enigma."
|
||||
"End in .com and new line. Example result:"
|
||||
"alan.turing@enigma.com\n"
|
||||
)
|
||||
|
||||
with pytest.raises((openai.BadRequestError, openai.APIError)):
|
||||
await client.chat.completions.create(
|
||||
model=model_name,
|
||||
messages=[{
|
||||
"role": "user",
|
||||
"content": prompt,
|
||||
}],
|
||||
extra_body={
|
||||
"structured_outputs": {
|
||||
"regex": r"[.*"
|
||||
},
|
||||
"stop": ["\n"]
|
||||
},
|
||||
messages=[
|
||||
{
|
||||
"role": "user",
|
||||
"content": prompt,
|
||||
}
|
||||
],
|
||||
extra_body={"structured_outputs": {"regex": r"[.*"}, "stop": ["\n"]},
|
||||
)
|
||||
|
||||
|
||||
@@ -129,18 +121,20 @@ async def test_invalid_grammar(client: openai.AsyncOpenAI, model_name: str):
|
||||
number ::= "1 " | "2 "
|
||||
"""
|
||||
|
||||
prompt = ("Generate an SQL query to show the 'username' and 'email'"
|
||||
"from the 'users' table.")
|
||||
prompt = (
|
||||
"Generate an SQL query to show the 'username' and 'email'"
|
||||
"from the 'users' table."
|
||||
)
|
||||
with pytest.raises((openai.BadRequestError, openai.APIError)):
|
||||
await client.chat.completions.create(
|
||||
model=model_name,
|
||||
messages=[{
|
||||
"role": "user",
|
||||
"content": prompt,
|
||||
}],
|
||||
extra_body={
|
||||
"structured_outputs": {
|
||||
"grammar": invalid_simplified_sql_grammar
|
||||
messages=[
|
||||
{
|
||||
"role": "user",
|
||||
"content": prompt,
|
||||
}
|
||||
],
|
||||
extra_body={
|
||||
"structured_outputs": {"grammar": invalid_simplified_sql_grammar}
|
||||
},
|
||||
)
|
||||
|
||||
@@ -31,12 +31,13 @@ def default_server_args():
|
||||
]
|
||||
|
||||
|
||||
@pytest.fixture(scope="module",
|
||||
params=[["--no-enable-prefix-caching"],
|
||||
[
|
||||
"--no-enable-prefix-caching",
|
||||
"--disable-frontend-multiprocessing"
|
||||
]])
|
||||
@pytest.fixture(
|
||||
scope="module",
|
||||
params=[
|
||||
["--no-enable-prefix-caching"],
|
||||
["--no-enable-prefix-caching", "--disable-frontend-multiprocessing"],
|
||||
],
|
||||
)
|
||||
def server(default_server_args, request):
|
||||
if request.param:
|
||||
default_server_args = default_server_args + request.param
|
||||
@@ -55,12 +56,10 @@ async def client(server):
|
||||
"model_name",
|
||||
[MODEL_NAME],
|
||||
)
|
||||
async def test_single_completion(client: openai.AsyncOpenAI,
|
||||
model_name: str) -> None:
|
||||
completion = await client.completions.create(model=model_name,
|
||||
prompt="Hello, my name is",
|
||||
max_tokens=5,
|
||||
temperature=0.0)
|
||||
async def test_single_completion(client: openai.AsyncOpenAI, model_name: str) -> None:
|
||||
completion = await client.completions.create(
|
||||
model=model_name, prompt="Hello, my name is", max_tokens=5, temperature=0.0
|
||||
)
|
||||
|
||||
assert completion.id is not None
|
||||
assert completion.choices is not None and len(completion.choices) == 1
|
||||
@@ -69,7 +68,8 @@ async def test_single_completion(client: openai.AsyncOpenAI,
|
||||
assert len(choice.text) >= 5
|
||||
assert choice.finish_reason == "length"
|
||||
assert completion.usage == openai.types.CompletionUsage(
|
||||
completion_tokens=5, prompt_tokens=6, total_tokens=11)
|
||||
completion_tokens=5, prompt_tokens=6, total_tokens=11
|
||||
)
|
||||
|
||||
# test using token IDs
|
||||
completion = await client.completions.create(
|
||||
@@ -147,11 +147,12 @@ async def test_some_logprobs(client: openai.AsyncOpenAI, model_name: str):
|
||||
"model_name",
|
||||
[MODEL_NAME],
|
||||
)
|
||||
async def test_too_many_completion_logprobs(client: openai.AsyncOpenAI,
|
||||
model_name: str) -> None:
|
||||
|
||||
async def test_too_many_completion_logprobs(
|
||||
client: openai.AsyncOpenAI, model_name: str
|
||||
) -> None:
|
||||
with pytest.raises(
|
||||
(openai.BadRequestError, openai.APIError)): # test using token IDs
|
||||
(openai.BadRequestError, openai.APIError)
|
||||
): # test using token IDs
|
||||
await client.completions.create(
|
||||
model=model_name,
|
||||
prompt=[0, 0, 0, 0, 0],
|
||||
@@ -163,7 +164,8 @@ async def test_too_many_completion_logprobs(client: openai.AsyncOpenAI,
|
||||
)
|
||||
...
|
||||
with pytest.raises(
|
||||
(openai.BadRequestError, openai.APIError)): # test using token IDs
|
||||
(openai.BadRequestError, openai.APIError)
|
||||
): # test using token IDs
|
||||
stream = await client.completions.create(
|
||||
model=model_name,
|
||||
prompt=[0, 0, 0, 0, 0],
|
||||
@@ -188,13 +190,13 @@ async def test_too_many_completion_logprobs(client: openai.AsyncOpenAI,
|
||||
|
||||
|
||||
@pytest.mark.asyncio
|
||||
@pytest.mark.parametrize("model_name, prompt_logprobs", [(MODEL_NAME, -1),
|
||||
(MODEL_NAME, 0),
|
||||
(MODEL_NAME, 1),
|
||||
(MODEL_NAME, None)])
|
||||
async def test_prompt_logprobs_completion(client: openai.AsyncOpenAI,
|
||||
model_name: str,
|
||||
prompt_logprobs: Optional[int]):
|
||||
@pytest.mark.parametrize(
|
||||
"model_name, prompt_logprobs",
|
||||
[(MODEL_NAME, -1), (MODEL_NAME, 0), (MODEL_NAME, 1), (MODEL_NAME, None)],
|
||||
)
|
||||
async def test_prompt_logprobs_completion(
|
||||
client: openai.AsyncOpenAI, model_name: str, prompt_logprobs: Optional[int]
|
||||
):
|
||||
params: dict = {
|
||||
"prompt": ["A robot may not injure another robot", "My name is"],
|
||||
"model": model_name,
|
||||
@@ -223,8 +225,9 @@ async def test_prompt_logprobs_completion(client: openai.AsyncOpenAI,
|
||||
"model_name",
|
||||
[MODEL_NAME],
|
||||
)
|
||||
async def test_completion_streaming(client: openai.AsyncOpenAI,
|
||||
model_name: str) -> None:
|
||||
async def test_completion_streaming(
|
||||
client: openai.AsyncOpenAI, model_name: str
|
||||
) -> None:
|
||||
prompt = "What is an LLM?"
|
||||
|
||||
single_completion = await client.completions.create(
|
||||
@@ -234,11 +237,9 @@ async def test_completion_streaming(client: openai.AsyncOpenAI,
|
||||
temperature=0.0,
|
||||
)
|
||||
single_output = single_completion.choices[0].text
|
||||
stream = await client.completions.create(model=model_name,
|
||||
prompt=prompt,
|
||||
max_tokens=5,
|
||||
temperature=0.0,
|
||||
stream=True)
|
||||
stream = await client.completions.create(
|
||||
model=model_name, prompt=prompt, max_tokens=5, temperature=0.0, stream=True
|
||||
)
|
||||
chunks: list[str] = []
|
||||
finish_reason_count = 0
|
||||
async for chunk in stream:
|
||||
@@ -257,8 +258,7 @@ async def test_completion_streaming(client: openai.AsyncOpenAI,
|
||||
"model_name",
|
||||
[MODEL_NAME],
|
||||
)
|
||||
async def test_parallel_no_streaming(client: openai.AsyncOpenAI,
|
||||
model_name: str):
|
||||
async def test_parallel_no_streaming(client: openai.AsyncOpenAI, model_name: str):
|
||||
"""Parallel sampling without streaming.
|
||||
A single request output contains a list of completions.
|
||||
"""
|
||||
@@ -268,27 +268,26 @@ async def test_parallel_no_streaming(client: openai.AsyncOpenAI,
|
||||
max_tokens = 50 # we want some to finish earlier than others
|
||||
|
||||
# High temperature to maximize chance of unique completions.
|
||||
completion = await client.completions.create(model=model_name,
|
||||
prompt=prompt,
|
||||
max_tokens=max_tokens,
|
||||
n=n,
|
||||
temperature=1.0,
|
||||
stream=False,
|
||||
logprobs=0,
|
||||
seed=42)
|
||||
completion = await client.completions.create(
|
||||
model=model_name,
|
||||
prompt=prompt,
|
||||
max_tokens=max_tokens,
|
||||
n=n,
|
||||
temperature=1.0,
|
||||
stream=False,
|
||||
logprobs=0,
|
||||
seed=42,
|
||||
)
|
||||
|
||||
# Assert `n` completions
|
||||
num_completions = len(completion.choices)
|
||||
assert num_completions == n, (
|
||||
f"Num completions {num_completions} but expected {n}.")
|
||||
assert num_completions == n, f"Num completions {num_completions} but expected {n}."
|
||||
completion_repeats: dict[str, int] = {}
|
||||
output_token_lengths = set()
|
||||
for idx, choice in enumerate(completion.choices):
|
||||
# Assert correct completion index & some finish reason.
|
||||
assert choice.index == idx, (
|
||||
f"Index {choice.index} but expected {idx}.")
|
||||
assert choice.finish_reason is not None, (
|
||||
"None finish_reason is invalid.")
|
||||
assert choice.index == idx, f"Index {choice.index} but expected {idx}."
|
||||
assert choice.finish_reason is not None, "None finish_reason is invalid."
|
||||
text = choice.text
|
||||
completion_repeats[text] = completion_repeats.get(text, 0) + 1
|
||||
output_token_lengths.add(len(choice.logprobs.tokens))
|
||||
@@ -297,13 +296,10 @@ async def test_parallel_no_streaming(client: openai.AsyncOpenAI,
|
||||
# Assert `n` unique completions
|
||||
num_unique = len(completion_repeats)
|
||||
if num_unique != n:
|
||||
repeats = {
|
||||
txt: num
|
||||
for (txt, num) in completion_repeats.items() if num > 1
|
||||
}
|
||||
repeats = {txt: num for (txt, num) in completion_repeats.items() if num > 1}
|
||||
raise AssertionError(
|
||||
f"Expected {n} unique completions, got {num_unique};"
|
||||
f" repeats: {repeats}.")
|
||||
f"Expected {n} unique completions, got {num_unique}; repeats: {repeats}."
|
||||
)
|
||||
|
||||
|
||||
@pytest.mark.asyncio
|
||||
@@ -321,13 +317,15 @@ async def test_parallel_streaming(client: openai.AsyncOpenAI, model_name: str):
|
||||
n = 3
|
||||
max_tokens = 50 # we want some to finish earlier than others
|
||||
|
||||
stream = await client.completions.create(model=model_name,
|
||||
prompt=prompt,
|
||||
max_tokens=max_tokens,
|
||||
n=n,
|
||||
temperature=1.0,
|
||||
stream=True,
|
||||
seed=42)
|
||||
stream = await client.completions.create(
|
||||
model=model_name,
|
||||
prompt=prompt,
|
||||
max_tokens=max_tokens,
|
||||
n=n,
|
||||
temperature=1.0,
|
||||
stream=True,
|
||||
seed=42,
|
||||
)
|
||||
chunks: list[list[str]] = [[] for _ in range(n)]
|
||||
finish_reason_count = 0
|
||||
async for chunk in stream:
|
||||
@@ -338,7 +336,8 @@ async def test_parallel_streaming(client: openai.AsyncOpenAI, model_name: str):
finish_reason_count += 1
# Assert `n` completions with correct finish reasons
assert finish_reason_count == n, (
f"Expected {n} completions with valid indices and finish_reason.")
f"Expected {n} completions with valid indices and finish_reason."
)
completion_repeats: dict[str, int] = {}
chunk_lengths = set()
for chunk in chunks:
@@ -346,7 +345,8 @@ async def test_parallel_streaming(client: openai.AsyncOpenAI, model_name: str):
# Assert correct number of completion tokens
chunk_lengths.add(chunk_len)
assert chunk_len <= max_tokens, (
f"max_tokens={max_tokens} but chunk len is {chunk_len}.")
f"max_tokens={max_tokens} but chunk len is {chunk_len}."
)
text = "".join(chunk)
completion_repeats[text] = completion_repeats.get(text, 0) + 1
print(text)
@@ -355,12 +355,10 @@ async def test_parallel_streaming(client: openai.AsyncOpenAI, model_name: str):
# Assert `n` unique completions
num_unique = len(completion_repeats)
if num_unique != n:
repeats = {
txt: num
for (txt, num) in completion_repeats.items() if num > 1
}
raise AssertionError(f"{num_unique} unique completions, expected {n};"
f" repeats: {repeats}")
repeats = {txt: num for (txt, num) in completion_repeats.items() if num > 1}
raise AssertionError(
f"{num_unique} unique completions, expected {n}; repeats: {repeats}"
)


@pytest.mark.asyncio
@@ -368,53 +366,55 @@ async def test_parallel_streaming(client: openai.AsyncOpenAI, model_name: str):
"model_name",
[MODEL_NAME],
)
async def test_completion_stream_options(client: openai.AsyncOpenAI,
model_name: str):
async def test_completion_stream_options(client: openai.AsyncOpenAI, model_name: str):
prompt = "What is the capital of France?"

# Test stream=True, stream_options=
# {"include_usage": False, "continuous_usage_stats": False}
stream = await client.completions.create(model=model_name,
prompt=prompt,
max_tokens=5,
temperature=0.0,
stream=True,
stream_options={
"include_usage": False,
"continuous_usage_stats":
False,
})
stream = await client.completions.create(
model=model_name,
prompt=prompt,
max_tokens=5,
temperature=0.0,
stream=True,
stream_options={
"include_usage": False,
"continuous_usage_stats": False,
},
)

async for chunk in stream:
assert chunk.usage is None

# Test stream=True, stream_options=
# {"include_usage": False, "continuous_usage_stats": True}
stream = await client.completions.create(model=model_name,
prompt=prompt,
max_tokens=5,
temperature=0.0,
stream=True,
stream_options={
"include_usage": False,
"continuous_usage_stats":
True,
})
stream = await client.completions.create(
model=model_name,
prompt=prompt,
max_tokens=5,
temperature=0.0,
stream=True,
stream_options={
"include_usage": False,
"continuous_usage_stats": True,
},
)
async for chunk in stream:
assert chunk.usage is None

# Test stream=True, stream_options=
# {"include_usage": True, "continuous_usage_stats": False}
stream = await client.completions.create(model=model_name,
prompt=prompt,
max_tokens=5,
temperature=0.0,
stream=True,
stream_options={
"include_usage": True,
"continuous_usage_stats":
False,
})
stream = await client.completions.create(
model=model_name,
prompt=prompt,
max_tokens=5,
temperature=0.0,
stream=True,
stream_options={
"include_usage": True,
"continuous_usage_stats": False,
},
)
async for chunk in stream:
if chunk.choices[0].finish_reason is None:
assert chunk.usage is None
@@ -425,57 +425,63 @@ async def test_completion_stream_options(client: openai.AsyncOpenAI,
assert final_chunk.usage.prompt_tokens > 0
assert final_chunk.usage.completion_tokens > 0
assert final_chunk.usage.total_tokens == (
final_chunk.usage.prompt_tokens +
final_chunk.usage.completion_tokens)
final_chunk.usage.prompt_tokens + final_chunk.usage.completion_tokens
)
assert final_chunk.choices == []

# Test stream=True, stream_options=
# {"include_usage": True, "continuous_usage_stats": True}
stream = await client.completions.create(model=model_name,
prompt=prompt,
max_tokens=5,
temperature=0.0,
stream=True,
stream_options={
"include_usage": True,
"continuous_usage_stats":
True,
})
stream = await client.completions.create(
model=model_name,
prompt=prompt,
max_tokens=5,
temperature=0.0,
stream=True,
stream_options={
"include_usage": True,
"continuous_usage_stats": True,
},
)
async for chunk in stream:
assert chunk.usage is not None
assert chunk.usage.prompt_tokens > 0
assert chunk.usage.completion_tokens > 0
assert chunk.usage.total_tokens == (chunk.usage.prompt_tokens +
chunk.usage.completion_tokens)
assert chunk.usage.total_tokens == (
chunk.usage.prompt_tokens + chunk.usage.completion_tokens
)
if chunk.choices[0].finish_reason is not None:
final_chunk = await stream.__anext__()
assert final_chunk.usage is not None
assert final_chunk.usage.prompt_tokens > 0
assert final_chunk.usage.completion_tokens > 0
assert final_chunk.usage.total_tokens == (
final_chunk.usage.prompt_tokens +
final_chunk.usage.completion_tokens)
final_chunk.usage.prompt_tokens + final_chunk.usage.completion_tokens
)
assert final_chunk.choices == []
# Test stream=False, stream_options=
# {"include_usage": None}
with pytest.raises(BadRequestError):
await client.completions.create(model=model_name,
prompt=prompt,
max_tokens=5,
temperature=0.0,
stream=False,
stream_options={"include_usage": None})
await client.completions.create(
model=model_name,
prompt=prompt,
max_tokens=5,
temperature=0.0,
stream=False,
stream_options={"include_usage": None},
)

# Test stream=False, stream_options=
# {"include_usage": True}
with pytest.raises(BadRequestError):
await client.completions.create(model=model_name,
prompt=prompt,
max_tokens=5,
temperature=0.0,
stream=False,
stream_options={"include_usage": True})
await client.completions.create(
model=model_name,
prompt=prompt,
max_tokens=5,
temperature=0.0,
stream=False,
stream_options={"include_usage": True},
)

# Test stream=False, stream_options=
# {"continuous_usage_stats": None}
@@ -486,7 +492,8 @@ async def test_completion_stream_options(client: openai.AsyncOpenAI,
max_tokens=5,
temperature=0.0,
stream=False,
stream_options={"continuous_usage_stats": None})
stream_options={"continuous_usage_stats": None},
)

# Test stream=False, stream_options=
# {"continuous_usage_stats": True}
@@ -497,7 +504,8 @@ async def test_completion_stream_options(client: openai.AsyncOpenAI,
max_tokens=5,
temperature=0.0,
stream=False,
stream_options={"continuous_usage_stats": True})
stream_options={"continuous_usage_stats": True},
)


@pytest.mark.asyncio
@@ -528,15 +536,19 @@ async def test_batch_completions(client: openai.AsyncOpenAI, model_name: str):
extra_body=dict(
# NOTE: this has to be true for n > 1 in vLLM, but
# not necessary for official client.
use_beam_search=True),
use_beam_search=True
),
)
assert len(batch.choices) == 4
assert batch.choices[0].text != batch.choices[
1].text, "beam search should be different"
assert batch.choices[0].text == batch.choices[
2].text, "two copies of the same prompt should be the same"
assert batch.choices[1].text == batch.choices[
3].text, "two copies of the same prompt should be the same"
assert batch.choices[0].text != batch.choices[1].text, (
"beam search should be different"
)
assert batch.choices[0].text == batch.choices[2].text, (
"two copies of the same prompt should be the same"
)
assert batch.choices[1].text == batch.choices[3].text, (
"two copies of the same prompt should be the same"
)

# test streaming
batch = await client.completions.create(
@@ -560,31 +572,30 @@ async def test_batch_completions(client: openai.AsyncOpenAI, model_name: str):
[MODEL_NAME],
)
@pytest.mark.parametrize("logprobs_arg", [1, 0])
async def test_echo_logprob_completion(client: openai.AsyncOpenAI,
model_name: str, logprobs_arg: int):
async def test_echo_logprob_completion(
client: openai.AsyncOpenAI, model_name: str, logprobs_arg: int
):
tokenizer = get_tokenizer(tokenizer_name=MODEL_NAME)
# test using text and token IDs
for prompt in ("Hello, my name is", [0, 0, 0, 0, 0]):
completion = await client.completions.create(model=model_name,
prompt=prompt,
max_tokens=5,
temperature=0.0,
echo=True,
logprobs=logprobs_arg)
completion = await client.completions.create(
model=model_name,
prompt=prompt,
max_tokens=5,
temperature=0.0,
echo=True,
logprobs=logprobs_arg,
)

prompt_text = tokenizer.decode(prompt) if isinstance(prompt,
list) else prompt
prompt_text = tokenizer.decode(prompt) if isinstance(prompt, list) else prompt
assert re.search(r"^" + prompt_text, completion.choices[0].text)
logprobs = completion.choices[0].logprobs
assert logprobs is not None
assert len(logprobs.text_offset) > 5
assert (len(logprobs.token_logprobs) > 5
and logprobs.token_logprobs[0] is None)
assert (len(logprobs.top_logprobs) > 5
and logprobs.top_logprobs[0] is None)
assert len(logprobs.token_logprobs) > 5 and logprobs.token_logprobs[0] is None
assert len(logprobs.top_logprobs) > 5 and logprobs.top_logprobs[0] is None
for top_logprobs in logprobs.top_logprobs[1:]:
assert max(logprobs_arg,
1) <= len(top_logprobs) <= logprobs_arg + 1
assert max(logprobs_arg, 1) <= len(top_logprobs) <= logprobs_arg + 1
assert len(logprobs.tokens) > 5
@@ -593,8 +604,7 @@ async def test_echo_logprob_completion(client: openai.AsyncOpenAI,
"model_name",
[MODEL_NAME],
)
async def test_invalid_json_schema(client: openai.AsyncOpenAI,
model_name: str) -> None:
async def test_invalid_json_schema(client: openai.AsyncOpenAI, model_name: str) -> None:
invalid_json_schema = {
"$defs": {
"CarType": {
@@ -604,32 +614,24 @@ async def test_invalid_json_schema(client: openai.AsyncOpenAI,
}
},
"properties": {
"brand": {
"title": "Brand",
"type": "string"
},
"model": {
"title": "Model",
"type": "string"
},
"car_type": {
"$ref": "#/$defs/CarType"
},
"brand": {"title": "Brand", "type": "string"},
"model": {"title": "Model", "type": "string"},
"car_type": {"$ref": "#/$defs/CarType"},
"foo": "bar",
},
"required": ["brand", "model", "car_type"],
"title": "CarDescription",
"type": "object",
}
prompt = ("Generate a JSON with the brand, model and car_type of"
"the most iconic car from the 90's")
prompt = (
"Generate a JSON with the brand, model and car_type of"
"the most iconic car from the 90's"
)
with pytest.raises((openai.BadRequestError, openai.APIError)):
await client.completions.create(
model=model_name,
prompt=prompt,
extra_body={"structured_outputs": {
"json": invalid_json_schema
}},
extra_body={"structured_outputs": {"json": invalid_json_schema}},
)
@@ -639,20 +641,17 @@ async def test_invalid_json_schema(client: openai.AsyncOpenAI,
[MODEL_NAME],
)
async def test_invalid_regex(client: openai.AsyncOpenAI, model_name: str):
prompt = ("Generate an email address for Alan Turing, who works in Enigma."
"End in .com and new line. Example result:"
"alan.turing@enigma.com\n")
prompt = (
"Generate an email address for Alan Turing, who works in Enigma."
"End in .com and new line. Example result:"
"alan.turing@enigma.com\n"
)

with pytest.raises((openai.BadRequestError, openai.APIError)):
await client.completions.create(
model=model_name,
prompt=prompt,
extra_body={
"structured_outputs": {
"regex": r"[.*"
},
"stop": ["\n"]
},
extra_body={"structured_outputs": {"regex": r"[.*"}, "stop": ["\n"]},
)
@@ -676,29 +675,29 @@ async def test_invalid_grammar(client: openai.AsyncOpenAI, model_name: str):
number ::= "1 " | "2 "
"""

prompt = ("Generate an SQL query to show the 'username' and 'email'"
"from the 'users' table.")
prompt = (
"Generate an SQL query to show the 'username' and 'email'"
"from the 'users' table."
)
with pytest.raises((openai.BadRequestError, openai.APIError)):
await client.completions.create(
model=model_name,
prompt=prompt,
extra_body={
"structured_outputs": {
"grammar": invalid_simplified_sql_grammar
}
"structured_outputs": {"grammar": invalid_simplified_sql_grammar}
},
)


@pytest.mark.asyncio
async def test_completion_with_empty_prompt_embeds(
client: openai.AsyncOpenAI) -> None:
async def test_completion_with_empty_prompt_embeds(client: openai.AsyncOpenAI) -> None:
"""Test completion with empty prompt embeds."""
payload: dict[str, object] = {"prompt": "Hello", "prompt_embeds": []}
headers: dict[str, str] = {"Content-Type": "application/json"}
# base_url = http://localhost:8000/v1/completions
response = requests.post(f"{client.base_url}completions",
headers=headers,
json=payload)
response = requests.post(
f"{client.base_url}completions", headers=headers, json=payload
)
assert response.status_code == 200, (
f"Expected status code 200, got {response.status_code}. ")
f"Expected status code 200, got {response.status_code}. "
)
@@ -37,9 +37,9 @@ def default_image_embeds_server_args() -> list[str]:

@pytest.fixture(scope="module")
def server_with_image_embeds(default_image_embeds_server_args):
with RemoteOpenAIServer(MODEL_NAME,
default_image_embeds_server_args,
max_wait_seconds=600) as remote_server:
with RemoteOpenAIServer(
MODEL_NAME, default_image_embeds_server_args, max_wait_seconds=600
) as remote_server:
yield remote_server


@@ -57,7 +57,7 @@ def encode_image_embedding_to_base64(image_embedding) -> str:
torch.save(image_embedding, buffer)
buffer.seek(0)
binary_data = buffer.read()
base64_image_embedding = base64.b64encode(binary_data).decode('utf-8')
base64_image_embedding = base64.b64encode(binary_data).decode("utf-8")
return base64_image_embedding


@@ -75,19 +75,13 @@ async def test_completions_with_image_embeds(
base64_image_embedding = encode_image_embedding_to_base64(image_embeds)
chat_completion = await client_with_image_embeds.chat.completions.create(
messages=[
{"role": "system", "content": "You are a helpful assistant."},
{
"role": "system",
"content": "You are a helpful assistant."
},
{
"role":
"user",
"role": "user",
"content": [
{
"type":
"text",
"text":
"Describe these images separately. For each image,"
"type": "text",
"text": "Describe these images separately. For each image,"
"reply with a short sentence (no more than 10 words).",
},
{
@@ -50,16 +50,13 @@ async def client(server):
"model_name",
[MODEL_NAME],
)
async def test_single_completion(client: openai.AsyncOpenAI,
server: RemoteOpenAIServer,
model_name: str) -> None:

async def test_single_completion(
client: openai.AsyncOpenAI, server: RemoteOpenAIServer, model_name: str
) -> None:
async def make_request():
completion = await client.completions.create(
model=model_name,
prompt="Hello, my name is",
max_tokens=10,
temperature=1.0)
model=model_name, prompt="Hello, my name is", max_tokens=10, temperature=1.0
)

assert completion.id is not None
assert completion.choices is not None and len(completion.choices) == 1
@@ -108,9 +105,9 @@ async def test_single_completion(client: openai.AsyncOpenAI,
"model_name",
[MODEL_NAME],
)
async def test_completion_streaming(client: openai.AsyncOpenAI,
server: RemoteOpenAIServer,
model_name: str) -> None:
async def test_completion_streaming(
client: openai.AsyncOpenAI, server: RemoteOpenAIServer, model_name: str
) -> None:
prompt = "What is an LLM?"

async def make_streaming_request():
@@ -124,11 +121,9 @@ async def test_completion_streaming(client: openai.AsyncOpenAI,
single_output = single_completion.choices[0].text

# Perform the streaming request
stream = await client.completions.create(model=model_name,
prompt=prompt,
max_tokens=5,
temperature=0.0,
stream=True)
stream = await client.completions.create(
model=model_name, prompt=prompt, max_tokens=5, temperature=0.0, stream=True
)
chunks: list[str] = []
finish_reason_count = 0
last_chunk = None
@@ -139,16 +134,15 @@ async def test_completion_streaming(client: openai.AsyncOpenAI,
last_chunk = chunk # Keep track of the last chunk

# finish reason should only return in the last block for OpenAI API
assert finish_reason_count == 1, (
"Finish reason should appear exactly once.")
assert last_chunk is not None, (
"Stream should have yielded at least one chunk.")
assert last_chunk.choices[
0].finish_reason == "length", "Finish reason should be 'length'."
assert finish_reason_count == 1, "Finish reason should appear exactly once."
assert last_chunk is not None, "Stream should have yielded at least one chunk."
assert last_chunk.choices[0].finish_reason == "length", (
"Finish reason should be 'length'."
)
# Check that the combined text matches the non-streamed version.
assert "".join(
chunks
) == single_output, "Streamed output should match non-streamed output."
assert "".join(chunks) == single_output, (
"Streamed output should match non-streamed output."
)
return True # Indicate success for this request

# Test single request
@@ -162,9 +156,9 @@ async def test_completion_streaming(client: openai.AsyncOpenAI,
tasks = [make_streaming_request() for _ in range(num_requests)]
results = await asyncio.gather(*tasks)

assert len(
results
) == num_requests, f"Expected {num_requests} results, got {len(results)}"
assert len(results) == num_requests, (
f"Expected {num_requests} results, got {len(results)}"
)
assert all(results), "Not all streaming requests completed successfully."

await asyncio.sleep(0.5)
@@ -172,9 +166,9 @@ async def test_completion_streaming(client: openai.AsyncOpenAI,
tasks = [make_streaming_request() for _ in range(num_requests)]
results = await asyncio.gather(*tasks)

assert len(
results
) == num_requests, f"Expected {num_requests} results, got {len(results)}"
assert len(results) == num_requests, (
f"Expected {num_requests} results, got {len(results)}"
)
assert all(results), "Not all streaming requests completed successfully."

# Check request balancing via Prometheus metrics if DP_SIZE > 1