[Frontend][responsesAPI][1/n] convert responses API tool input to chat completions tool format (#28231)

Signed-off-by: Andrew Xia <axia@fb.com>
Co-authored-by: Andrew Xia <axia@fb.com>
Co-authored-by: Chauncey <chaunceyjiang@gmail.com>
This commit is contained in:
Andrew Xia
2025-11-12 20:47:22 -08:00
committed by GitHub
parent 7c38ed0f1c
commit 1a0b157a2e
5 changed files with 76 additions and 21 deletions

View File

@@ -48,7 +48,6 @@ from openai.types.responses.response_output_text import Logprob, LogprobTopLogpr
from openai.types.responses.response_reasoning_item import (
Content as ResponseReasoningTextContent,
)
from openai.types.responses.tool import Tool
from openai_harmony import Message as OpenAIHarmonyMessage
from vllm import envs
@@ -94,7 +93,11 @@ from vllm.entrypoints.openai.protocol import (
)
from vllm.entrypoints.openai.serving_engine import OpenAIServing
from vllm.entrypoints.openai.serving_models import OpenAIServingModels
from vllm.entrypoints.responses_utils import construct_chat_message_with_tool_call
from vllm.entrypoints.responses_utils import (
construct_chat_message_with_tool_call,
convert_tool_responses_to_completions_format,
extract_tool_types,
)
from vllm.entrypoints.tool_server import ToolServer
from vllm.inputs.data import TokensPrompt as EngineTokensPrompt
from vllm.logger import init_logger
@@ -108,23 +111,6 @@ from vllm.utils import random_uuid
logger = init_logger(__name__)
def extract_tool_types(tools: list[Tool]) -> set[str]:
    """Extract the set of tool type identifiers from the given tools.

    For a non-MCP tool, its ``type`` string is collected. For an MCP tool,
    its ``server_label`` is collected instead — but only when that label is
    allowlisted in ``envs.VLLM_GPT_OSS_SYSTEM_TOOL_MCP_LABELS``; a
    non-allowlisted MCP tool contributes nothing to the result.
    """
    tool_types: set[str] = set()
    for tool in tools:
        if tool.type == "mcp":
            # Allow the MCP Tool type to enable built in tools if the
            # server_label is allowlisted in
            # envs.VLLM_GPT_OSS_SYSTEM_TOOL_MCP_LABELS
            if tool.server_label in envs.VLLM_GPT_OSS_SYSTEM_TOOL_MCP_LABELS:
                tool_types.add(tool.server_label)
        else:
            tool_types.add(tool.type)
    return tool_types
class OpenAIServingResponses(OpenAIServing):
def __init__(
self,
@@ -513,7 +499,10 @@ class OpenAIServingResponses(OpenAIServing):
):
tool_dicts = None
else:
tool_dicts = [tool.model_dump() for tool in request.tools]
tool_dicts = [
convert_tool_responses_to_completions_format(tool.model_dump())
for tool in request.tools
]
# Construct the input messages.
messages = self._construct_input_messages(request, prev_response)
_, request_prompts, engine_prompts = await self._preprocess_chat(

View File

@@ -10,7 +10,9 @@ from openai.types.chat.chat_completion_message_tool_call_param import (
Function as FunctionCallTool,
)
from openai.types.responses import ResponseFunctionToolCall
from openai.types.responses.tool import Tool
from vllm import envs
from vllm.entrypoints.openai.protocol import (
ChatCompletionMessageParam,
ResponseInputOutputItem,
@@ -43,3 +45,33 @@ def construct_chat_message_with_tool_call(
tool_call_id=item.get("call_id"),
)
return item # type: ignore
def extract_tool_types(tools: list[Tool]) -> set[str]:
    """Collect the tool type identifiers requested by ``tools``.

    Non-MCP tools contribute their ``type`` string directly. MCP tools are
    special-cased: instead of the literal ``"mcp"`` type, the tool's
    ``server_label`` is collected — and only when that label appears in the
    ``envs.VLLM_GPT_OSS_SYSTEM_TOOL_MCP_LABELS`` allowlist. An MCP tool
    whose label is not allowlisted adds nothing.
    """
    collected: set[str] = set()
    for entry in tools:
        if entry.type != "mcp":
            collected.add(entry.type)
        elif entry.server_label in envs.VLLM_GPT_OSS_SYSTEM_TOOL_MCP_LABELS:
            # Allowlisted MCP server labels enable built-in tools; the label
            # itself (not "mcp") is what downstream consumers key on.
            collected.add(entry.server_label)
    return collected
def convert_tool_responses_to_completions_format(tool: dict) -> dict:
    """Wrap a flat Responses-API tool schema in Chat Completions form.

    Converts::

        {"type": "function", "name": "...", "description": "...",
         "parameters": {...}}

    into::

        {"type": "function", "function": {...}}

    The original dict is nested unmodified under the ``"function"`` key.
    """
    wrapped = {"type": "function"}
    wrapped["function"] = tool
    return wrapped