[Bugfix] Fix KeyError in parse_response_input for reasoning items with optional content (#34499)

Signed-off-by: jeonsworld <jeonsworld@gmail.com>
This commit is contained in:
Eunkwang Jeon
2026-03-13 01:13:36 +09:00
committed by GitHub
parent f444c05c32
commit bdc2343454
3 changed files with 101 additions and 6 deletions

View File

@@ -14,6 +14,7 @@ from vllm.entrypoints.openai.parser.harmony_utils import (
parse_chat_output,
)
from vllm.entrypoints.openai.responses.harmony import (
response_input_to_harmony,
response_previous_input_to_harmony,
)
@@ -841,3 +842,89 @@ class TestGetSystemMessage:
assert channel in valid_channels, (
f"{channel} missing when with_custom_tools={with_tools}"
)
class TestResponseInputToHarmonyReasoningItem:
    """Tests for response_input_to_harmony handling of reasoning input items.

    Per the OpenAI spec, ResponseReasoningItem.content is
    Optional[List[Content]] = None. Clients like langchain-openai may omit
    this field when constructing multi-turn input from previous responses.

    Reasoning items with content are converted to Harmony messages on the
    'analysis' channel. All content items are concatenated. Items without
    content return None (skipped by the caller).
    """

    @staticmethod
    def _reasoning_item(**fields):
        """Build a minimal reasoning input item, merging in extra *fields*."""
        return {"type": "reasoning", "id": "rs_123", **fields}

    @staticmethod
    def _convert(item):
        """Run the conversion under test with no previous responses."""
        return response_input_to_harmony(item, prev_responses=[])

    def test_reasoning_with_single_content(self):
        """Test reasoning item with a single content entry."""
        text = "Thinking step by step"
        item = self._reasoning_item(
            content=[{"type": "reasoning_text", "text": text}]
        )
        msg = self._convert(item)
        assert msg is not None
        assert msg.author.role == Role.ASSISTANT
        assert msg.content[0].text == text
        assert msg.channel == "analysis"

    def test_reasoning_with_multiple_content_items(self):
        """Test reasoning item with multiple content entries concatenated."""
        parts = [
            "First, let me analyze",
            "Second, I should consider",
            "Finally, the answer is",
        ]
        item = self._reasoning_item(
            content=[{"type": "reasoning_text", "text": p} for p in parts]
        )
        msg = self._convert(item)
        assert msg is not None
        assert msg.author.role == Role.ASSISTANT
        # All entries are joined with newlines, preserving order.
        assert msg.content[0].text == "\n".join(parts)
        assert msg.channel == "analysis"

    def test_reasoning_without_content_returns_none(self):
        """Test reasoning item without content field returns None."""
        item = self._reasoning_item(
            summary=[{"type": "summary_text", "text": "Thinking about math"}]
        )
        assert self._convert(item) is None

    def test_reasoning_with_none_content_returns_none(self):
        """Test reasoning item with content=None returns None."""
        item = self._reasoning_item(
            content=None,
            summary=[{"type": "summary_text", "text": "Thinking about math"}],
        )
        assert self._convert(item) is None

    def test_reasoning_with_empty_content_returns_none(self):
        """Test reasoning item with empty content list returns None."""
        item = self._reasoning_item(content=[])
        assert self._convert(item) is None

View File

@@ -138,8 +138,12 @@ def _parse_chat_format_message(chat_msg: dict) -> list[Message]:
def response_input_to_harmony(
response_msg: ResponseInputOutputItem,
prev_responses: list[ResponseOutputItem | ResponseReasoningItem],
) -> Message:
"""Convert a single ResponseInputOutputItem into a Harmony Message."""
) -> Message | None:
"""Convert a single ResponseInputOutputItem into a Harmony Message.
Returns None for reasoning items with empty or absent content so
the caller can skip them.
"""
if not isinstance(response_msg, dict):
response_msg = response_msg.model_dump()
if "type" not in response_msg or response_msg["type"] == "message":
@@ -172,9 +176,13 @@ def response_input_to_harmony(
response_msg["output"],
)
elif response_msg["type"] == "reasoning":
content = response_msg["content"]
assert len(content) == 1
msg = Message.from_role_and_content(Role.ASSISTANT, content[0]["text"])
content = response_msg.get("content")
if content and len(content) >= 1:
reasoning_text = "\n".join(item["text"] for item in content)
msg = Message.from_role_and_content(Role.ASSISTANT, reasoning_text)
msg = msg.with_channel("analysis")
else:
return None
elif response_msg["type"] == "function_call":
msg = Message.from_role_and_content(Role.ASSISTANT, response_msg["arguments"])
msg = msg.with_channel("commentary")

View File

@@ -1086,7 +1086,7 @@ class OpenAIServingResponses(OpenAIServing):
prev_outputs = []
for response_msg in request.input:
new_msg = response_input_to_harmony(response_msg, prev_outputs)
if new_msg.author.role != "system":
if new_msg is not None and new_msg.author.role != "system":
messages.append(new_msg)
# User passes in a tool call request and its output. We need