[Bugfix] GLM-4 tool parser: incremental string streaming (#33218)

Signed-off-by: QwertyJack <7554089+QwertyJack@users.noreply.github.com>
Co-authored-by: QwertyJack <7554089+QwertyJack@users.noreply.github.com>
This commit is contained in:
jack
2026-02-02 11:13:31 +08:00
committed by GitHub
parent 318b120766
commit 7c036432fc
2 changed files with 726 additions and 97 deletions

View File

@@ -6,6 +6,7 @@ import json
import pytest
from vllm.entrypoints.openai.chat_completion.protocol import ChatCompletionRequest
from vllm.entrypoints.openai.engine.protocol import FunctionCall, ToolCall
from vllm.tokenizers import get_tokenizer
from vllm.tool_parsers.glm4_moe_tool_parser import (
@@ -447,3 +448,338 @@ def test_extract_tool_calls_incomplete_tool_call(glm4_moe_tool_parser):
assert not extracted_tool_calls.tools_called
assert extracted_tool_calls.tool_calls == []
assert extracted_tool_calls.content == model_output
def _reset_streaming_state(parser):
"""Helper to reset parser streaming state."""
parser._buffer = ""
parser._in_tool_call = False
parser.current_tool_name_sent = False
parser._current_tool_name = None
parser._pending_key = None
parser._streaming_string_value = False
parser.prev_tool_call_arr = []
parser.current_tool_id = -1
parser.streamed_args_for_tool = []
parser._tool_call_ids = []
parser._args_started = []
parser._args_closed = []
parser._seen_keys = []
def test_streaming_incremental_string_value(glm4_moe_tool_parser):
    """Test incremental streaming of string argument values."""
    _reset_streaming_state(glm4_moe_tool_parser)
    # Feed the tool call to the parser one token-sized chunk at a time.
    chunks = [
        "<tool_call>",
        "get_weather\n",
        "<arg_key>city</arg_key>",
        "<arg_value>",
        "Bei",
        "jing",
        "</arg_value>",
        "</tool_call>",
    ]
    collected_fragments = []

    def _collect(delta):
        """Pull argument/name fragments out of a delta message, if any."""
        if delta is None or not getattr(delta, "tool_calls", None):
            return
        for tc in delta.tool_calls:
            func = getattr(tc, "function", None)
            if not func:
                continue
            if isinstance(func, dict):
                args, name = func.get("arguments"), func.get("name")
            else:
                args, name = func.arguments, func.name
            if args:
                collected_fragments.append(args)
            if name:
                collected_fragments.append(f"name:{name}")

    for chunk in chunks:
        _collect(
            glm4_moe_tool_parser.extract_tool_calls_streaming(
                previous_text="",
                current_text="",
                delta_text=chunk,
                previous_token_ids=[],
                current_token_ids=[],
                delta_token_ids=[],
                request=None,
            )
        )
    # The argument value must have been delivered as fragments.
    assert len(collected_fragments) > 0
    # The fragments should include the tool name and argument pieces.
    combined = "".join(collected_fragments)
    assert "get_weather" in combined or "name:get_weather" in combined
def test_streaming_empty_tool_call(glm4_moe_tool_parser):
    """Test that empty tool calls don't cause infinite loops."""
    _reset_streaming_state(glm4_moe_tool_parser)
    # A degenerate `<tool_call></tool_call>` must be consumed gracefully.
    result = glm4_moe_tool_parser.extract_tool_calls_streaming(
        previous_text="",
        current_text="",
        delta_text="<tool_call></tool_call>",
        previous_token_ids=[],
        current_token_ids=[],
        delta_token_ids=[],
        request=None,
    )
    # Reaching this point at all proves the parser did not hang; the
    # result may be None, plain content, or a tool-call delta.
    returned_something_sane = (
        result is None
        or hasattr(result, "content")
        or hasattr(result, "tool_calls")
    )
    assert returned_something_sane
    # The aborted call must not leave allocated per-tool state behind.
    assert glm4_moe_tool_parser.current_tool_id == -1
def test_streaming_prev_tool_call_arr_finalization(glm4_moe_tool_parser):
    """Test that prev_tool_call_arr contains parsed dict after tool call."""
    _reset_streaming_state(glm4_moe_tool_parser)
    # Stream one complete tool call, chunk by chunk.
    for chunk in (
        "<tool_call>get_weather\n",
        "<arg_key>city</arg_key>",
        "<arg_value>Beijing</arg_value>",
        "</tool_call>",
    ):
        glm4_moe_tool_parser.extract_tool_calls_streaming(
            previous_text="",
            current_text="",
            delta_text=chunk,
            previous_token_ids=[],
            current_token_ids=[],
            delta_token_ids=[],
            request=None,
        )
    # Completion should leave exactly one finalized entry.
    assert len(glm4_moe_tool_parser.prev_tool_call_arr) == 1
    entry = glm4_moe_tool_parser.prev_tool_call_arr[0]
    assert entry.get("name") == "get_weather"
    # Finalized arguments are stored parsed (dict), not as a JSON string.
    arguments = entry.get("arguments")
    assert isinstance(arguments, dict), f"Expected dict, got {type(arguments)}"
    assert arguments.get("city") == "Beijing"
def test_streaming_multiple_tool_calls_sequential(glm4_moe_tool_parser):
    """Test streaming multiple sequential tool calls."""
    _reset_streaming_state(glm4_moe_tool_parser)
    # Two back-to-back tool calls for the same function.
    for chunk in (
        "<tool_call>get_weather\n",
        "<arg_key>city</arg_key>",
        "<arg_value>Beijing</arg_value>",
        "</tool_call>",
        "<tool_call>get_weather\n",
        "<arg_key>city</arg_key>",
        "<arg_value>Shanghai</arg_value>",
        "</tool_call>",
    ):
        glm4_moe_tool_parser.extract_tool_calls_streaming(
            previous_text="",
            current_text="",
            delta_text=chunk,
            previous_token_ids=[],
            current_token_ids=[],
            delta_token_ids=[],
            request=None,
        )
    # Both calls should have been finalized, in order.
    finalized = glm4_moe_tool_parser.prev_tool_call_arr
    assert len(finalized) == 2
    assert finalized[0]["arguments"]["city"] == "Beijing"
    assert finalized[1]["arguments"]["city"] == "Shanghai"
def test_streaming_json_escape_in_string(glm4_moe_tool_parser):
    """Test that special characters in string values are properly escaped."""
    _reset_streaming_state(glm4_moe_tool_parser)
    # The value contains a quote and a newline, both of which need escaping.
    for chunk in (
        "<tool_call>send_message\n",
        "<arg_key>message</arg_key>",
        '<arg_value>Hello "world"\nNew line</arg_value>',
        "</tool_call>",
    ):
        glm4_moe_tool_parser.extract_tool_calls_streaming(
            previous_text="",
            current_text="",
            delta_text=chunk,
            previous_token_ids=[],
            current_token_ids=[],
            delta_token_ids=[],
            request=None,
        )
    # Whatever was accumulated for the tool must parse as valid JSON.
    assert len(glm4_moe_tool_parser.streamed_args_for_tool) == 1
    parsed = json.loads(glm4_moe_tool_parser.streamed_args_for_tool[0])
    assert "message" in parsed
    # The special characters must survive the round trip.
    assert '"' in parsed["message"] or "world" in parsed["message"]
def test_streaming_long_content_incremental(glm4_moe_tool_parser):
    """Test incremental streaming of long content (Issue #32829).

    This is the core fix: for long string values like code (4000+ chars),
    the parser should stream incrementally rather than buffering until
    complete. Many argument fragments (10+) prove incremental delivery;
    the old buffered implementation produced only 1-3.
    """
    _reset_streaming_state(glm4_moe_tool_parser)
    # Bubble sort example from Issue #32829 - realistic long content
    bubble_sort_code = '''#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Bubble Sort Implementation
"""
def bubble_sort(arr):
n = len(arr)
for i in range(n):
swapped = False
for j in range(0, n - i - 1):
if arr[j] > arr[j + 1]:
arr[j], arr[j + 1] = arr[j + 1], arr[j]
swapped = True
if not swapped:
break
return arr
if __name__ == "__main__":
test_arr = [64, 34, 25, 12, 22, 11, 90]
print(f"Original: {test_arr}")
sorted_arr = bubble_sort(test_arr.copy())
print(f"Sorted: {sorted_arr}")'''
    # The schema marks `content` as a string type; that declaration is
    # what enables incremental streaming of its value.
    request = ChatCompletionRequest(
        model=MODEL,
        messages=[],
        tools=[
            {
                "type": "function",
                "function": {
                    "name": "write_to_file",
                    "parameters": {
                        "type": "object",
                        "properties": {
                            "file_path": {"type": "string"},
                            "content": {"type": "string"},
                        },
                    },
                },
            }
        ],
    )
    # Token-like chunks: special tags arrive whole, code arrives per line.
    chunks = [
        "<tool_call>",
        "write_to_file\n",
        "<arg_key>file_path</arg_key>",
        "<arg_value>/tmp/bubble_sort.py</arg_value>",
        "<arg_key>content</arg_key>",
        "<arg_value>",
    ]
    chunks.extend(line + "\n" for line in bubble_sort_code.split("\n"))
    chunks.extend(["</arg_value>", "</tool_call>"])
    # Count how many deltas carried a non-empty arguments fragment.
    fragment_count = 0
    for chunk in chunks:
        delta = glm4_moe_tool_parser.extract_tool_calls_streaming(
            previous_text="",
            current_text="",
            delta_text=chunk,
            previous_token_ids=[],
            current_token_ids=[],
            delta_token_ids=[],
            request=request,
        )
        if delta is None or not getattr(delta, "tool_calls", None):
            continue
        for tc in delta.tool_calls:
            func = getattr(tc, "function", None)
            if not func:
                continue
            if isinstance(func, dict):
                args = func.get("arguments")
            else:
                args = getattr(func, "arguments", None)
            if args:
                fragment_count += 1
    assert fragment_count >= 10, (
        f"Expected >=10 fragments for incremental streaming, got {fragment_count}"
    )
    # The accumulated argument string must round-trip as valid JSON.
    assert len(glm4_moe_tool_parser.streamed_args_for_tool) == 1
    parsed = json.loads(glm4_moe_tool_parser.streamed_args_for_tool[0])
    assert parsed["file_path"] == "/tmp/bubble_sort.py"
    assert "def bubble_sort" in parsed["content"]
def test_extract_tool_calls_numeric_deserialization(glm4_moe_tool_parser):
    """Test that numeric arguments are deserialized as numbers, not strings."""
    model_output = """<tool_call>calculate
<arg_key>operation</arg_key>
<arg_value>add</arg_value>
<arg_key>a</arg_key>
<arg_value>42</arg_value>
<arg_key>b</arg_key>
<arg_value>3.14</arg_value>
<arg_key>enabled</arg_key>
<arg_value>true</arg_value>
</tool_call>"""
    extracted = glm4_moe_tool_parser.extract_tool_calls(
        model_output, request=None
    )  # type: ignore[arg-type]
    assert extracted.tools_called
    assert len(extracted.tool_calls) == 1
    args = json.loads(extracted.tool_calls[0].function.arguments)
    # Each value must come back with its natural Python type.
    expectations = {
        "operation": ("add", str),  # strings stay strings
        "a": (42, int),  # integer literal
        "b": (3.14, float),  # float literal
    }
    for key, (expected_value, expected_type) in expectations.items():
        assert args[key] == expected_value
        assert isinstance(args[key], expected_type)
    # Boolean checked separately: `is True` also rules out the int 1.
    assert args["enabled"] is True
    assert isinstance(args["enabled"], bool)

View File

@@ -1,5 +1,15 @@
# SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
"""
GLM-4 Tool Call Parser with incremental string streaming support.
This parser fixes the streaming issue reported in Issue #32829 where long string
parameters (e.g., file content with 4000+ characters of code) are buffered until
complete, causing multi-second delays before the user sees any content.
The fix streams string values incrementally as they arrive, providing a true
streaming experience for long content.
"""
import ast
import json
@@ -8,6 +18,7 @@ from typing import Any
import regex as re
from vllm.entrypoints.chat_utils import make_tool_call_id
from vllm.entrypoints.openai.chat_completion.protocol import (
ChatCompletionRequest,
ChatCompletionToolsParam,
@@ -30,14 +41,27 @@ logger = init_logger(__name__)
class Glm4MoeModelToolParser(ToolParser):
"""Tool parser for GLM-4 models with incremental string streaming.
This parser emits tool-call deltas incrementally as arguments arrive.
For string-type parameters, content is streamed character-by-character
rather than waiting for the complete </arg_value> tag.
"""
def __init__(self, tokenizer: TokenizerLike):
super().__init__(tokenizer)
self.current_tool_name_sent = False
self.prev_tool_call_arr: list[dict] = []
self.current_tool_id = -1
# Stateful streaming fields
self.current_tool_name_sent: bool = False
self.prev_tool_call_arr: list[dict[str, Any]] = []
self.current_tool_id: int = -1
self.streamed_args_for_tool: list[str] = []
self.tool_call_start_token = "<tool_call>"
self.tool_call_end_token = "</tool_call>"
self.tool_call_start_token: str = "<tool_call>"
self.tool_call_end_token: str = "</tool_call>"
self.arg_key_start: str = "<arg_key>"
self.arg_key_end: str = "</arg_key>"
self.arg_val_start: str = "<arg_value>"
self.arg_val_end: str = "</arg_value>"
self.tool_calls_start_token = self.tool_call_start_token
@@ -48,6 +72,7 @@ class Glm4MoeModelToolParser(ToolParser):
self.func_arg_regex = re.compile(
r"<arg_key>(.*?)</arg_key>\s*<arg_value>(.*?)</arg_value>", re.DOTALL
)
if not self.model_tokenizer:
raise ValueError(
"The model tokenizer must be passed to the ToolParser "
@@ -56,13 +81,78 @@ class Glm4MoeModelToolParser(ToolParser):
self.tool_call_start_token_id = self.vocab.get(self.tool_call_start_token)
self.tool_call_end_token_id = self.vocab.get(self.tool_call_end_token)
self._buffer = ""
self._buffer: str = ""
# Streaming state for incremental tool-call streaming
self._in_tool_call: bool = False
self._current_tool_name: str | None = None
self._pending_key: str | None = None
self._streaming_string_value: bool = False
self._tool_call_ids: list[str] = []
self._args_started: list[bool] = []
self._args_closed: list[bool] = []
self._seen_keys: list[set[str]] = []
@staticmethod
def _deserialize(value: str) -> Any:
try:
return json.loads(value)
except json.JSONDecodeError:
pass
try:
return ast.literal_eval(value)
except (ValueError, SyntaxError):
pass
return value
@staticmethod
def _json_escape_string_content(s: str) -> str:
"""JSON-escape string content for incremental streaming.
This escapes the content that goes INSIDE a JSON string (between quotes),
not including the surrounding quotes themselves.
"""
if not s:
return ""
return json.dumps(s, ensure_ascii=False)[1:-1]
@staticmethod
def _is_string_type(
tool_name: str,
arg_name: str,
tools: list[ChatCompletionToolsParam] | None,
) -> bool:
if tools is None:
return False
for tool in tools:
if tool.function.name != tool_name:
continue
if tool.function.parameters is None:
return False
arg_type = (
tool.function.parameters.get("properties", {})
.get(arg_name, {})
.get("type", None)
)
return arg_type == "string"
logger.debug("No tool named '%s'.", tool_name)
return False
@staticmethod
def _tools_enabled(request: ChatCompletionRequest) -> bool:
"""Return whether tool parsing should be applied for this request."""
try:
tools = getattr(request, "tools", None)
tool_choice = getattr(request, "tool_choice", None)
return bool(tools) and tool_choice != "none"
except Exception:
logger.exception("Failed to determine if tools are enabled.")
return False
def adjust_request(self, request: ChatCompletionRequest) -> ChatCompletionRequest:
"""
Adjust request parameters to ensure tool call tokens are not skipped
during tokenizer decoding.
"""
"""Adjust request parameters for tool call token handling."""
request = super().adjust_request(request)
if request.tools and request.tool_choice != "none":
# Ensure tool call tokens (<tool_call>, </tool_call>) are not skipped
@@ -77,42 +167,10 @@ class Glm4MoeModelToolParser(ToolParser):
model_output: str,
request: ChatCompletionRequest,
) -> ExtractedToolCallInformation:
def _is_string_type(
tool_name: str,
arg_name: str,
tools: list[ChatCompletionToolsParam] | None,
) -> bool:
if tools is None:
return False
for tool in tools:
if tool.function.name == tool_name:
if tool.function.parameters is None:
return False
arg_type = (
tool.function.parameters.get("properties", {})
.get(arg_name, {})
.get("type", None)
)
return arg_type == "string"
logger.debug("No tool named '%s'.", tool_name)
return False
def _deserialize(value: str) -> Any:
try:
return json.loads(value)
except Exception:
pass
try:
return ast.literal_eval(value)
except Exception:
pass
return value
matched_tool_calls = self.func_call_regex.findall(model_output)
logger.debug("model_output: %s", model_output)
try:
tool_calls = []
tool_calls: list[ToolCall] = []
for match in matched_tool_calls:
tc_detail = self.func_detail_regex.search(match)
if not tc_detail:
@@ -121,15 +179,15 @@ class Glm4MoeModelToolParser(ToolParser):
match,
)
continue
tc_name = tc_detail.group(1)
tc_name = tc_detail.group(1).strip()
tc_args = tc_detail.group(2)
pairs = self.func_arg_regex.findall(tc_args) if tc_args else []
arg_dct = {}
arg_dct: dict[str, Any] = {}
for key, value in pairs:
arg_key = key.strip()
arg_val = value.strip()
if not _is_string_type(tc_name, arg_key, request.tools):
arg_val = _deserialize(arg_val)
if not self._is_string_type(tc_name, arg_key, request.tools):
arg_val = self._deserialize(arg_val)
logger.debug("arg_key = %s, arg_val = %s", arg_key, arg_val)
arg_dct[arg_key] = arg_val
tool_calls.append(
@@ -166,58 +224,293 @@ class Glm4MoeModelToolParser(ToolParser):
delta_token_ids: Sequence[int],
request: ChatCompletionRequest,
) -> DeltaMessage | None:
if not self._tools_enabled(request):
return DeltaMessage(content=delta_text) if delta_text else None
self._buffer += delta_text
cur_text = self._buffer
start_idx = cur_text.find(self.tool_call_start_token)
if start_idx == -1:
self._buffer = ""
if self.current_tool_id > 0:
cur_text = ""
return DeltaMessage(content=cur_text)
logger.debug("cur_text = %s", cur_text)
end_idx = cur_text.find(self.tool_call_end_token)
if end_idx != -1:
if self.current_tool_id == -1:
self.current_tool_id = 0
self.prev_tool_call_arr = []
self.streamed_args_for_tool = []
while len(self.prev_tool_call_arr) <= self.current_tool_id:
self.prev_tool_call_arr.append({})
while len(self.streamed_args_for_tool) <= self.current_tool_id:
self.streamed_args_for_tool.append("")
extracted_tool_calls = self.extract_tool_calls(
cur_text[: end_idx + len(self.tool_call_end_token)], request
)
while True:
if not self._in_tool_call:
start_idx = self._buffer.find(self.tool_call_start_token)
if start_idx == -1:
# Check for partial start token at end of buffer
for i in range(1, len(self.tool_call_start_token)):
if self._buffer.endswith(self.tool_call_start_token[:i]):
out = self._buffer[:-i]
self._buffer = self._buffer[-i:]
return DeltaMessage(content=out) if out else None
out = self._buffer
self._buffer = ""
return DeltaMessage(content=out) if out else None
if len(extracted_tool_calls.tool_calls) == 0:
logger.warning("Failed to extract any tool calls.")
return None
tool_call = extracted_tool_calls.tool_calls[0]
self.prev_tool_call_arr[self.current_tool_id] = {
"name": tool_call.function.name,
"arguments": json.loads(tool_call.function.arguments),
}
self.streamed_args_for_tool[self.current_tool_id] = (
tool_call.function.arguments
)
delta = DeltaMessage(
content=extracted_tool_calls.content,
tool_calls=[
DeltaToolCall(
index=self.current_tool_id,
id=tool_call.id,
type=tool_call.type,
function=DeltaFunctionCall(
name=tool_call.function.name,
arguments=tool_call.function.arguments,
),
if start_idx > 0:
out = self._buffer[:start_idx]
self._buffer = self._buffer[start_idx:]
return DeltaMessage(content=out) if out else None
self._buffer = self._buffer[len(self.tool_call_start_token) :]
self._begin_tool_call()
continue
# Parse tool name first
if not self.current_tool_name_sent:
nl = self._buffer.find("\n")
ak = self._buffer.find(self.arg_key_start)
end = self._buffer.find(self.tool_call_end_token)
candidates = [i for i in [nl, ak, end] if i != -1]
if not candidates:
return None
cut = min(candidates)
tool_name = self._buffer[:cut].strip()
if tool_name == "" and cut == end:
# Handle empty tool call like `<tool_call></tool_call>`.
# Consume the tokens and reset state to avoid infinite loop.
self._buffer = self._buffer[end + len(self.tool_call_end_token) :]
self._finish_tool_call()
self._revert_last_tool_call_state()
continue
if cut == nl:
self._buffer = self._buffer[nl + 1 :]
else:
self._buffer = self._buffer[cut:]
self._current_tool_name = tool_name
self.current_tool_name_sent = True
return self._emit_tool_name_delta(tool_name)
assert self._current_tool_name is not None
# Handle incremental string value streaming
if self._streaming_string_value:
val_end = self._buffer.find(self.arg_val_end)
if val_end != -1:
raw_content = self._buffer[:val_end]
self._buffer = self._buffer[val_end + len(self.arg_val_end) :]
self._streaming_string_value = False
self._pending_key = None
escaped = self._json_escape_string_content(raw_content)
frag = escaped + '"'
self.streamed_args_for_tool[self.current_tool_id] += frag
return self._emit_tool_args_delta(frag)
else:
# Check for partial </arg_value> at end
safe_len = len(self._buffer)
for i in range(1, len(self.arg_val_end)):
if self._buffer.endswith(self.arg_val_end[:i]):
safe_len = len(self._buffer) - i
break
if safe_len > 0:
to_emit = self._buffer[:safe_len]
self._buffer = self._buffer[safe_len:]
escaped = self._json_escape_string_content(to_emit)
if escaped:
self.streamed_args_for_tool[self.current_tool_id] += escaped
return self._emit_tool_args_delta(escaped)
return None
# If we have a pending key, parse its value
if self._pending_key is not None:
val_pos = self._buffer.find(self.arg_val_start)
if val_pos == -1:
return None
if val_pos > 0:
self._buffer = self._buffer[val_pos:]
key = (self._pending_key or "").strip()
is_string = self._is_string_type(
self._current_tool_name, key, request.tools
)
if is_string:
# String type: stream incrementally
self._buffer = self._buffer[len(self.arg_val_start) :]
if key in self._seen_keys[self.current_tool_id]:
self._pending_key = None
continue
self._seen_keys[self.current_tool_id].add(key)
key_json = json.dumps(key, ensure_ascii=False)
if not self._args_started[self.current_tool_id]:
frag = "{" + key_json + ':"'
self._args_started[self.current_tool_id] = True
else:
frag = "," + key_json + ':"'
self.streamed_args_for_tool[self.current_tool_id] += frag
self._streaming_string_value = True
return self._emit_tool_args_delta(frag)
else:
# Non-string type: wait for complete value
val_end = self._buffer.find(self.arg_val_end)
if val_end == -1:
return None
raw_val = self._buffer[len(self.arg_val_start) : val_end].strip()
self._buffer = self._buffer[val_end + len(self.arg_val_end) :]
self._pending_key = None
frag = self._append_arg_fragment(
key=key,
raw_val=raw_val,
)
],
)
self.current_tool_id += 1
self._buffer = cur_text[end_idx + len(self.tool_call_end_token) :]
return delta
if frag:
return self._emit_tool_args_delta(frag)
continue
self._buffer = cur_text[start_idx:]
return DeltaMessage(content=cur_text[:start_idx])
# Parse next arg or close
end_pos = self._buffer.find(self.tool_call_end_token)
key_pos = self._buffer.find(self.arg_key_start)
if end_pos != -1 and (key_pos == -1 or end_pos < key_pos):
self._buffer = self._buffer[end_pos + len(self.tool_call_end_token) :]
frag = self._close_args_if_needed()
# Finalize prev_tool_call_arr with complete parsed arguments
if self._current_tool_name:
try:
full_args_str = self.streamed_args_for_tool[
self.current_tool_id
]
args_dict = json.loads(full_args_str)
self.prev_tool_call_arr[self.current_tool_id] = {
"name": self._current_tool_name,
"arguments": args_dict,
}
except (json.JSONDecodeError, IndexError) as e:
logger.warning(
"Failed to finalize tool call state for tool %d: %s",
self.current_tool_id,
e,
)
self._finish_tool_call()
return self._emit_tool_args_delta(frag) if frag else None
if key_pos == -1:
return None
if key_pos > 0:
self._buffer = self._buffer[key_pos:]
key_end = self._buffer.find(self.arg_key_end)
if key_end == -1:
return None
key = self._buffer[len(self.arg_key_start) : key_end]
self._buffer = self._buffer[key_end + len(self.arg_key_end) :]
self._pending_key = key
continue
def _ensure_tool_state(self) -> None:
while len(self._tool_call_ids) <= self.current_tool_id:
self._tool_call_ids.append(
make_tool_call_id(id_type="random", func_name=None, idx=None)
)
while len(self.streamed_args_for_tool) <= self.current_tool_id:
self.streamed_args_for_tool.append("")
while len(self.prev_tool_call_arr) <= self.current_tool_id:
self.prev_tool_call_arr.append({})
while len(self._args_started) <= self.current_tool_id:
self._args_started.append(False)
while len(self._args_closed) <= self.current_tool_id:
self._args_closed.append(False)
while len(self._seen_keys) <= self.current_tool_id:
self._seen_keys.append(set())
def _begin_tool_call(self) -> None:
if self.current_tool_id == -1:
self.current_tool_id = 0
else:
self.current_tool_id += 1
self._ensure_tool_state()
self.current_tool_name_sent = False
self._current_tool_name = None
self._pending_key = None
self._streaming_string_value = False
self._in_tool_call = True
def _finish_tool_call(self) -> None:
self._in_tool_call = False
self._current_tool_name = None
self._pending_key = None
self._streaming_string_value = False
def _revert_last_tool_call_state(self) -> None:
"""Revert the state allocation for the last tool call."""
if self.current_tool_id < 0:
return
self._tool_call_ids.pop()
self.streamed_args_for_tool.pop()
self.prev_tool_call_arr.pop()
self._args_started.pop()
self._args_closed.pop()
self._seen_keys.pop()
self.current_tool_id -= 1
def _emit_tool_name_delta(self, tool_name: str) -> "DeltaMessage":
    """Build the first delta for a tool call: its id, type, and name."""
    # The function payload carries the name with empty arguments; the
    # argument fragments follow in later deltas.
    function_payload = DeltaFunctionCall(
        name=tool_name,
        arguments="",
    ).model_dump(exclude_none=True)
    call = DeltaToolCall(
        index=self.current_tool_id,
        id=self._tool_call_ids[self.current_tool_id],
        type="function",
        function=function_payload,
    )
    return DeltaMessage(tool_calls=[call])
def _emit_tool_args_delta(self, fragment: str) -> "DeltaMessage":
    """Build a delta carrying one incremental slice of JSON arguments."""
    payload = DeltaFunctionCall(arguments=fragment).model_dump(exclude_none=True)
    return DeltaMessage(
        tool_calls=[DeltaToolCall(index=self.current_tool_id, function=payload)]
    )
def _append_arg_fragment(
self,
*,
key: str,
raw_val: str,
) -> str | None:
key = key.strip()
if not key:
return None
if key in self._seen_keys[self.current_tool_id]:
return None
# This function is only called for non-string types (already checked
# by _is_string_type in the caller), so we always deserialize.
val_obj: Any = self._deserialize(raw_val)
key_json = json.dumps(key, ensure_ascii=False)
val_json = json.dumps(val_obj, ensure_ascii=False)
if not self._args_started[self.current_tool_id]:
fragment = "{" + key_json + ":" + val_json
self._args_started[self.current_tool_id] = True
else:
fragment = "," + key_json + ":" + val_json
self._seen_keys[self.current_tool_id].add(key)
self.streamed_args_for_tool[self.current_tool_id] += fragment
return fragment
def _close_args_if_needed(self) -> str | None:
if self._args_closed[self.current_tool_id]:
return None
self._args_closed[self.current_tool_id] = True
if not self._args_started[self.current_tool_id]:
fragment = "{}"
self.streamed_args_for_tool[self.current_tool_id] = fragment
else:
fragment = "}"
self.streamed_args_for_tool[self.current_tool_id] += fragment
return fragment