[Refactor] [6/N] to simplify the vLLM openai chat_completion serving architecture (#32240)

Signed-off-by: chaunceyjiang <chaunceyjiang@gmail.com>
This commit is contained in:
Chauncey
2026-01-13 21:01:39 +08:00
committed by GitHub
parent a5bbbd2f24
commit fefce49807
128 changed files with 1221 additions and 1008 deletions

View File

@@ -7,7 +7,7 @@ import json
import pytest
from openai.types.responses import ResponseFunctionToolCall
-from vllm.entrypoints.openai.protocol import ResponsesRequest
+from vllm.entrypoints.openai.engine.protocol import ResponsesRequest
def test_function_call_dict_converted_to_object():
@@ -253,7 +253,7 @@ def test_function_call_validation_failure_logs_debug(caplog):
}
# Mock the logger to verify debug was called
-    with patch("vllm.entrypoints.openai.protocol.logger") as mock_logger:
+    with patch("vllm.entrypoints.openai.engine.protocol.logger") as mock_logger:
with pytest.raises(ValueError):
ResponsesRequest(**request_data)