From 841d53aaa8d674f2c9f72503e77f75e5ffa79c71 Mon Sep 17 00:00:00 2001
From: Chauncey
Date: Thu, 22 Jan 2026 19:34:14 +0800
Subject: [PATCH] [Frontend] add prompt_cache_key for openresponses (#32824)

Signed-off-by: chaunceyjiang
---
 vllm/entrypoints/openai/responses/protocol.py | 8 ++++++++
 1 file changed, 8 insertions(+)

diff --git a/vllm/entrypoints/openai/responses/protocol.py b/vllm/entrypoints/openai/responses/protocol.py
index ee5998a59..1109d78a5 100644
--- a/vllm/entrypoints/openai/responses/protocol.py
+++ b/vllm/entrypoints/openai/responses/protocol.py
@@ -173,6 +173,14 @@ class ResponsesRequest(OpenAIBaseModel):
     user: str | None = None
     skip_special_tokens: bool = True
     include_stop_str_in_output: bool = False
+    prompt_cache_key: str | None = Field(
+        default=None,
+        description=(
+            "A key that was used to read from or write to the prompt cache. "
+            "Note: This field has not been implemented yet "
+            "and vLLM will ignore it."
+        ),
+    )
 
     # --8<-- [start:responses-extra-params]
     request_id: str = Field(
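
For context, a hedged usage sketch (not part of the patch): after this change, prompt_cache_key is an explicit field on ResponsesRequest, so clients can include it in a request to the /v1/responses endpoint and the value is parsed into the request model; as the field description notes, vLLM currently ignores it. The server URL, model name, and key value below are assumptions for illustration.

```python
# Sketch of sending prompt_cache_key to a locally running vLLM
# OpenAI-compatible server (assumed to be at localhost:8000).
import requests

payload = {
    "model": "Qwen/Qwen2.5-1.5B-Instruct",  # hypothetical model name
    "input": "Hello!",
    # Accepted by the schema after this patch; the value is ignored by vLLM for now.
    "prompt_cache_key": "user-1234",
}

resp = requests.post("http://localhost:8000/v1/responses", json=payload, timeout=30)
resp.raise_for_status()
print(resp.json())
```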