[Frontend] add prompt_cache_key for openresponses (#32824)

Signed-off-by: chaunceyjiang <chaunceyjiang@gmail.com>
This commit is contained in:
Chauncey
2026-01-22 19:34:14 +08:00
committed by GitHub
parent 1752262e96
commit 841d53aaa8

View File

@@ -173,6 +173,14 @@ class ResponsesRequest(OpenAIBaseModel):
user: str | None = None
skip_special_tokens: bool = True
include_stop_str_in_output: bool = False
prompt_cache_key: str | None = Field(
default=None,
description=(
"A key that was used to read from or write to the prompt cache. "
"Note: This field has not been implemented yet "
"and vLLM will ignore it."
),
)
# --8<-- [start:responses-extra-params]
request_id: str = Field(