[Frontend] add prompt_cache_key for openresponses (#32824)
Signed-off-by: chaunceyjiang <chaunceyjiang@gmail.com>
@@ -173,6 +173,14 @@ class ResponsesRequest(OpenAIBaseModel):
     user: str | None = None
     skip_special_tokens: bool = True
     include_stop_str_in_output: bool = False
+    prompt_cache_key: str | None = Field(
+        default=None,
+        description=(
+            "A key that was used to read from or write to the prompt cache. "
+            "Note: This field has not been implemented yet "
+            "and vLLM will ignore it."
+        ),
+    )
 
     # --8<-- [start:responses-extra-params]
     request_id: str = Field(
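For context, a minimal, self-contained sketch of how a compatibility-only field like this behaves. The class below is a hypothetical stand-in for illustration, not vLLM's actual `ResponsesRequest` (assumes pydantic v2 and Python 3.10+):

```python
from pydantic import BaseModel, Field


class DemoResponsesRequest(BaseModel):
    """Hypothetical stand-in for vLLM's ResponsesRequest (illustration only)."""

    model: str
    # Accepted for OpenAI API compatibility; nothing on the server reads it.
    prompt_cache_key: str | None = Field(
        default=None,
        description=(
            "A key that was used to read from or write to the prompt cache. "
            "Note: This field has not been implemented yet "
            "and vLLM will ignore it."
        ),
    )


# The field validates and round-trips, but no downstream code consumes it.
req = DemoResponsesRequest(model="demo", prompt_cache_key="user-123")
print(req.prompt_cache_key)  # -> user-123
```

On the client side, the key can be forwarded through the OpenAI SDK via `extra_body`; a hedged sketch, with the base URL and model name as placeholders:

```python
from openai import OpenAI

client = OpenAI(base_url="http://localhost:8000/v1", api_key="EMPTY")
resp = client.responses.create(
    model="my-model",  # placeholder
    input="Hello",
    extra_body={"prompt_cache_key": "user-123"},  # accepted, currently ignored
)
```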