[Bugfix] Add error handling for FINISHED_ERROR in OpenAIServing (#37148)

Signed-off-by: chaunceyjiang <chaunceyjiang@gmail.com>
Author: Chauncey (committed by GitHub)
Date: 2026-03-17 00:27:47 +08:00
Parent: 5ae685c1c8
Commit: 6682c231fa
2 changed files with 19 additions and 1 deletion

@@ -29,11 +29,13 @@ from vllm.entrypoints.chat_utils import load_chat_template
 from vllm.entrypoints.launcher import serve_http
 from vllm.entrypoints.logger import RequestLogger
 from vllm.entrypoints.openai.cli_args import make_arg_parser, validate_parsed_serve_args
+from vllm.entrypoints.openai.engine.protocol import GenerationError
 from vllm.entrypoints.openai.models.protocol import BaseModelPath
 from vllm.entrypoints.openai.models.serving import OpenAIServingModels
 from vllm.entrypoints.openai.server_utils import (
     engine_error_handler,
     exception_handler,
+    generation_error_handler,
     get_uvicorn_log_config,
     http_exception_handler,
     lifespan,
@@ -263,6 +265,7 @@ def build_app(
     app.exception_handler(RequestValidationError)(validation_exception_handler)
     app.exception_handler(EngineGenerateError)(engine_error_handler)
     app.exception_handler(EngineDeadError)(engine_error_handler)
+    app.exception_handler(GenerationError)(generation_error_handler)
     app.exception_handler(Exception)(exception_handler)
     # Ensure --api-key option from CLI takes precedence over VLLM_API_KEY
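
For context, the snippet below is a minimal, self-contained sketch of the registration pattern the diff uses: `FastAPI.exception_handler(ExcClass)` returns a decorator, so calling it directly with a handler function registers that handler for the exception class. The `GenerationError` class body and the response produced by `generation_error_handler` here are stand-ins for illustration only; the real definitions live in vLLM and are not shown in this commit.

```python
# Sketch of the exception-handler registration pattern from build_app().
# GenerationError and generation_error_handler are hypothetical stand-ins,
# not the actual vLLM implementations.
from fastapi import FastAPI, Request
from fastapi.responses import JSONResponse


class GenerationError(Exception):
    """Stand-in for the error raised when a request finishes in the
    FINISHED_ERROR state (placeholder for the real vLLM class)."""


async def generation_error_handler(_: Request, exc: GenerationError) -> JSONResponse:
    # Convert the engine-side failure into a structured error payload
    # instead of letting it surface as an unhandled exception.
    return JSONResponse(
        status_code=500,
        content={"error": {"message": str(exc), "type": "GenerationError"}},
    )


app = FastAPI()
# Same call shape as in the diff: the decorator is applied directly.
app.exception_handler(GenerationError)(generation_error_handler)
```

With a handler registered this way, any route that raises `GenerationError` returns the JSON error body above rather than an opaque 500 response.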