# SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project

from http import HTTPStatus

from fastapi import APIRouter, Depends, FastAPI, HTTPException, Request
from fastapi.responses import JSONResponse, StreamingResponse

from vllm.entrypoints.openai.chat_completion.protocol import (
    ChatCompletionRequest,
    ChatCompletionResponse,
)
from vllm.entrypoints.openai.chat_completion.serving import OpenAIServingChat
from vllm.entrypoints.openai.engine.protocol import ErrorResponse
from vllm.entrypoints.openai.orca_metrics import metrics_header
from vllm.entrypoints.openai.utils import validate_json_request
from vllm.entrypoints.utils import (
    load_aware_call,
    with_cancellation,
)
from vllm.logger import init_logger

logger = init_logger(__name__)

router = APIRouter()

# Request header a client can set to choose the serialization format of the
# load-metrics header attached to non-streaming responses below.
ENDPOINT_LOAD_METRICS_FORMAT_HEADER_LABEL = "endpoint-load-metrics-format"
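# Illustrative request (not from this module): a client opts into load-metrics
# reporting by setting the header above. The accepted format values are
# defined by vllm.entrypoints.openai.orca_metrics, and "text" below is only an
# assumed example value.
#
#     curl http://localhost:8000/v1/chat/completions \
#         -H "Content-Type: application/json" \
#         -H "endpoint-load-metrics-format: text" \
#         -d '{"model": "...", "messages": [{"role": "user", "content": "Hi"}]}'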


def chat(request: Request) -> OpenAIServingChat | None:
    """Fetch the chat handler from app state; None means the served model
    does not support the Chat Completions API."""
    return request.app.state.openai_serving_chat


@router.post(
    "/v1/chat/completions",
    dependencies=[Depends(validate_json_request)],
    responses={
        HTTPStatus.OK.value: {"content": {"text/event-stream": {}}},
        HTTPStatus.BAD_REQUEST.value: {"model": ErrorResponse},
        HTTPStatus.NOT_FOUND.value: {"model": ErrorResponse},
        HTTPStatus.INTERNAL_SERVER_ERROR.value: {"model": ErrorResponse},
    },
)
@with_cancellation
@load_aware_call
async def create_chat_completion(request: ChatCompletionRequest, raw_request: Request):
    metrics_header_format = raw_request.headers.get(
        ENDPOINT_LOAD_METRICS_FORMAT_HEADER_LABEL, ""
    )
    handler = chat(raw_request)
    if handler is None:
        # No chat handler: borrow the tokenization server to format the error.
        base_server = raw_request.app.state.openai_serving_tokenization
        return base_server.create_error_response(
            message="The model does not support the Chat Completions API"
        )
    try:
        generator = await handler.create_chat_completion(request, raw_request)
    except Exception as e:
        raise HTTPException(
            status_code=HTTPStatus.INTERNAL_SERVER_ERROR.value, detail=str(e)
        ) from e

    if isinstance(generator, ErrorResponse):
        return JSONResponse(
            content=generator.model_dump(), status_code=generator.error.code
        )
    elif isinstance(generator, ChatCompletionResponse):
        # Non-streaming response: attach load metrics in the requested format.
        return JSONResponse(
            content=generator.model_dump(),
            headers=metrics_header(metrics_header_format),
        )

    # Otherwise the handler returned an async generator of SSE chunks.
    return StreamingResponse(content=generator, media_type="text/event-stream")
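
# Streaming sketch (illustrative): when the request body sets "stream": true,
# the endpoint replies with Server-Sent Events, one JSON chunk per event,
# terminated by the OpenAI-style sentinel:
#
#     data: {"id": "...", "object": "chat.completion.chunk", ...}
#     data: [DONE]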


def attach_router(app: FastAPI):
    """Register this module's routes on the given FastAPI application."""
    app.include_router(router)
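
# Usage sketch (hypothetical wiring, not part of this module): attach_router
# assumes the serving handlers were placed on app.state during startup, e.g.:
#
#     app = FastAPI()
#     app.state.openai_serving_chat = ...          # OpenAIServingChat or None
#     app.state.openai_serving_tokenization = ...  # used to format errors
#     attach_router(app)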