import argparse
import asyncio
import json
from contextlib import asynccontextmanager
from http import HTTPStatus

import fastapi
import uvicorn
from aioprometheus import MetricsMiddleware
from aioprometheus.asgi.starlette import metrics
from fastapi import Request
from fastapi.exceptions import RequestValidationError
from fastapi.middleware.cors import CORSMiddleware
from fastapi.responses import JSONResponse, Response, StreamingResponse

from vllm.engine.arg_utils import AsyncEngineArgs
from vllm.engine.async_llm_engine import AsyncLLMEngine
from vllm.engine.metrics import add_global_metrics_labels
from vllm.entrypoints.openai.protocol import (ChatCompletionRequest,
                                              CompletionRequest, ErrorResponse)
from vllm.entrypoints.openai.serving_chat import OpenAIServingChat
from vllm.entrypoints.openai.serving_completion import OpenAIServingCompletion
from vllm.logger import init_logger

TIMEOUT_KEEP_ALIVE = 5  # seconds

# Filled in by the __main__ block below, once the engine is constructed.
openai_serving_chat: OpenAIServingChat = None
openai_serving_completion: OpenAIServingCompletion = None

logger = init_logger(__name__)
|
2023-11-30 19:43:13 -05:00
|
|
|
|
|
|
|
|
|
2024-01-05 15:24:42 +02:00
|
|
|
@asynccontextmanager
|
|
|
|
|
async def lifespan(app: fastapi.FastAPI):
|
|
|
|
|
|
|
|
|
|
async def _force_log():
|
|
|
|
|
while True:
|
|
|
|
|
await asyncio.sleep(10)
|
|
|
|
|
await engine.do_log_stats()
|
|
|
|
|
|
|
|
|
|
if not engine_args.disable_log_stats:
|
|
|
|
|
asyncio.create_task(_force_log())
|
|
|
|
|
|
|
|
|
|
yield
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
app = fastapi.FastAPI(lifespan=lifespan)


def parse_args():
    parser = argparse.ArgumentParser(
        description="vLLM OpenAI-Compatible RESTful API server.")
    parser.add_argument("--host", type=str, default=None, help="host name")
    parser.add_argument("--port", type=int, default=8000, help="port number")
    parser.add_argument("--allow-credentials",
                        action="store_true",
                        help="allow credentials")
    parser.add_argument("--allowed-origins",
                        type=json.loads,
                        default=["*"],
                        help="allowed origins")
    parser.add_argument("--allowed-methods",
                        type=json.loads,
                        default=["*"],
                        help="allowed methods")
    parser.add_argument("--allowed-headers",
                        type=json.loads,
                        default=["*"],
                        help="allowed headers")
    parser.add_argument("--served-model-name",
                        type=str,
                        default=None,
                        help="The model name used in the API. If not "
                        "specified, the model name will be the same as "
                        "the huggingface name.")
    parser.add_argument("--chat-template",
                        type=str,
                        default=None,
                        help="The file path to the chat template, "
                        "or the template in single-line form "
                        "for the specified model")
    parser.add_argument("--response-role",
                        type=str,
                        default="assistant",
                        help="The role name to return if "
                        "`request.add_generation_prompt=true`.")
    parser.add_argument("--ssl-keyfile",
                        type=str,
                        default=None,
                        help="The file path to the SSL key file")
    parser.add_argument("--ssl-certfile",
                        type=str,
                        default=None,
                        help="The file path to the SSL cert file")
    parser.add_argument(
        "--root-path",
        type=str,
        default=None,
        help="FastAPI root_path when app is behind a path based routing proxy")

    # Add all AsyncLLMEngine CLI options (model, parallelism, etc.) as well.
    parser = AsyncEngineArgs.add_cli_args(parser)
    return parser.parse_args()
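
# Illustrative launch command (the model name is a placeholder, not part of
# this file); the server is normally started as a module:
#
#   python -m vllm.entrypoints.openai.api_server --model <model-name>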


app.add_middleware(MetricsMiddleware)  # Trace HTTP server metrics
app.add_route("/metrics", metrics)  # Exposes HTTP metrics


@app.exception_handler(RequestValidationError)
async def validation_exception_handler(_, exc):
    # Return request-validation failures as OpenAI-style error bodies.
    err = openai_serving_chat.create_error_response(message=str(exc))
    return JSONResponse(err.model_dump(), status_code=HTTPStatus.BAD_REQUEST)


@app.get("/health")
async def health() -> Response:
    """Health check."""
    return Response(status_code=200)
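
# Illustrative liveness probe against this endpoint (assuming the default
# host and port):
#
#   curl -i http://localhost:8000/health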


@app.get("/v1/models")
async def show_available_models():
    models = await openai_serving_chat.show_available_models()
    return JSONResponse(content=models.model_dump())
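
# Illustrative query (assuming the default host and port); returns the served
# model as an OpenAI-compatible model list:
#
#   curl http://localhost:8000/v1/models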


@app.post("/v1/chat/completions")
async def create_chat_completion(request: ChatCompletionRequest,
                                 raw_request: Request):
    generator = await openai_serving_chat.create_chat_completion(
        request, raw_request)
    if isinstance(generator, ErrorResponse):
        return JSONResponse(content=generator.model_dump(),
                            status_code=generator.code)
    if request.stream:
        # Streamed responses are delivered as Server-Sent Events.
        return StreamingResponse(content=generator,
                                 media_type="text/event-stream")
    else:
        return JSONResponse(content=generator.model_dump())
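
# Illustrative request ("<served-model>" is a placeholder that must match the
# served model name); add "stream": true to receive SSE chunks instead:
#
#   curl http://localhost:8000/v1/chat/completions \
#     -H "Content-Type: application/json" \
#     -d '{"model": "<served-model>",
#          "messages": [{"role": "user", "content": "Hello!"}]}'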


@app.post("/v1/completions")
async def create_completion(request: CompletionRequest, raw_request: Request):
    generator = await openai_serving_completion.create_completion(
        request, raw_request)
    if isinstance(generator, ErrorResponse):
        return JSONResponse(content=generator.model_dump(),
                            status_code=generator.code)
    if request.stream:
        return StreamingResponse(content=generator,
                                 media_type="text/event-stream")
    else:
        return JSONResponse(content=generator.model_dump())
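
# Illustrative request (placeholder model name, as above):
#
#   curl http://localhost:8000/v1/completions \
#     -H "Content-Type: application/json" \
#     -d '{"model": "<served-model>", "prompt": "Hello", "max_tokens": 16}'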


if __name__ == "__main__":
    args = parse_args()

    app.add_middleware(
        CORSMiddleware,
        allow_origins=args.allowed_origins,
        allow_credentials=args.allow_credentials,
        allow_methods=args.allowed_methods,
        allow_headers=args.allowed_headers,
    )

    logger.info(f"args: {args}")

    if args.served_model_name is not None:
        served_model = args.served_model_name
    else:
        served_model = args.model

    # A single AsyncLLMEngine instance backs both serving layers.
    engine_args = AsyncEngineArgs.from_cli_args(args)
    engine = AsyncLLMEngine.from_engine_args(engine_args)
    openai_serving_chat = OpenAIServingChat(engine, served_model,
                                            args.response_role,
                                            args.chat_template)
    openai_serving_completion = OpenAIServingCompletion(engine, served_model)

    # Register labels for metrics
    add_global_metrics_labels(model_name=engine_args.model)

    app.root_path = args.root_path
    uvicorn.run(app,
                host=args.host,
                port=args.port,
                log_level="info",
                timeout_keep_alive=TIMEOUT_KEEP_ALIVE,
                ssl_keyfile=args.ssl_keyfile,
                ssl_certfile=args.ssl_certfile)
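
# Illustrative end-to-end usage via the official `openai` Python client
# (a sketch, assuming the default host/port; this version of the server
# performs no API-key check, so any placeholder key works):
#
#   from openai import OpenAI
#   client = OpenAI(base_url="http://localhost:8000/v1", api_key="EMPTY")
#   chat = client.chat.completions.create(
#       model="<served-model>",
#       messages=[{"role": "user", "content": "Hello!"}])
#   print(chat.choices[0].message.content)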