# SPDX-License-Identifier: Apache-2.0 # SPDX-FileCopyrightText: Copyright contributors to the vLLM project import asyncio import hashlib import importlib import inspect import json import multiprocessing import multiprocessing.forkserver as forkserver import os import secrets import signal import socket import tempfile import uuid from argparse import Namespace from collections.abc import AsyncIterator, Awaitable from contextlib import asynccontextmanager from http import HTTPStatus from typing import Any import model_hosting_container_standards.sagemaker as sagemaker_standards import pydantic import uvloop from fastapi import APIRouter, FastAPI, HTTPException, Request from fastapi.exceptions import RequestValidationError from fastapi.middleware.cors import CORSMiddleware from fastapi.responses import JSONResponse from starlette.concurrency import iterate_in_threadpool from starlette.datastructures import URL, Headers, MutableHeaders, State from starlette.types import ASGIApp, Message, Receive, Scope, Send import vllm.envs as envs from vllm.engine.arg_utils import AsyncEngineArgs from vllm.engine.protocol import EngineClient from vllm.entrypoints.anthropic.serving import AnthropicServingMessages from vllm.entrypoints.chat_utils import load_chat_template from vllm.entrypoints.launcher import serve_http from vllm.entrypoints.logger import RequestLogger from vllm.entrypoints.mcp.tool_server import DemoToolServer, MCPToolServer, ToolServer from vllm.entrypoints.openai.chat_completion.serving import OpenAIServingChat from vllm.entrypoints.openai.cli_args import make_arg_parser, validate_parsed_serve_args from vllm.entrypoints.openai.completion.serving import OpenAIServingCompletion from vllm.entrypoints.openai.engine.protocol import ( ErrorInfo, ErrorResponse, ) from vllm.entrypoints.openai.engine.serving import OpenAIServing from vllm.entrypoints.openai.models.protocol import BaseModelPath from vllm.entrypoints.openai.models.serving import ( OpenAIServingModels, ) from vllm.entrypoints.openai.responses.serving import OpenAIServingResponses from vllm.entrypoints.openai.translations.serving import ( OpenAIServingTranscription, OpenAIServingTranslation, ) from vllm.entrypoints.serve.disagg.serving import ServingTokens from vllm.entrypoints.serve.elastic_ep.middleware import ( ScalingMiddleware, ) from vllm.entrypoints.serve.tokenize.serving import OpenAIServingTokenization from vllm.entrypoints.utils import ( cli_env_setup, log_non_default_args, log_version_and_model, process_lora_modules, sanitize_message, ) from vllm.exceptions import VLLMValidationError from vllm.logger import init_logger from vllm.reasoning import ReasoningParserManager from vllm.tool_parsers import ToolParserManager from vllm.usage.usage_lib import UsageContext from vllm.utils.argparse_utils import FlexibleArgumentParser from vllm.utils.gc_utils import freeze_gc_heap from vllm.utils.network_utils import is_valid_ipv6_address from vllm.utils.system_utils import decorate_logs, set_ulimit from vllm.version import __version__ as VLLM_VERSION prometheus_multiproc_dir: tempfile.TemporaryDirectory # Cannot use __name__ (https://github.com/vllm-project/vllm/pull/4765) logger = init_logger("vllm.entrypoints.openai.api_server") _running_tasks: set[asyncio.Task] = set() @asynccontextmanager async def lifespan(app: FastAPI): try: if app.state.log_stats: engine_client: EngineClient = app.state.engine_client async def _force_log(): while True: await asyncio.sleep(envs.VLLM_LOG_STATS_INTERVAL) await engine_client.do_log_stats() task = 
asyncio.create_task(_force_log()) _running_tasks.add(task) task.add_done_callback(_running_tasks.remove) else: task = None # Mark the startup heap as static so that it's ignored by GC. # Reduces pause times of oldest generation collections. freeze_gc_heap() try: yield finally: if task is not None: task.cancel() finally: # Ensure app state including engine ref is gc'd del app.state @asynccontextmanager async def build_async_engine_client( args: Namespace, *, usage_context: UsageContext = UsageContext.OPENAI_API_SERVER, disable_frontend_multiprocessing: bool | None = None, client_config: dict[str, Any] | None = None, ) -> AsyncIterator[EngineClient]: if os.getenv("VLLM_WORKER_MULTIPROC_METHOD") == "forkserver": # The executor is expected to be mp. # Pre-import heavy modules in the forkserver process logger.debug("Setup forkserver with pre-imports") multiprocessing.set_start_method("forkserver") multiprocessing.set_forkserver_preload(["vllm.v1.engine.async_llm"]) forkserver.ensure_running() logger.debug("Forkserver setup complete!") # Context manager to handle engine_client lifecycle # Ensures everything is shutdown and cleaned up on error/exit engine_args = AsyncEngineArgs.from_cli_args(args) if client_config: engine_args._api_process_count = client_config.get("client_count", 1) engine_args._api_process_rank = client_config.get("client_index", 0) if disable_frontend_multiprocessing is None: disable_frontend_multiprocessing = bool(args.disable_frontend_multiprocessing) async with build_async_engine_client_from_engine_args( engine_args, usage_context=usage_context, disable_frontend_multiprocessing=disable_frontend_multiprocessing, client_config=client_config, ) as engine: yield engine @asynccontextmanager async def build_async_engine_client_from_engine_args( engine_args: AsyncEngineArgs, *, usage_context: UsageContext = UsageContext.OPENAI_API_SERVER, disable_frontend_multiprocessing: bool = False, client_config: dict[str, Any] | None = None, ) -> AsyncIterator[EngineClient]: """ Create EngineClient, either: - in-process using the AsyncLLMEngine Directly - multiprocess using AsyncLLMEngine RPC Returns the Client or None if the creation failed. """ # Create the EngineConfig (determines if we can use V1). 
vllm_config = engine_args.create_engine_config(usage_context=usage_context) if disable_frontend_multiprocessing: logger.warning("V1 is enabled, but got --disable-frontend-multiprocessing.") from vllm.v1.engine.async_llm import AsyncLLM async_llm: AsyncLLM | None = None # Don't mutate the input client_config client_config = dict(client_config) if client_config else {} client_count = client_config.pop("client_count", 1) client_index = client_config.pop("client_index", 0) try: async_llm = AsyncLLM.from_vllm_config( vllm_config=vllm_config, usage_context=usage_context, enable_log_requests=engine_args.enable_log_requests, aggregate_engine_logging=engine_args.aggregate_engine_logging, disable_log_stats=engine_args.disable_log_stats, client_addresses=client_config, client_count=client_count, client_index=client_index, ) # Don't keep the dummy data in memory assert async_llm is not None await async_llm.reset_mm_cache() yield async_llm finally: if async_llm: async_llm.shutdown() router = APIRouter() def base(request: Request) -> OpenAIServing: # Reuse the existing instance return tokenization(request) def tokenization(request: Request) -> OpenAIServingTokenization: return request.app.state.openai_serving_tokenization def engine_client(request: Request) -> EngineClient: return request.app.state.engine_client @router.get("/load") async def get_server_load_metrics(request: Request): # This endpoint returns the current server load metrics. # It tracks requests utilizing the GPU from the following routes: # - /v1/responses # - /v1/responses/{response_id} # - /v1/responses/{response_id}/cancel # - /v1/messages # - /v1/chat/completions # - /v1/completions # - /v1/audio/transcriptions # - /v1/audio/translations # - /v1/embeddings # - /pooling # - /classify # - /score # - /v1/score # - /rerank # - /v1/rerank # - /v2/rerank return JSONResponse(content={"server_load": request.app.state.server_load_metrics}) @router.get("/version") async def show_version(): ver = {"version": VLLM_VERSION} return JSONResponse(content=ver) def load_log_config(log_config_file: str | None) -> dict | None: if not log_config_file: return None try: with open(log_config_file) as f: return json.load(f) except Exception as e: logger.warning( "Failed to load log config from file %s: error %s", log_config_file, e ) return None def get_uvicorn_log_config(args: Namespace) -> dict | None: """ Get the uvicorn log config based on the provided arguments. Priority: 1. If log_config_file is specified, use it 2. If disable_access_log_for_endpoints is specified, create a config with the access log filter 3. Otherwise, return None (use uvicorn defaults) """ # First, try to load from file if specified log_config = load_log_config(args.log_config_file) if log_config is not None: return log_config # If endpoints to filter are specified, create a config with the filter if args.disable_access_log_for_endpoints: from vllm.logging_utils import create_uvicorn_log_config # Parse comma-separated string into list excluded_paths = [ p.strip() for p in args.disable_access_log_for_endpoints.split(",") if p.strip() ] return create_uvicorn_log_config( excluded_paths=excluded_paths, log_level=args.uvicorn_log_level, ) return None class AuthenticationMiddleware: """ Pure ASGI middleware that authenticates each request by checking if the Authorization Bearer token exists and equals any of the configured API keys. Notes ----- There are two cases in which authentication is skipped: 1. The HTTP method is OPTIONS. 2. The request path doesn't start with /v1 (e.g. /health).
""" def __init__(self, app: ASGIApp, tokens: list[str]) -> None: self.app = app self.api_tokens = [hashlib.sha256(t.encode("utf-8")).digest() for t in tokens] def verify_token(self, headers: Headers) -> bool: authorization_header_value = headers.get("Authorization") if not authorization_header_value: return False scheme, _, param = authorization_header_value.partition(" ") if scheme.lower() != "bearer": return False param_hash = hashlib.sha256(param.encode("utf-8")).digest() token_match = False for token_hash in self.api_tokens: token_match |= secrets.compare_digest(param_hash, token_hash) return token_match def __call__(self, scope: Scope, receive: Receive, send: Send) -> Awaitable[None]: if scope["type"] not in ("http", "websocket") or scope["method"] == "OPTIONS": # scope["type"] can be "lifespan" or "startup" for example, # in which case we don't need to do anything return self.app(scope, receive, send) root_path = scope.get("root_path", "") url_path = URL(scope=scope).path.removeprefix(root_path) headers = Headers(scope=scope) # Type narrow to satisfy mypy. if url_path.startswith("/v1") and not self.verify_token(headers): response = JSONResponse(content={"error": "Unauthorized"}, status_code=401) return response(scope, receive, send) return self.app(scope, receive, send) class XRequestIdMiddleware: """ Middleware the set's the X-Request-Id header for each response to a random uuid4 (hex) value if the header isn't already present in the request, otherwise use the provided request id. """ def __init__(self, app: ASGIApp) -> None: self.app = app def __call__(self, scope: Scope, receive: Receive, send: Send) -> Awaitable[None]: if scope["type"] not in ("http", "websocket"): return self.app(scope, receive, send) # Extract the request headers. request_headers = Headers(scope=scope) async def send_with_request_id(message: Message) -> None: """ Custom send function to mutate the response headers and append X-Request-Id to it. 
""" if message["type"] == "http.response.start": response_headers = MutableHeaders(raw=message["headers"]) request_id = request_headers.get("X-Request-Id", uuid.uuid4().hex) response_headers.append("X-Request-Id", request_id) await send(message) return self.app(scope, receive, send_with_request_id) def _extract_content_from_chunk(chunk_data: dict) -> str: """Extract content from a streaming response chunk.""" try: from vllm.entrypoints.openai.chat_completion.protocol import ( ChatCompletionStreamResponse, ) from vllm.entrypoints.openai.completion.protocol import ( CompletionStreamResponse, ) # Try using Completion types for type-safe parsing if chunk_data.get("object") == "chat.completion.chunk": chat_response = ChatCompletionStreamResponse.model_validate(chunk_data) if chat_response.choices and chat_response.choices[0].delta.content: return chat_response.choices[0].delta.content elif chunk_data.get("object") == "text_completion": completion_response = CompletionStreamResponse.model_validate(chunk_data) if completion_response.choices and completion_response.choices[0].text: return completion_response.choices[0].text except pydantic.ValidationError: # Fallback to manual parsing if "choices" in chunk_data and chunk_data["choices"]: choice = chunk_data["choices"][0] if "delta" in choice and choice["delta"].get("content"): return choice["delta"]["content"] elif choice.get("text"): return choice["text"] return "" class SSEDecoder: """Robust Server-Sent Events decoder for streaming responses.""" def __init__(self): self.buffer = "" self.content_buffer = [] def decode_chunk(self, chunk: bytes) -> list[dict]: """Decode a chunk of SSE data and return parsed events.""" import json try: chunk_str = chunk.decode("utf-8") except UnicodeDecodeError: # Skip malformed chunks return [] self.buffer += chunk_str events = [] # Process complete lines while "\n" in self.buffer: line, self.buffer = self.buffer.split("\n", 1) line = line.rstrip("\r") # Handle CRLF if line.startswith("data: "): data_str = line[6:].strip() if data_str == "[DONE]": events.append({"type": "done"}) elif data_str: try: event_data = json.loads(data_str) events.append({"type": "data", "data": event_data}) except json.JSONDecodeError: # Skip malformed JSON continue return events def extract_content(self, event_data: dict) -> str: """Extract content from event data.""" return _extract_content_from_chunk(event_data) def add_content(self, content: str) -> None: """Add content to the buffer.""" if content: self.content_buffer.append(content) def get_complete_content(self) -> str: """Get the complete buffered content.""" return "".join(self.content_buffer) def _log_streaming_response(response, response_body: list) -> None: """Log streaming response with robust SSE parsing.""" from starlette.concurrency import iterate_in_threadpool sse_decoder = SSEDecoder() chunk_count = 0 def buffered_iterator(): nonlocal chunk_count for chunk in response_body: chunk_count += 1 yield chunk # Parse SSE events from chunk events = sse_decoder.decode_chunk(chunk) for event in events: if event["type"] == "data": content = sse_decoder.extract_content(event["data"]) sse_decoder.add_content(content) elif event["type"] == "done": # Log complete content when done full_content = sse_decoder.get_complete_content() if full_content: # Truncate if too long if len(full_content) > 2048: full_content = full_content[:2048] + "" "...[truncated]" logger.info( "response_body={streaming_complete: content=%r, chunks=%d}", full_content, chunk_count, ) else: logger.info( 
"response_body={streaming_complete: no_content, chunks=%d}", chunk_count, ) return response.body_iterator = iterate_in_threadpool(buffered_iterator()) logger.info("response_body={streaming_started: chunks=%d}", len(response_body)) def _log_non_streaming_response(response_body: list) -> None: """Log non-streaming response.""" try: decoded_body = response_body[0].decode() logger.info("response_body={%s}", decoded_body) except UnicodeDecodeError: logger.info("response_body={}") def build_app(args: Namespace) -> FastAPI: if args.disable_fastapi_docs: app = FastAPI( openapi_url=None, docs_url=None, redoc_url=None, lifespan=lifespan ) elif args.enable_offline_docs: app = FastAPI(docs_url=None, redoc_url=None, lifespan=lifespan) else: app = FastAPI(lifespan=lifespan) app.state.args = args from vllm.entrypoints.serve import register_vllm_serve_api_routers register_vllm_serve_api_routers(app) from vllm.entrypoints.openai.chat_completion.api_router import ( attach_router as register_chat_api_router, ) register_chat_api_router(app) from vllm.entrypoints.openai.responses.api_router import ( attach_router as register_responses_api_router, ) register_responses_api_router(app) from vllm.entrypoints.openai.translations.api_router import ( attach_router as register_translations_api_router, ) register_translations_api_router(app) from vllm.entrypoints.openai.completion.api_router import ( attach_router as register_completion_api_router, ) register_completion_api_router(app) from vllm.entrypoints.anthropic.api_router import ( attach_router as register_anthropic_api_router, ) register_anthropic_api_router(app) from vllm.entrypoints.openai.models.api_router import ( attach_router as register_models_api_router, ) register_models_api_router(app) from vllm.entrypoints.sagemaker.routes import register_sagemaker_routes register_sagemaker_routes(router) app.include_router(router) app.root_path = args.root_path from vllm.entrypoints.pooling import register_pooling_api_routers register_pooling_api_routers(app) app.add_middleware( CORSMiddleware, allow_origins=args.allowed_origins, allow_credentials=args.allow_credentials, allow_methods=args.allowed_methods, allow_headers=args.allowed_headers, ) @app.exception_handler(HTTPException) async def http_exception_handler(_: Request, exc: HTTPException): err = ErrorResponse( error=ErrorInfo( message=sanitize_message(exc.detail), type=HTTPStatus(exc.status_code).phrase, code=exc.status_code, ) ) return JSONResponse(err.model_dump(), status_code=exc.status_code) @app.exception_handler(RequestValidationError) async def validation_exception_handler(_: Request, exc: RequestValidationError): param = None errors = exc.errors() for error in errors: if "ctx" in error and "error" in error["ctx"]: ctx_error = error["ctx"]["error"] if isinstance(ctx_error, VLLMValidationError): param = ctx_error.parameter break exc_str = str(exc) errors_str = str(errors) if errors and errors_str and errors_str != exc_str: message = f"{exc_str} {errors_str}" else: message = exc_str err = ErrorResponse( error=ErrorInfo( message=sanitize_message(message), type=HTTPStatus.BAD_REQUEST.phrase, code=HTTPStatus.BAD_REQUEST, param=param, ) ) return JSONResponse(err.model_dump(), status_code=HTTPStatus.BAD_REQUEST) # Ensure --api-key option from CLI takes precedence over VLLM_API_KEY if tokens := [key for key in (args.api_key or [envs.VLLM_API_KEY]) if key]: app.add_middleware(AuthenticationMiddleware, tokens=tokens) if args.enable_request_id_headers: app.add_middleware(XRequestIdMiddleware) # Add scaling 
middleware to check for scaling state app.add_middleware(ScalingMiddleware) if envs.VLLM_DEBUG_LOG_API_SERVER_RESPONSE: logger.warning( "CAUTION: Enabling log response in the API Server. " "This can include sensitive information and should be " "avoided in production." ) @app.middleware("http") async def log_response(request: Request, call_next): response = await call_next(request) response_body = [section async for section in response.body_iterator] response.body_iterator = iterate_in_threadpool(iter(response_body)) # Check if this is a streaming response by looking at content-type content_type = response.headers.get("content-type", "") is_streaming = content_type == "text/event-stream; charset=utf-8" # Log response body based on type if not response_body: logger.info("response_body={}") elif is_streaming: _log_streaming_response(response, response_body) else: _log_non_streaming_response(response_body) return response for middleware in args.middleware: module_path, object_name = middleware.rsplit(".", 1) imported = getattr(importlib.import_module(module_path), object_name) if inspect.isclass(imported): app.add_middleware(imported) # type: ignore[arg-type] elif inspect.iscoroutinefunction(imported): app.middleware("http")(imported) else: raise ValueError( f"Invalid middleware {middleware}. Must be a function or a class." ) app = sagemaker_standards.bootstrap(app) return app async def init_app_state( engine_client: EngineClient, state: State, args: Namespace, ) -> None: vllm_config = engine_client.vllm_config if args.served_model_name is not None: served_model_names = args.served_model_name else: served_model_names = [args.model] if args.enable_log_requests: request_logger = RequestLogger(max_log_len=args.max_log_len) else: request_logger = None base_model_paths = [ BaseModelPath(name=name, model_path=args.model) for name in served_model_names ] state.engine_client = engine_client state.log_stats = not args.disable_log_stats state.vllm_config = vllm_config state.args = args supported_tasks = await engine_client.get_supported_tasks() logger.info("Supported tasks: %s", supported_tasks) resolved_chat_template = load_chat_template(args.chat_template) if args.tool_server == "demo": tool_server: ToolServer | None = DemoToolServer() assert isinstance(tool_server, DemoToolServer) await tool_server.init_and_validate() elif args.tool_server: tool_server = MCPToolServer() await tool_server.add_tool_server(args.tool_server) else: tool_server = None # Merge default_mm_loras into the static lora_modules default_mm_loras = ( vllm_config.lora_config.default_mm_loras if vllm_config.lora_config is not None else {} ) lora_modules = process_lora_modules(args.lora_modules, default_mm_loras) state.openai_serving_models = OpenAIServingModels( engine_client=engine_client, base_model_paths=base_model_paths, lora_modules=lora_modules, ) await state.openai_serving_models.init_static_loras() state.openai_serving_responses = ( OpenAIServingResponses( engine_client, state.openai_serving_models, request_logger=request_logger, chat_template=resolved_chat_template, chat_template_content_format=args.chat_template_content_format, return_tokens_as_token_ids=args.return_tokens_as_token_ids, enable_auto_tools=args.enable_auto_tool_choice, tool_parser=args.tool_call_parser, tool_server=tool_server, reasoning_parser=args.structured_outputs_config.reasoning_parser,
enable_prompt_tokens_details=args.enable_prompt_tokens_details, enable_force_include_usage=args.enable_force_include_usage, enable_log_outputs=args.enable_log_outputs, log_error_stack=args.log_error_stack, ) if "generate" in supported_tasks else None ) state.openai_serving_chat = ( OpenAIServingChat( engine_client, state.openai_serving_models, args.response_role, request_logger=request_logger, chat_template=resolved_chat_template, chat_template_content_format=args.chat_template_content_format, default_chat_template_kwargs=args.default_chat_template_kwargs, trust_request_chat_template=args.trust_request_chat_template, return_tokens_as_token_ids=args.return_tokens_as_token_ids, enable_auto_tools=args.enable_auto_tool_choice, exclude_tools_when_tool_choice_none=args.exclude_tools_when_tool_choice_none, tool_parser=args.tool_call_parser, reasoning_parser=args.structured_outputs_config.reasoning_parser, enable_prompt_tokens_details=args.enable_prompt_tokens_details, enable_force_include_usage=args.enable_force_include_usage, enable_log_outputs=args.enable_log_outputs, enable_log_deltas=args.enable_log_deltas, log_error_stack=args.log_error_stack, ) if "generate" in supported_tasks else None ) # Warm up chat template processing to avoid first-request latency if state.openai_serving_chat is not None: await state.openai_serving_chat.warmup() state.openai_serving_completion = ( OpenAIServingCompletion( engine_client, state.openai_serving_models, request_logger=request_logger, return_tokens_as_token_ids=args.return_tokens_as_token_ids, enable_prompt_tokens_details=args.enable_prompt_tokens_details, enable_force_include_usage=args.enable_force_include_usage, log_error_stack=args.log_error_stack, ) if "generate" in supported_tasks else None ) state.openai_serving_tokenization = OpenAIServingTokenization( engine_client, state.openai_serving_models, request_logger=request_logger, chat_template=resolved_chat_template, chat_template_content_format=args.chat_template_content_format, trust_request_chat_template=args.trust_request_chat_template, log_error_stack=args.log_error_stack, ) state.openai_serving_transcription = ( OpenAIServingTranscription( engine_client, state.openai_serving_models, request_logger=request_logger, log_error_stack=args.log_error_stack, enable_force_include_usage=args.enable_force_include_usage, ) if "transcription" in supported_tasks else None ) state.openai_serving_translation = ( OpenAIServingTranslation( engine_client, state.openai_serving_models, request_logger=request_logger, log_error_stack=args.log_error_stack, enable_force_include_usage=args.enable_force_include_usage, ) if "transcription" in supported_tasks else None ) state.anthropic_serving_messages = ( AnthropicServingMessages( engine_client, state.openai_serving_models, args.response_role, request_logger=request_logger, chat_template=resolved_chat_template, chat_template_content_format=args.chat_template_content_format, return_tokens_as_token_ids=args.return_tokens_as_token_ids, enable_auto_tools=args.enable_auto_tool_choice, tool_parser=args.tool_call_parser, reasoning_parser=args.structured_outputs_config.reasoning_parser, enable_prompt_tokens_details=args.enable_prompt_tokens_details, enable_force_include_usage=args.enable_force_include_usage, ) if "generate" in supported_tasks else None ) state.serving_tokens = ( ServingTokens( engine_client, state.openai_serving_models, request_logger=request_logger, return_tokens_as_token_ids=args.return_tokens_as_token_ids, log_error_stack=args.log_error_stack, 
enable_prompt_tokens_details=args.enable_prompt_tokens_details, enable_log_outputs=args.enable_log_outputs, force_no_detokenize=args.tokens_only, ) if "generate" in supported_tasks else None ) from vllm.entrypoints.pooling import init_pooling_state await init_pooling_state(engine_client, state, args) state.enable_server_load_tracking = args.enable_server_load_tracking state.server_load_metrics = 0 def create_server_socket(addr: tuple[str, int]) -> socket.socket: family = socket.AF_INET if is_valid_ipv6_address(addr[0]): family = socket.AF_INET6 sock = socket.socket(family=family, type=socket.SOCK_STREAM) sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEPORT, 1) sock.bind(addr) return sock def create_server_unix_socket(path: str) -> socket.socket: sock = socket.socket(family=socket.AF_UNIX, type=socket.SOCK_STREAM) sock.bind(path) return sock def validate_api_server_args(args): valid_tool_parses = ToolParserManager.list_registered() if args.enable_auto_tool_choice and args.tool_call_parser not in valid_tool_parses: raise KeyError( f"invalid tool call parser: {args.tool_call_parser} " f"(choose from {{ {','.join(valid_tool_parses)} }})" ) valid_reasoning_parsers = ReasoningParserManager.list_registered() if ( reasoning_parser := args.structured_outputs_config.reasoning_parser ) and reasoning_parser not in valid_reasoning_parsers: raise KeyError( f"invalid reasoning parser: {reasoning_parser} " f"(choose from {{ {','.join(valid_reasoning_parsers)} }})" ) def setup_server(args): """Validate API server args, set up signal handler, create socket ready to serve.""" log_version_and_model(logger, VLLM_VERSION, args.model) log_non_default_args(args) if args.tool_parser_plugin and len(args.tool_parser_plugin) > 3: ToolParserManager.import_tool_parser(args.tool_parser_plugin) if args.reasoning_parser_plugin and len(args.reasoning_parser_plugin) > 3: ReasoningParserManager.import_reasoning_parser(args.reasoning_parser_plugin) validate_api_server_args(args) # workaround to make sure that we bind the port before the engine is set up. # This avoids race conditions with ray. # see https://github.com/vllm-project/vllm/issues/8204 if args.uds: sock = create_server_unix_socket(args.uds) else: sock_addr = (args.host or "", args.port) sock = create_server_socket(sock_addr) # workaround to avoid footguns where uvicorn drops requests when too # many concurrent requests are active set_ulimit() def signal_handler(*_) -> None: # Interrupt server on sigterm while initializing raise KeyboardInterrupt("terminated") signal.signal(signal.SIGTERM, signal_handler) if args.uds: listen_address = f"unix:{args.uds}" else: addr, port = sock_addr is_ssl = args.ssl_keyfile and args.ssl_certfile host_part = f"[{addr}]" if is_valid_ipv6_address(addr) else addr or "0.0.0.0" listen_address = f"http{'s' if is_ssl else ''}://{host_part}:{port}" return listen_address, sock async def run_server(args, **uvicorn_kwargs) -> None: """Run a single-worker API server.""" # Add process-specific prefix to stdout and stderr.
decorate_logs("APIServer") listen_address, sock = setup_server(args) await run_server_worker(listen_address, sock, args, **uvicorn_kwargs) async def run_server_worker( listen_address, sock, args, client_config=None, **uvicorn_kwargs ) -> None: """Run a single API server worker.""" if args.tool_parser_plugin and len(args.tool_parser_plugin) > 3: ToolParserManager.import_tool_parser(args.tool_parser_plugin) if args.reasoning_parser_plugin and len(args.reasoning_parser_plugin) > 3: ReasoningParserManager.import_reasoning_parser(args.reasoning_parser_plugin) # Get uvicorn log config (from file or with endpoint filter) log_config = get_uvicorn_log_config(args) if log_config is not None: uvicorn_kwargs["log_config"] = log_config async with build_async_engine_client( args, client_config=client_config, ) as engine_client: app = build_app(args) await init_app_state(engine_client, app.state, args) logger.info( "Starting vLLM API server %d on %s", engine_client.vllm_config.parallel_config._api_process_rank, listen_address, ) shutdown_task = await serve_http( app, sock=sock, enable_ssl_refresh=args.enable_ssl_refresh, host=args.host, port=args.port, log_level=args.uvicorn_log_level, # NOTE: When the 'disable_uvicorn_access_log' value is True, # no access log will be output. access_log=not args.disable_uvicorn_access_log, timeout_keep_alive=envs.VLLM_HTTP_TIMEOUT_KEEP_ALIVE, ssl_keyfile=args.ssl_keyfile, ssl_certfile=args.ssl_certfile, ssl_ca_certs=args.ssl_ca_certs, ssl_cert_reqs=args.ssl_cert_reqs, ssl_ciphers=args.ssl_ciphers, h11_max_incomplete_event_size=args.h11_max_incomplete_event_size, h11_max_header_count=args.h11_max_header_count, **uvicorn_kwargs, ) # NB: Await server shutdown only after the backend context is exited try: await shutdown_task finally: sock.close() if __name__ == "__main__": # NOTE(simon): # This section should be in sync with vllm/entrypoints/cli/main.py for CLI # entrypoints. cli_env_setup() parser = FlexibleArgumentParser( description="vLLM OpenAI-Compatible RESTful API server." ) parser = make_arg_parser(parser) args = parser.parse_args() validate_parsed_serve_args(args) uvloop.run(run_server(args))
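# ---------------------------------------------------------------------------
# Illustrative client-side sketch (kept in comments so it never executes on
# import, and not part of this module): it shows how the
# AuthenticationMiddleware and XRequestIdMiddleware defined above are
# exercised from a client's point of view. The base URL, API key, and model
# name below are assumptions for the example, not values defined in this
# file; a server is assumed to have been started with an --api-key and with
# args.enable_request_id_headers set.
#
#   import requests
#
#   BASE_URL = "http://localhost:8000"   # assumed --port 8000
#   API_KEY = "my-secret-key"            # assumed --api-key value
#
#   resp = requests.post(
#       f"{BASE_URL}/v1/chat/completions",
#       headers={
#           # Checked by AuthenticationMiddleware for paths under /v1.
#           "Authorization": f"Bearer {API_KEY}",
#           # Echoed back by XRequestIdMiddleware; a random uuid4 hex is
#           # generated instead if this header is absent.
#           "X-Request-Id": "example-request-123",
#       },
#       json={
#           "model": "my-model",          # assumed served model name
#           "messages": [{"role": "user", "content": "Hello!"}],
#       },
#       timeout=60,
#   )
#   print(resp.headers.get("X-Request-Id"))
#   print(resp.json()["choices"][0]["message"]["content"])
# ---------------------------------------------------------------------------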
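# ---------------------------------------------------------------------------
# Illustrative sketch of a user-supplied --middleware object (a hypothetical
# module named "my_middleware"; not part of vLLM). build_app() above accepts
# either a class, registered via app.add_middleware(), or an async coroutine
# function, registered via app.middleware("http"), so both shapes are shown.
# This is a minimal sketch under those assumptions, not a definitive
# implementation; it is kept in comments so it never executes on import.
#
#   # my_middleware.py (hypothetical)
#   import logging
#   from fastapi import Request
#   from starlette.types import ASGIApp, Receive, Scope, Send
#
#   logger = logging.getLogger("my_middleware")
#
#   class PassThroughMiddleware:
#       """Class form: pure ASGI middleware, instantiated with the app."""
#
#       def __init__(self, app: ASGIApp) -> None:
#           self.app = app
#
#       async def __call__(self, scope: Scope, receive: Receive, send: Send) -> None:
#           # Forward every event unchanged to the wrapped application.
#           await self.app(scope, receive, send)
#
#   async def log_request_path(request: Request, call_next):
#       """Coroutine form: FastAPI-style HTTP middleware."""
#       logger.info("handling %s %s", request.method, request.url.path)
#       return await call_next(request)
#
#   # Hypothetical invocations (dotted paths resolved by build_app):
#   #   vllm serve <model> --middleware my_middleware.PassThroughMiddleware
#   #   vllm serve <model> --middleware my_middleware.log_request_path
# ---------------------------------------------------------------------------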