[V1] AsyncLLM data parallel (#13923)
Signed-off-by: Nick Hill <nhill@redhat.com>
@@ -128,12 +128,18 @@ class EngineCoreOutputs(
     #NOTE(Nick): We could consider ways to make this more compact,
     # e.g. columnwise layout

+    engine_index: int = 0
+
     # [num_reqs]
     outputs: list[EngineCoreOutput] = []
     scheduler_stats: Optional[SchedulerStats] = None
     timestamp: float = 0.0

     utility_output: Optional[UtilityOutput] = None
+    finished_requests: Optional[set[str]] = None
+
+    # In DP case, used to signal that the engine is paused.
+    engine_paused: bool = False

     def __post_init__(self):
         if self.timestamp == 0.0:
@@ -147,4 +153,5 @@ class EngineCoreRequestType(enum.Enum):
     """
     ADD = b'\x00'
     ABORT = b'\x01'
-    UTILITY = b'\x02'
+    START_DP = b'\x02'
+    UTILITY = b'\x03'
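Note on the wire protocol (not part of the diff): client-to-engine messages are two ZMQ frames, the one-byte request type followed by a msgpack-encoded payload. A minimal sketch of framing the new START_DP control message, assuming a known ipc path:

    import msgspec
    import zmq

    encoder = msgspec.msgpack.Encoder()
    # START_DP carries no payload, mirroring start_dp_msg in the client below.
    msg = (b'\x02', encoder.encode(None))

    ctx = zmq.Context()
    socket = ctx.socket(zmq.PUSH)
    socket.connect("ipc:///tmp/engine_input")  # hypothetical path
    socket.send_multipart(msg, copy=False)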
@@ -66,11 +66,17 @@ class AsyncLLM(EngineClient):

         self.log_requests = log_requests
         self.log_stats = log_stats
-        self.stat_loggers: list[StatLoggerBase] = []
+
+        # Set up stat loggers; independent set for each DP rank.
+        self.stat_loggers: list[list[StatLoggerBase]] = []
         if self.log_stats:
-            if logger.isEnabledFor(logging.INFO):
-                self.stat_loggers.append(LoggingStatLogger())
-            self.stat_loggers.append(PrometheusStatLogger(vllm_config))
+            for i in range(vllm_config.parallel_config.data_parallel_size):
+                loggers: list[StatLoggerBase] = []
+                if logger.isEnabledFor(logging.INFO):
+                    loggers.append(LoggingStatLogger(engine_index=i))
+                loggers.append(
+                    PrometheusStatLogger(vllm_config, engine_index=i))
+                self.stat_loggers.append(loggers)

         # Tokenizer (+ ensure liveness if running in another process).
         self.tokenizer = init_tokenizer_from_configs(
@@ -329,6 +335,7 @@ class AsyncLLM(EngineClient):
                 # TODO(rob): make into a coroutine and launch it in
                 # background thread once Prometheus overhead is non-trivial.
                 self._record_stats(
+                    engine_index=outputs.engine_index,
                     scheduler_stats=outputs.scheduler_stats,
                     iteration_stats=iteration_stats,
                 )
@@ -350,12 +357,13 @@ class AsyncLLM(EngineClient):
         self,
         scheduler_stats: Optional[SchedulerStats],
         iteration_stats: Optional[IterationStats],
+        engine_index: int = 0,
     ):
         if not self.log_stats:
             return

         assert scheduler_stats is not None
-        for stat_logger in self.stat_loggers:
+        for stat_logger in self.stat_loggers[engine_index]:
             stat_logger.record(scheduler_stats=scheduler_stats,
                                iteration_stats=iteration_stats)
@@ -393,8 +401,9 @@ class AsyncLLM(EngineClient):
             scheduler_outputs=None,
             model_output=None,
     ) -> None:
-        for stat_logger in self.stat_loggers:
-            stat_logger.log()
+        for loggers in self.stat_loggers:
+            for stat_logger in loggers:
+                stat_logger.log()

     async def check_health(self) -> None:
         logger.debug("Called check_health.")
@@ -1,12 +1,13 @@
 # SPDX-License-Identifier: Apache-2.0

+import os
 import queue
 import signal
+import sys
 import threading
 import time
 from concurrent.futures import Future
 from inspect import isclass, signature
-from multiprocessing.connection import Connection
+from logging import DEBUG
 from typing import Any, Optional

 import msgspec
@@ -14,7 +15,9 @@ import psutil
 import zmq
 import zmq.asyncio

-from vllm.config import VllmConfig
+from vllm.config import ParallelConfig, VllmConfig
+from vllm.distributed import stateless_destroy_torch_distributed_process_group
+from vllm.executor.multiproc_worker_utils import _add_prefix
 from vllm.logger import init_logger
 from vllm.lora.request import LoRARequest
 from vllm.transformers_utils.config import (
@@ -91,6 +94,8 @@ class EngineCore:
             cache_config=vllm_config.cache_config,
             lora_config=vllm_config.lora_config,
             speculative_config=vllm_config.speculative_config,
+            include_finished_set=vllm_config.parallel_config.data_parallel_size
+            > 1,
             log_stats=self.log_stats,
             structured_output_manager=self.structured_output_manager,
         )
@@ -283,10 +288,10 @@ class EngineCoreProc(EngineCore):
         self,
         input_path: str,
         output_path: str,
-        ready_pipe: Connection,
         vllm_config: VllmConfig,
         executor_class: type[Executor],
         log_stats: bool,
+        engine_index: int = 0,
     ):
         super().__init__(vllm_config, executor_class, log_stats)
@@ -302,14 +307,20 @@ class EngineCoreProc(EngineCore):
                          args=(input_path, ),
                          daemon=True).start()
         threading.Thread(target=self.process_output_socket,
-                         args=(output_path, ),
+                         args=(output_path, engine_index),
                          daemon=True).start()

-        # Send Readiness signal to EngineClient.
-        ready_pipe.send({"status": "READY"})
+        self.global_unfinished_reqs = False
+
+        self.step_fn = (self.step if self.batch_queue is None else
+                        self.step_with_batch_queue)

     @staticmethod
-    def run_engine_core(*args, **kwargs):
+    def run_engine_core(*args,
+                        dp_rank: int = 0,
+                        local_dp_rank: int = 0,
+                        ready_pipe,
+                        **kwargs):
         """Launch EngineCore busy loop in background process."""

         # Signal handler used for graceful termination.
@@ -331,9 +342,21 @@ class EngineCoreProc(EngineCore):
         signal.signal(signal.SIGINT, signal_handler)

         parent_process = psutil.Process().parent()
-        engine_core = None
+        engine_core: Optional[EngineCoreProc] = None
         try:
-            engine_core = EngineCoreProc(*args, **kwargs)
+            parallel_config: ParallelConfig = kwargs[
+                "vllm_config"].parallel_config
+            if parallel_config.data_parallel_size > 1:
+                # Set data parallel rank for this engine process.
+                parallel_config.data_parallel_rank = dp_rank
+                parallel_config.data_parallel_rank_local = local_dp_rank
+                engine_core = DPEngineCoreProc(*args, **kwargs)
+            else:
+                engine_core = EngineCoreProc(*args, **kwargs)
+
+            # Send Readiness signal to EngineClient.
+            ready_pipe.send({"status": "READY"})

             engine_core.run_busy_loop()

         except SystemExit:
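For orientation, a hypothetical simplification (not part of the diff) of how the per-rank kwargs and readiness handshake reach run_engine_core when the client spawns the process via BackgroundProcHandle:

    from multiprocessing import Pipe, Process

    # Schematic only; input_path, output_path, vllm_config and
    # executor_class are assumed to be in scope.
    reader, writer = Pipe(duplex=False)
    proc = Process(target=EngineCoreProc.run_engine_core,
                   kwargs={
                       "input_path": input_path,
                       "output_path": output_path,
                       "vllm_config": vllm_config,
                       "executor_class": executor_class,
                       "log_stats": True,
                       "dp_rank": 1,
                       "local_dp_rank": 1,
                       "ready_pipe": writer,
                   })
    proc.start()
    # Blocks until the child sends its readiness signal.
    assert reader.recv() == {"status": "READY"}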
@@ -351,28 +374,44 @@ class EngineCoreProc(EngineCore):
     def run_busy_loop(self):
         """Core busy loop of the EngineCore."""

-        step_fn = (self.step
-                   if self.batch_queue is None else self.step_with_batch_queue)
-
         # Loop until process is sent a SIGINT or SIGTERM
         while True:
             # 1) Poll the input queue until there is work to do.
-            while not self.scheduler.has_requests():
-                logger.debug("EngineCore busy loop waiting.")
-                req = self.input_queue.get()
-                self._handle_client_request(*req)
+            self._process_input_queue()
+            # 2) Step the engine core and return the outputs.
+            self._process_engine_step()

-            # 2) Handle any new client requests.
-            while not self.input_queue.empty():
-                req = self.input_queue.get_nowait()
-                self._handle_client_request(*req)
+    def _process_input_queue(self):
+        """Exits when an engine step needs to be performed."""

-            # 3) Step the engine core.
-            outputs = step_fn()
+        waited = False
+        while not self.global_unfinished_reqs and not (
+                self.scheduler.has_requests()):
+            if logger.isEnabledFor(DEBUG) and self.input_queue.empty():
+                logger.debug("EngineCore waiting for work.")
+                waited = True
+            req = self.input_queue.get()
+            self._handle_client_request(*req)

-            # 4) Put EngineCoreOutputs into the output queue.
-            if outputs is not None:
-                self.output_queue.put_nowait(outputs)
+        if waited:
+            logger.debug(
+                "EngineCore loop active - local unfinished: %s, finished: %s.",
+                self.scheduler.has_unfinished_requests(),
+                self.scheduler.has_finished_requests())
+
+        # Handle any more client requests.
+        while not self.input_queue.empty():
+            req = self.input_queue.get_nowait()
+            self._handle_client_request(*req)
+
+    def _process_engine_step(self):
+        """Called only when there are unfinished local requests."""
+
+        # Step the engine core.
+        outputs = self.step_fn()
+        # Put EngineCoreOutputs into the output queue.
+        if outputs is not None:
+            self.output_queue.put_nowait(outputs)

     def _handle_client_request(self, request_type: EngineCoreRequestType,
                                request: Any) -> None:
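In outline, the refactor separates blocking on input from stepping the engine; a condensed paraphrase (illustrative pseudocode, not part of the diff):

    # Condensed paraphrase of the refactored busy loop above.
    while True:
        # _process_input_queue(): block until a step is warranted, i.e.
        # a DP peer is active or there is local work scheduled/waiting.
        while not (global_unfinished_reqs or scheduler.has_requests()):
            handle(input_queue.get())        # blocking wait
        while not input_queue.empty():       # drain without blocking
            handle(input_queue.get_nowait())
        # _process_engine_step(): one step, publish non-empty outputs.
        outputs = step_fn()
        if outputs is not None:
            output_queue.put_nowait(outputs)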
@@ -382,6 +421,10 @@ class EngineCoreProc(EngineCore):
             self.add_request(request)
         elif request_type == EngineCoreRequestType.ABORT:
             self.abort_requests(request)
+        elif request_type == EngineCoreRequestType.START_DP:
+            if not self.global_unfinished_reqs:
+                logger.debug("EngineCore starting idle loop.")
+                self.global_unfinished_reqs = True
         elif request_type == EngineCoreRequestType.UTILITY:
             call_id, method_name, args = request
             output = UtilityOutput(call_id)
@@ -432,7 +475,7 @@ class EngineCoreProc(EngineCore):
         # Push to input queue for core busy loop.
         self.input_queue.put_nowait((request_type, request))

-    def process_output_socket(self, output_path: str):
+    def process_output_socket(self, output_path: str, engine_index: int):
         """Output socket IO thread."""

         # Msgpack serialization encoding.
@@ -443,5 +486,114 @@ class EngineCoreProc(EngineCore):
         with zmq_socket_ctx(output_path, zmq.constants.PUSH) as socket:
             while True:
                 outputs = self.output_queue.get()
+                outputs.engine_index = engine_index
                 encoder.encode_into(outputs, buffer)
-                socket.send_multipart((buffer, ), copy=False)
+                socket.send(buffer, copy=False)
+
+
+ENGINE_PAUSED_OUTPUTS = EngineCoreOutputs(engine_paused=True)
+
+
+class DPEngineCoreProc(EngineCoreProc):
+    """ZMQ-wrapper for running EngineCore in background process
+    in a data parallel context."""
+
+    def __init__(
+        self,
+        input_path: str,
+        output_path: str,
+        vllm_config: VllmConfig,
+        executor_class: type[Executor],
+        log_stats: bool,
+    ):
+        # Add process-specific prefix to stdout and stderr before
+        # we initialize the engine.
+        from multiprocessing import current_process
+        process_name = current_process().name
+        pid = os.getpid()
+        _add_prefix(sys.stdout, process_name, pid)
+        _add_prefix(sys.stderr, process_name, pid)
+
+        dp_size = vllm_config.parallel_config.data_parallel_size
+        dp_rank = vllm_config.parallel_config.data_parallel_rank
+        local_dp_rank = vllm_config.parallel_config.data_parallel_rank_local
+
+        assert dp_size > 1
+        assert 0 <= local_dp_rank <= dp_rank < dp_size
+
+        from vllm.platforms import current_platform
+        if current_platform.is_cuda_alike():
+            from vllm.platforms.cuda import device_id_to_physical_device_id
+            tp_size = vllm_config.parallel_config.tensor_parallel_size
+            os.environ["CUDA_VISIBLE_DEVICES"] = ",".join(
+                str(device_id_to_physical_device_id(i))
+                for i in range(local_dp_rank * tp_size, (local_dp_rank + 1) *
+                               tp_size))
+
+        self.dp_group = vllm_config.parallel_config.stateless_init_dp_group()
+
+        # Initialize the engine after setting up environment.
+        super().__init__(input_path, output_path, vllm_config, executor_class,
+                         log_stats, dp_rank)
+
+        # Counts forward-passes of the model so that we can synchronize
+        # finished with DP peers every N steps.
+        self.counter = 0
+
+    def shutdown(self):
+        super().shutdown()
+        if dp_group := getattr(self, "dp_group", None):
+            stateless_destroy_torch_distributed_process_group(dp_group)
+
+    def run_busy_loop(self):
+        """Core busy loop of the EngineCore for data parallel case."""

+        # Loop until process is sent a SIGINT or SIGTERM
+        while True:
+            # 1) Poll the input queue until there is work to do.
+            self._process_input_queue()
+
+            local_unfinished_reqs = self.scheduler.has_unfinished_requests()
+
+            if local_unfinished_reqs:
+                # 2) Step the engine core.
+                self._process_engine_step()
+
+                # Check if we have now finished all requests.
+                local_unfinished_reqs = (
+                    self.scheduler.has_unfinished_requests())
+            else:
+                if self.scheduler.has_finished_requests():
+                    # There are no unfinished requests, but there are some
+                    # finished requests remaining to be removed from the
+                    # batch state. This engine step won't perform a forward
+                    # pass but will flush the finished requests to ensure
+                    # up-to-date state is returned in the engine outputs.
+                    self._process_engine_step()
+
+                if not self.global_unfinished_reqs:
+                    # All engines are idle.
+                    continue
+
+                # There must be unfinished requests in DP peers, run a
+                # dummy forward pass.
+                self.execute_dummy_batch()
+
+            # 3) All-reduce operation to determine global unfinished reqs.
+            self.global_unfinished_reqs = self._has_global_unfinished_reqs(
+                local_unfinished_reqs)
+
+            if not self.global_unfinished_reqs:
+                # Notify client that we are pausing the loop.
+                self.output_queue.put_nowait(ENGINE_PAUSED_OUTPUTS)
+
+    def _has_global_unfinished_reqs(self, local_unfinished: bool) -> bool:
+
+        # Optimization - only perform finish-sync all-reduce every 16 steps.
+        self.counter += 1
+        if self.counter != 16:
+            return True
+        self.counter = 0
+
+        return ParallelConfig.has_unfinished_dp(self.dp_group,
+                                                local_unfinished)
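ParallelConfig.has_unfinished_dp is not shown in this diff; a plausible sketch of such a finished-sync, assuming a CPU (gloo) process group, is an all-reduce with MAX over a one-element flag, so the DP group keeps running while any rank still has work:

    import torch
    import torch.distributed as dist

    # Hypothetical sketch; the real helper lives on vllm.config.ParallelConfig.
    def has_unfinished_dp(dp_group, local_unfinished: bool) -> bool:
        flag = torch.tensor([int(local_unfinished)], dtype=torch.int32)
        # MAX across ranks: True if any DP rank still has requests.
        dist.all_reduce(flag, op=dist.ReduceOp.MAX, group=dp_group)
        return bool(flag.item())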
@@ -8,10 +8,11 @@ import threading
 import uuid
 import weakref
 from abc import ABC, abstractmethod
+from collections.abc import Awaitable, Sequence
 from concurrent.futures import Future
-from dataclasses import dataclass
+from dataclasses import dataclass, field
 from threading import Thread
-from typing import Any, Optional, Union
+from typing import Any, Callable, Optional, Union

 import zmq
 import zmq.asyncio
@@ -60,6 +61,9 @@ class EngineCoreClient(ABC):
                 "is not currently supported.")

         if multiprocess_mode and asyncio_mode:
+            if vllm_config.parallel_config.data_parallel_size > 1:
+                return DPAsyncMPClient(vllm_config, executor_class, log_stats)
+
             return AsyncMPClient(vllm_config, executor_class, log_stats)

         if multiprocess_mode and not asyncio_mode:
@@ -207,28 +211,74 @@ class InprocClient(EngineCoreClient):
         return self.engine_core.pin_lora(lora_id)


+class CoreEngine:
+    """One per data parallel rank."""
+
+    def __init__(
+        self,
+        vllm_config: VllmConfig,
+        executor_class: type[Executor],
+        log_stats: bool,
+        ctx: Union[zmq.Context, zmq.asyncio.Context],
+        output_path: str,
+        index: int = 0,
+        local_dp_rank: int = 0,
+    ):
+        # Paths and sockets for IPC.
+        input_path = get_open_zmq_ipc_path()
+        self.input_socket = make_zmq_socket(ctx, input_path,
+                                            zmq.constants.PUSH)
+        try:
+            # Start EngineCore in background process.
+            self.proc_handle = BackgroundProcHandle(
+                input_path=input_path,
+                output_path=output_path,
+                process_name=f"EngineCore_{index}",
+                target_fn=EngineCoreProc.run_engine_core,
+                process_kwargs={
+                    "vllm_config": vllm_config,
+                    "dp_rank": index,
+                    "local_dp_rank": local_dp_rank,
+                    "executor_class": executor_class,
+                    "log_stats": log_stats,
+                })
+
+            self.num_reqs_in_flight = 0
+        finally:
+            if not hasattr(self, "num_reqs_in_flight"):
+                # Ensure socket is closed if process fails to start.
+                self.close()
+
+    def send_multipart(self, msg_parts: Sequence):
+        return self.input_socket.send_multipart(msg_parts, copy=False)
+
+    def close(self):
+        if proc_handle := getattr(self, "proc_handle", None):
+            proc_handle.shutdown()
+        if socket := getattr(self, "input_socket", None):
+            socket.close(linger=0)
+
+
 @dataclass
 class BackgroundResources:
     """Used as a finalizer for clean shutdown, avoiding
     circular reference back to the client object."""

-    ctx: zmq.Context
+    ctx: Union[zmq.Context]
+    core_engines: list[CoreEngine] = field(default_factory=list)
     output_socket: Optional[Union[zmq.Socket, zmq.asyncio.Socket]] = None
     input_socket: Optional[Union[zmq.Socket, zmq.asyncio.Socket]] = None
-    proc_handle: Optional[BackgroundProcHandle] = None
     shutdown_path: Optional[str] = None

     def __call__(self):
         """Clean up background resources."""

-        if self.proc_handle is not None:
-            self.proc_handle.shutdown()
+        for core_engine in self.core_engines:
+            core_engine.close()

         # ZMQ context termination can hang if the sockets
         # aren't explicitly closed first.
         if self.output_socket is not None:
             self.output_socket.close(linger=0)
         if self.input_socket is not None:
             self.input_socket.close(linger=0)
         if self.shutdown_path is not None:
             # We must ensure that the sync output socket is
             # closed cleanly in its own thread.
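The core_engines list lives on BackgroundResources rather than on the client because the resources object doubles as a weakref.finalize callback, which must not hold a reference back to the client. A minimal sketch of that pattern (illustrative, hypothetical names):

    import weakref

    class Resources:
        # Deliberately holds no reference back to its owner.
        def __init__(self):
            self.handles = []

        def __call__(self):
            for handle in self.handles:
                handle.close()

    class Client:
        def __init__(self):
            self.resources = Resources()
            # Invokes resources() when the client is garbage collected,
            # or earlier if called explicitly (cf. shutdown below).
            self._finalizer = weakref.finalize(self, self.resources)

        def shutdown(self):
            self._finalizer()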
@@ -284,7 +334,7 @@ class MPClient(EngineCoreClient):
         self.decoder = MsgpackDecoder(EngineCoreOutputs)

         # ZMQ setup.
-        sync_ctx = zmq.Context()
+        sync_ctx = zmq.Context(io_threads=2)
         self.ctx = zmq.asyncio.Context(sync_ctx) if asyncio_mode else sync_ctx

         # This will ensure resources created so far are closed
@@ -293,28 +343,38 @@ class MPClient(EngineCoreClient):
         self.resources = BackgroundResources(ctx=sync_ctx)
         self._finalizer = weakref.finalize(self, self.resources)

-        # Paths for IPC.
+        # Paths and sockets for IPC.
         self.output_path = get_open_zmq_ipc_path()
-        input_path = get_open_zmq_ipc_path()

-        # Start EngineCore in background process.
-        self.resources.proc_handle = BackgroundProcHandle(
-            input_path=input_path,
-            output_path=self.output_path,
-            process_name="EngineCore",
-            target_fn=EngineCoreProc.run_engine_core,
-            process_kwargs={
-                "vllm_config": vllm_config,
-                "executor_class": executor_class,
-                "log_stats": log_stats,
-            })
+        new_core_engine = lambda index, local_dp_rank=None: CoreEngine(
+            vllm_config, executor_class, log_stats, self.ctx, self.output_path,
+            index, local_dp_rank)
+
+        # Start engine core process(es).
+        self._init_core_engines(vllm_config, new_core_engine,
+                                self.resources.core_engines)
+
+        # Wait for engine core process(es) to start.
+        for engine in self.resources.core_engines:
+            engine.proc_handle.wait_for_startup()

-        # Create input socket.
-        self.resources.input_socket = make_zmq_socket(self.ctx, input_path,
-                                                      zmq.constants.PUSH)
-        self.input_socket = self.resources.input_socket
         self.utility_results: dict[int, AnyFuture] = {}

+    def _init_core_engines(
+        self,
+        vllm_config: VllmConfig,
+        new_core_engine: Callable[[int, Optional[int]], CoreEngine],
+        core_engines: list[CoreEngine],
+    ) -> None:
+
+        # Default case - single core engine.
+        dp_rank = vllm_config.parallel_config.data_parallel_rank
+        local_dp_rank = vllm_config.parallel_config.data_parallel_rank_local
+        core_engine = new_core_engine(
+            dp_rank, local_dp_rank if local_dp_rank is not None else dp_rank)
+        core_engines.append(core_engine)
+        self.core_engine = core_engine
+
     def shutdown(self):
         self._finalizer()
@@ -370,7 +430,7 @@ class SyncMPClient(MPClient):
                     # shutdown signal, exit thread.
                     break

-                (frame, ) = out_socket.recv_multipart(copy=False)
+                frame = out_socket.recv(copy=False)
                 outputs = decoder.decode(frame.buffer)
                 if outputs.utility_output:
                     _process_utility_output(outputs.utility_output,
@@ -391,18 +451,15 @@ class SyncMPClient(MPClient):
     def get_output(self) -> EngineCoreOutputs:
         return self.outputs_queue.get()

-    def _send_input(self, request_type: EngineCoreRequestType,
-                    request: Any) -> None:
-
+    def _send_input(self, request_type: EngineCoreRequestType, request: Any):
         # (RequestType, SerializedRequest)
         msg = (request_type.value, self.encoder.encode(request))
-        self.input_socket.send_multipart(msg, copy=False)
+        self.core_engine.send_multipart(msg)

-    def _call_utility(self, method: str, *args) -> Any:
+    def call_utility(self, method: str, *args) -> Any:
         call_id = uuid.uuid1().int >> 64
         future: Future[Any] = Future()
         self.utility_results[call_id] = future

         self._send_input(EngineCoreRequestType.UTILITY,
                          (call_id, method, args))
@@ -419,34 +476,34 @@ class SyncMPClient(MPClient):
         self._send_input(EngineCoreRequestType.ABORT, request_ids)

     def profile(self, is_start: bool = True) -> None:
-        self._call_utility("profile", is_start)
+        self.call_utility("profile", is_start)

     def reset_prefix_cache(self) -> None:
-        self._call_utility("reset_prefix_cache")
+        self.call_utility("reset_prefix_cache")

     def add_lora(self, lora_request: LoRARequest) -> bool:
-        return self._call_utility("add_lora", lora_request)
+        return self.call_utility("add_lora", lora_request)

     def remove_lora(self, lora_id: int) -> bool:
-        return self._call_utility("remove_lora", lora_id)
+        return self.call_utility("remove_lora", lora_id)

     def list_loras(self) -> set[int]:
-        return self._call_utility("list_loras")
+        return self.call_utility("list_loras")

     def pin_lora(self, lora_id: int) -> bool:
-        return self._call_utility("pin_lora", lora_id)
+        return self.call_utility("pin_lora", lora_id)

     def sleep(self, level: int = 1) -> None:
-        self._call_utility("sleep", level)
+        self.call_utility("sleep", level)

     def wake_up(self) -> None:
-        self._call_utility("wake_up")
+        self.call_utility("wake_up")

     def is_sleeping(self) -> bool:
-        return self._call_utility("is_sleeping")
+        return self.call_utility("is_sleeping")

     def execute_dummy_batch(self) -> None:
-        self._call_utility("execute_dummy_batch")
+        self.call_utility("execute_dummy_batch")


 class AsyncMPClient(MPClient):
@@ -464,13 +521,21 @@ class AsyncMPClient(MPClient):
         self.outputs_queue: Optional[asyncio.Queue[EngineCoreOutputs]] = None
         self.queue_task: Optional[asyncio.Task] = None

-    async def _start_output_queue_task(self):
+        self.outputs_handler: Optional[Callable[
+            [AsyncMPClient, EngineCoreOutputs], Awaitable[None]]] = None
+
+    def _ensure_output_queue_task(self):
         if self.outputs_queue is not None:
             return

         # Perform IO in separate task to parallelize as much as possible.
+        # Avoid task having direct reference back to the client.
         self.outputs_queue = asyncio.Queue()
         decoder = self.decoder
         utility_results = self.utility_results
         outputs_queue = self.outputs_queue
+        output_handler = self.outputs_handler
+        _self_ref = weakref.ref(self) if output_handler else None
         output_path = self.output_path
         output_socket = make_zmq_socket(self.ctx, output_path,
                                         zmq.constants.PULL)
@@ -483,34 +548,52 @@ class AsyncMPClient(MPClient):
                 if outputs.utility_output:
                     _process_utility_output(outputs.utility_output,
                                             utility_results)
-                else:
+                    continue
+
+                if output_handler is not None:
+                    assert _self_ref is not None
+                    _self = _self_ref()
+                    if not _self:
+                        # Client has been garbage collected, abort.
+                        return
+                    await output_handler(_self, outputs)
+
+                if outputs.outputs or outputs.scheduler_stats:
                     outputs_queue.put_nowait(outputs)

         self.queue_task = asyncio.create_task(process_outputs_socket(),
                                               name="EngineCoreOutputQueueTask")

     async def get_output_async(self) -> EngineCoreOutputs:
-        if self.outputs_queue is None:
-            await self._start_output_queue_task()
-            assert self.outputs_queue is not None
+        self._ensure_output_queue_task()
+        assert self.outputs_queue is not None
         return await self.outputs_queue.get()

     async def _send_input(self, request_type: EngineCoreRequestType,
                           request: Any) -> None:
-        msg = (request_type.value, self.encoder.encode(request))
-        await self.input_socket.send_multipart(msg, copy=False)
-
-        if self.outputs_queue is None:
-            await self._start_output_queue_task()
+        await self.core_engine.send_multipart(
+            (request_type.value, self.encoder.encode(request)))
+
+        self._ensure_output_queue_task()
+
+    async def call_utility_async(self, method: str, *args) -> Any:
+        return await self._call_utility_async(method,
+                                              *args,
+                                              engine=self.core_engine)

-    async def _call_utility_async(self, method: str, *args) -> Any:
+    async def _call_utility_async(
+        self,
+        method: str,
+        *args,
+        engine: CoreEngine,
+    ) -> Any:
         call_id = uuid.uuid1().int >> 64
         future = asyncio.get_running_loop().create_future()
         self.utility_results[call_id] = future
-        await self._send_input(EngineCoreRequestType.UTILITY,
-                               (call_id, method, args))
+        message = (EngineCoreRequestType.UTILITY.value,
+                   self.encoder.encode((call_id, method, args)))
+        await engine.send_multipart(message)
+        self._ensure_output_queue_task()
         return await future

     async def add_request_async(self, request: EngineCoreRequest) -> None:
@@ -524,31 +607,146 @@ class AsyncMPClient(MPClient):
         await self._send_input(EngineCoreRequestType.ABORT, request_ids)

     async def profile_async(self, is_start: bool = True) -> None:
-        await self._call_utility_async("profile", is_start)
+        await self.call_utility_async("profile", is_start)

     async def reset_prefix_cache_async(self) -> None:
-        await self._call_utility_async("reset_prefix_cache")
+        await self.call_utility_async("reset_prefix_cache")

     async def sleep_async(self, level: int = 1) -> None:
-        await self._call_utility_async("sleep", level)
+        await self.call_utility_async("sleep", level)

     async def wake_up_async(self) -> None:
-        await self._call_utility_async("wake_up")
+        await self.call_utility_async("wake_up")

     async def is_sleeping_async(self) -> bool:
-        return await self._call_utility_async("is_sleeping")
+        return await self.call_utility_async("is_sleeping")

     async def execute_dummy_batch_async(self) -> None:
-        await self._call_utility_async("execute_dummy_batch")
+        await self.call_utility_async("execute_dummy_batch")

     async def add_lora_async(self, lora_request: LoRARequest) -> bool:
-        return await self._call_utility_async("add_lora", lora_request)
+        return await self.call_utility_async("add_lora", lora_request)

     async def remove_lora_async(self, lora_id: int) -> bool:
-        return await self._call_utility_async("remove_lora", lora_id)
+        return await self.call_utility_async("remove_lora", lora_id)

     async def list_loras_async(self) -> set[int]:
-        return await self._call_utility_async("list_loras")
+        return await self.call_utility_async("list_loras")

     async def pin_lora_async(self, lora_id: int) -> bool:
-        return await self._call_utility_async("pin_lora", lora_id)
+        return await self.call_utility_async("pin_lora", lora_id)
+
+
+class DPAsyncMPClient(AsyncMPClient):
+    """Asyncio-compatible client for multi-proc, multi-engine (data parallel)
+    EngineCore."""
+
+    def __init__(self, vllm_config: VllmConfig, executor_class: type[Executor],
+                 log_stats: bool):
+        super().__init__(vllm_config, executor_class, log_stats)
+
+        assert len(self.core_engines) > 1
+
+        # Control message used for triggering dp idle mode loop.
+        self.start_dp_msg = (EngineCoreRequestType.START_DP.value,
+                             self.encoder.encode(None))
+
+        self.num_engines_running = 0
+        self.reqs_in_flight: dict[str, CoreEngine] = {}
+
+        self.outputs_handler = DPAsyncMPClient.process_engine_outputs  # type: ignore[assignment]
+
+    def _init_core_engines(
+        self,
+        vllm_config: VllmConfig,
+        new_core_engine: Callable[[int, Optional[int]], CoreEngine],
+        core_engines: list[CoreEngine],
+    ) -> None:
+
+        # Launch a core engine for each data parallel rank.
+        dp_size = vllm_config.parallel_config.data_parallel_size
+        for i in range(dp_size):
+            # Multi-node not yet supported so local_dp_rank == dp_rank.
+            core_engines.append(new_core_engine(i, i))
+
+        self.core_engines = core_engines
+
+    async def call_utility_async(self, method: str, *args) -> Any:
+        # Only the result from the first engine is returned.
+        return (await asyncio.gather(*[
+            self._call_utility_async(method, *args, engine=engine)
+            for engine in self.core_engines
+        ]))[0]
+
+    async def add_request_async(self, request: EngineCoreRequest) -> None:
+        # NOTE: text prompt is not needed in the core engine as it has been
+        # tokenized.
+        request.prompt = None
+
+        msg = (EngineCoreRequestType.ADD.value, self.encoder.encode(request))
+
+        chosen_engine = self.get_core_engine_for_request()
+        self.reqs_in_flight[request.request_id] = chosen_engine
+        chosen_engine.num_reqs_in_flight += 1
+        if self.num_engines_running >= len(self.core_engines):
+            await chosen_engine.send_multipart(msg)
+        else:
+            # Send request to chosen engine and dp start loop
+            # control message to all other engines.
+            self.num_engines_running += len(self.core_engines)
+            await asyncio.gather(*[
+                engine.send_multipart(msg if engine is
+                                      chosen_engine else self.start_dp_msg)
+                for engine in self.core_engines
+            ])
+
+        self._ensure_output_queue_task()
+
+    def get_core_engine_for_request(self) -> CoreEngine:
+        return min(self.core_engines, key=lambda e: e.num_reqs_in_flight)
+
+    @staticmethod
+    async def process_engine_outputs(self: "DPAsyncMPClient",
+                                     outputs: EngineCoreOutputs):
+        if self.reqs_in_flight:
+            for req_id in outputs.finished_requests or ():
+                if engine := self.reqs_in_flight.pop(req_id, None):
+                    engine.num_reqs_in_flight -= 1
+
+        if outputs.engine_paused:
+            assert self.num_engines_running >= 1
+            self.num_engines_running -= 1
+            if not self.num_engines_running and self.reqs_in_flight:
+                # If there are requests in flight here, they must have
+                # been sent after the engines paused. We must make
+                # sure to start the other engines:
+                self.num_engines_running = len(self.core_engines)
+                coros = [
+                    engine.send_multipart(self.start_dp_msg)
+                    for engine in self.core_engines
+                    if not engine.num_reqs_in_flight
+                ]
+                if coros:
+                    await asyncio.gather(*coros)
+
+    async def abort_requests_async(self, request_ids: list[str]) -> None:
+        if not request_ids:
+            return
+
+        if len(request_ids) == 1:
+            # Fast-path common case.
+            if engine := self.reqs_in_flight.get(request_ids[0]):
+                await self._abort_requests(request_ids, engine)
+            return
+
+        by_engine: dict[CoreEngine, list[str]] = {}
+        for req_id in request_ids:
+            if engine := self.reqs_in_flight.get(req_id):
+                by_engine.setdefault(engine, []).append(req_id)
+        for engine, req_ids in by_engine.items():
+            await self._abort_requests(req_ids, engine)
+
+    async def _abort_requests(self, request_ids: list[str],
+                              engine: CoreEngine) -> None:
+        await engine.send_multipart((EngineCoreRequestType.ABORT.value,
+                                     self.encoder.encode(request_ids)))
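The least-loaded routing in get_core_engine_for_request can be exercised in isolation; a toy illustration (hypothetical standalone snippet, not part of the diff):

    class Engine:
        def __init__(self, name: str):
            self.name = name
            self.num_reqs_in_flight = 0

    engines = [Engine("dp0"), Engine("dp1")]
    engines[0].num_reqs_in_flight = 3

    # min() over in-flight counts picks dp1, the least-loaded engine; the
    # count is incremented on dispatch and decremented when the request
    # later shows up in outputs.finished_requests.
    chosen = min(engines, key=lambda e: e.num_reqs_in_flight)
    assert chosen.name == "dp1"
    chosen.num_reqs_in_flight += 1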
@@ -8,6 +8,7 @@ from typing_extensions import TypeVar

 import vllm.envs as envs
 from vllm.config import ParallelConfig, VllmConfig
+from vllm.distributed import stateless_destroy_torch_distributed_process_group
 from vllm.engine.arg_utils import EngineArgs
 from vllm.engine.metrics_types import StatLoggerBase
 from vllm.inputs import INPUT_REGISTRY, InputRegistry, PromptType
@@ -60,11 +61,13 @@ class LLMEngine:
         self.cache_config = vllm_config.cache_config

         # important: init dp group before init the engine_core
-        self.parallel_config = vllm_config.parallel_config
-        self.dp_enabled = self.parallel_config.data_parallel_size > 1  # noqa
+        # In the decoupled engine case this is handled in EngineCoreProc.
+        parallel_config = vllm_config.parallel_config
+        if not multiprocess_mode and parallel_config.data_parallel_size > 1:
+            self.dp_group = parallel_config.stateless_init_dp_group()
+        else:
+            self.dp_group = None
         self.should_execute_dummy_batch = False
-        if self.dp_enabled:
-            self.dp_group = self.parallel_config.stateless_init_dp_group()

         # Tokenizer (+ ensure liveness if running in another process).
         self.tokenizer = init_tokenizer_from_configs(
@@ -148,7 +151,7 @@ class LLMEngine:

     def has_unfinished_requests(self) -> bool:
         has_unfinished = self.output_processor.has_unfinished_requests()
-        if not self.dp_enabled:
+        if self.dp_group is None:
             return has_unfinished
         return self.has_unfinished_requests_dp(has_unfinished)
@@ -280,3 +283,7 @@ class LLMEngine:
     def pin_lora(self, lora_id: int) -> bool:
         """Prevent an adapter from being evicted."""
         return self.engine_core.pin_lora(lora_id)
+
+    def __del__(self):
+        if dp_group := getattr(self, "dp_group", None):
+            stateless_destroy_torch_distributed_process_group(dp_group)