[V0 Deprecation] Remove V0 executors (#27142)
Signed-off-by: Nick Hill <nhill@redhat.com>
@@ -0,0 +1,6 @@
# SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
from .abstract import Executor
from .uniproc_executor import UniProcExecutor

__all__ = ["Executor", "UniProcExecutor"]
@@ -1,31 +1,40 @@
|
||||
# SPDX-License-Identifier: Apache-2.0
|
||||
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
|
||||
|
||||
import time
|
||||
from abc import ABC, abstractmethod
|
||||
from collections.abc import Callable
|
||||
from concurrent.futures import Future
|
||||
from typing import Any
|
||||
|
||||
import torch
|
||||
import torch.distributed as dist
|
||||
from functools import cached_property
|
||||
from typing import Literal, TypeVar, overload
|
||||
|
||||
from vllm.config import VllmConfig
|
||||
from vllm.executor.executor_base import ExecutorBase
|
||||
from vllm.executor.uniproc_executor import ( # noqa
|
||||
ExecutorWithExternalLauncher as ExecutorWithExternalLauncherV0,
|
||||
)
|
||||
from vllm.executor.uniproc_executor import UniProcExecutor as UniProcExecutorV0 # noqa
|
||||
from vllm.distributed.kv_transfer.kv_connector.utils import KVOutputAggregator
|
||||
from vllm.logger import init_logger
|
||||
from vllm.lora.request import LoRARequest
|
||||
from vllm.tasks import SupportedTask
|
||||
from vllm.utils.import_utils import resolve_obj_by_qualname
|
||||
from vllm.v1.core.sched.output import SchedulerOutput
|
||||
from vllm.v1.engine import ReconfigureDistributedRequest
|
||||
from vllm.v1.kv_cache_interface import KVCacheConfig, KVCacheSpec
|
||||
from vllm.v1.outputs import DraftTokenIds, ModelRunnerOutput
|
||||
from vllm.v1.worker.worker_base import WorkerBase
|
||||
|
||||
logger = init_logger(__name__)
|
||||
|
||||
_R = TypeVar("_R")
|
||||
|
||||
FailureCallback = Callable[[], None]
|
||||
|
||||
|
||||
class Executor(ExecutorBase):
|
||||
class Executor(ABC):
|
||||
"""Abstract base class for vLLM executors."
|
||||
|
||||
An executor is responsible for executing the model on one device,
|
||||
or it can be a distributed executor that can execute the model on multiple devices.
|
||||
"""
|
||||
Abstract class for v1 executors, mainly define some methods for v1.
|
||||
For methods shared by v0 and v1, define them in ExecutorBase"""
|
||||
|
||||
uses_ray: bool = False # whether the executor uses Ray for orchestration.
|
||||
supports_pp: bool = False # whether the executor supports PP
|
||||
|
||||
@staticmethod
|
||||
def get_class(vllm_config: VllmConfig) -> type["Executor"]:
|
||||
@@ -34,16 +43,14 @@ class Executor(ExecutorBase):
|
||||
distributed_executor_backend = parallel_config.distributed_executor_backend
|
||||
# distributed_executor_backend must be set in VllmConfig.__post_init__
|
||||
if isinstance(distributed_executor_backend, type):
|
||||
if not issubclass(distributed_executor_backend, ExecutorBase):
|
||||
if not issubclass(distributed_executor_backend, Executor):
|
||||
raise TypeError(
|
||||
"distributed_executor_backend must be a subclass of "
|
||||
f"ExecutorBase. Got {distributed_executor_backend}."
|
||||
f"Executor. Got {distributed_executor_backend}."
|
||||
)
|
||||
executor_class = distributed_executor_backend
|
||||
elif distributed_executor_backend == "ray":
|
||||
from vllm.v1.executor.ray_distributed_executor import ( # noqa
|
||||
RayDistributedExecutor,
|
||||
)
|
||||
from vllm.v1.executor.ray_executor import RayDistributedExecutor
|
||||
|
||||
executor_class = RayDistributedExecutor
|
||||
elif distributed_executor_backend == "mp":
|
||||
@@ -51,6 +58,8 @@ class Executor(ExecutorBase):
|
||||
|
||||
executor_class = MultiprocExecutor
|
||||
elif distributed_executor_backend == "uni":
|
||||
from vllm.v1.executor.uniproc_executor import UniProcExecutor
|
||||
|
||||
executor_class = UniProcExecutor
|
||||
elif distributed_executor_backend == "external_launcher":
|
||||
# TODO: make v1 scheduling deterministic
|
||||
@@ -58,10 +67,10 @@ class Executor(ExecutorBase):
|
||||
executor_class = ExecutorWithExternalLauncher
|
||||
elif isinstance(distributed_executor_backend, str):
|
||||
executor_class = resolve_obj_by_qualname(distributed_executor_backend)
|
||||
if not issubclass(executor_class, ExecutorBase):
|
||||
if not issubclass(executor_class, Executor):
|
||||
raise TypeError(
|
||||
"distributed_executor_backend must be a subclass of "
|
||||
f"ExecutorBase. Got {executor_class}."
|
||||
f"Executor. Got {executor_class}."
|
||||
)
|
||||
else:
|
||||
raise ValueError(
|
||||
@@ -69,6 +78,29 @@ class Executor(ExecutorBase):
|
||||
)
|
||||
return executor_class
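# Illustrative sketch (not part of this diff): a string backend that is not
# one of "ray", "mp", "uni", or "external_launcher" is resolved as a fully
# qualified class name. `my_pkg.executors.MyExecutor` is a hypothetical
# user-defined Executor subclass used only for this example.
#
#     from vllm.utils.import_utils import resolve_obj_by_qualname
#     executor_class = resolve_obj_by_qualname("my_pkg.executors.MyExecutor")
#     assert issubclass(executor_class, Executor)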
|
||||
|
||||
def __init__(
|
||||
self,
|
||||
vllm_config: VllmConfig,
|
||||
) -> None:
|
||||
self.vllm_config = vllm_config
|
||||
self.model_config = vllm_config.model_config
|
||||
self.cache_config = vllm_config.cache_config
|
||||
self.lora_config = vllm_config.lora_config
|
||||
self.load_config = vllm_config.load_config
|
||||
self.parallel_config = vllm_config.parallel_config
|
||||
self.scheduler_config = vllm_config.scheduler_config
|
||||
self.device_config = vllm_config.device_config
|
||||
self.speculative_config = vllm_config.speculative_config
|
||||
self.observability_config = vllm_config.observability_config
|
||||
self._init_executor()
|
||||
self.is_sleeping = False
|
||||
self.sleeping_tags: set[str] = set()
|
||||
self.kv_output_aggregator: KVOutputAggregator | None = None
|
||||
|
||||
@abstractmethod
|
||||
def _init_executor(self) -> None:
|
||||
raise NotImplementedError
|
||||
|
||||
def initialize_from_config(self, kv_cache_configs: list[KVCacheConfig]) -> None:
|
||||
"""
|
||||
Initialize the KV caches and begin the model execution loop of the
|
||||
@@ -77,7 +109,7 @@ class Executor(ExecutorBase):
|
||||
self.collective_rpc("initialize_from_config", args=(kv_cache_configs,))
|
||||
self.collective_rpc("compile_or_warm_up_model")
|
||||
|
||||
def register_failure_callback(self, callback: FailureCallback):
|
||||
def register_failure_callback(self, callback: FailureCallback): # noqa: B027
|
||||
"""
|
||||
Register a function to be called if the executor enters a permanent
|
||||
failed state.
|
||||
@@ -90,22 +122,78 @@ class Executor(ExecutorBase):
|
||||
def get_kv_cache_specs(self) -> list[dict[str, KVCacheSpec]]:
|
||||
return self.collective_rpc("get_kv_cache_spec")
|
||||
|
||||
@overload
|
||||
def collective_rpc(
|
||||
self,
|
||||
method: str | Callable,
|
||||
method: str | Callable[[WorkerBase], _R],
|
||||
timeout: float | None = None,
|
||||
args: tuple = (),
|
||||
kwargs: dict | None = None,
|
||||
non_block: bool = False,
|
||||
) -> list[Any]:
|
||||
non_block: Literal[False] = False,
|
||||
) -> list[_R]:
|
||||
"""
|
||||
Execute an RPC call on all workers.
|
||||
|
||||
Args:
|
||||
method: Name of the worker method to execute, or a callable that
|
||||
is serialized and sent to all workers to execute.
|
||||
|
||||
If the method is a callable, it should accept an additional
|
||||
`self` argument, in addition to the arguments passed in `args`
|
||||
and `kwargs`. The `self` argument will be the worker object.
|
||||
timeout: Maximum time in seconds to wait for execution. Raises a
|
||||
[`TimeoutError`][] on timeout. `None` means wait indefinitely.
|
||||
args: Positional arguments to pass to the worker method.
|
||||
kwargs: Keyword arguments to pass to the worker method.
|
||||
non_block: If `True`, returns a list of Futures instead of waiting
|
||||
for the results.
|
||||
|
||||
Returns:
|
||||
A list containing the results from each worker.
|
||||
|
||||
Note:
|
||||
It is recommended to use this API to only pass control messages,
|
||||
and set up data-plane communication to pass data.
|
||||
"""
|
||||
pass
|
||||
|
||||
@overload
|
||||
def collective_rpc(
|
||||
self,
|
||||
method: str | Callable[[WorkerBase], _R],
|
||||
timeout: float | None = None,
|
||||
args: tuple = (),
|
||||
kwargs: dict | None = None,
|
||||
non_block: Literal[True] = True,
|
||||
) -> list[Future[_R]]:
|
||||
pass
|
||||
|
||||
@abstractmethod
|
||||
def collective_rpc(
|
||||
self, method, timeout=None, args=(), kwargs=None, non_block: bool = False
|
||||
):
|
||||
raise NotImplementedError
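# Usage sketch (illustrative, not part of this diff): `executor` stands for
# any concrete Executor. A worker method can be addressed by name, or a
# picklable callable can be shipped to every worker; with non_block=True the
# call returns one Future per worker instead of blocking.
#
#     specs = executor.collective_rpc("get_kv_cache_spec")
#
#     def worker_type(worker):
#         return type(worker).__name__
#
#     names = executor.collective_rpc(worker_type)
#
#     futures = executor.collective_rpc("get_supported_tasks", non_block=True)
#     results = [f.result() for f in futures]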
|
||||
|
||||
@overload
|
||||
def execute_model(
|
||||
self,
|
||||
scheduler_output: SchedulerOutput,
|
||||
non_block: bool = False,
|
||||
non_block: Literal[False] = False,
|
||||
) -> ModelRunnerOutput:
|
||||
pass
|
||||
|
||||
@overload
|
||||
def execute_model(
|
||||
self,
|
||||
scheduler_output: SchedulerOutput,
|
||||
non_block: Literal[True] = True,
|
||||
) -> Future[ModelRunnerOutput]:
|
||||
pass
|
||||
|
||||
def execute_model(
|
||||
self, scheduler_output: SchedulerOutput, non_block: bool = False
|
||||
) -> ModelRunnerOutput | Future[ModelRunnerOutput]:
|
||||
output = self.collective_rpc(
|
||||
output = self.collective_rpc( # type: ignore[call-overload]
|
||||
"execute_model", args=(scheduler_output,), non_block=non_block
|
||||
)
|
||||
return output[0]
|
||||
@@ -114,7 +202,7 @@ class Executor(ExecutorBase):
|
||||
self.collective_rpc("execute_dummy_batch")
|
||||
|
||||
def take_draft_token_ids(self) -> DraftTokenIds | None:
|
||||
output = self.collective_rpc("take_draft_token_ids")
|
||||
output: list[DraftTokenIds] = self.collective_rpc("take_draft_token_ids")
|
||||
return output[0]
|
||||
|
||||
@property
|
||||
@@ -124,19 +212,120 @@ class Executor(ExecutorBase):
|
||||
def profile(self, is_start: bool = True):
|
||||
self.collective_rpc("profile", args=(is_start,))
|
||||
|
||||
def save_sharded_state(
|
||||
self,
|
||||
path: str,
|
||||
pattern: str | None = None,
|
||||
max_size: int | None = None,
|
||||
) -> None:
|
||||
self.collective_rpc(
|
||||
"save_sharded_state",
|
||||
kwargs=dict(path=path, pattern=pattern, max_size=max_size),
|
||||
)
|
||||
|
||||
class UniProcExecutor(UniProcExecutorV0, Executor):
|
||||
pass
|
||||
@abstractmethod
|
||||
def check_health(self) -> None:
|
||||
"""Checks if the executor is healthy. If not, it should raise an
|
||||
exception."""
|
||||
raise NotImplementedError
|
||||
|
||||
def shutdown(self) -> None:
|
||||
"""Shutdown the executor."""
|
||||
self.collective_rpc("shutdown")
|
||||
|
||||
def init_kv_output_aggregator(self, finished_count: int | None) -> None:
|
||||
"""Init KVOutputAggregator"""
|
||||
self.kv_output_aggregator = KVOutputAggregator(
|
||||
finished_count or self.parallel_config.world_size
|
||||
)
|
||||
|
||||
@cached_property # Avoid unnecessary RPC calls
|
||||
def supported_tasks(self) -> tuple[SupportedTask, ...]:
|
||||
output: list[tuple[SupportedTask, ...]]
|
||||
output = self.collective_rpc("get_supported_tasks")
|
||||
return output[0]
|
||||
|
||||
def add_lora(self, lora_request: LoRARequest) -> bool:
|
||||
assert lora_request.lora_int_id > 0, "lora_id must be greater than 0."
|
||||
return all(self.collective_rpc("add_lora", args=(lora_request,)))
|
||||
|
||||
def remove_lora(self, lora_id: int) -> bool:
|
||||
assert lora_id > 0, "lora_id must be greater than 0."
|
||||
return all(self.collective_rpc("remove_lora", args=(lora_id,)))
|
||||
|
||||
def pin_lora(self, lora_id: int) -> bool:
|
||||
assert lora_id > 0, "lora_id must be greater than 0."
|
||||
return all(self.collective_rpc("pin_lora", args=(lora_id,)))
|
||||
|
||||
def list_loras(self) -> set[int]:
|
||||
sets: list[set[int]] = self.collective_rpc("list_loras")
|
||||
for s in sets:
|
||||
assert s == sets[0], "All workers should have the same LORAs."
|
||||
return sets[0]
|
||||
|
||||
def reset_mm_cache(self) -> None:
|
||||
"""Reset the multi-modal cache in each worker."""
|
||||
self.collective_rpc("reset_mm_cache")
|
||||
|
||||
def start_profile(self) -> None:
|
||||
self.collective_rpc("start_profile")
|
||||
|
||||
def stop_profile(self) -> None:
|
||||
self.collective_rpc("stop_profile")
|
||||
|
||||
def sleep(self, level: int = 1):
|
||||
if self.is_sleeping:
|
||||
logger.warning("Executor is already sleeping.")
|
||||
return
|
||||
time_before_sleep = time.perf_counter()
|
||||
self.collective_rpc("sleep", kwargs=dict(level=level))
|
||||
time_after_sleep = time.perf_counter()
|
||||
self.sleeping_tags = {"weights", "kv_cache"}
|
||||
self.is_sleeping = True
|
||||
logger.info(
|
||||
"It took %.6f seconds to fall asleep.", time_after_sleep - time_before_sleep
|
||||
)
|
||||
|
||||
def wake_up(self, tags: list[str] | None = None):
|
||||
if not self.is_sleeping:
|
||||
logger.warning("Executor is not sleeping.")
|
||||
return
|
||||
if tags:
|
||||
for tag in tags:
|
||||
if tag not in self.sleeping_tags:
|
||||
logger.warning(
|
||||
"Tag %s is not in sleeping tags %s", tag, self.sleeping_tags
|
||||
)
|
||||
return
|
||||
time_before_wakeup = time.perf_counter()
|
||||
self.collective_rpc("wake_up", kwargs=dict(tags=tags))
|
||||
time_after_wakeup = time.perf_counter()
|
||||
logger.info(
|
||||
"It took %.6f seconds to wake up tags %s.",
|
||||
time_after_wakeup - time_before_wakeup,
|
||||
tags if tags is not None else self.sleeping_tags,
|
||||
)
|
||||
if tags:
|
||||
for tag in tags:
|
||||
self.sleeping_tags.remove(tag)
|
||||
else:
|
||||
self.sleeping_tags.clear()
|
||||
if not self.sleeping_tags:
|
||||
self.is_sleeping = False
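# Call-sequence sketch (illustrative, not part of this diff): after sleep(),
# both "weights" and "kv_cache" are tagged as sleeping; a partial wake_up
# leaves the executor sleeping until every tag has been woken.
#
#     executor.sleep(level=1)           # sleeping_tags == {"weights", "kv_cache"}
#     executor.wake_up(["weights"])     # still sleeping: {"kv_cache"} remains
#     executor.wake_up()                # clears all tags, is_sleeping == False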
|
||||
|
||||
def reinitialize_distributed(
|
||||
self, reconfig_request: ReconfigureDistributedRequest
|
||||
) -> None:
|
||||
raise NotImplementedError
|
||||
|
||||
|
||||
class ExecutorWithExternalLauncher(ExecutorWithExternalLauncherV0, Executor):
|
||||
def determine_available_memory(self) -> list[int]: # in bytes
|
||||
# same as determine_num_available_blocks in v0,
|
||||
# we need to get the min across all ranks.
|
||||
memory = super().determine_available_memory()
|
||||
from vllm.distributed.parallel_state import get_world_group
|
||||
from vllm.v1.executor.uniproc_executor import ( # noqa: E402
|
||||
ExecutorWithExternalLauncher as _ExecutorWithExternalLauncher,
|
||||
)
|
||||
from vllm.v1.executor.uniproc_executor import ( # noqa: E402
|
||||
UniProcExecutor as _UniProcExecutor,
|
||||
)
|
||||
|
||||
cpu_group = get_world_group().cpu_group
|
||||
memory_tensor = torch.tensor([memory], device="cpu", dtype=torch.int64)
|
||||
dist.all_reduce(memory_tensor, group=cpu_group, op=dist.ReduceOp.MIN)
|
||||
return [memory_tensor.item()]
|
||||
# For backwards compatibility.
|
||||
UniProcExecutor = _UniProcExecutor
|
||||
ExecutorWithExternalLauncher = _ExecutorWithExternalLauncher
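# Sketch (illustrative): with the aliasing above, the legacy import path keeps
# resolving to the same class object as the new module.
#
#     from vllm.v1.executor.abstract import UniProcExecutor as legacy
#     from vllm.v1.executor.uniproc_executor import UniProcExecutor as moved
#     assert legacy is moved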
|
||||
|
||||
@@ -179,7 +179,7 @@ class MultiprocExecutor(Executor):
|
||||
else:
|
||||
self.failure_callback = callback
|
||||
|
||||
def execute_model(
|
||||
def execute_model( # type: ignore[override]
|
||||
self,
|
||||
scheduler_output: SchedulerOutput,
|
||||
non_block: bool = False,
|
||||
@@ -204,6 +204,7 @@ class MultiprocExecutor(Executor):
|
||||
)
|
||||
|
||||
# aggregate all workers output to a single output
|
||||
assert self.kv_output_aggregator is not None
|
||||
if non_block:
|
||||
return self.kv_output_aggregator.async_aggregate(outputs, self.output_rank)
|
||||
return self.kv_output_aggregator.aggregate(outputs, self.output_rank)
|
||||
|
||||
@@ -1,111 +1,8 @@
|
||||
# SPDX-License-Identifier: Apache-2.0
|
||||
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
|
||||
|
||||
from concurrent.futures import Future
|
||||
|
||||
from vllm.distributed.kv_transfer.kv_connector.utils import KVOutputAggregator
|
||||
from vllm.executor.ray_distributed_executor import ( # noqa
|
||||
RayDistributedExecutor as RayDistributedExecutorV0,
|
||||
from vllm.v1.executor.ray_executor import (
|
||||
RayDistributedExecutor as _RayDistributedExecutor,
|
||||
)
|
||||
from vllm.logger import init_logger
|
||||
from vllm.v1.core.sched.output import SchedulerOutput
|
||||
from vllm.v1.engine import ReconfigureDistributedRequest, ReconfigureRankType
|
||||
from vllm.v1.executor.abstract import Executor
|
||||
from vllm.v1.outputs import ModelRunnerOutput
|
||||
|
||||
logger = init_logger(__name__)
|
||||
|
||||
|
||||
class FutureWrapper(Future):
|
||||
"""A wrapper around Ray output reference to meet the interface
|
||||
of .execute_model(): The top level (core busy loop) expects .result() api
|
||||
to block and return a single output.
|
||||
|
||||
If aggregator is provided, the outputs from all workers are aggregated upon
|
||||
the result() call. If not, only the first worker's output is returned.
|
||||
"""
|
||||
|
||||
def __init__(self, refs, aggregator: KVOutputAggregator | None = None):
|
||||
super().__init__()
|
||||
self.refs = refs
|
||||
self.aggregator = aggregator
|
||||
|
||||
def result(self, timeout=None):
|
||||
if timeout is not None:
|
||||
raise NotImplementedError("timeout is not supported")
|
||||
|
||||
if self.aggregator is None:
|
||||
return self.refs[0].get()
|
||||
|
||||
outputs = [ref.get() for ref in self.refs]
|
||||
return self.aggregator.aggregate(outputs, output_rank=0)
|
||||
|
||||
|
||||
class RayDistributedExecutor(RayDistributedExecutorV0, Executor):
|
||||
"""Ray distributed executor using Ray Compiled Graphs."""
|
||||
|
||||
supports_pp: bool = True
|
||||
|
||||
def _init_executor(self) -> None:
|
||||
super()._init_executor()
|
||||
|
||||
# KV connector setup
|
||||
self.has_connector = self.vllm_config.kv_transfer_config is not None
|
||||
|
||||
@property
|
||||
def max_concurrent_batches(self) -> int:
|
||||
"""Ray distributed executor supports pipeline parallelism,
|
||||
meaning that it allows PP size batches to be executed concurrently.
|
||||
"""
|
||||
if self.scheduler_config.async_scheduling:
|
||||
return 2
|
||||
return self.parallel_config.pipeline_parallel_size
|
||||
|
||||
def execute_model(
|
||||
self,
|
||||
scheduler_output: SchedulerOutput,
|
||||
non_block: bool = False,
|
||||
) -> ModelRunnerOutput | Future[ModelRunnerOutput]:
|
||||
"""Execute the model on the Ray workers.
|
||||
|
||||
Args:
|
||||
scheduler_output: The scheduler output to execute.
|
||||
non_block: If True, the method will return a Future.
|
||||
|
||||
Returns:
|
||||
The model runner output.
|
||||
"""
|
||||
# Build the compiled DAG for the first time.
|
||||
if self.forward_dag is None: # type: ignore
|
||||
self.forward_dag = self._compiled_ray_dag(enable_asyncio=False)
|
||||
|
||||
refs = self.forward_dag.execute(scheduler_output) # type: ignore
|
||||
|
||||
if not self.has_connector:
|
||||
# Get output only from a single worker (output_rank)
|
||||
# When PP is not used, we block here until the result is available.
|
||||
if not non_block:
|
||||
return refs[0].get()
|
||||
|
||||
# When PP is used, we return a FutureWrapper immediately so that
|
||||
# the scheduler can yield to the next batch.
|
||||
return FutureWrapper(refs)
|
||||
|
||||
# Get output from all workers when connector is present
|
||||
if not non_block:
|
||||
# Block and get results from all workers
|
||||
outputs = [ref.get() for ref in refs]
|
||||
return self.kv_output_aggregator.aggregate(outputs)
|
||||
|
||||
# Return a future that will aggregate outputs from all workers
|
||||
return FutureWrapper(refs, self.kv_output_aggregator)
|
||||
|
||||
def reinitialize_distributed(
|
||||
self, reconfig_request: ReconfigureDistributedRequest
|
||||
) -> None:
|
||||
self._run_workers("reinitialize_distributed", reconfig_request)
|
||||
if (
|
||||
reconfig_request.new_data_parallel_rank
|
||||
== ReconfigureRankType.SHUTDOWN_CURRENT_RANK
|
||||
):
|
||||
self.shutdown()
|
||||
# For backwards compatibility.
|
||||
RayDistributedExecutor = _RayDistributedExecutor
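# Standalone sketch (not vLLM code): the FutureWrapper pattern above, i.e. a
# Future whose result() lazily resolves worker output references, reproduced
# with a dummy ref type so it can run without Ray.
from concurrent.futures import Future


class _DummyRef:
    """Stands in for a Ray ObjectRef in this sketch."""

    def __init__(self, value):
        self._value = value

    def get(self):
        return self._value


class _FutureWrapperSketch(Future):
    def __init__(self, refs, aggregate=None):
        super().__init__()
        self.refs = refs
        self.aggregate = aggregate

    def result(self, timeout=None):
        # Resolve only the first ref, or aggregate across all of them.
        if self.aggregate is None:
            return self.refs[0].get()
        return self.aggregate([ref.get() for ref in self.refs])


fut = _FutureWrapperSketch([_DummyRef(1), _DummyRef(2)], aggregate=sum)
assert fut.result() == 3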
|
||||
|
||||
vllm/v1/executor/ray_executor.py (new file, 579 lines)
@@ -0,0 +1,579 @@
|
||||
# SPDX-License-Identifier: Apache-2.0
|
||||
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
|
||||
|
||||
import os
|
||||
from collections import defaultdict
|
||||
from collections.abc import Callable
|
||||
from concurrent.futures import Future
|
||||
from dataclasses import dataclass
|
||||
from typing import TYPE_CHECKING, Any
|
||||
|
||||
import cloudpickle
|
||||
|
||||
import vllm.envs as envs
|
||||
from vllm.logger import init_logger
|
||||
from vllm.platforms import current_platform
|
||||
from vllm.ray.ray_env import get_env_vars_to_copy
|
||||
from vllm.utils.network_utils import (
|
||||
get_distributed_init_method,
|
||||
get_ip,
|
||||
get_open_port,
|
||||
)
|
||||
from vllm.v1.core.sched.output import SchedulerOutput
|
||||
from vllm.v1.engine import ReconfigureDistributedRequest, ReconfigureRankType
|
||||
from vllm.v1.executor.abstract import Executor
|
||||
from vllm.v1.executor.ray_utils import (
|
||||
FutureWrapper,
|
||||
RayWorkerWrapper,
|
||||
initialize_ray_cluster,
|
||||
ray,
|
||||
)
|
||||
from vllm.v1.outputs import ModelRunnerOutput
|
||||
|
||||
if ray is not None:
|
||||
from ray.actor import ActorHandle
|
||||
from ray.util.scheduling_strategies import PlacementGroupSchedulingStrategy
|
||||
else:
|
||||
ActorHandle = None
|
||||
|
||||
if TYPE_CHECKING:
|
||||
from ray.util.placement_group import PlacementGroup
|
||||
|
||||
logger = init_logger(__name__)
|
||||
|
||||
|
||||
@dataclass
|
||||
class RayWorkerMetaData:
|
||||
"""
|
||||
Metadata for a Ray worker.
|
||||
The order of ray worker creation can be random,
|
||||
and we need to reset the rank after creating all workers.
|
||||
"""
|
||||
|
||||
worker: ActorHandle
|
||||
created_rank: int
|
||||
adjusted_rank: int = -1
|
||||
ip: str = ""
|
||||
|
||||
|
||||
class RayDistributedExecutor(Executor):
|
||||
"""Ray-based distributed executor"""
|
||||
|
||||
# These env vars are worker-specific, therefore are NOT copied
|
||||
# from the driver to the workers
|
||||
WORKER_SPECIFIC_ENV_VARS = {
|
||||
"VLLM_HOST_IP",
|
||||
"VLLM_HOST_PORT",
|
||||
"LOCAL_RANK",
|
||||
"CUDA_VISIBLE_DEVICES",
|
||||
}
|
||||
|
||||
# These non-vLLM env vars are copied from the driver to workers
|
||||
ADDITIONAL_ENV_VARS = {"HF_TOKEN", "HUGGING_FACE_HUB_TOKEN"}
|
||||
|
||||
uses_ray: bool = True
|
||||
supports_pp: bool = True
|
||||
|
||||
def _init_executor(self) -> None:
|
||||
self.forward_dag: ray.dag.CompiledDAG | None = None
|
||||
|
||||
# For TPU or XPU, avoid compiling NVIDIA's NCCL
|
||||
if current_platform.is_tpu() or current_platform.is_xpu():
|
||||
os.environ["VLLM_USE_RAY_COMPILED_DAG_CHANNEL_TYPE"] = "shm"
|
||||
|
||||
assert self.uses_ray
|
||||
initialize_ray_cluster(self.parallel_config)
|
||||
placement_group = self.parallel_config.placement_group
|
||||
|
||||
# Disable Ray usage stats collection.
|
||||
ray_usage = os.environ.get("RAY_USAGE_STATS_ENABLED", "0")
|
||||
if ray_usage != "1":
|
||||
os.environ["RAY_USAGE_STATS_ENABLED"] = "0"
|
||||
|
||||
# Create the parallel GPU workers.
|
||||
self._init_workers_ray(placement_group)
|
||||
|
||||
# KV connector setup
|
||||
self.has_connector = self.vllm_config.kv_transfer_config is not None
|
||||
|
||||
@property
|
||||
def max_concurrent_batches(self) -> int:
|
||||
"""Ray distributed executor supports pipeline parallelism,
|
||||
meaning that it allows PP size batches to be executed concurrently.
|
||||
"""
|
||||
if self.scheduler_config.async_scheduling:
|
||||
return 2
|
||||
return self.parallel_config.pipeline_parallel_size
|
||||
|
||||
def shutdown(self) -> None:
|
||||
if logger:
|
||||
# Somehow logger can be None here.
|
||||
logger.info(
|
||||
"Shutting down Ray distributed executor. If you see error log "
|
||||
"from logging.cc regarding SIGTERM received, please ignore "
|
||||
"because this is the expected termination process in Ray."
|
||||
)
|
||||
if hasattr(self, "forward_dag") and self.forward_dag is not None:
|
||||
self.forward_dag.teardown()
|
||||
import ray
|
||||
|
||||
for worker in self.workers:
|
||||
ray.kill(worker)
|
||||
self.forward_dag = None
|
||||
|
||||
def _configure_ray_workers_use_nsight(self, ray_remote_kwargs) -> dict[str, Any]:
|
||||
# If nsight profiling is enabled, we need to set the profiling
|
||||
# configuration for the ray workers as runtime env.
|
||||
runtime_env = ray_remote_kwargs.setdefault("runtime_env", {})
|
||||
runtime_env.update(
|
||||
{
|
||||
"nsight": {
|
||||
"t": "cuda,cudnn,cublas",
|
||||
"o": "'worker_process_%p'",
|
||||
"cuda-graph-trace": "node",
|
||||
}
|
||||
}
|
||||
)
|
||||
|
||||
return ray_remote_kwargs
|
||||
|
||||
# child class could overwrite this to return actual env vars.
|
||||
def _get_env_vars_to_be_updated(self):
|
||||
return self._env_vars_for_all_workers
|
||||
|
||||
def _init_workers_ray(self, placement_group: "PlacementGroup", **ray_remote_kwargs):
|
||||
num_gpus = envs.VLLM_RAY_PER_WORKER_GPUS
|
||||
|
||||
# The driver dummy worker does not actually use any resources.
|
||||
# It holds the resource for the driver worker.
|
||||
self.driver_dummy_worker: RayWorkerWrapper | None = None
|
||||
# The remaining workers are the actual ray actors.
|
||||
self.workers: list[RayWorkerWrapper] = []
|
||||
|
||||
# Used in ray compiled DAG: indexed first by PP rank,
|
||||
# and then TP rank. In other words, the inner list is
|
||||
# the TP group of workers for a PP rank.
|
||||
self.pp_tp_workers: list[list[RayWorkerWrapper]] = []
|
||||
|
||||
if self.parallel_config.ray_workers_use_nsight:
|
||||
ray_remote_kwargs = self._configure_ray_workers_use_nsight(
|
||||
ray_remote_kwargs
|
||||
)
|
||||
|
||||
# Create the workers.
|
||||
bundle_indices: list[int]
|
||||
if envs.VLLM_RAY_BUNDLE_INDICES:
|
||||
# Use the bundle indices specified by the user.
|
||||
bundle_indices = list(map(int, envs.VLLM_RAY_BUNDLE_INDICES.split(",")))
|
||||
assert len(bundle_indices) == self.parallel_config.world_size, (
|
||||
"VLLM_RAY_BUNDLE_INDICES must have the same size"
|
||||
f" as the world size, but got {bundle_indices=} "
|
||||
f"and {self.parallel_config.world_size=}"
|
||||
)
|
||||
assert len(set(bundle_indices)) == len(bundle_indices), (
|
||||
"VLLM_RAY_BUNDLE_INDICES cannot have duplicate values,"
|
||||
f" but got {bundle_indices=}"
|
||||
)
|
||||
else:
|
||||
# use the first N bundles that have GPU resources.
|
||||
bundle_indices = []
|
||||
for bundle_id, bundle in enumerate(placement_group.bundle_specs):
|
||||
if bundle.get(current_platform.ray_device_key, 0):
|
||||
bundle_indices.append(bundle_id)
|
||||
bundle_indices = bundle_indices[: self.parallel_config.world_size]
|
||||
|
||||
worker_metadata: list[RayWorkerMetaData] = []
|
||||
driver_ip = get_ip()
|
||||
for rank, bundle_id in enumerate(bundle_indices):
|
||||
scheduling_strategy = PlacementGroupSchedulingStrategy(
|
||||
placement_group=placement_group,
|
||||
placement_group_capture_child_tasks=True,
|
||||
placement_group_bundle_index=bundle_id,
|
||||
)
|
||||
|
||||
if current_platform.ray_device_key == "GPU":
|
||||
# NV+AMD GPUs, and Intel XPUs
|
||||
worker = ray.remote(
|
||||
num_cpus=0,
|
||||
num_gpus=num_gpus,
|
||||
scheduling_strategy=scheduling_strategy,
|
||||
**ray_remote_kwargs,
|
||||
)(RayWorkerWrapper).remote( # type: ignore[attr-defined]
|
||||
vllm_config=self.vllm_config, rpc_rank=rank
|
||||
)
|
||||
else:
|
||||
worker = ray.remote(
|
||||
num_cpus=0,
|
||||
num_gpus=0,
|
||||
resources={current_platform.ray_device_key: num_gpus},
|
||||
scheduling_strategy=scheduling_strategy,
|
||||
**ray_remote_kwargs,
|
||||
)(RayWorkerWrapper).remote( # type: ignore[attr-defined]
|
||||
vllm_config=self.vllm_config, rpc_rank=rank
|
||||
)
|
||||
worker_metadata.append(RayWorkerMetaData(worker=worker, created_rank=rank))
|
||||
|
||||
worker_ips = ray.get(
|
||||
[
|
||||
each.worker.get_node_ip.remote() # type: ignore[attr-defined]
|
||||
for each in worker_metadata
|
||||
]
|
||||
)
|
||||
|
||||
for each, ip in zip(worker_metadata, worker_ips):
|
||||
each.ip = ip
|
||||
|
||||
logger.debug("workers: %s", worker_metadata)
|
||||
logger.debug("driver_dummy_worker: %s", self.driver_dummy_worker)
|
||||
|
||||
ip_counts: dict[str, int] = {}
|
||||
for ip in worker_ips:
|
||||
ip_counts[ip] = ip_counts.get(ip, 0) + 1
|
||||
|
||||
def sort_by_driver_then_worker_ip(item: RayWorkerMetaData):
|
||||
"""
|
||||
Sort the workers based on 3 properties:
|
||||
1. If the worker is on the same node as the driver (vllm engine),
|
||||
it should be placed first.
|
||||
2. Then, if the worker is on a node with fewer workers, it should
|
||||
be placed first.
|
||||
3. Finally, if the worker is on a node with a smaller IP address, it
|
||||
should be placed first.
|
||||
"""
|
||||
ip = item.ip
|
||||
return 0 if ip == driver_ip else 1, ip_counts[ip], ip
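# Sketch with toy values (illustrative): the 3-level key above places the
# driver-node worker first, then less crowded nodes, then lower IPs.
#
#     driver_ip = "10.0.0.1"
#     ip_counts = {"10.0.0.1": 1, "10.0.0.2": 2}
#     key = lambda ip: (0 if ip == driver_ip else 1, ip_counts[ip], ip)
#     sorted(["10.0.0.2", "10.0.0.1", "10.0.0.2"], key=key)
#     # -> ["10.0.0.1", "10.0.0.2", "10.0.0.2"]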
|
||||
|
||||
# After sorting, the workers on the same node will be
|
||||
# close to each other, and the workers on the driver
|
||||
# node will be placed first.
|
||||
sorted_worker_metadata = sorted(
|
||||
worker_metadata, key=sort_by_driver_then_worker_ip
|
||||
)
|
||||
for i, item in enumerate(sorted_worker_metadata):
|
||||
item.adjusted_rank = i
|
||||
self.workers = [item.worker for item in sorted_worker_metadata]
|
||||
rerank_mapping = {
|
||||
item.created_rank: item.adjusted_rank for item in sorted_worker_metadata
|
||||
}
|
||||
self.collective_rpc("adjust_rank", args=(rerank_mapping,))
|
||||
|
||||
# Get the set of GPU IDs used on each node.
|
||||
worker_node_and_gpu_ids = []
|
||||
for worker in [self.driver_dummy_worker] + self.workers:
|
||||
if worker is None:
|
||||
# driver_dummy_worker can be None when using ray spmd worker.
|
||||
continue
|
||||
worker_node_and_gpu_ids.append(
|
||||
ray.get(worker.get_node_and_gpu_ids.remote())
|
||||
) # type: ignore[attr-defined]
|
||||
|
||||
node_workers = defaultdict(list) # node id -> list of worker ranks
|
||||
node_gpus = defaultdict(list) # node id -> list of gpu ids
|
||||
|
||||
for i, (node_id, gpu_ids) in enumerate(worker_node_and_gpu_ids):
|
||||
node_workers[node_id].append(i)
|
||||
# `gpu_ids` can be a list of strings or integers.
|
||||
# convert them to integers for consistency.
|
||||
# NOTE: gpu_ids can be larger than 9 (e.g. 16 GPUs),
|
||||
# string sorting is not sufficient.
|
||||
# see https://github.com/vllm-project/vllm/issues/5590
|
||||
gpu_ids = [int(x) for x in gpu_ids]
|
||||
node_gpus[node_id].extend(gpu_ids)
|
||||
for node_id, gpu_ids in node_gpus.items():
|
||||
node_gpus[node_id] = sorted(gpu_ids)
|
||||
|
||||
all_ips = set(worker_ips + [driver_ip])
|
||||
n_ips = len(all_ips)
|
||||
n_nodes = len(node_workers)
|
||||
|
||||
if n_nodes != n_ips:
|
||||
raise RuntimeError(
|
||||
f"Every node should have a unique IP address. Got {n_nodes}"
|
||||
f" nodes with node ids {list(node_workers.keys())} and "
|
||||
f"{n_ips} unique IP addresses {all_ips}. Please check your"
|
||||
" network configuration. If you set `VLLM_HOST_IP`"
|
||||
" environment variable, make sure it is unique for"
|
||||
" each node."
|
||||
)
|
||||
|
||||
# Set environment variables for the driver and workers.
|
||||
all_args_to_update_environment_variables = [
|
||||
{
|
||||
current_platform.device_control_env_var: ",".join(
|
||||
map(str, node_gpus[node_id])
|
||||
),
|
||||
}
|
||||
for (node_id, _) in worker_node_and_gpu_ids
|
||||
]
|
||||
|
||||
# Environment variables to copy from driver to workers
|
||||
env_vars_to_copy = get_env_vars_to_copy(
|
||||
exclude_vars=self.WORKER_SPECIFIC_ENV_VARS,
|
||||
additional_vars=set(current_platform.additional_env_vars).union(
|
||||
self.ADDITIONAL_ENV_VARS
|
||||
),
|
||||
destination="workers",
|
||||
)
|
||||
|
||||
# Copy existing env vars to each worker's args
|
||||
for args in all_args_to_update_environment_variables:
|
||||
# TODO: refactor platform-specific env vars
|
||||
for name in env_vars_to_copy:
|
||||
if name in os.environ:
|
||||
args[name] = os.environ[name]
|
||||
|
||||
self._env_vars_for_all_workers = all_args_to_update_environment_variables
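# Sketch with toy values (illustrative): the copy loop above, mirrored on a
# plain dict instead of os.environ.
#
#     env_vars_to_copy = {"HF_TOKEN"}
#     driver_env = {"HF_TOKEN": "hf_xxx", "PATH": "/usr/bin"}
#     args = {"CUDA_VISIBLE_DEVICES": "0,1"}
#     args.update({k: v for k, v in driver_env.items() if k in env_vars_to_copy})
#     # args now carries HF_TOKEN but not PATH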
|
||||
|
||||
self.collective_rpc(
|
||||
"update_environment_variables", args=(self._get_env_vars_to_be_updated(),)
|
||||
)
|
||||
|
||||
if len(node_gpus) == 1:
|
||||
# in single node case, we don't need to get the IP address.
|
||||
# the loopback address is sufficient
|
||||
# NOTE: a node may have several IP addresses, one for each
|
||||
# network interface. `get_ip()` might return any of them,
|
||||
# while they might not work for communication inside the node
|
||||
# if the network setup is complicated. Using the loopback address
|
||||
# solves this issue, as it always works for communication inside
|
||||
# the node.
|
||||
driver_ip = "127.0.0.1"
|
||||
distributed_init_method = get_distributed_init_method(
|
||||
driver_ip, get_open_port()
|
||||
)
|
||||
|
||||
# Initialize the actual workers inside worker wrapper.
|
||||
all_kwargs = []
|
||||
for rank, (node_id, _) in enumerate(worker_node_and_gpu_ids):
|
||||
local_rank = node_workers[node_id].index(rank)
|
||||
kwargs = dict(
|
||||
vllm_config=self.vllm_config,
|
||||
local_rank=local_rank,
|
||||
rank=rank,
|
||||
distributed_init_method=distributed_init_method,
|
||||
is_driver_worker=(not self.parallel_config)
|
||||
or (rank % self.parallel_config.tensor_parallel_size == 0),
|
||||
)
|
||||
all_kwargs.append(kwargs)
|
||||
self.collective_rpc("init_worker", args=(all_kwargs,))
|
||||
|
||||
self.collective_rpc("init_device")
|
||||
self.collective_rpc("load_model")
|
||||
|
||||
for pp_rank in range(self.parallel_config.pipeline_parallel_size):
|
||||
self.pp_tp_workers.append([])
|
||||
for tp_rank in range(self.parallel_config.tensor_parallel_size):
|
||||
# PP=2, TP=4
|
||||
# pp_tp_workers = [[0, 1, 2, 3], [4, 5, 6, 7]]
|
||||
rank = (pp_rank * self.parallel_config.tensor_parallel_size) + tp_rank
|
||||
assert len(self.pp_tp_workers[pp_rank]) == tp_rank
|
||||
assert pp_rank < len(self.pp_tp_workers)
|
||||
self.pp_tp_workers[pp_rank].append(self.workers[rank])
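# Sketch (illustrative): the rank layout built above, standalone. With
# pipeline_parallel_size=2 and tensor_parallel_size=4:
#
#     pp, tp = 2, 4
#     layout = [[pp_rank * tp + tp_rank for tp_rank in range(tp)]
#               for pp_rank in range(pp)]
#     assert layout == [[0, 1, 2, 3], [4, 5, 6, 7]]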
|
||||
|
||||
def reinitialize_distributed(
|
||||
self, reconfig_request: ReconfigureDistributedRequest
|
||||
) -> None:
|
||||
self.collective_rpc("reinitialize_distributed", args=(reconfig_request,))
|
||||
if (
|
||||
reconfig_request.new_data_parallel_rank
|
||||
== ReconfigureRankType.SHUTDOWN_CURRENT_RANK
|
||||
):
|
||||
self.shutdown()
|
||||
|
||||
def execute_model( # type: ignore[override]
|
||||
self, scheduler_output: SchedulerOutput, non_block: bool = False
|
||||
) -> ModelRunnerOutput | Future[ModelRunnerOutput]:
|
||||
"""Execute the model on the Ray workers.
|
||||
|
||||
Args:
|
||||
scheduler_output: The scheduler output to execute.
|
||||
non_block: If True, the method will return a Future.
|
||||
|
||||
Returns:
|
||||
The model runner output.
|
||||
"""
|
||||
# Build the compiled DAG for the first time.
|
||||
if self.forward_dag is None: # type: ignore
|
||||
self.forward_dag = self._compiled_ray_dag(enable_asyncio=False)
|
||||
|
||||
refs = self.forward_dag.execute(scheduler_output) # type: ignore
|
||||
|
||||
if not self.has_connector:
|
||||
# Get output only from a single worker (output_rank)
|
||||
# When PP is not used, we block here until the result is available.
|
||||
if not non_block:
|
||||
return refs[0].get()
|
||||
|
||||
# When PP is used, we return a FutureWrapper immediately so that
|
||||
# the scheduler can yield to the next batch.
|
||||
return FutureWrapper(refs)
|
||||
|
||||
# Get output from all workers when connector is present
|
||||
assert self.kv_output_aggregator is not None
|
||||
if not non_block:
|
||||
# Block and get results from all workers
|
||||
outputs = [ref.get() for ref in refs]
|
||||
return self.kv_output_aggregator.aggregate(outputs)
|
||||
|
||||
# Return a future that will aggregate outputs from all workers
|
||||
return FutureWrapper(refs, self.kv_output_aggregator)
|
||||
|
||||
def collective_rpc(
|
||||
self,
|
||||
method: str | Callable,
|
||||
timeout: float | None = None,
|
||||
args: tuple = (),
|
||||
kwargs: dict[str, Any] | None = None,
|
||||
non_block: bool = False,
|
||||
) -> list[Any]:
|
||||
"""Runs the given method on all workers."""
|
||||
sent_method = method if isinstance(method, str) else cloudpickle.dumps(method)
|
||||
del method
|
||||
|
||||
if kwargs is None:
|
||||
kwargs = {}
|
||||
ray_worker_outputs = [
|
||||
worker.execute_method.remote( # type: ignore[attr-defined]
|
||||
sent_method, *args, **kwargs
|
||||
)
|
||||
for worker in self.workers
|
||||
]
|
||||
|
||||
# Get the results of the ray workers.
|
||||
if non_block:
|
||||
return [FutureWrapper((output,)) for output in ray_worker_outputs]
|
||||
|
||||
return ray.get(ray_worker_outputs, timeout=timeout)
|
||||
|
||||
def _check_ray_cgraph_installation(self):
|
||||
import importlib.metadata
|
||||
|
||||
from packaging import version
|
||||
|
||||
required_version = version.parse("2.43.0")
|
||||
current_version = version.parse(importlib.metadata.version("ray"))
|
||||
if current_version < required_version:
|
||||
raise ValueError(
|
||||
f"Ray version {required_version} is "
|
||||
f"required, but found {current_version}"
|
||||
)
|
||||
|
||||
import importlib.util
|
||||
|
||||
cgraph_spec = importlib.util.find_spec("ray.experimental.compiled_dag_ref")
|
||||
if cgraph_spec is None:
|
||||
raise ValueError(
|
||||
"Ray Compiled Graph is not installed. "
|
||||
"Run `pip install ray[cgraph]` to install it."
|
||||
)
|
||||
|
||||
cupy_spec = importlib.util.find_spec("cupy")
|
||||
if cupy_spec is None and envs.VLLM_USE_RAY_COMPILED_DAG_CHANNEL_TYPE == "nccl":
|
||||
raise ValueError(
|
||||
"cupy is not installed but required since "
|
||||
"VLLM_USE_RAY_COMPILED_DAG_CHANNEL_TYPE is set to 'nccl'. "
|
||||
"Run `pip install ray[cgraph]` and check cupy installation."
|
||||
)
|
||||
|
||||
def _compiled_ray_dag(self, enable_asyncio: bool):
|
||||
assert self.parallel_config.use_ray
|
||||
self._check_ray_cgraph_installation()
|
||||
# Enlarge the default value of "RAY_CGRAPH_get_timeout" to 300 seconds
|
||||
# (it is 10 seconds by default). This is a Ray environment variable to
|
||||
# control the timeout of getting result from a compiled graph execution,
|
||||
# i.e., the distributed execution that includes model forward runs and
|
||||
# intermediate tensor communications, in the case of vllm.
|
||||
# Note: we should set this env var before importing
|
||||
# ray.dag, otherwise it will not take effect.
|
||||
os.environ.setdefault("RAY_CGRAPH_get_timeout", "300") # noqa: SIM112
|
||||
from ray.dag import InputNode, MultiOutputNode
|
||||
|
||||
logger.info(
|
||||
"RAY_CGRAPH_get_timeout is set to %s",
|
||||
os.environ["RAY_CGRAPH_get_timeout"], # noqa: SIM112
|
||||
)
|
||||
logger.info(
|
||||
"VLLM_USE_RAY_COMPILED_DAG_CHANNEL_TYPE = %s",
|
||||
envs.VLLM_USE_RAY_COMPILED_DAG_CHANNEL_TYPE,
|
||||
)
|
||||
logger.info(
|
||||
"VLLM_USE_RAY_COMPILED_DAG_OVERLAP_COMM = %s",
|
||||
envs.VLLM_USE_RAY_COMPILED_DAG_OVERLAP_COMM,
|
||||
)
|
||||
|
||||
channel_type = envs.VLLM_USE_RAY_COMPILED_DAG_CHANNEL_TYPE
|
||||
if channel_type not in ("auto", "nccl", "shm"):
|
||||
raise ValueError(
|
||||
"Invalid value for VLLM_USE_RAY_COMPILED_DAG_CHANNEL_TYPE: "
|
||||
f"{channel_type}. Valid values are: 'auto', 'nccl', or 'shm'."
|
||||
)
|
||||
|
||||
with InputNode() as input_data:
|
||||
# Example DAG: PP=2, TP=4
|
||||
#
|
||||
# SchedulerOutput -> 0 -> (SchedulerOutput, IntermediateTensors) -> 4 -> ModelRunnerOutput # noqa: E501
|
||||
# SchedulerOutput -> 1 -> (SchedulerOutput, IntermediateTensors) -> 5 -> ModelRunnerOutput # noqa: E501
|
||||
# SchedulerOutput -> 2 -> (SchedulerOutput, IntermediateTensors) -> 6 -> ModelRunnerOutput # noqa: E501
|
||||
# SchedulerOutput -> 3 -> (SchedulerOutput, IntermediateTensors) -> 7 -> ModelRunnerOutput # noqa: E501
|
||||
|
||||
# All workers in the first TP group will take in the
|
||||
# ExecuteModelRequest as input.
|
||||
outputs = [input_data for _ in self.pp_tp_workers[0]]
|
||||
for pp_rank, tp_group in enumerate(self.pp_tp_workers):
|
||||
# Each PP worker takes in the output of the previous PP worker,
|
||||
# and the TP group executes in SPMD fashion.
|
||||
outputs = [
|
||||
worker.execute_model_ray.bind(outputs[i]) # type: ignore[attr-defined]
|
||||
for i, worker in enumerate(tp_group)
|
||||
]
|
||||
|
||||
last_pp_rank = len(self.pp_tp_workers) - 1
|
||||
if (
|
||||
pp_rank < last_pp_rank
|
||||
and envs.VLLM_USE_RAY_COMPILED_DAG_CHANNEL_TYPE != "shm"
|
||||
):
|
||||
# Specify how intermediate tensors should be passed
|
||||
# between pp stages, no need to specify for the last
|
||||
# pp stage or when using shared memory (the default).
|
||||
transport = envs.VLLM_USE_RAY_COMPILED_DAG_CHANNEL_TYPE
|
||||
outputs = [
|
||||
output.with_tensor_transport(transport=transport)
|
||||
for output in outputs
|
||||
]
|
||||
|
||||
forward_dag = MultiOutputNode(outputs)
|
||||
|
||||
if envs.VLLM_USE_RAY_WRAPPED_PP_COMM:
|
||||
from ray.experimental.channel.accelerator_context import (
|
||||
register_accelerator_context,
|
||||
)
|
||||
|
||||
from vllm.distributed.device_communicators.ray_communicator import (
|
||||
RayPPCommunicator,
|
||||
)
|
||||
|
||||
register_accelerator_context(
|
||||
torch_module_name="cuda", communicator_cls=RayPPCommunicator
|
||||
)
|
||||
logger.info(
|
||||
"Using RayPPCommunicator "
|
||||
"(which wraps vLLM _PP GroupCoordinator) "
|
||||
"for Ray Compiled Graph communication."
|
||||
)
|
||||
else:
|
||||
logger.info(
|
||||
"Using Ray's NCCL communicator for Ray Compiled Graph communication."
|
||||
)
|
||||
|
||||
return forward_dag.experimental_compile(
|
||||
enable_asyncio=enable_asyncio,
|
||||
_overlap_gpu_communication=envs.VLLM_USE_RAY_COMPILED_DAG_OVERLAP_COMM,
|
||||
)
|
||||
|
||||
def __del__(self):
|
||||
self.shutdown()
|
||||
|
||||
def check_health(self) -> None:
|
||||
# Assume that the Ray workers are healthy.
|
||||
# TODO: check the health of the Ray workers
|
||||
return
|
||||
vllm/v1/executor/ray_utils.py (new file, 425 lines)
@@ -0,0 +1,425 @@
|
||||
# SPDX-License-Identifier: Apache-2.0
|
||||
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
|
||||
|
||||
import os
|
||||
import time
|
||||
from collections import defaultdict
|
||||
from concurrent.futures import Future
|
||||
from typing import TYPE_CHECKING, Union
|
||||
|
||||
import vllm.platforms
|
||||
from vllm.config import ParallelConfig
|
||||
from vllm.distributed import get_pp_group
|
||||
from vllm.distributed.kv_transfer.kv_connector.utils import KVOutputAggregator
|
||||
from vllm.logger import init_logger
|
||||
from vllm.platforms import current_platform
|
||||
from vllm.sequence import IntermediateTensors
|
||||
from vllm.utils.network_utils import get_ip
|
||||
from vllm.v1.outputs import AsyncModelRunnerOutput
|
||||
from vllm.v1.worker.worker_base import WorkerWrapperBase
|
||||
|
||||
if TYPE_CHECKING:
|
||||
from vllm.v1.core.sched.output import SchedulerOutput
|
||||
from vllm.v1.outputs import ModelRunnerOutput
|
||||
|
||||
logger = init_logger(__name__)
|
||||
PG_WAIT_TIMEOUT = 1800
|
||||
|
||||
try:
|
||||
import ray
|
||||
from ray.util import placement_group_table
|
||||
from ray.util.placement_group import PlacementGroup
|
||||
|
||||
try:
|
||||
from ray._private.state import available_resources_per_node
|
||||
except ImportError:
|
||||
# Ray 2.9.x doesn't expose `available_resources_per_node`
|
||||
from ray._private.state import state as _state
|
||||
|
||||
available_resources_per_node = _state._available_resources_per_node
|
||||
|
||||
class RayWorkerWrapper(WorkerWrapperBase):
|
||||
"""Ray wrapper for vllm.worker.Worker, allowing Worker to be
|
||||
lazily initialized after Ray sets CUDA_VISIBLE_DEVICES."""
|
||||
|
||||
def __init__(self, *args, **kwargs) -> None:
|
||||
super().__init__(*args, **kwargs)
|
||||
# The compiled DAG runs its main execution on a different thread
# that calls cuda.set_device. This flag indicates whether
# set_device has been called on that thread.
|
||||
self.compiled_dag_cuda_device_set = False
|
||||
|
||||
def get_node_ip(self) -> str:
|
||||
return get_ip()
|
||||
|
||||
def get_node_and_gpu_ids(self) -> tuple[str, list[int]]:
|
||||
node_id = ray.get_runtime_context().get_node_id()
|
||||
device_key = vllm.platforms.current_platform.ray_device_key
|
||||
if not device_key:
|
||||
raise RuntimeError(
|
||||
"current platform %s does not support ray.",
|
||||
vllm.platforms.current_platform.device_name,
|
||||
)
|
||||
gpu_ids = ray.get_runtime_context().get_accelerator_ids()[device_key]
|
||||
return node_id, gpu_ids
|
||||
|
||||
def setup_device_if_necessary(self):
|
||||
# TODO(swang): This is needed right now because Ray CG executes
|
||||
# on a background thread, so we need to reset torch's current
|
||||
# device.
|
||||
# We can remove this API after it is fixed in compiled graph.
|
||||
assert self.worker is not None, "Worker is not initialized"
|
||||
if not self.compiled_dag_cuda_device_set:
|
||||
if current_platform.is_tpu():
|
||||
# Not needed
|
||||
pass
|
||||
else:
|
||||
assert self.worker.device is not None
|
||||
current_platform.set_device(self.worker.device)
|
||||
|
||||
self.compiled_dag_cuda_device_set = True
|
||||
|
||||
def execute_model_ray(
|
||||
self,
|
||||
scheduler_output: Union[
|
||||
"SchedulerOutput", tuple["SchedulerOutput", "IntermediateTensors"]
|
||||
],
|
||||
) -> Union[
|
||||
"ModelRunnerOutput", tuple["SchedulerOutput", "IntermediateTensors"]
|
||||
]:
|
||||
# This method is used by Ray Compiled Graph to execute the model,
|
||||
# and it needs a special logic of self.setup_device_if_necessary()
|
||||
self.setup_device_if_necessary()
|
||||
assert self.worker is not None, "Worker is not initialized"
|
||||
if isinstance(scheduler_output, tuple):
|
||||
scheduler_output, intermediate_tensors = scheduler_output
|
||||
else:
|
||||
scheduler_output, intermediate_tensors = scheduler_output, None
|
||||
assert self.worker.model_runner is not None
|
||||
output = self.worker.model_runner.execute_model(
|
||||
scheduler_output, intermediate_tensors
|
||||
)
|
||||
if isinstance(output, IntermediateTensors):
|
||||
output = scheduler_output, output
|
||||
elif not get_pp_group().is_last_rank:
|
||||
# Case where there are no scheduled requests
|
||||
# but there may still be finished requests.
|
||||
assert not output or not output.req_ids
|
||||
output = scheduler_output, None
|
||||
# Ensure outputs crossing Ray compiled DAG are serializable.
|
||||
# AsyncModelRunnerOutput holds CUDA events and cannot be
|
||||
# pickled.
|
||||
if isinstance(output, AsyncModelRunnerOutput):
|
||||
output = output.get_output()
|
||||
return output
|
||||
|
||||
def override_env_vars(self, vars: dict[str, str]):
|
||||
os.environ.update(vars)
|
||||
|
||||
ray_import_err = None
|
||||
|
||||
except ImportError as e:
|
||||
ray = None # type: ignore
|
||||
# only capture string to avoid variable references in the traceback that can
|
||||
# prevent garbage collection in some cases
|
||||
ray_import_err = str(e)
|
||||
RayWorkerWrapper = None # type: ignore
|
||||
|
||||
|
||||
class FutureWrapper(Future):
|
||||
"""A wrapper around Ray output reference to meet the interface
|
||||
of .execute_model(): The top level (core busy loop) expects .result() api
|
||||
to block and return a single output.
|
||||
|
||||
If aggregator is provided, the outputs from all workers are aggregated upon
|
||||
the result() call. If not, only the first worker's output is returned.
|
||||
"""
|
||||
|
||||
def __init__(self, refs, aggregator: KVOutputAggregator | None = None):
|
||||
super().__init__()
|
||||
self.refs = refs
|
||||
self.aggregator = aggregator
|
||||
|
||||
def result(self, timeout=None):
|
||||
if timeout is not None:
|
||||
raise NotImplementedError("timeout is not supported")
|
||||
|
||||
if self.aggregator is None:
|
||||
return self.refs[0].get()
|
||||
|
||||
outputs = [ref.get() for ref in self.refs]
|
||||
return self.aggregator.aggregate(outputs, output_rank=0)
|
||||
|
||||
|
||||
def ray_is_available() -> bool:
|
||||
"""Returns True if Ray is available."""
|
||||
return ray is not None
|
||||
|
||||
|
||||
def assert_ray_available():
|
||||
"""Raise an exception if Ray is not available."""
|
||||
if ray is None:
|
||||
raise ValueError(
|
||||
f"Failed to import Ray: {ray_import_err}."
|
||||
"Please install Ray with `pip install ray`."
|
||||
)
|
||||
|
||||
|
||||
def _verify_bundles(
|
||||
placement_group: "PlacementGroup", parallel_config: ParallelConfig, device_str: str
|
||||
):
|
||||
"""Verify a given placement group has bundles located in the right place.
|
||||
|
||||
There are 2 rules.
|
||||
- Warn if all tensor parallel workers cannot fit in a single node.
|
||||
- Fail if driver node is not included in a placement group.
|
||||
"""
|
||||
assert ray.is_initialized(), (
|
||||
"Ray is not initialized although distributed-executor-backend is ray."
|
||||
)
|
||||
pg_data = placement_group_table(placement_group)
|
||||
# bundle_idx -> node_id
|
||||
bundle_to_node_ids = pg_data["bundles_to_node_id"]
|
||||
# bundle_idx -> bundle (e.g., {"GPU": 1})
|
||||
bundles = pg_data["bundles"]
|
||||
# node_id -> List of bundle (e.g., {"GPU": 1})
|
||||
node_id_to_bundle: dict[str, list[dict[str, float]]] = defaultdict(list)
|
||||
|
||||
for bundle_idx, node_id in bundle_to_node_ids.items():
|
||||
node_id_to_bundle[node_id].append(bundles[bundle_idx])
|
||||
driver_node_id = ray.get_runtime_context().get_node_id()
|
||||
|
||||
if driver_node_id not in node_id_to_bundle:
|
||||
raise RuntimeError(
|
||||
f"driver node id {driver_node_id} is not included in a placement "
|
||||
f"group {placement_group.id}. Node id -> bundles "
|
||||
f"{node_id_to_bundle}. "
|
||||
"You don't have enough GPUs available in a current node. Check "
|
||||
"`ray status` and `ray list nodes` to see if you have available "
|
||||
"GPUs in a node `{driver_node_id}` before starting an vLLM engine."
|
||||
)
|
||||
|
||||
for node_id, bundles in node_id_to_bundle.items():
|
||||
if len(bundles) < parallel_config.tensor_parallel_size:
|
||||
logger.warning(
|
||||
"tensor_parallel_size=%d "
|
||||
"is bigger than a reserved number of %ss (%d "
|
||||
"%ss) in a node %s. Tensor parallel workers can be "
|
||||
"spread out to 2+ nodes which can degrade the performance "
|
||||
"unless you have fast interconnect across nodes, like "
|
||||
"Infiniband. To resolve this issue, make sure you have more "
|
||||
"than %d GPUs available at each node.",
|
||||
parallel_config.tensor_parallel_size,
|
||||
device_str,
|
||||
len(bundles),
|
||||
device_str,
|
||||
node_id,
|
||||
parallel_config.tensor_parallel_size,
|
||||
)
|
||||
|
||||
|
||||
def _wait_until_pg_ready(current_placement_group: "PlacementGroup"):
|
||||
"""Wait until a placement group is ready.
|
||||
|
||||
It prints the informative log messages if the placement group is
|
||||
not created within time.
|
||||
|
||||
"""
|
||||
# Wait until PG is ready - this will block until all
|
||||
# requested resources are available, and will time out
|
||||
# if they cannot be provisioned.
|
||||
placement_group_specs = current_placement_group.bundle_specs
|
||||
|
||||
s = time.time()
|
||||
pg_ready_ref = current_placement_group.ready()
|
||||
wait_interval = 10
|
||||
while time.time() - s < PG_WAIT_TIMEOUT:
|
||||
ready, _ = ray.wait([pg_ready_ref], timeout=wait_interval)
|
||||
if len(ready) > 0:
|
||||
break
|
||||
|
||||
# Exponential backoff for warning print.
|
||||
wait_interval *= 2
|
||||
logger.info(
|
||||
"Waiting for creating a placement group of specs for "
|
||||
"%d seconds. specs=%s. Check `ray status` and "
|
||||
"`ray list nodes` to see if you have enough resources,"
|
||||
" and make sure the IP addresses used by ray cluster"
|
||||
" are the same as VLLM_HOST_IP environment variable"
|
||||
" specified in each node if you are running on a multi-node.",
|
||||
int(time.time() - s),
|
||||
placement_group_specs,
|
||||
)
|
||||
|
||||
try:
|
||||
ray.get(pg_ready_ref, timeout=0)
|
||||
except ray.exceptions.GetTimeoutError:
|
||||
raise ValueError(
|
||||
"Cannot provide a placement group of "
|
||||
f"{placement_group_specs=} within {PG_WAIT_TIMEOUT} seconds. See "
|
||||
"`ray status` and `ray list nodes` to make sure the cluster has "
|
||||
"enough resources."
|
||||
) from None
|
||||
|
||||
|
||||
def _wait_until_pg_removed(current_placement_group: "PlacementGroup"):
|
||||
ray.util.remove_placement_group(current_placement_group)
|
||||
s = time.time()
|
||||
wait_interval = 10
|
||||
while time.time() - s < PG_WAIT_TIMEOUT:
|
||||
pg = ray.util.get_current_placement_group()
|
||||
if pg is None:
|
||||
break
|
||||
|
||||
# Exponential backoff for warning print.
|
||||
wait_interval *= 2
|
||||
logger.info(
|
||||
"Waiting for removing a placement group of specs for %d seconds.",
|
||||
int(time.time() - s),
|
||||
)
|
||||
time.sleep(wait_interval)
|
||||
|
||||
|
||||
def initialize_ray_cluster(
|
||||
parallel_config: ParallelConfig,
|
||||
ray_address: str | None = None,
|
||||
):
|
||||
"""Initialize the distributed cluster with Ray.
|
||||
|
||||
it will connect to the Ray cluster and create a placement group
|
||||
for the workers, which includes the specification of the resources
|
||||
for each distributed worker.
|
||||
|
||||
Args:
|
||||
parallel_config: The configurations for parallel execution.
|
||||
ray_address: The address of the Ray cluster. If None, uses
|
||||
the default Ray cluster address.
|
||||
"""
|
||||
assert_ray_available()
|
||||
from vllm.platforms import current_platform
|
||||
|
||||
if ray.is_initialized():
|
||||
logger.info("Ray is already initialized. Skipping Ray initialization.")
|
||||
elif current_platform.is_rocm() or current_platform.is_xpu():
|
||||
# Try to connect existing ray instance and create a new one if not found
|
||||
try:
|
||||
ray.init("auto")
|
||||
except ConnectionError:
|
||||
logger.warning(
|
||||
"No existing RAY instance detected. "
|
||||
"A new instance will be launched with current node resources."
|
||||
)
|
||||
ray.init(
|
||||
address=ray_address,
|
||||
num_gpus=parallel_config.world_size,
|
||||
runtime_env=parallel_config.ray_runtime_env,
|
||||
)
|
||||
else:
|
||||
ray.init(address=ray_address, runtime_env=parallel_config.ray_runtime_env)
|
||||
|
||||
device_str = current_platform.ray_device_key
|
||||
if not device_str:
|
||||
raise ValueError(
|
||||
f"current platform {current_platform.device_name} does not support ray."
|
||||
)
|
||||
|
||||
# Create or get the placement group for worker processes
|
||||
if parallel_config.placement_group:
|
||||
current_placement_group = parallel_config.placement_group
|
||||
else:
|
||||
current_placement_group = ray.util.get_current_placement_group()
|
||||
|
||||
if current_placement_group:
|
||||
logger.info("Using the existing placement group")
|
||||
|
||||
# We are in a placement group
|
||||
bundles = current_placement_group.bundle_specs
|
||||
# Verify that we can use the placement group.
|
||||
device_bundles = 0
|
||||
for bundle in bundles:
|
||||
bundle_devices = bundle.get(device_str, 0)
|
||||
if bundle_devices > 1:
|
||||
raise ValueError(
|
||||
f"Placement group bundle cannot have more than 1 {device_str}."
|
||||
)
|
||||
if bundle_devices:
|
||||
device_bundles += 1
|
||||
if parallel_config.world_size > device_bundles:
|
||||
raise ValueError(
|
||||
f"The number of required {device_str}s exceeds the total "
|
||||
f"number of available {device_str}s in the placement group. "
|
||||
f"Required number of devices: {parallel_config.world_size}. "
|
||||
f"Total number of devices: {device_bundles}."
|
||||
)
|
||||
else:
|
||||
logger.info("No current placement group found. Creating a new placement group.")
|
||||
num_devices_in_cluster = ray.cluster_resources().get(device_str, 0)
|
||||
# Log a warning message and delay resource allocation failure response.
|
||||
# Avoid immediate rejection to allow user-initiated placement group
|
||||
# created and wait cluster to be ready
|
||||
if parallel_config.world_size > num_devices_in_cluster:
|
||||
logger.warning(
|
||||
"The number of required %ss exceeds the total "
|
||||
"number of available %ss in the placement group.",
|
||||
device_str,
|
||||
device_str,
|
||||
)
|
||||
# Create a new placement group
|
||||
placement_group_specs: list[dict[str, float]] = [
|
||||
{device_str: 1.0} for _ in range(parallel_config.world_size)
|
||||
]
|
||||
|
||||

        # The vLLM engine is also a worker that executes the model on an
        # accelerator, so the current node must have at least one device.
        current_ip = get_ip()
        current_node_id = ray.get_runtime_context().get_node_id()
        current_node_resource = available_resources_per_node()[current_node_id]
        if current_node_resource.get(device_str, 0) < 1:
            raise ValueError(
                f"Current node has no {device_str} available. "
                f"{current_node_resource=}. vLLM engine cannot start without "
                f"{device_str}. Make sure you have at least 1 {device_str} "
                f"available in a node {current_node_id=} {current_ip=}."
            )
        # This way, at least one bundle is required to be created on the
        # current node.
        placement_group_specs[0][f"node:{current_ip}"] = 0.001

        # By default, Ray packs resources as much as possible.
        current_placement_group = ray.util.placement_group(
            placement_group_specs, strategy="PACK"
        )
        _wait_until_pg_ready(current_placement_group)

    assert current_placement_group is not None
    _verify_bundles(current_placement_group, parallel_config, device_str)
    # Set the placement group in the parallel config
    parallel_config.placement_group = current_placement_group
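
# Worked example (illustrative values only, not taken from this diff): with
# world_size=2, device_str="GPU", and current_ip="10.0.0.1", the newly created
# group above would use
#   placement_group_specs = [{"GPU": 1.0, "node:10.0.0.1": 0.001}, {"GPU": 1.0}]
# i.e. one single-device bundle per worker, with the first bundle pinned to
# the node running the vLLM engine.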

def get_num_tpu_nodes() -> int:
    from ray._private.accelerators import TPUAcceleratorManager

    cluster_resources = ray.cluster_resources()
    total_tpus = int(cluster_resources["TPU"])
    tpus_per_node = TPUAcceleratorManager.get_current_node_num_accelerators()
    assert total_tpus % tpus_per_node == 0
    return total_tpus // tpus_per_node
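
# For example (illustrative numbers): a cluster reporting 16 TPUs with
# 4 TPUs per node gives get_num_tpu_nodes() == 4.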

def get_num_nodes_in_placement_group() -> int:
    pg_table = ray.util.placement_group_table()
    current_pg = ray.util.get_current_placement_group()
    num_nodes = 0

    if current_pg:
        nodes_in_pg = set()
        for pg_key, pg in pg_table.items():
            if pg_key == current_pg.id.hex():
                for _, node in pg["bundles_to_node_id"].items():
                    nodes_in_pg.add(node)
        num_nodes = len(nodes_in_pg)

    return num_nodes
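
As a rough, self-contained sketch of the bundle accounting that the placement-group checks above perform (standalone Python using made-up bundle data rather than a live Ray cluster; count_device_bundles, specs, and the "GPU" key are illustrative stand-ins, not vLLM or Ray APIs):

def count_device_bundles(bundle_specs: list[dict[str, float]], device_str: str) -> int:
    # Mirror of the verification above: each bundle may hold at most one
    # device, and we count how many bundles carry a device at all.
    device_bundles = 0
    for bundle in bundle_specs:
        bundle_devices = bundle.get(device_str, 0)
        if bundle_devices > 1:
            raise ValueError(f"Bundle cannot have more than 1 {device_str}.")
        if bundle_devices:
            device_bundles += 1
    return device_bundles


# Illustrative data: two single-GPU bundles, the first pinned to a node.
specs = [{"GPU": 1.0, "node:10.0.0.1": 0.001}, {"GPU": 1.0}]
assert count_device_bundles(specs, "GPU") == 2  # enough for world_size == 2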

155  vllm/v1/executor/uniproc_executor.py  Normal file
@@ -0,0 +1,155 @@
# SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
import os
from collections.abc import Callable
from concurrent.futures import Future, ThreadPoolExecutor
from functools import cached_property
from multiprocessing import Lock
from typing import Any

import torch
import torch.distributed as dist

import vllm.envs as envs
from vllm.logger import init_logger
from vllm.utils import run_method
from vllm.utils.network_utils import get_distributed_init_method, get_ip, get_open_port
from vllm.v1.engine import ReconfigureDistributedRequest, ReconfigureRankType
from vllm.v1.executor.abstract import Executor
from vllm.v1.outputs import AsyncModelRunnerOutput
from vllm.v1.worker.worker_base import WorkerWrapperBase

logger = init_logger(__name__)

class UniProcExecutor(Executor):
    def _init_executor(self) -> None:
        """Initialize the worker and load the model."""
        self.driver_worker = WorkerWrapperBase(vllm_config=self.vllm_config, rpc_rank=0)
        distributed_init_method, rank, local_rank = self._distributed_args()
        kwargs = dict(
            vllm_config=self.vllm_config,
            local_rank=local_rank,
            rank=rank,
            distributed_init_method=distributed_init_method,
            is_driver_worker=True,
            shared_worker_lock=Lock(),
        )

        self.async_output_thread: ThreadPoolExecutor | None = None
        if self.max_concurrent_batches > 1:
            self.async_output_thread = ThreadPoolExecutor(
                max_workers=1, thread_name_prefix="WorkerAsyncOutput"
            )

        self.driver_worker.init_worker(all_kwargs=[kwargs])
        self.driver_worker.init_device()
        self.driver_worker.load_model()

    def _distributed_args(self) -> tuple[str, int, int]:
        """Return (distributed_init_method, rank, local_rank)."""
        distributed_init_method = get_distributed_init_method(get_ip(), get_open_port())
        # Set the local rank to the device index, if one is specified.
        device_info = self.vllm_config.device_config.device.__str__().split(":")
        local_rank = int(device_info[1]) if len(device_info) > 1 else 0
        return distributed_init_method, 0, local_rank
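
    # For example (hypothetical config values): a device of "cuda:1" yields
    # local_rank == 1, while a bare "cuda" or "cpu" device falls back to
    # local_rank == 0; the global rank is always 0 for this executor.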

    @cached_property
    def max_concurrent_batches(self) -> int:
        return 2 if self.scheduler_config.async_scheduling else 1

    def collective_rpc(
        self,
        method: str | Callable,
        timeout: float | None = None,
        args: tuple = (),
        kwargs: dict | None = None,
        non_block: bool = False,
    ) -> list[Any]:
        if kwargs is None:
            kwargs = {}

        if not non_block:
            return [run_method(self.driver_worker, method, args, kwargs)]

        try:
            result = run_method(self.driver_worker, method, args, kwargs)
            if isinstance(result, AsyncModelRunnerOutput):
                if (async_thread := self.async_output_thread) is not None:
                    return [async_thread.submit(result.get_output)]
                result = result.get_output()
            future = Future[Any]()
            future.set_result(result)
        except Exception as e:
            future = Future[Any]()
            future.set_exception(e)
        return [future]
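
    # Note on the non_block path above: the result (or exception) is wrapped
    # in an already-completed concurrent.futures.Future, so callers can treat
    # blocking and non-blocking calls uniformly, e.g. (hypothetical usage):
    #   [future] = executor.collective_rpc("get_kv_cache_spec", non_block=True)
    #   spec = future.result()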

    def check_health(self) -> None:
        # UniProcExecutor will always be healthy as long as it's running.
        return

    def reinitialize_distributed(
        self, reconfig_request: ReconfigureDistributedRequest
    ) -> None:
        self.driver_worker.reinitialize_distributed(reconfig_request)
        if (
            reconfig_request.new_data_parallel_rank
            == ReconfigureRankType.SHUTDOWN_CURRENT_RANK
        ):
            self.shutdown()

    def shutdown(self) -> None:
        if worker := self.driver_worker:
            worker.shutdown()

class ExecutorWithExternalLauncher(UniProcExecutor):
    """An executor that uses external launchers to launch engines,
    specially designed for torchrun-compatible launchers, for
    offline inference with tensor parallelism.

    See https://github.com/vllm-project/vllm/issues/11400 for
    the motivation, and examples/offline_inference/torchrun_example.py
    for a usage example.

    The key idea: although this is tensor-parallel inference, we create
    only one worker per executor. Users launch multiple engines with a
    torchrun-compatible launcher, and all of these engines work together
    to process the same prompts. When scheduling is deterministic, all
    engines generate the same outputs and do not need to synchronize
    state with each other.
    """

    def _init_executor(self) -> None:
        """Initialize the worker and load the model."""
        if envs.VLLM_USE_V1:
            assert not envs.VLLM_ENABLE_V1_MULTIPROCESSING, (
                "To get deterministic execution in V1, "
                "please set VLLM_ENABLE_V1_MULTIPROCESSING=0"
            )
        super()._init_executor()

    def _distributed_args(self) -> tuple[str, int, int]:
        # Engines are launched by torchrun-compatible launchers, so we can
        # use the env:// init method. Required env vars:
        # - RANK
        # - LOCAL_RANK
        # - MASTER_ADDR
        # - MASTER_PORT
        distributed_init_method = "env://"
        rank = int(os.environ["RANK"])
        local_rank = int(os.environ["LOCAL_RANK"])
        return distributed_init_method, rank, local_rank

    def determine_available_memory(self) -> list[int]:  # in bytes
        # We need the minimum across all ranks.
        memory = super().determine_available_memory()
        from vllm.distributed.parallel_state import get_world_group

        cpu_group = get_world_group().cpu_group
        memory_tensor = torch.tensor([memory], device="cpu", dtype=torch.int64)
        dist.all_reduce(memory_tensor, group=cpu_group, op=dist.ReduceOp.MIN)
        return [memory_tensor.item()]
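
For context, a minimal sketch of how ExecutorWithExternalLauncher is typically driven (this is not the contents of examples/offline_inference/torchrun_example.py; the model name and prompt are placeholders, and the "external_launcher" backend selection is assumed from the class's purpose):

# Launch with a torchrun-compatible launcher, which provides the RANK,
# LOCAL_RANK, MASTER_ADDR and MASTER_PORT env vars used above, e.g.:
#   VLLM_ENABLE_V1_MULTIPROCESSING=0 torchrun --nproc-per-node=2 this_script.py
from vllm import LLM, SamplingParams

llm = LLM(
    model="facebook/opt-125m",  # placeholder model
    tensor_parallel_size=2,
    distributed_executor_backend="external_launcher",
)
outputs = llm.generate(["Hello, my name is"], SamplingParams(temperature=0.0))
# With deterministic scheduling, every rank produces the same outputs, so the
# engines never need to exchange state with each other.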