Convert formatting to use ruff instead of yapf + isort (#26247)
Signed-off-by: Harry Mellor <19981378+hmellor@users.noreply.github.com>
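The change below is a mechanical reformatting of vLLM's GPU worker class: yapf's paren-aligned continuation lines and isort's wrapped imports are replaced by ruff's black-style output (parenthesized imports with one name per line, 4-space hanging indents, magic trailing commas). As a minimal sketch of the style difference only — `configure_worker` and its arguments are hypothetical names, not code from this commit:

# Hypothetical helper, defined only so this snippet runs; it is not part of vLLM.
def configure_worker(vllm_config, local_rank, rank):
    return (vllm_config, local_rank, rank)

# Before (yapf + isort): continuation arguments aligned to the opening parenthesis.
configure_worker(vllm_config="cfg",
                 local_rank=0,
                 rank=0)

# After (ruff format): 4-space hanging indent, one argument per line, trailing comma.
configure_worker(
    vllm_config="cfg",
    local_rank=0,
    rank=0,
)

The hunks that follow apply this same transformation throughout the file.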
@@ -1,6 +1,7 @@
 # SPDX-License-Identifier: Apache-2.0
 # SPDX-FileCopyrightText: Copyright contributors to the vLLM project
 """A GPU worker class."""
+
 import copy
 import gc
 import os
@@ -13,9 +14,11 @@ import torch.nn as nn
 
 import vllm.envs as envs
 from vllm.config import VllmConfig
-from vllm.distributed import (ensure_model_parallel_initialized,
-                              init_distributed_environment,
-                              set_custom_all_reduce)
+from vllm.distributed import (
+    ensure_model_parallel_initialized,
+    init_distributed_environment,
+    set_custom_all_reduce,
+)
 from vllm.distributed.kv_transfer import ensure_kv_transfer_initialized
 from vllm.distributed.parallel_state import get_pp_group, get_tp_group
 from vllm.logger import init_logger
@@ -28,8 +31,12 @@ from vllm.tasks import SupportedTask
 from vllm.utils import GiB_bytes, MemorySnapshot, memory_profiling
 from vllm.v1.engine import ReconfigureDistributedRequest, ReconfigureRankType
 from vllm.v1.kv_cache_interface import KVCacheConfig, KVCacheSpec
-from vllm.v1.outputs import (EMPTY_MODEL_RUNNER_OUTPUT, AsyncModelRunnerOutput,
-                             DraftTokenIds, ModelRunnerOutput)
+from vllm.v1.outputs import (
+    EMPTY_MODEL_RUNNER_OUTPUT,
+    AsyncModelRunnerOutput,
+    DraftTokenIds,
+    ModelRunnerOutput,
+)
 from vllm.v1.utils import report_usage_stats
 from vllm.v1.worker.gpu_model_runner import GPUModelRunner
 from vllm.v1.worker.utils import is_residual_scattered_for_sp
@@ -43,7 +50,6 @@ if TYPE_CHECKING:
 
 
 class Worker(WorkerBase):
-
     def __init__(
         self,
         vllm_config: VllmConfig,
@@ -52,16 +58,18 @@ class Worker(WorkerBase):
         distributed_init_method: str,
         is_driver_worker: bool = False,
     ):
-
-        super().__init__(vllm_config=vllm_config,
-                         local_rank=local_rank,
-                         rank=rank,
-                         distributed_init_method=distributed_init_method,
-                         is_driver_worker=is_driver_worker)
+        super().__init__(
+            vllm_config=vllm_config,
+            local_rank=local_rank,
+            rank=rank,
+            distributed_init_method=distributed_init_method,
+            is_driver_worker=is_driver_worker,
+        )
 
         if self.model_config.trust_remote_code:
             # note: lazy import to avoid importing torch before initializing
             from vllm.utils import init_cached_hf_modules
+
             init_cached_hf_modules()
 
         # Buffers saved before sleep
@@ -71,8 +79,10 @@ class Worker(WorkerBase):
         # VLLM_TORCH_PROFILER_DIR=/path/to/save/trace
         if envs.VLLM_TORCH_PROFILER_DIR:
             torch_profiler_trace_dir = envs.VLLM_TORCH_PROFILER_DIR
-            logger.info("Profiling enabled. Traces will be saved to: %s",
-                        torch_profiler_trace_dir)
+            logger.info(
+                "Profiling enabled. Traces will be saved to: %s",
+                torch_profiler_trace_dir,
+            )
             logger.debug(
                 "Profiler config: record_shapes=%s,"
                 "profile_memory=%s,with_stack=%s,with_flops=%s",
@@ -91,7 +101,9 @@ class Worker(WorkerBase):
                 with_stack=envs.VLLM_TORCH_PROFILER_WITH_STACK,
                 with_flops=envs.VLLM_TORCH_PROFILER_WITH_FLOPS,
                 on_trace_ready=torch.profiler.tensorboard_trace_handler(
-                    torch_profiler_trace_dir, use_gzip=True))
+                    torch_profiler_trace_dir, use_gzip=True
+                ),
+            )
         else:
             self.profiler = None
 
@@ -104,20 +116,20 @@ class Worker(WorkerBase):
         if level == 2:
             model = self.model_runner.model
             self._sleep_saved_buffers = {
-                name: buffer.cpu().clone()
-                for name, buffer in model.named_buffers()
+                name: buffer.cpu().clone() for name, buffer in model.named_buffers()
             }
 
         allocator = CuMemAllocator.get_instance()
-        allocator.sleep(offload_tags=("weights", ) if level == 1 else tuple())
+        allocator.sleep(offload_tags=("weights",) if level == 1 else tuple())
         free_bytes_after_sleep, total = torch.cuda.mem_get_info()
         freed_bytes = free_bytes_after_sleep - free_bytes_before_sleep
         used_bytes = total - free_bytes_after_sleep
         assert freed_bytes >= 0, "Memory usage increased after sleeping."
         logger.info(
-            "Sleep mode freed %.2f GiB memory, "
-            "%.2f GiB memory is still in use.", freed_bytes / GiB_bytes,
-            used_bytes / GiB_bytes)
+            "Sleep mode freed %.2f GiB memory, %.2f GiB memory is still in use.",
+            freed_bytes / GiB_bytes,
+            used_bytes / GiB_bytes,
+        )
 
     def wake_up(self, tags: Optional[list[str]] = None) -> None:
         from vllm.device_allocator.cumem import CuMemAllocator
@@ -133,23 +145,21 @@ class Worker(WorkerBase):
                     buffer.data.copy_(self._sleep_saved_buffers[name].data)
             self._sleep_saved_buffers = {}
 
-    def _maybe_get_memory_pool_context(self,
-                                       tag: str) -> AbstractContextManager:
+    def _maybe_get_memory_pool_context(self, tag: str) -> AbstractContextManager:
         if self.vllm_config.model_config.enable_sleep_mode:
             from vllm.device_allocator.cumem import CuMemAllocator
 
             allocator = CuMemAllocator.get_instance()
             if tag == "weights":
                 assert allocator.get_current_usage() == 0, (
-                    "Sleep mode can only be "
-                    "used for one instance per process.")
+                    "Sleep mode can only be used for one instance per process."
+                )
             context = allocator.use_memory_pool(tag=tag)
         else:
             context = nullcontext()
         return context
 
-    def initialize_cache(self, num_gpu_blocks: int,
-                         num_cpu_blocks: int) -> None:
+    def initialize_cache(self, num_gpu_blocks: int, num_cpu_blocks: int) -> None:
         self.cache_config.num_gpu_blocks = num_gpu_blocks
         self.cache_config.num_cpu_blocks = num_cpu_blocks
 
@@ -166,10 +176,13 @@ class Worker(WorkerBase):
             # memory snapshot
             # This ensures NCCL buffers are allocated before we measure
             # available memory
-            init_worker_distributed_environment(self.vllm_config, self.rank,
-                                                self.distributed_init_method,
-                                                self.local_rank,
-                                                current_platform.dist_backend)
+            init_worker_distributed_environment(
+                self.vllm_config,
+                self.rank,
+                self.distributed_init_method,
+                self.local_rank,
+                current_platform.dist_backend,
+            )
 
             # Set random seed.
             set_random_seed(self.model_config.seed)
@@ -180,8 +193,10 @@ class Worker(WorkerBase):
 
             # take current memory snapshot
             self.init_snapshot = MemorySnapshot()
-            self.requested_memory = (self.init_snapshot.total_memory *
-                                     self.cache_config.gpu_memory_utilization)
+            self.requested_memory = (
+                self.init_snapshot.total_memory
+                * self.cache_config.gpu_memory_utilization
+            )
             if self.init_snapshot.free_memory < self.requested_memory:
                 GiB = lambda b: round(b / GiB_bytes, 2)
                 raise ValueError(
@@ -194,12 +209,12 @@ class Worker(WorkerBase):
                     f"utilization or reduce GPU memory used by other processes."
                 )
         else:
-            raise RuntimeError(
-                f"Not support device type: {self.device_config.device}")
+            raise RuntimeError(f"Not support device type: {self.device_config.device}")
 
         # Construct the model runner
         self.model_runner: GPUModelRunner = GPUModelRunner(
-            self.vllm_config, self.device)
+            self.vllm_config, self.device
+        )
 
         if self.rank == 0:
             # If usage stat is enabled, collect relevant info.
@@ -247,7 +262,8 @@ class Worker(WorkerBase):
             "size. If OOM'ed, check the difference of initial free "
             "memory between the current run and the previous run "
             "where kv_cache_memory_bytes is suggested and update it "
-            "correspondingly.")
+            "correspondingly."
+        )
         logger.info(msg)
         return kv_cache_memory_bytes
 
@@ -257,8 +273,8 @@ class Worker(WorkerBase):
         # Execute a forward pass with dummy inputs to profile the memory usage
         # of the model.
         with memory_profiling(
-                self.init_snapshot,
-                weights_memory=int(self.model_runner.model_memory_usage),
+            self.init_snapshot,
+            weights_memory=int(self.model_runner.model_memory_usage),
         ) as profile_result:
             self.model_runner.profile_run()
 
@@ -275,15 +291,15 @@ class Worker(WorkerBase):
             "This happens when other processes sharing the same container "
             "release GPU memory while vLLM is profiling during initialization. "
             "To fix this, ensure consistent GPU memory allocation or "
-            "isolate vLLM in its own container.")
-        self.available_kv_cache_memory_bytes = self.requested_memory \
-            - profile_result.non_kv_cache_memory
+            "isolate vLLM in its own container."
+        )
+        self.available_kv_cache_memory_bytes = (
+            self.requested_memory - profile_result.non_kv_cache_memory
+        )
 
-        unrequested_memory = self.init_snapshot.free_memory \
-            - self.requested_memory
+        unrequested_memory = self.init_snapshot.free_memory - self.requested_memory
         logger.debug(
-            "Initial free memory: %.2f GiB; "
-            "Requested memory: %.2f (util), %.2f GiB",
+            "Initial free memory: %.2f GiB; Requested memory: %.2f (util), %.2f GiB",
             GiB(self.init_snapshot.free_memory),
             self.cache_config.gpu_memory_utilization,
             GiB(self.requested_memory),
@@ -295,8 +311,10 @@ class Worker(WorkerBase):
             GiB(free_gpu_memory - unrequested_memory),
         )
         logger.debug(profile_result)
-        logger.info("Available KV cache memory: %.2f GiB",
-                    GiB(self.available_kv_cache_memory_bytes))
+        logger.info(
+            "Available KV cache memory: %.2f GiB",
+            GiB(self.available_kv_cache_memory_bytes),
+        )
         gc.collect()
 
         return int(self.available_kv_cache_memory_bytes)
@@ -324,15 +342,14 @@ class Worker(WorkerBase):
         warmup_sizes = self.vllm_config.compilation_config.compile_sizes.copy()
         if not self.model_config.enforce_eager:
             warmup_sizes = [
-                x for x in warmup_sizes if x not in
-                self.vllm_config.compilation_config.cudagraph_capture_sizes
+                x
+                for x in warmup_sizes
+                if x not in self.vllm_config.compilation_config.cudagraph_capture_sizes
             ]
         # We skip EPLB here since we don't want to record dummy metrics
         for size in sorted(warmup_sizes, reverse=True):
             logger.info("Compile and warming up model for size %d", size)
-            self.model_runner._dummy_run(size,
-                                         skip_eplb=True,
-                                         remove_lora=False)
+            self.model_runner._dummy_run(size, skip_eplb=True, remove_lora=False)
         self.model_runner.maybe_remove_all_loras(self.model_runner.lora_config)
 
         # Warmup and tune the kernels used during model execution before
@@ -343,8 +360,9 @@ class Worker(WorkerBase):
         if not self.model_config.enforce_eager:
             cuda_graph_memory_bytes = self.model_runner.capture_model()
 
-        if (self.cache_config.kv_cache_memory_bytes is None
-                and hasattr(self, "peak_activation_memory")):
+        if self.cache_config.kv_cache_memory_bytes is None and hasattr(
+            self, "peak_activation_memory"
+        ):
             # Suggests optimal kv cache memory size if we rely on
             # memory_profiling to guess the kv cache memory size which
             # provides peak_activation_memory and a few other memory
@@ -358,16 +376,22 @@ class Worker(WorkerBase):
             # slightly underestimate the memory consumption.
             # So leave a small buffer (=150MiB) to avoid OOM.
             redundancy_buffer_memory = 150 * (1 << 20)
-            non_kv_cache_memory = (self.model_runner.model_memory_usage +
-                                   self.peak_activation_memory +
-                                   self.non_torch_memory +
-                                   cuda_graph_memory_bytes)
+            non_kv_cache_memory = (
+                self.model_runner.model_memory_usage
+                + self.peak_activation_memory
+                + self.non_torch_memory
+                + cuda_graph_memory_bytes
+            )
             kv_cache_memory_bytes_to_gpu_limit = (
-                self.init_snapshot.free_memory - non_kv_cache_memory -
-                redundancy_buffer_memory)
+                self.init_snapshot.free_memory
+                - non_kv_cache_memory
+                - redundancy_buffer_memory
+            )
             kv_cache_memory_bytes_to_requested_limit = (
-                int(self.requested_memory) - non_kv_cache_memory -
-                redundancy_buffer_memory)
+                int(self.requested_memory)
+                - non_kv_cache_memory
+                - redundancy_buffer_memory
+            )
 
             msg = (
                 f"Free memory on device "
@@ -388,7 +412,8 @@ class Worker(WorkerBase):
                 f"{kv_cache_memory_bytes_to_gpu_limit}` "
                 f"({GiB(kv_cache_memory_bytes_to_gpu_limit)} GiB) to fully "
                 f"utilize gpu memory. Current kv cache memory in use is "
-                f"{GiB(self.available_kv_cache_memory_bytes)} GiB.")
+                f"{GiB(self.available_kv_cache_memory_bytes)} GiB."
+            )
 
             logger.debug(msg)
 
@@ -398,20 +423,20 @@ class Worker(WorkerBase):
         # NOTE: This is called after `capture_model` on purpose to prevent
         # memory buffers from being cleared by `torch.cuda.empty_cache`.
         if get_pp_group().is_last_rank:
-            max_num_reqs = min(self.scheduler_config.max_num_seqs,
-                               self.scheduler_config.max_num_batched_tokens)
+            max_num_reqs = min(
+                self.scheduler_config.max_num_seqs,
+                self.scheduler_config.max_num_batched_tokens,
+            )
 
             # We skip EPLB here since we don't want to record dummy metrics
-            hidden_states, last_hidden_states = \
-                self.model_runner._dummy_run(
-                    num_tokens=max_num_reqs,
-                    skip_eplb=True,
-                )
+            hidden_states, last_hidden_states = self.model_runner._dummy_run(
+                num_tokens=max_num_reqs,
+                skip_eplb=True,
+            )
             if self.model_runner.is_pooling_model:
                 self.model_runner._dummy_pooler_run(hidden_states)
             else:
-                self.model_runner._dummy_sampler_run(
-                    hidden_states=last_hidden_states)
+                self.model_runner._dummy_sampler_run(hidden_states=last_hidden_states)
 
         # Reset the seed to ensure that the random state is not affected by
         # the model initialization and profiling.
@@ -431,32 +456,36 @@ class Worker(WorkerBase):
         intermediate_tensors = None
         forward_pass = scheduler_output.total_num_scheduled_tokens > 0
         num_scheduled_tokens = scheduler_output.total_num_scheduled_tokens
-        num_input_tokens = self.model_runner._get_num_input_tokens(
-            num_scheduled_tokens)
+        num_input_tokens = self.model_runner._get_num_input_tokens(num_scheduled_tokens)
         all_gather_tensors = {
-            "residual":
-            not is_residual_scattered_for_sp(self.vllm_config,
-                                             num_input_tokens)
+            "residual": not is_residual_scattered_for_sp(
+                self.vllm_config, num_input_tokens
+            )
         }
         if forward_pass and not get_pp_group().is_first_rank:
             intermediate_tensors = IntermediateTensors(
                 get_pp_group().recv_tensor_dict(
                     all_gather_group=get_tp_group(),
-                    all_gather_tensors=all_gather_tensors))
+                    all_gather_tensors=all_gather_tensors,
+                )
+            )
 
-        output = self.model_runner.execute_model(scheduler_output,
-                                                 intermediate_tensors)
+        output = self.model_runner.execute_model(scheduler_output, intermediate_tensors)
         if isinstance(output, (ModelRunnerOutput, AsyncModelRunnerOutput)):
             return output
 
         assert isinstance(output, IntermediateTensors)
         parallel_config = self.vllm_config.parallel_config
-        assert parallel_config.distributed_executor_backend != (
-            "external_launcher") and not get_pp_group().is_last_rank
+        assert (
+            parallel_config.distributed_executor_backend != ("external_launcher")
+            and not get_pp_group().is_last_rank
+        )
 
-        get_pp_group().send_tensor_dict(output.tensors,
-                                        all_gather_group=get_tp_group(),
-                                        all_gather_tensors=all_gather_tensors)
+        get_pp_group().send_tensor_dict(
+            output.tensors,
+            all_gather_group=get_tp_group(),
+            all_gather_tensors=all_gather_tensors,
+        )
 
         kv_connector_output = output.kv_connector_output
         if not kv_connector_output:
@@ -483,8 +512,9 @@ class Worker(WorkerBase):
         self.profiler.stop()
         # only print profiler results on rank 0
         if self.local_rank == 0:
-            print(self.profiler.key_averages().table(
-                sort_by="self_cuda_time_total"))
+            print(
+                self.profiler.key_averages().table(sort_by="self_cuda_time_total")
+            )
 
     def execute_dummy_batch(self) -> None:
         self.model_runner._dummy_run(1, uniform_decode=True)
@@ -505,68 +535,79 @@ class Worker(WorkerBase):
         # worker will always be healthy as long as it's running.
         return
 
-    def _eplb_before_scale_down(self, old_ep_size: int,
-                                new_ep_size: int) -> None:
+    def _eplb_before_scale_down(self, old_ep_size: int, new_ep_size: int) -> None:
         from vllm.distributed.parallel_state import get_ep_group
+
         if get_ep_group().rank == 0:
-            logger.info("[Elastic EP] Starting expert resharding "
-                        "before scaling down...")
+            logger.info(
+                "[Elastic EP] Starting expert resharding before scaling down..."
+            )
         rank_mapping = {
             old_ep_rank: old_ep_rank if old_ep_rank < new_ep_size else -1
             for old_ep_rank in range(old_ep_size)
         }
         assert self.model_runner.eplb_state is not None
-        self.model_runner.eplb_state.rearrange(self.model_runner.model,
-                                               execute_shuffle=True,
-                                               global_expert_load=None,
-                                               rank_mapping=rank_mapping)
-        torch.cuda.synchronize()
-        if get_ep_group().rank == 0:
-            logger.info("[Elastic EP] Expert resharding completed!")
-
-    def _eplb_after_scale_up(
-            self, old_ep_size: int, new_ep_size: int,
-            global_expert_load: Optional[torch.Tensor]) -> None:
-        from vllm.distributed.parallel_state import get_ep_group
-        if get_ep_group().rank == 0:
-            logger.info("[Elastic EP] Starting expert resharding "
-                        "after scaling up...")
-        rank_mapping = {
-            old_ep_rank: old_ep_rank
-            for old_ep_rank in range(old_ep_size)
-        }
-        assert self.model_runner.eplb_state is not None
+        self.model_runner.eplb_state.rearrange(
+            self.model_runner.model,
+            execute_shuffle=True,
+            global_expert_load=None,
+            rank_mapping=rank_mapping,
+        )
+        torch.cuda.synchronize()
+        if get_ep_group().rank == 0:
+            logger.info("[Elastic EP] Expert resharding completed!")
+
+    def _eplb_after_scale_up(
+        self,
+        old_ep_size: int,
+        new_ep_size: int,
+        global_expert_load: Optional[torch.Tensor],
+    ) -> None:
+        from vllm.distributed.parallel_state import get_ep_group
+
+        if get_ep_group().rank == 0:
+            logger.info("[Elastic EP] Starting expert resharding after scaling up...")
+        rank_mapping = {old_ep_rank: old_ep_rank for old_ep_rank in range(old_ep_size)}
+        assert self.model_runner.eplb_state is not None
         self.model_runner.eplb_state.rearrange(
             self.model_runner.model,
             execute_shuffle=True,
             global_expert_load=global_expert_load,
-            rank_mapping=rank_mapping)
+            rank_mapping=rank_mapping,
+        )
         if get_ep_group().rank == 0:
             logger.info("[Elastic EP] Expert resharding completed!")
 
     def _reconfigure_parallel_config(
-            self, reconfig_request: ReconfigureDistributedRequest) -> None:
+        self, reconfig_request: ReconfigureDistributedRequest
+    ) -> None:
         """
         Update parallel config with provided reconfig_request
         """
         parallel_config = self.vllm_config.parallel_config
-        parallel_config.data_parallel_size = \
-            reconfig_request.new_data_parallel_size
-        if reconfig_request.new_data_parallel_rank != \
-            ReconfigureRankType.KEEP_CURRENT_RANK:
-            parallel_config.data_parallel_rank = \
-                reconfig_request.new_data_parallel_rank
-        if reconfig_request.new_data_parallel_rank_local != \
-            ReconfigureRankType.KEEP_CURRENT_RANK:
-            parallel_config.data_parallel_rank_local = \
+        parallel_config.data_parallel_size = reconfig_request.new_data_parallel_size
+        if (
+            reconfig_request.new_data_parallel_rank
+            != ReconfigureRankType.KEEP_CURRENT_RANK
+        ):
+            parallel_config.data_parallel_rank = reconfig_request.new_data_parallel_rank
+        if (
+            reconfig_request.new_data_parallel_rank_local
+            != ReconfigureRankType.KEEP_CURRENT_RANK
+        ):
+            parallel_config.data_parallel_rank_local = (
                 reconfig_request.new_data_parallel_rank_local
-        parallel_config.data_parallel_master_ip = \
+            )
+        parallel_config.data_parallel_master_ip = (
             reconfig_request.new_data_parallel_master_ip
-        parallel_config.data_parallel_master_port = \
+        )
+        parallel_config.data_parallel_master_port = (
            reconfig_request.new_data_parallel_master_port
+        )
 
-    def _reconfigure_moe(self, old_ep_size: int,
-                         new_ep_size: int) -> Optional[torch.Tensor]:
+    def _reconfigure_moe(
+        self, old_ep_size: int, new_ep_size: int
+    ) -> Optional[torch.Tensor]:
        """
        Reconfigure MoE modules with provided reconfig_request
 
@@ -574,20 +615,26 @@ class Worker(WorkerBase):
         otherwise None
         """
         from vllm.distributed.parallel_state import (
-            get_dp_group, get_ep_group, prepare_communication_buffer_for_model)
-        from vllm.model_executor.layers.fused_moe.layer import (
-            FusedMoEParallelConfig)
+            get_dp_group,
+            get_ep_group,
+            prepare_communication_buffer_for_model,
+        )
+        from vllm.model_executor.layers.fused_moe.layer import FusedMoEParallelConfig
 
         parallel_config = self.vllm_config.parallel_config
         moe_modules = [
-            module for module in self.model_runner.model.modules()
-            if (module.__class__.__name__ == "FusedMoE"
-                or module.__class__.__name__ == "SharedFusedMoE")
+            module
+            for module in self.model_runner.model.modules()
+            if (
+                module.__class__.__name__ == "FusedMoE"
+                or module.__class__.__name__ == "SharedFusedMoE"
+            )
         ]
         num_local_experts = moe_modules[0].moe_config.num_local_experts
-        assert all(module.moe_config.num_local_experts == num_local_experts
-                   for module in moe_modules), (
-                       "All MoE modules must have the same number of experts")
+        assert all(
+            module.moe_config.num_local_experts == num_local_experts
+            for module in moe_modules
+        ), "All MoE modules must have the same number of experts"
         for module in moe_modules:
             module.moe_config.num_experts = num_local_experts * new_ep_size
             module.global_num_experts = module.moe_config.num_experts
@@ -600,49 +647,62 @@ class Worker(WorkerBase):
         if new_ep_size < old_ep_size:
             num_local_physical_experts = num_local_experts
             assert self.model_runner.eplb_state is not None
-            new_physical_experts = \
+            new_physical_experts = (
                 self.model_runner.eplb_state.physical_to_logical_map.shape[1]
+            )
             parallel_config.eplb_config.num_redundant_experts = (
-                new_physical_experts -
-                self.model_runner.eplb_state.logical_replica_count.shape[1])
+                new_physical_experts
+                - self.model_runner.eplb_state.logical_replica_count.shape[1]
+            )
             global_expert_load = None
         else:
-            num_local_physical_experts = torch.tensor([num_local_experts],
-                                                      dtype=torch.int32,
-                                                      device="cpu")
-            torch.distributed.broadcast(num_local_physical_experts,
-                                        group=get_ep_group().cpu_group,
-                                        group_src=0)
+            num_local_physical_experts = torch.tensor(
+                [num_local_experts], dtype=torch.int32, device="cpu"
+            )
+            torch.distributed.broadcast(
+                num_local_physical_experts, group=get_ep_group().cpu_group, group_src=0
+            )
             num_local_physical_experts = num_local_physical_experts.item()
             new_physical_experts = num_local_physical_experts * new_ep_size
             assert self.model_runner.eplb_state is not None
             global_expert_load = self.model_runner.eplb_state.rearrange(
-                self.model_runner.model, execute_shuffle=False)
+                self.model_runner.model, execute_shuffle=False
+            )
             parallel_config.eplb_config.num_redundant_experts = (
-                new_physical_experts - global_expert_load.shape[1])
+                new_physical_experts - global_expert_load.shape[1]
+            )
         prepare_communication_buffer_for_model(self.model_runner.model)
         self.model_runner.model.update_physical_experts_metadata(
             num_physical_experts=new_physical_experts,
-            num_local_physical_experts=num_local_physical_experts)
+            num_local_physical_experts=num_local_physical_experts,
+        )
         return global_expert_load
 
     def reinitialize_distributed(
-            self, reconfig_request: ReconfigureDistributedRequest) -> None:
+        self, reconfig_request: ReconfigureDistributedRequest
+    ) -> None:
         from vllm.config import set_current_vllm_config
         from vllm.distributed.parallel_state import (
-            cleanup_dist_env_and_memory, get_ep_group)
+            cleanup_dist_env_and_memory,
+            get_ep_group,
+        )
 
         old_ep_size = get_ep_group().world_size
         old_ep_rank = get_ep_group().rank
-        new_ep_size = reconfig_request.new_data_parallel_size * get_tp_group(
-        ).world_size * get_pp_group().world_size
+        new_ep_size = (
+            reconfig_request.new_data_parallel_size
+            * get_tp_group().world_size
+            * get_pp_group().world_size
+        )
         if new_ep_size < old_ep_size:
             self._eplb_before_scale_down(old_ep_size, new_ep_size)
 
         cleanup_dist_env_and_memory()
 
-        if reconfig_request.new_data_parallel_rank == \
-            ReconfigureRankType.SHUTDOWN_CURRENT_RANK:
+        if (
+            reconfig_request.new_data_parallel_rank
+            == ReconfigureRankType.SHUTDOWN_CURRENT_RANK
+        ):
             assert old_ep_rank >= new_ep_size
             # shutdown
             return
@@ -650,16 +710,18 @@ class Worker(WorkerBase):
         self._reconfigure_parallel_config(reconfig_request)
 
         with set_current_vllm_config(self.vllm_config):
-            init_worker_distributed_environment(self.vllm_config, self.rank,
-                                                self.distributed_init_method,
-                                                self.local_rank)
+            init_worker_distributed_environment(
+                self.vllm_config,
+                self.rank,
+                self.distributed_init_method,
+                self.local_rank,
+            )
 
         global_expert_load = self._reconfigure_moe(old_ep_size, new_ep_size)
 
         if new_ep_size > old_ep_size:
             assert global_expert_load is not None
-            self._eplb_after_scale_up(old_ep_size, new_ep_size,
-                                      global_expert_load)
+            self._eplb_after_scale_up(old_ep_size, new_ep_size, global_expert_load)
 
     def save_sharded_state(
         self,
@@ -668,6 +730,7 @@ class Worker(WorkerBase):
         max_size: Optional[int] = None,
     ) -> None:
         from vllm.model_executor.model_loader import ShardedStateLoader
+
         ShardedStateLoader.save_model(
             self.model_runner.model,
             path,
@@ -680,7 +743,8 @@ class Worker(WorkerBase):
         tensorizer_config: "TensorizerConfig",
     ) -> None:
         self.model_runner.save_tensorized_model(
-            tensorizer_config=tensorizer_config, )
+            tensorizer_config=tensorizer_config,
+        )
 
     def shutdown(self) -> None:
         if runner := getattr(self, "model_runner", None):
@@ -698,12 +762,14 @@ def init_worker_distributed_environment(
     parallel_config = vllm_config.parallel_config
     set_custom_all_reduce(not parallel_config.disable_custom_all_reduce)
 
-    init_distributed_environment(parallel_config.world_size, rank,
-                                 distributed_init_method, local_rank, backend)
+    init_distributed_environment(
+        parallel_config.world_size, rank, distributed_init_method, local_rank, backend
+    )
 
     ensure_model_parallel_initialized(
         parallel_config.tensor_parallel_size,
         parallel_config.pipeline_parallel_size,
-        parallel_config.decode_context_parallel_size)
+        parallel_config.decode_context_parallel_size,
+    )
 
     ensure_kv_transfer_initialized(vllm_config)