[platform] add ray_device_key (#11948)
Signed-off-by: youkaichao <youkaichao@gmail.com>
This commit is contained in:
@@ -8,6 +8,7 @@ import msgspec
|
||||
from vllm.config import ParallelConfig
|
||||
from vllm.executor.msgspec_utils import decode_hook, encode_hook
|
||||
from vllm.logger import init_logger
|
||||
from vllm.platforms import current_platform
|
||||
from vllm.sequence import ExecuteModelRequest, IntermediateTensors
|
||||
from vllm.utils import get_ip
|
||||
from vllm.worker.worker_base import WorkerWrapperBase
|
||||
@@ -47,7 +48,12 @@ try:
|
||||
|
||||
def get_node_and_gpu_ids(self) -> Tuple[str, List[int]]:
    """Return this worker's Ray node id and its assigned accelerator ids.

    The accelerator ids are looked up via Ray's runtime context using the
    platform-specific device key (e.g. "GPU", "TPU", "HPU") exposed by
    ``current_platform.ray_device_key``.

    Returns:
        A tuple ``(node_id, gpu_ids)`` where ``node_id`` is the Ray node
        id string and ``gpu_ids`` is the list of accelerator ids Ray has
        assigned to this worker.

    Raises:
        RuntimeError: if the current platform does not define a Ray
            device key, i.e. it is not supported under Ray.
    """
    node_id = ray.get_runtime_context().get_node_id()
    device_key = current_platform.ray_device_key
    if not device_key:
        # Fix: the original passed logger-style (fmt, arg) pairs to
        # RuntimeError, which does not interpolate "%s" — the exception
        # message came out as a tuple. Format the message eagerly,
        # matching the f-string style used in initialize_ray_cluster.
        raise RuntimeError(
            f"current platform {current_platform.device_name} does not "
            "support ray.")
    gpu_ids = ray.get_runtime_context().get_accelerator_ids(
    )[device_key]
    return node_id, gpu_ids
|
||||
|
||||
def execute_model_spmd(
|
||||
@@ -249,11 +255,12 @@ def initialize_ray_cluster(
|
||||
# Placement group is already set.
|
||||
return
|
||||
|
||||
device_str = "GPU"
|
||||
if current_platform.is_tpu():
|
||||
device_str = "TPU"
|
||||
elif current_platform.is_hpu():
|
||||
device_str = 'HPU'
|
||||
device_str = current_platform.ray_device_key
|
||||
if not device_str:
|
||||
raise ValueError(
|
||||
f"current platform {current_platform.device_name} does not "
|
||||
"support ray.")
|
||||
|
||||
# Create placement group for worker processes
|
||||
current_placement_group = ray.util.get_current_placement_group()
|
||||
if current_placement_group:
|
||||
|
||||
Reference in New Issue
Block a user