[platforms] absorb worker cls difference into platforms folder (#10555)

Signed-off-by: youkaichao <youkaichao@gmail.com>
Co-authored-by: Nick Hill <nhill@redhat.com>
Author: youkaichao
Date: 2024-11-21 21:00:32 -08:00
Committed by: GitHub
Parent: 446c7806b2
Commit: a111d0151f
21 changed files with 272 additions and 282 deletions
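
Every hunk below follows the same pattern: each Platform subclass gains (or
extends) a check_and_update_config classmethod that rewrites
parallel_config.worker_cls from the "auto" placeholder to the fully qualified
path of that platform's worker class, so worker selection no longer needs
per-device branching outside the platforms folder. A minimal self-contained
sketch of the pattern (simplified stand-in config classes, not the real vLLM
ones):

    from dataclasses import dataclass, field


    @dataclass
    class ParallelConfig:
        worker_cls: str = "auto"


    @dataclass
    class VllmConfig:
        parallel_config: ParallelConfig = field(default_factory=ParallelConfig)


    class Platform:
        @classmethod
        def check_and_update_config(cls, vllm_config: VllmConfig) -> None:
            """Default hook; platforms override it to validate and patch
            the config for their device."""


    class CpuPlatform(Platform):
        @classmethod
        def check_and_update_config(cls, vllm_config: VllmConfig) -> None:
            parallel_config = vllm_config.parallel_config
            if parallel_config.worker_cls == "auto":
                parallel_config.worker_cls = "vllm.worker.cpu_worker.CPUWorker"


    config = VllmConfig()
    CpuPlatform.check_and_update_config(config)
    assert config.parallel_config.worker_cls == "vllm.worker.cpu_worker.CPUWorker"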

vllm/platforms/cpu.py

@@ -84,3 +84,5 @@ class CpuPlatform(Platform):
                             "distributed executor backend."),
                            parallel_config.distributed_executor_backend)
             parallel_config.distributed_executor_backend = "mp"
+        if parallel_config.worker_cls == "auto":
+            parallel_config.worker_cls = "vllm.worker.cpu_worker.CPUWorker"

vllm/platforms/cuda.py

@@ -4,7 +4,7 @@ pynvml. However, it should not initialize cuda context.
 
 import os
 from functools import lru_cache, wraps
-from typing import Callable, List, Tuple, TypeVar
+from typing import TYPE_CHECKING, Callable, List, Tuple, TypeVar
 
 import pynvml
 import torch
@@ -16,6 +16,11 @@ from vllm.logger import init_logger
 
 from .interface import DeviceCapability, Platform, PlatformEnum
 
+if TYPE_CHECKING:
+    from vllm.config import VllmConfig
+else:
+    VllmConfig = None
+
 logger = init_logger(__name__)
 
 _P = ParamSpec("_P")
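
The if TYPE_CHECKING / else: VllmConfig = None idiom, repeated in each platform
module below, imports VllmConfig only for static type checkers. At runtime the
import is skipped, sidestepping a circular import between vllm.config and
vllm.platforms, but the name stays bound, which matters because the annotation
on check_and_update_config is evaluated when the method is defined. A runnable
illustration (heavy_module is an invented name for the example):

    from typing import TYPE_CHECKING

    if TYPE_CHECKING:
        # Seen only by type checkers such as mypy or pyright; never
        # executed, so it cannot introduce an import cycle.
        from heavy_module import HeavyConfig  # hypothetical module
    else:
        # Keep the name bound at runtime: "cfg: HeavyConfig" below is
        # evaluated when the function is defined, so the name must exist.
        HeavyConfig = None


    def configure(cfg: HeavyConfig) -> None:
        print(cfg)


    configure("runs fine even though heavy_module does not exist")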
@@ -157,3 +162,17 @@ class CudaPlatform(Platform):
                 " machine has no NVLink equipped.")
             return False
         return True
+
+    @classmethod
+    def check_and_update_config(cls, vllm_config: VllmConfig) -> None:
+        parallel_config = vllm_config.parallel_config
+        scheduler_config = vllm_config.scheduler_config
+        if parallel_config.worker_cls == "auto":
+            if scheduler_config.is_multi_step:
+                parallel_config.worker_cls = \
+                    "vllm.worker.multi_step_worker.MultiStepWorker"
+            elif vllm_config.speculative_config:
+                parallel_config.worker_cls = \
+                    "vllm.spec_decode.spec_decode_worker.create_spec_worker"
+            else:
+                parallel_config.worker_cls = "vllm.worker.worker.Worker"
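
Note that worker_cls is assigned as a dotted string rather than a class
object, so the platform modules never import worker code (which can pull in
heavy, device-specific dependencies). The executor side later resolves the
string via dynamic import; a minimal importlib-based resolver that is
equivalent in spirit (vLLM ships its own helper, this one is only
illustrative):

    import importlib


    def resolve_obj_by_qualname(qualname: str):
        """Resolve a dotted path like 'pkg.module.Attr' to the object."""
        module_name, _, attr_name = qualname.rpartition(".")
        module = importlib.import_module(module_name)
        return getattr(module, attr_name)


    # Demonstrated with a stdlib path so the snippet runs anywhere:
    OrderedDict = resolve_obj_by_qualname("collections.OrderedDict")
    assert OrderedDict.__name__ == "OrderedDict"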

vllm/platforms/hpu.py

@@ -1,7 +1,14 @@
+from typing import TYPE_CHECKING
+
 import torch
 
 from .interface import Platform, PlatformEnum, _Backend
 
+if TYPE_CHECKING:
+    from vllm.config import VllmConfig
+else:
+    VllmConfig = None
+
 
 class HpuPlatform(Platform):
     _enum = PlatformEnum.HPU
@@ -14,3 +21,19 @@ class HpuPlatform(Platform):
     @staticmethod
     def inference_mode():
         return torch.no_grad()
+
+    @classmethod
+    def check_and_update_config(cls, vllm_config: VllmConfig) -> None:
+        scheduler_config = vllm_config.scheduler_config
+
+        if scheduler_config.is_multi_step:
+            raise NotImplementedError(
+                "Multi-step execution is not implemented for HPU")
+
+        if vllm_config.speculative_config is not None:
+            raise NotImplementedError(
+                "Speculative decoding is not implemented for HPU")
+
+        parallel_config = vllm_config.parallel_config
+        if parallel_config.worker_cls == "auto":
+            parallel_config.worker_cls = "vllm.worker.hpu_worker.HPUWorker"

vllm/platforms/neuron.py

@@ -1,5 +1,12 @@
+from typing import TYPE_CHECKING
+
 from .interface import Platform, PlatformEnum
 
+if TYPE_CHECKING:
+    from vllm.config import VllmConfig
+else:
+    VllmConfig = None
+
 
 class NeuronPlatform(Platform):
     _enum = PlatformEnum.NEURON
@@ -8,3 +15,10 @@ class NeuronPlatform(Platform):
     @classmethod
     def get_device_name(cls, device_id: int = 0) -> str:
         return "neuron"
+
+    @classmethod
+    def check_and_update_config(cls, vllm_config: VllmConfig) -> None:
+        parallel_config = vllm_config.parallel_config
+        if parallel_config.worker_cls == "auto":
+            parallel_config.worker_cls = \
+                "vllm.worker.neuron_worker.NeuronWorker"

vllm/platforms/openvino.py

@@ -1,3 +1,5 @@
+from typing import TYPE_CHECKING
+
 import torch
 
 import vllm.envs as envs
@@ -5,6 +7,11 @@ from vllm.logger import init_logger
 
 from .interface import Platform, PlatformEnum, _Backend
 
+if TYPE_CHECKING:
+    from vllm.config import VllmConfig
+else:
+    VllmConfig = None
+
 logger = init_logger(__name__)
@@ -38,3 +45,14 @@ class OpenVinoPlatform(Platform):
     def is_pin_memory_available(self) -> bool:
         logger.warning("Pin memory is not supported on OpenViNO.")
         return False
+
+    @classmethod
+    def check_and_update_config(cls, vllm_config: VllmConfig) -> None:
+        parallel_config = vllm_config.parallel_config
+        assert (
+            parallel_config.world_size == 1
+        ), "OpenVINOExecutor only supports single CPU socket currently."
+
+        if parallel_config.worker_cls == "auto":
+            parallel_config.worker_cls = \
+                "vllm.worker.openvino_worker.OpenVINOWorker"

vllm/platforms/rocm.py

@@ -1,5 +1,6 @@
 import os
 from functools import lru_cache
+from typing import TYPE_CHECKING
 
 import torch
@@ -7,6 +8,11 @@ from vllm.logger import init_logger
 
 from .interface import DeviceCapability, Platform, PlatformEnum, _Backend
 
+if TYPE_CHECKING:
+    from vllm.config import VllmConfig
+else:
+    VllmConfig = None
+
 logger = init_logger(__name__)
 
 try:
@@ -58,3 +64,17 @@ class RocmPlatform(Platform):
     def get_device_total_memory(cls, device_id: int = 0) -> int:
         device_props = torch.cuda.get_device_properties(device_id)
         return device_props.total_memory
+
+    @classmethod
+    def check_and_update_config(cls, vllm_config: VllmConfig) -> None:
+        parallel_config = vllm_config.parallel_config
+        scheduler_config = vllm_config.scheduler_config
+        if parallel_config.worker_cls == "auto":
+            if scheduler_config.is_multi_step:
+                parallel_config.worker_cls = \
+                    "vllm.worker.multi_step_worker.MultiStepWorker"
+            elif vllm_config.speculative_config:
+                parallel_config.worker_cls = \
+                    "vllm.spec_decode.spec_decode_worker.create_spec_worker"
+            else:
+                parallel_config.worker_cls = "vllm.worker.worker.Worker"

vllm/platforms/tpu.py

@@ -48,3 +48,15 @@ class TpuPlatform(Platform):
         if compilation_config.backend == "":
             compilation_config.backend = "openxla"
 
+        assert vllm_config.speculative_config is None, \
+            "TPU does not support speculative decoding"
+
+        parallel_config = vllm_config.parallel_config
+        scheduler_config = vllm_config.scheduler_config
+        if parallel_config.worker_cls == "auto":
+            if scheduler_config.is_multi_step:
+                parallel_config.worker_cls = \
+                    "vllm.worker.multi_step_tpu_worker.MultiStepTPUWorker"
+            else:
+                parallel_config.worker_cls = "vllm.worker.tpu_worker.TPUWorker"

vllm/platforms/xpu.py

@@ -57,6 +57,10 @@ class XPUPlatform(Platform):
                 "mode.")
             model_config.enforce_eager = True
 
+        if vllm_config.speculative_config is not None:
+            raise NotImplementedError(
+                "XPU does not support speculative decoding")
+
         # check and update parallel config
         parallel_config = vllm_config.parallel_config
         if (parallel_config.distributed_executor_backend is not None
@@ -66,3 +70,5 @@ class XPUPlatform(Platform):
                 " executor backend.",
                 parallel_config.distributed_executor_backend)
             parallel_config.distributed_executor_backend = "ray"
+        if parallel_config.worker_cls == "auto":
+            parallel_config.worker_cls = "vllm.worker.xpu_worker.XPUWorker"
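
With all eight platforms implementing the same hook, the caller side collapses
to a single dispatch through current_platform. A sketch of the shape of that
call site (assuming vllm is installed; the exact location in vLLM's
config/engine initialization is not part of this diff):

    from vllm.config import VllmConfig
    from vllm.platforms import current_platform  # CpuPlatform, CudaPlatform, ...


    def finalize_config(vllm_config: VllmConfig) -> VllmConfig:
        # One call replaces per-device if/elif chains: the active Platform
        # subclass rejects unsupported features (e.g. speculative decoding
        # on HPU/XPU) and fills in parallel_config.worker_cls.
        current_platform.check_and_update_config(vllm_config)
        assert vllm_config.parallel_config.worker_cls != "auto"
        return vllm_config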