[Platform] platform agnostic for EngineArgs initialization (#11225)

Signed-off-by: wangxiyuan <wangxiyuan1007@gmail.com>
This commit is contained in:
wangxiyuan
2024-12-17 14:11:06 +08:00
committed by GitHub
parent 59c9b6ebeb
commit e88db68cf5
9 changed files with 37 additions and 6 deletions

View File

@@ -112,9 +112,7 @@ class EngineArgs:
     pipeline_parallel_size: int = 1
     tensor_parallel_size: int = 1
     max_parallel_loading_workers: Optional[int] = None
-    # NOTE(kzawora): default block size for Gaudi should be 128
-    # smaller sizes still work, but very inefficiently
-    block_size: int = 16 if not current_platform.is_hpu() else 128
+    block_size: Optional[int] = None
     enable_prefix_caching: Optional[bool] = None
     disable_sliding_window: bool = False
     use_v2_block_manager: bool = True
@@ -1036,9 +1034,7 @@ class EngineArgs:
             self.enable_prefix_caching = False
         cache_config = CacheConfig(
-            # neuron needs block_size = max_model_len
-            block_size=self.block_size if self.device != "neuron" else
-            (self.max_model_len if self.max_model_len is not None else 0),
+            block_size=self.block_size,
             gpu_memory_utilization=self.gpu_memory_utilization,
             swap_space=self.swap_space,
             cache_dtype=self.kv_cache_dtype,

View File

@@ -60,6 +60,9 @@ class CpuPlatform(Platform):
         cache_config = vllm_config.cache_config
+        if cache_config and cache_config.block_size is None:
+            cache_config.block_size = 16
         kv_cache_space = envs.VLLM_CPU_KVCACHE_SPACE
         if kv_cache_space >= 0:

View File

@@ -137,6 +137,10 @@ class CudaPlatformBase(Platform):
         else:
             parallel_config.worker_cls = "vllm.worker.worker.Worker"
+        cache_config = vllm_config.cache_config
+        if cache_config and cache_config.block_size is None:
+            cache_config.block_size = 16

 # NVML utils
 # Note that NVML is not affected by `CUDA_VISIBLE_DEVICES`,

View File

@@ -48,6 +48,12 @@ class HpuPlatform(Platform):
         if parallel_config.worker_cls == "auto":
             parallel_config.worker_cls = "vllm.worker.hpu_worker.HPUWorker"
+        # NOTE(kzawora): default block size for Gaudi should be 128
+        # smaller sizes still work, but very inefficiently
+        cache_config = vllm_config.cache_config
+        if cache_config and cache_config.block_size is None:
+            cache_config.block_size = 128

     @classmethod
     def is_pin_memory_available(cls):
         logger.warning("Pin memory is not supported on HPU.")

View File

@@ -33,6 +33,12 @@ class NeuronPlatform(Platform):
             parallel_config.worker_cls = \
                 "vllm.worker.neuron_worker.NeuronWorker"
+        cache_config = vllm_config.cache_config
+        if cache_config:
+            # neuron needs block_size = max_model_len
+            vllm_config.cache_config.block_size = \
+                vllm_config.model_config.max_model_len

     @classmethod
     def is_pin_memory_available(cls) -> bool:
         logger.warning("Pin memory is not supported on Neuron.")

View File

@@ -87,6 +87,9 @@ class OpenVinoPlatform(Platform):
         # check and update cache config
         ov_core = ov.Core()
         cache_config = vllm_config.cache_config
+        if cache_config and cache_config.block_size is None:
+            cache_config.block_size = 16
         if envs.VLLM_OPENVINO_CPU_KV_CACHE_PRECISION == "u8":
             if not OpenVinoPlatform.is_openvino_cpu():
                 logger.info("VLLM_OPENVINO_CPU_KV_CACHE_PRECISION is"

View File

@@ -84,6 +84,10 @@ class RocmPlatform(Platform):
     @classmethod
     def check_and_update_config(cls, vllm_config: VllmConfig) -> None:
+        cache_config = vllm_config.cache_config
+        if cache_config and cache_config.block_size is None:
+            cache_config.block_size = 16
         parallel_config = vllm_config.parallel_config
         scheduler_config = vllm_config.scheduler_config
         if parallel_config.worker_cls == "auto":

View File

@@ -46,6 +46,11 @@ class TpuPlatform(Platform):
     @classmethod
     def check_and_update_config(cls, vllm_config: VllmConfig) -> None:
         from vllm.config import CompilationLevel
+        cache_config = vllm_config.cache_config
+        if cache_config and cache_config.block_size is None:
+            cache_config.block_size = 16
         compilation_config = vllm_config.compilation_config
         if compilation_config.level == CompilationLevel.NO_COMPILATION:
             # TPU does not support NO_COMPILATION

View File

@@ -51,6 +51,10 @@ class XPUPlatform(Platform):
     @classmethod
     def check_and_update_config(cls, vllm_config: VllmConfig) -> None:
+        cache_config = vllm_config.cache_config
+        if cache_config and cache_config.block_size is None:
+            cache_config.block_size = 16
         # check and update model config
         model_config = vllm_config.model_config
         if model_config.dtype == torch.bfloat16: