- **Add SPDX license headers to python source files**
- **Check for SPDX headers using pre-commit**

commit 9d7ef44c3cfb72ca4c32e1c677d99259d10d4745
Author: Russell Bryant <rbryant@redhat.com>
Date:   Fri Jan 31 14:18:24 2025 -0500

    Add SPDX license headers to python source files

    This commit adds SPDX license headers to python source files, as
    recommended to the project by the Linux Foundation. These headers
    provide a concise, human- and machine-readable way to communicate the
    license of each source file. They remove ambiguity about the license
    of the code and can be consumed by tools that manage license
    compliance.

    The Linux Foundation runs license scans against the codebase to help
    ensure we are in compliance with the licenses of the code we use,
    including dependencies. Having these headers in place helps that tool
    do its job.

    More information can be found on the SPDX site:

    - https://spdx.dev/learn/handling-license-info/

    Signed-off-by: Russell Bryant <rbryant@redhat.com>

commit 5a1cf1cb3b80759131c73f6a9dddebccac039dea
Author: Russell Bryant <rbryant@redhat.com>
Date:   Fri Jan 31 14:36:32 2025 -0500

    Check for SPDX headers using pre-commit

    Signed-off-by: Russell Bryant <rbryant@redhat.com>

---------

Signed-off-by: Russell Bryant <rbryant@redhat.com>
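The second commit wires the header check into pre-commit, but the hook itself is not shown in this description. As a rough sketch of how such a check might work, a `repo: local` pre-commit hook could invoke a small checker script along these lines (the script shape and the empty-file allowance are assumptions for illustration, not necessarily the project's actual hook):

```python
#!/usr/bin/env python3
# Hypothetical SPDX header checker run as a local pre-commit hook;
# pre-commit passes the staged file names as command-line arguments.
import sys

SPDX_HEADER = "# SPDX-License-Identifier: Apache-2.0"


def has_spdx_header(path: str) -> bool:
    with open(path, encoding="utf-8") as f:
        lines = f.read().splitlines()
    if not lines:  # allow empty files such as bare __init__.py
        return True
    if lines[0].startswith("#!"):  # skip an optional shebang line
        lines = lines[1:]
    return bool(lines) and lines[0] == SPDX_HEADER


def main() -> int:
    missing = [path for path in sys.argv[1:] if not has_spdx_header(path)]
    for path in missing:
        print(f"{path}: missing '{SPDX_HEADER}'")
    return 1 if missing else 0


if __name__ == "__main__":
    sys.exit(main())
```

With `types: [python]` in the hook definition, pre-commit would run this only against staged Python files, failing the commit when any header is missing.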
138 lines · 5.2 KiB · Python
# SPDX-License-Identifier: Apache-2.0

from typing import TYPE_CHECKING, Optional

import torch

from vllm.logger import init_logger

from .interface import DeviceCapability, Platform, PlatformEnum, _Backend

if TYPE_CHECKING:
    from vllm.config import VllmConfig
else:
    VllmConfig = None

logger = init_logger(__name__)


class XPUPlatform(Platform):
    _enum = PlatformEnum.XPU
    device_name: str = "xpu"
    device_type: str = "xpu"
    dispatch_key: str = "XPU"
    # Intel XPU's device key is "GPU" for Ray.
    # see https://github.com/ray-project/ray/blob/6a5eb5865eeb9ccf058a79b44f107e327e360673/python/ray/_private/accelerators/intel_gpu.py#L20 # noqa: E501
    ray_device_key: str = "GPU"
    # Restricts device visibility for a process,
    # e.g. ONEAPI_DEVICE_SELECTOR="level_zero:0".
    device_control_env_var: str = "ONEAPI_DEVICE_SELECTOR"

    @classmethod
    def get_attn_backend_cls(cls, selected_backend: _Backend, head_size: int,
                             dtype: torch.dtype, kv_cache_dtype: Optional[str],
                             block_size: int, use_v1: bool,
                             use_mla: bool) -> str:
        if selected_backend != _Backend.IPEX:
            logger.info("Cannot use %s backend on XPU.", selected_backend)
        logger.info("Using IPEX attention backend.")
        return "vllm.attention.backends.ipex_attn.IpexAttnBackend"

    @staticmethod
    def get_device_capability(device_id: int = 0) -> DeviceCapability:
        major, minor, *_ = torch.xpu.get_device_capability(
            device_id)['version'].split('.')
        return DeviceCapability(major=int(major), minor=int(minor))

    @staticmethod
    def get_device_name(device_id: int = 0) -> str:
        return torch.xpu.get_device_name(device_id)

    @classmethod
    def get_device_total_memory(cls, device_id: int = 0) -> int:
        device_props = torch.xpu.get_device_properties(device_id)
        return device_props.total_memory

    @classmethod
    def is_async_output_supported(cls, enforce_eager: Optional[bool]) -> bool:
        return True

    @staticmethod
    def inference_mode():
        return torch.no_grad()

    @classmethod
    def check_and_update_config(cls, vllm_config: VllmConfig) -> None:
        cache_config = vllm_config.cache_config
        if cache_config and cache_config.block_size is None:
            cache_config.block_size = 16

        # check and update model config
        model_config = vllm_config.model_config
        if model_config.dtype == torch.bfloat16:
            bf16_supported = cls.device_support_bf16()
            if not bf16_supported:
                logger.warning(
                    "bfloat16 is only supported on Intel Data Center GPU; "
                    "Intel Arc GPU is not supported yet. Your device is %s, "
                    "which is not supported; falling back to float16.",
                    cls.get_device_name())
                model_config.dtype = torch.float16
        if not model_config.enforce_eager:
            logger.warning(
                "CUDA graph is not supported on XPU, falling back to eager "
                "mode.")
            model_config.enforce_eager = True

        if vllm_config.speculative_config is not None:
            raise NotImplementedError(
                "XPU does not support speculative decoding")

        if vllm_config.device_config is not None:
            assert vllm_config.device_config.device_type == "xpu"

        # check and update parallel config
        parallel_config = vllm_config.parallel_config
        if parallel_config.worker_cls == "auto":
            parallel_config.worker_cls = "vllm.worker.xpu_worker.XPUWorker"

        if parallel_config.distributed_executor_backend is None:
            parallel_config.distributed_executor_backend = "ray"
        elif parallel_config.distributed_executor_backend == "mp":
            # FIXME(kunshang):
            # spawn requires the entry point to be guarded by
            # `if __name__ == '__main__':`, and fork is not supported
            # for starting new processes on XPU.
            logger.error(
                "Both start methods (spawn and fork) have issues "
                "on XPU when using the mp backend; setting it to ray instead.")
            parallel_config.distributed_executor_backend = "ray"
        elif parallel_config.distributed_executor_backend != "ray":
            logger.warning(
                "%s is not supported on XPU; falling back to the ray "
                "distributed executor backend.",
                parallel_config.distributed_executor_backend)
            parallel_config.distributed_executor_backend = "ray"

    @classmethod
    def is_pin_memory_available(cls):
        logger.warning("Pin memory is not supported on XPU.")
        return False

    @classmethod
    def get_current_memory_usage(cls,
                                 device: Optional[torch.types.Device] = None
                                 ) -> float:
        torch.xpu.reset_peak_memory_stats(device)
        return torch.xpu.max_memory_allocated(device)

    @classmethod
    def device_support_bf16(cls) -> bool:
        device_name = cls.get_device_name().lower()
        if "arc" in device_name:
            return False
        elif "data center gpu" in device_name:
            return True
        else:
            logger.warning("Unknown device name %s; defaulting to float16.",
                           device_name)
            return False
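
For reference, the device-query helpers above can be exercised directly; a minimal smoke test might look like the sketch below, assuming an XPU-enabled PyTorch build with a visible Intel GPU (in normal operation the engine calls these hooks itself):

```python
# Minimal smoke test for XPUPlatform's device-query helpers.
# Assumes torch was built with XPU support and a device is visible.
from vllm.platforms.xpu import XPUPlatform

cap = XPUPlatform.get_device_capability()
print(f"device:         {XPUPlatform.get_device_name()}")
print(f"capability:     {cap.major}.{cap.minor}")
print(f"total memory:   {XPUPlatform.get_device_total_memory() / 2**30:.1f} GiB")
print(f"bf16 supported: {XPUPlatform.device_support_bf16()}")
```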