[Hardware][XPU] using current_platform.is_xpu (#9605)
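This change drops the ad-hoc is_xpu() helper from vllm/utils.py and routes XPU detection through the platform abstraction, the same way the existing current_platform.is_cuda_alike() and current_platform.is_neuron() checks already work. A minimal sketch of the call-site pattern after the change; the import path and the helper function below are illustrative assumptions, not part of this diff:

# Illustrative sketch only: assumes the vllm.platforms entry point;
# the diff below shows the actual call sites that were migrated.
import torch
from vllm.platforms import current_platform

def reset_device_peak_stats(device) -> None:
    # Hypothetical helper: select the backend via platform checks
    # instead of the removed utils-level is_xpu().
    if current_platform.is_cuda_alike():
        torch.cuda.reset_peak_memory_stats(device)
    elif current_platform.is_xpu():
        torch.xpu.reset_peak_memory_stats(device)

Centralizing the check in current_platform keeps the IPEX/torch.xpu probing logic in one place rather than duplicated in per-utility helpers.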
@@ -327,29 +327,6 @@ def is_openvino() -> bool:
     return False


-@lru_cache(maxsize=None)
-def is_xpu() -> bool:
-    from importlib.metadata import PackageNotFoundError, version
-    try:
-        is_xpu_flag = "xpu" in version("vllm")
-    except PackageNotFoundError:
-        return False
-    # vllm is not build with xpu
-    if not is_xpu_flag:
-        return False
-    try:
-        import intel_extension_for_pytorch as ipex  # noqa: F401
-        _import_ipex = True
-    except ImportError as e:
-        logger.warning("Import Error for IPEX: %s", e.msg)
-        _import_ipex = False
-    # ipex dependency is not ready
-    if not _import_ipex:
-        logger.warning("not found ipex lib")
-        return False
-    return hasattr(torch, "xpu") and torch.xpu.is_available()
-
-
 @lru_cache(maxsize=None)
 def get_max_shared_memory_bytes(gpu: int = 0) -> int:
     """Returns the maximum shared memory per thread block in bytes."""
@@ -379,7 +356,7 @@ def seed_everything(seed: int) -> None:
     if current_platform.is_cuda_alike():
         torch.cuda.manual_seed_all(seed)

-    if is_xpu():
+    if current_platform.is_xpu():
         torch.xpu.manual_seed_all(seed)


@@ -774,7 +751,7 @@ def is_pin_memory_available() -> bool:
         print_warning_once("Using 'pin_memory=False' as WSL is detected. "
                            "This may slow down the performance.")
         return False
-    elif is_xpu():
+    elif current_platform.is_xpu():
         print_warning_once("Pin memory is not supported on XPU.")
         return False
     elif current_platform.is_neuron():
@@ -795,7 +772,7 @@ class DeviceMemoryProfiler:
         if current_platform.is_cuda_alike():
             torch.cuda.reset_peak_memory_stats(self.device)
             mem = torch.cuda.max_memory_allocated(self.device)
-        elif is_xpu():
+        elif current_platform.is_xpu():
             torch.xpu.reset_peak_memory_stats(self.device)  # type: ignore
             mem = torch.xpu.max_memory_allocated(self.device)  # type: ignore
         return mem