[bugfix] [ROCm] Fix premature CUDA initialization in platform detection (#33941)
Signed-off-by: Kourosh Hakhamaneshi <kourosh@anyscale.com>
This commit is contained in:
committed by
GitHub
parent
207c3a0c20
commit
4a2d00eafd
20
tests/cuda/scripts/check_platform_no_cuda_init.py
Normal file
20
tests/cuda/scripts/check_platform_no_cuda_init.py
Normal file
@@ -0,0 +1,20 @@
|
||||
#!/usr/bin/env python3
# SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
"""Check that vllm.platforms import does not initialize CUDA.

Intended to run as a standalone CI script: exits nonzero with a
diagnostic message if importing ``vllm.platforms`` (platform detection)
eagerly initializes the CUDA/ROCm runtime.
"""

import os
import sys

# Clear device-visibility variables so platform detection is not
# influenced by the environment of the invoking shell / CI job.
for key in ["CUDA_VISIBLE_DEVICES", "HIP_VISIBLE_DEVICES", "ROCR_VISIBLE_DEVICES"]:
    os.environ.pop(key, None)

import torch  # noqa: E402

# Sanity check: importing torch alone must not have initialized CUDA,
# otherwise the check below would be meaningless.
# NOTE: explicit check + sys.exit rather than `assert`, which is
# silently stripped under `python -O` and would let the check pass.
if torch.cuda.is_initialized():
    sys.exit("CUDA initialized before import")

from vllm.platforms import current_platform  # noqa: E402

# The actual check: resolving the current platform must be lazy and
# must not touch the CUDA/ROCm runtime.
if torch.cuda.is_initialized():
    sys.exit(f"CUDA was initialized during vllm.platforms import on {current_platform}")

print("OK")
Reference in New Issue
Block a user