[misc][cuda] use NVML to avoid accidental CUDA initialization (#6007)

This commit is contained in:
youkaichao
2024-06-30 20:07:34 -07:00
committed by GitHub
parent af9ad46fca
commit 614aa51203
13 changed files with 86 additions and 68 deletions

View File

@@ -8,12 +8,13 @@ import pytest
import torch
from vllm import _custom_ops as ops
+from vllm.utils import get_device_capability_stateless
CUDA_DEVICES = [
f"cuda:{i}" for i in range(1 if torch.cuda.device_count() == 1 else 2)
]
-capability = torch.cuda.get_device_capability()
+capability = get_device_capability_stateless()
capability = capability[0] * 10 + capability[1]