[misc][cuda] use nvml to avoid accidental cuda initialization (#6007)

Author: youkaichao
Date: 2024-06-30 20:07:34 -07:00
Committed by: GitHub
Parent: af9ad46fca
Commit: 614aa51203

13 changed files with 86 additions and 68 deletions
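For context: a bare torch.cuda.get_device_capability() call lazily initializes a CUDA context in the calling process, which is the "accidental cuda initialization" the title refers to; once a context exists, later fork-based worker startup can break. A quick way to see the side effect (this snippet is illustrative and assumes a CUDA-capable machine with PyTorch installed):

import torch

assert not torch.cuda.is_initialized()
torch.cuda.get_device_capability()  # lazily creates a CUDA context
assert torch.cuda.is_initialized()  # the process is no longer CUDA-free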


@@ -10,7 +10,7 @@ from vllm.model_executor.layers.linear import LinearBase, LinearMethodBase
 from vllm.model_executor.layers.quantization.base_config import (
     QuantizationConfig, QuantizeMethodBase)
 from vllm.model_executor.utils import set_weight_attrs
-from vllm.utils import print_warning_once
+from vllm.utils import get_device_capability_stateless, print_warning_once

 ACTIVATION_SCHEMES = ["static", "dynamic"]

@@ -18,7 +18,7 @@ logger = init_logger(__name__)

 def cutlass_fp8_supported() -> bool:
-    capability = torch.cuda.get_device_capability()
+    capability = get_device_capability_stateless()
     capability = capability[0] * 10 + capability[1]
     return ops.cutlass_scaled_mm_supports_fp8(capability)
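The point of the change: NVML can answer the compute-capability question by talking to the driver directly, so no CUDA context is ever created. A minimal sketch of what such a helper can look like using the pynvml bindings (the exact code in vllm/utils.py may differ; the error handling here is illustrative):

import pynvml  # NVIDIA Management Library bindings (package: nvidia-ml-py)

def get_device_capability_stateless(device_id: int = 0) -> tuple:
    """Return (major, minor) compute capability without initializing CUDA."""
    pynvml.nvmlInit()
    try:
        handle = pynvml.nvmlDeviceGetHandleByIndex(device_id)
        # NVML queries the driver directly; no CUDA context is created.
        return pynvml.nvmlDeviceGetCudaComputeCapability(handle)
    finally:
        pynvml.nvmlShutdown()

The call site in cutlass_fp8_supported() then packs the tuple into a single integer, e.g. (8, 9) becomes 89, which is the form ops.cutlass_scaled_mm_supports_fp8() expects.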