[platform] Move get_cu_count to utils (#27005)

Signed-off-by: wangxiyuan <wangxiyuan1007@gmail.com>
This commit is contained in:
wangxiyuan
2025-11-13 08:48:47 +08:00
committed by GitHub
parent d75ad04818
commit 2dacd57394
6 changed files with 28 additions and 18 deletions

View File

@@ -13,6 +13,7 @@ from vllm.model_executor.layers.quantization.input_quant_fp8 import QuantFP8
from vllm.model_executor.layers.quantization.utils.quant_utils import GroupShape
from vllm.platforms import current_platform
from vllm.utils.flashinfer import flashinfer_scaled_fp8_mm, has_flashinfer
+from vllm.utils.platform_utils import get_cu_count
from vllm.utils.torch_utils import direct_register_custom_op
# Input scaling factors are no longer optional in _scaled_mm starting
@@ -200,7 +201,7 @@ def rocm_per_tensor_w8a8_scaled_mm_impl(
out_dtype,
scale_a,
scale_b,
-current_platform.get_cu_count(),
+get_cu_count(),
bias,
)
else:

View File

@@ -11,6 +11,7 @@ from vllm import envs
from vllm._aiter_ops import rocm_aiter_ops
from vllm.logger import init_logger
from vllm.platforms import CpuArchEnum, current_platform
+from vllm.utils.platform_utils import get_cu_count
from vllm.utils.torch_utils import direct_register_custom_op
logger = init_logger(__name__)
@@ -151,7 +152,7 @@ def rocm_unquantized_gemm_impl(
x_view = x.reshape(-1, x.size(-1))
if m > 8 and 0 < n <= 4:
-cu_count = current_platform.get_cu_count()
+cu_count = get_cu_count()
out = ops.wvSplitK(weight, x_view, cu_count, bias)
return out.reshape(*x.shape[:-1], weight.shape[0])
elif m % 4 == 0 and n == 1 and k <= 8192 and bias is None: