[platform] Move get_cu_count to utils (#27005)

Signed-off-by: wangxiyuan <wangxiyuan1007@gmail.com>
This commit is contained in:
wangxiyuan
2025-11-13 08:48:47 +08:00
committed by GitHub
parent d75ad04818
commit 2dacd57394
6 changed files with 28 additions and 18 deletions

View File

@@ -545,13 +545,6 @@ class Platform:
cls._global_graph_pool = self.graph_pool_handle()
return cls._global_graph_pool
@classmethod
def get_cu_count(cls, device_id: int = 0) -> int:
    """Return the number of compute units (CUs) on a single GPU.

    Abstract on the base platform: concrete platforms (e.g. ROCm)
    are expected to override this with a device-specific query.
    """
    raise NotImplementedError
@classmethod
def get_static_graph_wrapper_cls(cls) -> str:
"""

View File

@@ -423,10 +423,6 @@ class RocmPlatform(Platform):
def opaque_attention_op(cls) -> bool:
    """Whether this platform treats the attention op as opaque."""
    # ROCm always reports attention as an opaque op.
    return True
@classmethod
def get_cu_count(cls, device_id: int = 0) -> int:
    """Return the number of compute units (CUs) on the given GPU.

    On ROCm the CU count is exposed through the CUDA-compatible
    device-properties API as ``multi_processor_count``.
    """
    device_props = torch.cuda.get_device_properties(device_id)
    return device_props.multi_processor_count
@classmethod
def is_navi(cls) -> bool:
    """Whether the current GPU is a Navi (gfx1x) architecture part."""
    # Navi-family GPUs report an architecture name starting with "gfx1".
    arch_name = torch.cuda.get_device_properties(0).gcnArchName
    return "gfx1" in arch_name