[platform] Move get_cu_count to utils (#27005)
Signed-off-by: wangxiyuan <wangxiyuan1007@gmail.com>
This commit is contained in:
@@ -545,13 +545,6 @@ class Platform:
|
||||
cls._global_graph_pool = self.graph_pool_handle()
|
||||
return cls._global_graph_pool
|
||||
|
||||
@classmethod
def get_cu_count(cls, device_id: int = 0) -> int:
    """Return the total number of compute units (CUs) on a single GPU.

    Abstract stub: platform-specific subclasses are expected to
    override this with a real query for the given ``device_id``.
    """
    raise NotImplementedError
|
||||
|
||||
@classmethod
|
||||
def get_static_graph_wrapper_cls(cls) -> str:
|
||||
"""
|
||||
|
||||
@@ -423,10 +423,6 @@ class RocmPlatform(Platform):
|
||||
def opaque_attention_op(cls) -> bool:
    """Report that this platform exposes its attention op as opaque.

    Always true on this platform.
    """
    return True
|
||||
|
||||
@classmethod
def get_cu_count(cls, device_id: int = 0) -> int:
    """Return the number of compute units (CUs) on the GPU ``device_id``.

    Reads the multiprocessor count from the device properties reported
    by the CUDA/HIP runtime.
    """
    props = torch.cuda.get_device_properties(device_id)
    return props.multi_processor_count
|
||||
|
||||
@classmethod
def is_navi(cls) -> bool:
    """Return True when device 0 is a Navi-family (gfx1*) AMD GPU.

    Checks whether the architecture name reported for device 0
    contains the "gfx1" prefix used by Navi parts.
    """
    arch_name = torch.cuda.get_device_properties(0).gcnArchName
    return "gfx1" in arch_name
|
||||
|
||||
Reference in New Issue
Block a user