# SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project

# Cutlass bench utils

import torch

def to_fp8(tensor: torch.Tensor) -> torch.Tensor:
|
|
|
|
|
finfo = torch.finfo(torch.float8_e4m3fn)
|
2025-05-13 14:43:29 +01:00
|
|
|
return torch.round(tensor.clamp(min=finfo.min, max=finfo.max)).to(
|
|
|
|
|
dtype=torch.float8_e4m3fn
|
|
|
|
|
)
|
2024-12-18 09:57:16 -05:00
|
|
|
|
|
|
|
|
|
|
|
|
|
def to_int8(tensor: torch.Tensor) -> torch.Tensor:
    """Round *tensor* and saturate it into the int8 range [-128, 127]."""
    saturated = tensor.clamp(min=-128, max=127)
    return torch.round(saturated).to(dtype=torch.int8)


def to_bf16(tensor: torch.Tensor) -> torch.Tensor:
    """Cast *tensor* to bfloat16."""
    return tensor.to(torch.bfloat16)


def to_fp16(tensor: torch.Tensor) -> torch.Tensor:
    """Cast *tensor* to float16."""
    return tensor.to(torch.float16)


def make_rand_tensors(
    dtype: torch.dtype, m: int, n: int, k: int
) -> tuple[torch.Tensor, torch.Tensor]:
    """Build a random quantized GEMM operand pair ``(a, b)`` on CUDA.

    ``a`` has shape (m, k); ``b`` is generated as (n, k) and transposed to
    (k, n), giving it a column-major layout — presumably what the Cutlass
    benchmark kernels expect (TODO confirm against the benchmark callers).
    Both operands are scaled by 5 before quantization.

    Raises:
        ValueError: if *dtype* is neither int8 nor float8_e4m3fn.
    """
    # Generate the float operands first (matches original allocation order).
    a = torch.randn((m, k), device="cuda") * 5
    b = torch.randn((n, k), device="cuda").t() * 5

    # Pick the quantizer for the requested dtype, then apply it to both.
    if dtype == torch.int8:
        quantize = to_int8
    elif dtype == torch.float8_e4m3fn:
        quantize = to_fp8
    else:
        raise ValueError("unsupported dtype")
    return quantize(a), quantize(b)