# SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
# Cutlass bench utils
import torch


def to_fp8(tensor: torch.Tensor) -> torch.Tensor:
    """Round *tensor* and saturate it into float8_e4m3fn range.

    Values are clamped to the representable fp8 range before rounding so
    out-of-range inputs saturate instead of becoming inf/NaN on cast.
    """
    finfo = torch.finfo(torch.float8_e4m3fn)
    return torch.round(tensor.clamp(min=finfo.min, max=finfo.max)).to(
        dtype=torch.float8_e4m3fn
    )


def to_int8(tensor: torch.Tensor) -> torch.Tensor:
    """Round *tensor* and saturate it into the int8 range [-128, 127]."""
    return torch.round(tensor.clamp(min=-128, max=127)).to(dtype=torch.int8)


def to_bf16(tensor: torch.Tensor) -> torch.Tensor:
    """Cast *tensor* to bfloat16."""
    return tensor.to(dtype=torch.bfloat16)


def to_fp16(tensor: torch.Tensor) -> torch.Tensor:
    """Cast *tensor* to float16."""
    return tensor.to(dtype=torch.float16)


def make_rand_tensors(
    dtype: torch.dtype, m: int, n: int, k: int
) -> tuple[torch.Tensor, torch.Tensor]:
    """Create a random (m, k) x (k, n) GEMM operand pair on CUDA in *dtype*.

    Returns (a, b) where ``a`` is (m, k) and ``b`` is a (k, n) transposed
    view (column-major layout, as Cutlass benchmarks expect).

    Raises:
        ValueError: if *dtype* is not one of int8, float8_e4m3fn,
            bfloat16, or float16.
    """
    # Scale up so rounding to int8/fp8 yields a useful value spread.
    a = torch.randn((m, k), device="cuda") * 5
    b = torch.randn((n, k), device="cuda").t() * 5

    if dtype == torch.int8:
        return to_int8(a), to_int8(b)
    if dtype == torch.float8_e4m3fn:
        return to_fp8(a), to_fp8(b)
    # Fix: dispatch to the bf16/fp16 converters that were defined but
    # previously unreachable from here.
    if dtype == torch.bfloat16:
        return to_bf16(a), to_bf16(b)
    if dtype == torch.float16:
        return to_fp16(a), to_fp16(b)
    raise ValueError(f"unsupported dtype: {dtype}")