[4/n] Migrate FP4/W4A8 CUTLASS kernels to torch stable ABI (#37503)
Signed-off-by: Mikayla Gawarecki <mikaylagawarecki@gmail.com>
csrc/libtorch_stable/quantization/fp4/nvfp4_quant_kernels.cu (new file, 245 lines)
@@ -0,0 +1,245 @@
/*
 * Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <torch/csrc/stable/tensor.h>

#include <cuda_runtime_api.h>
#include <cuda_runtime.h>

#include <cuda_fp8.h>

#include "libtorch_stable/torch_utils.h"
#include "libtorch_stable/dispatch_utils.h"
#include "cuda_vec_utils.cuh"

#include "cuda_utils.h"
#include "launch_bounds_utils.h"

// Define before including nvfp4_utils.cuh so the header
// can use this macro during compilation.
#define NVFP4_ENABLE_ELTS16 1
#include "nvfp4_utils.cuh"

namespace vllm {

// Use UE4M3 by default.
template <class Type, bool UE8M0_SF = false>
__global__ void __launch_bounds__(512, VLLM_BLOCKS_PER_SM(512))
    cvt_fp16_to_fp4(int32_t numRows, int32_t numCols, int32_t num_padded_cols,
                    Type const* __restrict__ in,
                    float const* __restrict__ SFScale,
                    uint32_t* __restrict__ out, uint32_t* __restrict__ SFout) {
  using PackedVec = vllm::PackedVec<Type, CVT_FP4_PACK16>;

  static constexpr int CVT_FP4_NUM_THREADS_PER_SF =
      (CVT_FP4_SF_VEC_SIZE / CVT_FP4_ELTS_PER_THREAD);
  static_assert(sizeof(PackedVec) == sizeof(Type) * CVT_FP4_ELTS_PER_THREAD,
                "Vec size is not matched.");

  // Precompute SF layout parameter (constant for entire kernel).
  int32_t const numKTiles = (numCols + 63) / 64;

  int sf_m = round_up<int>(numRows, 128);
  int32_t const colIdx = blockDim.x * blockIdx.y + threadIdx.x;
  int elem_idx = colIdx * CVT_FP4_ELTS_PER_THREAD;

  // Get the global scaling factor, which will be applied to the SF.
  // Note SFScale is the same as next GEMM's alpha, which is
  // (448.f / (Alpha_A / 6.f)).
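  // (448.f is the largest finite value of the FP8 E4M3 scale-factor format
  //  and 6.f is the largest FP4 E2M1 magnitude.)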
  float const global_scale = (SFScale == nullptr) ? 1.0f : SFScale[0];

  // Iterate over all rows and cols including padded ones -
  // ensures we visit every single scale factor address to initialize it.
  for (int rowIdx = blockIdx.x; rowIdx < sf_m; rowIdx += gridDim.x) {
    if (colIdx < num_padded_cols) {
      PackedVec in_vec;
      int64_t inOffset = rowIdx * (numCols / CVT_FP4_ELTS_PER_THREAD) + colIdx;

      // If we are outside valid rows OR outside valid columns -> Use Zeros
      bool valid = (rowIdx < numRows) && (elem_idx < numCols);
      if constexpr (CVT_FP4_PACK16) {
        ld256_cg_or_zero(reinterpret_cast<u32x8_t&>(in_vec),
                         &reinterpret_cast<const uint32_t*>(in)[inOffset * 8],
                         valid);
      } else {
        ld128_cg_or_zero(reinterpret_cast<uint4&>(in_vec),
                         &reinterpret_cast<const uint32_t*>(in)[inOffset * 4],
                         valid);
      }

      auto sf_out =
          cvt_quant_to_fp4_get_sf_out_offset<uint32_t,
                                             CVT_FP4_NUM_THREADS_PER_SF>(
              rowIdx, colIdx, numKTiles, SFout);

      auto out_val =
          cvt_warp_fp16_to_fp4<Type, CVT_FP4_NUM_THREADS_PER_SF, UE8M0_SF>(
              in_vec, global_scale, sf_out);

      // We do NOT write output for padding because the 'out' tensor is not
      // padded.
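      // (With CVT_FP4_PACK16 each thread produces 16 FP4 nibbles, i.e. 64
      //  bits, written as two of the numCols / 8 uint32 words in a row.)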
      if (valid) {
        if constexpr (CVT_FP4_PACK16) {
          int64_t outOffset = rowIdx * (numCols / 8) + colIdx * 2;
          uint64_t packed64 =
              (uint64_t(out_val.hi) << 32) | uint64_t(out_val.lo);
          reinterpret_cast<uint64_t*>(out)[outOffset >> 1] = packed64;
        } else {
          out[inOffset] = out_val;
        }
      }
    }
  }
}

// Use UE4M3 by default.
template <class Type, bool UE8M0_SF = false>
__global__ void __launch_bounds__(512, VLLM_BLOCKS_PER_SM(512))
    cvt_fp16_to_fp4_sf_major(int32_t numRows, int32_t numCols,
                             int32_t sf_n_unpadded, int32_t num_packed_cols,
                             Type const* __restrict__ in,
                             float const* __restrict__ SFScale,
                             uint32_t* __restrict__ out,
                             uint32_t* __restrict__ SFout) {
  using PackedVec = PackedVec<Type, CVT_FP4_PACK16>;

  static constexpr int CVT_FP4_NUM_THREADS_PER_SF =
      (CVT_FP4_SF_VEC_SIZE / CVT_FP4_ELTS_PER_THREAD);
  static_assert(sizeof(PackedVec) == sizeof(Type) * CVT_FP4_ELTS_PER_THREAD,
                "Vec size is not matched.");

  int32_t const colIdx = blockDim.x * blockIdx.y + threadIdx.x;
  int elem_idx = colIdx * CVT_FP4_ELTS_PER_THREAD;

  // Get the global scaling factor, which will be applied to the SF.
  // Note SFScale is the same as next GEMM's alpha, which is
  // (448.f / (Alpha_A / 6.f)).
  float const global_scale = (SFScale == nullptr) ? 1.0f : SFScale[0];

  // Iterate over all rows and packed column groups so that every scale
  // factor address is visited and initialized.
  for (int rowIdx = blockIdx.x; rowIdx < numRows; rowIdx += gridDim.x) {
    if (colIdx < num_packed_cols) {
      PackedVec in_vec;
      int64_t inOffset = rowIdx * (numCols / CVT_FP4_ELTS_PER_THREAD) + colIdx;

      // If we are outside valid rows OR outside valid columns -> Use Zeros
      bool valid = (rowIdx < numRows) && (elem_idx < numCols);
      if constexpr (CVT_FP4_PACK16) {
        ld256_cg_or_zero(reinterpret_cast<u32x8_t&>(in_vec),
                         &reinterpret_cast<const uint32_t*>(in)[inOffset * 8],
                         valid);
      } else {
        ld128_cg_or_zero(reinterpret_cast<uint4&>(in_vec),
                         &reinterpret_cast<const uint32_t*>(in)[inOffset * 4],
                         valid);
      }

      auto sf_out =
          sf_out_rowmajor_u8<uint32_t>(rowIdx, colIdx, sf_n_unpadded, SFout);

      auto out_val =
          cvt_warp_fp16_to_fp4<Type, CVT_FP4_NUM_THREADS_PER_SF, UE8M0_SF>(
              in_vec, global_scale, sf_out);

      // We do NOT write output for padding because the 'out' tensor is not
      // padded.
      if (valid) {
        if constexpr (CVT_FP4_PACK16) {
          int64_t outOffset = rowIdx * (numCols / 8) + colIdx * 2;
          uint64_t packed64 =
              (uint64_t(out_val.hi) << 32) | uint64_t(out_val.lo);
          reinterpret_cast<uint64_t*>(out)[outOffset >> 1] = packed64;
        } else {
          out[inOffset] = out_val;
        }
      }
    }
  }
}

}  // namespace vllm

void scaled_fp4_quant_sm1xxa(torch::stable::Tensor const& output,
                             torch::stable::Tensor const& input,
                             torch::stable::Tensor const& output_sf,
                             torch::stable::Tensor const& input_sf,
                             bool is_sf_swizzled_layout) {
  int32_t m = input.size(0);
  int32_t n = input.size(1);

  STD_TORCH_CHECK(n % 16 == 0, "The N dimension must be multiple of 16.");
  STD_TORCH_CHECK(
      input.scalar_type() == torch::headeronly::ScalarType::Half ||
          input.scalar_type() == torch::headeronly::ScalarType::BFloat16,
      "Unsupported input data type for quantize_to_fp4.");

  int multiProcessorCount =
      get_device_attribute(cudaDevAttrMultiProcessorCount, -1);

  auto input_sf_ptr = static_cast<float const*>(input_sf.data_ptr());
  auto sf_out = static_cast<int32_t*>(output_sf.data_ptr());
  auto output_ptr = static_cast<int64_t*>(output.data_ptr());
  const torch::stable::accelerator::DeviceGuard device_guard(
      input.get_device_index());
  auto stream = get_current_cuda_stream(input.get_device_index());

  int sf_n_unpadded = int(n / CVT_FP4_SF_VEC_SIZE);

  // Grid, Block size. Each thread converts 8 values.
  dim3 block(std::min(int(n / ELTS_PER_THREAD), 512));
  int const numBlocksPerSM =
      vllm_runtime_blocks_per_sm(static_cast<int>(block.x));

  if (is_sf_swizzled_layout) {
    int sf_n_int = int(vllm::round_up(sf_n_unpadded, 4) / 4);
    int32_t num_padded_cols =
        sf_n_int * 4 * CVT_FP4_SF_VEC_SIZE / CVT_FP4_ELTS_PER_THREAD;
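    // num_padded_cols counts packed column groups of CVT_FP4_ELTS_PER_THREAD
    // elements each, after the scale-factor columns are padded up to a
    // multiple of 4 for the swizzled layout.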
    int grid_y = vllm::div_round_up(num_padded_cols, static_cast<int>(block.x));
    int grid_x =
        std::min(vllm::computeEffectiveRows(m),
                 std::max(1, (multiProcessorCount * numBlocksPerSM) / grid_y));
    dim3 grid(grid_x, grid_y);

    VLLM_STABLE_DISPATCH_HALF_TYPES(
        input.scalar_type(), "nvfp4_quant_kernel", [&] {
          using cuda_type = vllm::CUDATypeConverter<scalar_t>::Type;
          auto input_ptr = static_cast<cuda_type const*>(input.data_ptr());
          vllm::cvt_fp16_to_fp4<cuda_type, false><<<grid, block, 0, stream>>>(
              m, n, num_padded_cols, input_ptr, input_sf_ptr,
              reinterpret_cast<uint32_t*>(output_ptr),
              reinterpret_cast<uint32_t*>(sf_out));
        });
  } else {
    int num_packed_cols = n / CVT_FP4_ELTS_PER_THREAD;
    int grid_y = vllm::div_round_up(num_packed_cols, static_cast<int>(block.x));
    int grid_x = std::min(
        m, std::max(1, (multiProcessorCount * numBlocksPerSM) / grid_y));
    dim3 grid(grid_x, grid_y);

    VLLM_STABLE_DISPATCH_HALF_TYPES(
        input.scalar_type(), "nvfp4_quant_kernel", [&] {
          using cuda_type = vllm::CUDATypeConverter<scalar_t>::Type;
          auto input_ptr = static_cast<cuda_type const*>(input.data_ptr());
          vllm::cvt_fp16_to_fp4_sf_major<cuda_type, false>
              <<<grid, block, 0, stream>>>(
                  m, n, sf_n_unpadded, num_packed_cols, input_ptr, input_sf_ptr,
                  reinterpret_cast<uint32_t*>(output_ptr),
                  reinterpret_cast<uint32_t*>(sf_out));
        });
  }
}
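Not part of this commit: a minimal host-side sketch of the buffer sizes the entry point above implies, assuming CVT_FP4_SF_VEC_SIZE is 16 (one scale factor per 16 input elements) and one byte per stored scale factor; the Fp4QuantShapes struct and fp4_quant_shapes helper are illustrative names only.

// Illustrative only: expected buffer sizes for scaled_fp4_quant_sm1xxa,
// assuming CVT_FP4_SF_VEC_SIZE == 16 and one byte per stored scale factor.
#include <cstdint>

struct Fp4QuantShapes {
  int64_t packed_out_bytes;  // two FP4 values per byte
  int64_t sf_bytes;          // scale-factor buffer size in bytes
};

inline Fp4QuantShapes fp4_quant_shapes(int64_t m, int64_t n,
                                       bool is_sf_swizzled_layout) {
  auto round_up = [](int64_t x, int64_t y) { return ((x + y - 1) / y) * y; };
  Fp4QuantShapes s;
  s.packed_out_bytes = m * n / 2;  // n must be a multiple of 16
  int64_t sf_cols = n / 16;        // one scale factor per 16-element block
  s.sf_bytes = is_sf_swizzled_layout
                   ? round_up(m, 128) * round_up(sf_cols, 4)  // padded, swizzled
                   : m * sf_cols;                             // plain row-major
  return s;
}

For example, m = 256 and n = 7168 give 917,504 packed output bytes and, in the swizzled layout, 256 * 448 = 114,688 scale-factor bytes.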