Implement cache ops

Woosuk Kwon
2023-02-16 07:47:03 +00:00
parent a1c67e6db8
commit 6f058c7ba8
4 changed files with 109 additions and 6 deletions

csrc/cache.cpp (Normal file, 20 additions)

@@ -0,0 +1,20 @@
#include <torch/extension.h>

#include <map>

// Defined in csrc/cache_kernel.cu.
void copy_blocks(
  torch::Tensor& src,
  torch::Tensor& dst,
  const std::map<int64_t, int64_t>& block_mapping);

// Thin wrapper exposed to Python via pybind11.
void copy_cache_blocks(
  torch::Tensor& src,
  torch::Tensor& dst,
  const std::map<int64_t, int64_t>& block_mapping) {
  copy_blocks(src, dst, block_mapping);
}

PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) {
  m.def(
    "copy_cache_blocks",
    &copy_cache_blocks,
    "Copy the cache blocks from src to dst");
}
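Once the extension is compiled, the binding can be exercised from Python. A minimal sketch, assuming the built module is importable under the hypothetical name `cache_ops` and that both caches are laid out with one block per slice along dim 0 (the dict is converted to `std::map<int64_t, int64_t>` by pybind11):

```python
import torch
import cache_ops  # hypothetical name for the built extension

# Two caches with 4 blocks each; a "block" is one slice along dim 0.
src = torch.randn(4, 16, 128, device="cuda")
dst = torch.zeros_like(src)

# Copy block 0 -> 2 and block 1 -> 3.
cache_ops.copy_cache_blocks(src, dst, {0: 2, 1: 3})
torch.cuda.synchronize()  # copies are issued asynchronously on the current stream

assert torch.equal(dst[2], src[0]) and torch.equal(dst[3], src[1])
```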

csrc/cache_kernel.cu (Normal file, 43 additions)

@@ -0,0 +1,43 @@
#include <torch/extension.h>
#include <ATen/cuda/CUDAContext.h>

#include <cassert>
#include <cstdint>
#include <map>

void copy_blocks(
  torch::Tensor& src,
  torch::Tensor& dst,
  const std::map<int64_t, int64_t>& block_mapping) {
  torch::Device src_device = src.device();
  torch::Device dst_device = dst.device();
  // Pick the memcpy direction from the source/destination devices.
  cudaMemcpyKind memcpy_type;
  if (src_device.is_cuda() && dst_device.is_cuda()) {
    assert(src_device.index() == dst_device.index());
    memcpy_type = cudaMemcpyDeviceToDevice;
  } else if (src_device.is_cuda() && dst_device.is_cpu()) {
    memcpy_type = cudaMemcpyDeviceToHost;
  } else if (src_device.is_cpu() && dst_device.is_cuda()) {
    memcpy_type = cudaMemcpyHostToDevice;
  } else {
    assert(false);
  }

  // Use char* so that byte-offset pointer arithmetic is well defined.
  char *src_ptr = static_cast<char*>(src.data_ptr());
  char *dst_ptr = static_cast<char*>(dst.data_ptr());

  // A block is one slice along dim 0, so its size is element size times
  // the number of elements in a single slice.
  const int64_t block_size_in_bytes = src.element_size() * src[0].numel();
  const cudaStream_t stream = at::cuda::getCurrentCUDAStream();
  for (const auto& pair : block_mapping) {
    int64_t src_block_number = pair.first;
    int64_t dst_block_number = pair.second;
    int64_t src_offset = src_block_number * block_size_in_bytes;
    int64_t dst_offset = dst_block_number * block_size_in_bytes;
    // Copy one block asynchronously; all copies share the current stream.
    cudaMemcpyAsync(
      dst_ptr + dst_offset,
      src_ptr + src_offset,
      block_size_in_bytes,
      memcpy_type,
      stream);
  }
}
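For reference, the two files could be compiled with PyTorch's extension builder. A build sketch only; the package and module names here are assumptions, not taken from this commit:

```python
from setuptools import setup
from torch.utils.cpp_extension import BuildExtension, CUDAExtension

setup(
    name="cache_ops",  # assumed package name
    ext_modules=[
        CUDAExtension(
            name="cache_ops",  # must match the name imported on the Python side
            sources=["csrc/cache.cpp", "csrc/cache_kernel.cu"],
        ),
    ],
    cmdclass={"build_ext": BuildExtension},
)
```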