Implement AWQ quantization support for LLaMA (#1032)

Co-authored-by: Robert Irvine <robert@seamlessml.com>
Co-authored-by: root <rirv938@gmail.com>
Co-authored-by: Casper <casperbh.96@gmail.com>
Co-authored-by: julian-q <julianhquevedo@gmail.com>
This commit is contained in:
Woosuk Kwon
2023-09-16 00:03:37 -07:00
committed by GitHub
parent b9fe4616f9
commit e3e79e9e8a
19 changed files with 1178 additions and 208 deletions

View File

@@ -146,6 +146,20 @@ activation_extension = CUDAExtension(
)
ext_modules.append(activation_extension)
# Quantization kernels (AWQ GEMM): compiled from the C++ binding plus the
# CUDA kernel source, using the same compiler flags as the other extensions.
quantization_extension = CUDAExtension(
    name="vllm.quantization_ops",
    sources=[
        "csrc/quantization.cpp",
        "csrc/quantization/awq/gemm_kernels.cu",
    ],
    extra_compile_args={"cxx": CXX_FLAGS, "nvcc": NVCC_FLAGS},
)
ext_modules.append(quantization_extension)
def get_path(*filepath) -> str:
    """Join one or more path components onto the project root directory."""
    joined = os.path.join(ROOT_DIR, *filepath)
    return joined