[Hardware][CPU] vLLM int8 quantization enablement for ARM CPU (#14129)

Signed-off-by: nishith-fujitsu <nishith.jaiswal@fujitsu.com>
This commit is contained in:
nishith-fujitsu
2025-07-10 21:29:04 +05:30
committed by GitHub
parent 4b9a9435bb
commit c7753a9809
5 changed files with 347 additions and 30 deletions

View File

@@ -151,8 +151,9 @@ TORCH_LIBRARY_EXPAND(TORCH_EXTENSION_NAME, ops) {
ops.impl("rotary_embedding", torch::kCPU, &rotary_embedding);
// Quantization
#ifdef __AVX512F__
#if defined(__AVX512F__) || defined(__aarch64__)
at::Tag stride_tag = at::Tag::needs_fixed_stride_order;
// Compute int8 quantized tensor for given scaling factor.
ops.def(
"static_scaled_int8_quant(Tensor! out, Tensor input, Tensor scale,"