[Feature] A calibration-free RTN-based quantization for accurate and accelerated INT4/INT8 inference (#18768)

Signed-off-by: Alex Kogan <alex.kogan@oracle.com>
Co-authored-by: Michael Goin <mgoin64@gmail.com>
This commit is contained in:
Alex Kogan
2025-07-01 01:44:38 -04:00
committed by GitHub
parent bd5038af07
commit 27949354fa
3 changed files with 319 additions and 0 deletions

View File

@@ -0,0 +1,28 @@
# SPDX-License-Identifier: Apache-2.0
# Copyright © 2025, Oracle and/or its affiliates.
"""Tests RTN quantization startup and generation,
doesn't test correctness
"""
import pytest
from tests.quantization.utils import is_quant_method_supported
# Small instruct model used as a cheap smoke-test target for RTN quantization.
MODELS = ["microsoft/Phi-3-mini-4k-instruct"]
@pytest.mark.skipif(not is_quant_method_supported("rtn"),
                    reason="RTN is not supported on this GPU type.")
@pytest.mark.parametrize("model", MODELS)
@pytest.mark.parametrize("dtype", ["bfloat16"])
@pytest.mark.parametrize("max_tokens", [10])
def test_model_rtn_startup(
    vllm_runner,
    example_prompts,
    model: str,
    dtype: str,
    max_tokens: int,
) -> None:
    """Smoke-test that a model loads and generates with RTN quantization.

    Only verifies that engine startup and greedy generation complete
    without error; output correctness is not checked.

    Args:
        vllm_runner: pytest fixture producing a vLLM model context manager.
        example_prompts: pytest fixture providing sample input prompts.
        model: HF model id to load (parametrized from MODELS).
        dtype: activation dtype to run with.
        max_tokens: number of tokens to generate per prompt.
    """
    # NOTE(review): the original requested the unused `hf_runner` fixture;
    # dropped it since no HF-side comparison is performed here.
    with vllm_runner(model, dtype=dtype, quantization="rtn") as vllm_model:
        vllm_model.generate_greedy(example_prompts, max_tokens)