[Hardware][CPU] Support AWQ for CPU backend (#7515)

This commit is contained in:
Li, Jiang
2024-10-10 00:28:08 +08:00
committed by GitHub
parent 7dea289066
commit ca77dd7a44
9 changed files with 214 additions and 7 deletions

View File

@@ -0,0 +1,28 @@
"""Test model set-up and inference for quantized HF models supported
on the CPU backend using IPEX (including AWQ).
Validates the configuration and prints results for manual checking.
Run `pytest tests/quantization/test_ipex_quant.py`.
"""
import pytest
from vllm.platforms import current_platform
# AWQ-quantized HF checkpoints exercised by this CPU/IPEX smoke test.
MODELS = [
    "casperhansen/llama-3-8b-instruct-awq",
]
# dtypes each model is run under on the CPU backend.
DTYPE = ["bfloat16"]
@pytest.mark.skipif(not current_platform.is_cpu(),
                    reason="only supports the CPU backend.")
@pytest.mark.parametrize("model", MODELS)
@pytest.mark.parametrize("dtype", DTYPE)
def test_ipex_quant(vllm_runner, model, dtype):
    """Smoke-test greedy generation for a quantized model on the CPU backend.

    Loads the model through the ``vllm_runner`` fixture, generates a short
    completion, asserts something came back, and prints it for manual review.
    """
    prompts = ["The capital of France is"]
    with vllm_runner(model, dtype=dtype) as vllm_model:
        generated = vllm_model.generate_greedy(prompts, max_tokens=32)
    # A non-empty result is the only automated check; the printed text is
    # inspected by hand, as the module docstring notes.
    assert generated
    print(generated)