Convert formatting to use ruff instead of yapf + isort (#26247)
Signed-off-by: Harry Mellor <19981378+hmellor@users.noreply.github.com>
This commit is contained in:
# SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
"""Test model set-up and inference for quantized HF models supported
on the AutoRound.

Validating the configuration and printing results for manual checking.

Run `pytest tests/quantization/test_auto_round.py`.
"""

import pytest

from vllm.platforms import current_platform
||||
# Model IDs under test, one per AutoRound export backend.
# NOTE: the trailing comma after the last entry matters — without it Python's
# implicit string concatenation would fuse the two IDs into a single (bogus)
# model name, silently dropping a test case.
MODELS = [
    "OPEA/Qwen2.5-0.5B-Instruct-int4-sym-inc",  ##auto_round:auto_gptq
    "Intel/Qwen2-0.5B-Instruct-int4-sym-AutoRound",  ##auto_round:auto_awq
]
|
||||
|
||||
|
||||
@pytest.mark.skipif(
    not current_platform.is_cpu()
    and not current_platform.is_xpu()
    and not current_platform.is_cuda(),
    reason="only supports CPU/XPU/CUDA backend.",
)
@pytest.mark.parametrize("model", MODELS)
def test_auto_round(vllm_runner, model):
    """Smoke-test loading and greedy generation for an AutoRound model.

    Args:
        vllm_runner: project test fixture yielding a context-managed LLM
            wrapper (presumably defined in conftest — confirm there).
        model: HF model ID, parametrized over ``MODELS``.

    The assertion only checks that generation produced *something*; the
    printed text is for manual inspection, per the module docstring.
    """
    with vllm_runner(model) as llm:
        output = llm.generate_greedy(["The capital of France is"], max_tokens=8)
    assert output
    print(f"{output[0][1]}")
|
||||
|
||||
Reference in New Issue
Block a user