Convert formatting to use ruff instead of yapf + isort (#26247)
Signed-off-by: Harry Mellor <19981378+hmellor@users.noreply.github.com>
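The practical effect of the switch is visible in the hunks below: yapf's parenthesis-aligned argument wrapping gives way to ruff's black-compatible layout, where a "magic" trailing comma forces one argument per line. A minimal sketch of the two styles, assuming a hypothetical `launch` function (not a vLLM API) just long enough to need wrapping:

# A hypothetical call, long enough that both formatters must wrap it.
def launch(model: str, dtype: str, quantization: str) -> None:
    print(model, dtype, quantization)

# yapf's default: continuation arguments aligned under the opening parenthesis.
launch("ai21labs/Jamba-tiny-random", dtype="bfloat16",
       quantization="experts_int8")

# ruff format (black-compatible): the trailing comma keeps the call exploded,
# one argument per line at a single level of indentation.
launch(
    "ai21labs/Jamba-tiny-random",
    dtype="bfloat16",
    quantization="experts_int8",
)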
@@ -2,9 +2,10 @@
 # SPDX-FileCopyrightText: Copyright contributors to the vLLM project
 
-# flake8: noqa
-"""Tests experts_int8 quantization startup and generation,
+"""Tests experts_int8 quantization startup and generation,
 doesn't test correctness
 """
+
 import pytest
 
 from tests.quantization.utils import is_quant_method_supported
+
@@ -14,8 +15,10 @@ from ..models.registry import HF_EXAMPLE_MODELS
 MODELS = ["ai21labs/Jamba-tiny-random", "pfnet/plamo-2-1b"]
 
 
-@pytest.mark.skipif(not is_quant_method_supported("experts_int8"),
-                    reason="ExpertsInt8 is not supported on this GPU type.")
+@pytest.mark.skipif(
+    not is_quant_method_supported("experts_int8"),
+    reason="ExpertsInt8 is not supported on this GPU type.",
+)
 @pytest.mark.parametrize("model", MODELS)
 @pytest.mark.parametrize("dtype", ["bfloat16"])
 @pytest.mark.parametrize("max_tokens", [10])
@@ -30,6 +33,5 @@ def test_model_experts_int8_startup(
     model_info = HF_EXAMPLE_MODELS.find_hf_info(model)
     model_info.check_transformers_version(on_fail="skip")
 
-    with vllm_runner(model, dtype=dtype,
-                     quantization="experts_int8") as vllm_model:
+    with vllm_runner(model, dtype=dtype, quantization="experts_int8") as vllm_model:
         vllm_model.generate_greedy(example_prompts, max_tokens)
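For orientation, a sketch of the touched region of the test as it reads after this commit, reassembled from the hunks above. The file path and lines outside the hunks are not shown in this excerpt, and the test's parameter list is inferred from the names used in its body, so treat both as assumptions:

# SPDX-FileCopyrightText: Copyright contributors to the vLLM project

"""Tests experts_int8 quantization startup and generation,
doesn't test correctness
"""

import pytest

from tests.quantization.utils import is_quant_method_supported

from ..models.registry import HF_EXAMPLE_MODELS

MODELS = ["ai21labs/Jamba-tiny-random", "pfnet/plamo-2-1b"]


@pytest.mark.skipif(
    not is_quant_method_supported("experts_int8"),
    reason="ExpertsInt8 is not supported on this GPU type.",
)
@pytest.mark.parametrize("model", MODELS)
@pytest.mark.parametrize("dtype", ["bfloat16"])
@pytest.mark.parametrize("max_tokens", [10])
def test_model_experts_int8_startup(
    vllm_runner,  # fixture and parameter names inferred from the hunk bodies
    example_prompts,
    model: str,
    dtype: str,
    max_tokens: int,
) -> None:
    model_info = HF_EXAMPLE_MODELS.find_hf_info(model)
    model_info.check_transformers_version(on_fail="skip")

    with vllm_runner(model, dtype=dtype, quantization="experts_int8") as vllm_model:
        vllm_model.generate_greedy(example_prompts, max_tokens)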