Convert formatting to use ruff instead of yapf + isort (#26247)

Signed-off-by: Harry Mellor <19981378+hmellor@users.noreply.github.com>

Commit:    d6953beb91 (parent 17edd8a807)
Author:    Harry Mellor
Committer: GitHub
Date:      2025-10-05 15:06:22 +01:00

1508 changed files with 115244 additions and 94146 deletions

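Most of the 1508 files changed only in style. The substantive difference between the two formatters: yapf aligned continuation lines under the opening bracket, while ruff's Black-compatible formatter uses a four-space hanging indent plus the "magic trailing comma" (a trailing comma keeps one element per line; its absence lets arguments be joined onto a single line when they fit). A minimal sketch of the two styles, reusing the AsyncEngineArgs call from the hunks below (the model name and values are arbitrary placeholders, not from this commit):

    from vllm.engine.arg_utils import AsyncEngineArgs

    # Before, yapf: continuation lines aligned to the opening parenthesis.
    engine_args = AsyncEngineArgs(model="facebook/opt-125m",
                                  enforce_eager=True,
                                  tensor_parallel_size=1)

    # After, ruff format: four-space hanging indent; with no trailing
    # comma, the arguments are joined onto one line because they fit
    # within the configured line length.
    engine_args = AsyncEngineArgs(
        model="facebook/opt-125m", enforce_eager=True, tensor_parallel_size=1
    )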

@@ -5,8 +5,10 @@
 import pytest
 from tests.utils import wait_for_gpu_memory_to_clear
-from tests.v1.shutdown.utils import (SHUTDOWN_TEST_THRESHOLD_BYTES,
-                                     SHUTDOWN_TEST_TIMEOUT_SEC)
+from tests.v1.shutdown.utils import (
+    SHUTDOWN_TEST_THRESHOLD_BYTES,
+    SHUTDOWN_TEST_TIMEOUT_SEC,
+)
 from vllm import LLM
 from vllm.distributed import get_tensor_model_parallel_rank
 from vllm.engine.arg_utils import AsyncEngineArgs
@@ -30,9 +32,9 @@ def evil_method(self, *args, **kwargs):
 @pytest.mark.parametrize("model", MODELS)
 @pytest.mark.parametrize("tensor_parallel_size", [2, 1])
 @pytest.mark.parametrize("failing_method", ["forward", "load_weights"])
-def test_async_llm_startup_error(monkeypatch, model: str,
-                                 tensor_parallel_size: int,
-                                 failing_method: str) -> None:
+def test_async_llm_startup_error(
+    monkeypatch, model: str, tensor_parallel_size: int, failing_method: str
+) -> None:
     """Test that AsyncLLM propagates an __init__ error & frees memory.
     Test profiling (forward()) and load weights failures.
     AsyncLLM always uses an MP client.
@@ -43,9 +45,9 @@ def test_async_llm_startup_error(monkeypatch, model: str,
     # Monkeypatch an error in the model.
     monkeypatch.setattr(LlamaForCausalLM, failing_method, evil_method)
-    engine_args = AsyncEngineArgs(model=model,
-                                  enforce_eager=True,
-                                  tensor_parallel_size=tensor_parallel_size)
+    engine_args = AsyncEngineArgs(
+        model=model, enforce_eager=True, tensor_parallel_size=tensor_parallel_size
+    )
 
     # Confirm we get an exception.
     with pytest.raises(Exception, match="initialization failed"):
@@ -63,9 +65,13 @@ def test_async_llm_startup_error(monkeypatch, model: str,
 @pytest.mark.parametrize("tensor_parallel_size", [2, 1])
 @pytest.mark.parametrize("enable_multiprocessing", [True])
 @pytest.mark.parametrize("failing_method", ["forward", "load_weights"])
-def test_llm_startup_error(monkeypatch, model: str, tensor_parallel_size: int,
-                           enable_multiprocessing: bool,
-                           failing_method: str) -> None:
+def test_llm_startup_error(
+    monkeypatch,
+    model: str,
+    tensor_parallel_size: int,
+    enable_multiprocessing: bool,
+    failing_method: str,
+) -> None:
     """Test that LLM propagates an __init__ error and frees memory.
     Test profiling (forward()) and load weights failures.
     TODO(andy) - LLM without multiprocessing.
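Note the contrast with test_async_llm_startup_error above: its four parameters fit on one line within ruff's line-length limit, so the formatter joined them, while the five parameters here do not fit, so ruff breaks them one per line and appends a trailing comma, which keeps the list fully expanded on subsequent formatting runs.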
@@ -76,7 +82,6 @@ def test_llm_startup_error(monkeypatch, model: str, tensor_parallel_size: int,
         pytest.skip(reason="Not enough CUDA devices")
 
-
     with monkeypatch.context() as m:
         MP_VALUE = "1" if enable_multiprocessing else "0"
         m.setenv("VLLM_ENABLE_V1_MULTIPROCESSING", MP_VALUE)
@@ -84,12 +89,16 @@ def test_llm_startup_error(monkeypatch, model: str, tensor_parallel_size: int,
         monkeypatch.setattr(LlamaForCausalLM, failing_method, evil_method)
         with pytest.raises(
-                Exception,
-                match="initialization failed"
-                if enable_multiprocessing else "Simulated Error in startup!"):
-            _ = LLM(model=model,
-                    enforce_eager=True,
-                    tensor_parallel_size=tensor_parallel_size)
+            Exception,
+            match="initialization failed"
+            if enable_multiprocessing
+            else "Simulated Error in startup!",
+        ):
+            _ = LLM(
+                model=model,
+                enforce_eager=True,
+                tensor_parallel_size=tensor_parallel_size,
+            )
 
     # Confirm all the processes are cleaned up.
     wait_for_gpu_memory_to_clear(
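The code churn is the visible half of the migration; the other half (not shown in this view) is the tooling swap itself, typically dropping the [tool.yapf] and [tool.isort] sections of pyproject.toml in favor of a [tool.ruff] section whose lint rules include the isort-compatible "I" group. Assuming that standard setup, running "ruff check --fix ." followed by "ruff format ." should regenerate a restyle like this one; the exact configuration and invocation are not recorded in this diff view.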