Convert formatting to use ruff instead of yapf + isort (#26247)

Signed-off-by: Harry Mellor <19981378+hmellor@users.noreply.github.com>
Author: Harry Mellor <19981378+hmellor@users.noreply.github.com>
Date: 2025-10-05 15:06:22 +01:00 (committed by GitHub)
Parent: 17edd8a807
Commit: d6953beb91
1508 changed files with 115244 additions and 94146 deletions
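
This is a tree-wide mechanical change: a yapf + isort to ruff migration is normally driven from pyproject.toml, where the yapf and isort sections are dropped in favor of a ruff configuration and the whole tree is reformatted once, which is where the bulk of the 1508-file churn comes from. The excerpt below is a minimal sketch of what such a configuration typically looks like; the rule selection and option values are illustrative assumptions, not the exact settings from this commit.

# Hypothetical pyproject.toml excerpt -- illustrative only, not this commit's settings.
[tool.ruff]
line-length = 88  # ruff-format's default (Black-compatible), matching the reflowed lines below

[tool.ruff.lint]
select = [
    "E",  # pycodestyle errors
    "F",  # pyflakes
    "I",  # import sorting, replacing standalone isort
]

[tool.ruff.format]
quote-style = "double"

With that in place, a single pass of running "ruff format" and "ruff check --fix" over the repository rewrites the code mechanically, producing hunks like the ones below: hanging-indent argument lists collapse into parenthesized blocks, multi-line conditions are wrapped in their own parentheses, and multi-name imports get one name per line with a trailing comma.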


@@ -7,8 +7,10 @@ import asyncio
 import pytest
 
 from tests.utils import wait_for_gpu_memory_to_clear
-from tests.v1.shutdown.utils import (SHUTDOWN_TEST_THRESHOLD_BYTES,
-                                     SHUTDOWN_TEST_TIMEOUT_SEC)
+from tests.v1.shutdown.utils import (
+    SHUTDOWN_TEST_THRESHOLD_BYTES,
+    SHUTDOWN_TEST_TIMEOUT_SEC,
+)
 from vllm import LLM, AsyncEngineArgs, SamplingParams
 from vllm.distributed import get_tensor_model_parallel_rank
 from vllm.model_executor.models.llama import LlamaForCausalLM
@@ -26,8 +28,10 @@ def evil_forward(self, *args, **kwargs):
     if not hasattr(self, "num_calls"):
         self.num_calls = 0
 
-    if (self.num_calls == NUMBER_OF_GOOD_PASSES
-            and get_tensor_model_parallel_rank() == 0):
+    if (
+        self.num_calls == NUMBER_OF_GOOD_PASSES
+        and get_tensor_model_parallel_rank() == 0
+    ):
         raise Exception("Simulated illegal memory access on Rank 0!")
 
     self.num_calls += 1
@@ -37,10 +41,11 @@ def evil_forward(self, *args, **kwargs):
 @pytest.mark.asyncio
 @pytest.mark.parametrize("tensor_parallel_size", [2, 1])
 @pytest.mark.parametrize("model", MODELS)
-async def test_async_llm_model_error(monkeypatch, tensor_parallel_size: int,
-                                     model: str) -> None:
+async def test_async_llm_model_error(
+    monkeypatch, tensor_parallel_size: int, model: str
+) -> None:
     """Test that AsyncLLM propagates a forward pass error and frees memory.
 
     AsyncLLM always uses an MP client.
     """
     if cuda_device_count_stateless() < tensor_parallel_size:
@@ -49,15 +54,15 @@ async def test_async_llm_model_error(monkeypatch, tensor_parallel_size: int,
     # Monkeypatch an error in the model.
     monkeypatch.setattr(LlamaForCausalLM, "forward", evil_forward)
 
-    engine_args = AsyncEngineArgs(model=model,
-                                  enforce_eager=True,
-                                  tensor_parallel_size=tensor_parallel_size)
+    engine_args = AsyncEngineArgs(
+        model=model, enforce_eager=True, tensor_parallel_size=tensor_parallel_size
+    )
     async_llm = AsyncLLM.from_engine_args(engine_args)
 
     async def generate(request_id: str):
-        generator = async_llm.generate("Hello my name is",
-                                       request_id=request_id,
-                                       sampling_params=SamplingParams())
+        generator = async_llm.generate(
+            "Hello my name is", request_id=request_id, sampling_params=SamplingParams()
+        )
         try:
             async for _ in generator:
                 pass
@@ -77,9 +82,9 @@ async def test_async_llm_model_error(monkeypatch, tensor_parallel_size: int,
 
     # We should not be able to make another request.
     with pytest.raises(EngineDeadError):
-        async for _ in async_llm.generate("Hello my name is",
-                                          request_id="abc",
-                                          sampling_params=SamplingParams()):
+        async for _ in async_llm.generate(
+            "Hello my name is", request_id="abc", sampling_params=SamplingParams()
+        ):
             raise Exception("We should not get here.")
 
     # Confirm all the processes are cleaned up.
@@ -98,8 +103,9 @@ async def test_async_llm_model_error(monkeypatch, tensor_parallel_size: int,
 @pytest.mark.parametrize("enable_multiprocessing", [True])
 @pytest.mark.parametrize("tensor_parallel_size", [2, 1])
 @pytest.mark.parametrize("model", MODELS)
-def test_llm_model_error(monkeypatch, tensor_parallel_size: int,
-                         enable_multiprocessing: bool, model: str) -> None:
+def test_llm_model_error(
+    monkeypatch, tensor_parallel_size: int, enable_multiprocessing: bool, model: str
+) -> None:
     """Test that LLM propagates a forward pass error and frees memory.
     TODO(andy) - LLM without multiprocessing; LLM with multiprocessing
     and >1 rank
@@ -108,19 +114,17 @@ def test_llm_model_error(monkeypatch, tensor_parallel_size: int,
         pytest.skip(reason="Not enough CUDA devices")
 
     with monkeypatch.context() as m:
         MP_VALUE = "1" if enable_multiprocessing else "0"
         m.setenv("VLLM_ENABLE_V1_MULTIPROCESSING", MP_VALUE)
 
         # Monkeypatch an error in the model.
         m.setattr(LlamaForCausalLM, "forward", evil_forward)
 
-        llm = LLM(model=model,
-                  enforce_eager=True,
-                  tensor_parallel_size=tensor_parallel_size)
+        llm = LLM(
+            model=model, enforce_eager=True, tensor_parallel_size=tensor_parallel_size
+        )
 
-        with pytest.raises(
-                EngineDeadError if enable_multiprocessing else Exception):
+        with pytest.raises(EngineDeadError if enable_multiprocessing else Exception):
             llm.generate("Hello my name is Robert and I")
 
     # Confirm all the processes are cleaned up.