Convert formatting to use ruff instead of yapf + isort (#26247)
Signed-off-by: Harry Mellor <19981378+hmellor@users.noreply.github.com>
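Background for the diff below: ruff's formatter produces black-compatible output, so the patch replaces yapf's aligned continuation lines with parenthesis-hugging blocks indented by four spaces, and import sorting moves from isort to ruff (presumably via its isort-compatible "I" lint rules). The hunks are mechanical reformatting only; no behavior changes. A minimal sketch of the style difference, using a hypothetical helper rather than code from this patch:

# Hypothetical helper, defined only so the snippet runs on its own.
def build_engine(model: str, enforce_eager: bool, tensor_parallel_size: int) -> dict:
    return {"model": model, "eager": enforce_eager, "tp": tensor_parallel_size}

# Old layout (yapf): continuation lines aligned under the opening parenthesis.
engine = build_engine(model="meta-llama/Llama-3.2-1B",
                      enforce_eager=True,
                      tensor_parallel_size=2)

# New layout (ruff format): arguments moved onto their own indented block, with
# the closing parenthesis dedented back to the start of the statement.
engine = build_engine(
    model="meta-llama/Llama-3.2-1B", enforce_eager=True, tensor_parallel_size=2
)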
@@ -5,8 +5,10 @@
 import pytest

 from tests.utils import wait_for_gpu_memory_to_clear
-from tests.v1.shutdown.utils import (SHUTDOWN_TEST_THRESHOLD_BYTES,
-                                     SHUTDOWN_TEST_TIMEOUT_SEC)
+from tests.v1.shutdown.utils import (
+    SHUTDOWN_TEST_THRESHOLD_BYTES,
+    SHUTDOWN_TEST_TIMEOUT_SEC,
+)
 from vllm import LLM, SamplingParams
 from vllm.engine.arg_utils import AsyncEngineArgs
 from vllm.sampling_params import RequestOutputKind
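The import hunk above shows the isort-to-ruff change in miniature: the old configuration wrapped a long from-import by aligning names under the opening parenthesis, while ruff format explodes it to one name per line with a trailing comma once it no longer fits on a single line. A small sketch with stdlib names, chosen only so the snippet runs; they are not from this patch:

# Old layout: wrapped names aligned under the opening parenthesis.
from concurrent.futures import (ALL_COMPLETED, FIRST_COMPLETED, FIRST_EXCEPTION,
                                ProcessPoolExecutor, ThreadPoolExecutor)

# New layout: when the import cannot fit on one line, ruff format puts each
# name on its own line and adds a trailing comma.
from concurrent.futures import (
    ALL_COMPLETED,
    FIRST_COMPLETED,
    FIRST_EXCEPTION,
    ProcessPoolExecutor,
    ThreadPoolExecutor,
)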
@@ -21,8 +23,9 @@ MODELS = ["meta-llama/Llama-3.2-1B"]
 @pytest.mark.parametrize("model", MODELS)
 @pytest.mark.parametrize("tensor_parallel_size", [2, 1])
 @pytest.mark.parametrize("send_one_request", [False, True])
-async def test_async_llm_delete(model: str, tensor_parallel_size: int,
-                                send_one_request: bool) -> None:
+async def test_async_llm_delete(
+    model: str, tensor_parallel_size: int, send_one_request: bool
+) -> None:
     """Test that AsyncLLM frees GPU memory upon deletion.
     AsyncLLM always uses an MP client.

@@ -34,19 +37,21 @@ async def test_async_llm_delete(model: str, tensor_parallel_size: int,
     if cuda_device_count_stateless() < tensor_parallel_size:
         pytest.skip(reason="Not enough CUDA devices")

-    engine_args = AsyncEngineArgs(model=model,
-                                  enforce_eager=True,
-                                  tensor_parallel_size=tensor_parallel_size)
+    engine_args = AsyncEngineArgs(
+        model=model, enforce_eager=True, tensor_parallel_size=tensor_parallel_size
+    )

     # Instantiate AsyncLLM; make request to complete any deferred
     # initialization; then delete instance
     async_llm = AsyncLLM.from_engine_args(engine_args)
     if send_one_request:
         async for _ in async_llm.generate(
-                "Hello my name is",
-                request_id="abc",
-                sampling_params=SamplingParams(
-                    max_tokens=1, output_kind=RequestOutputKind.DELTA)):
+            "Hello my name is",
+            request_id="abc",
+            sampling_params=SamplingParams(
+                max_tokens=1, output_kind=RequestOutputKind.DELTA
+            ),
+        ):
             pass
     del async_llm

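A note on why the hunks around this point look different from one another: ruff format first tries to fit all arguments or parameters on a single indented line, which is why AsyncEngineArgs(...) above collapses to one line, and only when that still exceeds the line-length limit does it fall back to one item per line and add a trailing comma, which is what happens to the test_llm_delete signature in the next hunk. Once that trailing comma is present, the formatter keeps the exploded layout (the "magic trailing comma"). A sketch with hypothetical test signatures, assuming ruff's default 88-column limit (the project may configure a different value):

# Parameters fit on one indented line (under the line-length limit), so the
# formatter keeps them together and adds no trailing comma.
async def delete_small(
    model: str, tensor_parallel_size: int, send_one_request: bool
) -> None:
    pass


# The collapsed parameter line would exceed the limit, so each parameter gets
# its own line plus a trailing comma; that trailing comma then keeps this
# layout stable on later formatting runs.
def delete_large(
    monkeypatch,
    model: str,
    tensor_parallel_size: int,
    enable_multiprocessing: bool,
    send_one_request: bool,
) -> None:
    pass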
@@ -62,9 +67,13 @@ async def test_async_llm_delete(model: str, tensor_parallel_size: int,
 @pytest.mark.parametrize("tensor_parallel_size", [2, 1])
 @pytest.mark.parametrize("enable_multiprocessing", [True])
 @pytest.mark.parametrize("send_one_request", [False, True])
-def test_llm_delete(monkeypatch, model: str, tensor_parallel_size: int,
-                    enable_multiprocessing: bool,
-                    send_one_request: bool) -> None:
+def test_llm_delete(
+    monkeypatch,
+    model: str,
+    tensor_parallel_size: int,
+    enable_multiprocessing: bool,
+    send_one_request: bool,
+) -> None:
     """Test that LLM frees GPU memory upon deletion.
     TODO(andy) - LLM without multiprocessing.

@@ -83,12 +92,13 @@ def test_llm_delete(monkeypatch, model: str, tensor_parallel_size: int,

     # Instantiate LLM; make request to complete any deferred
     # initialization; then delete instance
-    llm = LLM(model=model,
-              enforce_eager=True,
-              tensor_parallel_size=tensor_parallel_size)
+    llm = LLM(
+        model=model, enforce_eager=True, tensor_parallel_size=tensor_parallel_size
+    )
     if send_one_request:
-        llm.generate("Hello my name is",
-                     sampling_params=SamplingParams(max_tokens=1))
+        llm.generate(
+            "Hello my name is", sampling_params=SamplingParams(max_tokens=1)
+        )
     del llm

     # Confirm all the processes are cleaned up.