Convert formatting to use ruff instead of yapf + isort (#26247)

Signed-off-by: Harry Mellor <19981378+hmellor@users.noreply.github.com>
Author:       Harry Mellor
Date:         2025-10-05 15:06:22 +01:00
Committed by: GitHub
Parent:       17edd8a807
Commit:       d6953beb91

1508 changed files with 115244 additions and 94146 deletions
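
Nearly everything below is mechanical restyling. yapf aligns continuation arguments under the opening parenthesis, whereas ruff's formatter (Black-compatible, 88-character line length by default) either collapses a call onto a single line when it fits or explodes it to one argument per line with a trailing comma. A minimal sketch of the three layouts, using a hypothetical `frobnicate` helper that is not part of this commit:

    def frobnicate(*args, **kwargs):
        """Hypothetical helper, defined only so the example runs."""
        return args, kwargs

    # yapf (before): continuation arguments aligned under the opening parenthesis.
    result = frobnicate(1,
                        2,
                        flag=True)

    # ruff format (after): collapsed onto one line because it fits in 88 columns.
    result = frobnicate(1, 2, flag=True)

    # ruff format (after): when a call does not fit, one argument per line; the
    # trailing comma ("magic trailing comma") keeps it exploded on future runs.
    result = frobnicate(
        "a considerably longer positional argument that pushes past the limit",
        "another long positional argument",
        flag=True,
    )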

@@ -7,6 +7,7 @@ WARNING: This test runs in both single-node (4 GPUs) and multi-node
 all workers in a node other than the head node, which can cause the test
 to fail.
 """
+
 import json
 import os
 from dataclasses import dataclass
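
The one added line in this first hunk appears to be the blank line that ruff format, like Black, requires between a module docstring and the first statement. A minimal module laid out that way (contents hypothetical):

    """Hypothetical example module."""

    import json

    print(json.dumps({"ruff_formatted": True}))
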
@@ -55,26 +56,17 @@ class PPTestSettings:
     ):
         return PPTestSettings(
             parallel_setups=[
-                ParallelSetup(tp_size=tp_base,
-                              pp_size=pp_base,
-                              eager_mode=False),
-                ParallelSetup(tp_size=tp_base,
-                              pp_size=2 * pp_base,
-                              eager_mode=False),
-                ParallelSetup(tp_size=tp_base,
-                              pp_size=2 * pp_base,
-                              eager_mode=True),
-                ParallelSetup(tp_size=2 * tp_base,
-                              pp_size=pp_base,
-                              eager_mode=False),
-                ParallelSetup(tp_size=2 * tp_base,
-                              pp_size=pp_base,
-                              eager_mode=True),
+                ParallelSetup(tp_size=tp_base, pp_size=pp_base, eager_mode=False),
+                ParallelSetup(tp_size=tp_base, pp_size=2 * pp_base, eager_mode=False),
+                ParallelSetup(tp_size=tp_base, pp_size=2 * pp_base, eager_mode=True),
+                ParallelSetup(tp_size=2 * tp_base, pp_size=pp_base, eager_mode=False),
+                ParallelSetup(tp_size=2 * tp_base, pp_size=pp_base, eager_mode=True),
             ],
             distributed_backends=["mp", "ray"],
             runner=runner,
-            test_options=PPTestOptions(multi_node_only=multi_node_only,
-                                       load_format=load_format),
+            test_options=PPTestOptions(
+                multi_node_only=multi_node_only, load_format=load_format
+            ),
         )

     @staticmethod
@@ -86,17 +78,15 @@ class PPTestSettings:
         multi_node_only: bool = False,
         load_format: Optional[str] = None,
     ):
         return PPTestSettings(
             parallel_setups=[
-                ParallelSetup(tp_size=tp_base,
-                              pp_size=pp_base,
-                              eager_mode=True),
+                ParallelSetup(tp_size=tp_base, pp_size=pp_base, eager_mode=True),
             ],
             distributed_backends=["mp"],
             runner=runner,
-            test_options=PPTestOptions(multi_node_only=multi_node_only,
-                                       load_format=load_format),
+            test_options=PPTestOptions(
+                multi_node_only=multi_node_only, load_format=load_format
+            ),
         )

     def iter_params(self, model_id: str):
@@ -281,8 +271,10 @@ def _compare_tp(
     if num_gpus_available < tp_size * pp_size:
         pytest.skip(f"Need at least {tp_size} x {pp_size} GPUs")

     if VLLM_MULTI_NODE and distributed_backend == "mp":
-        pytest.skip("Skipping multi-node pipeline parallel test for "
-                    "multiprocessing distributed backend")
+        pytest.skip(
+            "Skipping multi-node pipeline parallel test for "
+            "multiprocessing distributed backend"
+        )

     if multi_node_only and not VLLM_MULTI_NODE:
         pytest.skip("Not in multi-node setting")
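
The skip message itself is unchanged: adjacent string literals are concatenated at compile time, so ruff only re-indents the literals inside the now-exploded call. A runnable check, reusing the message from this hunk:

    old_style = ("Skipping multi-node pipeline parallel test for "
                 "multiprocessing distributed backend")
    new_style = (
        "Skipping multi-node pipeline parallel test for "
        "multiprocessing distributed backend"
    )
    # Both spellings compile to the identical string content.
    assert old_style == new_style
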
@@ -357,20 +349,16 @@ def _compare_tp(
         "mp",
     ]

-    compare_two_settings(model_id,
-                         pp_args,
-                         tp_args,
-                         pp_env,
-                         tp_env,
-                         method=method)
+    compare_two_settings(model_id, pp_args, tp_args, pp_env, tp_env, method=method)


 @pytest.mark.parametrize(
-    ("model_id", "parallel_setup", "distributed_backend", "runner",
-     "test_options"),
+    ("model_id", "parallel_setup", "distributed_backend", "runner", "test_options"),
     [
-        params for model_id, settings in TEXT_GENERATION_MODELS.items()
-        for params in settings.iter_params(model_id) if model_id in TEST_MODELS
+        params
+        for model_id, settings in TEXT_GENERATION_MODELS.items()
+        for params in settings.iter_params(model_id)
+        if model_id in TEST_MODELS
     ],
 )
 @create_new_process_for_each_test()
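
For the parametrize inputs, yapf packed the comprehension clauses onto shared lines; ruff format gives the element expression and each for/if clause its own line once the comprehension exceeds the line limit. A self-contained sketch with dummy stand-ins for the real model tables:

    # Dummy data, for illustration only; the real tables map model IDs to
    # PPTestSettings instances.
    TEXT_GENERATION_MODELS = {"model-a": [1, 2], "model-b": [3]}
    TEST_MODELS = {"model-a"}

    # ruff format layout: element expression first, then one clause per line.
    params = [
        (model_id, setting)
        for model_id, settings in TEXT_GENERATION_MODELS.items()
        for setting in settings
        if model_id in TEST_MODELS
    ]
    assert params == [("model-a", 1), ("model-a", 2)]
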
@@ -382,22 +370,25 @@ def test_tp_language_generation(
     test_options: PPTestOptions,
     num_gpus_available,
 ):
-    _compare_tp(model_id,
-                parallel_setup,
-                distributed_backend,
-                runner,
-                test_options,
-                num_gpus_available,
-                method="generate",
-                is_multimodal=False)
+    _compare_tp(
+        model_id,
+        parallel_setup,
+        distributed_backend,
+        runner,
+        test_options,
+        num_gpus_available,
+        method="generate",
+        is_multimodal=False,
+    )


 @pytest.mark.parametrize(
-    ("model_id", "parallel_setup", "distributed_backend", "runner",
-     "test_options"),
+    ("model_id", "parallel_setup", "distributed_backend", "runner", "test_options"),
     [
-        params for model_id, settings in EMBEDDING_MODELS.items()
-        for params in settings.iter_params(model_id) if model_id in TEST_MODELS
+        params
+        for model_id, settings in EMBEDDING_MODELS.items()
+        for params in settings.iter_params(model_id)
+        if model_id in TEST_MODELS
     ],
 )
 @create_new_process_for_each_test()
@@ -409,22 +400,25 @@ def test_tp_language_embedding(
     test_options: PPTestOptions,
     num_gpus_available,
 ):
-    _compare_tp(model_id,
-                parallel_setup,
-                distributed_backend,
-                runner,
-                test_options,
-                num_gpus_available,
-                method="encode",
-                is_multimodal=False)
+    _compare_tp(
+        model_id,
+        parallel_setup,
+        distributed_backend,
+        runner,
+        test_options,
+        num_gpus_available,
+        method="encode",
+        is_multimodal=False,
+    )


 @pytest.mark.parametrize(
-    ("model_id", "parallel_setup", "distributed_backend", "runner",
-     "test_options"),
+    ("model_id", "parallel_setup", "distributed_backend", "runner", "test_options"),
     [
-        params for model_id, settings in MULTIMODAL_MODELS.items()
-        for params in settings.iter_params(model_id) if model_id in TEST_MODELS
+        params
+        for model_id, settings in MULTIMODAL_MODELS.items()
+        for params in settings.iter_params(model_id)
+        if model_id in TEST_MODELS
     ],
 )
 @create_new_process_for_each_test()
@@ -436,11 +430,13 @@ def test_tp_multimodal_generation(
     test_options: PPTestOptions,
     num_gpus_available,
 ):
-    _compare_tp(model_id,
-                parallel_setup,
-                distributed_backend,
-                runner,
-                test_options,
-                num_gpus_available,
-                method="generate",
-                is_multimodal=True)
+    _compare_tp(
+        model_id,
+        parallel_setup,
+        distributed_backend,
+        runner,
+        test_options,
+        num_gpus_available,
+        method="generate",
+        is_multimodal=True,
+    )
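
A conversion like this is straightforward to sanity-check after the fact: `ruff format --check` exits non-zero if any file would still be reformatted, and `ruff check` runs the lint rules (including the import-sorting `I` rules that take over from isort). A minimal sketch that shells out to both, assuming `ruff` is on PATH:

    import subprocess
    import sys

    def run(cmd: list[str]) -> int:
        """Run one ruff subcommand and report its exit status."""
        print("$", " ".join(cmd))
        return subprocess.run(cmd).returncode

    # Non-zero from either command means the tree is not ruff-clean.
    status = run(["ruff", "format", "--check", "."]) or run(["ruff", "check", "."])
    sys.exit(status)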