Convert formatting to use ruff instead of yapf + isort (#26247)

Signed-off-by: Harry Mellor <19981378+hmellor@users.noreply.github.com>
Author: Harry Mellor
Date: 2025-10-05 15:06:22 +01:00
Committed by: GitHub
parent 17edd8a807
commit d6953beb91
1508 changed files with 115244 additions and 94146 deletions
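
A note on what the conversion involves: ruff replaces both tools at once. "ruff format" is a black-style formatter standing in for yapf, and the "I" lint rules subsume isort. Below is a minimal sketch of the kind of pyproject.toml configuration such a migration typically lands on; the key names are real ruff options, but the rule selection and values here are assumptions for illustration, not copied from this commit:

# Hypothetical pyproject.toml excerpt; the exact configuration
# landed by this commit may differ.
[tool.ruff]
line-length = 88              # ruff's black-compatible default

[tool.ruff.lint]
select = ["E", "F", "I"]      # "I" enables isort-compatible import sorting

[tool.ruff.lint.isort]
known-first-party = ["vllm"]  # assumption: keeps vllm imports grouped as before

[tool.ruff.format]
docstring-code-format = true  # optional: also format code inside docstrings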


@@ -6,16 +6,22 @@ import warnings
 import pytest
 import torch.cuda
 
-from vllm.model_executor.models import (is_pooling_model,
-                                        is_text_generation_model,
-                                        supports_multimodal)
-from vllm.model_executor.models.adapters import (as_embedding_model,
-                                                 as_reward_model,
-                                                 as_seq_cls_model)
-from vllm.model_executor.models.registry import (_MULTIMODAL_MODELS,
-                                                 _SPECULATIVE_DECODING_MODELS,
-                                                 _TEXT_GENERATION_MODELS,
-                                                 ModelRegistry)
+from vllm.model_executor.models import (
+    is_pooling_model,
+    is_text_generation_model,
+    supports_multimodal,
+)
+from vllm.model_executor.models.adapters import (
+    as_embedding_model,
+    as_reward_model,
+    as_seq_cls_model,
+)
+from vllm.model_executor.models.registry import (
+    _MULTIMODAL_MODELS,
+    _SPECULATIVE_DECODING_MODELS,
+    _TEXT_GENERATION_MODELS,
+    ModelRegistry,
+)
 from vllm.platforms import current_platform
 
 from ..utils import create_new_process_for_each_test
@@ -34,8 +40,7 @@ def test_registry_imports(model_arch):
     if model_arch in _SPECULATIVE_DECODING_MODELS:
         return  # Ignore these models which do not have a unified format
 
-    if (model_arch in _TEXT_GENERATION_MODELS
-            or model_arch in _MULTIMODAL_MODELS):
+    if model_arch in _TEXT_GENERATION_MODELS or model_arch in _MULTIMODAL_MODELS:
         assert is_text_generation_model(model_cls)
 
     # All vLLM models should be convertible to a pooling model
@@ -48,13 +53,16 @@ def test_registry_imports(model_arch):
 
 
 @create_new_process_for_each_test()
-@pytest.mark.parametrize("model_arch,is_mm,init_cuda,is_ce", [
-    ("LlamaForCausalLM", False, False, False),
-    ("LlavaForConditionalGeneration", True, True, False),
-    ("BertForSequenceClassification", False, False, True),
-    ("RobertaForSequenceClassification", False, False, True),
-    ("XLMRobertaForSequenceClassification", False, False, True),
-])
+@pytest.mark.parametrize(
+    "model_arch,is_mm,init_cuda,is_ce",
+    [
+        ("LlamaForCausalLM", False, False, False),
+        ("LlavaForConditionalGeneration", True, True, False),
+        ("BertForSequenceClassification", False, False, True),
+        ("RobertaForSequenceClassification", False, False, True),
+        ("XLMRobertaForSequenceClassification", False, False, True),
+    ],
+)
 def test_registry_model_property(model_arch, is_mm, init_cuda, is_ce):
     model_info = ModelRegistry._try_inspect_model_cls(model_arch)
     assert model_info is not None
@@ -70,7 +78,8 @@ def test_registry_model_property(model_arch, is_mm, init_cuda, is_ce):
         warnings.warn(
             "This model no longer initializes CUDA on import. "
             "Please test using a different one.",
-            stacklevel=2)
+            stacklevel=2,
+        )
 
 
 @create_new_process_for_each_test()
@@ -82,7 +91,8 @@
         # ("MLPSpeculatorPreTrainedModel", False, False),
         ("DeepseekV2ForCausalLM", True, False),
         ("Qwen2VLForConditionalGeneration", True, True),
-    ])
+    ],
+)
 def test_registry_is_pp(model_arch, is_pp, init_cuda):
     model_info = ModelRegistry._try_inspect_model_cls(model_arch)
     assert model_info is not None
@@ -97,13 +107,16 @@ def test_registry_is_pp(model_arch, is_pp, init_cuda):
         warnings.warn(
             "This model no longer initializes CUDA on import. "
            "Please test using a different one.",
-            stacklevel=2)
+            stacklevel=2,
+        )
 
 
 def test_hf_registry_coverage():
-    untested_archs = (ModelRegistry.get_supported_archs() -
-                      HF_EXAMPLE_MODELS.get_supported_archs())
+    untested_archs = (
+        ModelRegistry.get_supported_archs() - HF_EXAMPLE_MODELS.get_supported_archs()
+    )
 
     assert not untested_archs, (
         "Please add the following architectures to "
-        f"`tests/models/registry.py`: {untested_archs}")
+        f"`tests/models/registry.py`: {untested_archs}"
+    )