Convert formatting to use ruff instead of yapf + isort (#26247)

Signed-off-by: Harry Mellor <19981378+hmellor@users.noreply.github.com>
This commit is contained in:
Harry Mellor
2025-10-05 15:06:22 +01:00
committed by GitHub
parent 17edd8a807
commit d6953beb91
1508 changed files with 115244 additions and 94146 deletions

View File

@@ -4,13 +4,15 @@
from setuptools import setup

# Packaging stub for the dummy-platform plugin used by vLLM's plugin tests.
# Registers two entry points that vLLM discovers at startup: a platform
# plugin and a general (custom-ops) plugin.
setup(
    name="vllm_add_dummy_platform",
    version="0.1",
    packages=["vllm_add_dummy_platform"],
    entry_points={
        "vllm.platform_plugins": [
            "dummy_platform_plugin = vllm_add_dummy_platform:dummy_platform_plugin"  # noqa
        ],
        "vllm.general_plugins": [
            "dummy_custom_ops = vllm_add_dummy_platform:register_ops"
        ],
    },
)

View File

@@ -1,12 +1,10 @@
# SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
from vllm.attention.backends.placeholder_attn import PlaceholderAttentionBackend


class DummyAttentionBackend(PlaceholderAttentionBackend):
    """Placeholder attention backend used to exercise the platform-plugin path."""

    @staticmethod
    def get_name() -> str:
        # Name under which this backend is reported to vLLM.
        return "Dummy_Backend"

View File

@@ -15,6 +15,5 @@ class DummyRotaryEmbedding(RotaryEmbedding):
super().__init__(*args, **kwargs)
self.addition_config = True
def forward_oot(self, *args, **kwargs) -> tuple[torch.Tensor, torch.Tensor]:
    """Out-of-tree forward hook: defer to the parent implementation unchanged."""
    return super().forward_oot(*args, **kwargs)

View File

@@ -24,7 +24,16 @@ class DummyPlatform(Platform):
# Activate custom ops for v1.
compilation_config.custom_ops = ["all"]
def get_attn_backend_cls(
    self,
    backend_name,
    head_size,
    dtype,
    kv_cache_dtype,
    block_size,
    use_v1,
    use_mla,
    has_sink,
    use_sparse,
):
    """Return the import path of the attention backend class to use.

    All selection parameters are accepted for interface compatibility but
    ignored: this dummy platform always routes attention to
    DummyAttentionBackend.
    """
    return "vllm_add_dummy_platform.dummy_attention_backend.DummyAttentionBackend"  # noqa E501