Convert formatting to use ruff instead of yapf + isort (#26247)
Signed-off-by: Harry Mellor <19981378+hmellor@users.noreply.github.com>
This commit is contained in:
@@ -4,13 +4,15 @@
|
||||
from setuptools import setup
|
||||
|
||||
# Package metadata for the dummy-platform test plugin.
# Registers two vLLM plugin entry points:
#   - vllm.platform_plugins: the out-of-tree platform itself
#   - vllm.general_plugins:  custom-op registration hook
setup(
    name="vllm_add_dummy_platform",
    version="0.1",
    packages=["vllm_add_dummy_platform"],
    entry_points={
        "vllm.platform_plugins": [
            "dummy_platform_plugin = vllm_add_dummy_platform:dummy_platform_plugin"  # noqa
        ],
        "vllm.general_plugins": [
            "dummy_custom_ops = vllm_add_dummy_platform:register_ops"
        ],
    },
)
|
||||
|
||||
@@ -1,12 +1,10 @@
|
||||
# SPDX-License-Identifier: Apache-2.0
|
||||
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
|
||||
|
||||
from vllm.attention.backends.placeholder_attn import (
|
||||
PlaceholderAttentionBackend)
|
||||
from vllm.attention.backends.placeholder_attn import PlaceholderAttentionBackend
|
||||
|
||||
|
||||
class DummyAttentionBackend(PlaceholderAttentionBackend):
    """Attention backend stub used by the dummy out-of-tree platform plugin."""

    @staticmethod
    def get_name() -> str:
        # Name under which this backend is reported to vLLM's backend selection.
        return "Dummy_Backend"
|
||||
|
||||
@@ -15,6 +15,5 @@ class DummyRotaryEmbedding(RotaryEmbedding):
|
||||
super().__init__(*args, **kwargs)
|
||||
self.addition_config = True
|
||||
|
||||
def forward_oot(self, *args, **kwargs) -> tuple[torch.Tensor, torch.Tensor]:
    """Out-of-tree forward hook.

    Delegates directly to the parent class's ``forward_oot``; it exists only
    so the plugin exercises vLLM's out-of-tree dispatch path for rotary
    embeddings (presumably returning (query, key) tensors — inherited
    contract, not visible here).
    """
    return super().forward_oot(*args, **kwargs)
|
||||
|
||||
@@ -24,7 +24,16 @@ class DummyPlatform(Platform):
|
||||
# Activate custom ops for v1.
|
||||
compilation_config.custom_ops = ["all"]
|
||||
|
||||
def get_attn_backend_cls(
    self,
    backend_name,
    head_size,
    dtype,
    kv_cache_dtype,
    block_size,
    use_v1,
    use_mla,
    has_sink,
    use_sparse,
):
    """Return the fully-qualified class path of this platform's attention backend.

    Every selector argument (backend name, head size, dtype, cache layout,
    feature flags, ...) is ignored: the dummy platform unconditionally
    answers with its own `DummyAttentionBackend`.
    """
    return "vllm_add_dummy_platform.dummy_attention_backend.DummyAttentionBackend"  # noqa E501
|
||||
|
||||
Reference in New Issue
Block a user