Convert formatting to use ruff instead of yapf + isort (#26247)

Signed-off-by: Harry Mellor <19981378+hmellor@users.noreply.github.com>
Author:    Harry Mellor
Date:      2025-10-05 15:06:22 +01:00
Committer: GitHub
Parent:    17edd8a807
Commit:    d6953beb91
1508 changed files with 115244 additions and 94146 deletions
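Most of the churn in the diff below is mechanical: ruff's Black-compatible formatter collapses wrapped calls that fit within the line limit back onto a single line and expands any bracketed list that keeps a trailing comma to one item per line (the "magic trailing comma"), replacing yapf's hanging indents, while ruff's import sorter takes over from isort. A minimal sketch of the pattern, using an import that actually appears in this file:

# yapf + isort: continuation lines aligned to the opening parenthesis
from vllm.config import (CacheConfig, ModelConfig,
                         set_current_vllm_config)

# ruff format: the trailing comma keeps one name per line
from vllm.config import (
    CacheConfig,
    ModelConfig,
    set_current_vllm_config,
)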


@@ -4,18 +4,25 @@
 import pytest
 
 from vllm.attention.layer import Attention
-from vllm.config import (CacheConfig, ModelConfig, SchedulerConfig, VllmConfig,
-                         set_current_vllm_config)
+from vllm.config import (
+    CacheConfig,
+    ModelConfig,
+    SchedulerConfig,
+    VllmConfig,
+    set_current_vllm_config,
+)
 from vllm.pooling_params import PoolingParams
 from vllm.sampling_params import SamplingParams
 from vllm.utils import GiB_bytes
-from vllm.v1.core.kv_cache_utils import (estimate_max_model_len,
-                                         get_kv_cache_configs)
-from vllm.v1.core.sched.output import (CachedRequestData, NewRequestData,
-                                       SchedulerOutput)
+from vllm.v1.core.kv_cache_utils import estimate_max_model_len, get_kv_cache_configs
+from vllm.v1.core.sched.output import CachedRequestData, NewRequestData, SchedulerOutput
 from vllm.v1.worker.tpu_model_runner import (
-    TPUModelRunner, _get_padded_num_reqs_with_upper_limit,
-    _get_padded_token_len, _get_req_paddings, _get_token_paddings)
+    TPUModelRunner,
+    _get_padded_num_reqs_with_upper_limit,
+    _get_padded_token_len,
+    _get_req_paddings,
+    _get_token_paddings,
+)
 
 
 def get_vllm_config():
@@ -67,10 +74,11 @@ def _schedule_new_request(*req_ids: str) -> SchedulerOutput:
                 mm_features=[],
                 sampling_params=SamplingParams(),
                 pooling_params=PoolingParams(),
-                block_ids=([0], ),  # block_ids should be tuple[list[int]]
+                block_ids=([0],),  # block_ids should be tuple[list[int]]
                 num_computed_tokens=0,
                 lora_request=None,
-            ))
+            )
+        )
         num_scheduled_tokens[req_id] = 3
         total_num_scheduled_tokens += num_scheduled_tokens[req_id]
@@ -99,7 +107,7 @@ def _is_req_added(model_runner, req_id: str) -> bool:
 def _is_req_state_block_table_match(model_runner, req_id: str) -> bool:
     """Check if the request state block IDs match the block table.
-
+
     This function handles both legacy BlockTable and new MultiGroupBlockTable
     structures for backward compatibility.
     """
@@ -206,7 +214,7 @@ def test_update_states_request_resumed(model_runner):
         req_ids=[req_id],
         resumed_from_preemption=[False],
         new_token_ids=[[]],
-        new_block_ids=[([], )],
+        new_block_ids=[([],)],
         num_computed_tokens=[0],
     )
@@ -303,27 +311,23 @@ def test_get_paddings():
     # Bucketed padding
     min_token_size, max_token_size, padding_gap = 16, 512, 64
     expected_paddings = [16, 32, 64, 128, 192, 256, 320, 384, 448, 512]
-    actual_paddings = _get_token_paddings(min_token_size, max_token_size,
-                                          padding_gap)
+    actual_paddings = _get_token_paddings(min_token_size, max_token_size, padding_gap)
 
     # Bucketed padding with max_token_size not a power of two.
     max_token_size = 317
     expected_paddings = [16, 32, 64, 128, 192, 256, 320]
-    actual_paddings = _get_token_paddings(min_token_size, max_token_size,
-                                          padding_gap)
+    actual_paddings = _get_token_paddings(min_token_size, max_token_size, padding_gap)
     assert actual_paddings == expected_paddings
 
     # Exponential padding.
     max_token_size, padding_gap = 1024, 0
     expected_paddings = [16, 32, 64, 128, 256, 512, 1024]
-    actual_paddings = _get_token_paddings(min_token_size, max_token_size,
-                                          padding_gap)
+    actual_paddings = _get_token_paddings(min_token_size, max_token_size, padding_gap)
     assert actual_paddings == expected_paddings
 
     # Exponential padding with max_token_size not a power of two.
     max_token_size = 317
     expected_paddings = [16, 32, 64, 128, 256, 512]
-    actual_paddings = _get_token_paddings(min_token_size, max_token_size,
-                                          padding_gap)
+    actual_paddings = _get_token_paddings(min_token_size, max_token_size, padding_gap)
     assert actual_paddings == expected_paddings
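The expected lists above pin down the padding behaviour: sizes double from min_token_size until they reach twice padding_gap and then grow linearly in steps of padding_gap, while padding_gap == 0 selects pure exponential (power-of-two) growth. A rough sketch inferred from these test expectations only, not taken from the actual vLLM _get_token_paddings implementation:

def token_paddings_sketch(min_size: int, max_size: int, gap: int) -> list[int]:
    """Illustrative reconstruction; matches the expected lists in the test above."""
    paddings, size = [], min_size
    if gap == 0:
        # exponential: keep doubling until max_size is covered
        while True:
            paddings.append(size)
            if size >= max_size:
                break
            size *= 2
    else:
        # bucketed: double until 2 * gap, then step linearly by gap
        while size < 2 * gap and size < max_size:
            paddings.append(size)
            size *= 2
        while size < max_size + gap:
            paddings.append(size)
            size += gap
    return paddings

assert token_paddings_sketch(16, 512, 64) == [16, 32, 64, 128, 192, 256, 320, 384, 448, 512]
assert token_paddings_sketch(16, 317, 64) == [16, 32, 64, 128, 192, 256, 320]
assert token_paddings_sketch(16, 1024, 0) == [16, 32, 64, 128, 256, 512, 1024]
assert token_paddings_sketch(16, 317, 0) == [16, 32, 64, 128, 256, 512]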
@@ -350,32 +354,31 @@ def test_get_req_paddings():
     assert _get_req_paddings(8, 36) == [8, 16, 32, 36]
 
 
-def test_init_kv_cache_with_kv_sharing_invalid_target_layer_order(
-        model_runner):
+def test_init_kv_cache_with_kv_sharing_invalid_target_layer_order(model_runner):
     layer_0 = "model.layers.0.self_attn.attn"
     layer_1 = "model.layers.1.self_attn.attn"
     error_msg = f"{layer_1} must come before the current layer"
     vllm_config = model_runner.vllm_config
-    with pytest.raises(ValueError, match=error_msg), \
-            set_current_vllm_config(vllm_config):
+    with (
+        pytest.raises(ValueError, match=error_msg),
+        set_current_vllm_config(vllm_config),
+    ):
         fwd_context = {
             # initialization below will fail because target layer is invalid;
             # the target layer needs to come before layer 1
-            layer_0:
-            Attention(
+            layer_0: Attention(
                 num_heads=8,
                 head_size=128,
                 scale=1.0,
                 prefix=layer_0,
                 kv_sharing_target_layer_name=layer_1,
             ),
-            layer_1:
-            Attention(
+            layer_1: Attention(
                 num_heads=8,
                 head_size=128,
                 scale=1.0,
                 prefix=layer_1,
-            )
+            ),
         }
         # suppress var not used error
         assert fwd_context is not None
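The with-statement rewrite above (repeated in the following hunks) swaps backslash continuation for parenthesized context managers, which CPython's parser has accepted since 3.9 and which are an official part of the grammar from Python 3.10. A standalone illustration, with an arbitrary file name:

import contextlib

with (
    contextlib.suppress(FileNotFoundError),  # several managers, one per line
    open("example.txt", "w", encoding="utf-8") as f,  # no backslash continuation needed
):
    f.write("parenthesized context managers\n")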
@@ -387,25 +390,25 @@ def test_init_kv_cache_with_kv_sharing_target_layer_not_exist(model_runner):
     invalid_layer = "model.layers.0.cross_attn.attn"
     error_msg = f"{invalid_layer} is not a valid Attention layer in the model"
     vllm_config = model_runner.vllm_config
-    with pytest.raises(ValueError, match=error_msg), \
-            set_current_vllm_config(vllm_config):
+    with (
+        pytest.raises(ValueError, match=error_msg),
+        set_current_vllm_config(vllm_config),
+    ):
         fwd_context = {
-            layer_0:
-            Attention(
+            layer_0: Attention(
                 num_heads=8,
                 head_size=128,
                 scale=1.0,
                 prefix=layer_0,
             ),
-            layer_1:
-            Attention(
+            layer_1: Attention(
                 num_heads=8,
                 head_size=128,
                 scale=1.0,
                 prefix=layer_1,
                 # invalid layer: cross_attn.atn doesn't exist!
                 kv_sharing_target_layer_name=invalid_layer,
-            )
+            ),
         }
         # suppress var not used error
         assert fwd_context is not None
@@ -416,26 +419,26 @@ def test_init_kv_cache_with_kv_sharing_target_same_as_current(model_runner):
     layer_1 = "model.layers.1.self_attn.attn"
     error_msg = f"{layer_1} cannot be the same as the current layer"
     vllm_config = model_runner.vllm_config
-    with pytest.raises(ValueError, match=error_msg), \
-            set_current_vllm_config(vllm_config):
+    with (
+        pytest.raises(ValueError, match=error_msg),
+        set_current_vllm_config(vllm_config),
+    ):
         fwd_context = {
             # initialization below will fail because target layer is invalid;
             # the target layer needs to come before layer 1
-            layer_0:
-            Attention(
+            layer_0: Attention(
                 num_heads=8,
                 head_size=128,
                 scale=1.0,
                 prefix=layer_0,
             ),
-            layer_1:
-            Attention(
+            layer_1: Attention(
                 num_heads=8,
                 head_size=128,
                 scale=1.0,
                 prefix=layer_1,
                 kv_sharing_target_layer_name=layer_1,
-            )
+            ),
         }
         # suppress var not used error
         assert fwd_context is not None
@@ -447,20 +450,18 @@ def test_init_kv_cache_without_kv_sharing():
     vllm_config = get_vllm_config()
     with set_current_vllm_config(vllm_config):
         fwd_context = {
-            layer_0:
-            Attention(
+            layer_0: Attention(
                 num_heads=8,
                 head_size=128,
                 scale=1.0,
                 prefix=layer_0,
             ),
-            layer_1:
-            Attention(
+            layer_1: Attention(
                 num_heads=8,
                 head_size=128,
                 scale=1.0,
                 prefix=layer_1,
-            )
+            ),
         }
         # suppress var not used error
         assert fwd_context is not None
@@ -475,17 +476,17 @@ def test_init_kv_cache_without_kv_sharing():
     available_memory = 20 * GiB_bytes
     # page size for each layer KV can be calculated as
     # 2 (non-MLA) * 8 (num_heads) * 128 (head_dim)
-    # * 2 (bfloat16, kv_cache dtype) * 128 (block_size) = 512KB
+    # * 2 (bfloat16, kv_cache dtype) * 128 (block_size) = 512KB
     num_expected_blocks = 20480  # 20GB / 512KB / 2 (num layers)
-    kv_cache_config = get_kv_cache_configs(vllm_config, [kv_cache_spec],
-                                           [available_memory])[0]
+    kv_cache_config = get_kv_cache_configs(
+        vllm_config, [kv_cache_spec], [available_memory]
+    )[0]
     assert kv_cache_config.num_blocks == num_expected_blocks
     assert len(kv_cache_config.kv_cache_tensors) == 2
     assert kv_cache_config.kv_cache_tensors[0].size == available_memory // 2
     assert kv_cache_config.kv_cache_tensors[1].size == available_memory // 2
 
-    max_context_len =\
-        estimate_max_model_len(vllm_config, kv_cache_spec, 5 * GiB_bytes)
+    max_context_len = estimate_max_model_len(vllm_config, kv_cache_spec, 5 * GiB_bytes)
     # max context len with KV sharing should be 2x as large as without
     # max_context_len = available_memory / (page_size / block_size) / num_caches
     # max_context_len = 5GB / (512KB / 128) / 2 = 655360
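The constants asserted above follow directly from the arithmetic in the comments; a quick sanity check in plain Python, mirroring the comments rather than any vLLM internals:

GiB = 1024**3
page_size = 2 * 8 * 128 * 2 * 128  # KV pair * num_heads * head_dim * bf16 bytes * block_size
assert page_size == 512 * 1024  # 512 KiB per block, per layer
assert 20 * GiB // page_size // 2 == 20480  # blocks when two layers split 20 GiB
assert 5 * GiB // (page_size // 128) // 2 == 655360  # tokens: 5 GiB / bytes-per-token / 2 caches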
@@ -495,8 +496,9 @@ def test_init_kv_cache_without_kv_sharing():
     # this will only allocate 2 block worth of memory (2 * 512kb)
     kv_cache_config.num_blocks = 1
     for kv_cache_tensor in kv_cache_config.kv_cache_tensors:
-        kv_cache_tensor.size = (
-            kv_cache_spec[kv_cache_tensor.shared_by[0]].page_size_bytes)
+        kv_cache_tensor.size = kv_cache_spec[
+            kv_cache_tensor.shared_by[0]
+        ].page_size_bytes
 
     model_runner.initialize_kv_cache(kv_cache_config)
@@ -518,21 +520,19 @@ def test_init_kv_cache_with_kv_sharing_valid():
     vllm_config = get_vllm_config()
     with set_current_vllm_config(vllm_config):
         fwd_context = {
-            layer_0:
-            Attention(
+            layer_0: Attention(
                 num_heads=8,
                 head_size=128,
                 scale=1.0,
                 prefix=layer_0,
             ),
-            layer_1:
-            Attention(
+            layer_1: Attention(
                 num_heads=8,
                 head_size=128,
                 scale=1.0,
                 prefix=layer_1,
                 kv_sharing_target_layer_name="model.layers.0.self_attn.attn",
-            )
+            ),
         }
         # suppress var not used error
         assert fwd_context is not None
@@ -550,24 +550,23 @@ def test_init_kv_cache_with_kv_sharing_valid():
     # with KV sharing, we can allocate (available_mem//page_size//1) blocks
     # which is twice as many as without KV sharing
     num_expected_blocks = 2 * 20480  # 20GB / 512KB
-    kv_cache_config = get_kv_cache_configs(vllm_config, [kv_cache_spec],
-                                           [available_memory])[0]
+    kv_cache_config = get_kv_cache_configs(
+        vllm_config, [kv_cache_spec], [available_memory]
+    )[0]
     assert kv_cache_config.num_blocks == num_expected_blocks
     assert len(kv_cache_config.kv_cache_tensors) == 1
     # Each layer now has twice the available memory for KV cache
     # compared to no KV sharing
     assert kv_cache_config.kv_cache_tensors[0].size == available_memory
 
-    max_context_len =\
-        estimate_max_model_len(vllm_config, kv_cache_spec, 5 * GiB_bytes)
+    max_context_len = estimate_max_model_len(vllm_config, kv_cache_spec, 5 * GiB_bytes)
     # max context len with KV sharing should be 2x as large as without
     assert max_context_len == (2 * 655360)
 
     # important: override tensor size to prevent large mem alloc during test
     # this will only allocate 1 block worth of memory (512kb)
     kv_cache_config.num_blocks = 1
-    kv_cache_config.kv_cache_tensors[0].size =\
-        kv_cache_spec[layer_0].page_size_bytes
+    kv_cache_config.kv_cache_tensors[0].size = kv_cache_spec[layer_0].page_size_bytes
 
     model_runner.initialize_kv_cache(kv_cache_config)
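With KV sharing there is a single backing cache instead of two, so the same memory budget yields twice the blocks and twice the context length, which is where the 2 * 20480 and 2 * 655360 figures above come from:

GiB = 1024**3
page_size = 512 * 1024  # per-block page size, as computed in the previous test
assert 20 * GiB // page_size == 2 * 20480  # one shared cache gets the full 20 GiB
assert 5 * GiB // (page_size // 128) == 2 * 655360  # and twice the max context length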