Convert formatting to use ruff instead of yapf + isort (#26247)

Signed-off-by: Harry Mellor <19981378+hmellor@users.noreply.github.com>
Author: Harry Mellor
Date: 2025-10-05 15:06:22 +01:00
Committed by: GitHub
Parent: 17edd8a807
Commit: d6953beb91
1508 changed files with 115244 additions and 94146 deletions


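For context before the hunks: this commit swaps the yapf + isort toolchain for ruff across the repository. Below is a minimal sketch, not part of the commit itself, of the two ruff invocations that replace the old tools; the paths and the assumption that the "I" (isort) rules are enabled in pyproject.toml are illustrative, not taken from the diff.

# Hypothetical helper, not from this commit: ruff replaces isort via the
# "I" lint rules (applied by `ruff check --fix`) and replaces yapf via
# `ruff format`. Paths are assumptions; adjust to the repository layout.
import subprocess

def run_ruff(paths: list[str]) -> None:
    # Apply lint fixes, including isort-style import sorting.
    subprocess.run(["ruff", "check", "--fix", *paths], check=True)
    # Reformat code, taking over from yapf.
    subprocess.run(["ruff", "format", *paths], check=True)

if __name__ == "__main__":
    run_ruff(["tests", "vllm"])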
@@ -29,14 +29,11 @@ CUDA_DEVICES = [
 MAX_NUM_PROMPT_TOKENS = 64
-def _compare_objs(obj1,
-                  obj2,
-                  skip: Sequence = ("logitsprocs", "batch_update_builder")):
+def _compare_objs(obj1, obj2, skip: Sequence = ("logitsprocs", "batch_update_builder")):
     attrs = inspect.getmembers(obj1, lambda a: not (inspect.isroutine(a)))
-    attr_names = set([
-        a[0] for a in attrs
-        if not (a[0].startswith('__') and a[0].endswith('__'))
-    ])
+    attr_names = set(
+        [a[0] for a in attrs if not (a[0].startswith("__") and a[0].endswith("__"))]
+    )
     for attr_name in attr_names:
         if attr_name in skip:
             continue
@@ -47,7 +44,7 @@ def _compare_objs(obj1,
         is_same = False
         if isinstance(a, torch.Tensor):
             if a.numel() == 0 or b.numel() == 0:
-                is_same = (a.numel() == 0 and b.numel() == 0)
+                is_same = a.numel() == 0 and b.numel() == 0
             elif torch.allclose(a, b):
                 is_same = True
         elif isinstance(a, np.ndarray):
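The `numel()` guard in the hunk above exists because `torch.allclose` requires compatible shapes and cannot compare an empty tensor against a non-empty one. Here is a standalone sketch of the same pattern, assuming only a torch install; the helper name is illustrative, not from the test file.

# Illustrative helper mirroring the empty-tensor guard above: compare
# element counts first, since torch.allclose raises on shape mismatch.
import torch

def tensors_match(a: torch.Tensor, b: torch.Tensor) -> bool:
    if a.numel() == 0 or b.numel() == 0:
        # Two empty tensors compare equal; empty vs non-empty does not.
        return a.numel() == 0 and b.numel() == 0
    return a.shape == b.shape and torch.allclose(a, b)

assert tensors_match(torch.empty(0), torch.empty(0))
assert not tensors_match(torch.empty(0), torch.ones(3))
assert tensors_match(torch.ones(3), torch.ones(3))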
@@ -64,12 +61,14 @@ def _compare_objs(obj1,
             is_same = True
         elif isinstance(a, CpuGpuBuffer):
             is_same = np.allclose(a.np, b.np) and torch.allclose(a.gpu, b.gpu)
-        assert is_same, f"Attribute {attr_name} is different"\
-            f" in {obj1} and {obj2}: {a} != {b}"
+        assert is_same, (
+            f"Attribute {attr_name} is different in {obj1} and {obj2}: {a} != {b}"
+        )
-def _remove_requests(input_batch: InputBatch, batch_size: int,
-                     reqs: list[CachedRequestState]) -> set[str]:
+def _remove_requests(
+    input_batch: InputBatch, batch_size: int, reqs: list[CachedRequestState]
+) -> set[str]:
     """
     Remove some requests randomly from the batch and returns
     set of request removed
@@ -109,10 +108,9 @@ def _construct_expected_sampling_metadata(
     temperature = [0.0 for _ in range(num_reqs)]
     min_tokens = {}
     logit_bias = [None] * num_reqs
-    allowed_token_ids_mask = torch.zeros(num_reqs,
-                                         VOCAB_SIZE,
-                                         dtype=torch.bool,
-                                         device=device)
+    allowed_token_ids_mask = torch.zeros(
+        num_reqs, VOCAB_SIZE, dtype=torch.bool, device=device
+    )
     bad_words_token_ids = {}
     for req in reqs:
         if req.req_id not in req_ids_retained:
@@ -120,35 +118,40 @@ def _construct_expected_sampling_metadata(
         index_in_input_batch = req_id_index_in_input_batch[req.req_id]
         output_token_ids[index_in_input_batch] = req.output_token_ids
         prompt_token_ids[index_in_input_batch] = req.prompt_token_ids
-        presence_penalties[
-            index_in_input_batch] = req.sampling_params.presence_penalty
+        presence_penalties[index_in_input_batch] = req.sampling_params.presence_penalty
         frequency_penalties[index_in_input_batch] = (
-            req.sampling_params.frequency_penalty)
+            req.sampling_params.frequency_penalty
+        )
         repetition_penalties[index_in_input_batch] = (
-            req.sampling_params.repetition_penalty)
+            req.sampling_params.repetition_penalty
+        )
         top_k[index_in_input_batch] = req.sampling_params.top_k
         top_p[index_in_input_batch] = req.sampling_params.top_p
         temperature[index_in_input_batch] = req.sampling_params.temperature
         min_tokens[index_in_input_batch] = (
             req.sampling_params.min_tokens,
-            req.sampling_params.all_stop_token_ids)
+            req.sampling_params.all_stop_token_ids,
+        )
         logit_bias[index_in_input_batch] = req.sampling_params.logit_bias
         if req.sampling_params.allowed_token_ids:
             allowed_token_ids_mask[index_in_input_batch][
-                req.sampling_params.allowed_token_ids] = True
+                req.sampling_params.allowed_token_ids
+            ] = True
         if req.sampling_params.bad_words_token_ids:
-            bad_words_token_ids[
-                index_in_input_batch] = req.sampling_params.bad_words_token_ids
+            bad_words_token_ids[index_in_input_batch] = (
+                req.sampling_params.bad_words_token_ids
+            )
     return SamplingMetadata(
-        temperature=torch.tensor(temperature, dtype=torch.float,
-                                 device=device),
+        temperature=torch.tensor(temperature, dtype=torch.float, device=device),
         all_greedy=False,
         all_random=True,
-        top_p=None if all(x == 1.0 for x in top_p) else torch.tensor(
-            top_p, dtype=torch.float, device=device),
-        top_k=None if all(x == 0 for x in top_k) else torch.tensor(
-            top_k, dtype=torch.int, device=device),
+        top_p=None
+        if all(x == 1.0 for x in top_p)
+        else torch.tensor(top_p, dtype=torch.float, device=device),
+        top_k=None
+        if all(x == 0 for x in top_k)
+        else torch.tensor(top_k, dtype=torch.int, device=device),
         generators={},
         max_num_logprobs=0,
         prompt_token_ids=make_tensor_with_pad(
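The `allowed_token_ids_mask` reformatted above is a `[num_reqs, VOCAB_SIZE]` boolean tensor with one row per request; a row stays all-False unless that request restricts its vocabulary. A toy-sized sketch of the indexing pattern, with made-up sizes for illustration:

# Toy-sized sketch of the allowed_token_ids_mask pattern: list indexing
# on a row sets just the allowed vocabulary entries to True.
import torch

num_reqs, vocab_size = 2, 8
mask = torch.zeros(num_reqs, vocab_size, dtype=torch.bool)
mask[0][[1, 3]] = True  # request 0 may only emit tokens 1 and 3
mask[1][[5]] = True     # request 1 may only emit token 5
assert mask[0].sum().item() == 2
assert mask[1].sum().item() == 1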
@@ -157,19 +160,21 @@ def _construct_expected_sampling_metadata(
             device=torch.device(device),
             dtype=torch.int64,
         ),
-        frequency_penalties=torch.tensor(frequency_penalties,
-                                         dtype=torch.float,
-                                         device=device),
-        presence_penalties=torch.tensor(presence_penalties,
-                                        dtype=torch.float,
-                                        device=device),
-        repetition_penalties=torch.tensor(repetition_penalties,
-                                          dtype=torch.float,
-                                          device=device),
+        frequency_penalties=torch.tensor(
+            frequency_penalties, dtype=torch.float, device=device
+        ),
+        presence_penalties=torch.tensor(
+            presence_penalties, dtype=torch.float, device=device
+        ),
+        repetition_penalties=torch.tensor(
+            repetition_penalties, dtype=torch.float, device=device
+        ),
         output_token_ids=output_token_ids,
-        no_penalties=(all(x == 0 for x in presence_penalties)
-                      and all(x == 0 for x in frequency_penalties)
-                      and all(x == 1 for x in repetition_penalties)),
+        no_penalties=(
+            all(x == 0 for x in presence_penalties)
+            and all(x == 0 for x in frequency_penalties)
+            and all(x == 1 for x in repetition_penalties)
+        ),
         allowed_token_ids_mask=allowed_token_ids_mask,
         bad_words_token_ids=bad_words_token_ids,
         logitsprocs=LogitsProcessors(),
@@ -185,8 +190,7 @@ def _create_sampling_params():
         frequency_penalty=np.random.uniform(-2.0, 2.0),
         min_tokens=np.random.randint(1, 10),
         stop_token_ids=[
-            np.random.randint(0, VOCAB_SIZE)
-            for _ in range(np.random.randint(10))
+            np.random.randint(0, VOCAB_SIZE) for _ in range(np.random.randint(10))
         ],
         logit_bias={0: np.random.uniform(-3.0, 3.0)},
     )
@@ -207,7 +211,7 @@ def _construct_cached_request_state(req_id_suffix: int):
         sampling_params=_create_sampling_params(),
         pooling_params=None,
         mm_features=[],
-        block_ids=([], ),
+        block_ids=([],),
         generator=None,
         num_computed_tokens=len(output_token_ids),
         output_token_ids=output_token_ids,
@@ -262,19 +266,18 @@ def test_sampling_metadata_in_input_batch(device: str, batch_size: int):
     # Create expected output.
     expected_sampling_metadata = _construct_expected_sampling_metadata(
-        reqs,
-        req_ids_retained,
-        input_batch.req_id_to_index,
-        device=torch.device(device))
+        reqs, req_ids_retained, input_batch.req_id_to_index, device=torch.device(device)
+    )
     def same(t1: Optional[torch.Tensor], t2: Optional[torch.Tensor]) -> bool:
-        return (t1 is None
-                and t2 is None) or (t1 is not None and t2 is not None
-                                    and torch.allclose(t1, t2))
+        return (t1 is None and t2 is None) or (
+            t1 is not None and t2 is not None and torch.allclose(t1, t2)
+        )
     # Assert the actual and expected output.
-    assert torch.allclose(expected_sampling_metadata.temperature,
-                          sampling_metadata.temperature)
+    assert torch.allclose(
+        expected_sampling_metadata.temperature, sampling_metadata.temperature
+    )
     assert same(expected_sampling_metadata.top_p, sampling_metadata.top_p)
     assert same(expected_sampling_metadata.top_k, sampling_metadata.top_k)
     assert torch.allclose(
@@ -289,25 +292,29 @@ def test_sampling_metadata_in_input_batch(device: str, batch_size: int):
         expected_sampling_metadata.repetition_penalties,
         sampling_metadata.repetition_penalties,
     )
-    assert torch.allclose(expected_sampling_metadata.prompt_token_ids,
-                          sampling_metadata.prompt_token_ids)
-    assert (expected_sampling_metadata.output_token_ids ==
-            sampling_metadata.output_token_ids)
-    assert expected_sampling_metadata.no_penalties == \
-        sampling_metadata.no_penalties
+    assert torch.allclose(
+        expected_sampling_metadata.prompt_token_ids, sampling_metadata.prompt_token_ids
+    )
+    assert (
+        expected_sampling_metadata.output_token_ids
+        == sampling_metadata.output_token_ids
+    )
+    assert expected_sampling_metadata.no_penalties == sampling_metadata.no_penalties
     if sampling_metadata.allowed_token_ids_mask:
         assert torch.allclose(
             expected_sampling_metadata.allowed_token_ids_mask,
-            sampling_metadata.allowed_token_ids_mask)
-    assert expected_sampling_metadata.bad_words_token_ids == \
-        sampling_metadata.bad_words_token_ids
+            sampling_metadata.allowed_token_ids_mask,
+        )
+    assert (
+        expected_sampling_metadata.bad_words_token_ids
+        == sampling_metadata.bad_words_token_ids
+    )
 @pytest.mark.parametrize("device", CUDA_DEVICES)
 @pytest.mark.parametrize("batch_size", [32])
-@pytest.mark.parametrize("swap_list", [((0, 1), )])
-def test_swap_states_in_input_batch(device: str, batch_size: int,
-                                    swap_list: list):
+@pytest.mark.parametrize("swap_list", [((0, 1),)])
+def test_swap_states_in_input_batch(device: str, batch_size: int, swap_list: list):
     """
     Tests the logic for managing sampling metadata in the InputBatch.
@@ -352,8 +359,10 @@ def test_swap_states_in_input_batch(device: str, batch_size: int,
     reordered_reqs = reqs.copy()
     for swap_pair in swap_list:
-        reordered_reqs[swap_pair[0]], reordered_reqs[swap_pair[1]] = \
-            reordered_reqs[swap_pair[1]], reordered_reqs[swap_pair[0]]
+        reordered_reqs[swap_pair[0]], reordered_reqs[swap_pair[1]] = (
+            reordered_reqs[swap_pair[1]],
+            reordered_reqs[swap_pair[0]],
+        )
         input_batch.swap_states(swap_pair[0], swap_pair[1])
     for req_index in range(batch_size):
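The parallel assignment reformatted in the last hunk is semantically identical in both stylings: the right-hand side is evaluated as a tuple before either element is stored, so the swap is simultaneous. A trivial standalone check:

# Both the old backslash-continuation style and the new parenthesized
# style perform the same simultaneous tuple swap.
reqs = ["req-0", "req-1", "req-2"]
swap_pair = (0, 1)
reqs[swap_pair[0]], reqs[swap_pair[1]] = (
    reqs[swap_pair[1]],
    reqs[swap_pair[0]],
)
assert reqs == ["req-1", "req-0", "req-2"]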