Convert formatting to use ruff instead of yapf + isort (#26247)

Signed-off-by: Harry Mellor <19981378+hmellor@users.noreply.github.com>
This commit is contained in:
Harry Mellor
2025-10-05 15:06:22 +01:00
committed by GitHub
parent 17edd8a807
commit d6953beb91
1508 changed files with 115244 additions and 94146 deletions

View File

@@ -20,51 +20,57 @@ calculus, each contributing unique perspectives that would shape this new
field."""
def test_smaller_truncation_size(
    vllm_runner, model_name=MODEL_NAME, input_str=input_str
):
    """Truncating the prompt to fewer tokens than the model limit keeps
    exactly `truncate_prompt_tokens` tokens in the pooling output."""
    truncate_prompt_tokens = 10

    with vllm_runner(
        model_name, runner="pooling", max_model_len=max_model_len
    ) as vllm_model:
        vllm_output = vllm_model.llm.embed(
            input_str, truncate_prompt_tokens=truncate_prompt_tokens
        )

    # The engine reports the prompt tokens actually used after truncation.
    prompt_tokens = vllm_output[0].prompt_token_ids

    assert len(prompt_tokens) == truncate_prompt_tokens
def test_max_truncation_size(vllm_runner, model_name=MODEL_NAME, input_str=input_str):
    """Passing `truncate_prompt_tokens=-1` truncates to the model's full
    context length, so the prompt ends up with `max_model_len` tokens."""
    truncate_prompt_tokens = -1

    with vllm_runner(
        model_name, runner="pooling", max_model_len=max_model_len
    ) as vllm_model:
        vllm_output = vllm_model.llm.embed(
            input_str, truncate_prompt_tokens=truncate_prompt_tokens
        )

    # -1 means "truncate to the maximum", so the prompt fills the context.
    prompt_tokens = vllm_output[0].prompt_token_ids

    assert len(prompt_tokens) == max_model_len
def test_bigger_truncation_size(
    vllm_runner, model_name=MODEL_NAME, input_str=input_str
):
    """A truncation size larger than `max_model_len` must be rejected with a
    `ValueError` carrying an explanatory message."""
    truncate_prompt_tokens = max_model_len + 1

    # Parenthesized multi-context `with` (Python 3.10+): the embed call below
    # is expected to raise, so pytest.raises wraps the whole runner scope.
    with (
        pytest.raises(ValueError),
        vllm_runner(
            model_name, runner="pooling", max_model_len=max_model_len
        ) as vllm_model,
    ):
        llm_output = vllm_model.llm.embed(
            input_str, truncate_prompt_tokens=truncate_prompt_tokens
        )

        # Unreachable when the ValueError fires as expected; kept as a guard
        # against the call silently returning the error text instead.
        assert (
            llm_output
            == f"""truncate_prompt_tokens value
({truncate_prompt_tokens}) is greater than
max_model_len ({max_model_len}). Please, select
a smaller truncation size."""
        )