Convert formatting to use ruff instead of yapf + isort (#26247)

Signed-off-by: Harry Mellor <19981378+hmellor@users.noreply.github.com>
This commit is contained in:
Harry Mellor
2025-10-05 15:06:22 +01:00
committed by GitHub
parent 17edd8a807
commit d6953beb91
1508 changed files with 115244 additions and 94146 deletions

View File

@@ -6,6 +6,7 @@ It should include tests that are reported by users and making sure they
will never happen again.
"""
import gc
import pytest
@@ -18,12 +19,12 @@ from vllm import LLM, SamplingParams
def test_duplicated_ignored_sequence_group():
    """Regression test for https://github.com/vllm-project/vllm/issues/1655

    Mixes a short prompt with a prompt that far exceeds
    ``max_num_batched_tokens`` and runs generation over both; the original
    issue involved ignored sequence groups being duplicated in this
    situation.
    """
    sampling_params = SamplingParams(temperature=0.01, top_p=0.1, max_tokens=256)
    llm = LLM(
        model="distilbert/distilgpt2",
        max_num_batched_tokens=4096,
        tensor_parallel_size=1,
    )
    # One prompt that fits comfortably and one that is far longer than
    # max_num_batched_tokens (4096).
    prompts = ["This is a short prompt", "This is a very long prompt " * 1000]
    outputs = llm.generate(prompts, sampling_params=sampling_params)
@@ -31,12 +32,12 @@ def test_duplicated_ignored_sequence_group():
def test_max_tokens_none():
    """Generation with ``max_tokens=None`` must complete without error.

    Passing ``max_tokens=None`` to :class:`SamplingParams` means no explicit
    per-request token limit; this exercises that code path end to end with a
    small model.
    """
    sampling_params = SamplingParams(temperature=0.01, top_p=0.1, max_tokens=None)
    llm = LLM(
        model="distilbert/distilgpt2",
        max_num_batched_tokens=4096,
        tensor_parallel_size=1,
    )
    prompts = ["Just say hello!"]
    outputs = llm.generate(prompts, sampling_params=sampling_params)