Convert formatting to use ruff instead of yapf + isort (#26247)

Signed-off-by: Harry Mellor <19981378+hmellor@users.noreply.github.com>
This commit is contained in:
Harry Mellor
2025-10-05 15:06:22 +01:00
committed by GitHub
parent 17edd8a807
commit d6953beb91
1508 changed files with 115244 additions and 94146 deletions

View File

@@ -50,9 +50,9 @@ def test_oot_registration_embedding(
     with monkeypatch.context() as m:
         m.setenv("VLLM_PLUGINS", "register_dummy_model")
         prompts = ["Hello, my name is", "The text does not matter"]
-        llm = LLM(model=dummy_gemma2_embedding_path,
-                  load_format="dummy",
-                  max_model_len=2048)
+        llm = LLM(
+            model=dummy_gemma2_embedding_path, load_format="dummy", max_model_len=2048
+        )
         outputs = llm.embed(prompts)
         for output in outputs:
@@ -69,27 +69,28 @@ def test_oot_registration_multimodal(
 ):
     with monkeypatch.context() as m:
         m.setenv("VLLM_PLUGINS", "register_dummy_model")
-        prompts = [{
-            "prompt": "What's in the image?<image>",
-            "multi_modal_data": {
-                "image": image
-            }
-        }, {
-            "prompt": "Describe the image<image>",
-            "multi_modal_data": {
-                "image": image
-            }
-        }]
+        prompts = [
+            {
+                "prompt": "What's in the image?<image>",
+                "multi_modal_data": {"image": image},
+            },
+            {
+                "prompt": "Describe the image<image>",
+                "multi_modal_data": {"image": image},
+            },
+        ]
         sampling_params = SamplingParams(temperature=0)
-        llm = LLM(model=dummy_llava_path,
-                  load_format="dummy",
-                  max_num_seqs=1,
-                  trust_remote_code=True,
-                  gpu_memory_utilization=0.98,
-                  max_model_len=4096,
-                  enforce_eager=True,
-                  limit_mm_per_prompt={"image": 1})
+        llm = LLM(
+            model=dummy_llava_path,
+            load_format="dummy",
+            max_num_seqs=1,
+            trust_remote_code=True,
+            gpu_memory_utilization=0.98,
+            max_model_len=4096,
+            enforce_eager=True,
+            limit_mm_per_prompt={"image": 1},
+        )
         first_token = llm.get_tokenizer().decode(0)
         outputs = llm.generate(prompts, sampling_params)