Convert formatting to use ruff instead of yapf + isort (#26247)

Signed-off-by: Harry Mellor <19981378+hmellor@users.noreply.github.com>
This commit is contained in:
Harry Mellor
2025-10-05 15:06:22 +01:00
committed by GitHub
parent 17edd8a807
commit d6953beb91
1508 changed files with 115244 additions and 94146 deletions

View File

@@ -8,19 +8,20 @@ import pytest
# NOTE(review): this is a rendered git diff — the +/- markers and leading
# indentation were stripped by the page scrape, so removed and added lines
# appear interleaved. Per the commit title, the change converts formatting
# from yapf + isort to ruff.
from vllm.config import ModelConfig
from vllm.engine.protocol import EngineClient
# Removed side (yapf-style hanging-indent parenthesized imports):
from vllm.entrypoints.openai.protocol import (ErrorResponse,
LoadLoRAAdapterRequest,
UnloadLoRAAdapterRequest)
from vllm.entrypoints.openai.serving_models import (BaseModelPath,
OpenAIServingModels)
# Added side (ruff/black-style: one name per line with trailing comma, or a
# single line when it fits):
from vllm.entrypoints.openai.protocol import (
ErrorResponse,
LoadLoRAAdapterRequest,
UnloadLoRAAdapterRequest,
)
from vllm.entrypoints.openai.serving_models import BaseModelPath, OpenAIServingModels
from vllm.lora.request import LoRARequest
# Module-level constants shared by the tests below.
MODEL_NAME = "meta-llama/Llama-3.2-1B-Instruct"
BASE_MODEL_PATHS = [BaseModelPath(name=MODEL_NAME, model_path=MODEL_NAME)]
# Removed side (parenthesized string continuation):
LORA_LOADING_SUCCESS_MESSAGE = (
"Success: LoRA adapter '{lora_name}' added successfully.")
# Added side (single long line, within ruff's line-length limit):
LORA_LOADING_SUCCESS_MESSAGE = "Success: LoRA adapter '{lora_name}' added successfully."
# The next three lines interleave both sides: the opening line is shared,
# the closing-paren placement differs between the removed and added versions.
LORA_UNLOADING_SUCCESS_MESSAGE = (
"Success: LoRA adapter '{lora_name}' removed successfully.")
"Success: LoRA adapter '{lora_name}' removed successfully."
)
async def _async_serving_models_init() -> OpenAIServingModels:
@@ -29,10 +30,12 @@ async def _async_serving_models_init() -> OpenAIServingModels:
# NOTE(review): the hunk header above elides the start of this function's
# body — presumably the construction of mock_engine_client and
# mock_model_config; not visible in this diff view, confirm in the full file.
# Set the max_model_len attribute to avoid missing attribute
mock_model_config.max_model_len = 2048
# Removed side (yapf hanging indent aligned to the open paren):
serving_models = OpenAIServingModels(engine_client=mock_engine_client,
base_model_paths=BASE_MODEL_PATHS,
model_config=mock_model_config,
lora_modules=None)
# Added side (ruff: one keyword argument per line, trailing comma):
serving_models = OpenAIServingModels(
engine_client=mock_engine_client,
base_model_paths=BASE_MODEL_PATHS,
model_config=mock_model_config,
lora_modules=None,
)
await serving_models.init_static_loras()
return serving_models
@@ -42,19 +45,18 @@ async def _async_serving_models_init() -> OpenAIServingModels:
# NOTE(review): this test's @pytest.mark.asyncio decorator is likely elided
# by the hunk header above (the sibling tests below all carry it) — TODO
# confirm against the full file.
async def test_serving_model_name():
serving_models = await _async_serving_models_init()
assert serving_models.model_name(None) == MODEL_NAME
# Removed side (yapf hanging indent):
request = LoRARequest(lora_name="adapter",
lora_path="/path/to/adapter2",
lora_int_id=1)
# Added side (ruff: arguments folded onto one wrapped line):
request = LoRARequest(
lora_name="adapter", lora_path="/path/to/adapter2", lora_int_id=1
)
assert serving_models.model_name(request) == request.lora_name
@pytest.mark.asyncio
async def test_load_lora_adapter_success():
serving_models = await _async_serving_models_init()
# Removed side (yapf hanging indent):
request = LoadLoRAAdapterRequest(lora_name="adapter",
lora_path="/path/to/adapter2")
# Added side (ruff: the call now fits on one line):
request = LoadLoRAAdapterRequest(lora_name="adapter", lora_path="/path/to/adapter2")
response = await serving_models.load_lora_adapter(request)
# Removed vs added side: ruff also normalizes 'adapter' to "adapter".
assert response == LORA_LOADING_SUCCESS_MESSAGE.format(lora_name='adapter')
assert response == LORA_LOADING_SUCCESS_MESSAGE.format(lora_name="adapter")
# A successful load registers the adapter under its name.
assert len(serving_models.lora_requests) == 1
assert "adapter" in serving_models.lora_requests
assert serving_models.lora_requests["adapter"].lora_name == "adapter"
# NOTE(review): this function may continue past this point — the next hunk
# header skips ahead in the underlying file.
@@ -73,15 +75,16 @@ async def test_load_lora_adapter_missing_fields():
@pytest.mark.asyncio
async def test_load_lora_adapter_duplicate():
serving_models = await _async_serving_models_init()
# Removed side (yapf hanging indent):
request = LoadLoRAAdapterRequest(lora_name="adapter1",
lora_path="/path/to/adapter1")
# Added side (ruff wrapping):
request = LoadLoRAAdapterRequest(
lora_name="adapter1", lora_path="/path/to/adapter1"
)
response = await serving_models.load_lora_adapter(request)
# Removed side (wrapped call, single quotes):
assert response == LORA_LOADING_SUCCESS_MESSAGE.format(
lora_name='adapter1')
# Added side (one line, double quotes):
assert response == LORA_LOADING_SUCCESS_MESSAGE.format(lora_name="adapter1")
assert len(serving_models.lora_requests) == 1
# Loading the same adapter name a second time yields an error response
# (asserted below) rather than a second registration.
request = LoadLoRAAdapterRequest(lora_name="adapter1",
lora_path="/path/to/adapter1")
request = LoadLoRAAdapterRequest(
lora_name="adapter1", lora_path="/path/to/adapter1"
)
response = await serving_models.load_lora_adapter(request)
assert isinstance(response, ErrorResponse)
assert response.error.type == "InvalidUserInput"
# NOTE(review): the hunk ends here; the function's tail (if any) is elided
# before the next hunk header.
@@ -92,15 +95,15 @@ async def test_load_lora_adapter_duplicate():
@pytest.mark.asyncio
async def test_unload_lora_adapter_success():
serving_models = await _async_serving_models_init()
# Removed side (yapf hanging indent):
request = LoadLoRAAdapterRequest(lora_name="adapter1",
lora_path="/path/to/adapter1")
# Added side (ruff wrapping):
request = LoadLoRAAdapterRequest(
lora_name="adapter1", lora_path="/path/to/adapter1"
)
response = await serving_models.load_lora_adapter(request)
assert len(serving_models.lora_requests) == 1
# Unload the adapter that was just loaded, then verify the registry is empty.
request = UnloadLoRAAdapterRequest(lora_name="adapter1")
response = await serving_models.unload_lora_adapter(request)
# Removed side (wrapped call, single quotes):
assert response == LORA_UNLOADING_SUCCESS_MESSAGE.format(
lora_name='adapter1')
# Added side (one line, double quotes):
assert response == LORA_UNLOADING_SUCCESS_MESSAGE.format(lora_name="adapter1")
assert len(serving_models.lora_requests) == 0