Convert formatting to use ruff instead of yapf + isort (#26247)
Signed-off-by: Harry Mellor <19981378+hmellor@users.noreply.github.com>
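A minimal sketch of the local invocation this migration corresponds to (assuming ruff is installed and the repository's pyproject.toml enables the import-sorting "I" rules; the paths shown are illustrative, not taken from this commit):

    ruff check --select I --fix tests/   # import sorting, replacing isort
    ruff format tests/                   # code formatting, replacing yapf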
@@ -11,7 +11,10 @@ import torch.cuda
 
 from vllm.engine.arg_utils import EngineArgs
 from vllm.model_executor.model_loader.tensorizer import (
-    TensorizerConfig, tensorize_lora_adapter, tensorize_vllm_model)
+    TensorizerConfig,
+    tensorize_lora_adapter,
+    tensorize_vllm_model,
+)
 
 from ...utils import RemoteOpenAIServer
 
@@ -29,21 +32,20 @@ def cleanup():
     _cleanup()
 
 
-@pytest.fixture(scope='module')
+@pytest.fixture(scope="module")
 def tmp_dir():
     with tempfile.TemporaryDirectory() as path:
         yield path
 
 
-@pytest.fixture(scope='module')
+@pytest.fixture(scope="module")
 def model_uri(tmp_dir):
     yield f"{tmp_dir}/model.tensors"
 
 
 @pytest.fixture(scope="module")
 def tensorize_model_and_lora(tmp_dir, model_uri):
-    tensorizer_config = TensorizerConfig(tensorizer_uri=model_uri,
-                                         lora_dir=tmp_dir)
+    tensorizer_config = TensorizerConfig(tensorizer_uri=model_uri, lora_dir=tmp_dir)
     args = EngineArgs(model=MODEL_NAME)
 
     tensorize_lora_adapter(LORA_PATH, tensorizer_config)
@@ -66,8 +68,11 @@ def server(model_uri, tensorize_model_and_lora):
 
     ## Start OpenAI API server
     args = [
-        "--load-format", "tensorizer", "--served-model-name", MODEL_NAME,
-        "--enable-lora"
+        "--load-format",
+        "tensorizer",
+        "--served-model-name",
+        MODEL_NAME,
+        "--enable-lora",
     ]
 
     model_dir = os.path.dirname(model_uri)
@@ -85,10 +90,9 @@ async def client(server):
 @pytest.mark.parametrize("model_name", [MODEL_NAME])
 async def test_single_completion(client: openai.AsyncOpenAI, model_name: str):
     _cleanup()
-    completion = await client.completions.create(model=model_name,
-                                                 prompt="Hello, my name is",
-                                                 max_tokens=5,
-                                                 temperature=0.0)
+    completion = await client.completions.create(
+        model=model_name, prompt="Hello, my name is", max_tokens=5, temperature=0.0
+    )
 
     assert completion.id is not None
     assert completion.choices is not None and len(completion.choices) == 1
@@ -97,4 +101,5 @@ async def test_single_completion(client: openai.AsyncOpenAI, model_name: str):
     assert len(completion.choices[0].text) >= 5
     assert completion.choices[0].finish_reason == "length"
     assert completion.usage == openai.types.CompletionUsage(
-        completion_tokens=5, prompt_tokens=6, total_tokens=11)
+        completion_tokens=5, prompt_tokens=6, total_tokens=11
+    )
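Most of the hunks above follow ruff's Black-style formatting defaults: a trailing comma keeps elements one per line (the import block and the args list), while a call without one is collapsed onto a single line when it fits the default 88-column limit (the TensorizerConfig and completions.create calls). A minimal illustrative sketch of that behaviour, not taken from this diff:

    # Trailing comma present: ruff format keeps one element per line.
    flags = [
        "--load-format",
        "tensorizer",
    ]

    # No trailing comma and the call fits within 88 columns: collapsed.
    config = dict(tensorizer_uri="model.tensors", lora_dir="/tmp/lora")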