[Frontend] [Core] Add Tensorizer support for V1, LoRA adapter serialization and deserialization (#17926)
Signed-off-by: Sanger Steel <sangersteel@gmail.com>
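Usage sketch (editorial, not part of the diff): the flow these tests exercise serializes a model with tensorize_vllm_model and loads it back with load_format="tensorizer". The model name and local path below are illustrative placeholders, and constructing the engine via LLM rather than the test fixture vllm_runner is an assumption.

    # Hedged sketch of the serialize/deserialize flow exercised in the tests below.
    from vllm import LLM
    from vllm.engine.arg_utils import EngineArgs
    from vllm.model_executor.model_loader.tensorizer import (
        TensorizerConfig, is_vllm_tensorized, tensorize_vllm_model)

    # Illustrative local path; any tensorizer-supported URI (e.g. s3://) works.
    config = TensorizerConfig(tensorizer_uri="/tmp/opt-125m.tensors")

    # Serialize: write the model's tensors to tensorizer_uri.
    tensorize_vllm_model(EngineArgs(model="facebook/opt-125m"), config)
    assert is_vllm_tensorized(config)

    # Deserialize: load the serialized tensors instead of the HF weights.
    llm = LLM(model="facebook/opt-125m",
              load_format="tensorizer",
              model_loader_extra_config=config)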
@@ -1,17 +1,13 @@
# SPDX-License-Identifier: Apache-2.0

import gc
import json
import os
import pathlib
import subprocess
from functools import partial
from unittest.mock import MagicMock, patch

import openai
import pytest
import torch
from huggingface_hub import snapshot_download

from vllm import SamplingParams
from vllm.engine.arg_utils import EngineArgs
@@ -22,12 +18,11 @@ from vllm.model_executor.model_loader.tensorizer import (TensorizerConfig,
                                                         is_vllm_tensorized,
                                                         load_with_tensorizer,
                                                         open_stream,
                                                         serialize_vllm_model,
                                                         tensorize_vllm_model)
# yapf: enable
from vllm.utils import PlaceholderModule, import_from_path
from vllm.utils import PlaceholderModule

from ..utils import VLLM_PATH, RemoteOpenAIServer
from ..utils import VLLM_PATH

try:
    from tensorizer import EncryptionParams
@@ -103,6 +98,7 @@ def test_can_deserialize_s3(vllm_runner):
@pytest.mark.skipif(not is_curl_installed(), reason="cURL is not installed")
def test_deserialized_encrypted_vllm_model_has_same_outputs(
        vllm_runner, tmp_path):
    args = EngineArgs(model=model_ref)
    with vllm_runner(model_ref) as vllm_model:
        model_path = tmp_path / (model_ref + ".tensors")
        key_path = tmp_path / (model_ref + ".key")
@@ -110,15 +106,13 @@ def test_deserialized_encrypted_vllm_model_has_same_outputs(

        outputs = vllm_model.generate(prompts, sampling_params)

        config_for_serializing = TensorizerConfig(tensorizer_uri=model_path,
                                                  encryption_keyfile=key_path)
        config_for_serializing = TensorizerConfig(tensorizer_uri=str(model_path),
                                                  encryption_keyfile=str(key_path))

        vllm_model.apply_model(
            partial(serialize_vllm_model,
                    tensorizer_config=config_for_serializing))
        tensorize_vllm_model(args, config_for_serializing)

    config_for_deserializing = TensorizerConfig(tensorizer_uri=model_path,
                                                encryption_keyfile=key_path)
    config_for_deserializing = TensorizerConfig(
        tensorizer_uri=str(model_path), encryption_keyfile=str(key_path))

    with vllm_runner(model_ref,
                     load_format="tensorizer",
@@ -154,113 +148,46 @@ def test_deserialized_hf_model_has_same_outputs(hf_runner, vllm_runner,
    assert outputs == deserialized_outputs


def test_vllm_model_can_load_with_lora(vllm_runner, tmp_path):
    multilora_inference = import_from_path(
        "examples.offline_inference.multilora_inference",
        EXAMPLES_PATH / "offline_inference/multilora_inference.py",
    )

    model_ref = "meta-llama/Llama-2-7b-hf"
    lora_path = snapshot_download(repo_id="yard1/llama-2-7b-sql-lora-test")
    test_prompts = multilora_inference.create_test_prompts(lora_path)

    # Serialize model before deserializing and binding LoRA adapters
    with vllm_runner(model_ref) as vllm_model:
        model_path = tmp_path / (model_ref + ".tensors")

        vllm_model.apply_model(
            partial(
                serialize_vllm_model,
                tensorizer_config=TensorizerConfig(tensorizer_uri=model_path)))

    with vllm_runner(
            model_ref,
            load_format="tensorizer",
            model_loader_extra_config=TensorizerConfig(
                tensorizer_uri=model_path,
                num_readers=1,
            ),
            enable_lora=True,
            max_loras=1,
            max_lora_rank=8,
            max_cpu_loras=2,
            max_num_seqs=50,
            max_model_len=1000,
    ) as loaded_vllm_model:
        multilora_inference.process_requests(
            loaded_vllm_model.model.llm_engine, test_prompts)

        assert loaded_vllm_model


def test_load_without_tensorizer_load_format(vllm_runner):
def test_load_without_tensorizer_load_format(vllm_runner, capfd):
    model = None
    with pytest.raises(ValueError):
    try:
        model = vllm_runner(
            model_ref,
            model_loader_extra_config=TensorizerConfig(tensorizer_uri="test"))
        del model
        gc.collect()
        torch.cuda.empty_cache()
    except RuntimeError:
        out, err = capfd.readouterr()
        combined_output = out + err
        assert ("ValueError: Model loader extra config "
                "is not supported for load "
                "format LoadFormat.AUTO") in combined_output
    finally:
        del model
        gc.collect()
        torch.cuda.empty_cache()


@pytest.mark.skipif(not is_curl_installed(), reason="cURL is not installed")
def test_openai_apiserver_with_tensorizer(vllm_runner, tmp_path):
    ## Serialize model
    with vllm_runner(model_ref) as vllm_model:
        model_path = tmp_path / (model_ref + ".tensors")

        vllm_model.apply_model(
            partial(
                serialize_vllm_model,
                tensorizer_config=TensorizerConfig(tensorizer_uri=model_path)))

    model_loader_extra_config = {
        "tensorizer_uri": str(model_path),
    }

    ## Start OpenAI API server
    openai_args = [
        "--dtype",
        "float16",
        "--load-format",
        "tensorizer",
        "--model-loader-extra-config",
        json.dumps(model_loader_extra_config),
    ]

    with RemoteOpenAIServer(model_ref, openai_args) as server:
        print("Server ready.")

        client = server.get_client()
        completion = client.completions.create(model=model_ref,
                                               prompt="Hello, my name is",
                                               max_tokens=5,
                                               temperature=0.0)

        assert completion.id is not None
        assert len(completion.choices) == 1
        assert len(completion.choices[0].text) >= 5
        assert completion.choices[0].finish_reason == "length"
        assert completion.usage == openai.types.CompletionUsage(
            completion_tokens=5, prompt_tokens=6, total_tokens=11)


def test_raise_value_error_on_invalid_load_format(vllm_runner):
def test_raise_value_error_on_invalid_load_format(vllm_runner, capfd):
    model = None
    with pytest.raises(ValueError):
    try:
        model = vllm_runner(
            model_ref,
            load_format="safetensors",
            model_loader_extra_config=TensorizerConfig(tensorizer_uri="test"))
        del model
        gc.collect()
        torch.cuda.empty_cache()
    except RuntimeError:
        out, err = capfd.readouterr()

        combined_output = out + err
        assert ("ValueError: Model loader extra config is not supported "
                "for load format LoadFormat.SAFETENSORS") in combined_output
    finally:
        del model
        gc.collect()
        torch.cuda.empty_cache()


@pytest.mark.skipif(torch.cuda.device_count() < 2, reason="Requires 2 GPUs")
def test_tensorizer_with_tp_path_without_template(vllm_runner):
    with pytest.raises(ValueError):
def test_tensorizer_with_tp_path_without_template(vllm_runner, capfd):
    try:
        model_ref = "EleutherAI/pythia-1.4b"
        tensorized_path = f"s3://tensorized/{model_ref}/fp16/model.tensors"

@@ -275,6 +202,13 @@ def test_tensorizer_with_tp_path_without_template(vllm_runner):
            tensor_parallel_size=2,
            disable_custom_all_reduce=True,
        )
    except RuntimeError:
        out, err = capfd.readouterr()
        combined_output = out + err
        assert ("ValueError: For a sharded model, tensorizer_uri "
                "should include a string format template like '%04d' "
                "to be formatted with the rank "
                "of the shard") in combined_output


@pytest.mark.skipif(torch.cuda.device_count() < 2, reason="Requires 2 GPUs")
@@ -288,7 +222,6 @@ def test_deserialized_encrypted_vllm_model_with_tp_has_same_outputs(
            enforce_eager=True,
    ) as base_model:
        outputs = base_model.generate(prompts, sampling_params)
        base_model.model.llm_engine.model_executor.shutdown()

    # load model with two shards and serialize with encryption
    model_path = str(tmp_path / (model_ref + "-%02d.tensors"))
@@ -296,7 +229,7 @@ def test_deserialized_encrypted_vllm_model_with_tp_has_same_outputs(

    tensorizer_config = TensorizerConfig(
        tensorizer_uri=model_path,
        encryption_keyfile=key_path,
        encryption_keyfile=str(key_path),
    )

    tensorize_vllm_model(
@@ -331,14 +264,13 @@ def test_vllm_tensorized_model_has_same_outputs(vllm_runner, tmp_path):
    model_ref = "facebook/opt-125m"
    model_path = tmp_path / (model_ref + ".tensors")
    config = TensorizerConfig(tensorizer_uri=str(model_path))
    args = EngineArgs(model=model_ref, device="cuda")

    with vllm_runner(model_ref) as vllm_model:
        outputs = vllm_model.generate(prompts, sampling_params)

        vllm_model.apply_model(
            partial(serialize_vllm_model, tensorizer_config=config))

    assert is_vllm_tensorized(config)
    tensorize_vllm_model(args, config)
    assert is_vllm_tensorized(config)

    with vllm_runner(model_ref,
                     load_format="tensorizer",
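The diff is truncated above. For the LoRA half of this change, a hedged sketch of loading a tensorized base model with LoRA enabled, mirroring test_vllm_model_can_load_with_lora: the adapter repo and LoRA limits come from that test, while the serialized URI and prompt are placeholders, and routing the request through LoRARequest is standard vLLM usage assumed here rather than shown in the diff.

    # Sketch: deserialize a tensorized base model with LoRA enabled and run
    # one adapter request. The tensorizer_uri is an illustrative placeholder.
    from huggingface_hub import snapshot_download

    from vllm import LLM, SamplingParams
    from vllm.lora.request import LoRARequest
    from vllm.model_executor.model_loader.tensorizer import TensorizerConfig

    lora_path = snapshot_download(repo_id="yard1/llama-2-7b-sql-lora-test")
    llm = LLM(model="meta-llama/Llama-2-7b-hf",
              load_format="tensorizer",
              model_loader_extra_config=TensorizerConfig(
                  tensorizer_uri="/tmp/llama-2-7b.tensors", num_readers=1),
              enable_lora=True,
              max_loras=1,
              max_lora_rank=8)
    outputs = llm.generate("Write a SQL query for all users:",
                           SamplingParams(temperature=0.0, max_tokens=32),
                           lora_request=LoRARequest("sql-adapter", 1, lora_path))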