[V0 Deprecation] Remove Prompt Adapters (#20588)
Signed-off-by: mgoin <mgoin64@gmail.com>
@@ -2,6 +2,7 @@
 # SPDX-FileCopyrightText: Copyright contributors to the vLLM project
 # imports for guided decoding tests
 import json
+import os
 import shutil
 from tempfile import TemporaryDirectory
 from typing import Optional
@@ -26,10 +27,6 @@ MODEL_NAME = "HuggingFaceH4/zephyr-7b-beta"
 # technically these adapters use a different base model,
 # but we're not testing generation quality here
 LORA_NAME = "typeof/zephyr-7b-beta-lora"
-PA_NAME = "swapnilbp/llama_tweet_ptune"
-# if PA_NAME changes, PA_NUM_VIRTUAL_TOKENS might also
-# need to change to match the prompt adapter
-PA_NUM_VIRTUAL_TOKENS = 8
 
 GUIDED_DECODING_BACKENDS = ["outlines", "lm-format-enforcer", "xgrammar"]
 
@@ -56,13 +53,7 @@ def zephyr_lora_added_tokens_files(zephyr_lora_files):
 
 
-@pytest.fixture(scope="module")
-def zephyr_pa_files():
-    return snapshot_download(repo_id=PA_NAME)
-
-
 @pytest.fixture(scope="module")
-def default_server_args(zephyr_lora_files, zephyr_lora_added_tokens_files,
-                        zephyr_pa_files):
+def default_server_args(zephyr_lora_files, zephyr_lora_added_tokens_files):
     return [
         # use half precision for speed and memory savings in CI environment
         "--dtype",
@@ -81,15 +72,6 @@ def default_server_args(zephyr_lora_files, zephyr_lora_added_tokens_files,
         "64",
         "--max-cpu-loras",
         "2",
-        # pa config
-        "--enable-prompt-adapter",
-        "--prompt-adapters",
-        f"zephyr-pa={zephyr_pa_files}",
-        f"zephyr-pa2={zephyr_pa_files}",
-        "--max-prompt-adapters",
-        "2",
-        "--max-prompt-adapter-token",
-        "128",
     ]
 
 
@@ -98,8 +80,19 @@ def default_server_args(zephyr_lora_files, zephyr_lora_added_tokens_files,
 def server(default_server_args, request):
     if request.param:
         default_server_args.append(request.param)
-    with RemoteOpenAIServer(MODEL_NAME, default_server_args) as remote_server:
-        yield remote_server
+
+    original_value = os.environ.get('VLLM_USE_V1')
+    os.environ['VLLM_USE_V1'] = '0'
+    try:
+        with RemoteOpenAIServer(MODEL_NAME,
+                                default_server_args) as remote_server:
+            yield remote_server
+    finally:
+        # Restore original env value
+        if original_value is None:
+            os.environ.pop('VLLM_USE_V1', None)
+        else:
+            os.environ['VLLM_USE_V1'] = original_value
 
 
 @pytest_asyncio.fixture
@@ -110,14 +103,11 @@ async def client(server):
 
 @pytest.mark.asyncio
 @pytest.mark.parametrize(
-    # first test base model, then test loras, then test prompt adapters
-    "model_name,num_virtual_tokens",
-    [(MODEL_NAME, 0), ("zephyr-lora", 0), ("zephyr-lora2", 0),
-     ("zephyr-pa", PA_NUM_VIRTUAL_TOKENS),
-     ("zephyr-pa2", PA_NUM_VIRTUAL_TOKENS)],
+    # first test base model, then test loras
+    "model_name",
+    [MODEL_NAME, "zephyr-lora", "zephyr-lora2"],
 )
-async def test_single_completion(client: openai.AsyncOpenAI, model_name: str,
-                                 num_virtual_tokens: int):
+async def test_single_completion(client: openai.AsyncOpenAI, model_name: str):
     completion = await client.completions.create(model=model_name,
                                                  prompt="Hello, my name is",
                                                  max_tokens=5,
@@ -130,9 +120,7 @@ async def test_single_completion(client: openai.AsyncOpenAI, model_name: str,
     assert len(choice.text) >= 5
     assert choice.finish_reason == "length"
     assert completion.usage == openai.types.CompletionUsage(
-        completion_tokens=5,
-        prompt_tokens=6 + num_virtual_tokens,
-        total_tokens=11 + num_virtual_tokens)
+        completion_tokens=5, prompt_tokens=6, total_tokens=11)
 
     # test using token IDs
     completion = await client.completions.create(
@@ -175,9 +163,9 @@ async def test_added_lora_tokens_base_model(client: openai.AsyncOpenAI):
 
 @pytest.mark.asyncio
 @pytest.mark.parametrize(
-    # first test base model, then test loras, then test prompt adapters
+    # first test base model, then test loras
     "model_name",
-    [MODEL_NAME, "zephyr-lora", "zephyr-lora2", "zephyr-pa", "zephyr-pa2"],
+    [MODEL_NAME, "zephyr-lora", "zephyr-lora2"],
 )
 async def test_no_logprobs(client: openai.AsyncOpenAI, model_name: str):
     # test using token IDs
@@ -194,9 +182,9 @@ async def test_no_logprobs(client: openai.AsyncOpenAI, model_name: str):
 
 @pytest.mark.asyncio
 @pytest.mark.parametrize(
-    # just test 1 lora and 1 pa hereafter
+    # just test 1 lora
     "model_name",
-    [MODEL_NAME, "zephyr-lora", "zephyr-pa"],
+    [MODEL_NAME, "zephyr-lora"],
 )
 async def test_zero_logprobs(client: openai.AsyncOpenAI, model_name: str):
     # test using token IDs
@@ -217,7 +205,7 @@ async def test_zero_logprobs(client: openai.AsyncOpenAI, model_name: str):
 @pytest.mark.asyncio
 @pytest.mark.parametrize(
     "model_name",
-    [MODEL_NAME, "zephyr-lora", "zephyr-pa"],
+    [MODEL_NAME, "zephyr-lora"],
 )
 async def test_some_logprobs(client: openai.AsyncOpenAI, model_name: str):
     # test using token IDs
@@ -238,7 +226,7 @@ async def test_some_logprobs(client: openai.AsyncOpenAI, model_name: str):
 @pytest.mark.asyncio
 @pytest.mark.parametrize(
     "model_name",
-    [MODEL_NAME, "zephyr-lora", "zephyr-pa"],
+    [MODEL_NAME, "zephyr-lora"],
 )
 async def test_too_many_completion_logprobs(client: openai.AsyncOpenAI,
                                             model_name: str):
@@ -314,7 +302,7 @@ async def test_prompt_logprobs_completion(client: openai.AsyncOpenAI,
 @pytest.mark.asyncio
 @pytest.mark.parametrize(
     "model_name",
-    [MODEL_NAME, "zephyr-lora", "zephyr-pa"],
+    [MODEL_NAME, "zephyr-lora"],
 )
 async def test_completion_streaming(client: openai.AsyncOpenAI,
                                     model_name: str):
@@ -348,7 +336,7 @@ async def test_completion_streaming(client: openai.AsyncOpenAI,
 @pytest.mark.asyncio
 @pytest.mark.parametrize(
     "model_name",
-    [MODEL_NAME, "zephyr-lora", "zephyr-pa"],
+    [MODEL_NAME, "zephyr-lora"],
 )
 async def test_parallel_streaming(client: openai.AsyncOpenAI, model_name: str):
     """Streaming for parallel sampling.
@@ -382,7 +370,7 @@ async def test_parallel_streaming(client: openai.AsyncOpenAI, model_name: str):
 @pytest.mark.asyncio
 @pytest.mark.parametrize(
     "model_name",
-    [MODEL_NAME, "zephyr-lora", "zephyr-pa"],
+    [MODEL_NAME, "zephyr-lora"],
 )
 async def test_completion_stream_options(client: openai.AsyncOpenAI,
                                          model_name: str):
@@ -519,7 +507,7 @@ async def test_completion_stream_options(client: openai.AsyncOpenAI,
 @pytest.mark.asyncio
 @pytest.mark.parametrize(
     "model_name",
-    [MODEL_NAME, "zephyr-lora", "zephyr-pa"],
+    [MODEL_NAME, "zephyr-lora"],
 )
 async def test_batch_completions(client: openai.AsyncOpenAI, model_name: str):
     # test both text and token IDs
@@ -13,7 +13,6 @@ from ...utils import RemoteOpenAIServer
 from .test_completion import default_server_args  # noqa: F401
 from .test_completion import zephyr_lora_added_tokens_files  # noqa: F401
 from .test_completion import zephyr_lora_files  # noqa: F401
-from .test_completion import zephyr_pa_files  # noqa: F401
 from .test_completion import MODEL_NAME
 
 
@@ -32,8 +32,7 @@ async def _async_serving_models_init() -> OpenAIServingModels:
     serving_models = OpenAIServingModels(engine_client=mock_engine_client,
                                          base_model_paths=BASE_MODEL_PATHS,
                                          model_config=mock_model_config,
-                                         lora_modules=None,
-                                         prompt_adapters=None)
+                                         lora_modules=None)
     await serving_models.init_static_loras()
 
     return serving_models
@@ -1,48 +0,0 @@
|
||||
# SPDX-License-Identifier: Apache-2.0
|
||||
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
|
||||
|
||||
import pytest
|
||||
|
||||
import vllm
|
||||
from vllm.prompt_adapter.request import PromptAdapterRequest
|
||||
|
||||
MODEL_PATH = "bigscience/bloomz-560m"
|
||||
PA_PATH = 'stevhliu/bloomz-560m_PROMPT_TUNING_CAUSAL_LM'
|
||||
|
||||
|
||||
def do_sample(llm, pa_name: str, pa_id: int):
|
||||
|
||||
prompts = [
|
||||
"Tweet text : @nationalgridus I have no water and the bill is \
|
||||
current and paid. Can you do something about this? Label : ",
|
||||
"Tweet text : @nationalgridus Looks good thanks! Label : "
|
||||
]
|
||||
sampling_params = vllm.SamplingParams(temperature=0.0,
|
||||
max_tokens=3,
|
||||
stop_token_ids=[3])
|
||||
|
||||
outputs = llm.generate(prompts,
|
||||
sampling_params,
|
||||
prompt_adapter_request=PromptAdapterRequest(
|
||||
pa_name, pa_id, PA_PATH, 8) if pa_id else None)
|
||||
|
||||
# Print the outputs.
|
||||
generated_texts = []
|
||||
for output in outputs:
|
||||
prompt = output.prompt
|
||||
generated_text = output.outputs[0].text.strip()
|
||||
generated_texts.append(generated_text)
|
||||
print(f"Prompt: {prompt!r}, Generated text: {generated_text!r}")
|
||||
return generated_texts
|
||||
|
||||
|
||||
@pytest.mark.parametrize("enforce_eager", [True, False])
|
||||
def test_twitter_prompt_adapter(enforce_eager: bool):
|
||||
llm = vllm.LLM(MODEL_PATH,
|
||||
enforce_eager=enforce_eager,
|
||||
enable_prompt_adapter=True,
|
||||
max_prompt_adapter_token=8)
|
||||
|
||||
expected_output = ['complaint', 'no complaint']
|
||||
|
||||
assert do_sample(llm, "twitter_pa", pa_id=1) == expected_output
|
||||
@@ -1,56 +0,0 @@
|
||||
# SPDX-License-Identifier: Apache-2.0
|
||||
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
|
||||
|
||||
from vllm import EngineArgs, LLMEngine, SamplingParams
|
||||
from vllm.prompt_adapter.request import PromptAdapterRequest
|
||||
|
||||
MODEL_PATH = "bigscience/bloomz-560m"
|
||||
pa_path = 'stevhliu/bloomz-560m_PROMPT_TUNING_CAUSAL_LM'
|
||||
pa_path2 = 'swapnilbp/angry_tweet_ptune'
|
||||
|
||||
|
||||
def do_sample(engine):
|
||||
|
||||
prompts = [
|
||||
("Tweet text: I have complaints! Label: ",
|
||||
SamplingParams(temperature=0.0, max_tokens=3, stop_token_ids=[3]),
|
||||
PromptAdapterRequest("hate_speech", 1, pa_path2, 8)),
|
||||
("Tweet text: I have no problems Label: ",
|
||||
SamplingParams(temperature=0.0, max_tokens=3, stop_token_ids=[3]),
|
||||
PromptAdapterRequest("hate_speech2", 2, pa_path2, 8)),
|
||||
("Tweet text: I have complaints! Label: ",
|
||||
SamplingParams(temperature=0.0, max_tokens=3), None),
|
||||
("Tweet text: I have no problems Label: ",
|
||||
SamplingParams(temperature=0.0, max_tokens=3, stop_token_ids=[3]),
|
||||
PromptAdapterRequest("complain", 3, pa_path, 8)),
|
||||
]
|
||||
|
||||
request_id = 0
|
||||
results = set()
|
||||
while prompts or engine.has_unfinished_requests():
|
||||
if prompts:
|
||||
prompt, sampling_params, pa_request = prompts.pop(0)
|
||||
engine.add_request(str(request_id),
|
||||
prompt,
|
||||
sampling_params,
|
||||
prompt_adapter_request=pa_request)
|
||||
request_id += 1
|
||||
|
||||
request_outputs = engine.step()
|
||||
|
||||
for request_output in request_outputs:
|
||||
if request_output.finished:
|
||||
results.add(request_output.outputs[0].text)
|
||||
return results
|
||||
|
||||
|
||||
def test_multi_prompt_adapters():
|
||||
engine_args = EngineArgs(model=MODEL_PATH,
|
||||
max_prompt_adapters=3,
|
||||
enable_prompt_adapter=True,
|
||||
max_prompt_adapter_token=8)
|
||||
engine = LLMEngine.from_engine_args(engine_args)
|
||||
expected_output = {
|
||||
' quot;I', 'hate speech', 'no complaint', 'not hate speech'
|
||||
}
|
||||
assert do_sample(engine) == expected_output
|
||||
@@ -1,64 +0,0 @@
|
||||
# SPDX-License-Identifier: Apache-2.0
|
||||
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
|
||||
|
||||
from huggingface_hub import snapshot_download
|
||||
|
||||
from vllm import EngineArgs, LLMEngine, SamplingParams
|
||||
from vllm.lora.request import LoRARequest
|
||||
from vllm.prompt_adapter.request import PromptAdapterRequest
|
||||
|
||||
MODEL_PATH = "meta-llama/Llama-2-7b-hf"
|
||||
pa_path = snapshot_download(repo_id="swapnilbp/llama_tweet_ptune")
|
||||
lora_path = snapshot_download(repo_id="yard1/llama-2-7b-sql-lora-test")
|
||||
|
||||
|
||||
def do_sample(engine):
|
||||
|
||||
prompt_text = "[user] Write a SQL query to answer the question based on the table schema.\n\n context: CREATE TABLE table_name_74 (icao VARCHAR, airport VARCHAR)\n\n question: Name the ICAO for lilongwe international airport [/user] [assistant]" # noqa: E501
|
||||
|
||||
# first prompt with a prompt adapter and second without adapter
|
||||
prompts = [
|
||||
(prompt_text,
|
||||
SamplingParams(temperature=0.0, max_tokens=100,
|
||||
stop=["[/assistant]"]),
|
||||
PromptAdapterRequest("hate_speech", 1, pa_path,
|
||||
8), LoRARequest("sql_test", 1, lora_path)),
|
||||
(prompt_text,
|
||||
SamplingParams(temperature=0.0, max_tokens=100,
|
||||
stop=["[/assistant]"]), None,
|
||||
LoRARequest("sql_test", 1, lora_path)),
|
||||
]
|
||||
|
||||
request_id = 0
|
||||
results = set()
|
||||
while prompts or engine.has_unfinished_requests():
|
||||
if prompts:
|
||||
prompt, sampling_params, pa_request, lora_request = prompts.pop(0)
|
||||
engine.add_request(str(request_id),
|
||||
prompt,
|
||||
sampling_params,
|
||||
prompt_adapter_request=pa_request,
|
||||
lora_request=lora_request)
|
||||
request_id += 1
|
||||
|
||||
request_outputs = engine.step()
|
||||
|
||||
for request_output in request_outputs:
|
||||
if request_output.finished:
|
||||
results.add(request_output.outputs[0].text)
|
||||
return results
|
||||
|
||||
|
||||
def test_lora_prompt_adapter():
|
||||
engine_args = EngineArgs(model=MODEL_PATH,
|
||||
enable_prompt_adapter=True,
|
||||
enable_lora=True,
|
||||
max_num_seqs=60,
|
||||
max_prompt_adapter_token=8)
|
||||
engine = LLMEngine.from_engine_args(engine_args)
|
||||
result = do_sample(engine)
|
||||
|
||||
expected_output = {
|
||||
" SELECT icao FROM table_name_74 WHERE airport = 'lilongwe international airport' " # noqa: E501
|
||||
}
|
||||
assert result == expected_output
|
||||