[Deprecation][2/N] Replace --task with --runner and --convert (#21470)

Signed-off-by: DarkLight1337 <tlleungac@connect.ust.hk>
Signed-off-by: Harry Mellor <19981378+hmellor@users.noreply.github.com>
Co-authored-by: Harry Mellor <19981378+hmellor@users.noreply.github.com>
Author: Cyrus Leung
Date: 2025-07-28 10:42:40 +08:00
Committed by: GitHub
Parent commit: 8f605ee309
Commit: 86ae693f20

94 changed files with 1117 additions and 1083 deletions

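For context, a minimal sketch of what the rename means at the API level (the model name and exact keyword forwarding below are illustrative assumptions, not taken from this diff): the deprecated task argument is replaced by an explicit runner kind, with "pooling" covering embedding models.

    from vllm import LLM

    # Previously (deprecated): LLM(model="BAAI/bge-m3", task="embed")
    # With this change the runner kind is selected explicitly instead.
    llm = LLM(model="BAAI/bge-m3", runner="pooling")

    # On the CLI, the assumed equivalent is `--runner pooling` in place of
    # `--task embed`, with `--convert` selecting a conversion where one is needed.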

@@ -21,7 +21,7 @@ max_model_len = int(original_max_position_embeddings * factor)
 @pytest.mark.parametrize("model_info", MODELS)
 def test_default(model_info, vllm_runner):
-    with vllm_runner(model_info.name, task="embed",
+    with vllm_runner(model_info.name, runner="pooling",
                      max_model_len=None) as vllm_model:
         model_config = vllm_model.llm.llm_engine.model_config
         if model_info.name == "nomic-ai/nomic-embed-text-v2-moe":
@@ -36,7 +36,7 @@ def test_default(model_info, vllm_runner):
 @pytest.mark.parametrize("model_info", MODELS)
 def test_set_max_model_len_legal(model_info, vllm_runner):
     # set max_model_len <= 512
-    with vllm_runner(model_info.name, task="embed",
+    with vllm_runner(model_info.name, runner="pooling",
                      max_model_len=256) as vllm_model:
         model_config = vllm_model.llm.llm_engine.model_config
         assert model_config.max_model_len == 256
@@ -46,11 +46,12 @@ def test_set_max_model_len_legal(model_info, vllm_runner):
         # For nomic-embed-text-v2-moe the length is set to 512
         # by sentence_bert_config.json.
         with pytest.raises(ValueError):
-            with vllm_runner(model_info.name, task="embed",
+            with vllm_runner(model_info.name,
+                             runner="pooling",
                              max_model_len=1024):
                 pass
     else:
-        with vllm_runner(model_info.name, task="embed",
+        with vllm_runner(model_info.name, runner="pooling",
                          max_model_len=1024) as vllm_model:
             model_config = vllm_model.llm.llm_engine.model_config
             assert model_config.max_model_len == 1024
@@ -60,14 +61,15 @@ def test_set_max_model_len_legal(model_info, vllm_runner):
def test_set_max_model_len_illegal(model_info, vllm_runner):
# set max_model_len > 2048
with pytest.raises(ValueError):
with vllm_runner(model_info.name, task="embed", max_model_len=4096):
with vllm_runner(model_info.name, runner="pooling",
max_model_len=4096):
pass
# set max_model_len > 2048 by hf_overrides
hf_overrides = {"max_model_len": 4096}
with pytest.raises(ValueError):
with vllm_runner(model_info.name,
task="embed",
runner="pooling",
max_model_len=None,
hf_overrides=hf_overrides):
pass
@@ -87,7 +89,7 @@ def test_use_rope_scaling_legal(model_info, vllm_runner):
     }
     with vllm_runner(model_info.name,
-                     task="embed",
+                     runner="pooling",
                      max_model_len=None,
                      hf_overrides=hf_overrides):
         pass
@@ -107,7 +109,7 @@ def test_use_rope_scaling_illegal(model_info, vllm_runner):
     # illegal max_model_len
     with pytest.raises(ValueError):
         with vllm_runner(model_info.name,
-                         task="embed",
+                         runner="pooling",
                          max_model_len=max_model_len + 1,
                          hf_overrides=hf_overrides):
             pass
@@ -125,7 +127,7 @@ def test_use_rope_scaling_illegal(model_info, vllm_runner):
     # illegal max_model_len by hf_overrides
     with pytest.raises(ValueError):
         with vllm_runner(model_info.name,
-                         task="embed",
+                         runner="pooling",
                          max_model_len=None,
                          hf_overrides=hf_overrides):
             pass