# SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
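"""Compare embeddings from vLLM's pooling runner against sentence-transformers."""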

import pytest

from vllm.config import PoolerConfig

from ...utils import check_embeddings_close


@pytest.mark.parametrize(
    "model",
    [
        # Be careful of the order of models, decoder-only models should be
        # placed before encoder-only models, otherwise `Qwen2.5-0.5B-Instruct`
        # case won't pass because gte-Qwen2-1.5B-instruct will cache custom
        # model code with bidirectional attention.
        # [Decoder-only]
        pytest.param(
            "BAAI/bge-multilingual-gemma2",
            marks=[pytest.mark.core_model, pytest.mark.slow_test],
        ),
        pytest.param(
            "intfloat/e5-mistral-7b-instruct",
            marks=[pytest.mark.core_model, pytest.mark.cpu_model],
        ),
        pytest.param(
            "ssmits/Qwen2-7B-Instruct-embed-base", marks=[pytest.mark.cpu_model]
        ),
        # [Encoder-only]
        pytest.param(
            "BAAI/bge-base-en-v1.5",
            marks=[
                pytest.mark.core_model,
                pytest.mark.cpu_model,
                pytest.mark.slow_test,
            ],
        ),
        pytest.param("sentence-transformers/all-MiniLM-L12-v2"),
        pytest.param("intfloat/multilingual-e5-small"),
        # [Cross-Encoder]
        pytest.param(
            "sentence-transformers/stsb-roberta-base-v2",
            marks=[pytest.mark.core_model, pytest.mark.cpu_model],
        ),
    ],
)
def test_models(
    hf_runner,
    vllm_runner,
    example_prompts,
    model,
) -> None:
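    # ssmits/Qwen2-7B-Instruct-embed-base is run with an explicit pooler
    # override (mean pooling, activation disabled); the other models keep
    # their default pooling configuration.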
    vllm_extra_kwargs = {}
    if model == "ssmits/Qwen2-7B-Instruct-embed-base":
        vllm_extra_kwargs["pooler_config"] = PoolerConfig(
            seq_pooling_type="MEAN", use_activation=False
        )
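
    # Use a short 512-token context for most models; for the two listed below,
    # pass max_model_len=None so the length is derived from the model config.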
    max_model_len: int | None = 512
    if model in [
        "sentence-transformers/all-MiniLM-L12-v2",
        "sentence-transformers/stsb-roberta-base-v2",
    ]:
        max_model_len = None

    # The example_prompts end with "\n", for example:
    # "Write a short story about a robot that dreams for the first time.\n"
    # sentence_transformers strips the input texts, see:
    # https://github.com/UKPLab/sentence-transformers/blob/v3.1.1/sentence_transformers/models/Transformer.py#L159
    # This makes the input_ids differ between hf_model and vllm_model,
    # so we strip the input texts here to keep the test from failing.
    example_prompts = [str(s).strip() for s in example_prompts]
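
    # Reference embeddings are produced with sentence-transformers via hf_runner.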
    with hf_runner(model, is_sentence_transformer=True) as hf_model:
        hf_outputs = hf_model.encode(example_prompts)
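
    # Embeddings under test are produced with vLLM's pooling runner.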
    with vllm_runner(
        model, runner="pooling", max_model_len=max_model_len, **vllm_extra_kwargs
    ) as vllm_model:
        vllm_outputs = vllm_model.embed(example_prompts)

    check_embeddings_close(
        embeddings_0_lst=hf_outputs,
        embeddings_1_lst=vllm_outputs,
        name_0="hf",
        name_1="vllm",
        tol=1e-2,
    )