[New Model]: nomic-embed-text-v2-moe (#17785)
tests/models/language/pooling/mteb_utils.py (new file, 111 lines added)
@@ -0,0 +1,111 @@
# SPDX-License-Identifier: Apache-2.0
import math
from collections.abc import Sequence

import mteb
import numpy as np
import pytest

from tests.models.utils import EmbedModelInfo

# Most models on the STS12 task (See #17175):
# - Model implementation and minor changes in tensor dtype
#   results in differences less than 1e-4
# - Different model results in differences more than 1e-3
# 1e-4 is a good tolerance threshold
MTEB_EMBED_TASKS = ["STS12"]
MTEB_EMBED_TOL = 1e-4


class VllmMtebEncoder(mteb.Encoder):

    def __init__(self, vllm_model):
        super().__init__()
        self.model = vllm_model
        self.rng = np.random.default_rng(seed=42)

    def encode(
        self,
        sentences: Sequence[str],
        *args,
        **kwargs,
    ) -> np.ndarray:
        # Hoping to discover potential scheduling
        # issues by randomizing the order.
        r = self.rng.permutation(len(sentences))
        sentences = [sentences[i] for i in r]
        outputs = self.model.encode(sentences, use_tqdm=False)
        embeds = np.array(outputs)
        embeds = embeds[np.argsort(r)]
        return embeds


class OpenAIClientMtebEncoder(mteb.Encoder):

    def __init__(self, model_name: str, client):
        super().__init__()
        self.model_name = model_name
        self.client = client
        self.rng = np.random.default_rng(seed=42)

    def encode(self, sentences: Sequence[str], *args, **kwargs) -> np.ndarray:
        # Hoping to discover potential scheduling
        # issues by randomizing the order.
        r = self.rng.permutation(len(sentences))
        sentences = [sentences[i] for i in r]

        embeddings = self.client.embeddings.create(model=self.model_name,
                                                   input=sentences)
        outputs = [d.embedding for d in embeddings.data]
        embeds = np.array(outputs)
        embeds = embeds[np.argsort(r)]
        return embeds


def run_mteb_embed_task(encoder, tasks):
    tasks = mteb.get_tasks(tasks=tasks)
    evaluation = mteb.MTEB(tasks=tasks)
    results = evaluation.run(encoder, verbosity=0, output_folder=None)

    main_score = results[0].scores["test"][0]["main_score"]
    return main_score


def run_mteb_embed_task_st(model_name, tasks):
    from sentence_transformers import SentenceTransformer
    model = SentenceTransformer(model_name)
    return run_mteb_embed_task(model, tasks)


def mteb_test_embed_models(hf_runner, vllm_runner, model_info: EmbedModelInfo):
    if not model_info.enable_test:
        # A model family has many models with the same architecture,
        # and we don't need to test each one.
        pytest.skip("Skipping test.")

    with vllm_runner(model_info.name,
                     task="embed",
                     max_model_len=None,
                     dtype=model_info.dtype) as vllm_model:

        if model_info.architecture:
            assert (model_info.architecture
                    in vllm_model.model.llm_engine.model_config.architectures)

        vllm_main_score = run_mteb_embed_task(VllmMtebEncoder(vllm_model),
                                              MTEB_EMBED_TASKS)
        vllm_dtype = vllm_model.model.llm_engine.model_config.dtype
        model_dtype = getattr(
            vllm_model.model.llm_engine.model_config.hf_config, "torch_dtype",
            vllm_dtype)

    with hf_runner(model_info.name,
                   is_sentence_transformer=True,
                   dtype=model_dtype) as hf_model:
        st_main_score = run_mteb_embed_task(hf_model, MTEB_EMBED_TASKS)

    print("VLLM:", vllm_dtype, vllm_main_score)
    print("SentenceTransformer:", model_dtype, st_main_score)
    print("Difference:", st_main_score - vllm_main_score)

    assert math.isclose(st_main_score, vllm_main_score, rel_tol=MTEB_EMBED_TOL)
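For reference, the OpenAI-compatible path that OpenAIClientMtebEncoder targets can also be driven outside pytest. The sketch below is a minimal, hypothetical driver and is not part of this commit: it assumes a vLLM server is already serving the model (for example via vllm serve nomic-ai/nomic-embed-text-v2-moe --task embed), that it is reachable at the base_url shown, and that the repo's tests package is importable from the working directory.

# Hypothetical standalone driver for OpenAIClientMtebEncoder (not part of this
# commit). Assumes an OpenAI-compatible vLLM server is already running and
# reachable at base_url below; model name, URL, and api_key are placeholders.
from openai import OpenAI

from tests.models.language.pooling.mteb_utils import (MTEB_EMBED_TASKS,
                                                      OpenAIClientMtebEncoder,
                                                      run_mteb_embed_task)

client = OpenAI(base_url="http://localhost:8000/v1", api_key="EMPTY")
encoder = OpenAIClientMtebEncoder("nomic-ai/nomic-embed-text-v2-moe", client)
main_score = run_mteb_embed_task(encoder, MTEB_EMBED_TASKS)
print("STS12 main_score:", main_score)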
tests/models/language/pooling/test_nomic.py (new file, 47 lines added)
@@ -0,0 +1,47 @@
# SPDX-License-Identifier: Apache-2.0

import pytest

from ...utils import EmbedModelInfo, run_embedding_correctness_test

MODELS = [
    EmbedModelInfo("nomic-ai/nomic-embed-text-v1",
                   architecture="NomicBertModel",
                   dtype="float32",
                   enable_test=True),
    EmbedModelInfo("nomic-ai/nomic-embed-text-v1.5",
                   architecture="NomicBertModel",
                   dtype="float32",
                   enable_test=False),
    EmbedModelInfo("nomic-ai/nomic-embed-text-v2-moe",
                   architecture="NomicBertModel",
                   dtype="float32",
                   enable_test=True)
]


@pytest.mark.parametrize("model_info", MODELS)
def test_models_mteb(hf_runner, vllm_runner,
                     model_info: EmbedModelInfo) -> None:
    from .mteb_utils import mteb_test_embed_models
    mteb_test_embed_models(hf_runner, vllm_runner, model_info)


@pytest.mark.parametrize("model_info", MODELS)
def test_models_correctness(hf_runner, vllm_runner, model_info: EmbedModelInfo,
                            example_prompts) -> None:
    if not model_info.enable_test:
        pytest.skip("Skipping test.")

    with vllm_runner(model_info.name,
                     task="embed",
                     dtype=model_info.dtype,
                     max_model_len=None) as vllm_model:
        vllm_outputs = vllm_model.encode(example_prompts)

    with hf_runner(
            model_info.name,
            dtype=model_info.dtype,
            is_sentence_transformer=True,
    ) as hf_model:
        run_embedding_correctness_test(hf_model, example_prompts, vllm_outputs)
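Outside the test suite, the newly supported checkpoint can be smoke-tested through vLLM's offline LLM API. The sketch below is an assumption-laden example rather than part of this commit: task="embed" mirrors the runner usage above, while trust_remote_code is an assumption in case the Hugging Face config requires it.

# Hypothetical offline smoke test for the new checkpoint (not from this commit).
from vllm import LLM

llm = LLM(model="nomic-ai/nomic-embed-text-v2-moe",
          task="embed",
          trust_remote_code=True)  # assumption: remote code may be needed
outputs = llm.embed(["what is snowflake?", "The Data Cloud!"])
for out in outputs:
    print(len(out.outputs.embedding))  # embedding dimensionality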
@@ -1,12 +1,8 @@
 # SPDX-License-Identifier: Apache-2.0
 
 import pytest
 
-from ...utils import EmbedModelInfo, check_embeddings_close
-
-EMBEDDING_PROMPTS = [
-    'what is snowflake?', 'Where can I get the best tacos?', 'The Data Cloud!',
-    'Mexico City of Course!'
-]
+from ...utils import EmbedModelInfo, run_embedding_correctness_test
+
 MODELS = [
     EmbedModelInfo("Snowflake/snowflake-arctic-embed-xs",
@@ -45,51 +41,34 @@ MODELS = [
 
 
 @pytest.mark.parametrize("model_info", MODELS)
-@pytest.mark.parametrize("dtype", ["half"])
-def test_models(
+def test_models_mteb(
     hf_runner,
     vllm_runner,
-    example_prompts,
     model_info: EmbedModelInfo,
-    dtype: str,
-    monkeypatch,
 ) -> None:
+    from .mteb_utils import mteb_test_embed_models
+    mteb_test_embed_models(hf_runner, vllm_runner, model_info)
+
+
+@pytest.mark.parametrize("model_info", MODELS)
+def test_models_correctness(
+    hf_runner,
+    vllm_runner,
+    model_info: EmbedModelInfo,
+    example_prompts,
+) -> None:
     if not model_info.enable_test:
-        # A model family has many models with the same architecture,
-        # and we don't need to test each one.
         pytest.skip("Skipping test.")
 
-    example_prompts = example_prompts + EMBEDDING_PROMPTS
-
-    vllm_extra_kwargs = {
-        "hf_overrides": {
-            "is_matryoshka": model_info.is_matryoshka
-        }
-    }
-
-    with hf_runner(model_info.name, dtype=dtype,
-                   is_sentence_transformer=True) as hf_model:
-        hf_outputs = hf_model.encode(example_prompts)
-
     with vllm_runner(model_info.name,
                      task="embed",
-                     dtype=dtype,
-                     max_model_len=None,
-                     **vllm_extra_kwargs) as vllm_model:
-
-        assert (vllm_model.model.llm_engine.model_config.is_matryoshka ==
-                model_info.is_matryoshka)
-
-        if model_info.architecture:
-            assert (model_info.architecture
-                    in vllm_model.model.llm_engine.model_config.architectures)
-
+                     dtype=model_info.dtype,
+                     max_model_len=None) as vllm_model:
         vllm_outputs = vllm_model.encode(example_prompts)
 
-    check_embeddings_close(
-        embeddings_0_lst=hf_outputs,
-        embeddings_1_lst=vllm_outputs,
-        name_0="hf",
-        name_1="vllm",
-        tol=1e-2,
-    )
+    with hf_runner(
+            model_info.name,
+            dtype=model_info.dtype,
+            is_sentence_transformer=True,
+    ) as hf_model:
        run_embedding_correctness_test(hf_model, example_prompts, vllm_outputs)
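The shared run_embedding_correctness_test helper (imported from ...utils above) replaces the per-file check_embeddings_close call. Its implementation is not shown in this diff; the snippet below is only an illustrative stand-in for what such a check typically does, with an assumed cosine-similarity floor rather than the repo's actual threshold.

# Illustrative stand-in for the repo's run_embedding_correctness_test helper
# (assumed logic and threshold, not copied from vLLM).
import numpy as np


def assumed_embedding_correctness_check(ref_embeds, test_embeds,
                                        min_cos_sim: float = 0.99):
    # Compare each reference (SentenceTransformers) embedding against the
    # corresponding vLLM embedding via cosine similarity.
    for ref, test in zip(ref_embeds, test_embeds):
        ref = np.asarray(ref, dtype=np.float64)
        test = np.asarray(test, dtype=np.float64)
        cos = ref @ test / (np.linalg.norm(ref) * np.linalg.norm(test))
        assert cos >= min_cos_sim, f"cosine similarity too low: {cos:.4f}"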