[Frontend][1/N] Improve all pooling tasks | Support FP16 Embedding Base64 (still uses fp32 by default). (#26414)
Signed-off-by: wang.yuqi <noooop@126.com>
Co-authored-by: Maximilien de Bayser <maxdebayser@gmail.com>
Co-authored-by: Cyrus Leung <cyrus.tl.leung@gmail.com>
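This change lets clients request base64-encoded pooling/embedding payloads in a lower-precision dtype such as fp16 via a new `embed_dtype` request field; float responses and the base64 default remain fp32, so existing clients are unaffected. The following is a minimal client-side sketch of the new parameter. The endpoint path, request fields, and decode pattern are taken from the tests in the diff below; the server URL is an assumption for illustration, and "float16" is assumed to be one of the keys of EMBED_DTYPE_TO_TORCH_DTYPE.

import base64

import requests
import torch

# Assumed local server URL for illustration; not part of this commit.
URL = "http://localhost:8000/pooling"

# Request a base64-encoded embedding as fp16 via the new `embed_dtype` field.
response = requests.post(
    URL,
    json={
        "model": "internlm/internlm2-1_8b-reward",
        "input": ["The best thing about vLLM is that it supports many different models"],
        "encoding_format": "base64",
        "embed_dtype": "float16",  # assumed key of EMBED_DTYPE_TO_TORCH_DTYPE
    },
)

# The payload is the raw fp16 buffer, so it must be read back with the
# matching torch dtype before upcasting to fp32 for downstream use.
for item in response.json()["data"]:
    embedding = torch.frombuffer(
        base64.b64decode(item["data"]), dtype=torch.float16
    ).to(torch.float32)
    print(embedding.shape)

Since fp16 uses two bytes per element instead of four, requesting it roughly halves the base64 payload size relative to the fp32 default.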
@@ -6,10 +6,11 @@ import base64
 import numpy as np
 import pytest
 import requests
+import torch
 
 from tests.models.utils import check_embeddings_close
 from tests.utils import RemoteOpenAIServer
-from vllm.entrypoints.openai.protocol import PoolingResponse
+from vllm.entrypoints.openai.protocol import EMBED_DTYPE_TO_TORCH_DTYPE, PoolingResponse
 from vllm.transformers_utils.tokenizer import get_tokenizer
 
 MODEL_NAME = "internlm/internlm2-1_8b-reward"
@@ -248,6 +249,80 @@ async def test_batch_base64_pooling(server: RemoteOpenAIServer, model_name: str)
     )
 
 
+@pytest.mark.asyncio
+@pytest.mark.parametrize("model_name", [MODEL_NAME])
+async def test_base64_embed_dtype(server: RemoteOpenAIServer, model_name: str):
+    input_texts = [
+        "The best thing about vLLM is that it supports many different models",
+    ]
+
+    url = server.url_for("pooling")
+    float_response = requests.post(
+        url,
+        json={
+            "model": model_name,
+            "input": input_texts,
+            "encoding_format": "float",
+        },
+    )
+    responses_float = PoolingResponse.model_validate(float_response.json())
+    float_data = [np.array(d.data).squeeze(-1).tolist() for d in responses_float.data]
+
+    for embed_dtype, torch_dtype in EMBED_DTYPE_TO_TORCH_DTYPE.items():
+        responses_base64 = requests.post(
+            url,
+            json={
+                "model": model_name,
+                "input": input_texts,
+                "encoding_format": "base64",
+                "embed_dtype": embed_dtype,
+            },
+        )
+
+        base64_data = []
+        for data in responses_base64.json()["data"]:
+            base64_data.append(
+                torch.frombuffer(base64.b64decode(data["data"]), dtype=torch_dtype)
+                .to(torch.float32)
+                .tolist()
+            )
+
+        check_embeddings_close(
+            embeddings_0_lst=float_data,
+            embeddings_1_lst=base64_data,
+            name_0="float_data",
+            name_1="base64_data",
+            tol=1e-2,
+        )
+
+
+@pytest.mark.asyncio
+@pytest.mark.parametrize("model_name", [MODEL_NAME])
+async def test_base64_embed_dtype_not_supported(
+    server: RemoteOpenAIServer, model_name: str
+):
+    input_texts = [
+        "The best thing about vLLM is that it supports many different models",
+    ]
+
+    bad_embed_dtype = "bad_embed_dtype"
+
+    responses_base64 = requests.post(
+        server.url_for("pooling"),
+        json={
+            "model": model_name,
+            "input": input_texts,
+            "encoding_format": "base64",
+            "embed_dtype": bad_embed_dtype,
+        },
+    )
+
+    assert responses_base64.status_code == 400
+    assert responses_base64.json()["error"]["message"].startswith(
+        f"embed_dtype={bad_embed_dtype!r} is not supported."
+    )
+
+
 @pytest.mark.asyncio
 async def test_invocations(server: RemoteOpenAIServer):
     input_texts = [