2025-10-14 03:06:43 +08:00
|
|
|
# SPDX-License-Identifier: Apache-2.0
|
|
|
|
|
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
|
|
|
|
|
"""Example Python client for embedding API using vLLM API server
|
|
|
|
|
NOTE:
|
|
|
|
|
start a supported embeddings model server with `vllm serve`, e.g.
|
|
|
|
|
vllm serve intfloat/e5-small
|
|
|
|
|
"""
|
|
|
|
|
|
|
|
|
|
import argparse
|
|
|
|
|
|
2026-03-17 22:44:19 +08:00
|
|
|
import pybase64 as base64
|
2025-10-14 03:06:43 +08:00
|
|
|
import requests
|
|
|
|
|
import torch
|
|
|
|
|
|
2026-02-03 18:29:18 +08:00
|
|
|
from vllm.utils.serial_utils import EMBED_DTYPES, ENDIANNESS, binary2tensor
|
2025-10-14 03:06:43 +08:00
|
|
|
|
|
|
|
|
|
|
|
|
|
def post_http_request(prompt: dict, api_url: str) -> requests.Response:
    """POST *prompt* as a JSON body to *api_url* and return the raw response.

    Args:
        prompt: JSON-serializable request payload for the vLLM server.
        api_url: Full URL of the endpoint to hit.

    Returns:
        The unprocessed ``requests.Response`` from the server.
    """
    return requests.post(
        api_url,
        headers={"User-Agent": "Test Client"},
        json=prompt,
    )
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
def parse_args(argv=None):
    """Parse command-line options for the embedding client.

    Args:
        argv: Optional list of argument strings to parse. ``None`` (the
            default) falls back to ``sys.argv[1:]``, preserving the original
            behavior while making the function testable.

    Returns:
        ``argparse.Namespace`` with ``host`` (str, default ``"localhost"``)
        and ``port`` (int, default ``8000``).
    """
    parser = argparse.ArgumentParser()
    parser.add_argument("--host", type=str, default="localhost")
    parser.add_argument("--port", type=int, default=8000)
    return parser.parse_args(argv)
|
2025-10-14 03:06:43 +08:00
|
|
|
|
|
|
|
|
|
|
|
|
|
def main(args):
    """Exercise the /v1/embeddings endpoint for every supported
    embed_dtype/endianness combination and print the decoded tensor shapes.

    Args:
        args: Namespace with ``host`` and ``port`` of a running vLLM server
            (see module docstring for how to start one).
    """
    base_url = f"http://{args.host}:{args.port}"
    models_url = base_url + "/v1/models"
    embeddings_url = base_url + "/v1/embeddings"

    # Discover the served model name instead of hard-coding it.
    response = requests.get(models_url)
    # Fail fast with a clear HTTPError rather than a confusing KeyError
    # below when the server responds with a non-2xx status.
    response.raise_for_status()
    model = response.json()["data"][0]["id"]

    input_texts = [
        "The best thing about vLLM is that it supports many different models",
    ] * 2

    # The OpenAI client does not support the embed_dtype and endianness
    # parameters, so this example uses raw HTTP requests.
    for embed_dtype in EMBED_DTYPES:
        for endianness in ENDIANNESS:
            prompt = {
                "model": model,
                "input": input_texts,
                "encoding_format": "base64",
                "embed_dtype": embed_dtype,
                "endianness": endianness,
            }
            response = post_http_request(prompt=prompt, api_url=embeddings_url)
            response.raise_for_status()

            embedding = []
            for data in response.json()["data"]:
                # The embedding arrives as base64-encoded raw tensor bytes.
                binary = base64.b64decode(data["embedding"])
                tensor = binary2tensor(binary, (-1,), embed_dtype, endianness)
                # Upcast to float32 so every dtype stacks/prints uniformly.
                embedding.append(tensor.to(torch.float32))
            embedding = torch.stack(embedding)
            print(embed_dtype, endianness, embedding.shape)
|
2025-10-14 03:06:43 +08:00
|
|
|
|
|
|
|
|
|
|
|
|
|
if __name__ == "__main__":
    # Entry point: parse CLI options and run the example client.
    main(parse_args())
|