diff --git a/docs/serving/openai_compatible_server.md b/docs/serving/openai_compatible_server.md
index 21f6de962..438624d91 100644
--- a/docs/serving/openai_compatible_server.md
+++ b/docs/serving/openai_compatible_server.md
@@ -197,7 +197,7 @@ The following [sampling parameters](../api/README.md#inference-parameters) are s
 ??? code

     ```python
-    --8<-- "vllm/entrypoints/openai/protocol.py:completion-sampling-params"
+    --8<-- "vllm/entrypoints/openai/completion/protocol.py:completion-sampling-params"
     ```

 The following extra parameters are supported:

@@ -205,7 +205,7 @@ The following extra parameters are supported:
 ??? code

     ```python
-    --8<-- "vllm/entrypoints/openai/protocol.py:completion-extra-params"
+    --8<-- "vllm/entrypoints/openai/completion/protocol.py:completion-extra-params"
     ```

 ### Chat API

@@ -228,7 +228,7 @@ The following [sampling parameters](../api/README.md#inference-parameters) are s
 ??? code

     ```python
-    --8<-- "vllm/entrypoints/openai/protocol.py:chat-completion-sampling-params"
+    --8<-- "vllm/entrypoints/openai/chat_completion/protocol.py:chat-completion-sampling-params"
     ```

 The following extra parameters are supported:

@@ -236,7 +236,7 @@ The following extra parameters are supported:
 ??? code

     ```python
-    --8<-- "vllm/entrypoints/openai/protocol.py:chat-completion-extra-params"
+    --8<-- "vllm/entrypoints/openai/chat_completion/protocol.py:chat-completion-extra-params"
     ```

 ### Responses API

@@ -253,7 +253,7 @@ The following extra parameters in the request object are supported:
 ??? code

     ```python
-    --8<-- "vllm/entrypoints/openai/protocol.py:responses-extra-params"
+    --8<-- "vllm/entrypoints/openai/responses/protocol.py:responses-extra-params"
     ```

 The following extra parameters in the response object are supported:

@@ -261,7 +261,7 @@ The following extra parameters in the response object are supported:
 ??? code

     ```python
-    --8<-- "vllm/entrypoints/openai/protocol.py:responses-response-extra-params"
+    --8<-- "vllm/entrypoints/openai/responses/protocol.py:responses-response-extra-params"
     ```

 ### Embeddings API

@@ -378,23 +378,53 @@ The following [pooling parameters][vllm.PoolingParams] are supported.

 ```python
 --8<-- "vllm/pooling_params.py:common-pooling-params"
---8<-- "vllm/pooling_params.py:embedding-pooling-params"
+--8<-- "vllm/pooling_params.py:embed-pooling-params"
 ```

-The following extra parameters are supported by default:
+The following Embeddings API parameters are supported:

 ??? code

     ```python
-    --8<-- "vllm/entrypoints/pooling/embed/protocol.py:embedding-extra-params"
+    --8<-- "vllm/entrypoints/pooling/base/protocol.py:pooling-common-params"
+    --8<-- "vllm/entrypoints/pooling/base/protocol.py:completion-params"
+    --8<-- "vllm/entrypoints/pooling/base/protocol.py:encoding-params"
+    --8<-- "vllm/entrypoints/pooling/base/protocol.py:embed-params"
     ```

-For chat-like input (i.e. if `messages` is passed), these extra parameters are supported instead:
+The following extra parameters are supported:

 ??? code

     ```python
-    --8<-- "vllm/entrypoints/pooling/embed/protocol.py:chat-embedding-extra-params"
+    --8<-- "vllm/entrypoints/pooling/base/protocol.py:pooling-common-extra-params"
+    --8<-- "vllm/entrypoints/pooling/base/protocol.py:completion-extra-params"
+    --8<-- "vllm/entrypoints/pooling/base/protocol.py:encoding-extra-params"
+    --8<-- "vllm/entrypoints/pooling/base/protocol.py:embed-extra-params"
+    ```
+
+For chat-like input (i.e. if `messages` is passed), the following parameters are supported:
+
+??? code
+
+    ```python
+    --8<-- "vllm/entrypoints/pooling/base/protocol.py:pooling-common-params"
+    --8<-- "vllm/entrypoints/pooling/base/protocol.py:chat-params"
+    --8<-- "vllm/entrypoints/pooling/base/protocol.py:encoding-params"
+    --8<-- "vllm/entrypoints/pooling/base/protocol.py:embed-params"
+    ```
+
+The following extra parameters are supported instead:
+
+??? code
+
+    ```python
+    --8<-- "vllm/entrypoints/pooling/base/protocol.py:pooling-common-extra-params"
+    --8<-- "vllm/entrypoints/pooling/base/protocol.py:chat-extra-params"
+    --8<-- "vllm/entrypoints/pooling/base/protocol.py:encoding-extra-params"
+    --8<-- "vllm/entrypoints/pooling/base/protocol.py:embed-extra-params"
     ```
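+For example, a chat-like embeddings request can be sent as follows (a minimal sketch; the model name is illustrative, and the server is assumed to have been started with a pooling runner):
+
+??? code
+
+    ```python
+    import requests
+
+    # Chat-like input: pass `messages` instead of `input`.
+    response = requests.post(
+        "http://localhost:8000/v1/embeddings",
+        json={
+            "model": "intfloat/e5-mistral-7b-instruct",
+            "messages": [{"role": "user", "content": "Hello, how are you?"}],
+        },
+    )
+    response.raise_for_status()
+    print(response.json()["data"][0]["embedding"][:4])
+    ```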

 ### Transcriptions API

@@ -659,14 +689,48 @@ The following [pooling parameters][vllm.PoolingParams] are supported.

 ```python
 --8<-- "vllm/pooling_params.py:common-pooling-params"
---8<-- "vllm/pooling_params.py:classification-pooling-params"
+--8<-- "vllm/pooling_params.py:classify-pooling-params"
 ```

+The following Classification API parameters are supported:
+
+??? code
+
+    ```python
+    --8<-- "vllm/entrypoints/pooling/base/protocol.py:pooling-common-params"
+    --8<-- "vllm/entrypoints/pooling/base/protocol.py:completion-params"
+    --8<-- "vllm/entrypoints/pooling/base/protocol.py:classify-params"
+    ```
+
 The following extra parameters are supported:

-```python
---8<-- "vllm/entrypoints/pooling/classify/protocol.py:classification-extra-params"
-```
+??? code
+
+    ```python
+    --8<-- "vllm/entrypoints/pooling/base/protocol.py:pooling-common-extra-params"
+    --8<-- "vllm/entrypoints/pooling/base/protocol.py:completion-extra-params"
+    --8<-- "vllm/entrypoints/pooling/base/protocol.py:classify-extra-params"
+    ```
+
+For chat-like input (i.e. if `messages` is passed), the following parameters are supported:
+
+??? code
+
+    ```python
+    --8<-- "vllm/entrypoints/pooling/base/protocol.py:pooling-common-params"
+    --8<-- "vllm/entrypoints/pooling/base/protocol.py:chat-params"
+    --8<-- "vllm/entrypoints/pooling/base/protocol.py:classify-params"
+    ```
+
+The following extra parameters are supported instead:
+
+??? code
+
+    ```python
+    --8<-- "vllm/entrypoints/pooling/base/protocol.py:pooling-common-extra-params"
+    --8<-- "vllm/entrypoints/pooling/base/protocol.py:chat-extra-params"
+    --8<-- "vllm/entrypoints/pooling/base/protocol.py:classify-extra-params"
+    ```
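+For example, a chat-like classification request can be sent as follows (a minimal sketch mirroring `tests/entrypoints/pooling/classify/test_online_vision.py`; the model name is illustrative):
+
+??? code
+
+    ```python
+    import requests
+
+    response = requests.post(
+        "http://localhost:8000/classify",
+        json={
+            "model": "jason9693/Qwen2.5-1.5B-apeach",
+            "messages": [{"role": "user", "content": "vLLM is wonderful!"}],
+        },
+    )
+    response.raise_for_status()
+    # Each result carries per-class probabilities in `probs`.
+    print(response.json()["data"][0]["probs"])
+    ```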

 ### Score API

@@ -882,12 +946,21 @@ The following [pooling parameters][vllm.PoolingParams] are supported.

 ```python
 --8<-- "vllm/pooling_params.py:common-pooling-params"
---8<-- "vllm/pooling_params.py:classification-pooling-params"
+--8<-- "vllm/pooling_params.py:classify-pooling-params"
+```
+
+The following Score API parameters are supported:
+
+```python
+--8<-- "vllm/entrypoints/pooling/base/protocol.py:pooling-common-params"
 ```

 The following extra parameters are supported:

 ```python
+--8<-- "vllm/entrypoints/pooling/base/protocol.py:pooling-common-extra-params"
+--8<-- "vllm/entrypoints/pooling/base/protocol.py:classify-extra-params"
 --8<-- "vllm/entrypoints/pooling/score/protocol.py:score-extra-params"
 ```

@@ -963,12 +1036,22 @@ The following [pooling parameters][vllm.PoolingParams] are supported.

 ```python
 --8<-- "vllm/pooling_params.py:common-pooling-params"
---8<-- "vllm/pooling_params.py:classification-pooling-params"
+--8<-- "vllm/pooling_params.py:classify-pooling-params"
+```
+
+The following Re-rank API parameters are supported:
+
+```python
+--8<-- "vllm/entrypoints/pooling/base/protocol.py:pooling-common-params"
+```

 The following extra parameters are supported:

 ```python
+--8<-- "vllm/entrypoints/pooling/base/protocol.py:pooling-common-extra-params"
+--8<-- "vllm/entrypoints/pooling/base/protocol.py:classify-extra-params"
 --8<-- "vllm/entrypoints/pooling/score/protocol.py:rerank-extra-params"
 ```
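+For example (a minimal sketch; the model name is illustrative, and the endpoint is assumed to be served at `/rerank`):
+
+```python
+import requests
+
+response = requests.post(
+    "http://localhost:8000/rerank",
+    json={
+        "model": "BAAI/bge-reranker-base",
+        "query": "What is the capital of France?",
+        "documents": ["Paris is the capital of France.", "I like pizza."],
+        "top_n": 2,
+    },
+)
+response.raise_for_status()
+print(response.json())
+```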
"content": input_text, + }, ] response = requests.post( - server_vlm_classify.url_for("classify"), + server.url_for("classify"), json={"model": model_name, "messages": messages}, ) response.raise_for_status() @@ -63,25 +66,77 @@ def test_classify_accepts_chat_text_only( assert output.model == model_name assert len(output.data) == 1 assert len(output.data[0].probs) == 2 - assert output.usage.prompt_tokens == 22 + assert output.usage.prompt_tokens == 35 -@pytest.mark.parametrize("model_name", [VLM_MODEL_NAME]) -def test_classify_accepts_chat_video_url( - server_vlm_classify: RemoteOpenAIServer, model_name: str -) -> None: +@pytest.mark.parametrize("model_name", [MODEL_NAME]) +def test_chat_image_url_request(server: RemoteOpenAIServer, model_name: str): messages = [ { "role": "user", "content": [ - {"type": "text", "text": "Please classify this video."}, - {"type": "video_url", "video_url": {"url": TEST_VIDEO_URL}}, + {"type": "text", "text": "Please classify this image."}, + {"type": "image_url", "image_url": {"url": image_url}}, ], } ] response = requests.post( - server_vlm_classify.url_for("classify"), + server.url_for("classify"), + json={"model": model_name, "messages": messages}, + ) + response.raise_for_status() + + output = ClassificationResponse.model_validate(response.json()) + + assert output.object == "list" + assert output.model == model_name + assert len(output.data) == 1 + assert len(output.data[0].probs) == 2 + assert output.usage.prompt_tokens == 47 + + +@pytest.mark.parametrize("model_name", [MODEL_NAME]) +def test_chat_image_base64_request(server: RemoteOpenAIServer, model_name: str): + messages = [ + { + "role": "user", + "content": [ + {"type": "text", "text": "Please classify this image."}, + {"type": "image_url", "image_url": image_base64}, + ], + } + ] + + response = requests.post( + server.url_for("classify"), + json={"model": model_name, "messages": messages}, + ) + response.raise_for_status() + + output = ClassificationResponse.model_validate(response.json()) + + assert output.object == "list" + assert output.model == model_name + assert len(output.data) == 1 + assert len(output.data[0].probs) == 2 + assert output.usage.prompt_tokens == 47 + + +@pytest.mark.parametrize("model_name", [MODEL_NAME]) +def test_chat_video_url_request(server: RemoteOpenAIServer, model_name: str): + messages = [ + { + "role": "user", + "content": [ + {"type": "text", "text": "Please classify this video."}, + {"type": "video_url", "video_url": {"url": video_url}}, + ], + } + ] + + response = requests.post( + server.url_for("classify"), json={"model": model_name, "messages": messages}, ) response.raise_for_status() diff --git a/tests/entrypoints/test_utils.py b/tests/entrypoints/test_utils.py index dc1101840..1f1b54267 100644 --- a/tests/entrypoints/test_utils.py +++ b/tests/entrypoints/test_utils.py @@ -1,5 +1,9 @@ # SPDX-License-Identifier: Apache-2.0 # SPDX-FileCopyrightText: Copyright contributors to the vLLM project +import base64 + +import requests + from vllm.entrypoints.utils import sanitize_message @@ -8,3 +12,11 @@ def test_sanitize_message(): sanitize_message("<_io.BytesIO object at 0x7a95e299e750>") == "<_io.BytesIO object>" ) + + +def encode_base64_content_from_url(content_url: str) -> dict[str, str]: + with requests.get(content_url) as response: + response.raise_for_status() + result = base64.b64encode(response.content).decode("utf-8") + + return {"url": f"data:image/jpeg;base64,{result}"} diff --git a/vllm/entrypoints/openai/engine/serving.py 
b/vllm/entrypoints/openai/engine/serving.py index 2cf33328e..e05c287a0 100644 --- a/vllm/entrypoints/openai/engine/serving.py +++ b/vllm/entrypoints/openai/engine/serving.py @@ -75,6 +75,8 @@ from vllm.entrypoints.pooling.embed.protocol import ( ) from vllm.entrypoints.pooling.pooling.protocol import ( IOProcessorRequest, + PoolingChatRequest, + PoolingCompletionRequest, PoolingResponse, ) from vllm.entrypoints.pooling.score.protocol import ( @@ -138,19 +140,21 @@ logger = init_logger(__name__) CompletionLikeRequest: TypeAlias = ( CompletionRequest + | TokenizeCompletionRequest | DetokenizeRequest | EmbeddingCompletionRequest - | RerankRequest | ClassificationCompletionRequest + | RerankRequest | ScoreRequest - | TokenizeCompletionRequest + | PoolingCompletionRequest ) ChatLikeRequest: TypeAlias = ( ChatCompletionRequest - | EmbeddingChatRequest | TokenizeChatRequest + | EmbeddingChatRequest | ClassificationChatRequest + | PoolingChatRequest ) SpeechToTextRequest: TypeAlias = TranscriptionRequest | TranslationRequest AnyRequest: TypeAlias = ( diff --git a/vllm/entrypoints/pooling/base/protocol.py b/vllm/entrypoints/pooling/base/protocol.py index 1a079306c..dd185e574 100644 --- a/vllm/entrypoints/pooling/base/protocol.py +++ b/vllm/entrypoints/pooling/base/protocol.py @@ -6,16 +6,22 @@ from typing import Annotated, Any from pydantic import Field, model_validator +from vllm import PoolingParams +from vllm.config.pooler import get_use_activation from vllm.entrypoints.chat_utils import ChatCompletionMessageParam from vllm.entrypoints.openai.engine.protocol import OpenAIBaseModel from vllm.utils import random_uuid +from vllm.utils.serial_utils import EmbedDType, EncodingFormat, Endianness class PoolingBasicRequestMixin(OpenAIBaseModel): + # --8<-- [start:pooling-common-params] model: str | None = None user: str | None = None - truncate_prompt_tokens: Annotated[int, Field(ge=-1)] | None = None + # --8<-- [end:pooling-common-params] + # --8<-- [start:pooling-common-extra-params] + truncate_prompt_tokens: Annotated[int, Field(ge=-1)] | None = None request_id: str = Field( default_factory=random_uuid, description=( @@ -24,7 +30,6 @@ class PoolingBasicRequestMixin(OpenAIBaseModel): "through out the inference process and return in response." ), ) - priority: int = Field( default=0, description=( @@ -33,11 +38,15 @@ class PoolingBasicRequestMixin(OpenAIBaseModel): "if the served model does not use priority scheduling." ), ) + # --8<-- [end:pooling-common-extra-params] class CompletionRequestMixin(OpenAIBaseModel): + # --8<-- [start:completion-params] input: list[int] | list[list[int]] | str | list[str] + # --8<-- [end:completion-params] + # --8<-- [start:completion-extra-params] add_special_tokens: bool = Field( default=True, description=( @@ -45,11 +54,15 @@ class CompletionRequestMixin(OpenAIBaseModel): "the prompt." ), ) + # --8<-- [end:completion-extra-params] class ChatRequestMixin(OpenAIBaseModel): + # --8<-- [start:chat-params] messages: list[ChatCompletionMessageParam] + # --8<-- [end:chat-params] + # --8<-- [start:chat-extra-params] add_generation_prompt: bool = Field( default=False, description=( @@ -58,7 +71,6 @@ class ChatRequestMixin(OpenAIBaseModel): "model." ), ) - continue_final_message: bool = Field( default=False, description=( @@ -69,7 +81,6 @@ class ChatRequestMixin(OpenAIBaseModel): "Cannot be used at the same time as `add_generation_prompt`." 
        ),
     )
-
     add_special_tokens: bool = Field(
         default=False,
         description=(
@@ -80,7 +91,6 @@ class ChatRequestMixin(OpenAIBaseModel):
             "default)."
         ),
     )
-
     chat_template: str | None = Field(
         default=None,
         description=(
@@ -90,7 +100,6 @@ class ChatRequestMixin(OpenAIBaseModel):
             "does not define one."
         ),
     )
-
     chat_template_kwargs: dict[str, Any] | None = Field(
         default=None,
         description=(
@@ -98,6 +107,7 @@ class ChatRequestMixin(OpenAIBaseModel):
             "Will be accessible by the chat template."
         ),
     )
+    # --8<-- [end:chat-extra-params]

     @model_validator(mode="before")
     @classmethod
@@ -108,3 +118,72 @@ class ChatRequestMixin(OpenAIBaseModel):
                 "`add_generation_prompt` to True."
             )
         return data
+
+
+class EncodingRequestMixin(OpenAIBaseModel):
+    # --8<-- [start:encoding-params]
+    encoding_format: EncodingFormat = "float"
+    # --8<-- [end:encoding-params]
+
+    # --8<-- [start:encoding-extra-params]
+    embed_dtype: EmbedDType = Field(
+        default="float32",
+        description=(
+            "What dtype to use for encoding. Default to using float32 for base64 "
+            "encoding to match the OpenAI python client behavior. "
+            "This parameter will affect base64 and binary_response."
+        ),
+    )
+    endianness: Endianness = Field(
+        default="native",
+        description=(
+            "What endianness to use for encoding. Default to using native for "
+            "base64 encoding to match the OpenAI python client behavior. "
+            "This parameter will affect base64 and binary_response."
+        ),
+    )
+    # --8<-- [end:encoding-extra-params]
+
+
+class EmbedRequestMixin(EncodingRequestMixin):
+    # --8<-- [start:embed-params]
+    dimensions: int | None = None
+    # --8<-- [end:embed-params]
+
+    # --8<-- [start:embed-extra-params]
+    normalize: bool | None = Field(
+        default=None,
+        description="Whether to normalize the embeddings outputs. Default is True.",
+    )
+    # --8<-- [end:embed-extra-params]
+
+    def to_pooling_params(self):
+        return PoolingParams(
+            dimensions=self.dimensions,
+            use_activation=self.normalize,
+            truncate_prompt_tokens=getattr(self, "truncate_prompt_tokens", None),
+        )
+
+
+class ClassifyRequestMixin(OpenAIBaseModel):
+    # --8<-- [start:classify-extra-params]
+    softmax: bool | None = Field(
+        default=None,
+        description="softmax will be deprecated, please use use_activation instead.",
+    )
+    activation: bool | None = Field(
+        default=None,
+        description="activation will be deprecated, please use use_activation instead.",
+    )
+    use_activation: bool | None = Field(
+        default=None,
+        description="Whether to use activation for classification outputs. 
" + "Default is True.", + ) + # --8<-- [end:classify-extra-params] + + def to_pooling_params(self): + return PoolingParams( + use_activation=get_use_activation(self), + truncate_prompt_tokens=getattr(self, "truncate_prompt_tokens", None), + ) diff --git a/vllm/entrypoints/pooling/classify/protocol.py b/vllm/entrypoints/pooling/classify/protocol.py index d90665bf8..a94c7b49e 100644 --- a/vllm/entrypoints/pooling/classify/protocol.py +++ b/vllm/entrypoints/pooling/classify/protocol.py @@ -8,73 +8,31 @@ from pydantic import ( Field, ) -from vllm import PoolingParams -from vllm.config.pooler import get_use_activation from vllm.entrypoints.openai.engine.protocol import OpenAIBaseModel, UsageInfo from vllm.entrypoints.pooling.base.protocol import ( ChatRequestMixin, + ClassifyRequestMixin, CompletionRequestMixin, PoolingBasicRequestMixin, ) from vllm.utils import random_uuid -class ClassificationCompletionRequest(PoolingBasicRequestMixin, CompletionRequestMixin): - # --8<-- [start:classification-extra-params] - softmax: bool | None = Field( - default=None, - description="softmax will be deprecated, please use use_activation instead.", - ) - - activation: bool | None = Field( - default=None, - description="activation will be deprecated, please use use_activation instead.", - ) - - use_activation: bool | None = Field( - default=None, - description="Whether to use activation for classification outputs. " - "Default is True.", - ) - # --8<-- [end:classification-extra-params] - - def to_pooling_params(self): - return PoolingParams( - truncate_prompt_tokens=self.truncate_prompt_tokens, - use_activation=get_use_activation(self), - ) +class ClassificationCompletionRequest( + PoolingBasicRequestMixin, CompletionRequestMixin, ClassifyRequestMixin +): + pass -class ClassificationChatRequest(PoolingBasicRequestMixin, ChatRequestMixin): +class ClassificationChatRequest( + PoolingBasicRequestMixin, ChatRequestMixin, ClassifyRequestMixin +): # --8<-- [start:chat-classification-extra-params] mm_processor_kwargs: dict[str, Any] | None = Field( default=None, description=("Additional kwargs to pass to the HF processor."), ) - softmax: bool | None = Field( - default=None, - description="softmax will be deprecated, please use use_activation instead.", - ) - - activation: bool | None = Field( - default=None, - description="activation will be deprecated, please use use_activation instead.", - ) - - use_activation: bool | None = Field( - default=None, - description="Whether to use activation for classification outputs. 
" - "Default is True.", - ) - # --8<-- [end:chat-classification-extra-params] - - def to_pooling_params(self): - return PoolingParams( - truncate_prompt_tokens=self.truncate_prompt_tokens, - use_activation=get_use_activation(self), - ) - ClassificationRequest: TypeAlias = ( ClassificationCompletionRequest | ClassificationChatRequest diff --git a/vllm/entrypoints/pooling/embed/protocol.py b/vllm/entrypoints/pooling/embed/protocol.py index ece014f4a..6cebe046d 100644 --- a/vllm/entrypoints/pooling/embed/protocol.py +++ b/vllm/entrypoints/pooling/embed/protocol.py @@ -7,92 +7,31 @@ from pydantic import ( Field, ) -from vllm import PoolingParams from vllm.entrypoints.openai.engine.protocol import OpenAIBaseModel, UsageInfo from vllm.entrypoints.pooling.base.protocol import ( ChatRequestMixin, CompletionRequestMixin, + EmbedRequestMixin, PoolingBasicRequestMixin, ) from vllm.utils import random_uuid -from vllm.utils.serial_utils import EmbedDType, EncodingFormat, Endianness -class EmbeddingCompletionRequest(PoolingBasicRequestMixin, CompletionRequestMixin): +class EmbeddingCompletionRequest( + PoolingBasicRequestMixin, CompletionRequestMixin, EmbedRequestMixin +): # Ordered by official OpenAI API documentation # https://platform.openai.com/docs/api-reference/embeddings - - encoding_format: EncodingFormat = "float" - dimensions: int | None = None - - # --8<-- [start:embedding-extra-params] - normalize: bool | None = Field( - default=None, - description="Whether to normalize the embeddings outputs. Default is True.", - ) - embed_dtype: EmbedDType = Field( - default="float32", - description=( - "What dtype to use for encoding. Default to using float32 for base64 " - "encoding to match the OpenAI python client behavior. " - "This parameter will affect base64 and binary_response." - ), - ) - endianness: Endianness = Field( - default="native", - description=( - "What endianness to use for encoding. Default to using native for " - "base64 encoding to match the OpenAI python client behavior." - "This parameter will affect base64 and binary_response." - ), - ) - # --8<-- [end:embedding-extra-params] - - def to_pooling_params(self): - return PoolingParams( - dimensions=self.dimensions, - use_activation=self.normalize, - truncate_prompt_tokens=self.truncate_prompt_tokens, - ) + pass -class EmbeddingChatRequest(PoolingBasicRequestMixin, ChatRequestMixin): - encoding_format: EncodingFormat = "float" - dimensions: int | None = None - - # --8<-- [start:chat-embedding-extra-params] +class EmbeddingChatRequest( + PoolingBasicRequestMixin, ChatRequestMixin, EmbedRequestMixin +): mm_processor_kwargs: dict[str, Any] | None = Field( default=None, description=("Additional kwargs to pass to the HF processor."), ) - normalize: bool | None = Field( - default=None, - description="Whether to normalize the embeddings outputs. Default is True.", - ) - embed_dtype: EmbedDType = Field( - default="float32", - description=( - "What dtype to use for encoding. Default to using float32 for base64 " - "encoding to match the OpenAI python client behavior. " - "This parameter will affect base64 and binary_response." - ), - ) - endianness: Endianness = Field( - default="native", - description=( - "What endianness to use for encoding. Default to using native for " - "base64 encoding to match the OpenAI python client behavior." - "This parameter will affect base64 and binary_response." 
- ), - ) - # --8<-- [end:chat-embedding-extra-params] - - def to_pooling_params(self): - return PoolingParams( - truncate_prompt_tokens=self.truncate_prompt_tokens, - dimensions=self.dimensions, - use_activation=self.normalize, - ) EmbeddingRequest: TypeAlias = EmbeddingCompletionRequest | EmbeddingChatRequest diff --git a/vllm/entrypoints/pooling/pooling/protocol.py b/vllm/entrypoints/pooling/pooling/protocol.py index daf85d311..f3b043ca0 100644 --- a/vllm/entrypoints/pooling/pooling/protocol.py +++ b/vllm/entrypoints/pooling/pooling/protocol.py @@ -1,7 +1,7 @@ # SPDX-License-Identifier: Apache-2.0 # SPDX-FileCopyrightText: Copyright contributors to the vLLM project import time -from typing import Generic, TypeAlias, TypeVar +from typing import Any, Generic, TypeAlias, TypeVar from pydantic import ( Field, @@ -10,32 +10,25 @@ from pydantic import ( from vllm import PoolingParams from vllm.config.pooler import get_use_activation from vllm.entrypoints.openai.engine.protocol import OpenAIBaseModel, UsageInfo -from vllm.entrypoints.pooling.base.protocol import PoolingBasicRequestMixin -from vllm.entrypoints.pooling.embed.protocol import ( - EmbeddingChatRequest, - EmbeddingCompletionRequest, +from vllm.entrypoints.pooling.base.protocol import ( + ChatRequestMixin, + ClassifyRequestMixin, + CompletionRequestMixin, + EmbedRequestMixin, + EncodingRequestMixin, + PoolingBasicRequestMixin, ) from vllm.tasks import PoolingTask from vllm.utils import random_uuid -from vllm.utils.serial_utils import EmbedDType, EncodingFormat, Endianness -class PoolingCompletionRequest(EmbeddingCompletionRequest): +class PoolingCompletionRequest( + PoolingBasicRequestMixin, + CompletionRequestMixin, + EmbedRequestMixin, + ClassifyRequestMixin, +): task: PoolingTask | None = None - softmax: bool | None = Field( - default=None, - description="softmax will be deprecated, please use use_activation instead.", - ) - activation: bool | None = Field( - default=None, - description="activation will be deprecated, please use use_activation instead.", - ) - use_activation: bool | None = Field( - default=None, - description="Whether to use activation for classification outputs. " - "If it is a classify or token_classify task, the default is True; " - "for other tasks, this value should be None.", - ) def to_pooling_params(self): return PoolingParams( @@ -45,21 +38,14 @@ class PoolingCompletionRequest(EmbeddingCompletionRequest): ) -class PoolingChatRequest(EmbeddingChatRequest): +class PoolingChatRequest( + PoolingBasicRequestMixin, ChatRequestMixin, EmbedRequestMixin, ClassifyRequestMixin +): task: PoolingTask | None = None - softmax: bool | None = Field( + + mm_processor_kwargs: dict[str, Any] | None = Field( default=None, - description="softmax will be deprecated, please use use_activation instead.", - ) - activation: bool | None = Field( - default=None, - description="activation will be deprecated, please use use_activation instead.", - ) - use_activation: bool | None = Field( - default=None, - description="Whether to use activation for classification outputs. 
" - "If it is a classify or token_classify task, the default is True; " - "for other tasks, this value should be None.", + description=("Additional kwargs to pass to the HF processor."), ) def to_pooling_params(self): @@ -73,26 +59,9 @@ class PoolingChatRequest(EmbeddingChatRequest): T = TypeVar("T") -class IOProcessorRequest(PoolingBasicRequestMixin, Generic[T]): +class IOProcessorRequest(PoolingBasicRequestMixin, EncodingRequestMixin, Generic[T]): data: T task: PoolingTask = "plugin" - encoding_format: EncodingFormat = "float" - embed_dtype: EmbedDType = Field( - default="float32", - description=( - "What dtype to use for encoding. Default to using float32 for base64 " - "encoding to match the OpenAI python client behavior. " - "This parameter will affect base64 and binary_response." - ), - ) - endianness: Endianness = Field( - default="native", - description=( - "What endianness to use for encoding. Default to using native for " - "base64 encoding to match the OpenAI python client behavior." - "This parameter will affect base64 and binary_response." - ), - ) def to_pooling_params(self): return PoolingParams() diff --git a/vllm/entrypoints/pooling/score/protocol.py b/vllm/entrypoints/pooling/score/protocol.py index 67f1a34dd..2af43c4a8 100644 --- a/vllm/entrypoints/pooling/score/protocol.py +++ b/vllm/entrypoints/pooling/score/protocol.py @@ -11,7 +11,10 @@ from pydantic import ( from vllm import PoolingParams from vllm.config.pooler import get_use_activation from vllm.entrypoints.openai.engine.protocol import OpenAIBaseModel, UsageInfo -from vllm.entrypoints.pooling.base.protocol import PoolingBasicRequestMixin +from vllm.entrypoints.pooling.base.protocol import ( + ClassifyRequestMixin, + PoolingBasicRequestMixin, +) from vllm.entrypoints.pooling.score.utils import ( ScoreContentPartParam, ScoreMultiModalParam, @@ -19,28 +22,12 @@ from vllm.entrypoints.pooling.score.utils import ( from vllm.utils import random_uuid -class ScoreRequestMixin(PoolingBasicRequestMixin): +class ScoreRequestMixin(PoolingBasicRequestMixin, ClassifyRequestMixin): # --8<-- [start:score-extra-params] mm_processor_kwargs: dict[str, Any] | None = Field( default=None, description=("Additional kwargs to pass to the HF processor."), ) - - softmax: bool | None = Field( - default=None, - description="softmax will be deprecated, please use use_activation instead.", - ) - - activation: bool | None = Field( - default=None, - description="activation will be deprecated, please use use_activation instead.", - ) - - use_activation: bool | None = Field( - default=None, - description="Whether to use activation for classification outputs. 
" - "Default is True.", - ) # --8<-- [end:score-extra-params] def to_pooling_params(self): @@ -86,7 +73,7 @@ ScoreRequest: TypeAlias = ( ) -class RerankRequest(PoolingBasicRequestMixin): +class RerankRequest(PoolingBasicRequestMixin, ClassifyRequestMixin): query: str | ScoreMultiModalParam documents: list[str] | ScoreMultiModalParam top_n: int = Field(default_factory=lambda: 0) @@ -96,29 +83,8 @@ class RerankRequest(PoolingBasicRequestMixin): default=None, description=("Additional kwargs to pass to the HF processor."), ) - softmax: bool | None = Field( - default=None, - description="softmax will be deprecated, please use use_activation instead.", - ) - - activation: bool | None = Field( - default=None, - description="activation will be deprecated, please use use_activation instead.", - ) - - use_activation: bool | None = Field( - default=None, - description="Whether to use activation for classification outputs. " - "Default is True.", - ) # --8<-- [end:rerank-extra-params] - def to_pooling_params(self): - return PoolingParams( - truncate_prompt_tokens=self.truncate_prompt_tokens, - use_activation=get_use_activation(self), - ) - class RerankDocument(BaseModel): text: str | None = None diff --git a/vllm/pooling_params.py b/vllm/pooling_params.py index 84101e1ae..cc750afd8 100644 --- a/vllm/pooling_params.py +++ b/vllm/pooling_params.py @@ -38,17 +38,17 @@ class PoolingParams( # --8<-- [end:common-pooling-params] ## for embeddings models - # --8<-- [start:embedding-pooling-params] + # --8<-- [start:embed-pooling-params] dimensions: int | None = None normalize: bool | None = None - # --8<-- [end:embedding-pooling-params] + # --8<-- [end:embed-pooling-params] ## for classification, scoring and rerank - # --8<-- [start:classification-pooling-params] + # --8<-- [start:classify-pooling-params] softmax: bool | None = None activation: bool | None = None use_activation: bool | None = None - # --8<-- [end:classification-pooling-params] + # --8<-- [end:classify-pooling-params] ## for step pooling models step_tag_id: int | None = None