[Frontend][3/n] Make pooling entrypoints request schema consensus | EmbedRequest & ClassifyRequest (#32905)
Signed-off-by: wang.yuqi <yuqi.wang@daocloud.io>
Signed-off-by: wang.yuqi <noooop@126.com>
Co-authored-by: gemini-code-assist[bot] <176961590+gemini-code-assist[bot]@users.noreply.github.com>
@@ -75,6 +75,8 @@ from vllm.entrypoints.pooling.embed.protocol import (
)
from vllm.entrypoints.pooling.pooling.protocol import (
    IOProcessorRequest,
    PoolingChatRequest,
    PoolingCompletionRequest,
    PoolingResponse,
)
from vllm.entrypoints.pooling.score.protocol import (
@@ -138,19 +140,21 @@ logger = init_logger(__name__)

CompletionLikeRequest: TypeAlias = (
    CompletionRequest
    | TokenizeCompletionRequest
    | DetokenizeRequest
    | EmbeddingCompletionRequest
    | RerankRequest
    | ClassificationCompletionRequest
    | RerankRequest
    | ScoreRequest
    | TokenizeCompletionRequest
    | PoolingCompletionRequest
)

ChatLikeRequest: TypeAlias = (
    ChatCompletionRequest
    | EmbeddingChatRequest
    | TokenizeChatRequest
    | EmbeddingChatRequest
    | ClassificationChatRequest
    | PoolingChatRequest
)
SpeechToTextRequest: TypeAlias = TranscriptionRequest | TranslationRequest
AnyRequest: TypeAlias = (
@@ -6,16 +6,22 @@ from typing import Annotated, Any

from pydantic import Field, model_validator

from vllm import PoolingParams
from vllm.config.pooler import get_use_activation
from vllm.entrypoints.chat_utils import ChatCompletionMessageParam
from vllm.entrypoints.openai.engine.protocol import OpenAIBaseModel
from vllm.utils import random_uuid
from vllm.utils.serial_utils import EmbedDType, EncodingFormat, Endianness


class PoolingBasicRequestMixin(OpenAIBaseModel):
    # --8<-- [start:pooling-common-params]
    model: str | None = None
    user: str | None = None
    truncate_prompt_tokens: Annotated[int, Field(ge=-1)] | None = None
    # --8<-- [end:pooling-common-params]

    # --8<-- [start:pooling-common-extra-params]
    truncate_prompt_tokens: Annotated[int, Field(ge=-1)] | None = None
    request_id: str = Field(
        default_factory=random_uuid,
        description=(
@@ -24,7 +30,6 @@ class PoolingBasicRequestMixin(OpenAIBaseModel):
            "through out the inference process and return in response."
        ),
    )

    priority: int = Field(
        default=0,
        description=(
@@ -33,11 +38,15 @@ class PoolingBasicRequestMixin(OpenAIBaseModel):
            "if the served model does not use priority scheduling."
        ),
    )
    # --8<-- [end:pooling-common-extra-params]


class CompletionRequestMixin(OpenAIBaseModel):
    # --8<-- [start:completion-params]
    input: list[int] | list[list[int]] | str | list[str]
    # --8<-- [end:completion-params]

    # --8<-- [start:completion-extra-params]
    add_special_tokens: bool = Field(
        default=True,
        description=(
@@ -45,11 +54,15 @@ class CompletionRequestMixin(OpenAIBaseModel):
            "the prompt."
        ),
    )
    # --8<-- [end:completion-extra-params]


class ChatRequestMixin(OpenAIBaseModel):
    # --8<-- [start:chat-params]
    messages: list[ChatCompletionMessageParam]
    # --8<-- [end:chat-params]

    # --8<-- [start:chat-extra-params]
    add_generation_prompt: bool = Field(
        default=False,
        description=(
@@ -58,7 +71,6 @@ class ChatRequestMixin(OpenAIBaseModel):
            "model."
        ),
    )

    continue_final_message: bool = Field(
        default=False,
        description=(
@@ -69,7 +81,6 @@ class ChatRequestMixin(OpenAIBaseModel):
            "Cannot be used at the same time as `add_generation_prompt`."
        ),
    )

    add_special_tokens: bool = Field(
        default=False,
        description=(
@@ -80,7 +91,6 @@ class ChatRequestMixin(OpenAIBaseModel):
            "default)."
        ),
    )

    chat_template: str | None = Field(
        default=None,
        description=(
@@ -90,7 +100,6 @@ class ChatRequestMixin(OpenAIBaseModel):
            "does not define one."
        ),
    )

    chat_template_kwargs: dict[str, Any] | None = Field(
        default=None,
        description=(
@@ -98,6 +107,7 @@ class ChatRequestMixin(OpenAIBaseModel):
            "Will be accessible by the chat template."
        ),
    )
    # --8<-- [end:chat-extra-params]

    @model_validator(mode="before")
    @classmethod
@@ -108,3 +118,72 @@ class ChatRequestMixin(OpenAIBaseModel):
                "`add_generation_prompt` to True."
            )
        return data


class EncodingRequestMixin(OpenAIBaseModel):
    # --8<-- [start:encoding-params]
    encoding_format: EncodingFormat = "float"
    # --8<-- [end:encoding-params]

    # --8<-- [start:encoding-extra-params]
    embed_dtype: EmbedDType = Field(
        default="float32",
        description=(
            "What dtype to use for encoding. Default to using float32 for base64 "
            "encoding to match the OpenAI python client behavior. "
            "This parameter will affect base64 and binary_response."
        ),
    )
    endianness: Endianness = Field(
        default="native",
        description=(
            "What endianness to use for encoding. Default to using native for "
            "base64 encoding to match the OpenAI python client behavior."
            "This parameter will affect base64 and binary_response."
        ),
    )
    # --8<-- [end:encoding-extra-params]


class EmbedRequestMixin(EncodingRequestMixin):
    # --8<-- [start:embed-params]
    dimensions: int | None = None
    # --8<-- [end:embed-params]

    # --8<-- [start:embed-extra-params]
    normalize: bool | None = Field(
        default=None,
        description="Whether to normalize the embeddings outputs. Default is True.",
    )
    # --8<-- [end:embed-extra-params]

    def to_pooling_params(self):
        return PoolingParams(
            dimensions=self.dimensions,
            use_activation=self.normalize,
            truncate_prompt_tokens=getattr(self, "truncate_prompt_tokens", None),
        )


class ClassifyRequestMixin(OpenAIBaseModel):
    # --8<-- [start:classify-extra-params]
    softmax: bool | None = Field(
        default=None,
        description="softmax will be deprecated, please use use_activation instead.",
    )
    activation: bool | None = Field(
        default=None,
        description="activation will be deprecated, please use use_activation instead.",
    )
    use_activation: bool | None = Field(
        default=None,
        description="Whether to use activation for classification outputs. "
        "Default is True.",
    )
    # --8<-- [end:classify-extra-params]

    def to_pooling_params(self):
        return PoolingParams(
            use_activation=get_use_activation(self),
            truncate_prompt_tokens=getattr(self, "truncate_prompt_tokens", None),
        )
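
For orientation, a minimal sketch of how these new mixins are meant to compose (assuming vLLM at this commit is installed; the request class, model name, and inputs here are illustrative only, not part of the diff):

from vllm import PoolingParams
from vllm.entrypoints.pooling.base.protocol import (
    ClassifyRequestMixin,
    CompletionRequestMixin,
    PoolingBasicRequestMixin,
)


# Hypothetical request model, composed the same way the classify/embed
# protocols below compose the shared mixins.
class ToyClassifyRequest(
    PoolingBasicRequestMixin, CompletionRequestMixin, ClassifyRequestMixin
):
    pass


req = ToyClassifyRequest(
    model="my-classifier",  # illustrative model name
    input="vLLM is a fast inference engine",
    truncate_prompt_tokens=128,
    use_activation=True,
)
# ClassifyRequestMixin.to_pooling_params() forwards use_activation (via
# get_use_activation) and truncate_prompt_tokens onto PoolingParams.
params: PoolingParams = req.to_pooling_params()
print(params)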
@@ -8,73 +8,31 @@ from pydantic import (
    Field,
)

from vllm import PoolingParams
from vllm.config.pooler import get_use_activation
from vllm.entrypoints.openai.engine.protocol import OpenAIBaseModel, UsageInfo
from vllm.entrypoints.pooling.base.protocol import (
    ChatRequestMixin,
    ClassifyRequestMixin,
    CompletionRequestMixin,
    PoolingBasicRequestMixin,
)
from vllm.utils import random_uuid


class ClassificationCompletionRequest(PoolingBasicRequestMixin, CompletionRequestMixin):
    # --8<-- [start:classification-extra-params]
    softmax: bool | None = Field(
        default=None,
        description="softmax will be deprecated, please use use_activation instead.",
    )

    activation: bool | None = Field(
        default=None,
        description="activation will be deprecated, please use use_activation instead.",
    )

    use_activation: bool | None = Field(
        default=None,
        description="Whether to use activation for classification outputs. "
        "Default is True.",
    )
    # --8<-- [end:classification-extra-params]

    def to_pooling_params(self):
        return PoolingParams(
            truncate_prompt_tokens=self.truncate_prompt_tokens,
            use_activation=get_use_activation(self),
        )
class ClassificationCompletionRequest(
    PoolingBasicRequestMixin, CompletionRequestMixin, ClassifyRequestMixin
):
    pass


class ClassificationChatRequest(PoolingBasicRequestMixin, ChatRequestMixin):
class ClassificationChatRequest(
    PoolingBasicRequestMixin, ChatRequestMixin, ClassifyRequestMixin
):
    # --8<-- [start:chat-classification-extra-params]
    mm_processor_kwargs: dict[str, Any] | None = Field(
        default=None,
        description=("Additional kwargs to pass to the HF processor."),
    )

    softmax: bool | None = Field(
        default=None,
        description="softmax will be deprecated, please use use_activation instead.",
    )

    activation: bool | None = Field(
        default=None,
        description="activation will be deprecated, please use use_activation instead.",
    )

    use_activation: bool | None = Field(
        default=None,
        description="Whether to use activation for classification outputs. "
        "Default is True.",
    )
    # --8<-- [end:chat-classification-extra-params]

    def to_pooling_params(self):
        return PoolingParams(
            truncate_prompt_tokens=self.truncate_prompt_tokens,
            use_activation=get_use_activation(self),
        )


ClassificationRequest: TypeAlias = (
    ClassificationCompletionRequest | ClassificationChatRequest
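
The intent of the classify change is that the request classes keep the same wire-level fields while the declarations move into the shared mixins; a sketch of a payload that should validate unchanged (values illustrative, assuming vLLM at this commit):

from vllm.entrypoints.pooling.classify.protocol import (
    ClassificationCompletionRequest,
)

# The same request body that validated before the refactor still validates,
# because model/input/activation now come from the mixins rather than
# fields declared locally on the class.
payload = {
    "model": "my-classifier",  # illustrative model name
    "input": ["great movie", "terrible movie"],
    "activation": True,  # deprecated alias; use_activation is preferred
}
req = ClassificationCompletionRequest(**payload)
print(req.to_pooling_params())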
@@ -7,92 +7,31 @@ from pydantic import (
    Field,
)

from vllm import PoolingParams
from vllm.entrypoints.openai.engine.protocol import OpenAIBaseModel, UsageInfo
from vllm.entrypoints.pooling.base.protocol import (
    ChatRequestMixin,
    CompletionRequestMixin,
    EmbedRequestMixin,
    PoolingBasicRequestMixin,
)
from vllm.utils import random_uuid
from vllm.utils.serial_utils import EmbedDType, EncodingFormat, Endianness


class EmbeddingCompletionRequest(PoolingBasicRequestMixin, CompletionRequestMixin):
class EmbeddingCompletionRequest(
    PoolingBasicRequestMixin, CompletionRequestMixin, EmbedRequestMixin
):
    # Ordered by official OpenAI API documentation
    # https://platform.openai.com/docs/api-reference/embeddings

    encoding_format: EncodingFormat = "float"
    dimensions: int | None = None

    # --8<-- [start:embedding-extra-params]
    normalize: bool | None = Field(
        default=None,
        description="Whether to normalize the embeddings outputs. Default is True.",
    )
    embed_dtype: EmbedDType = Field(
        default="float32",
        description=(
            "What dtype to use for encoding. Default to using float32 for base64 "
            "encoding to match the OpenAI python client behavior. "
            "This parameter will affect base64 and binary_response."
        ),
    )
    endianness: Endianness = Field(
        default="native",
        description=(
            "What endianness to use for encoding. Default to using native for "
            "base64 encoding to match the OpenAI python client behavior."
            "This parameter will affect base64 and binary_response."
        ),
    )
    # --8<-- [end:embedding-extra-params]

    def to_pooling_params(self):
        return PoolingParams(
            dimensions=self.dimensions,
            use_activation=self.normalize,
            truncate_prompt_tokens=self.truncate_prompt_tokens,
        )
    pass


class EmbeddingChatRequest(PoolingBasicRequestMixin, ChatRequestMixin):
    encoding_format: EncodingFormat = "float"
    dimensions: int | None = None

    # --8<-- [start:chat-embedding-extra-params]
class EmbeddingChatRequest(
    PoolingBasicRequestMixin, ChatRequestMixin, EmbedRequestMixin
):
    mm_processor_kwargs: dict[str, Any] | None = Field(
        default=None,
        description=("Additional kwargs to pass to the HF processor."),
    )
    normalize: bool | None = Field(
        default=None,
        description="Whether to normalize the embeddings outputs. Default is True.",
    )
    embed_dtype: EmbedDType = Field(
        default="float32",
        description=(
            "What dtype to use for encoding. Default to using float32 for base64 "
            "encoding to match the OpenAI python client behavior. "
            "This parameter will affect base64 and binary_response."
        ),
    )
    endianness: Endianness = Field(
        default="native",
        description=(
            "What endianness to use for encoding. Default to using native for "
            "base64 encoding to match the OpenAI python client behavior."
            "This parameter will affect base64 and binary_response."
        ),
    )
    # --8<-- [end:chat-embedding-extra-params]

    def to_pooling_params(self):
        return PoolingParams(
            truncate_prompt_tokens=self.truncate_prompt_tokens,
            dimensions=self.dimensions,
            use_activation=self.normalize,
        )


EmbeddingRequest: TypeAlias = EmbeddingCompletionRequest | EmbeddingChatRequest
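
Similarly for embeddings: EmbeddingCompletionRequest now inherits encoding_format, dimensions, normalize, embed_dtype, endianness, and to_pooling_params() from EmbedRequestMixin/EncodingRequestMixin. A usage sketch (values illustrative, assuming vLLM at this commit):

from vllm.entrypoints.pooling.embed.protocol import EmbeddingCompletionRequest

req = EmbeddingCompletionRequest(
    model="my-embedder",  # illustrative model name
    input="hello world",
    dimensions=256,
    normalize=False,
    encoding_format="base64",
)
# EmbedRequestMixin passes dimensions through and forwards normalize as
# PoolingParams.use_activation, as shown in base/protocol.py above.
print(req.to_pooling_params())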
@@ -1,7 +1,7 @@
# SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
import time
from typing import Generic, TypeAlias, TypeVar
from typing import Any, Generic, TypeAlias, TypeVar

from pydantic import (
    Field,
@@ -10,32 +10,25 @@ from pydantic import (
from vllm import PoolingParams
from vllm.config.pooler import get_use_activation
from vllm.entrypoints.openai.engine.protocol import OpenAIBaseModel, UsageInfo
from vllm.entrypoints.pooling.base.protocol import PoolingBasicRequestMixin
from vllm.entrypoints.pooling.embed.protocol import (
    EmbeddingChatRequest,
    EmbeddingCompletionRequest,
from vllm.entrypoints.pooling.base.protocol import (
    ChatRequestMixin,
    ClassifyRequestMixin,
    CompletionRequestMixin,
    EmbedRequestMixin,
    EncodingRequestMixin,
    PoolingBasicRequestMixin,
)
from vllm.tasks import PoolingTask
from vllm.utils import random_uuid
from vllm.utils.serial_utils import EmbedDType, EncodingFormat, Endianness


class PoolingCompletionRequest(EmbeddingCompletionRequest):
class PoolingCompletionRequest(
    PoolingBasicRequestMixin,
    CompletionRequestMixin,
    EmbedRequestMixin,
    ClassifyRequestMixin,
):
    task: PoolingTask | None = None
    softmax: bool | None = Field(
        default=None,
        description="softmax will be deprecated, please use use_activation instead.",
    )
    activation: bool | None = Field(
        default=None,
        description="activation will be deprecated, please use use_activation instead.",
    )
    use_activation: bool | None = Field(
        default=None,
        description="Whether to use activation for classification outputs. "
        "If it is a classify or token_classify task, the default is True; "
        "for other tasks, this value should be None.",
    )

    def to_pooling_params(self):
        return PoolingParams(
@@ -45,21 +38,14 @@ class PoolingCompletionRequest(EmbeddingCompletionRequest):
        )


class PoolingChatRequest(EmbeddingChatRequest):
class PoolingChatRequest(
    PoolingBasicRequestMixin, ChatRequestMixin, EmbedRequestMixin, ClassifyRequestMixin
):
    task: PoolingTask | None = None
    softmax: bool | None = Field(

    mm_processor_kwargs: dict[str, Any] | None = Field(
        default=None,
        description="softmax will be deprecated, please use use_activation instead.",
    )
    activation: bool | None = Field(
        default=None,
        description="activation will be deprecated, please use use_activation instead.",
    )
    use_activation: bool | None = Field(
        default=None,
        description="Whether to use activation for classification outputs. "
        "If it is a classify or token_classify task, the default is True; "
        "for other tasks, this value should be None.",
        description=("Additional kwargs to pass to the HF processor."),
    )

    def to_pooling_params(self):
@@ -73,26 +59,9 @@ class PoolingChatRequest(EmbeddingChatRequest):
T = TypeVar("T")


class IOProcessorRequest(PoolingBasicRequestMixin, Generic[T]):
class IOProcessorRequest(PoolingBasicRequestMixin, EncodingRequestMixin, Generic[T]):
    data: T
    task: PoolingTask = "plugin"
    encoding_format: EncodingFormat = "float"
    embed_dtype: EmbedDType = Field(
        default="float32",
        description=(
            "What dtype to use for encoding. Default to using float32 for base64 "
            "encoding to match the OpenAI python client behavior. "
            "This parameter will affect base64 and binary_response."
        ),
    )
    endianness: Endianness = Field(
        default="native",
        description=(
            "What endianness to use for encoding. Default to using native for "
            "base64 encoding to match the OpenAI python client behavior."
            "This parameter will affect base64 and binary_response."
        ),
    )

    def to_pooling_params(self):
        return PoolingParams()
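
PoolingCompletionRequest and PoolingChatRequest now declare the union of the embed and classify mixins plus a task field, and IOProcessorRequest reuses EncodingRequestMixin instead of redeclaring the encoding fields. A small sketch (values illustrative, assuming vLLM at this commit):

from vllm.entrypoints.pooling.pooling.protocol import PoolingCompletionRequest

req = PoolingCompletionRequest(
    model="my-pooling-model",  # illustrative model name
    input=[[101, 2023, 2003]],  # pre-tokenized prompt
    task="classify",  # per the use_activation description, activation defaults apply to classify tasks
)
print(req.to_pooling_params())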
@@ -11,7 +11,10 @@ from pydantic import (
from vllm import PoolingParams
from vllm.config.pooler import get_use_activation
from vllm.entrypoints.openai.engine.protocol import OpenAIBaseModel, UsageInfo
from vllm.entrypoints.pooling.base.protocol import PoolingBasicRequestMixin
from vllm.entrypoints.pooling.base.protocol import (
    ClassifyRequestMixin,
    PoolingBasicRequestMixin,
)
from vllm.entrypoints.pooling.score.utils import (
    ScoreContentPartParam,
    ScoreMultiModalParam,
@@ -19,28 +22,12 @@ from vllm.entrypoints.pooling.score.utils import (
from vllm.utils import random_uuid


class ScoreRequestMixin(PoolingBasicRequestMixin):
class ScoreRequestMixin(PoolingBasicRequestMixin, ClassifyRequestMixin):
    # --8<-- [start:score-extra-params]
    mm_processor_kwargs: dict[str, Any] | None = Field(
        default=None,
        description=("Additional kwargs to pass to the HF processor."),
    )

    softmax: bool | None = Field(
        default=None,
        description="softmax will be deprecated, please use use_activation instead.",
    )

    activation: bool | None = Field(
        default=None,
        description="activation will be deprecated, please use use_activation instead.",
    )

    use_activation: bool | None = Field(
        default=None,
        description="Whether to use activation for classification outputs. "
        "Default is True.",
    )
    # --8<-- [end:score-extra-params]

    def to_pooling_params(self):
@@ -86,7 +73,7 @@ ScoreRequest: TypeAlias = (
)


class RerankRequest(PoolingBasicRequestMixin):
class RerankRequest(PoolingBasicRequestMixin, ClassifyRequestMixin):
    query: str | ScoreMultiModalParam
    documents: list[str] | ScoreMultiModalParam
    top_n: int = Field(default_factory=lambda: 0)
@@ -96,29 +83,8 @@ class RerankRequest(PoolingBasicRequestMixin):
        default=None,
        description=("Additional kwargs to pass to the HF processor."),
    )
    softmax: bool | None = Field(
        default=None,
        description="softmax will be deprecated, please use use_activation instead.",
    )

    activation: bool | None = Field(
        default=None,
        description="activation will be deprecated, please use use_activation instead.",
    )

    use_activation: bool | None = Field(
        default=None,
        description="Whether to use activation for classification outputs. "
        "Default is True.",
    )
    # --8<-- [end:rerank-extra-params]

    def to_pooling_params(self):
        return PoolingParams(
            truncate_prompt_tokens=self.truncate_prompt_tokens,
            use_activation=get_use_activation(self),
        )


class RerankDocument(BaseModel):
    text: str | None = None
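
ScoreRequestMixin and RerankRequest pick up the deprecated softmax/activation aliases and use_activation from ClassifyRequestMixin instead of redeclaring them; a rerank sketch (values illustrative, assuming vLLM at this commit):

from vllm.entrypoints.pooling.score.protocol import RerankRequest

req = RerankRequest(
    model="my-reranker",  # illustrative model name
    query="best vector database",
    documents=["a note about vLLM", "a note about databases"],
    top_n=1,
    use_activation=True,
)
print(req.to_pooling_params())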
@@ -38,17 +38,17 @@ class PoolingParams(
    # --8<-- [end:common-pooling-params]

    ## for embeddings models
    # --8<-- [start:embedding-pooling-params]
    # --8<-- [start:embed-pooling-params]
    dimensions: int | None = None
    normalize: bool | None = None
    # --8<-- [end:embedding-pooling-params]
    # --8<-- [end:embed-pooling-params]

    ## for classification, scoring and rerank
    # --8<-- [start:classification-pooling-params]
    # --8<-- [start:classify-pooling-params]
    softmax: bool | None = None
    activation: bool | None = None
    use_activation: bool | None = None
    # --8<-- [end:classification-pooling-params]
    # --8<-- [end:classify-pooling-params]

    ## for step pooling models
    step_tag_id: int | None = None
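
These are the PoolingParams fields that the to_pooling_params() helpers above populate; only the documentation snippet anchors are renamed here (embedding-pooling-params to embed-pooling-params, classification-pooling-params to classify-pooling-params). A direct construction sketch for reference (values illustrative):

from vllm import PoolingParams

# Embedding-style requests populate dimensions and (via normalize)
# use_activation; classification/score/rerank requests populate
# use_activation only.
embed_params = PoolingParams(dimensions=256, use_activation=False)
classify_params = PoolingParams(use_activation=True)
print(embed_params, classify_params)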