2025-02-02 14:58:18 -05:00
|
|
|
# SPDX-License-Identifier: Apache-2.0
|
2025-06-03 11:20:17 -07:00
|
|
|
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
|
2025-02-02 14:58:18 -05:00
|
|
|
|
2025-01-13 01:37:48 -07:00
|
|
|
import contextlib
|
2025-04-27 13:05:00 +08:00
|
|
|
import copy
|
2024-04-16 11:34:39 -07:00
|
|
|
import os
|
2024-08-27 14:40:02 +02:00
|
|
|
import warnings
|
2025-02-19 21:13:50 +08:00
|
|
|
from functools import lru_cache
|
2024-08-06 07:54:23 +08:00
|
|
|
from pathlib import Path
|
2025-02-19 21:13:50 +08:00
|
|
|
from typing import TYPE_CHECKING, Any, TypeAlias
|
2023-05-09 16:03:44 -07:00
|
|
|
|
2024-04-27 09:59:55 -07:00
|
|
|
import huggingface_hub
|
2023-06-28 09:46:58 -07:00
|
|
|
from transformers import AutoTokenizer, PreTrainedTokenizer, PreTrainedTokenizerFast
|
2025-09-17 01:42:59 -07:00
|
|
|
from typing_extensions import assert_never
|
2023-05-09 16:03:44 -07:00
|
|
|
|
2025-05-25 12:51:21 +08:00
|
|
|
from vllm import envs
|
2023-06-17 03:07:40 -07:00
|
|
|
from vllm.logger import init_logger
|
2025-07-18 17:10:47 +08:00
|
|
|
from vllm.transformers_utils.config import get_sentence_transformer_tokenizer_config
|
2024-09-28 16:11:25 +08:00
|
|
|
from vllm.transformers_utils.tokenizers import MistralTokenizer
|
2024-09-02 20:43:26 +08:00
|
|
|
from vllm.transformers_utils.utils import check_gguf_file
|
2023-05-23 20:46:32 -07:00
|
|
|
|
2025-02-19 21:13:50 +08:00
|
|
|
# Heavyweight project types are imported only for static type checking;
# at runtime they fall back to `Any` (the real classes are only needed
# for annotations).
if TYPE_CHECKING:
    from vllm.config import ModelConfig
    from vllm.transformers_utils.tokenizer_base import TokenizerBase
else:
    ModelConfig = Any
    TokenizerBase = Any

logger = init_logger(__name__)

# Union of every tokenizer implementation accepted throughout vLLM:
# HF slow/fast tokenizers plus vLLM's own `TokenizerBase` implementations.
AnyTokenizer: TypeAlias = PreTrainedTokenizer | PreTrainedTokenizerFast | TokenizerBase
|
2024-08-13 09:20:20 +08:00
|
|
|
|
2023-05-09 16:03:44 -07:00
|
|
|
|
2025-01-04 19:40:53 +08:00
|
|
|
def decode_tokens(
|
|
|
|
|
tokenizer: AnyTokenizer,
|
|
|
|
|
token_ids: list[int],
|
|
|
|
|
*,
|
2025-03-15 14:28:27 +01:00
|
|
|
skip_special_tokens: bool | None = None,
|
2025-01-04 19:40:53 +08:00
|
|
|
) -> str:
|
|
|
|
|
"""
|
|
|
|
|
Backend-agnostic equivalent of HF's
|
2025-05-04 03:42:43 +01:00
|
|
|
`tokenizer.decode(token_ids, ...)`.
|
2025-03-15 14:28:27 +01:00
|
|
|
|
2025-05-04 03:42:43 +01:00
|
|
|
`skip_special_tokens=None` means to use the backend's default
|
2025-03-15 14:28:27 +01:00
|
|
|
settings.
|
2025-01-04 19:40:53 +08:00
|
|
|
"""
|
2025-03-15 14:28:27 +01:00
|
|
|
if skip_special_tokens is not None:
|
2025-08-23 12:16:48 +08:00
|
|
|
return tokenizer.decode(token_ids, skip_special_tokens=skip_special_tokens)
|
2025-03-15 14:28:27 +01:00
|
|
|
|
2025-08-23 12:16:48 +08:00
|
|
|
return tokenizer.decode(token_ids)
|
2025-01-04 19:40:53 +08:00
|
|
|
|
|
|
|
|
|
2025-01-03 03:39:19 -05:00
|
|
|
def encode_tokens(
|
|
|
|
|
tokenizer: AnyTokenizer,
|
|
|
|
|
text: str,
|
|
|
|
|
*,
|
2025-04-29 22:24:57 -03:00
|
|
|
truncation: bool | None = None,
|
|
|
|
|
max_length: int | None = None,
|
2025-01-03 03:39:19 -05:00
|
|
|
add_special_tokens: bool | None = None,
|
|
|
|
|
) -> list[int]:
|
|
|
|
|
"""
|
|
|
|
|
Backend-agnostic equivalent of HF's
|
2025-05-04 03:42:43 +01:00
|
|
|
`tokenizer.encode(text, ...)`.
|
2025-03-15 14:28:27 +01:00
|
|
|
|
2025-05-04 03:42:43 +01:00
|
|
|
`add_special_tokens=None` means to use the backend's default
|
2025-03-15 14:28:27 +01:00
|
|
|
settings.
|
2025-01-03 03:39:19 -05:00
|
|
|
"""
|
2025-04-29 22:24:57 -03:00
|
|
|
|
|
|
|
|
kw_args: dict[str, Any] = {}
|
|
|
|
|
if max_length is not None:
|
|
|
|
|
kw_args["max_length"] = max_length
|
|
|
|
|
|
|
|
|
|
if truncation is not None:
|
|
|
|
|
kw_args["truncation"] = truncation
|
|
|
|
|
|
2025-02-11 20:25:58 -08:00
|
|
|
if add_special_tokens is not None:
|
2025-04-29 22:24:57 -03:00
|
|
|
kw_args["add_special_tokens"] = add_special_tokens
|
2025-03-15 14:28:27 +01:00
|
|
|
|
2025-04-29 22:24:57 -03:00
|
|
|
return tokenizer.encode(text, **kw_args)
|
2025-01-03 03:39:19 -05:00
|
|
|
|
|
|
|
|
|
2024-08-07 17:12:05 +08:00
|
|
|
def get_cached_tokenizer(tokenizer: AnyTokenizer) -> AnyTokenizer:
    """
    By default, transformers will recompute multiple tokenizer properties
    each time they are called, leading to a significant slowdown.

    This proxy caches these properties for faster access.
    """
    proxy = copy.copy(tokenizer)

    # Snapshot the expensive-to-recompute attributes once, up front.
    special_ids = tokenizer.all_special_ids
    special_tokens = tokenizer.all_special_tokens
    special_tokens_extended = tokenizer.all_special_tokens_extended
    vocab = tokenizer.get_vocab()
    size = len(tokenizer)

    highest_id = max(vocab.values())
    # Some tokenizers (e.g., QwenTokenizer) have special tokens that
    # are added and included in the implementation of the vocab_size
    # property, but not in get_vocab(); if there is an implementation
    # of vocab size, we should take the greater value.
    if hasattr(tokenizer, "vocab_size"):
        with contextlib.suppress(NotImplementedError):
            highest_id = max(highest_id, tokenizer.vocab_size)

    class CachedTokenizer(tokenizer.__class__):  # type: ignore
        @property
        def all_special_ids(self) -> list[int]:
            return special_ids

        @property
        def all_special_tokens(self) -> list[str]:
            return special_tokens

        @property
        def all_special_tokens_extended(self) -> list[str]:
            return special_tokens_extended

        @property
        def max_token_id(self) -> int:
            return highest_id

        def get_vocab(self) -> dict[str, int]:
            return vocab

        def __len__(self) -> int:
            return size

        def __reduce__(self):
            # Pickle by re-wrapping the original tokenizer rather than
            # serializing the dynamically created class.
            return get_cached_tokenizer, (tokenizer,)

    CachedTokenizer.__name__ = f"Cached{tokenizer.__class__.__name__}"

    # Swap the instance's class so cached properties shadow the originals.
    proxy.__class__ = CachedTokenizer
    return proxy
|
2024-03-15 16:37:01 -07:00
|
|
|
|
|
|
|
|
|
2023-05-09 16:03:44 -07:00
|
|
|
def get_tokenizer(
    tokenizer_name: str | Path,
    *args,
    tokenizer_mode: str = "auto",
    trust_remote_code: bool = False,
    revision: str | None = None,
    download_dir: str | None = None,
    **kwargs,
) -> AnyTokenizer:
    """Gets a tokenizer for the given model name via HuggingFace or ModelScope.

    Args:
        tokenizer_name: HF/ModelScope repo name or a local path (a GGUF
            file path is also accepted; its parent folder is used as the
            model folder).
        tokenizer_mode: Selects the backend; this function branches on
            "slow", "mistral", and "custom" (anything else uses HF's
            `AutoTokenizer`).
        trust_remote_code: Forwarded to `AutoTokenizer.from_pretrained`.
        revision: Optional revision forwarded to the download/loading calls.
        download_dir: Optional cache directory for ModelScope downloads.
        *args, **kwargs: Forwarded to the underlying tokenizer constructor.

    Returns:
        The loaded tokenizer, wrapped by `get_cached_tokenizer` to cache
        expensive properties.

    Raises:
        ValueError: If `use_fast=True` is passed in slow tokenizer mode.
        RuntimeError: If loading fails in a way that suggests
            `trust_remote_code=True` is required.
    """
    if envs.VLLM_USE_MODELSCOPE:
        # download model from ModelScope hub,
        # lazy import so that modelscope is not required for normal use.
        # pylint: disable=C.
        from modelscope.hub.snapshot_download import snapshot_download

        # avoid circuit import
        from vllm.model_executor.model_loader.weight_utils import get_lock

        # Only set the tokenizer here, model will be downloaded on the workers.
        if not os.path.exists(tokenizer_name):
            # Use file lock to prevent multiple processes from
            # downloading the same file at the same time.
            with get_lock(tokenizer_name, download_dir):
                tokenizer_path = snapshot_download(
                    model_id=tokenizer_name,
                    cache_dir=download_dir,
                    revision=revision,
                    local_files_only=huggingface_hub.constants.HF_HUB_OFFLINE,
                    # Ignore weights - we only need the tokenizer.
                    ignore_file_pattern=[".*.pt", ".*.safetensors", ".*.bin"],
                )
                tokenizer_name = tokenizer_path

    if tokenizer_mode == "slow":
        if kwargs.get("use_fast", False):
            raise ValueError("Cannot use the fast tokenizer in slow tokenizer mode.")
        kwargs["use_fast"] = False

    # Default to left truncation unless the caller overrides it.
    if "truncation_side" not in kwargs:
        kwargs["truncation_side"] = "left"

    # Separate model folder from file path for GGUF models
    is_gguf = check_gguf_file(tokenizer_name)
    if is_gguf:
        kwargs["gguf_file"] = Path(tokenizer_name).name
        tokenizer_name = Path(tokenizer_name).parent

    # if tokenizer is from official mistral org
    is_from_mistral_org = str(tokenizer_name).split("/")[0] == "mistralai"
    if is_from_mistral_org and tokenizer_mode != "mistral":
        warnings.warn(
            "It is strongly recommended to run mistral models with "
            '`--tokenizer-mode "mistral"` to ensure correct '
            "encoding and decoding.",
            FutureWarning,
            stacklevel=2,
        )

    tokenizer: AnyTokenizer
    if tokenizer_mode == "mistral":
        tokenizer = MistralTokenizer.from_pretrained(
            str(tokenizer_name), revision=revision
        )
    elif tokenizer_mode == "custom":
        # Lazy import: the registry is only needed for custom tokenizers.
        from vllm.transformers_utils.tokenizer_base import TokenizerRegistry

        tokenizer = TokenizerRegistry.get_tokenizer(
            str(tokenizer_name),
            *args,
            revision=revision,
            download_dir=download_dir,
            **kwargs,
        )
    else:
        try:
            tokenizer = AutoTokenizer.from_pretrained(
                tokenizer_name,
                *args,
                trust_remote_code=trust_remote_code,
                revision=revision,
                **kwargs,
            )
        except ValueError as e:
            # If the error pertains to the tokenizer class not existing or not
            # currently being imported,
            # suggest using the --trust-remote-code flag.
            if not trust_remote_code and (
                "does not exist or is not currently imported." in str(e)
                or "requires you to execute the tokenizer file" in str(e)
            ):
                err_msg = (
                    "Failed to load the tokenizer. If the tokenizer "
                    "is a custom tokenizer not yet available in the "
                    "HuggingFace transformers library, consider "
                    "setting `trust_remote_code=True` in LLM or using "
                    "the `--trust-remote-code` flag in the CLI."
                )
                raise RuntimeError(err_msg) from e
            else:
                raise e

    # The special_tokens in tokenizer should also be
    # controlled by do_lower_case in encoder_config
    encoder_config = get_sentence_transformer_tokenizer_config(
        tokenizer_name, revision
    )
    if isinstance(encoder_config, dict) and encoder_config.get(
        "do_lower_case", False
    ):
        special_tokens_map = {
            k: v.lower() for k, v in tokenizer.special_tokens_map.items()
        }
        tokenizer.add_special_tokens(special_tokens_map)

    if not isinstance(tokenizer, PreTrainedTokenizerFast):
        logger.warning(
            "Using a slow tokenizer. This might cause a significant "
            "slowdown. Consider using a fast tokenizer instead."
        )

    # Wrap so expensive tokenizer properties are computed only once.
    tokenizer = get_cached_tokenizer(tokenizer)

    return tokenizer
|
2023-05-23 20:46:32 -07:00
|
|
|
|
|
|
|
|
|
2025-02-19 21:13:50 +08:00
|
|
|
# Process-wide memoized variant of `get_tokenizer`; repeated calls with the
# same arguments reuse the already-loaded tokenizer instance.
cached_get_tokenizer = lru_cache(get_tokenizer)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
def cached_tokenizer_from_config(
    model_config: ModelConfig,
    **kwargs: Any,
):
    """Load (and memoize via `cached_get_tokenizer`) the tokenizer
    described by `model_config`.

    NOTE: `lru_cache` keys include keyword-argument order, so the keyword
    order of this call should stay stable to avoid duplicate cache entries.
    """
    return cached_get_tokenizer(
        model_config.tokenizer,
        tokenizer_mode=model_config.tokenizer_mode,
        revision=model_config.tokenizer_revision,
        trust_remote_code=model_config.trust_remote_code,
        **kwargs,
    )
|
|
|
|
|
|
|
|
|
|
|
2025-09-17 01:42:59 -07:00
|
|
|
def init_tokenizer_from_configs(model_config: ModelConfig):
    """Create a tokenizer for `model_config`, picking the truncation side
    appropriate for the model's runner type.
    """
    runner_type = model_config.runner_type
    if runner_type in ("generate", "draft"):
        # Generation-style runners truncate from the left.
        truncation_side = "left"
    elif runner_type == "pooling":
        # Pooling runners truncate from the right.
        truncation_side = "right"
    else:
        assert_never(runner_type)

    return get_tokenizer(
        model_config.tokenizer,
        tokenizer_mode=model_config.tokenizer_mode,
        trust_remote_code=model_config.trust_remote_code,
        revision=model_config.tokenizer_revision,
        truncation_side=truncation_side,
    )
|