[Refactor] TokenizerRegistry only uses lazy imports (#30609)
Signed-off-by: DarkLight1337 <tlleungac@connect.ust.hk>
This commit is contained in:
@@ -3,38 +3,39 @@
|
||||
from typing import _get_protocol_attrs # type: ignore
|
||||
|
||||
import pytest
|
||||
from transformers import PreTrainedTokenizerBase
|
||||
from transformers import (
|
||||
PreTrainedTokenizer,
|
||||
PreTrainedTokenizerBase,
|
||||
PreTrainedTokenizerFast,
|
||||
)
|
||||
|
||||
from vllm.tokenizers import TokenizerLike, get_tokenizer
|
||||
from vllm.tokenizers.mistral import MistralTokenizer
|
||||
|
||||
|
||||
def _get_missing_attrs(obj: object, target: type):
|
||||
return [k for k in _get_protocol_attrs(target) if not hasattr(obj, k)]
|
||||
|
||||
|
||||
def _assert_tokenizer_like(tokenizer: object):
    """Fail the calling test if *tokenizer* lacks any TokenizerLike attribute."""
    missing_attrs = _get_missing_attrs(tokenizer, TokenizerLike)
    # Report the full list of gaps in one go rather than the first only.
    assert not missing_attrs, f"Missing attrs: {missing_attrs}"
|
||||
|
||||
|
||||
def test_tokenizer_like_protocol():
    """Every supported tokenizer backend must satisfy the TokenizerLike protocol.

    Covers the slow and fast HF tokenizers plus the Mistral tokenizer, and
    also checks each loader returns the expected concrete class.
    """
    cases = [
        (get_tokenizer("gpt2", use_fast=False), PreTrainedTokenizer),
        (get_tokenizer("gpt2", use_fast=True), PreTrainedTokenizerFast),
        (
            get_tokenizer(
                "mistralai/Mistral-7B-Instruct-v0.3", tokenizer_mode="mistral"
            ),
            MistralTokenizer,
        ),
    ]
    for tokenizer, expected_cls in cases:
        assert isinstance(tokenizer, expected_cls)
        _assert_tokenizer_like(tokenizer)
|
||||
|
||||
|
||||
@pytest.mark.parametrize("tokenizer_name", ["facebook/opt-125m", "gpt2"])
|
||||
|
||||
@@ -2,7 +2,14 @@
|
||||
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
|
||||
from pathlib import Path
|
||||
|
||||
from vllm.tokenizers import TokenizerLike, TokenizerRegistry, get_tokenizer
|
||||
import pytest
|
||||
|
||||
from vllm.tokenizers import TokenizerLike
|
||||
from vllm.tokenizers.registry import (
|
||||
TokenizerRegistry,
|
||||
get_tokenizer,
|
||||
resolve_tokenizer_args,
|
||||
)
|
||||
|
||||
|
||||
class TestTokenizer(TokenizerLike):
|
||||
@@ -40,10 +47,22 @@ class TestTokenizer(TokenizerLike):
|
||||
return True
|
||||
|
||||
|
||||
@pytest.mark.parametrize("runner_type", ["generate", "pooling"])
def test_resolve_tokenizer_args_idempotent(runner_type):
    """resolve_tokenizer_args applied to its own output must be a no-op."""
    resolved = resolve_tokenizer_args(
        "facebook/opt-125m",
        runner_type=runner_type,
    )
    _, tokenizer_name, args, kwargs = resolved

    # Feeding the resolved name/args/kwargs back in should reproduce the
    # exact same 4-tuple (mode, name, args, kwargs).
    assert resolve_tokenizer_args(tokenizer_name, *args, **kwargs) == resolved
|
||||
|
||||
|
||||
def test_customized_tokenizer():
    """A user-registered tokenizer class is loadable through the registry."""
    # Register this module's TestTokenizer under a custom mode name.
    TokenizerRegistry.register("test_tokenizer", __name__, TestTokenizer.__name__)

    loaded = TokenizerRegistry.load_tokenizer("test_tokenizer", "abc")

    assert isinstance(loaded, TestTokenizer)
    assert loaded.path_or_repo_id == "abc"
    assert loaded.bos_token_id == 0
|
||||
|
||||
Reference in New Issue
Block a user