[Misc] Remove vLLM patch of BaichuanTokenizer (#8921)
@@ -11,8 +11,7 @@ from transformers import (AutoTokenizer, PreTrainedTokenizer,
 from vllm.envs import VLLM_USE_MODELSCOPE
 from vllm.logger import init_logger
 from vllm.lora.request import LoRARequest
-from vllm.transformers_utils.tokenizers import (BaichuanTokenizer,
-                                                MistralTokenizer)
+from vllm.transformers_utils.tokenizers import MistralTokenizer
 from vllm.transformers_utils.utils import check_gguf_file
 from vllm.utils import make_async
 
@@ -139,19 +138,6 @@ def get_tokenizer(
             raise RuntimeError(err_msg) from e
         else:
             raise e
-    except AttributeError as e:
-        if "BaichuanTokenizer" in str(e):
-            # This is for the error "'BaichuanTokenizer' object has no
-            # attribute 'sp_model'".
-            tokenizer = BaichuanTokenizer.from_pretrained(
-                tokenizer_name,
-                *args,
-                trust_remote_code=trust_remote_code,
-                revision=revision,
-                **kwargs,
-            )
-        else:
-            raise e
 
     # NOTE: We can remove this after https://github.com/THUDM/ChatGLM3/issues/1324
     if type(tokenizer).__name__ in ("ChatGLMTokenizer",
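With the vendored fallback removed, Baichuan tokenizers are expected to load through the standard transformers path, with the checkpoint's own remote tokenizer code handling what the removed except-branch used to work around (the "'BaichuanTokenizer' object has no attribute 'sp_model'" error). A minimal sketch of the post-change loading flow, assuming a Baichuan checkpoint whose Hub repository ships its tokenizer implementation; the model ID below is only an illustrative example, not something specified by this commit:

# Minimal sketch (not part of this commit): after the patch removal,
# loading relies on transformers executing the tokenizer code bundled
# with the checkpoint rather than vLLM's BaichuanTokenizer fallback.
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained(
    "baichuan-inc/Baichuan2-7B-Chat",  # example checkpoint, not from the diff
    trust_remote_code=True,  # lets transformers run the repo's tokenizer class
)
print(tokenizer("hello").input_ids)

If a checkpoint's remote code still raised the old sp_model AttributeError, it would now propagate to the caller instead of being silently rerouted through vLLM's copy of BaichuanTokenizer.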