Skip models that cannot currently init on Transformers v5 (#28471)

Signed-off-by: Harry Mellor <19981378+hmellor@users.noreply.github.com>
This commit is contained in:
Harry Mellor
2025-11-12 23:43:57 +00:00
committed by GitHub
parent 69d0e90313
commit 51c599f0ec
3 changed files with 6 additions and 16 deletions

View File

@@ -82,7 +82,8 @@ enable_hf_transfer()
class DisabledTqdm(tqdm):
    """A `tqdm` subclass whose progress output is always suppressed.

    Used to silence progress bars from libraries that instantiate tqdm
    internally (e.g. Hugging Face downloads) without changing call sites.
    """

    def __init__(self, *args, **kwargs):
        # The diff residue showed two conflicting bodies; the correct one
        # sets the key in kwargs before forwarding. Writing into kwargs
        # (instead of `super().__init__(..., disable=True)`) avoids a
        # "got multiple values for keyword argument 'disable'" TypeError
        # when a caller also passes `disable=` explicitly — the override
        # silently wins instead of crashing.
        kwargs["disable"] = True
        super().__init__(*args, **kwargs)
def get_lock(model_name_or_path: str | Path, cache_dir: str | None = None):

View File

@@ -13,7 +13,6 @@ from transformers import (
BatchFeature,
WhisperConfig,
WhisperFeatureExtractor,
WhisperProcessor,
)
from transformers.models.whisper.modeling_whisper import sinusoids
@@ -660,16 +659,6 @@ class WhisperProcessingInfo(BaseProcessingInfo):
def get_hf_config(self) -> WhisperConfig:
    """Return this model's Hugging Face config, narrowed to WhisperConfig."""
    config = self.ctx.get_hf_config(WhisperConfig)
    return config
def get_hf_processor(self, **kwargs: object) -> WhisperProcessor:
    """Build and return the WhisperProcessor for this model.

    Applies a class-level monkeypatch to ``WhisperProcessor`` before
    construction to work around a Transformers 4.53.2 tokenizer bug.
    Extra ``kwargs`` are forwarded to the processor factory.
    """
    # HACK: Transformers 4.53.2 has issue with whisper tokenizer to
    # initialize processor. We use a monkeypatch to fix it here.
    # See: https://github.com/vllm-project/vllm/issues/20224
    processor_class = WhisperProcessor
    # Expected pair of tokenizer class names (slow, fast); the guard below
    # keeps the patch idempotent across repeated calls.
    tokenizer_class = ("WhisperTokenizer", "WhisperTokenizerFast")
    # NOTE(review): this mutates the shared WhisperProcessor class attribute,
    # affecting all users of the class process-wide — intentional per the
    # linked issue, but worth confirming it is still needed on newer versions.
    if processor_class.tokenizer_class != tokenizer_class:
        processor_class.tokenizer_class = tokenizer_class
    return self.ctx.get_hf_processor(processor_class, **kwargs)
def get_supported_mm_limits(self) -> Mapping[str, int | None]:
return {"audio": 1}