Bump transformers version for Llama 3.1 hotfix and patch Chameleon (#6690)
--- a/vllm/model_executor/models/__init__.py
+++ b/vllm/model_executor/models/__init__.py
@@ -16,8 +16,6 @@ _GENERATION_MODELS = {
     "BaiChuanForCausalLM": ("baichuan", "BaiChuanForCausalLM"),  # baichuan-7b
     "BaichuanForCausalLM": ("baichuan", "BaichuanForCausalLM"),  # baichuan-13b
     "BloomForCausalLM": ("bloom", "BloomForCausalLM"),
-    #TODO(ywang96): remove this when huggingface fixes the model repo
-    "ChameleonForCausalLM": ("chameleon", "ChameleonForConditionalGeneration"),
     "ChameleonForConditionalGeneration":
     ("chameleon", "ChameleonForConditionalGeneration"),
     "ChatGLMModel": ("chatglm", "ChatGLMForCausalLM"),
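
Note: entries in _GENERATION_MODELS map a Hugging Face architecture name to a (module, class) pair under vllm/model_executor/models/. The dropped "ChameleonForCausalLM" alias existed only while the upstream model repo reported the wrong architecture name, per the removed TODO; with that fixed upstream, the alias can go. A minimal sketch of the lookup the mapping implies (resolve() here is a simplified illustration, not vLLM's actual resolver):

import importlib

_GENERATION_MODELS = {
    # HF architecture name -> (module under vllm/model_executor/models/, class)
    "ChameleonForConditionalGeneration":
    ("chameleon", "ChameleonForConditionalGeneration"),
}

def resolve(arch: str):
    module_name, cls_name = _GENERATION_MODELS[arch]
    module = importlib.import_module(
        f"vllm.model_executor.models.{module_name}")
    return getattr(module, cls_name)
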
--- a/vllm/model_executor/models/chameleon.py
+++ b/vllm/model_executor/models/chameleon.py
@@ -6,6 +6,7 @@ import torch
 import torch.nn.functional as F
 from PIL import Image
 from torch import nn
+from transformers import ChameleonConfig, ChameleonVQVAEConfig

 from vllm.attention import Attention, AttentionMetadata
 from vllm.config import CacheConfig, MultiModalConfig
@@ -30,8 +31,6 @@ from vllm.multimodal import MULTIMODAL_REGISTRY
 from vllm.multimodal.image import (cached_get_tokenizer,
                                    repeat_and_pad_image_tokens)
 from vllm.sequence import IntermediateTensors, SamplerOutput, SequenceData
-from vllm.transformers_utils.configs import (ChameleonConfig,
-                                             ChameleonVQVAEConfig)
 from vllm.utils import print_warning_once

 from .interfaces import SupportsVision
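
The two chameleon.py hunks swap vLLM's vendored Chameleon config classes for the ones shipped in Hugging Face transformers, which the version bump makes available (the Llama 3.1 hotfix releases in the 4.43.x line are also where transformers first ships Chameleon; treat the exact minimum version as an assumption). A minimal sketch of what the new import provides:

# Assumes transformers >= 4.43; earlier releases do not expose these names.
from transformers import ChameleonConfig, ChameleonVQVAEConfig

text_cfg = ChameleonConfig()     # top-level model config, HF defaults
vq_cfg = ChameleonVQVAEConfig()  # config for the VQ-VAE image tokenizer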