[Model] support MiniMax-VL-01 model (#16328)

Signed-off-by: qingjun <qingjun@minimaxi.com>
Author: qscqesze
Date: 2025-04-29 12:05:50 +08:00 (committed by GitHub)
Parent: 96e06e3cb7
Commit: cde384cd92

11 changed files with 954 additions and 19 deletions

vllm/transformers_utils/configs/__init__.py

@@ -15,6 +15,8 @@ from vllm.transformers_utils.configs.internvl import InternVLChatConfig
from vllm.transformers_utils.configs.jais import JAISConfig
from vllm.transformers_utils.configs.kimi_vl import KimiVLConfig
from vllm.transformers_utils.configs.medusa import MedusaConfig
from vllm.transformers_utils.configs.minimax_text_01 import MiniMaxText01Config
from vllm.transformers_utils.configs.minimax_vl_01 import MiniMaxVL01Config
from vllm.transformers_utils.configs.mllama import MllamaConfig
from vllm.transformers_utils.configs.mlp_speculator import MLPSpeculatorConfig
from vllm.transformers_utils.configs.moonvit import MoonViTConfig
@@ -39,6 +41,8 @@ __all__ = [
"MedusaConfig",
"EAGLEConfig",
"ExaoneConfig",
"MiniMaxText01Config",
"MiniMaxVL01Config",
"MllamaConfig",
"MLPSpeculatorConfig",
"MoonViTConfig",

vllm/transformers_utils/configs/minimax_text_01.py

@@ -0,0 +1,69 @@
# SPDX-License-Identifier: Apache-2.0
"""MiniMaxText01 model configuration"""

from transformers.configuration_utils import PretrainedConfig


class MiniMaxText01Config(PretrainedConfig):
    model_type = "MiniMaxText01"
    keys_to_ignore_at_inference = ["past_key_values"]

    def __init__(
        self,
        vocab_size=32000,
        hidden_size=4096,
        intermediate_size=14336,
        num_hidden_layers=32,
        num_attention_heads=32,
        num_key_value_heads=8,
        hidden_act="silu",
        max_position_embeddings=4096 * 32,
        initializer_range=0.02,
        rms_norm_eps=1e-5,
        use_cache=True,
        pad_token_id=None,
        bos_token_id=None,
        eos_token_id=None,
        tie_word_embeddings=False,
        rope_theta=1e6,
        sliding_window=None,
        attention_dropout=0.0,
        num_experts_per_tok=2,
        num_local_experts=8,
        output_router_logits=False,
        router_aux_loss_coef=0.001,
        router_jitter_noise=0.0,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.sliding_window = sliding_window

        # for backward compatibility
        if num_key_value_heads is None:
            num_key_value_heads = num_attention_heads

        self.num_key_value_heads = num_key_value_heads
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.rms_norm_eps = rms_norm_eps
        self.use_cache = use_cache
        self.rope_theta = rope_theta
        self.attention_dropout = attention_dropout
        self.num_experts_per_tok = num_experts_per_tok
        self.num_local_experts = num_local_experts
        self.output_router_logits = output_router_logits
        self.router_aux_loss_coef = router_aux_loss_coef
        self.router_jitter_noise = router_jitter_noise

        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            tie_word_embeddings=tie_word_embeddings,
            **kwargs,
        )
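The defaults above describe the text backbone: a 32-layer decoder with grouped-query attention (32 query heads sharing 8 KV heads), a 131072-token context window (4096 * 32), and the same MoE routing knobs used by Mixtral-style configs (8 local experts, top-2 routing per token). A short sanity check of those defaults (illustrative snippet, not part of the diff):

from vllm.transformers_utils.configs.minimax_text_01 import MiniMaxText01Config

cfg = MiniMaxText01Config()
assert cfg.max_position_embeddings == 4096 * 32  # 131072-token context
assert cfg.num_key_value_heads == 8              # GQA: 32 query heads, 8 KV heads

# Passing None falls back to full multi-head attention, matching the
# backward-compatibility branch in __init__.
legacy = MiniMaxText01Config(num_key_value_heads=None)
assert legacy.num_key_value_heads == legacy.num_attention_heads == 32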

vllm/transformers_utils/configs/minimax_vl_01.py

@@ -0,0 +1,70 @@
# SPDX-License-Identifier: Apache-2.0
"""MiniMaxVL01 model configuration"""

from transformers.configuration_utils import PretrainedConfig
from transformers.models.auto import CONFIG_MAPPING

from .minimax_text_01 import MiniMaxText01Config


class MiniMaxVL01Config(PretrainedConfig):
    model_type = "minimax_vl_01"

    def __init__(
        self,
        vision_config=None,
        text_config=None,
        ignore_index=-100,
        image_token_index=32000,
        projector_hidden_act="gelu",
        vision_feature_select_strategy="default",
        vision_feature_layer=-2,
        image_grid_pinpoints=None,
        tie_word_embeddings=False,
        image_seq_length=576,
        **kwargs,
    ):
        self.ignore_index = ignore_index
        self.image_token_index = image_token_index
        self.projector_hidden_act = projector_hidden_act
        self.image_seq_length = image_seq_length

        if vision_feature_select_strategy not in ["default", "full"]:
            raise ValueError("vision_feature_select_strategy should "
                             "be one of 'default', 'full'. "
                             f"Got: {vision_feature_select_strategy}")

        self.vision_feature_select_strategy = vision_feature_select_strategy
        self.vision_feature_layer = vision_feature_layer
        image_grid_pinpoints = (
            image_grid_pinpoints if image_grid_pinpoints is not None else
            [[336, 672], [672, 336], [672, 672], [1008, 336], [336, 1008]])
        self.image_grid_pinpoints = image_grid_pinpoints

        if isinstance(vision_config, dict):
            if "model_type" not in vision_config:
                vision_config["model_type"] = "clip_vision_model"
            vision_config = CONFIG_MAPPING[vision_config["model_type"]](
                **vision_config)
        elif vision_config is None:
            # Default vision tower: CLIP ViT-L/14 at 336px resolution.
            vision_config = CONFIG_MAPPING["clip_vision_model"](
                intermediate_size=4096,
                hidden_size=1024,
                patch_size=14,
                image_size=336,
                num_hidden_layers=24,
                num_attention_heads=16,
                vocab_size=32000,
                projection_dim=768,
            )
        self.vision_config = vision_config

        if text_config is not None:
            text_config = MiniMaxText01Config(**text_config)
        else:
            text_config = MiniMaxText01Config()
        self.text_config = text_config

        super().__init__(tie_word_embeddings=tie_word_embeddings, **kwargs)
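MiniMaxVL01Config composes a vision-tower config with the text config defined above, following the LLaVA-style composite-config shape: omitting vision_config synthesizes the default CLIP ViT-L/14-336 tower, and dict inputs (e.g. parsed from a checkpoint's config.json) are promoted to config objects. An illustrative check of that behavior (not part of the diff):

from vllm.transformers_utils.configs.minimax_vl_01 import MiniMaxVL01Config

cfg = MiniMaxVL01Config()
# Defaults synthesized by the elif/else branches above.
assert cfg.vision_config.model_type == "clip_vision_model"
assert cfg.vision_config.image_size == 336
assert cfg.text_config.model_type == "MiniMaxText01"

# Dicts are promoted via CONFIG_MAPPING / MiniMaxText01Config.
cfg2 = MiniMaxVL01Config(
    vision_config={"image_size": 448},
    text_config={"num_hidden_layers": 4},
)
assert cfg2.vision_config.image_size == 448
assert cfg2.text_config.num_hidden_layers == 4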