[Core] Parse vLLM engine required fields from hf_config to model_arch_config (#28454)
Signed-off-by: Xingyu Liu <charlotteliu12x@gmail.com>
Signed-off-by: Xingyu Liu <38244988+charlotte12l@users.noreply.github.com>
This commit is contained in:
359
tests/config/base_model_arch_groundtruth.json
Normal file
359
tests/config/base_model_arch_groundtruth.json
Normal file
@@ -0,0 +1,359 @@
|
||||
{
|
||||
"state-spaces/mamba-130m-hf": {
|
||||
"architectures": [
|
||||
"MambaForCausalLM"
|
||||
],
|
||||
"model_type": "mamba",
|
||||
"text_model_type": "mamba",
|
||||
"hidden_size": 768,
|
||||
"total_num_hidden_layers": 24,
|
||||
"total_num_attention_heads": 0,
|
||||
"head_size": 0,
|
||||
"vocab_size": 50280,
|
||||
"total_num_kv_heads": 0,
|
||||
"num_experts": 0,
|
||||
"is_deepseek_mla": false,
|
||||
"is_multimodal_model": false,
|
||||
"dtype": "torch.float32"
|
||||
},
|
||||
"mistralai/Mamba-Codestral-7B-v0.1": {
|
||||
"architectures": [
|
||||
"Mamba2ForCausalLM"
|
||||
],
|
||||
"model_type": "mamba",
|
||||
"text_model_type": "mamba",
|
||||
"hidden_size": 4096,
|
||||
"total_num_hidden_layers": 64,
|
||||
"total_num_attention_heads": 0,
|
||||
"head_size": 0,
|
||||
"vocab_size": 32768,
|
||||
"total_num_kv_heads": 0,
|
||||
"num_experts": 0,
|
||||
"is_deepseek_mla": false,
|
||||
"is_multimodal_model": false,
|
||||
"dtype": "torch.bfloat16"
|
||||
},
|
||||
"ibm-nasa-geospatial/Prithvi-EO-2.0-300M-TL-Sen1Floods11": {
|
||||
"architectures": [
|
||||
"Terratorch"
|
||||
],
|
||||
"model_type": "timm_wrapper",
|
||||
"text_model_type": "timm_wrapper",
|
||||
"hidden_size": 0,
|
||||
"total_num_hidden_layers": 0,
|
||||
"total_num_attention_heads": 0,
|
||||
"head_size": 0,
|
||||
"vocab_size": 0,
|
||||
"total_num_kv_heads": 0,
|
||||
"num_experts": 0,
|
||||
"is_deepseek_mla": false,
|
||||
"is_multimodal_model": true,
|
||||
"dtype": "torch.float32"
|
||||
},
|
||||
"tiiuae/falcon-mamba-7b-instruct": {
|
||||
"architectures": [
|
||||
"FalconMambaForCausalLM"
|
||||
],
|
||||
"model_type": "falcon_mamba",
|
||||
"text_model_type": "falcon_mamba",
|
||||
"hidden_size": 4096,
|
||||
"total_num_hidden_layers": 64,
|
||||
"total_num_attention_heads": 0,
|
||||
"head_size": 0,
|
||||
"vocab_size": 65024,
|
||||
"total_num_kv_heads": 0,
|
||||
"num_experts": 0,
|
||||
"is_deepseek_mla": false,
|
||||
"is_multimodal_model": false,
|
||||
"dtype": "torch.bfloat16"
|
||||
},
|
||||
"Zyphra/Zamba2-7B-instruct": {
|
||||
"architectures": [
|
||||
"Zamba2ForCausalLM"
|
||||
],
|
||||
"model_type": "zamba2",
|
||||
"text_model_type": "zamba2",
|
||||
"hidden_size": 3584,
|
||||
"total_num_hidden_layers": 81,
|
||||
"total_num_attention_heads": 32,
|
||||
"head_size": 224,
|
||||
"vocab_size": 32000,
|
||||
"total_num_kv_heads": 32,
|
||||
"num_experts": 0,
|
||||
"is_deepseek_mla": false,
|
||||
"is_multimodal_model": false,
|
||||
"dtype": "torch.bfloat16"
|
||||
},
|
||||
"mosaicml/mpt-7b": {
|
||||
"architectures": [
|
||||
"MPTForCausalLM"
|
||||
],
|
||||
"model_type": "mpt",
|
||||
"text_model_type": "mpt",
|
||||
"hidden_size": 4096,
|
||||
"total_num_hidden_layers": 32,
|
||||
"total_num_attention_heads": 32,
|
||||
"head_size": 128,
|
||||
"vocab_size": 50432,
|
||||
"total_num_kv_heads": 32,
|
||||
"num_experts": 0,
|
||||
"is_deepseek_mla": false,
|
||||
"is_multimodal_model": false,
|
||||
"dtype": "torch.bfloat16"
|
||||
},
|
||||
"databricks/dbrx-instruct": {
|
||||
"architectures": [
|
||||
"DbrxForCausalLM"
|
||||
],
|
||||
"model_type": "dbrx",
|
||||
"text_model_type": "dbrx",
|
||||
"hidden_size": 6144,
|
||||
"total_num_hidden_layers": 40,
|
||||
"total_num_attention_heads": 48,
|
||||
"head_size": 128,
|
||||
"vocab_size": 100352,
|
||||
"total_num_kv_heads": 8,
|
||||
"num_experts": 0,
|
||||
"is_deepseek_mla": false,
|
||||
"is_multimodal_model": false,
|
||||
"dtype": "torch.bfloat16"
|
||||
},
|
||||
"tiiuae/falcon-7b": {
|
||||
"architectures": [
|
||||
"FalconForCausalLM"
|
||||
],
|
||||
"model_type": "falcon",
|
||||
"text_model_type": "falcon",
|
||||
"hidden_size": 4544,
|
||||
"total_num_hidden_layers": 32,
|
||||
"total_num_attention_heads": 71,
|
||||
"head_size": 64,
|
||||
"vocab_size": 65024,
|
||||
"total_num_kv_heads": 1,
|
||||
"num_experts": 0,
|
||||
"is_deepseek_mla": false,
|
||||
"is_multimodal_model": false,
|
||||
"dtype": "torch.bfloat16"
|
||||
},
|
||||
"tiiuae/falcon-40b": {
|
||||
"architectures": [
|
||||
"FalconForCausalLM"
|
||||
],
|
||||
"model_type": "falcon",
|
||||
"text_model_type": "falcon",
|
||||
"hidden_size": 8192,
|
||||
"total_num_hidden_layers": 60,
|
||||
"total_num_attention_heads": 128,
|
||||
"head_size": 64,
|
||||
"vocab_size": 65024,
|
||||
"total_num_kv_heads": 8,
|
||||
"num_experts": 0,
|
||||
"is_deepseek_mla": false,
|
||||
"is_multimodal_model": false,
|
||||
"dtype": "torch.bfloat16"
|
||||
},
|
||||
"luccafong/deepseek_mtp_main_random": {
|
||||
"architectures": [
|
||||
"DeepseekV3ForCausalLM"
|
||||
],
|
||||
"model_type": "deepseek_v3",
|
||||
"text_model_type": "deepseek_v3",
|
||||
"hidden_size": 2560,
|
||||
"total_num_hidden_layers": 5,
|
||||
"total_num_attention_heads": 32,
|
||||
"head_size": 576,
|
||||
"vocab_size": 129280,
|
||||
"total_num_kv_heads": 32,
|
||||
"num_experts": 72,
|
||||
"is_deepseek_mla": true,
|
||||
"is_multimodal_model": false,
|
||||
"dtype": "torch.bfloat16"
|
||||
},
|
||||
"luccafong/deepseek_mtp_draft_random": {
|
||||
"architectures": [
|
||||
"DeepseekV3ForCausalLM"
|
||||
],
|
||||
"model_type": "deepseek_v3",
|
||||
"text_model_type": "deepseek_v3",
|
||||
"hidden_size": 2560,
|
||||
"total_num_hidden_layers": 10,
|
||||
"total_num_attention_heads": 32,
|
||||
"head_size": 576,
|
||||
"vocab_size": 129280,
|
||||
"total_num_kv_heads": 32,
|
||||
"num_experts": 72,
|
||||
"is_deepseek_mla": true,
|
||||
"is_multimodal_model": false,
|
||||
"dtype": "torch.bfloat16"
|
||||
},
|
||||
"Qwen/Qwen3-Next-80B-A3B-Instruct": {
|
||||
"architectures": [
|
||||
"Qwen3NextForCausalLM"
|
||||
],
|
||||
"model_type": "qwen3_next",
|
||||
"text_model_type": "qwen3_next",
|
||||
"hidden_size": 2048,
|
||||
"total_num_hidden_layers": 48,
|
||||
"total_num_attention_heads": 16,
|
||||
"head_size": 256,
|
||||
"vocab_size": 151936,
|
||||
"total_num_kv_heads": 2,
|
||||
"num_experts": 512,
|
||||
"is_deepseek_mla": false,
|
||||
"is_multimodal_model": false,
|
||||
"dtype": "torch.bfloat16"
|
||||
},
|
||||
"tiny-random/qwen3-next-moe": {
|
||||
"architectures": [
|
||||
"Qwen3NextForCausalLM"
|
||||
],
|
||||
"model_type": "qwen3_next",
|
||||
"text_model_type": "qwen3_next",
|
||||
"hidden_size": 8,
|
||||
"total_num_hidden_layers": 4,
|
||||
"total_num_attention_heads": 16,
|
||||
"head_size": 32,
|
||||
"vocab_size": 151936,
|
||||
"total_num_kv_heads": 8,
|
||||
"num_experts": 32,
|
||||
"is_deepseek_mla": false,
|
||||
"is_multimodal_model": false,
|
||||
"dtype": "torch.bfloat16"
|
||||
},
|
||||
"zai-org/GLM-4.5": {
|
||||
"architectures": [
|
||||
"Glm4MoeForCausalLM"
|
||||
],
|
||||
"model_type": "glm4_moe",
|
||||
"text_model_type": "glm4_moe",
|
||||
"hidden_size": 5120,
|
||||
"total_num_hidden_layers": 92,
|
||||
"total_num_attention_heads": 96,
|
||||
"head_size": 128,
|
||||
"vocab_size": 151552,
|
||||
"total_num_kv_heads": 8,
|
||||
"num_experts": 160,
|
||||
"is_deepseek_mla": false,
|
||||
"is_multimodal_model": false,
|
||||
"dtype": "torch.bfloat16"
|
||||
},
|
||||
"baidu/ERNIE-4.5-21B-A3B-PT": {
|
||||
"architectures": [
|
||||
"Ernie4_5_MoeForCausalLM"
|
||||
],
|
||||
"model_type": "ernie4_5_moe",
|
||||
"text_model_type": "ernie4_5_moe",
|
||||
"hidden_size": 2560,
|
||||
"total_num_hidden_layers": 28,
|
||||
"total_num_attention_heads": 20,
|
||||
"head_size": 128,
|
||||
"vocab_size": 103424,
|
||||
"total_num_kv_heads": 4,
|
||||
"num_experts": 64,
|
||||
"is_deepseek_mla": false,
|
||||
"is_multimodal_model": false,
|
||||
"dtype": "torch.bfloat16"
|
||||
},
|
||||
"lmsys/gpt-oss-20b-bf16": {
|
||||
"architectures": [
|
||||
"GptOssForCausalLM"
|
||||
],
|
||||
"model_type": "gpt_oss",
|
||||
"text_model_type": "gpt_oss",
|
||||
"hidden_size": 2880,
|
||||
"total_num_hidden_layers": 24,
|
||||
"total_num_attention_heads": 64,
|
||||
"head_size": 64,
|
||||
"vocab_size": 201088,
|
||||
"total_num_kv_heads": 8,
|
||||
"num_experts": 32,
|
||||
"is_deepseek_mla": false,
|
||||
"is_multimodal_model": false,
|
||||
"dtype": "torch.bfloat16"
|
||||
},
|
||||
"deepseek-ai/DeepSeek-V3.2-Exp": {
|
||||
"architectures": [
|
||||
"DeepseekV32ForCausalLM"
|
||||
],
|
||||
"model_type": "deepseek_v32",
|
||||
"text_model_type": "deepseek_v32",
|
||||
"hidden_size": 7168,
|
||||
"total_num_hidden_layers": 61,
|
||||
"total_num_attention_heads": 128,
|
||||
"head_size": 576,
|
||||
"vocab_size": 129280,
|
||||
"total_num_kv_heads": 128,
|
||||
"num_experts": 256,
|
||||
"is_deepseek_mla": true,
|
||||
"is_multimodal_model": false,
|
||||
"dtype": "torch.bfloat16"
|
||||
},
|
||||
"meta-llama/Llama-4-Scout-17B-16E-Instruct": {
|
||||
"architectures": [
|
||||
"Llama4ForConditionalGeneration"
|
||||
],
|
||||
"model_type": "llama4",
|
||||
"text_model_type": "llama4_text",
|
||||
"hidden_size": 5120,
|
||||
"total_num_hidden_layers": 48,
|
||||
"total_num_attention_heads": 40,
|
||||
"head_size": 128,
|
||||
"vocab_size": 202048,
|
||||
"total_num_kv_heads": 8,
|
||||
"num_experts": 16,
|
||||
"is_deepseek_mla": false,
|
||||
"is_multimodal_model": true,
|
||||
"dtype": "torch.bfloat16"
|
||||
},
|
||||
"nvidia/Llama-3_3-Nemotron-Super-49B-v1": {
|
||||
"architectures": [
|
||||
"DeciLMForCausalLM"
|
||||
],
|
||||
"model_type": "nemotron-nas",
|
||||
"text_model_type": "nemotron-nas",
|
||||
"hidden_size": 8192,
|
||||
"total_num_hidden_layers": 80,
|
||||
"total_num_attention_heads": 64,
|
||||
"head_size": 128,
|
||||
"vocab_size": 128256,
|
||||
"total_num_kv_heads": 8,
|
||||
"num_experts": 0,
|
||||
"is_deepseek_mla": false,
|
||||
"is_multimodal_model": false,
|
||||
"dtype": "torch.bfloat16"
|
||||
},
|
||||
"XiaomiMiMo/MiMo-7B-RL": {
|
||||
"architectures": [
|
||||
"MiMoForCausalLM"
|
||||
],
|
||||
"model_type": "mimo",
|
||||
"text_model_type": "mimo",
|
||||
"hidden_size": 4096,
|
||||
"total_num_hidden_layers": 36,
|
||||
"total_num_attention_heads": 32,
|
||||
"head_size": 128,
|
||||
"vocab_size": 151680,
|
||||
"total_num_kv_heads": 8,
|
||||
"num_experts": 0,
|
||||
"is_deepseek_mla": false,
|
||||
"is_multimodal_model": false,
|
||||
"dtype": "torch.bfloat16"
|
||||
},
|
||||
"meituan-longcat/LongCat-Flash-Chat": {
|
||||
"architectures": [
|
||||
"LongcatFlashForCausalLM"
|
||||
],
|
||||
"model_type": "longcat_flash",
|
||||
"text_model_type": "longcat_flash",
|
||||
"hidden_size": 6144,
|
||||
"total_num_hidden_layers": 28,
|
||||
"total_num_attention_heads": 64,
|
||||
"head_size": 576,
|
||||
"vocab_size": 131072,
|
||||
"total_num_kv_heads": 64,
|
||||
"num_experts": 512,
|
||||
"is_deepseek_mla": true,
|
||||
"is_multimodal_model": false,
|
||||
"dtype": "torch.float32"
|
||||
}
|
||||
}
|
||||
87
tests/config/draft_model_arch_groundtruth.json
Normal file
87
tests/config/draft_model_arch_groundtruth.json
Normal file
@@ -0,0 +1,87 @@
|
||||
{
|
||||
"abhigoyal/vllm-medusa-llama-68m-random": {
|
||||
"architectures": [
|
||||
"MedusaModel"
|
||||
],
|
||||
"model_type": "medusa",
|
||||
"text_model_type": "medusa",
|
||||
"hidden_size": 768,
|
||||
"total_num_hidden_layers": 1,
|
||||
"total_num_attention_heads": 0,
|
||||
"head_size": "Error: integer division or modulo by zero",
|
||||
"vocab_size": 32000,
|
||||
"total_num_kv_heads": 0,
|
||||
"num_experts": 0,
|
||||
"is_deepseek_mla": false,
|
||||
"is_multimodal_model": false,
|
||||
"dtype": "torch.float32"
|
||||
},
|
||||
"luccafong/deepseek_mtp_draft_random": {
|
||||
"architectures": [
|
||||
"DeepSeekMTPModel"
|
||||
],
|
||||
"model_type": "deepseek_mtp",
|
||||
"text_model_type": "deepseek_mtp",
|
||||
"hidden_size": 2560,
|
||||
"total_num_hidden_layers": 1,
|
||||
"total_num_attention_heads": 32,
|
||||
"head_size": 576,
|
||||
"vocab_size": 129280,
|
||||
"total_num_kv_heads": 32,
|
||||
"num_experts": 72,
|
||||
"is_deepseek_mla": true,
|
||||
"is_multimodal_model": false,
|
||||
"dtype": "torch.bfloat16"
|
||||
},
|
||||
"eagle618/eagle-deepseek-v3-random": {
|
||||
"architectures": [
|
||||
"EagleDeepSeekMTPModel"
|
||||
],
|
||||
"model_type": "eagle",
|
||||
"text_model_type": "deepseek_mtp",
|
||||
"hidden_size": 2560,
|
||||
"total_num_hidden_layers": 1,
|
||||
"total_num_attention_heads": 32,
|
||||
"head_size": 576,
|
||||
"vocab_size": 129280,
|
||||
"total_num_kv_heads": 32,
|
||||
"num_experts": 72,
|
||||
"is_deepseek_mla": true,
|
||||
"is_multimodal_model": false,
|
||||
"dtype": "bfloat16"
|
||||
},
|
||||
"yuhuili/EAGLE-LLaMA3-Instruct-8B": {
|
||||
"architectures": [
|
||||
"EagleLlamaForCausalLM"
|
||||
],
|
||||
"model_type": "eagle",
|
||||
"text_model_type": "llama",
|
||||
"hidden_size": 4096,
|
||||
"total_num_hidden_layers": 1,
|
||||
"total_num_attention_heads": 32,
|
||||
"head_size": 128,
|
||||
"vocab_size": 128256,
|
||||
"total_num_kv_heads": 8,
|
||||
"num_experts": 0,
|
||||
"is_deepseek_mla": false,
|
||||
"is_multimodal_model": false,
|
||||
"dtype": "float16"
|
||||
},
|
||||
"yuhuili/EAGLE3-LLaMA3.1-Instruct-8B": {
|
||||
"architectures": [
|
||||
"Eagle3LlamaForCausalLM"
|
||||
],
|
||||
"model_type": "eagle",
|
||||
"text_model_type": "llama",
|
||||
"hidden_size": 4096,
|
||||
"total_num_hidden_layers": 1,
|
||||
"total_num_attention_heads": 32,
|
||||
"head_size": 128,
|
||||
"vocab_size": 128256,
|
||||
"total_num_kv_heads": 8,
|
||||
"num_experts": 0,
|
||||
"is_deepseek_mla": false,
|
||||
"is_multimodal_model": false,
|
||||
"dtype": "float16"
|
||||
}
|
||||
}
|
||||
152
tests/config/test_model_arch_config.py
Normal file
152
tests/config/test_model_arch_config.py
Normal file
@@ -0,0 +1,152 @@
|
||||
# SPDX-License-Identifier: Apache-2.0
|
||||
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
|
||||
"""Tests for ModelArchitectureConfig and its integration with ModelConfig."""
|
||||
|
||||
import json
|
||||
from pathlib import Path
|
||||
|
||||
import pytest
|
||||
|
||||
from vllm.config import ModelConfig, ParallelConfig, SpeculativeConfig
|
||||
from vllm.transformers_utils.model_arch_config_convertor import (
|
||||
ModelArchConfigConvertorBase,
|
||||
)
|
||||
|
||||
# Base models whose Hugging Face configs can only be loaded with
# trust_remote_code=True.
BASE_TRUST_REMOTE_CODE_MODELS = {
    "nvidia/Llama-3_3-Nemotron-Super-49B-v1",
    "XiaomiMiMo/MiMo-7B-RL",
    # Excluded: Not available online right now
    # "FreedomIntelligence/openPangu-Ultra-MoE-718B-V1.1",
    "meituan-longcat/LongCat-Flash-Chat",
}

# Base (target) models whose parsed architecture config is compared against
# base_model_arch_groundtruth.json.
BASE_MODELS_TO_TEST = [
    "state-spaces/mamba-130m-hf",
    "mistralai/Mamba-Codestral-7B-v0.1",
    # Excluded: terratorch/torchgeo version mismatch in CPU CI environment
    # (NonGeoDataset import error). Tested in model initialization tests.
    # "ibm-nasa-geospatial/Prithvi-EO-2.0-300M-TL-Sen1Floods11",
    "Zyphra/Zamba2-7B-instruct",
    # FIXME: mosaicml/mpt-7b has been deleted
    # "mosaicml/mpt-7b",
    # FIXME: databricks/dbrx-instruct has been deleted
    # "databricks/dbrx-instruct",
    "tiiuae/falcon-7b",
    "tiiuae/falcon-40b",
    "luccafong/deepseek_mtp_main_random",
    "Qwen/Qwen3-Next-80B-A3B-Instruct",
    "tiny-random/qwen3-next-moe",
    "zai-org/GLM-4.5",
    "baidu/ERNIE-4.5-21B-A3B-PT",
    # Models using base convertor
    "lmsys/gpt-oss-20b-bf16",
    "deepseek-ai/DeepSeek-V3.2-Exp",
    "meta-llama/Llama-4-Scout-17B-16E-Instruct",
] + list(BASE_TRUST_REMOTE_CODE_MODELS)

# (target_model, draft_model, trust_remote_code)
SPECULATIVE_MODELS = [
    ("JackFram/llama-68m", "abhigoyal/vllm-medusa-llama-68m-random", False),
    ("luccafong/deepseek_mtp_main_random", "luccafong/deepseek_mtp_draft_random", True),
    ("eagle618/deepseek-v3-random", "eagle618/eagle-deepseek-v3-random", True),
    ("meta-llama/Meta-Llama-3-8B-Instruct", "yuhuili/EAGLE-LLaMA3-Instruct-8B", True),
    ("meta-llama/Llama-3.1-8B-Instruct", "yuhuili/EAGLE3-LLaMA3.1-Instruct-8B", True),
]
|
||||
|
||||
|
||||
def _load_groundtruth(filename: str) -> dict:
|
||||
"""Load groundtruth JSON from the test directory."""
|
||||
groundtruth_path = Path(__file__).parent / filename
|
||||
with open(groundtruth_path) as f:
|
||||
return json.load(f)
|
||||
|
||||
|
||||
def _assert_model_arch_config(
    model_config, expected: dict, check_head_size: bool = True
):
    """Assert model_arch_config matches expected values."""
    arch_cfg = model_config.model_arch_config

    # Fields that can be compared directly against the groundtruth entry.
    for field in (
        "architectures",
        "model_type",
        "text_model_type",
        "hidden_size",
        "total_num_hidden_layers",
        "total_num_attention_heads",
        "vocab_size",
        "total_num_kv_heads",
        "num_experts",
        "is_deepseek_mla",
    ):
        assert getattr(arch_cfg, field) == expected[field]

    # dtype is resolved from the HF config through the convertor helper
    # rather than read off the arch config directly.
    torch_dtype = ModelArchConfigConvertorBase.get_torch_dtype(
        model_config.hf_config, model_config.model, revision=model_config.revision
    )
    assert str(torch_dtype) == expected["dtype"]

    # head_size can be ill-defined for some draft models (see callers),
    # so it is only compared on request.
    if check_head_size:
        assert arch_cfg.head_size == expected["head_size"]
|
||||
|
||||
|
||||
def _assert_model_config_methods(
|
||||
model_config, expected: dict, check_head_size: bool = True
|
||||
):
|
||||
"""Assert model_config methods return expected values."""
|
||||
assert model_config.architectures == expected["architectures"]
|
||||
assert model_config.get_vocab_size() == expected["vocab_size"]
|
||||
assert model_config.get_hidden_size() == expected["hidden_size"]
|
||||
assert model_config.get_total_num_kv_heads() == expected["total_num_kv_heads"]
|
||||
assert model_config.get_num_experts() == expected["num_experts"]
|
||||
assert (
|
||||
model_config.get_total_num_hidden_layers()
|
||||
== expected["total_num_hidden_layers"]
|
||||
)
|
||||
|
||||
if check_head_size:
|
||||
assert model_config.get_head_size() == expected["head_size"]
|
||||
|
||||
|
||||
@pytest.mark.parametrize("model", BASE_MODELS_TO_TEST)
|
||||
def test_base_model_arch_config(model: str):
|
||||
"""Test model architecture config for base models."""
|
||||
groundtruth = _load_groundtruth("base_model_arch_groundtruth.json")
|
||||
expected = groundtruth[model]
|
||||
|
||||
model_config = ModelConfig(
|
||||
model, trust_remote_code=model in BASE_TRUST_REMOTE_CODE_MODELS
|
||||
)
|
||||
|
||||
_assert_model_arch_config(model_config, expected)
|
||||
_assert_model_config_methods(model_config, expected)
|
||||
|
||||
|
||||
@pytest.mark.parametrize(
    "target_model,draft_model,trust_remote_code", SPECULATIVE_MODELS
)
def test_draft_model_arch_config(
    target_model: str, draft_model: str, trust_remote_code: bool
):
    """Test model architecture config for draft/speculative models."""
    expected = _load_groundtruth("draft_model_arch_groundtruth.json")[draft_model]

    target_model_config = ModelConfig(target_model, trust_remote_code=trust_remote_code)
    speculative_config = SpeculativeConfig(
        model=draft_model,
        num_speculative_tokens=1,
        target_model_config=target_model_config,
        target_parallel_config=ParallelConfig(),
    )
    draft_config = speculative_config.draft_model_config

    # For medusa models, head_size may cause division by zero before
    # model_arch_config was introduced, so we conditionally check it
    check_head_size = isinstance(expected["head_size"], int)

    _assert_model_arch_config(draft_config, expected, check_head_size=check_head_size)
    _assert_model_config_methods(
        draft_config, expected, check_head_size=check_head_size
    )
|
||||
Reference in New Issue
Block a user