[Bugfix] Exclude language_model_only key from MM AOT compile hash but include in model one (#34508)

Signed-off-by: Roger Wang <hey@rogerw.io>
This commit is contained in:
Roger Wang
2026-02-13 05:59:00 -08:00
committed by GitHub
parent 5885e330ef
commit 1dae7b7843
3 changed files with 24 additions and 1 deletions

View File

@@ -3,6 +3,7 @@
import pytest
from vllm.config.model import ModelConfig
from vllm.config.multimodal import MultiModalConfig
from vllm.v1.attention.backends.registry import AttentionBackendEnum
@@ -23,3 +24,20 @@ def test_mm_encoder_attn_backend_hash_updates():
mm_encoder_attn_backend=AttentionBackendEnum.FLASH_ATTN
).compute_hash()
assert base_hash != overridden_hash
def test_language_model_only_does_not_affect_mm_hash():
    """The ViT computation graph is independent of language_model_only,
    so toggling the flag must leave the multimodal config hash unchanged."""
    without_flag = MultiModalConfig().compute_hash()
    with_flag = MultiModalConfig(language_model_only=True).compute_hash()
    assert without_flag == with_flag
def test_language_model_only_affects_model_hash():
    """language_model_only alters the LM computation graph, so setting
    the flag must produce a different model config hash."""
    model_id = "llava-hf/llava-1.5-7b-hf"
    default_hash = ModelConfig(model_id).compute_hash()
    flagged_hash = ModelConfig(model_id, language_model_only=True).compute_hash()
    assert default_hash != flagged_hash