[Bugfix]: resolve torch.compile cache conflict between mm_encoder_tp_modes (#32842)

Signed-off-by: Hongjian Zhang <zhanghongjian@xiaohongshu.com>
Signed-off-by: Xingran Wang <wangxingran123456@outlook.com>
Co-authored-by: Xingran Wang <wangxingran123456@outlook.com>
This commit is contained in:
Hiroken.
2026-01-24 22:45:14 +08:00
committed by GitHub
parent 5fa0f6efa9
commit 1209b784f2
2 changed files with 8 additions and 1 deletion

View File

@@ -213,7 +213,8 @@ class MultiModalConfig:
factors: list[Any] = [
self.mm_encoder_attn_backend.name
if self.mm_encoder_attn_backend is not None
else None
else None,
self.mm_encoder_tp_mode,
]
hash_str = safe_hash(str(factors).encode(), usedforsecurity=False).hexdigest()
return hash_str

View File

@@ -263,6 +263,12 @@ class VllmConfig:
vllm_factors.append(__version__)
if self.model_config:
vllm_factors.append(self.model_config.compute_hash())
if (
self.compilation_config
and getattr(self.compilation_config, "compile_mm_encoder", False)
and self.model_config.multimodal_config
):
vllm_factors.append(self.model_config.multimodal_config.compute_hash())
else:
vllm_factors.append("None")
if self.cache_config: