From 1209b784f2ba976eff2ea24bc33c61f35c6eb213 Mon Sep 17 00:00:00 2001
From: "Hiroken." <105287758+HirokenOvo@users.noreply.github.com>
Date: Sat, 24 Jan 2026 22:45:14 +0800
Subject: [PATCH] [Bugfix]: resolve torch.compile cache conflict between
 mm_encoder_tp_modes (#32842)

Signed-off-by: Hongjian Zhang
Signed-off-by: Xingran Wang
Co-authored-by: Xingran Wang
---
 vllm/config/multimodal.py | 3 ++-
 vllm/config/vllm.py       | 6 ++++++
 2 files changed, 8 insertions(+), 1 deletion(-)

diff --git a/vllm/config/multimodal.py b/vllm/config/multimodal.py
index 9fa2d8ae3..f4e834f64 100644
--- a/vllm/config/multimodal.py
+++ b/vllm/config/multimodal.py
@@ -213,7 +213,8 @@ class MultiModalConfig:
         factors: list[Any] = [
             self.mm_encoder_attn_backend.name
             if self.mm_encoder_attn_backend is not None
-            else None
+            else None,
+            self.mm_encoder_tp_mode,
         ]
         hash_str = safe_hash(str(factors).encode(), usedforsecurity=False).hexdigest()
         return hash_str
diff --git a/vllm/config/vllm.py b/vllm/config/vllm.py
index 69cce72f1..1f8f5e5db 100644
--- a/vllm/config/vllm.py
+++ b/vllm/config/vllm.py
@@ -263,6 +263,12 @@ class VllmConfig:
         vllm_factors.append(__version__)
         if self.model_config:
             vllm_factors.append(self.model_config.compute_hash())
+            if (
+                self.compilation_config
+                and getattr(self.compilation_config, "compile_mm_encoder", False)
+                and self.model_config.multimodal_config
+            ):
+                vllm_factors.append(self.model_config.multimodal_config.compute_hash())
         else:
             vllm_factors.append("None")
         if self.cache_config:
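
Background, as a minimal self-contained sketch of the hashing pattern this
patch relies on. The ToyMultiModalConfig class below is a hypothetical
stand-in for vLLM's MultiModalConfig, plain sha256 replaces vLLM's internal
safe_hash helper, and the "weights"/"data" mode values are assumed; only the
pattern matches the patch: every option that changes the compiled encoder
graph must appear in the factor list feeding the compilation cache key,
otherwise two runs that differ only in mm_encoder_tp_mode collide on the
same torch.compile cache entry.

    import hashlib
    from dataclasses import dataclass
    from typing import Any


    @dataclass
    class ToyMultiModalConfig:
        # Hypothetical stand-in for vLLM's MultiModalConfig.
        mm_encoder_attn_backend: str | None = None
        mm_encoder_tp_mode: str = "weights"  # assumed values: "weights" / "data"

        def compute_hash(self) -> str:
            # Same pattern as the patched MultiModalConfig.compute_hash():
            # every graph-affecting option goes into the factor list.
            factors: list[Any] = [
                self.mm_encoder_attn_backend,
                self.mm_encoder_tp_mode,  # the factor this patch adds
            ]
            return hashlib.sha256(str(factors).encode()).hexdigest()


    # Configs that differ only in encoder TP mode now hash differently, so a
    # torch.compile artifact cached for one mode is not reused for the other.
    a = ToyMultiModalConfig(mm_encoder_tp_mode="weights")
    b = ToyMultiModalConfig(mm_encoder_tp_mode="data")
    assert a.compute_hash() != b.compute_hash()

Note the vllm.py hunk guards the new factor behind compile_mm_encoder, so
cache keys for deployments that never compile the multimodal encoder are
unchanged.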