[Bugfix]: resolve torch.compile cache conflict between mm_encoder_tp_modes (#32842)
Signed-off-by: Hongjian Zhang <zhanghongjian@xiaohongshu.com>
Signed-off-by: Xingran Wang <wangxingran123456@outlook.com>
Co-authored-by: Xingran Wang <wangxingran123456@outlook.com>
@@ -213,7 +213,8 @@ class MultiModalConfig:
         factors: list[Any] = [
             self.mm_encoder_attn_backend.name
             if self.mm_encoder_attn_backend is not None
-            else None
+            else None,
+            self.mm_encoder_tp_mode,
         ]
         hash_str = safe_hash(str(factors).encode(), usedforsecurity=False).hexdigest()
         return hash_str
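A minimal sketch of why the added factor matters: the compile cache key is a hash over a list of config factors, so any setting left out of the list cannot distinguish cached artifacts. This is not vLLM's actual code; hashlib.sha256 stands in for safe_hash, and the "data"/"weights" mode values are illustrative.

import hashlib


def compute_hash(attn_backend: str | None, tp_mode: str, include_tp_mode: bool) -> str:
    # Factors omitted from this list cannot affect the resulting cache key.
    factors = [attn_backend if attn_backend is not None else None]
    if include_tp_mode:  # the behavior this fix adds
        factors.append(tp_mode)
    return hashlib.sha256(str(factors).encode()).hexdigest()


# Before the fix, configs differing only in mm_encoder_tp_mode collided:
assert compute_hash(None, "data", False) == compute_hash(None, "weights", False)
# After the fix, each tp mode yields a distinct cache key:
assert compute_hash(None, "data", True) != compute_hash(None, "weights", True)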
@@ -263,6 +263,12 @@ class VllmConfig:
         vllm_factors.append(__version__)
         if self.model_config:
             vllm_factors.append(self.model_config.compute_hash())
+            if (
+                self.compilation_config
+                and getattr(self.compilation_config, "compile_mm_encoder", False)
+                and self.model_config.multimodal_config
+            ):
+                vllm_factors.append(self.model_config.multimodal_config.compute_hash())
         else:
             vllm_factors.append("None")
         if self.cache_config:
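A minimal sketch of the gating added to VllmConfig: the multimodal config hash is folded into the top-level factor list only when the mm encoder is actually compiled, so runs that never compile it keep their existing cache keys. The SimpleNamespace objects below are stand-ins for the real config classes, not vLLM's implementation.

from types import SimpleNamespace


def collect_factors(vllm_config) -> list[str]:
    factors: list[str] = []
    model_config = vllm_config.model_config
    if model_config:
        factors.append(model_config.compute_hash())
        compilation_config = vllm_config.compilation_config
        # Only fold in the multimodal hash when the encoder is compiled.
        if (
            compilation_config
            and getattr(compilation_config, "compile_mm_encoder", False)
            and model_config.multimodal_config
        ):
            factors.append(model_config.multimodal_config.compute_hash())
    else:
        factors.append("None")
    return factors


mm = SimpleNamespace(compute_hash=lambda: "mm-hash")
model = SimpleNamespace(compute_hash=lambda: "model-hash", multimodal_config=mm)
cfg = SimpleNamespace(
    model_config=model,
    compilation_config=SimpleNamespace(compile_mm_encoder=True),
)
# With compile_mm_encoder enabled, the multimodal hash joins the key:
assert collect_factors(cfg) == ["model-hash", "mm-hash"]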