[Core] Store only the keys for multi-modal data in P0 (#22198)

Signed-off-by: DarkLight1337 <tlleungac@connect.ust.hk>
This commit is contained in:
Cyrus Leung
2025-08-07 16:45:04 +08:00
committed by GitHub
parent 289b18e670
commit 766bc8162c
17 changed files with 325 additions and 234 deletions

View File

@@ -35,7 +35,7 @@ from vllm.v1.engine import (EngineCoreOutputs, EngineCoreRequest,
EngineCoreRequestType,
ReconfigureDistributedRequest, ReconfigureRankType,
UtilityOutput, UtilityResult)
from vllm.v1.engine.mm_input_cache import MirroredProcessingCache
from vllm.v1.engine.mm_input_cache import MultiModalInputCacheServer
from vllm.v1.engine.utils import EngineHandshakeMetadata, EngineZmqAddresses
from vllm.v1.executor.abstract import Executor
from vllm.v1.kv_cache_interface import KVCacheConfig
@@ -124,8 +124,7 @@ class EngineCore:
log_stats=self.log_stats,
)
# Setup MM Input Mapper.
self.mm_input_cache_server = MirroredProcessingCache(
self.mm_input_cache_server = MultiModalInputCacheServer(
vllm_config.model_config)
# Setup batch queue for pipeline parallelism.
@@ -413,7 +412,7 @@ class EngineCore:
# Note on thread safety: no race condition.
# `mm_input_cache_server` is reset at the end of LLMEngine init,
# and will only be accessed in the input processing thread afterwards.
request.mm_inputs = self.mm_input_cache_server.get_and_update_p1(
request.mm_inputs = self.mm_input_cache_server.get_and_update(
request.mm_inputs, request.mm_hashes)
req = Request.from_engine_core_request(request)