[V0 Deprecation] Remove unused MM placeholders in request output (#34944)

Signed-off-by: DarkLight1337 <tlleungac@connect.ust.hk>
This commit is contained in:
Cyrus Leung
2026-02-20 22:19:23 +08:00
committed by GitHub
parent 59965affbd
commit f9ac19204f

View File

@@ -13,7 +13,6 @@ from typing_extensions import TypeVar
from vllm.logger import init_logger
from vllm.logprobs import PromptLogprobs, SampleLogprobs
from vllm.lora.request import LoRARequest
-from vllm.multimodal.inputs import MultiModalPlaceholderDict
from vllm.v1.metrics.stats import RequestStateStats
logger = init_logger(__name__)
@@ -121,7 +120,6 @@ class RequestOutput:
encoder_prompt_token_ids: list[int] | None = None,
num_cached_tokens: int | None = None,
*,
-    multi_modal_placeholders: MultiModalPlaceholderDict | None = None,
kv_transfer_params: dict[str, Any] | None = None,
# Forward compatibility, code that uses args added in new release can
# still run with older versions of vLLM without breaking.
@@ -134,7 +132,6 @@ class RequestOutput:
self.request_id = request_id
self.prompt = prompt
self.prompt_token_ids = prompt_token_ids
-        self.multi_modal_placeholders = multi_modal_placeholders or {}
self.prompt_logprobs = prompt_logprobs
self.outputs = outputs
self.finished = finished
@@ -187,8 +184,7 @@ class RequestOutput:
f"finished={self.finished}, "
f"metrics={self.metrics}, "
f"lora_request={self.lora_request}, "
-            f"num_cached_tokens={self.num_cached_tokens}, "
-            f"multi_modal_placeholders={self.multi_modal_placeholders})"
+            f"num_cached_tokens={self.num_cached_tokens})"
)