# SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project

import enum
import time
from collections.abc import Mapping
from typing import Any, Optional, Union

import msgspec
import torch

from vllm.lora.request import LoRARequest
from vllm.multimodal.inputs import MultiModalFeatureSpec
from vllm.pooling_params import PoolingParams
from vllm.sampling_params import SamplingParams
from vllm.v1.metrics.stats import SchedulerStats
from vllm.v1.outputs import LogprobsLists, LogprobsTensors

# These are possible values of RequestOutput.finish_reason,
# so form part of the external API.
FINISH_REASON_STRINGS = ("stop", "length", "abort")


class FinishReason(enum.IntEnum):
    """
    Reason a request finished - stop, length, or abort.

    Int rather than Str for more compact serialization.

    stop - a stop string was emitted
    length - max_tokens was consumed, or max_model_len was reached
    abort - aborted for another reason
    """

    STOP = 0
    LENGTH = 1
    ABORT = 2

    def __str__(self):
        return FINISH_REASON_STRINGS[self.value]
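

# Illustrative sketch (not part of the vLLM API): FinishReason travels as a
# compact int, and str() recovers the external string form exposed through
# RequestOutput.finish_reason.
def _example_finish_reason() -> None:
    reason = FinishReason.LENGTH
    assert int(reason) == 1  # compact serialized form
    assert str(reason) == "length"  # external API form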


class EngineCoreRequest(
        msgspec.Struct,
        array_like=True,  # type: ignore[call-arg]
        omit_defaults=True,  # type: ignore[call-arg]
        gc=False):  # type: ignore[call-arg]

    request_id: str
    prompt_token_ids: list[int]
    mm_features: Optional[list[MultiModalFeatureSpec]]
    sampling_params: Optional[SamplingParams]
    pooling_params: Optional[PoolingParams]
    eos_token_id: Optional[int]
    arrival_time: float
    lora_request: Optional[LoRARequest]
    cache_salt: Optional[str]
    data_parallel_rank: Optional[int]

    # Index of the client, used to ensure outputs are sent back to the same
    # client for this request when scaling out the front-end.
    client_index: int = 0

    # Used in the DP case to indicate which wave of requests this is expected
    # to belong to, to cover a race condition where the request is sent before
    # a wave-finished notification is received.
    current_wave: int = 0
    priority: int = 0

    trace_headers: Optional[Mapping[str, str]] = None
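

# Illustrative sketch (not part of the vLLM API; the field values below are
# placeholders): array_like=True makes msgspec encode the struct as a
# positional array rather than a field-name map, and omit_defaults=True drops
# trailing default-valued fields. vLLM's transport layer adds custom
# encoder/decoder hooks for non-native types such as tensors, so this sketch
# keeps those fields as None.
def _example_encode_request() -> bytes:
    request = EngineCoreRequest(
        request_id="req-0",
        prompt_token_ids=[1, 2, 3],
        mm_features=None,
        sampling_params=None,
        pooling_params=None,
        eos_token_id=None,
        arrival_time=time.time(),
        lora_request=None,
        cache_salt=None,
        data_parallel_rank=None,
    )
    payload = msgspec.msgpack.encode(request)
    # Decoding without a target type shows the positional array layout:
    # element 0 is request_id.
    assert msgspec.msgpack.decode(payload)[0] == "req-0"
    return payload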


class EngineCoreEventType(enum.IntEnum):
    """The type of engine core request event."""
    QUEUED = 1
    SCHEDULED = 2
    PREEMPTED = 3


class EngineCoreEvent(msgspec.Struct):
    """A timestamped engine core event associated with a request.

    The timestamp is a monotonic timestamp and is used by the engine
    frontend to calculate intervals between engine core events. These
    timestamps should not be compared with timestamps from other processes.
    """
    type: EngineCoreEventType
    timestamp: float

    @classmethod
    def new_event(cls,
                  event_type: EngineCoreEventType,
                  timestamp: Optional[float] = None) -> "EngineCoreEvent":
        timestamp = time.monotonic() if timestamp is None else timestamp
        return cls(event_type, timestamp)
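

# Illustrative sketch (not part of the vLLM API): the frontend derives
# per-request intervals (e.g. time spent queued) by subtracting monotonic
# event timestamps taken in the same engine core process.
def _example_queue_interval() -> float:
    queued = EngineCoreEvent.new_event(EngineCoreEventType.QUEUED)
    scheduled = EngineCoreEvent.new_event(EngineCoreEventType.SCHEDULED)
    return scheduled.timestamp - queued.timestamp  # seconds, >= 0.0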


class EngineCoreOutput(
        msgspec.Struct,
        array_like=True,  # type: ignore[call-arg]
        omit_defaults=True,  # type: ignore[call-arg]
        gc=False):  # type: ignore[call-arg]

    request_id: str
    new_token_ids: list[int]

    new_logprobs: Optional[LogprobsLists] = None
    new_prompt_logprobs_tensors: Optional[LogprobsTensors] = None

    pooling_output: Optional[torch.Tensor] = None

    finish_reason: Optional[FinishReason] = None
    stop_reason: Union[int, str, None] = None
    events: Optional[list[EngineCoreEvent]] = None
    kv_transfer_params: Optional[dict[str, Any]] = None

    trace_headers: Optional[Mapping[str, str]] = None
    # The number of tokens with prefix cache hits.
    num_cached_tokens: int = 0

    @property
    def finished(self) -> bool:
        return self.finish_reason is not None
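

# Illustrative sketch (not part of the vLLM API; token ids are placeholders):
# an output without a finish_reason is an in-progress step; once any
# FinishReason is set, the finished property reports completion.
def _example_finished_flag() -> None:
    step = EngineCoreOutput(request_id="req-0", new_token_ids=[42])
    assert not step.finished
    final = EngineCoreOutput(request_id="req-0",
                             new_token_ids=[],
                             finish_reason=FinishReason.STOP)
    assert final.finished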


class UtilityResult:
    """Wrapper for special handling when serializing/deserializing."""

    def __init__(self, r: Any = None):
        self.result = r


class UtilityOutput(
        msgspec.Struct,
        array_like=True,  # type: ignore[call-arg]
        gc=False):  # type: ignore[call-arg]

    call_id: int

    # Non-None implies the call failed; in that case result should be None.
    failure_message: Optional[str] = None
    result: Optional[UtilityResult] = None
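

# Illustrative sketch (not part of the vLLM API; the call ids and payload are
# placeholders): a utility call either succeeds with a wrapped result or
# fails with a message, never both.
def _example_utility_outputs() -> None:
    ok = UtilityOutput(call_id=1, result=UtilityResult({"num_blocks": 128}))
    failed = UtilityOutput(call_id=2, failure_message="executor unavailable")
    assert ok.failure_message is None and ok.result is not None
    assert failed.result is None and failed.failure_message is not None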


class EngineCoreOutputs(
        msgspec.Struct,
        array_like=True,  # type: ignore[call-arg]
        omit_defaults=True,  # type: ignore[call-arg]
        gc=False):  # type: ignore[call-arg]

    # NOTE(Nick): We could consider ways to make this more compact,
    # e.g. columnwise layout

    engine_index: int = 0

    # [num_reqs]
    outputs: list[EngineCoreOutput] = []
    scheduler_stats: Optional[SchedulerStats] = None
    timestamp: float = 0.0

    utility_output: Optional[UtilityOutput] = None
    finished_requests: Optional[set[str]] = None

    # In the DP case, used to signal that the current wave of requests
    # has finished and the engines are paused.
    wave_complete: Optional[int] = None
    # In the DP case, used to signal that a request was received for an
    # "old" wave, so the next wave needs to be started in other engines.
    start_wave: Optional[int] = None

    def __post_init__(self):
        if self.timestamp == 0.0:
            self.timestamp = time.monotonic()
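

# Illustrative sketch (not part of the vLLM API): __post_init__ stamps a
# monotonic timestamp at construction time when none is supplied, so every
# batch of outputs carries a timestamp without callers having to set one.
def _example_outputs_timestamp() -> None:
    outputs = EngineCoreOutputs(outputs=[])
    assert outputs.timestamp > 0.0  # filled in by __post_init__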


class EngineCoreRequestType(enum.Enum):
    """
    Request types defined as hex byte strings, so they can be sent over
    sockets without a separate encoding step.
    """
    ADD = b'\x00'
    ABORT = b'\x01'
    START_DP_WAVE = b'\x02'
    UTILITY = b'\x03'
    # Sentinel used within EngineCoreProc.
    EXECUTOR_FAILED = b'\x04'
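

# Illustrative sketch (not part of the vLLM API; the framing is hypothetical):
# because each request type is a single byte, it can prefix a message frame
# directly, and the receiver recovers it from the first byte with no separate
# encoding step.
def _example_frame_request(payload: bytes) -> bytes:
    frame = EngineCoreRequestType.ADD.value + payload
    assert EngineCoreRequestType(frame[:1]) is EngineCoreRequestType.ADD
    return frame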


class ReconfigureDistributedRequest(msgspec.Struct):
    new_data_parallel_size: int
    new_data_parallel_rank: int
    new_data_parallel_rank_local: int
    new_data_parallel_master_ip: str
    new_data_parallel_master_port: int


class ReconfigureRankType(enum.IntEnum):
    """
    Special rank values for ReconfigureDistributedRequest.
    """
    KEEP_CURRENT_RANK = -1
    SHUTDOWN_CURRENT_RANK = -2
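

# Illustrative sketch (not part of the vLLM API; the size, IP, and port are
# placeholders): the negative sentinels let a reconfiguration request say
# "keep this engine's current rank" (or "shut it down") without naming a
# concrete new rank.
def _example_keep_rank_request() -> ReconfigureDistributedRequest:
    return ReconfigureDistributedRequest(
        new_data_parallel_size=4,
        new_data_parallel_rank=ReconfigureRankType.KEEP_CURRENT_RANK,
        new_data_parallel_rank_local=ReconfigureRankType.KEEP_CURRENT_RANK,
        new_data_parallel_master_ip="127.0.0.1",
        new_data_parallel_master_port=29500,
    )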