Update `Optional[x]` to `x | None` and `Union[x, y]` to `x | y` (#26633)

Signed-off-by: Harry Mellor <19981378+hmellor@users.noreply.github.com>
This commit is contained in:
Harry Mellor
2025-10-12 17:51:31 +01:00
committed by GitHub
parent 9bb38130cb
commit 8fcaaf6a16
944 changed files with 9490 additions and 10121 deletions

View File

@@ -3,7 +3,7 @@
import random
from dataclasses import dataclass
from typing import Optional, Union
from typing import TypeAlias
import torch
from transformers import PreTrainedTokenizer, PreTrainedTokenizerFast
@@ -12,7 +12,7 @@ from vllm.engine.arg_utils import EngineArgs
from vllm.v1.engine import EngineCoreOutput, FinishReason
from vllm.v1.outputs import LogprobsLists, LogprobsTensors
GeneralTokenizerType = Union[PreTrainedTokenizer, PreTrainedTokenizerFast]
GeneralTokenizerType: TypeAlias = PreTrainedTokenizer | PreTrainedTokenizerFast
# Number of sample logprobs to request when testing sample logprobs
NUM_SAMPLE_LOGPROBS_UNDER_TEST = 5
@@ -332,16 +332,15 @@ class MockEngineCore:
# For each request, for each sampled token offset,
# a tuple of
# (list of topk token ids, list of sample logprob vals, rank)
generated_logprobs_raw: Optional[
list[list[tuple[list[int], list[float], int]]]
] = None,
generated_logprobs_raw: list[list[tuple[list[int], list[float], int]]]
| None = None,
# For each request, a tuple of
# (prompt logprob val matrix, prompt logprob tok id matrix);
# each matrix has dimensions
# (num prompt toks) x (num prompt logprobs+1)
prompt_logprobs_raw: Optional[list[LogprobsTensors]] = None,
eos_token_id: Optional[int] = None,
stop_token_ids: Optional[list[int]] = None,
prompt_logprobs_raw: list[LogprobsTensors] | None = None,
eos_token_id: int | None = None,
stop_token_ids: list[int] | None = None,
ignore_eos: bool = False,
) -> None:
self.num_requests = len(tokens_list)