Update deprecated Python 3.8 typing (#13971)

Author: Harry Mellor
Date: 2025-03-03 01:34:51 +00:00
Committed by: GitHub
Parent: bf33700ecd
Commit: cf069aa8aa
300 changed files with 2294 additions and 2347 deletions
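
The pattern repeated across these files is the PEP 585 migration: on Python 3.9+ the built-in containers are generic, so the deprecated typing.List, typing.Tuple, and typing.Type aliases can be dropped, while Optional and Union continue to be imported from typing. A minimal before/after sketch of the pattern, with illustrative names rather than lines from this commit:

    # Before: Python 3.8-compatible annotations
    from typing import Dict, List, Optional, Tuple, Type

    def load(paths: List[str], limits: Dict[str, int],
             error_cls: Type[Exception]) -> Optional[Tuple[int, int]]:
        ...

    # After: PEP 585 built-in generics (Python 3.9+). Optional and Union
    # are left as-is, matching what this commit does.
    from typing import Optional

    def load(paths: list[str], limits: dict[str, int],
             error_cls: type[Exception]) -> Optional[tuple[int, int]]:
        ...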


@@ -1,6 +1,6 @@
 # SPDX-License-Identifier: Apache-2.0
-from typing import Any, List, Optional, Tuple, Type, TypedDict, Union
+from typing import Any, Optional, TypedDict, Union
 import numpy.typing as npt
 import pytest
@@ -69,21 +69,21 @@ class Qwen2VLPromptVideoEmbeddingInput(TypedDict):
 def batch_make_image_embeddings(
-        image_batches: List[Union[Image.Image, List[Image.Image]]], processor,
-        llm: VllmRunner) -> List[Qwen2VLPromptImageEmbeddingInput]:
+        image_batches: list[Union[Image.Image, list[Image.Image]]], processor,
+        llm: VllmRunner) -> list[Qwen2VLPromptImageEmbeddingInput]:
     """batched image embeddings for Qwen2-VL

     This will infer all images' embeddings in a single batch,
     and split the result according to input batches.

     image_batches:
-        - Single-image batches: `List[Image.Image]`
-        - Multiple-image batches: `List[List[Image.Image]]]`
+        - Single-image batches: `list[Image.Image]`
+        - Multiple-image batches: `list[list[Image.Image]]]`

-    returns: `List[Qwen2VLPromptImageEmbeddingInput]`
+    returns: `list[Qwen2VLPromptImageEmbeddingInput]`
     """
-    image_batches_: List[Any] = image_batches[:]
+    image_batches_: list[Any] = image_batches[:]

     # convert single-image batches to multiple-image batches
     for idx in range(len(image_batches_)):
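
The docstring in the hunk above summarizes the helper's shape: normalize single-image batches into lists, embed everything in one flat batch, then split the flat result back out by each input batch's length. A simplified, self-contained sketch of that normalize/flatten/split pattern, using strings in place of PIL images and real embeddings (a hypothetical helper, not the vLLM code):

    from typing import Any, Union

    def normalize_flatten_split(
            batches: list[Union[Any, list[Any]]]) -> list[list[str]]:
        # Normalize: wrap bare items so every batch is a list.
        batches_ = [b if isinstance(b, list) else [b] for b in batches]
        # Flatten and "embed" in one pass (stand-in for the single
        # batched model call the real helper makes).
        flat = [item for batch in batches_ for item in batch]
        embeds = [f"embed({item!r})" for item in flat]
        # Split: hand each original batch back its slice of the flat result.
        result: list[list[str]] = []
        counter = 0
        for batch in batches_:
            result.append(embeds[counter:counter + len(batch)])
            counter += len(batch)
        return result

    # normalize_flatten_split(["a", ["b", "c"]])
    #   -> [["embed('a')"], ["embed('b')", "embed('c')"]]
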
@@ -93,7 +93,7 @@ def batch_make_image_embeddings(
         assert isinstance(image_batches_[idx], list)

     # append all images into a list (as a batch)
-    images: List[Image.Image] = []
+    images: list[Image.Image] = []
     for image_batch in image_batches_:
         images += image_batch
@@ -121,7 +121,7 @@ def batch_make_image_embeddings(
     image_embeds = torch.concat(llm.apply_model(get_image_embeds))

     # split into original batches
-    result: List[Qwen2VLPromptImageEmbeddingInput] = []
+    result: list[Qwen2VLPromptImageEmbeddingInput] = []
     image_counter = 0
     embed_counter = 0
     for image_batch in image_batches_:
@@ -153,7 +153,7 @@ def batch_make_image_embeddings(
 def batch_make_video_embeddings(
         video_batches: PromptVideoInput, processor,
-        llm: VllmRunner) -> List[Qwen2VLPromptVideoEmbeddingInput]:
+        llm: VllmRunner) -> list[Qwen2VLPromptVideoEmbeddingInput]:
     """batched video embeddings for Qwen2-VL

     A NDArray represents a single video's all frames.
@@ -162,21 +162,21 @@ def batch_make_video_embeddings(
     and split the result according to input batches.

     video_batches:
-        - Single-video batches: `List[NDArray]`
-        - Multiple-video batches: `List[List[NDArray]]`
+        - Single-video batches: `list[NDArray]`
+        - Multiple-video batches: `list[list[NDArray]]`
     """
-    video_batches_: List[Any] = video_batches[:]
+    video_batches_: list[Any] = video_batches[:]

     for idx in range(len(video_batches_)):
         if not isinstance(video_batches_[idx], list):
-            single_video_batch: List[npt.NDArray] = [video_batches_[idx]]
+            single_video_batch: list[npt.NDArray] = [video_batches_[idx]]
             video_batches_[idx] = single_video_batch

         assert isinstance(video_batches_[idx], list)

     # append all videos into a list (as a batch)
-    videos: List[npt.NDArray] = []
+    videos: list[npt.NDArray] = []
     for video_batch in video_batches_:
         videos += video_batch
@@ -204,7 +204,7 @@ def batch_make_video_embeddings(
     video_embeds = torch.concat(llm.apply_model(get_image_embeds))

     # split into original batches
-    result: List[Qwen2VLPromptVideoEmbeddingInput] = []
+    result: list[Qwen2VLPromptVideoEmbeddingInput] = []
     video_counter = 0
     embed_counter = 0
     for video_batch in video_batches_:
@@ -235,8 +235,8 @@ def batch_make_video_embeddings(
 def run_embedding_input_test(
-    vllm_runner: Type[VllmRunner],
-    inputs: List[Tuple[List[str], PromptImageInput, PromptVideoInput]],
+    vllm_runner: type[VllmRunner],
+    inputs: list[tuple[list[str], PromptImageInput, PromptVideoInput]],
     model: str,
     *,
     dtype: str,
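
The signature in the hunk above also swaps Type[VllmRunner] for the lower-case type[VllmRunner]; both spellings mean the parameter is the class object itself rather than an instance. A small standalone illustration with made-up names:

    class Runner:

        def __init__(self, model: str) -> None:
            self.model = model

    def build(runner_cls: type[Runner], model: str) -> Runner:
        # The caller passes the class; this function instantiates it.
        return runner_cls(model)

    runner = build(Runner, "some-model")
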
@@ -323,8 +323,8 @@ def test_qwen2_vl_image_embeddings_input(vllm_runner, image_assets, model,
         num_logprobs: int) -> None:
     images = [asset.pil_image for asset in image_assets]

-    inputs_per_case: List[Tuple[
-        List[str], PromptImageInput, PromptVideoInput]] = [(
+    inputs_per_case: list[tuple[
+        list[str], PromptImageInput, PromptVideoInput]] = [(
             [prompt for _ in size_factors],
             [rescale_image_size(image, factor) for factor in size_factors],
             [],
@@ -365,7 +365,7 @@ def test_qwen2_vl_multiple_image_embeddings_input(vllm_runner, image_assets,
         num_logprobs: int) -> None:
     images = [asset.pil_image for asset in image_assets]

-    inputs_per_case: List[Tuple[List[str], PromptImageInput,
+    inputs_per_case: list[tuple[list[str], PromptImageInput,
                                 PromptVideoInput]] = [(
                                     [MULTIIMAGE_PROMPT for _ in size_factors],
                                     [[
@@ -413,8 +413,8 @@ def test_qwen2_vl_video_embeddings_input(vllm_runner, video_assets, model,
         for asset in video_assets
     ]

-    inputs_per_case: List[Tuple[
-        List[str], PromptImageInput, PromptVideoInput]] = [(
+    inputs_per_case: list[tuple[
+        list[str], PromptImageInput, PromptVideoInput]] = [(
             [prompt for _ in size_factors],
             [],
             [rescale_video_size(video, factor) for factor in size_factors],