Update Optional[x] -> x | None and Union[x, y] -> x | y (#26633)

Signed-off-by: Harry Mellor <19981378+hmellor@users.noreply.github.com>
Harry Mellor authored on 2025-10-12 17:51:31 +01:00; committed by GitHub
parent 9bb38130cb, commit 8fcaaf6a16
944 changed files with 9490 additions and 10121 deletions
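This is a mechanical modernization: PEP 604 (Python 3.10) lets the `|` operator spell unions directly, so `Optional` and `Union` no longer need to be imported from `typing`. A minimal sketch of the equivalence, using illustrative names that are not from the vLLM codebase:

from typing import Optional, Union

# Pre-PEP 604 spelling: unions require typing imports.
def describe_old(x: Optional[int], mode: Union[int, str]) -> Optional[str]:
    return None if x is None else f"{x}:{mode}"

# PEP 604 spelling (Python 3.10+): `|` builds the same unions inline.
def describe_new(x: int | None, mode: int | str) -> str | None:
    return None if x is None else f"{x}:{mode}"

# The two spellings compare equal at runtime on Python 3.10+.
assert (int | None) == Optional[int]
assert (int | str) == Union[int, str]

Static checkers treat the two spellings identically, so a sweep like this changes no behavior; the payoff is shorter signatures and fewer typing imports, as the hunks below show.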


@@ -4,7 +4,7 @@
 """Command-A-Vision (Cohere2Vision) multimodal model implementation for vLLM."""

 from collections.abc import Iterable, Mapping, Sequence
-from typing import Annotated, Literal, Optional, Union
+from typing import Annotated, Literal

 import torch
 from torch import nn
@@ -148,7 +148,7 @@ class Cohere2VisionProcessingInfo(BaseProcessingInfo):
     def get_image_processor(self, **kwargs: object):
         return self.get_hf_processor(**kwargs).image_processor

-    def get_supported_mm_limits(self) -> Mapping[str, Optional[int]]:
+    def get_supported_mm_limits(self) -> Mapping[str, int | None]:
         return {"image": None}

     def get_image_size_with_most_features(self) -> ImageSize:
@@ -163,7 +163,7 @@ class Cohere2VisionProcessingInfo(BaseProcessingInfo):
         *,
         image_width: int,
         image_height: int,
-        processor: Optional[Cohere2VisionProcessor],
+        processor: Cohere2VisionProcessor | None,
     ) -> int:
         """
         Calculate the number of image patches for a given image.
@@ -217,7 +217,7 @@ class Cohere2VisionDummyInputsBuilder(
         self,
         seq_len: int,
         mm_counts: Mapping[str, int],
-        mm_options: Optional[Mapping[str, BaseDummyOptions]] = None,
+        mm_options: Mapping[str, BaseDummyOptions] | None = None,
     ) -> MultiModalDataDict:
         num_images = mm_counts.get("image", 0)
         image_size = self.info.get_image_size_with_most_features()
@@ -404,7 +404,7 @@ class Cohere2VisionForConditionalGeneration(nn.Module, SupportsMultiModal, Suppo
     def _parse_and_validate_image_input(
         self, **kwargs: object
-    ) -> Optional[Cohere2VisionImagePixelInputs]:
+    ) -> Cohere2VisionImagePixelInputs | None:
         pixel_values = kwargs.pop("pixel_values", None)
         num_patches = kwargs.pop("num_patches", None)
         image_embeds = kwargs.pop("image_embeds", None)
@@ -450,10 +450,10 @@ class Cohere2VisionForConditionalGeneration(nn.Module, SupportsMultiModal, Suppo
         self,
         input_ids: torch.Tensor,
         positions: torch.Tensor,
-        intermediate_tensors: Optional[IntermediateTensors] = None,
-        inputs_embeds: Optional[torch.Tensor] = None,
+        intermediate_tensors: IntermediateTensors | None = None,
+        inputs_embeds: torch.Tensor | None = None,
         **kwargs: object,
-    ) -> Union[torch.Tensor, IntermediateTensors]:
+    ) -> torch.Tensor | IntermediateTensors:
         if intermediate_tensors is not None:
             inputs_embeds = None
@@ -468,5 +468,5 @@ class Cohere2VisionForConditionalGeneration(nn.Module, SupportsMultiModal, Suppo
     def compute_logits(
         self,
         hidden_states: torch.Tensor,
-    ) -> Optional[torch.Tensor]:
+    ) -> torch.Tensor | None:
         return self.language_model.compute_logits(hidden_states)
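One portability note, offered as general background rather than anything this commit states: unless PEP 563 lazy annotations are enabled, `|` in an annotation is evaluated at import time and therefore needs Python 3.10+. A small sketch of that trade-off, again with illustrative names:

from __future__ import annotations  # PEP 563: annotations are stored as strings

import sys

# With the future import, `X | None` in an annotation parses on older
# interpreters too, because the annotation is never evaluated at def time.
def lookup(key: str | None) -> dict[str, int] | None:
    return None

# Runtime union expressions still require Python 3.10+, where `|` on
# types builds a types.UnionType that also works with isinstance().
if sys.version_info >= (3, 10):
    MaybeInt = int | None
    assert isinstance(None, MaybeInt)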