Update Optional[x] -> x | None and Union[x, y] -> x | y (#26633)

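The change is mechanical: every Optional[X] becomes X | None, every Union[X, Y] becomes
X | Y (PEP 604 syntax, available on Python 3.10+), and the now-unused typing imports are
dropped. A minimal before/after sketch of the pattern (the function below is illustrative
only, not taken from the changed files):

    # Before: typing-module generics
    from typing import Optional, Union

    def scale(value: Union[int, float], factor: Optional[float] = None) -> Optional[float]:
        return None if factor is None else value * factor

    # After: PEP 604 unions (Python >= 3.10); no typing import needed
    def scale(value: int | float, factor: float | None = None) -> float | None:
        return None if factor is None else value * factor
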
Signed-off-by: Harry Mellor <19981378+hmellor@users.noreply.github.com>
commit 8fcaaf6a16
parent 9bb38130cb
Author: Harry Mellor
Date:   2025-10-12 17:51:31 +01:00
Committed by: GitHub

944 changed files with 9490 additions and 10121 deletions

Representative hunks from one of the changed files (the Cohere model definitions):

@@ -25,7 +25,6 @@
 from collections.abc import Iterable
 from itertools import islice
-from typing import Optional, Union
 import torch
 from torch import nn
@@ -94,8 +93,8 @@ class LayerNorm(nn.Module):
 class CohereMLP(nn.Module):
     def __init__(
         self,
-        config: Union[CohereConfig, Cohere2Config],
-        quant_config: Optional[QuantizationConfig] = None,
+        config: CohereConfig | Cohere2Config,
+        quant_config: QuantizationConfig | None = None,
         prefix: str = "",
     ):
         super().__init__()
@@ -128,9 +127,9 @@ class CohereMLP(nn.Module):
 class CohereAttention(nn.Module):
     def __init__(
         self,
-        config: Union[CohereConfig, Cohere2Config],
-        cache_config: Optional[CacheConfig] = None,
-        quant_config: Optional[QuantizationConfig] = None,
+        config: CohereConfig | Cohere2Config,
+        cache_config: CacheConfig | None = None,
+        quant_config: QuantizationConfig | None = None,
         prefix: str = "",
     ):
         super().__init__()
@@ -241,9 +240,9 @@ class CohereAttention(nn.Module):
 class CohereDecoderLayer(nn.Module):
     def __init__(
         self,
-        config: Union[CohereConfig, Cohere2Config],
-        cache_config: Optional[CacheConfig] = None,
-        quant_config: Optional[QuantizationConfig] = None,
+        config: CohereConfig | Cohere2Config,
+        cache_config: CacheConfig | None = None,
+        quant_config: QuantizationConfig | None = None,
         prefix: str = "",
     ):
         super().__init__()
@@ -265,7 +264,7 @@ class CohereDecoderLayer(nn.Module):
         self,
         positions: torch.Tensor,
         hidden_states: torch.Tensor,
-        residual: Optional[torch.Tensor],
+        residual: torch.Tensor | None,
     ) -> tuple[torch.Tensor, torch.Tensor]:
         # Self Attention
         residual = hidden_states
@@ -324,9 +323,9 @@ class CohereModel(nn.Module):
         self,
         input_ids: torch.Tensor,
         positions: torch.Tensor,
-        intermediate_tensors: Optional[IntermediateTensors],
-        inputs_embeds: Optional[torch.Tensor] = None,
-    ) -> Union[torch.Tensor, IntermediateTensors]:
+        intermediate_tensors: IntermediateTensors | None,
+        inputs_embeds: torch.Tensor | None = None,
+    ) -> torch.Tensor | IntermediateTensors:
         if get_pp_group().is_first_rank:
             if inputs_embeds is not None:
                 hidden_states = inputs_embeds
@@ -452,9 +451,9 @@ class CohereForCausalLM(nn.Module, SupportsLoRA, SupportsPP, SupportsQuant):
         self,
         input_ids: torch.Tensor,
         positions: torch.Tensor,
-        intermediate_tensors: Optional[IntermediateTensors] = None,
-        inputs_embeds: Optional[torch.Tensor] = None,
-    ) -> Union[torch.Tensor, IntermediateTensors]:
+        intermediate_tensors: IntermediateTensors | None = None,
+        inputs_embeds: torch.Tensor | None = None,
+    ) -> torch.Tensor | IntermediateTensors:
         hidden_states = self.model(
             input_ids, positions, intermediate_tensors, inputs_embeds
         )
@@ -463,7 +462,7 @@ class CohereForCausalLM(nn.Module, SupportsLoRA, SupportsPP, SupportsQuant):
     def compute_logits(
         self,
         hidden_states: torch.Tensor,
-    ) -> Optional[torch.Tensor]:
+    ) -> torch.Tensor | None:
         is_not_lora = hasattr(self.model.embed_tokens, "weight")
         if is_not_lora:
             logits = self.logits_processor(self.model.embed_tokens, hidden_states)