Update Optional[x] to x | None and Union[x, y] to x | y (#26633)

Signed-off-by: Harry Mellor <19981378+hmellor@users.noreply.github.com>
Author: Harry Mellor
Date: 2025-10-12 17:51:31 +01:00 (committed by GitHub)
Parent: 9bb38130cb
Commit: 8fcaaf6a16
944 changed files with 9490 additions and 10121 deletions
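
For reference, the pattern applied across all 944 files is PEP 604 union syntax. A minimal before/after sketch under that reading (the _clamp helper below is hypothetical, for illustration only, not taken from the diff):

    # Before: unions spelled via typing helpers
    from typing import Optional, Union

    def _clamp(value: Union[int, float], limit: Optional[float] = None) -> float:
        return float(value) if limit is None else min(float(value), limit)

    # After: PEP 604 syntax; no typing imports needed for the union itself
    def _clamp(value: int | float, limit: float | None = None) -> float:
        return float(value) if limit is None else min(float(value), limit)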

@@ -27,7 +27,7 @@
 import math
 from collections.abc import Iterable
 from itertools import islice
-from typing import Any, Optional, Union
+from typing import Any

 import torch
 from torch import nn
@@ -89,8 +89,8 @@ class MiniCPMMoE(nn.Module):
         top_k: int,
         hidden_size: int,
         intermediate_size: int,
-        params_dtype: Optional[torch.dtype] = None,
-        tp_size: Optional[int] = None,
+        params_dtype: torch.dtype | None = None,
+        tp_size: int | None = None,
     ):
         super().__init__()
         self.tp_size = tp_size or get_tensor_model_parallel_world_size()
@@ -190,7 +190,7 @@ class MiniCPMMLP(nn.Module):
         intermediate_size: int,
         hidden_act: str,
         hidden_act_param: float,
-        quant_config: Optional[QuantizationConfig] = None,
+        quant_config: QuantizationConfig | None = None,
     ) -> None:
         super().__init__()
         self.gate_up_proj = MergedColumnParallelLinear(
@@ -223,10 +223,10 @@ class MiniCPMAttention(nn.Module):
         num_heads: int,
         num_kv_heads: int,
         rope_theta: float = 10000,
-        rope_scaling: Optional[dict[str, Any]] = None,
+        rope_scaling: dict[str, Any] | None = None,
         max_position_embeddings: int = 8192,
-        cache_config: Optional[CacheConfig] = None,
-        quant_config: Optional[QuantizationConfig] = None,
+        cache_config: CacheConfig | None = None,
+        quant_config: QuantizationConfig | None = None,
         prefix: str = "",
     ) -> None:
         super().__init__()
@@ -305,8 +305,8 @@ class MiniCPMDecoderLayer(nn.Module):
     def __init__(
         self,
         config: PretrainedConfig,
-        cache_config: Optional[CacheConfig] = None,
-        quant_config: Optional[QuantizationConfig] = None,
+        cache_config: CacheConfig | None = None,
+        quant_config: QuantizationConfig | None = None,
         prefix: str = "",
     ) -> None:
         super().__init__()
@@ -362,7 +362,7 @@ class MiniCPMDecoderLayer(nn.Module):
         self,
         positions: torch.Tensor,
         hidden_states: torch.Tensor,
-        residual: Optional[torch.Tensor],
+        residual: torch.Tensor | None,
     ) -> tuple[torch.Tensor, torch.Tensor]:
         # Self Attention
         residual = hidden_states
@@ -425,8 +425,8 @@
         self,
         prefix: str,
         config: PretrainedConfig,
-        cache_config: Optional[CacheConfig],
-        quant_config: Optional[QuantizationConfig],
+        cache_config: CacheConfig | None,
+        quant_config: QuantizationConfig | None,
     ):
         self.start_layer, self.end_layer, self.layers = make_layers(
             config.num_hidden_layers,
@@ -444,11 +444,9 @@
         self,
         input_ids: torch.Tensor,
         positions: torch.Tensor,
-        intermediate_tensors: Optional[IntermediateTensors] = None,
-        inputs_embeds: Optional[torch.Tensor] = None,
-    ) -> Union[
-        torch.Tensor, IntermediateTensors, tuple[torch.Tensor, list[torch.Tensor]]
-    ]:
+        intermediate_tensors: IntermediateTensors | None = None,
+        inputs_embeds: torch.Tensor | None = None,
+    ) -> torch.Tensor | IntermediateTensors | tuple[torch.Tensor, list[torch.Tensor]]:
         if get_pp_group().is_first_rank:
             if inputs_embeds is not None:
                 hidden_states = inputs_embeds
@@ -633,11 +631,9 @@ class MiniCPMForCausalLM(nn.Module, SupportsLoRA, SupportsPP, SupportsEagle3):
         self,
         input_ids: torch.Tensor,
         positions: torch.Tensor,
-        intermediate_tensors: Optional[IntermediateTensors] = None,
-        inputs_embeds: Optional[torch.Tensor] = None,
-    ) -> Union[
-        torch.Tensor, IntermediateTensors, tuple[torch.Tensor, list[torch.Tensor]]
-    ]:
+        intermediate_tensors: IntermediateTensors | None = None,
+        inputs_embeds: torch.Tensor | None = None,
+    ) -> torch.Tensor | IntermediateTensors | tuple[torch.Tensor, list[torch.Tensor]]:
         model_output = self.model(
             input_ids, positions, intermediate_tensors, inputs_embeds
         )
@@ -658,7 +654,7 @@ class MiniCPMForCausalLM(nn.Module, SupportsLoRA, SupportsPP, SupportsEagle3):
     def compute_logits(
         self,
         hidden_states: torch.Tensor,
-    ) -> Optional[torch.Tensor]:
+    ) -> torch.Tensor | None:
         logits = self.logits_processor(self.lm_head, hidden_states)
         return logits

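
Compatibility note: X | Y in an annotation is evaluated eagerly by default, so it needs Python 3.10+ at import time; with `from __future__ import annotations` (PEP 563) annotations are stored as strings, so the same spelling also parses on 3.7-3.9 as long as the unions are not used at runtime (e.g. in isinstance checks). A small sketch under that assumption (parse_port is hypothetical, not part of this commit):

    from __future__ import annotations  # defers annotation evaluation; "int | None" parses on 3.7-3.9

    def parse_port(raw: str) -> int | None:
        # Return None for non-numeric input instead of raising ValueError.
        return int(raw) if raw.isdigit() else None

    assert parse_port("8080") == 8080
    assert parse_port("http") is None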