Update Optional[x] to x | None and Union[x, y] to x | y (#26633)

Signed-off-by: Harry Mellor <19981378+hmellor@users.noreply.github.com>
Authored by Harry Mellor on 2025-10-12 17:51:31 +01:00, committed by GitHub
parent 9bb38130cb
commit 8fcaaf6a16
944 changed files with 9490 additions and 10121 deletions
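
Note: the migration below is a mechanical move to PEP 604 union syntax. As a minimal before/after sketch (function names are illustrative, not code from this diff; assumes Python 3.10+, where `|` unions are built at runtime):

from typing import Any, Optional, Union

def old_style(config: Optional[dict[str, Any]] = None) -> Union[int, str]:
    # Pre-PEP 604 spelling: Optional/Union must be imported from typing.
    return 0 if config is None else "configured"

def new_style(config: dict[str, Any] | None = None) -> int | str:
    # PEP 604 spelling: the union itself needs no typing import.
    return 0 if config is None else "configured"

# The two spellings compare equal at runtime on 3.10+:
assert Optional[int] == int | None
assert Union[int, str] == int | str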

@@ -27,7 +27,7 @@
 from collections.abc import Iterable
 from itertools import islice
-from typing import Any, Optional, Union
+from typing import Any
 
 import torch
 import torch.nn.functional as F
@@ -77,9 +77,9 @@ class Qwen2MoeMLP(nn.Module):
         hidden_size: int,
         intermediate_size: int,
         hidden_act: str,
-        quant_config: Optional[QuantizationConfig] = None,
+        quant_config: QuantizationConfig | None = None,
         reduce_results: bool = True,
-        expert_gate: Optional[torch.nn.Linear] = None,
+        expert_gate: torch.nn.Linear | None = None,
         prefix: str = "",
     ) -> None:
         super().__init__()
@@ -120,7 +120,7 @@ class Qwen2MoeSparseMoeBlock(nn.Module):
     def __init__(
         self,
         config: Qwen2MoeConfig,
-        quant_config: Optional[QuantizationConfig] = None,
+        quant_config: QuantizationConfig | None = None,
         prefix: str = "",
     ):
         super().__init__()
@@ -195,12 +195,12 @@ class Qwen2MoeAttention(nn.Module):
         num_heads: int,
         num_kv_heads: int,
         rope_theta: float = 10000,
-        rope_scaling: Optional[dict[str, Any]] = None,
+        rope_scaling: dict[str, Any] | None = None,
         max_position_embeddings: int = 8192,
-        cache_config: Optional[CacheConfig] = None,
-        quant_config: Optional[QuantizationConfig] = None,
+        cache_config: CacheConfig | None = None,
+        quant_config: QuantizationConfig | None = None,
         prefix: str = "",
-        dual_chunk_attention_config: Optional[dict[str, Any]] = None,
+        dual_chunk_attention_config: dict[str, Any] | None = None,
     ) -> None:
         super().__init__()
         self.hidden_size = hidden_size
@@ -285,8 +285,8 @@ class Qwen2MoeDecoderLayer(nn.Module):
     def __init__(
         self,
         config: Qwen2MoeConfig,
-        cache_config: Optional[CacheConfig] = None,
-        quant_config: Optional[QuantizationConfig] = None,
+        cache_config: CacheConfig | None = None,
+        quant_config: QuantizationConfig | None = None,
         prefix: str = "",
     ) -> None:
         super().__init__()
@@ -339,7 +339,7 @@ class Qwen2MoeDecoderLayer(nn.Module):
         self,
         positions: torch.Tensor,
         hidden_states: torch.Tensor,
-        residual: Optional[torch.Tensor],
+        residual: torch.Tensor | None,
     ) -> torch.Tensor:
         # Self Attention
         if residual is None:
@@ -396,9 +396,9 @@ class Qwen2MoeModel(nn.Module):
         self,
         input_ids: torch.Tensor,
         positions: torch.Tensor,
-        intermediate_tensors: Optional[IntermediateTensors] = None,
-        inputs_embeds: Optional[torch.Tensor] = None,
-    ) -> Union[torch.Tensor, IntermediateTensors]:
+        intermediate_tensors: IntermediateTensors | None = None,
+        inputs_embeds: torch.Tensor | None = None,
+    ) -> torch.Tensor | IntermediateTensors:
         if get_pp_group().is_first_rank:
             if inputs_embeds is not None:
                 hidden_states = inputs_embeds
@@ -548,12 +548,7 @@ class Qwen2MoeForCausalLM(nn.Module, SupportsPP, SupportsLoRA):
             getattr(config, "mlp_only_layers", [])
             or config.shared_expert_intermediate_size > 0
         ):
-            self.packed_modules_mapping["gate_up_proj"] = (
-                [
-                    "gate_proj",
-                    "up_proj",
-                ],
-            )
+            self.packed_modules_mapping["gate_up_proj"] = ["gate_proj", "up_proj"]
 
         self.model = Qwen2MoeModel(
             vllm_config=vllm_config, prefix=maybe_prefix(prefix, "model")
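
Note: besides the typing rewrite, this hunk folds in a small cleanup: the old right-hand side ended with a trailing comma after the closing `]`, so the mapping value was a one-element tuple wrapping the list rather than the flat list itself. A standalone sketch of the difference (variable names are illustrative):

# The trailing comma after `]` makes the whole value a 1-tuple of a list.
old_value = (
    [
        "gate_proj",
        "up_proj",
    ],
)
new_value = ["gate_proj", "up_proj"]

assert isinstance(old_value, tuple) and old_value == (new_value,)
assert isinstance(new_value, list)
assert old_value != new_value  # the 1-tuple is not the flat list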
@@ -578,9 +573,9 @@ class Qwen2MoeForCausalLM(nn.Module, SupportsPP, SupportsLoRA):
         self,
         input_ids: torch.Tensor,
         positions: torch.Tensor,
-        intermediate_tensors: Optional[IntermediateTensors] = None,
-        inputs_embeds: Optional[torch.Tensor] = None,
-    ) -> Union[torch.Tensor, IntermediateTensors]:
+        intermediate_tensors: IntermediateTensors | None = None,
+        inputs_embeds: torch.Tensor | None = None,
+    ) -> torch.Tensor | IntermediateTensors:
         hidden_states = self.model(
             input_ids, positions, intermediate_tensors, inputs_embeds
         )
@@ -589,7 +584,7 @@ class Qwen2MoeForCausalLM(nn.Module, SupportsPP, SupportsLoRA):
     def compute_logits(
         self,
         hidden_states: torch.Tensor,
-    ) -> Optional[torch.Tensor]:
+    ) -> torch.Tensor | None:
         logits = self.logits_processor(self.lm_head, hidden_states)
         return logits
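
Note: the rewrite is not purely cosmetic. Function and class annotations are evaluated at definition time unless `from __future__ import annotations` is in effect, so the new spelling assumes an interpreter with runtime `|`-union support. A stdlib-only sketch (assumes Python 3.10+):

import types

u = dict[str, int] | None
assert isinstance(u, types.UnionType)  # `X | Y` builds a types.UnionType

# `|` unions are accepted directly by isinstance()/issubclass():
assert isinstance(None, int | None)
assert not isinstance("x", int | float)

# On older interpreters, `X | None` only works inside annotations that are
# never evaluated, e.g. under `from __future__ import annotations`.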