Update Optional[x] -> x | None and Union[x, y] to x | y (#26633)

Signed-off-by: Harry Mellor <19981378+hmellor@users.noreply.github.com>
Author: Harry Mellor
Date: 2025-10-12 17:51:31 +01:00
Committed by: GitHub
Parent: 9bb38130cb
Commit: 8fcaaf6a16
944 changed files with 9490 additions and 10121 deletions
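
The mechanical pattern applied across all 944 files is PEP 604 union syntax, available since Python 3.10. A minimal before/after sketch (the function and parameter names here are illustrative, not taken from this diff):

    # Before: typing-module generics require an import.
    from typing import Optional, Union

    def dequantize(x: Optional[float], mode: Union[int, str]) -> Optional[float]:
        # Optional[float] is shorthand for Union[float, None].
        return x

    # After: builtin | unions, no typing import needed.
    def dequantize(x: float | None, mode: int | str) -> float | None:
        # float | None denotes exactly the same type as Optional[float].
        return x

The two spellings are interchangeable to type checkers; the commit changes surface syntax only, not behavior.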


@@ -7,7 +7,6 @@ fp8 block-quantized case.
 """
 
 import dataclasses
-from typing import Optional
 
 import pytest
 import torch.distributed

@@ -92,13 +91,13 @@ class TestConfig:
     block_size: list[int]
     # configs for testing low-latency kernels
     low_latency: bool
-    use_fp8_dispatch: Optional[bool] = False
+    use_fp8_dispatch: bool | None = False
 
 
 @dataclasses.dataclass
 class TestTensors:
     rank_tokens: torch.Tensor  # all ranks make this many tokens
-    rank_token_scales: Optional[torch.Tensor]
+    rank_token_scales: torch.Tensor | None
     topk: torch.Tensor
     topk_weights: torch.Tensor
     config: TestConfig

@@ -143,7 +142,7 @@ def make_ll_modular_kernel(
     max_tokens_per_rank: int,
     dp_size: int,
     hidden_size: int,
-    q_dtype: Optional[torch.dtype],
+    q_dtype: torch.dtype | None,
     test_config: TestConfig,
     quant_config: FusedMoEQuantConfig,
 ) -> FusedMoEModularKernel:

@@ -179,7 +178,7 @@ def make_ht_modular_kernel(
     pgi: ProcessGroupInfo,
     dp_size: int,
     num_local_experts: int,
-    q_dtype: Optional[torch.dtype],
+    q_dtype: torch.dtype | None,
     test_config: TestConfig,
     quant_config: FusedMoEQuantConfig,
 ) -> FusedMoEModularKernel:

@@ -249,8 +248,8 @@ def deepep_deepgemm_moe_impl(
     test_tensors: TestTensors,
     w1: torch.Tensor,
     w2: torch.Tensor,
-    w1_scale: Optional[torch.Tensor],
-    w2_scale: Optional[torch.Tensor],
+    w1_scale: torch.Tensor | None,
+    w2_scale: torch.Tensor | None,
 ) -> torch.Tensor:
     test_config = test_tensors.config
     num_experts = test_config.num_experts
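
One compatibility caveat, as a hedged sketch mirroring the TestConfig hunk above (the class and field are copied from the diff; the future-import is an assumption about how such code could stay importable on older interpreters, not something this commit adds): on Python 3.9 and earlier, X | None in an annotation raises TypeError at class-creation time unless annotation evaluation is deferred.

    from __future__ import annotations  # PEP 563: store annotations as strings

    import dataclasses

    @dataclasses.dataclass
    class TestConfig:
        # With deferred evaluation this parses on 3.8/3.9 as well, because
        # "bool | None" is never evaluated when the class is created.
        use_fp8_dispatch: bool | None = False

    # Runtime unions, e.g. isinstance(x, int | str), still require 3.10+,
    # since they construct types.UnionType objects at runtime.

Nearly all Optional/Union usage lives in annotations, which is why a rewrite like this one is largely a find-and-replace exercise.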