[Platform] Deprecate seed_everything (#31659)

Signed-off-by: wangxiyuan <wangxiyuan1007@gmail.com>
This commit is contained in:
wangxiyuan
2026-01-05 10:34:04 +08:00
committed by GitHub
parent 367856de14
commit bb4337b34c
77 changed files with 219 additions and 171 deletions

View File

@@ -12,7 +12,7 @@ from vllm.model_executor.layers.mamba.ops.causal_conv1d import (
causal_conv1d_fn,
causal_conv1d_update,
)
-from vllm.platforms import current_platform
+from vllm.utils.torch_utils import set_random_seed
def causal_conv1d_ref(
@@ -154,7 +154,7 @@ def test_causal_conv1d_update(dim, width, seqlen, has_bias, silu_activation, ity
if itype == torch.bfloat16:
rtol, atol = 1e-2, 5e-2
# set seed
-current_platform.seed_everything(0)
+set_random_seed(0)
batch = 2
x = torch.randn(batch, dim, seqlen, device=device, dtype=itype)
x_ref = x.clone()
@@ -201,7 +201,7 @@ def test_causal_conv1d_update_with_batch_gather(
rtol, atol = 1e-2, 5e-2
# set seed
-current_platform.seed_everything(0)
+set_random_seed(0)
padding = 5 if with_padding else 0
padded_batch_size = batch_size + padding
@@ -278,7 +278,7 @@ def test_causal_conv1d_varlen(
if itype == torch.bfloat16:
rtol, atol = 1e-2, 5e-2
# set seed
-current_platform.seed_everything(0)
+set_random_seed(0)
seqlens = []
batch_size = batch
padding = 3 if with_padding else 0

View File

@@ -12,8 +12,8 @@ from vllm.distributed.parallel_state import (
initialize_model_parallel,
)
from vllm.model_executor.layers.mamba.mamba_mixer2 import Mixer2RMSNormGated
-from vllm.platforms import current_platform
from vllm.utils.system_utils import update_environment_variables
+from vllm.utils.torch_utils import set_random_seed
@multi_gpu_test(num_gpus=2)
@@ -68,7 +68,7 @@ def mixer2_gated_norm_tensor_parallel(
dtype: torch.dtype,
device: str,
):
-current_platform.seed_everything(0)
+set_random_seed(0)
device = torch.device(f"cuda:{local_rank}")
torch.cuda.set_device(device)

View File

@@ -13,7 +13,7 @@ from vllm.model_executor.layers.mamba.ops.mamba_ssm import (
selective_scan_fn,
selective_state_update,
)
-from vllm.platforms import current_platform
+from vllm.utils.torch_utils import set_random_seed
def selective_state_update_ref(
@@ -271,7 +271,7 @@ def test_selective_scan(
rtolw = max(rtolw, rtol)
atolw = max(atolw, atol)
# set seed
-current_platform.seed_everything(0)
+set_random_seed(0)
batch_size = 1
dim = 4
dstate = 8
@@ -401,7 +401,7 @@ def test_selective_state_update(dim, dstate, has_z, itype):
if torch.version.hip:
atol *= 2
# set seed
-current_platform.seed_everything(0)
+set_random_seed(0)
batch_size = 1
state = torch.randn(batch_size, dim, dstate, dtype=itype, device=device)
x = torch.randn(batch_size, dim, device=device, dtype=itype)
@@ -438,7 +438,7 @@ def test_selective_state_update_varlen(dim, dstate, has_z, itype, max_seq_len):
if torch.version.hip:
atol *= 2
# set seed
-current_platform.seed_everything(0)
+set_random_seed(0)
batch_size = 4
token_counts = torch.randint(1, max_seq_len + 1, (batch_size,), device=device)
total_tokens = int(token_counts.sum().item())
@@ -857,7 +857,7 @@ def test_selective_state_update_with_num_accepted_tokens(
if torch.version.hip:
atol *= 2
-current_platform.seed_everything(0)
+set_random_seed(0)
batch_size = 4
tokens_per_seq = torch.randint(1, max_seq_len + 1, (batch_size,), device=device)
@@ -983,7 +983,7 @@ def test_selective_state_update_varlen_with_num_accepted(
if torch.version.hip:
atol *= 2
-current_platform.seed_everything(0)
+set_random_seed(0)
batch_size = 4
tokens_per_seq = torch.randint(1, max_seq_len + 1, (batch_size,), device=device)

View File

@@ -9,7 +9,7 @@ from einops import rearrange, repeat
from vllm.model_executor.layers.mamba.ops.ssd_combined import (
mamba_chunk_scan_combined_varlen,
)
-from vllm.platforms import current_platform
+from vllm.utils.torch_utils import set_random_seed
from vllm.v1.attention.backends.mamba2_attn import compute_varlen_chunk_metadata
# Added by the IBM Team, 2024
@@ -82,7 +82,7 @@ def ssd_minimal_discrete(X, A, B, C, block_len, initial_states=None):
def generate_random_inputs(batch_size, seqlen, n_heads, d_head, itype, device="cuda"):
-current_platform.seed_everything(0)
+set_random_seed(0)
A = -torch.exp(torch.rand(n_heads, dtype=itype, device=device))
dt = F.softplus(
torch.randn(batch_size, seqlen, n_heads, dtype=itype, device=device) - 4