[CI/Build] Avoid CUDA initialization (#8534)
@@ -7,6 +7,7 @@ from tests.kernels.utils import opcheck
 from vllm.model_executor.layers.activation import (FastGELU, GeluAndMul,
                                                    NewGELU, QuickGELU,
                                                    SiluAndMul)
+from vllm.utils import seed_everything
 
 from .allclose_default import get_default_atol, get_default_rtol
 
@@ -34,9 +35,7 @@ def test_act_and_mul(
     seed: int,
     device: str,
 ) -> None:
-    torch.random.manual_seed(seed)
-    if torch.cuda.is_available():
-        torch.cuda.manual_seed(seed)
+    seed_everything(seed)
     torch.set_default_device(device)
     x = torch.randn(num_tokens, 2 * d, dtype=dtype)
     if activation == "silu":
@@ -77,9 +76,7 @@ def test_activation(
     seed: int,
     device: str,
 ) -> None:
-    torch.random.manual_seed(seed)
-    if torch.cuda.is_available():
-        torch.cuda.manual_seed(seed)
+    seed_everything(seed)
     torch.set_default_device(device)
     x = torch.randn(num_tokens, d, dtype=dtype)
     layer = activation[0]()
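Why this matters: even the torch.cuda.is_available() guard in the removed lines can initialize the CUDA driver in the calling process, and a process that has touched CUDA cannot safely fork workers later ("Cannot re-initialize CUDA in forked subprocess"). Delegating to vllm.utils.seed_everything keeps seeding in one place and out of the test bodies.

A minimal sketch of what such a helper can look like, assuming only standard Python/NumPy/PyTorch APIs; the actual implementation lives in vllm.utils and may differ:

import random

import numpy as np
import torch


def seed_everything(seed: int) -> None:
    # Seed the Python, NumPy, and torch RNGs from a single entry point.
    random.seed(seed)
    np.random.seed(seed)
    # torch.manual_seed also queues CUDA RNG seeding lazily: the GPU
    # generators are seeded when (and only when) a CUDA context is
    # created, so no eager CUDA initialization happens in processes
    # that never touch the GPU.
    torch.manual_seed(seed)

With this, the tests call seed_everything(seed) once instead of branching on CUDA availability, which is what the two hunks above reduce to.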