[CI/Build] Avoid CUDA initialization (#8534)

Cyrus Leung
2024-09-18 18:38:11 +08:00
committed by GitHub
parent e351572900
commit 6ffa3f314c
55 changed files with 256 additions and 256 deletions
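
The tests previously seeded torch.random and, behind a torch.cuda.is_available() check, the CUDA RNG as well; this commit replaces that pattern with a single vllm.utils.seed_everything(seed) call so the kernel tests no longer touch the CUDA runtime just to set a seed. A minimal sketch of what such a helper can look like is shown below (an illustration only, not the actual vLLM implementation; the NumPy seeding and the optional-seed handling are assumptions):

import random
from typing import Optional

import numpy as np
import torch


def seed_everything(seed: Optional[int] = None) -> None:
    """Seed the Python, NumPy and PyTorch RNGs without forcing CUDA init."""
    if seed is None:
        return
    random.seed(seed)
    np.random.seed(seed)
    # torch.manual_seed also covers the CUDA generators, but PyTorch defers
    # the actual CUDA seeding until a device is first used, so calling it on
    # a CPU-only CI worker does not initialize the CUDA runtime.
    torch.manual_seed(seed)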


@@ -5,6 +5,7 @@ import pytest
 import torch
 from vllm.model_executor.layers.rotary_embedding import get_rope
+from vllm.utils import seed_everything
 from .allclose_default import get_default_atol, get_default_rtol
@@ -46,9 +47,8 @@ def test_rotary_embedding(
 ) -> None:
     if rotary_dim is None:
         rotary_dim = head_size
-    torch.random.manual_seed(seed)
-    if torch.cuda.is_available():
-        torch.cuda.manual_seed(seed)
+    seed_everything(seed)
     torch.set_default_device(device)
     if rotary_dim is None:
         rotary_dim = head_size
@@ -100,9 +100,7 @@ def test_batched_rotary_embedding(
     max_position: int = 8192,
     base: int = 10000,
 ) -> None:
-    torch.random.manual_seed(seed)
-    if torch.cuda.is_available():
-        torch.cuda.manual_seed(seed)
+    seed_everything(seed)
     torch.set_default_device(device)
     if rotary_dim is None:
         rotary_dim = head_size
@@ -162,9 +160,7 @@ def test_batched_rotary_embedding_multi_lora(
     max_position: int = 8192,
     base: int = 10000,
 ) -> None:
-    torch.random.manual_seed(seed)
-    if torch.cuda.is_available():
-        torch.cuda.manual_seed(seed)
+    seed_everything(seed)
     torch.set_default_device(device)
     if rotary_dim is None:
         rotary_dim = head_size