[CI/Build] Fix amd model executor test (#27612)
Signed-off-by: zhewenli <zhewenli@meta.com>
This commit is contained in:
@@ -1,7 +1,10 @@
|
||||
# SPDX-License-Identifier: Apache-2.0
|
||||
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
|
||||
|
||||
import pytest
|
||||
|
||||
from vllm import SamplingParams
|
||||
from vllm.platforms import current_platform
|
||||
|
||||
# Small public HF checkpoint shared by the loader tests in this file.
test_model = "openai-community/gpt2"
|
||||
|
||||
@@ -15,6 +18,9 @@ prompts = [
|
||||
# Fixed seed so repeated generate() calls are deterministic across runs.
sampling_params = SamplingParams(temperature=0.8, top_p=0.95, seed=0)
|
||||
|
||||
|
||||
@pytest.mark.skipif(
    not current_platform.is_cuda(), reason="fastsafetensors requires CUDA/NVIDIA GPUs"
)
def test_model_loader_download_files(vllm_runner):
    """Load ``test_model`` with the fastsafetensors load format and generate.

    Exercises the fastsafetensors weight-download/loading path end to end via
    the ``vllm_runner`` fixture and verifies that generation produces output.
    """
    with vllm_runner(test_model, load_format="fastsafetensors") as llm:
        deserialized_outputs = llm.generate(prompts, sampling_params)
    # Previously the result was bound but never checked, so the test could
    # pass even if generation silently returned nothing.
    assert deserialized_outputs
|
||||
|
||||
@@ -5,6 +5,7 @@ import glob
|
||||
import tempfile
|
||||
|
||||
import huggingface_hub.constants
|
||||
import pytest
|
||||
import torch
|
||||
|
||||
from vllm.model_executor.model_loader.weight_utils import (
|
||||
@@ -12,8 +13,12 @@ from vllm.model_executor.model_loader.weight_utils import (
|
||||
fastsafetensors_weights_iterator,
|
||||
safetensors_weights_iterator,
|
||||
)
|
||||
from vllm.platforms import current_platform
|
||||
|
||||
|
||||
@pytest.mark.skipif(
|
||||
not current_platform.is_cuda(), reason="fastsafetensors requires CUDA/NVIDIA GPUs"
|
||||
)
|
||||
def test_fastsafetensors_model_loader():
|
||||
with tempfile.TemporaryDirectory() as tmpdir:
|
||||
huggingface_hub.constants.HF_HUB_OFFLINE = False
|
||||
|
||||
Reference in New Issue
Block a user