[ci] Use env var to control whether to use S3 bucket in CI (#13634)

This commit is contained in:
Kevin H. Luu
2025-02-22 19:19:45 -08:00
committed by GitHub
parent 322d2a27d6
commit 2c5e637b57
30 changed files with 222 additions and 231 deletions

View File

@@ -10,9 +10,6 @@ import gc
-import torch
 from vllm import LLM, SamplingParams
-from vllm.config import LoadFormat
-from .conftest import MODEL_WEIGHTS_S3_BUCKET
 def test_duplicated_ignored_sequence_group():
@@ -21,8 +18,7 @@ def test_duplicated_ignored_sequence_group():
     sampling_params = SamplingParams(temperature=0.01,
                                      top_p=0.1,
                                      max_tokens=256)
-    llm = LLM(model=f"{MODEL_WEIGHTS_S3_BUCKET}/distilbert/distilgpt2",
-              load_format=LoadFormat.RUNAI_STREAMER,
+    llm = LLM(model="distilbert/distilgpt2",
               max_num_batched_tokens=4096,
               tensor_parallel_size=1)
     prompts = ["This is a short prompt", "This is a very long prompt " * 1000]
@@ -35,8 +31,7 @@ def test_max_tokens_none():
     sampling_params = SamplingParams(temperature=0.01,
                                      top_p=0.1,
                                      max_tokens=None)
-    llm = LLM(model=f"{MODEL_WEIGHTS_S3_BUCKET}/distilbert/distilgpt2",
-              load_format=LoadFormat.RUNAI_STREAMER,
+    llm = LLM(model="distilbert/distilgpt2",
               max_num_batched_tokens=4096,
               tensor_parallel_size=1)
     prompts = ["Just say hello!"]
@@ -46,9 +41,7 @@ def test_max_tokens_none():
 def test_gc():
-    llm = LLM(model=f"{MODEL_WEIGHTS_S3_BUCKET}/distilbert/distilgpt2",
-              load_format=LoadFormat.RUNAI_STREAMER,
-              enforce_eager=True)
+    llm = LLM(model="distilbert/distilgpt2", enforce_eager=True)
     del llm
     gc.collect()