[1/n][CI] Load models in CI from S3 instead of HF (#13205)

Signed-off-by: <>
Co-authored-by: EC2 Default User <ec2-user@ip-172-31-20-117.us-west-2.compute.internal>
Author: Kevin H. Luu
Date: 2025-02-18 23:34:59 -08:00 (committed by GitHub)
Parent: fd84857f64
Commit: d5d214ac7f
43 changed files with 225 additions and 76 deletions


@@ -3,6 +3,7 @@
 import pytest
 
 from vllm import LLM
+from vllm.config import LoadFormat
 
 
 @pytest.fixture(autouse=True)
@@ -14,13 +15,17 @@ def v1(run_with_both_engines):
 def test_empty_prompt():
-    llm = LLM(model="gpt2", enforce_eager=True)
+    llm = LLM(model="s3://vllm-ci-model-weights/gpt2",
+              load_format=LoadFormat.RUNAI_STREAMER,
+              enforce_eager=True)
     with pytest.raises(ValueError, match='Prompt cannot be empty'):
         llm.generate([""])
 
 
 @pytest.mark.skip_v1
 def test_out_of_vocab_token():
-    llm = LLM(model="gpt2", enforce_eager=True)
+    llm = LLM(model="s3://vllm-ci-model-weights/gpt2",
+              load_format=LoadFormat.RUNAI_STREAMER,
+              enforce_eager=True)
     with pytest.raises(ValueError, match='out of vocabulary'):
         llm.generate({"prompt_token_ids": [999999]})
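For reference, a minimal standalone sketch of the pattern this change applies across the CI tests: point the model path at the S3 bucket that holds the weights and select the Run:ai streamer load format, so weights stream from object storage instead of being downloaded from the Hugging Face Hub. This is not part of the diff; it assumes the environment has read access to an S3 prefix that already contains the model files (config + safetensors), such as the vllm-ci-model-weights bucket used above.

# Sketch only: stream model weights from S3 with the Run:ai streamer.
# Assumes AWS credentials (or anonymous access) for the referenced bucket.
from vllm import LLM
from vllm.config import LoadFormat

llm = LLM(
    model="s3://vllm-ci-model-weights/gpt2",   # S3 prefix containing the model files
    load_format=LoadFormat.RUNAI_STREAMER,     # stream weights from object storage
    enforce_eager=True,
)

outputs = llm.generate(["Hello, my name is"])
print(outputs[0].outputs[0].text)

Pulling weights from a CI-owned bucket this way keeps the test suite independent of Hugging Face Hub availability and rate limits, which is the motivation stated in the commit title.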