[Refactor] Relocate completion and chat completion tests (#37125)

Signed-off-by: sfeng33 <4florafeng@gmail.com>
This commit is contained in:
Flora Feng
2026-03-16 23:31:23 -04:00
committed by GitHub
parent f04d5226f8
commit 384dc7f77b
26 changed files with 41 additions and 48 deletions

View File

@@ -4,12 +4,11 @@ import weakref
import pytest
+from tests.entrypoints.openai.chat_completion.test_vision import TEST_IMAGE_ASSETS
from vllm import LLM
from vllm.distributed import cleanup_dist_env_and_memory
from vllm.sampling_params import SamplingParams
-from ..openai.test_vision import TEST_IMAGE_ASSETS
@pytest.fixture(scope="function")
def text_llm():

View File

@@ -6,13 +6,12 @@ import logging
import pytest
import regex as re
+from tests.entrypoints.openai.chat_completion.test_vision import TEST_IMAGE_ASSETS
from vllm import LLM
from vllm.entrypoints.chat_utils import ChatCompletionMessageParam
from vllm.v1.metrics import loggers as stat_loggers
from vllm.v1.metrics.reader import Counter, Metric
-from ..openai.test_vision import TEST_IMAGE_ASSETS
def _make_messages(image_url: str) -> list[ChatCompletionMessageParam]:
return [

View File

@@ -7,11 +7,10 @@ import openai
import pytest
import pytest_asyncio
+from tests.utils import RemoteOpenAIServer
from vllm.assets.audio import AudioAsset
from vllm.multimodal.utils import encode_audio_base64, encode_audio_url, fetch_audio
-from ...utils import RemoteOpenAIServer
MODEL_NAME = "fixie-ai/ultravox-v0_5-llama-3_2-1b"
TEST_AUDIO_URLS = [
AudioAsset("winning_call").url,

View File

@@ -8,8 +8,8 @@ import openai
import pytest
import pytest_asyncio
-from ...conftest import VideoTestAssets
-from ...utils import RemoteOpenAIServer
+from tests.conftest import VideoTestAssets
+from tests.utils import RemoteOpenAIServer
MODEL_NAME = "Qwen/Qwen2.5-Omni-3B"

View File

@@ -8,8 +8,8 @@ import pytest
import pytest_asyncio
from huggingface_hub import snapshot_download
-from ...conftest import AudioTestAssets
-from ...utils import RemoteOpenAIServer
+from tests.conftest import AudioTestAssets
+from tests.utils import RemoteOpenAIServer
# NOTE - the tests in this module are currently analogous to test_chat, but are
# separated to avoid OOM killing due to module-scoped servers, since we

View File

@@ -1,7 +1,7 @@
# SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
-from ...utils import VLLM_PATH, RemoteOpenAIServer
+from tests.utils import VLLM_PATH, RemoteOpenAIServer
chatml_jinja_path = VLLM_PATH / "examples/template_chatml.jinja"
assert chatml_jinja_path.exists()

View File

@@ -8,7 +8,7 @@ from typing import Any, NamedTuple
import openai # use the official client for correctness check
import pytest
-from ...utils import RemoteOpenAIServer
+from tests.utils import RemoteOpenAIServer
# # any model with a chat template should work here
MODEL_NAME = "Qwen/Qwen2-1.5B-Instruct"

View File

@@ -7,11 +7,10 @@ import openai
import pytest
import pytest_asyncio
+from tests.utils import RemoteOpenAIServer
from vllm.multimodal.utils import encode_video_url, fetch_video
from vllm.platforms import current_platform
-from ...utils import RemoteOpenAIServer
MODEL_NAME = "llava-hf/llava-onevision-qwen2-0.5b-ov-hf"
MAXIMUM_VIDEOS = 3

View File

@@ -8,12 +8,11 @@ import pytest
import pytest_asyncio
from transformers import AutoProcessor
+from tests.utils import ROCM_ENV_OVERRIDES, ROCM_EXTRA_ARGS, RemoteOpenAIServer
from vllm.multimodal.media import MediaWithBytes
from vllm.multimodal.utils import encode_image_url, fetch_image
from vllm.platforms import current_platform
-from ...utils import ROCM_ENV_OVERRIDES, ROCM_EXTRA_ARGS, RemoteOpenAIServer
MODEL_NAME = "microsoft/Phi-3.5-vision-instruct"
MAXIMUM_IMAGES = 2

View File

@@ -8,10 +8,9 @@ import pytest
import requests
import torch
+from tests.utils import RemoteOpenAIServer
from vllm.utils.serial_utils import tensor2base64
-from ...utils import RemoteOpenAIServer
@pytest.mark.parametrize(
"model_name", ["ibm-nasa-geospatial/Prithvi-EO-2.0-300M-TL-Sen1Floods11"]

View File

@@ -14,7 +14,7 @@ import torch
from openai import BadRequestError
from transformers import AutoConfig
-from ...utils import RemoteOpenAIServer
+from tests.utils import RemoteOpenAIServer
# any model with a chat template should work here
MODEL_NAME = "facebook/opt-125m"

View File

@@ -11,11 +11,10 @@ import pytest
import regex as re
import torch
+from tests.utils import RemoteOpenAIServer
from vllm.config import ModelConfig
from vllm.renderers.embed_utils import safe_load_prompt_embeds
-from ...utils import RemoteOpenAIServer
@pytest.mark.asyncio
async def test_empty_prompt():

View File

@@ -9,6 +9,7 @@ import pytest
import pytest_asyncio
import torch.cuda
+from tests.utils import RemoteOpenAIServer
from vllm.engine.arg_utils import EngineArgs
from vllm.model_executor.model_loader.tensorizer import (
TensorizerConfig,
@@ -17,8 +18,6 @@ from vllm.model_executor.model_loader.tensorizer import (
)
from vllm.platforms import current_platform
-from ...utils import RemoteOpenAIServer
MODEL_NAME = "unsloth/llama-3.2-1b-Instruct"
LORA_PATH = "davzoku/finqa_adapter_1b"

View File

@@ -6,11 +6,10 @@ import tempfile
import pytest
+from tests.utils import RemoteOpenAIServer
from vllm.model_executor.model_loader.weight_utils import download_weights_from_hf
from vllm.tokenizers import get_tokenizer
-from ...utils import RemoteOpenAIServer
MODEL_NAME = "Qwen/Qwen3-0.6B"
MODEL_PATH = os.path.join(tempfile.gettempdir(), "qwen3_06b")