[Misc] Move utils to avoid conflicts with stdlib, and move tests (#27169)

Signed-off-by: DarkLight1337 <tlleungac@connect.ust.hk>
Author: Cyrus Leung
Date: 2025-10-19 20:20:55 +08:00
Committed by: GitHub
Parent: 7a6c8c3fa1
Commit: d31f7844f8
52 changed files with 246 additions and 237 deletions
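
Rename summary (a sketch assembled from the changed imports below, not an exhaustive list of the moved symbols): the helper modules previously shared names with the standard-library modules functools, collections, and asyncio, and the hunks below update call sites to the new, non-conflicting module names.

# Old import paths (shadowed stdlib module names):
# from vllm.utils.functools import supports_kw
# from vllm.utils.collections import as_iter, as_list, chunk_list, is_list_of
# from vllm.utils.asyncio import AsyncMicrobatchTokenizer, make_async, merge_async_iterators

# New import paths, same symbols:
from vllm.utils.func_utils import supports_kw
from vllm.utils.collection_utils import as_iter, as_list, chunk_list, is_list_of
from vllm.utils.async_utils import AsyncMicrobatchTokenizer, make_async, merge_async_iterators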

@@ -51,7 +51,7 @@ from vllm.transformers_utils.chat_templates import get_chat_template_fallback_pa
 from vllm.transformers_utils.processor import cached_get_processor
 from vllm.transformers_utils.tokenizer import AnyTokenizer, MistralTokenizer
 from vllm.utils import random_uuid
-from vllm.utils.functools import supports_kw
+from vllm.utils.func_utils import supports_kw
 logger = init_logger(__name__)

@@ -76,7 +76,7 @@ from vllm.transformers_utils.tokenizer import (
 )
 from vllm.usage.usage_lib import UsageContext
 from vllm.utils import Counter, Device
-from vllm.utils.collections import as_iter, is_list_of
+from vllm.utils.collection_utils import as_iter, is_list_of
 from vllm.v1.engine import EngineCoreRequest
 from vllm.v1.engine.llm_engine import LLMEngine
 from vllm.v1.sample.logits_processor import LogitsProcessor

@@ -70,7 +70,7 @@ from vllm.transformers_utils.tokenizers import (
     truncate_tool_call_ids,
     validate_request_params,
 )
-from vllm.utils.collections import as_list
+from vllm.utils.collection_utils import as_list
 logger = init_logger(__name__)

@@ -34,8 +34,8 @@ from vllm.logprobs import Logprob
 from vllm.outputs import RequestOutput
 from vllm.sampling_params import BeamSearchParams, SamplingParams
 from vllm.transformers_utils.tokenizer import AnyTokenizer
-from vllm.utils.asyncio import merge_async_iterators
-from vllm.utils.collections import as_list
+from vllm.utils.async_utils import merge_async_iterators
+from vllm.utils.collection_utils import as_list
 logger = init_logger(__name__)

@@ -39,8 +39,8 @@ from vllm.outputs import (
     RequestOutput,
 )
 from vllm.pooling_params import PoolingParams
-from vllm.utils.asyncio import merge_async_iterators
-from vllm.utils.collections import chunk_list
+from vllm.utils.async_utils import merge_async_iterators
+from vllm.utils.collection_utils import chunk_list
 logger = init_logger(__name__)

@@ -91,13 +91,13 @@ from vllm.tracing import (
 )
 from vllm.transformers_utils.tokenizer import AnyTokenizer, MistralTokenizer
 from vllm.utils import random_uuid
-from vllm.utils.asyncio import (
+from vllm.utils.async_utils import (
     AsyncMicrobatchTokenizer,
     collect_from_async_generator,
     make_async,
     merge_async_iterators,
 )
-from vllm.utils.collections import is_list_of
+from vllm.utils.collection_utils import is_list_of
 from vllm.v1.engine import EngineCoreRequest
 logger = init_logger(__name__)

@@ -36,7 +36,7 @@ from vllm.entrypoints.utils import _validate_truncation_size
 from vllm.logger import init_logger
 from vllm.outputs import PoolingOutput, PoolingRequestOutput
 from vllm.tasks import SupportedTask
-from vllm.utils.asyncio import merge_async_iterators
+from vllm.utils.async_utils import merge_async_iterators
 logger = init_logger(__name__)

@@ -37,7 +37,7 @@ from vllm.logger import init_logger
 from vllm.lora.request import LoRARequest
 from vllm.outputs import PoolingRequestOutput, ScoringRequestOutput
 from vllm.transformers_utils.tokenizer import AnyTokenizer, MistralTokenizer
-from vllm.utils.asyncio import make_async, merge_async_iterators
+from vllm.utils.async_utils import make_async, merge_async_iterators
 logger = init_logger(__name__)

@@ -12,7 +12,7 @@ from vllm.entrypoints.openai.protocol import (
 )
 from vllm.logger import init_logger
 from vllm.transformers_utils.tokenizer import AnyTokenizer
-from vllm.utils.collections import is_list_of
+from vllm.utils.collection_utils import is_list_of
 from vllm.utils.import_utils import import_from_path
 logger = init_logger(__name__)

@@ -17,7 +17,7 @@ from vllm.inputs.data import TextPrompt as EngineTextPrompt
 from vllm.inputs.data import TokensPrompt as EngineTokensPrompt
 from vllm.inputs.parse import get_prompt_components, parse_raw_prompts
 from vllm.transformers_utils.tokenizer import AnyTokenizer
-from vllm.utils.asyncio import AsyncMicrobatchTokenizer
+from vllm.utils.async_utils import AsyncMicrobatchTokenizer
 @dataclass(frozen=True)