[Chore] Rename utils submodules (#26920)

Signed-off-by: DarkLight1337 <tlleungac@connect.ust.hk>
This commit is contained in:
Cyrus Leung
2025-10-16 11:58:13 +08:00
committed by GitHub
parent 509cdc0370
commit f6cdc9a02f
24 changed files with 28 additions and 24 deletions

View File

@@ -51,7 +51,7 @@ from vllm.transformers_utils.chat_templates import get_chat_template_fallback_pa
from vllm.transformers_utils.processor import cached_get_processor
from vllm.transformers_utils.tokenizer import AnyTokenizer, MistralTokenizer
from vllm.utils import random_uuid
-from vllm.utils.func import supports_kw
+from vllm.utils.functools import supports_kw
logger = init_logger(__name__)

View File

@@ -35,7 +35,7 @@ from vllm.outputs import RequestOutput
from vllm.sampling_params import BeamSearchParams, SamplingParams
from vllm.transformers_utils.tokenizer import AnyTokenizer
from vllm.utils import as_list
-from vllm.utils.async_utils import merge_async_iterators
+from vllm.utils.asyncio import merge_async_iterators
logger = init_logger(__name__)

View File

@@ -40,7 +40,7 @@ from vllm.outputs import (
)
from vllm.pooling_params import PoolingParams
from vllm.utils import chunk_list
-from vllm.utils.async_utils import merge_async_iterators
+from vllm.utils.asyncio import merge_async_iterators
logger = init_logger(__name__)

View File

@@ -91,7 +91,7 @@ from vllm.tracing import (
)
from vllm.transformers_utils.tokenizer import AnyTokenizer, MistralTokenizer
from vllm.utils import is_list_of, random_uuid
-from vllm.utils.async_utils import (
+from vllm.utils.asyncio import (
AsyncMicrobatchTokenizer,
collect_from_async_generator,
make_async,

View File

@@ -36,7 +36,7 @@ from vllm.entrypoints.utils import _validate_truncation_size
from vllm.logger import init_logger
from vllm.outputs import PoolingOutput, PoolingRequestOutput
from vllm.tasks import SupportedTask
-from vllm.utils.async_utils import merge_async_iterators
+from vllm.utils.asyncio import merge_async_iterators
logger = init_logger(__name__)

View File

@@ -37,7 +37,7 @@ from vllm.logger import init_logger
from vllm.lora.request import LoRARequest
from vllm.outputs import PoolingRequestOutput, ScoringRequestOutput
from vllm.transformers_utils.tokenizer import AnyTokenizer, MistralTokenizer
-from vllm.utils.async_utils import make_async, merge_async_iterators
+from vllm.utils.asyncio import make_async, merge_async_iterators
logger = init_logger(__name__)

View File

@@ -17,7 +17,7 @@ from vllm.inputs.data import TextPrompt as EngineTextPrompt
from vllm.inputs.data import TokensPrompt as EngineTokensPrompt
from vllm.inputs.parse import get_prompt_components, parse_raw_prompts
from vllm.transformers_utils.tokenizer import AnyTokenizer
-from vllm.utils.async_utils import AsyncMicrobatchTokenizer
+from vllm.utils.asyncio import AsyncMicrobatchTokenizer
@dataclass(frozen=True)