[Renderer] Deprecate code paths for old input processing (#34775)

Signed-off-by: DarkLight1337 <tlleungac@connect.ust.hk>
Author: Cyrus Leung
Date: 2026-02-18 16:35:04 +08:00 (committed by GitHub)
Commit: a766b30349
Parent: 1faa8cb73c
6 changed files with 70 additions and 51 deletions
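
The last hunk in this diff deletes a classic warn-and-forward deprecation shim: warn that `truncate_prompt_tokens` on the params object is deprecated, then fold its value into `tokenization_kwargs` so downstream code only sees the new spelling. A self-contained sketch of that recipe, with illustrative names that are not vLLM's API:

import warnings
from typing import Any

def tokenize_prompt(
    prompt: str,
    tokenization_kwargs: dict[str, Any] | None = None,
    truncate_prompt_tokens: int | None = None,  # deprecated spelling
) -> dict[str, Any]:
    # Warn on the legacy parameter, then forward its value through the
    # new one. Names here are illustrative, not vLLM's actual API.
    if truncate_prompt_tokens is not None:
        warnings.warn(
            "`truncate_prompt_tokens` is deprecated; "
            "pass it via `tokenization_kwargs` instead.",
            DeprecationWarning,
            stacklevel=2,  # attribute the warning to the caller's frame
        )
        tokenization_kwargs = {
            **(tokenization_kwargs or {}),
            "truncate_prompt_tokens": truncate_prompt_tokens,
        }
    return {"prompt": prompt, **(tokenization_kwargs or {})}

Once callers have migrated, the shim and its warning are deleted together, which is the shape of this commit: the remaining hunks strip the now-unused `tokenization_kwargs` plumbing from the request-submission helpers.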

@@ -519,7 +519,6 @@ class LLM:
             ),
             params=seq_params,
             lora_requests=seq_lora_requests,
-            tokenization_kwargs=tokenization_kwargs,
             priorities=seq_priority,
         )
@@ -1813,7 +1812,6 @@ class LLM:
             params=seq_params,
             use_tqdm=use_tqdm,
             lora_requests=seq_lora_requests,
-            tokenization_kwargs=tokenization_kwargs,
             priorities=seq_priority,
         )
@@ -1872,7 +1870,6 @@ class LLM:
             params=seq_params,
             lora_requests=seq_lora_requests,
             use_tqdm=use_tqdm,
-            tokenization_kwargs=tokenization_kwargs,
         )
 
     def _render_and_run_requests(
@@ -1881,7 +1878,6 @@ class LLM:
         params: Sequence[SamplingParams | PoolingParams],
         *,
         lora_requests: Sequence[LoRARequest | None] | None = None,
-        tokenization_kwargs: dict[str, Any] | None = None,
         priorities: Sequence[int] | None = None,
         use_tqdm: bool | Callable[..., tqdm] = True,
     ):
@@ -1899,7 +1895,6 @@ class LLM:
             prompts=prompts,
             params=params,
             lora_requests=lora_requests,
-            tokenization_kwargs=tokenization_kwargs,
             priorities=priorities,
         )
@@ -1911,7 +1906,6 @@ class LLM:
         params: Sequence[SamplingParams | PoolingParams],
         *,
         lora_requests: Sequence[LoRARequest | None] | None = None,
-        tokenization_kwargs: dict[str, Any] | None = None,
         priorities: Sequence[int] | None = None,
     ) -> list[str]:
         added_request_ids: list[str] = []
@@ -1922,7 +1916,6 @@ class LLM:
                 prompt,
                 params[i],
                 lora_request=None if lora_requests is None else lora_requests[i],
-                tokenization_kwargs=tokenization_kwargs,
                 priority=0 if priorities is None else priorities[i],
             )
             added_request_ids.append(request_id)
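
The loop above fans a batched call out into per-request submissions, indexing into optional parallel sequences for per-request overrides. A runnable sketch of that pattern in isolation, where `submit` is a hypothetical stand-in for `LLM._add_request`:

import itertools
from collections.abc import Sequence

_request_counter = itertools.count()

def submit(prompt: str, priority: int = 0) -> str:
    # Hypothetical stand-in for LLM._add_request: returns a request ID.
    return str(next(_request_counter))

def add_requests(
    prompts: Sequence[str],
    priorities: Sequence[int] | None = None,
) -> list[str]:
    # Fan the batch out one request at a time; a missing parallel
    # sequence falls back to a per-field default, as in the diff.
    added_request_ids: list[str] = []
    for i, prompt in enumerate(prompts):
        request_id = submit(
            prompt,
            priority=0 if priorities is None else priorities[i],
        )
        added_request_ids.append(request_id)
    return added_request_ids
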
@@ -1938,7 +1931,6 @@ class LLM:
         prompt: ProcessorInputs,
         params: SamplingParams | PoolingParams,
         lora_request: LoRARequest | None = None,
-        tokenization_kwargs: dict[str, Any] | None = None,
         priority: int = 0,
     ) -> str:
         if isinstance(params, SamplingParams):
@@ -1947,27 +1939,11 @@ class LLM:
         request_id = str(next(self.request_counter))
 
-        if params.truncate_prompt_tokens is not None:
-            params_type = type(params).__name__
-            warnings.warn(
-                f"The `truncate_prompt_tokens` parameter in `{params_type}` "
-                "is deprecated and will be removed in v0.16. "
-                "Please pass it via `tokenization_kwargs` instead.",
-                DeprecationWarning,
-                stacklevel=2,
-            )
-
-            tokenization_kwargs = merge_kwargs(
-                tokenization_kwargs,
-                dict(truncate_prompt_tokens=params.truncate_prompt_tokens),
-            )
-
         return self.llm_engine.add_request(
             request_id,
             prompt,
             params,
             lora_request=lora_request,
-            tokenization_kwargs=tokenization_kwargs,
             priority=priority,
         )
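
The deleted shim leaned on a `merge_kwargs` helper whose definition is not part of this diff. The call site only requires "later mappings win, `None` tolerated"; a minimal sketch under that assumption, not necessarily vLLM's actual implementation:

from typing import Any

def merge_kwargs(*kwargs_dicts: dict[str, Any] | None) -> dict[str, Any]:
    # Merge keyword-argument dicts left to right: later values win,
    # and None stands in for "no overrides". A sketch, not vLLM's helper.
    merged: dict[str, Any] = {}
    for kwargs in kwargs_dicts:
        if kwargs:
            merged.update(kwargs)
    return merged

# At the deleted call site this turned a possibly-None tokenization_kwargs
# plus the legacy field into a single dict, e.g.:
#     merge_kwargs(None, dict(truncate_prompt_tokens=128))
#     -> {"truncate_prompt_tokens": 128}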