Update Optional[x] -> x | None and Union[x, y] to x | y (#26633)
Signed-off-by: Harry Mellor <19981378+hmellor@users.noreply.github.com>
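For context on the annotation change itself, a minimal sketch (not part of the patch) of the two equivalent spellings; the `X | None` / `X | Y` forms are PEP 604 syntax, available since Python 3.10, and need no `typing` import. The variable names here are hypothetical:

from typing import Optional, Union

# Pre-PEP 604 spellings, which require the typing imports:
maybe_count_old: Optional[int] = None      # equivalent to Union[int, None]
int_or_str_old: Union[int, str] = 0

# PEP 604 spellings adopted by this change (Python 3.10+):
maybe_count_new: int | None = None
int_or_str_new: int | str = 0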
@@ -7,8 +7,6 @@ for offline inference.
 Requires HuggingFace credentials for access to Llama2.
 """
 
-from typing import Optional
-
 from huggingface_hub import snapshot_download
 
 from vllm import EngineArgs, LLMEngine, RequestOutput, SamplingParams
@@ -17,7 +15,7 @@ from vllm.lora.request import LoRARequest
 
 def create_test_prompts(
     lora_path: str,
-) -> list[tuple[str, SamplingParams, Optional[LoRARequest]]]:
+) -> list[tuple[str, SamplingParams, LoRARequest | None]]:
     """Create a list of test prompts with their sampling parameters.
 
     2 requests for base model, 4 requests for the LoRA. We define 2
@@ -68,7 +66,7 @@ def create_test_prompts(
 
 def process_requests(
     engine: LLMEngine,
-    test_prompts: list[tuple[str, SamplingParams, Optional[LoRARequest]]],
+    test_prompts: list[tuple[str, SamplingParams, LoRARequest | None]],
 ):
     """Continuously process a list of prompts and handle the outputs."""
     request_id = 0
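As a usage sketch (assumed, not taken from the patch): with the new signature, each test-prompt tuple carries `LoRARequest | None`, so base-model requests pass `None` while LoRA requests pass a `LoRARequest`. The prompt strings, adapter name, and adapter path below are placeholders:

# Hypothetical illustration of the updated tuple shape; the real list
# is built by create_test_prompts() in the example script.
from vllm import SamplingParams
from vllm.lora.request import LoRARequest

test_prompts: list[tuple[str, SamplingParams, LoRARequest | None]] = [
    # Base-model request: no LoRA adapter attached.
    ("Hello, my name is", SamplingParams(temperature=0.0), None),
    # LoRA request: adapter name, integer id, and local adapter path.
    (
        "Write a SQL query",
        SamplingParams(temperature=0.0),
        LoRARequest("sql-lora", 1, "/path/to/sql_lora"),
    ),
]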