Update deprecated Python 3.8 typing (#13971)

commit cf069aa8aa
parent bf33700ecd
Author: Harry Mellor
Date: 2025-03-03 01:34:51 +00:00
Committed by: GitHub
300 changed files with 2294 additions and 2347 deletions
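
The pattern applied across all 300 files: PEP 585 (Python 3.9) made the built-in container types directly subscriptable, so the typing.List, typing.Dict, etc. aliases are deprecated once Python 3.8 support is dropped. A minimal sketch of the before/after, assuming a Python 3.9+ interpreter (the variable names are illustrative, not from this commit):

    from typing import List, get_origin

    # Before: the typing alias, deprecated since Python 3.9 (PEP 585).
    colors_old: List[str] = ["orange-ish brown", "neon pink"]

    # After: subscript the built-in type directly.
    colors_new: list[str] = ["orange-ish brown", "neon pink"]

    # Both spellings share the same runtime origin, so the rewrite is
    # purely a notation change for type checkers and readers.
    assert get_origin(List[str]) is list
    assert get_origin(list[str]) is list

Where a file's only use of typing was one of these aliases, the import is removed entirely, as in the first hunk below.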


@@ -3,7 +3,6 @@
 # Adapted from
 # https://github.com/fmmoret/vllm/blob/fm-support-lora-on-quantized-models/tests/lora/test_llama.py
 from dataclasses import dataclass
-from typing import List
 import pytest
@@ -19,7 +18,7 @@ class ModelWithQuantization:
     quantization: str
-MODELS: List[ModelWithQuantization]
+MODELS: list[ModelWithQuantization]
 #AWQ quantization is currently not supported in ROCm.
 if current_platform.is_rocm():
     MODELS = [
@@ -41,7 +40,7 @@ else:
 def do_sample(llm: vllm.LLM,
               lora_path: str,
               lora_id: int,
-              max_tokens: int = 256) -> List[str]:
+              max_tokens: int = 256) -> list[str]:
     raw_prompts = [
         "Give me an orange-ish brown color",
         "Give me a neon pink color",
@@ -61,7 +60,7 @@ def do_sample(llm: vllm.LLM,
                            lora_request=LoRARequest(str(lora_id), lora_id, lora_path)
                            if lora_id else None)
     # Print the outputs.
-    generated_texts: List[str] = []
+    generated_texts: list[str] = []
     for output in outputs:
         prompt = output.prompt
         generated_text = output.outputs[0].text
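
For context, a hedged sketch of how the do_sample helper changed above is exercised; the model name and LoRA path are hypothetical placeholders, not taken from this diff:

    import vllm

    # Hypothetical setup; enable_lora must be set for LoRARequest-based sampling.
    llm = vllm.LLM(model="meta-llama/Llama-2-7b-hf", enable_lora=True)

    # do_sample (defined in the test above) now advertises the built-in
    # generic list[str] as its return type instead of typing.List[str].
    texts: list[str] = do_sample(llm, lora_path="/path/to/lora", lora_id=1)
    for text in texts:
        print(text)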