[LoRA] Cleanup LoRA unused code (#29611)

Signed-off-by: Jee Jee Li <pandaleefree@gmail.com>
Co-authored-by: Cyrus Leung <tlleungac@connect.ust.hk>
This commit is contained in:
Jee Jee Li
2025-11-29 14:52:58 +08:00
committed by GitHub
parent 4a80ad0a25
commit 39e63dec7c
46 changed files with 126 additions and 173 deletions

View File

@@ -8,13 +8,13 @@ import pytest_asyncio
from ...utils import RemoteOpenAIServer
# any model with a chat template should work here
-MODEL_NAME = "HuggingFaceH4/zephyr-7b-beta"
+MODEL_NAME = "Qwen/Qwen3-0.6B"
# technically this needs Mistral-7B-v0.1 as base, but we're not testing
# generation quality here
@pytest.fixture(scope="module")
-def server(zephyr_lora_files):
+def server(qwen3_lora_files):
args = [
# use half precision for speed and memory savings in CI environment
"--dtype",
@@ -25,7 +25,7 @@ def server(zephyr_lora_files):
# lora config below
"--enable-lora",
"--lora-modules",
-    f"zephyr-lora={zephyr_lora_files}",
+    f"qwen3-lora={qwen3_lora_files}",
"--max-lora-rank",
"64",
"--max-cpu-loras",
@@ -45,12 +45,12 @@ async def client(server):
@pytest.mark.asyncio
-async def test_check_models(client: openai.AsyncOpenAI, zephyr_lora_files):
+async def test_check_models(client: openai.AsyncOpenAI, qwen3_lora_files):
models = await client.models.list()
models = models.data
served_model = models[0]
lora_models = models[1:]
assert served_model.id == MODEL_NAME
assert served_model.root == MODEL_NAME
-    assert all(lora_model.root == zephyr_lora_files for lora_model in lora_models)
-    assert lora_models[0].id == "zephyr-lora"
+    assert all(lora_model.root == qwen3_lora_files for lora_model in lora_models)
+    assert lora_models[0].id == "qwen3-lora"