[Bugfix] Fix LoRA extra vocab size (#15047)

Signed-off-by: Jee Jee Li <pandaleefree@gmail.com>
This commit is contained in:
Jee Jee Li
2025-03-19 00:40:29 +08:00
committed by GitHub
parent 179a619c21
commit 46c759c165
5 changed files with 1 addition and 5 deletions

View File

@@ -100,7 +100,6 @@ def run_test(
distributed_executor_backend=distributed_executor_backend,
enable_lora=True,
max_lora_rank=320,
lora_extra_vocab_size=0,
gpu_memory_utilization=0.8, # set to 0.8 to avoid OOM in CI
enforce_eager=True,
) as vllm_model: