Test Prompt Embeds/LoRA compatibility and Enable LoRA Support for OPT Models (#25717)

Signed-off-by: Andrew Sansom <andrew@protopia.ai>
Author: Andrew Sansom <andrew@protopia.ai>
Date: 2025-09-29 19:10:58 -05:00
Committed by: GitHub
Commit: 78a47f87ce (parent: 6a113d9aed)
5 changed files with 40 additions and 11 deletions

vllm/model_executor/models/opt.py

@@ -43,7 +43,7 @@ from vllm.model_executor.layers.vocab_parallel_embedding import (
 from vllm.model_executor.model_loader.weight_utils import default_weight_loader
 from vllm.sequence import IntermediateTensors
 
-from .interfaces import SupportsPP
+from .interfaces import SupportsLoRA, SupportsPP
 from .utils import (AutoWeightsLoader, WeightsMapper, is_pp_missing_parameter,
                     make_empty_intermediate_tensors_factory, make_layers,
                     maybe_prefix)
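
SupportsLoRA is a marker interface: inheriting from it is how a model class advertises LoRA support to the rest of the engine. A minimal sketch of checking that marker, assuming the supports_lora helper in vllm.model_executor.models.interfaces (the assert is illustrative, not part of this commit):

# Hedged sketch: verifying the LoRA marker on the OPT model class.
# Assumes `supports_lora` is exposed by vllm.model_executor.models.interfaces.
from vllm.model_executor.models.interfaces import supports_lora
from vllm.model_executor.models.opt import OPTForCausalLM

# After this commit, OPTForCausalLM inherits SupportsLoRA, so the check passes.
assert supports_lora(OPTForCausalLM)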
@@ -352,10 +352,9 @@ class OPTModel(nn.Module):
         return loaded_params
 
 
-class OPTForCausalLM(nn.Module, SupportsPP):
+class OPTForCausalLM(nn.Module, SupportsPP, SupportsLoRA):
     packed_modules_mapping = {
         "qkv_proj": ["q_proj", "k_proj", "v_proj"],
-        "gate_up_proj": ["gate_proj", "up_proj"]
     }
     hf_to_vllm_mapper = WeightsMapper(orig_to_new_prefix={
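
With the interface in place, OPT checkpoints can be served with LoRA adapters through vLLM's public LLM, SamplingParams, and LoRARequest APIs. A usage sketch; the adapter path and prompt below are hypothetical placeholders, not taken from this commit:

# Hedged usage sketch: running OPT with a LoRA adapter in vLLM.
# The model choice, adapter path, and prompt are placeholder assumptions.
from vllm import LLM, SamplingParams
from vllm.lora.request import LoRARequest

llm = LLM(model="facebook/opt-6.7b", enable_lora=True)
outputs = llm.generate(
    ["The capital of France is"],
    SamplingParams(temperature=0.0, max_tokens=32),
    # LoRARequest takes an adapter name, an integer id, and a local adapter path.
    lora_request=LoRARequest("opt-adapter", 1, "/path/to/opt-lora-adapter"),
)
print(outputs[0].outputs[0].text)

Per the commit title, the accompanying tests exercise this same path with prompt embeddings enabled to confirm the two features compose for OPT.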