[Misc] Qwen MoE model supports LoRA (#20932)

Signed-off-by: Jee Jee Li <pandaleefree@gmail.com>
Jee Jee Li
2025-07-18 02:32:52 +08:00
committed by GitHub
parent 90bd2ab6e3
commit a3a6c695f4
4 changed files with 20 additions and 8 deletions


@@ -50,7 +50,7 @@ from vllm.model_executor.model_loader.weight_utils import default_weight_loader
 from vllm.model_executor.sampling_metadata import SamplingMetadata
 from vllm.sequence import IntermediateTensors
-from .interfaces import SupportsPP
+from .interfaces import SupportsLoRA, SupportsPP
 from .utils import (AutoWeightsLoader, extract_layer_index,
                     is_pp_missing_parameter,
                     make_empty_intermediate_tensors_factory, make_layers,
@@ -482,7 +482,7 @@ class Qwen3MoeModel(nn.Module):
         return loaded_params


-class Qwen3MoeForCausalLM(nn.Module, SupportsPP):
+class Qwen3MoeForCausalLM(nn.Module, SupportsPP, SupportsLoRA):
     packed_modules_mapping = {
         "qkv_proj": [
             "q_proj",