[BugFix] LoRA: Support loading base_layer of experts (#31104)
Signed-off-by: Hollow Man <hollowman@opensuse.org>
@@ -2007,6 +2007,7 @@ class FusedMoE(CustomOp):
     @classmethod
     def make_expert_params_mapping(
         cls,
+        model: torch.nn.Module,
         ckpt_gate_proj_name: str,
         ckpt_down_proj_name: str,
         ckpt_up_proj_name: str,
@@ -2025,13 +2026,19 @@ class FusedMoE(CustomOp):
             )
         )
 
+        base_layer = (
+            "base_layer."
+            if any(".base_layer." in name for name, _ in model.named_parameters())
+            else ""
+        )
+
         return [
             # (param_name, weight_name, expert_id, shard_id)
             (
-                "experts.w13_"
+                f"experts.{base_layer}w13_"
                 if weight_name in [ckpt_gate_proj_name, ckpt_up_proj_name]
-                else "experts.w2_",
-                f"experts.{physical_to_logical_map[expert_id]}.{weight_name}.",
+                else f"experts.{base_layer}w2_",
+                f"experts.{physical_to_logical_map[expert_id]}.{weight_name}.{base_layer}",
                 expert_id,
                 shard_id,
             )
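
Why the probe is needed: when a PEFT-style LoRA adapter wraps a module, the wrapped module's parameters move under a base_layer attribute, so their dotted names gain a ".base_layer." segment and the checkpoint-to-parameter mapping must target the shifted path. The sketch below is a minimal, self-contained illustration, not vLLM code: the Experts and LoRAWrapper classes are hypothetical stand-ins, and only the base_layer detection expression is taken verbatim from the patch.

    import torch
    import torch.nn as nn

    class Experts(nn.Module):
        """Stand-in for fused expert weights, named as FusedMoE names them."""
        def __init__(self):
            super().__init__()
            self.w13_weight = nn.Parameter(torch.empty(4, 16, 8))
            self.w2_weight = nn.Parameter(torch.empty(4, 8, 16))

    class LoRAWrapper(nn.Module):
        """Hypothetical PEFT-style wrapper: the wrapped module moves under `base_layer`."""
        def __init__(self, base: nn.Module):
            super().__init__()
            self.base_layer = base

    plain = nn.ModuleDict({"experts": Experts()})
    lora = nn.ModuleDict({"experts": LoRAWrapper(Experts())})

    for model in (plain, lora):
        # Same probe the patch adds: does any parameter path contain ".base_layer."?
        base_layer = (
            "base_layer."
            if any(".base_layer." in name for name, _ in model.named_parameters())
            else ""
        )
        # Target param name the mapping would now produce for the gate/up weights.
        print(f"experts.{base_layer}w13_weight")

    # Output:
    #   experts.w13_weight             (plain model)
    #   experts.base_layer.w13_weight  (LoRA-wrapped model)

Probing model.named_parameters() once keeps the mapping logic identical for LoRA and non-LoRA models; only the prefix string spliced into the parameter path changes.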