[LoRA] Support dynamically initialize packed_modules_mapping for VLM with arbitrary components (#18987)

Signed-off-by: isotr0py <2037008807@qq.com>
Signed-off-by: Isotr0py <2037008807@qq.com>
This commit is contained in:
Isotr0py
2025-06-01 11:06:57 +08:00
committed by GitHub
parent 6aa8f9a4e7
commit a35ca765a5
7 changed files with 32 additions and 38 deletions

View File

@@ -1,5 +1,6 @@
# SPDX-License-Identifier: Apache-2.0
"""Utils for model executor."""
import copy
from typing import Any, Optional
import torch
@@ -51,3 +52,23 @@ def _make_synced_weight_loader(original_weight_loader):
torch._sync(param)
return _synced_weight_loader
def get_packed_modules_mapping(model: torch.nn.Module) -> dict[str, list[str]]:
    """Return the packed-modules mapping for *model*.

    If the model defines ``packed_modules_mapping`` itself, a deep copy of
    that mapping is returned unchanged. Otherwise the mapping is inferred by
    merging the ``packed_modules_mapping`` attributes of the model's direct
    children (e.g. the language/vision components of a multimodal model).

    Args:
        model: The module whose mapping is requested.

    Returns:
        A mapping from packed module name to the list of sub-module names it
        packs. The result (including its list values) is deep-copied, so
        mutating it never corrupts the model's or any child's — possibly
        class-level and shared — attribute.

    Raises:
        ValueError: If two children define conflicting entries for the same
            packed module name, since merging them would be unsafe.
    """
    parent_map = copy.deepcopy(getattr(model, "packed_modules_mapping", {}))
    # Don't infer the mapping if the model has defined it explicitly.
    if parent_map:
        return parent_map
    # We only check main components (direct children) instead of all
    # submodules of the whole model.
    for child in model.children():
        child_map = getattr(child, "packed_modules_mapping", {})
        has_conflict = any(
            key in parent_map and parent_map[key] != packed
            for key, packed in child_map.items())
        if has_conflict:
            raise ValueError(
                f"Can't update {type(model).__name__}'s packed_modules_mapping "
                f"safely because of conflicts from {type(child).__name__}.")
        # Deep-copy the child's mapping before merging so the returned dict
        # doesn't alias the child's attribute — consistent with the deepcopy
        # of the parent mapping above.
        parent_map.update(copy.deepcopy(child_map))
    return parent_map