Enable loading of fused expert weights in the Transformers modelling backend (#36997)
Signed-off-by: Harry Mellor <19981378+hmellor@users.noreply.github.com>
@@ -156,6 +156,17 @@ class MoEMixin(MixtureOfExperts):
         Params for weights, fp8 weight scales, fp8 activation scales
         (param_name, weight_name, expert_id, shard_id)
         """
+        # Models saved with fused experts. These are checkpoints released:
+        # - After Transformers v5
+        # - Before Transformers v5, but re-saved with save_original_format=False
+        # In the fused experts case, we repurpose the expert_id as shard_idx for
+        # deconcatenating w1 and w3 in FusedMoE.load_weights.
+        expert_mapping = [
+            ("experts.w13_weight", "experts.gate_up_proj", 0, "w1"),
+            ("experts.w13_weight", "experts.gate_up_proj", 1, "w3"),
+            ("experts.w2_weight", "experts.down_proj", 0, "w2"),
+        ]
+        # Models saved with ModuleList experts
         ckpt_names = [
             # (ckpt_gate_proj_name, ckpt_down_proj_name, ckpt_up_proj_name)
             ("gate_proj", "down_proj", "up_proj"),  # Most common MoE style
@@ -164,7 +175,6 @@ class MoEMixin(MixtureOfExperts):
         ]
         num_experts = self.model_config.get_num_experts()
         num_redundant_experts = self.parallel_config.eplb_config.num_redundant_experts
-        expert_mapping = []
         for gate_proj, down_proj, up_proj in ckpt_names:
             expert_mapping.extend(
                 FusedMoE.make_expert_params_mapping(
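Both the fused-expert entries added above and the per-expert entries produced by FusedMoE.make_expert_params_mapping share the same (param_name, weight_name, expert_id, shard_id) shape, so the loader can handle both checkpoint layouts through a single mapping list. The following is a minimal, hypothetical sketch of how such tuples can be matched against checkpoint tensor names; the per-expert weight-name strings and the matches_for helper are assumptions for illustration, not vLLM's actual FusedMoE.load_weights logic.

# Minimal illustrative sketch (not the real loader): how the
# (param_name, weight_name, expert_id, shard_id) tuples could be matched
# against checkpoint tensor names. The fused entries are taken from the
# diff above; the per-expert entries and matches_for are hypothetical.

expert_mapping = [
    # Fused-expert checkpoints: expert_id is repurposed as the shard index
    # used to split the concatenated gate_up_proj into w1 and w3 halves.
    ("experts.w13_weight", "experts.gate_up_proj", 0, "w1"),
    ("experts.w13_weight", "experts.gate_up_proj", 1, "w3"),
    ("experts.w2_weight", "experts.down_proj", 0, "w2"),
    # Per-expert (ModuleList) checkpoints: one entry per expert and shard,
    # roughly what FusedMoE.make_expert_params_mapping generates (exact
    # strings here are assumptions).
    ("experts.w13_weight", "experts.0.gate_proj", 0, "w1"),
    ("experts.w13_weight", "experts.0.up_proj", 0, "w3"),
    ("experts.w2_weight", "experts.0.down_proj", 0, "w2"),
]


def matches_for(checkpoint_name: str) -> list[tuple[str, int, str]]:
    """Return every mapping entry a checkpoint tensor name matches.

    A fused gate_up_proj tensor matches two entries (the w1 and w3 halves),
    which is why expert_id doubles as a shard index in the fused case.
    """
    return [
        (param_name, expert_id, shard_id)
        for param_name, weight_name, expert_id, shard_id in expert_mapping
        if weight_name in checkpoint_name
    ]


if __name__ == "__main__":
    # A fused checkpoint tensor maps to both halves of w13_weight.
    print(matches_for("model.layers.0.mlp.experts.gate_up_proj"))
    # [('experts.w13_weight', 0, 'w1'), ('experts.w13_weight', 1, 'w3')]
    # A per-expert tensor maps to a single (expert, shard) slot.
    print(matches_for("model.layers.0.mlp.experts.0.down_proj"))
    # [('experts.w2_weight', 0, 'w2')]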