Enable ModelOpt Llama4 fp8 checkpoint deployment (#20419)

Signed-off-by: Zhiyu Cheng <zhiyuc@nvidia.com>
Author: Zhiyu
Date: 2025-07-11 23:07:16 -07:00
Committed by: GitHub
Parent: 5de8d9f111
Commit: 4afe687a82
5 changed files with 501 additions and 35 deletions
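
For context, a minimal sketch of how a ModelOpt FP8 Llama4 checkpoint could be served once this change is in. The checkpoint path, parallelism degree, and kv-cache flag below are illustrative assumptions, not part of this commit:

    from vllm import LLM, SamplingParams

    # Hypothetical local path to a Llama4 checkpoint exported by NVIDIA ModelOpt
    # with FP8 weights and FP8 kv-cache scales (k_scale / v_scale) included.
    llm = LLM(
        model="/path/to/llama4-modelopt-fp8",  # placeholder path
        tensor_parallel_size=8,                # adjust to the available GPUs
        kv_cache_dtype="fp8",                  # use the checkpoint's kv-cache scales
    )

    outputs = llm.generate(["Hello, my name is"],
                           SamplingParams(max_tokens=32))
    print(outputs[0].outputs[0].text)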

vllm/model_executor/models/llama4.py

@@ -35,7 +35,8 @@ from vllm.model_executor.layers.linear import (QKVParallelLinear,
                                                RowParallelLinear)
 from vllm.model_executor.layers.quantization import QuantizationConfig
 from vllm.model_executor.layers.rotary_embedding import get_rope
-from vllm.model_executor.model_loader.weight_utils import default_weight_loader
+from vllm.model_executor.model_loader.weight_utils import (
+    default_weight_loader, maybe_remap_kv_scale_name)
 
 from .llama import LlamaForCausalLM, LlamaMLP, LlamaModel
 from .utils import (AutoWeightsLoader, extract_layer_index, fast_topk,
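
A note on the newly imported helper used in the next hunk: maybe_remap_kv_scale_name takes a checkpoint parameter name plus the model's params_dict and returns either a name that exists in params_dict or None, meaning the tensor should be skipped. A standalone toy of that remap-or-skip pattern, with a made-up mapping purely for illustration (the real table lives in vllm/model_executor/model_loader/weight_utils.py):

    from typing import Optional

    def toy_remap_kv_scale_name(name: str, params_dict: dict) -> Optional[str]:
        # Invented mapping, for illustration only: fold per-projection kv-cache
        # scale names onto the single attention parameter, and signal "skip"
        # with None when no matching parameter exists.
        for src, dst in ((".k_proj.k_scale", ".attn.k_scale"),
                         (".v_proj.v_scale", ".attn.v_scale")):
            if name.endswith(src):
                candidate = name.replace(src, dst)
                return candidate if candidate in params_dict else None
        return name  # not a kv-cache scale; leave the name untouched

    params = {"layers.0.self_attn.attn.k_scale": 1.0}
    print(toy_remap_kv_scale_name("layers.0.self_attn.k_proj.k_scale", params))
    # -> layers.0.self_attn.attn.k_scale
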
@@ -432,12 +433,24 @@ class Llama4Model(LlamaModel):
             for param_name, weight_name, shard_id in stacked_params_mapping:
                 if weight_name not in name or "experts" in name:
                     continue
-                name = name.replace(weight_name, param_name)
+                # This check is for ModelOpt ckpts with kv cache quant enabled
+                if not (name.endswith(
+                    (".k_scale", ".v_scale")) and "self_attn" in name):
+                    name = name.replace(weight_name, param_name)
                 if is_pp_missing_parameter(name, self):
                     continue
+                if name.endswith("scale") and "expert" not in name:
+                    # Remapping the name of FP8 kv-scale.
+                    name = maybe_remap_kv_scale_name(name, params_dict)
+                    if name is None:
+                        continue
                 param = params_dict[name]
-                weight_loader = param.weight_loader
-                weight_loader(param, loaded_weight, shard_id)
+                weight_loader = getattr(param, "weight_loader",
+                                        default_weight_loader)
+                if weight_loader == default_weight_loader:
+                    weight_loader(param, loaded_weight)
+                else:
+                    weight_loader(param, loaded_weight, shard_id)
                 loaded_params.add(name)
                 break
             else:
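
A minimal standalone sketch of the loader dispatch introduced in the hunk above, assuming only torch: parameters that carry their own weight_loader attribute (vLLM-style fused/sharded parameters) receive the shard_id, while anything else, such as the kv-cache scales, falls back to a plain copy:

    import torch

    def default_weight_loader(param, loaded_weight):
        # Fallback loader: a straight copy, no sharding information needed.
        param.data.copy_(loaded_weight)

    def fused_qkv_loader(param, loaded_weight, shard_id):
        # Stand-in for a fused-parameter loader that must know which shard
        # ("q", "k" or "v") the incoming tensor fills; here it just copies.
        param.data.copy_(loaded_weight)

    plain_scale = torch.nn.Parameter(torch.zeros(()))  # e.g. a k_scale
    fused_qkv = torch.nn.Parameter(torch.zeros(4))
    fused_qkv.weight_loader = fused_qkv_loader          # vLLM-style attribute

    for param, loaded_weight, shard_id in ((plain_scale, torch.tensor(1.0), "q"),
                                           (fused_qkv, torch.ones(4), "q")):
        weight_loader = getattr(param, "weight_loader", default_weight_loader)
        if weight_loader == default_weight_loader:
            weight_loader(param, loaded_weight)         # scales take no shard_id
        else:
            weight_loader(param, loaded_weight, shard_id)
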
@@ -452,6 +465,44 @@ class Llama4Model(LlamaModel):
                 if not moe_loaded:
                     if is_pp_missing_parameter(name, self):
                         continue
+                    # Handle flat expert scale parameters that
+                    # don't match per-expert patterns
+                    if ("experts." in name and ("w13_input_scale" in name
+                                                or "w13_weight_scale" in name
+                                                or "w2_input_scale" in name
+                                                or "w2_weight_scale" in name)):
+                        # These are flat expert scales that apply to all experts
+                        param = params_dict[name]
+                        weight_loader = getattr(param, "weight_loader",
+                                                default_weight_loader)
+                        # Check for MoE-specific loading support via
+                        # attribute instead of expensive runtime reflection
+                        supports_moe = getattr(weight_loader,
+                                               'supports_moe_loading', False)
+                        if supports_moe:
+                            # This is a MoE weight loader
+                            if "w13_" in name:
+                                shard_id = "w1"
+                            elif "w2_" in name:
+                                shard_id = "w2"
+                            else:
+                                shard_id = "w1"
+                            weight_loader(param,
+                                          loaded_weight,
+                                          name,
+                                          shard_id=shard_id,
+                                          expert_id=0)
+                        else:
+                            # Regular weight loader (handles both
+                            # param.weight_loader and default_weight_loader)
+                            weight_loader(param, loaded_weight)
+                        loaded_params.add(name)
+                        continue
                     param = params_dict[name]
                     weight_loader = getattr(param, "weight_loader",
                                             default_weight_loader)