[Bug] Fix DeepSeek V3 weight loading caused by incorrect prefix (#34876)

Signed-off-by: wzhao18 <wzhao18.sz@gmail.com>
This commit is contained in:
Wei Zhao
2026-02-19 02:20:30 -05:00
committed by GitHub
parent 4611af1663
commit 7f51e93864

View File

@@ -716,7 +716,7 @@ class DeepSeekV2FusedQkvAProj(MergedColumnParallelLinear):
     def __init__(
         self,
         input_size: int,
-        output_size: int,
+        output_size: list[int],
         quant_config: QuantizationConfig | None = None,
         prefix: str = "",
     ):
@@ -726,7 +726,7 @@ class DeepSeekV2FusedQkvAProj(MergedColumnParallelLinear):
             bias=False,
             quant_config=quant_config,
             disable_tp=True,
-            prefix=f"{prefix}.kv_a_proj_with_mqa",
+            prefix=prefix,
         )
         # Check if the DeepSeek V3 fused A GEMM kernel can be used.