Add support for Eagle with separate lm-head and embed_tokens layers (#28549)

Signed-off-by: Eldar Kurtic <8884008+eldarkurtic@users.noreply.github.com>
Author: Eldar Kurtić
Date: 2025-11-15 15:12:02 +01:00
Committed by: GitHub
Parent: 085a525332
Commit: e439c784fa
12 changed files with 205 additions and 64 deletions


@@ -67,6 +67,10 @@ def test_mtp_load_model_unified(mock_get_model, mock_get_layers, mock_get_pp_gro
 mock_model = mock.MagicMock()
 mock_model.model.embed_tokens.weight.shape = (131072, 4096)
 mock_get_model.return_value = mock_model
+# MTP does not have its own embed_tokens or lm_head,
+# so it should share them with the target model.
+mock_model.has_own_embed_tokens = False
+mock_model.has_own_lm_head = False
 target_attn_layers = {"target_attn_1": mock.MagicMock()}
 all_attn_layers = {**target_attn_layers, "draft_attn_1": mock.MagicMock()}
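
For context, a minimal sketch of the weight-sharing behavior this test asserts. The helper name resolve_shared_layers and the attribute layout are assumptions for illustration, not the actual vLLM loader code:

def resolve_shared_layers(draft_model, target_model):
    # Hypothetical sketch: when the draft (Eagle/MTP) checkpoint ships
    # without its own embedding or LM-head weights, point the draft at
    # the target model's layers instead of allocating duplicates.
    if not getattr(draft_model, "has_own_embed_tokens", False):
        draft_model.model.embed_tokens = target_model.model.embed_tokens
    if not getattr(draft_model, "has_own_lm_head", False):
        draft_model.lm_head = target_model.lm_head
    return draft_model

When the draft checkpoint does provide its own layers (the case this PR adds support for), both flags would be True and the draft keeps its separately loaded weights.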