Add support for Eagle with separate lm-head and embed_tokens layers (#28549)
Signed-off-by: Eldar Kurtic <8884008+eldarkurtic@users.noreply.github.com>
@@ -85,7 +85,7 @@ from vllm.v1.attention.backends.mla.indexer import (
 )
 from vllm.v1.kv_cache_interface import KVCacheSpec, MLAAttentionSpec
 
-from .interfaces import MixtureOfExperts, SupportsLoRA, SupportsPP
+from .interfaces import MixtureOfExperts, SupportsEagle, SupportsLoRA, SupportsPP
 from .utils import (
     PPMissingLayer,
     is_pp_missing_parameter,
@@ -1311,7 +1311,7 @@ class DeepseekV2MixtureOfExperts(MixtureOfExperts):
 
 
 class DeepseekV2ForCausalLM(
-    nn.Module, SupportsPP, DeepseekV2MixtureOfExperts, SupportsLoRA
+    nn.Module, SupportsPP, DeepseekV2MixtureOfExperts, SupportsLoRA, SupportsEagle
 ):
     packed_modules_mapping = {
         "gate_up_proj": ["gate_proj", "up_proj"],
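The change shown above is small: DeepseekV2ForCausalLM imports and declares the SupportsEagle interface so that an EAGLE draft model, including one that ships its own lm_head and embed_tokens rather than sharing the target's, can be attached to it. The sketch below illustrates the general opt-in/marker-interface pattern under stated assumptions only; SupportsEagleSketch, TinyTargetModel, and can_attach_eagle are hypothetical names invented for illustration, and the real SupportsEagle imported from .interfaces in the diff may define additional members.

# Illustrative sketch only, not vLLM code: SupportsEagleSketch, TinyTargetModel,
# and can_attach_eagle are hypothetical names. The real SupportsEagle imported
# from .interfaces in the diff may define additional members.
from typing import Protocol, runtime_checkable

import torch
import torch.nn as nn


@runtime_checkable
class SupportsEagleSketch(Protocol):
    # Marker protocol: a target model exposes this flag to opt in to hosting
    # an EAGLE draft model during speculative decoding.
    supports_eagle: bool


class TinyTargetModel(nn.Module):
    # Stand-in for a target model such as DeepseekV2ForCausalLM. It owns its
    # own embed_tokens and lm_head, so an EAGLE draft with *separate* lm-head
    # and embed_tokens weights must load them itself instead of tying to these.
    supports_eagle = True

    def __init__(self, vocab_size: int = 32, hidden_size: int = 8) -> None:
        super().__init__()
        self.embed_tokens = nn.Embedding(vocab_size, hidden_size)
        self.lm_head = nn.Linear(hidden_size, vocab_size, bias=False)

    def forward(self, token_ids: torch.Tensor) -> torch.Tensor:
        # Token ids -> hidden states -> logits over the vocabulary.
        return self.lm_head(self.embed_tokens(token_ids))


def can_attach_eagle(model: nn.Module) -> bool:
    # Runtime check in the style of marker-interface consumers: speculative
    # decoding setup would only proceed for models that opt in.
    return isinstance(model, SupportsEagleSketch)


if __name__ == "__main__":
    print(can_attach_eagle(TinyTargetModel()))  # True

In the actual diff, opting in is exactly what the one-line edit to the base classes of DeepseekV2ForCausalLM accomplishes.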