Add support for Eagle with separate lm-head and embed_tokens layers (#28549)
Signed-off-by: Eldar Kurtic <8884008+eldarkurtic@users.noreply.github.com>
@@ -17,7 +17,7 @@ from vllm.model_executor.layers.vocab_parallel_embedding import VocabParallelEmb
 from vllm.model_executor.model_loader.weight_utils import default_weight_loader
 from vllm.model_executor.models.llama import LlamaDecoderLayer, LlamaForCausalLM
 
-from .utils import AutoWeightsLoader, maybe_prefix
+from .utils import AutoWeightsLoader, maybe_prefix, process_eagle_weight
 
 logger = init_logger(__name__)
 
@@ -179,6 +179,7 @@ class EagleLlamaForCausalLM(LlamaForCausalLM):
             name, loaded_weight = inputs
             if "lm_head" not in name:
                 name = "model." + name
+            process_eagle_weight(self, name)
             return name, loaded_weight
 
         loader = AutoWeightsLoader(
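
The body of process_eagle_weight lives in the models' utils module and is not shown in this diff. As a minimal, hypothetical sketch of what such a helper could do, assuming it only records whether the Eagle draft checkpoint ships its own lm_head and embed_tokens layers so the loader can avoid tying them to the target model (the attribute names below are assumptions, not the actual implementation):

    from torch import nn

    def process_eagle_weight(model: nn.Module, name: str) -> None:
        # Hypothetical sketch: flag weight names that indicate the Eagle
        # draft checkpoint carries its own output head / input embeddings,
        # so the loader can skip sharing them with the target model.
        if "lm_head" in name:
            model.has_own_lm_head = True  # assumed attribute name
        if "embed_tokens" in name:
            model.has_own_embed_tokens = True  # assumed attribute name

Called once per weight name during loading (as in the hunk above), a helper of this shape would let the rest of the loader decide whether to reuse the target model's lm_head and embed_tokens or load the separate ones from the checkpoint.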