Convert formatting to use ruff instead of yapf + isort (#26247)
Signed-off-by: Harry Mellor <19981378+hmellor@users.noreply.github.com>
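
Every hunk below is the same mechanical rewrap: yapf aligns continuation lines under the opening bracket, while ruff's formatter (Black-style) breaks a call onto one argument per line behind a magic trailing comma, and ruff's isort rules take over import sorting. A minimal sketch of the two styles, using an invented function (not from vLLM):

```python
def make_layer(hidden_size: int, num_heads: int, rms_eps: float) -> tuple:
    """Stand-in for any multi-argument constructor in the diff below."""
    return (hidden_size, num_heads, rms_eps)

# yapf: continuation arguments aligned under the opening parenthesis.
layer = make_layer(4096,
                   32,
                   1e-5)

# ruff format: one argument per line when a magic trailing comma is present.
layer = make_layer(
    4096,
    32,
    1e-5,
)
```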
@@ -30,11 +30,9 @@ from vllm.model_executor.layers.layernorm import RMSNorm
 from vllm.model_executor.layers.logits_processor import LogitsProcessor
 from vllm.model_executor.layers.quantization import QuantizationConfig
 from vllm.model_executor.layers.quantization.torchao import TorchAOConfig
-from vllm.model_executor.layers.vocab_parallel_embedding import (
-    VocabParallelEmbedding)
+from vllm.model_executor.layers.vocab_parallel_embedding import VocabParallelEmbedding
 from vllm.model_executor.model_loader.weight_utils import default_weight_loader
-from vllm.model_executor.models.llama4 import (Llama4DecoderLayer,
-                                               Llama4ForCausalLM)
+from vllm.model_executor.models.llama4 import Llama4DecoderLayer, Llama4ForCausalLM
 from vllm.model_executor.models.utils import extract_layer_index
 
 from .interfaces import SupportsMultiModal
@@ -45,7 +43,6 @@ logger = init_logger(__name__)
 
 @support_torch_compile
 class LlamaModel(nn.Module):
-
     def __init__(
         self,
         *,
@@ -55,8 +52,7 @@ class LlamaModel(nn.Module):
         quant_config: Optional[QuantizationConfig] = None,
     ) -> None:
         super().__init__()
-        self.config = (
-            vllm_config.speculative_config.draft_model_config.hf_config)
+        self.config = vllm_config.speculative_config.draft_model_config.hf_config
         self.validate_and_update_config(start_layer_id, quant_config)
         self.vocab_size = self.config.vocab_size
         self.embed_tokens = VocabParallelEmbedding(
@@ -65,18 +61,20 @@ class LlamaModel(nn.Module):
             prefix=maybe_prefix(prefix, "embed_tokens"),
         )
 
-        self.layers = nn.ModuleList([
-            Llama4DecoderLayer(
-                vllm_config=vllm_config,
-                prefix=maybe_prefix(prefix, f"layers.{i + start_layer_id}"),
-                config=self.config,
-            ) for i in range(self.config.num_hidden_layers)
-        ])
-        self.fc = torch.nn.Linear(self.config.hidden_size * 2,
-                                  self.config.hidden_size,
-                                  bias=False)
-        self.norm = RMSNorm(self.config.hidden_size,
-                            eps=self.config.rms_norm_eps)
+        self.layers = nn.ModuleList(
+            [
+                Llama4DecoderLayer(
+                    vllm_config=vllm_config,
+                    prefix=maybe_prefix(prefix, f"layers.{i + start_layer_id}"),
+                    config=self.config,
+                )
+                for i in range(self.config.num_hidden_layers)
+            ]
+        )
+        self.fc = torch.nn.Linear(
+            self.config.hidden_size * 2, self.config.hidden_size, bias=False
+        )
+        self.norm = RMSNorm(self.config.hidden_size, eps=self.config.rms_norm_eps)
 
     def get_input_embeddings(self, input_ids: torch.Tensor) -> torch.Tensor:
         return self.embed_tokens(input_ids)
@@ -90,8 +88,7 @@ class LlamaModel(nn.Module):
     ) -> tuple[torch.Tensor, torch.Tensor]:
         if inputs_embeds is None:
             inputs_embeds = self.get_input_embeddings(input_ids)
-        hidden_states = self.fc(
-            torch.cat((inputs_embeds, hidden_states), dim=-1))
+        hidden_states = self.fc(torch.cat((inputs_embeds, hidden_states), dim=-1))
         residual = None
         for layer in self.layers:
             hidden_states, residual = layer(
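
The `fc` call reformatted in this hunk is the EAGLE draft model's fusion step: the draft token embedding and the target model's hidden state are concatenated and projected back down to `hidden_size` before the draft decoder layers run. A self-contained sketch (batch and hidden sizes assumed for illustration):

```python
import torch

# Toy sizes, assumed for illustration only.
hidden_size = 8
fc = torch.nn.Linear(hidden_size * 2, hidden_size, bias=False)

inputs_embeds = torch.randn(4, hidden_size)  # draft-token embeddings
hidden_states = torch.randn(4, hidden_size)  # hidden states from the target model
fused = fc(torch.cat((inputs_embeds, hidden_states), dim=-1))
assert fused.shape == (4, hidden_size)
```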
@@ -102,8 +99,7 @@ class LlamaModel(nn.Module):
         hidden_states, _ = self.norm(hidden_states, residual)
         return hidden_states, hidden_states
 
-    def load_weights(self, weights: Iterable[tuple[str,
-                                                   torch.Tensor]]) -> set[str]:
+    def load_weights(self, weights: Iterable[tuple[str, torch.Tensor]]) -> set[str]:
         stacked_params_mapping = [
             # (param_name, shard_name, shard_id)
             (".qkv_proj", ".q_proj", "q"),
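
The `stacked_params_mapping` beginning at the end of this hunk drives fused-weight loading: checkpoints store the q/k/v projections as separate tensors, while the model holds one fused `qkv_proj` parameter filled shard by shard. A simplified sketch of the renaming step (vLLM's real loop also hands the shard id to the parameter's weight loader):

```python
# Simplified; the q/k/v entries extend the mapping shown above, and the
# real loader copies each shard into its slice of the fused parameter.
stacked_params_mapping = [
    # (param_name, shard_name, shard_id)
    (".qkv_proj", ".q_proj", "q"),
    (".qkv_proj", ".k_proj", "k"),
    (".qkv_proj", ".v_proj", "v"),
]

name = "model.layers.0.self_attn.k_proj.weight"
for param_name, shard_name, shard_id in stacked_params_mapping:
    if shard_name in name:
        name = name.replace(shard_name, param_name)
        break
assert name == "model.layers.0.self_attn.qkv_proj.weight"
```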
@@ -126,69 +122,65 @@ class LlamaModel(nn.Module):
                     break
             else:
                 # if PP disabled then draft will share embed with target
-                if get_pp_group().world_size == 1 and \
-                    "embed_tokens." in name:
+                if get_pp_group().world_size == 1 and "embed_tokens." in name:
                     continue
                 param = params_dict[name]
-                weight_loader = getattr(param, "weight_loader",
-                                        default_weight_loader)
+                weight_loader = getattr(param, "weight_loader", default_weight_loader)
                 weight_loader(param, loaded_weight)
             loaded_params.add(name)
         for name in params_dict:
             # if PP disabled then draft will share embed with target
-            if get_pp_group().world_size == 1 and \
-                "embed_tokens." in name:
+            if get_pp_group().world_size == 1 and "embed_tokens." in name:
                 continue
             assert name in loaded_params, f"{name} is not loaded!"
         return loaded_params
 
     def validate_and_update_config(
-            self,
-            start_layer_id: int,
-            quant_config: Optional[QuantizationConfig] = None) -> None:
+        self, start_layer_id: int, quant_config: Optional[QuantizationConfig] = None
+    ) -> None:
         # yoco and moe is not supported by draft model yet
         assert self.config.yoco_global_kv_layer is None
         assert self.config.yoco_local_kv_layer is None
         assert len(self.config.moe_layers) == 0
         # draft model layer index is increased by start_layer_id,
         # so we need to pad relevant configs accordingly
-        self.config.no_rope_layers = [
-            0
-        ] * start_layer_id + self.config.no_rope_layers
+        self.config.no_rope_layers = [0] * start_layer_id + self.config.no_rope_layers
         # currently only TorchAO quantization is supported
         if isinstance(quant_config, TorchAOConfig):
 
             def pad_layer_name(layer: str) -> str:
                 layer_index = extract_layer_index(layer)
-                return layer.replace(str(layer_index),
-                                     str(layer_index + start_layer_id))
+                return layer.replace(
+                    str(layer_index), str(layer_index + start_layer_id)
+                )
 
             quant_config.torchao_config.module_fqn_to_config = {
                 pad_layer_name(layer): quantization
-                for layer, quantization in
-                quant_config.torchao_config.module_fqn_to_config.items()
+                for layer, quantization in quant_config.torchao_config.module_fqn_to_config.items()
             }
 
 
 class EagleLlama4ForCausalLM(Llama4ForCausalLM):
-
     def __init__(self, *, vllm_config: VllmConfig, prefix: str = ""):
         nn.Module.__init__(self)
-        self.config = (
-            vllm_config.speculative_config.draft_model_config.hf_config)
+        self.config = vllm_config.speculative_config.draft_model_config.hf_config
         target_layer_num = vllm_config.model_config.get_num_layers(
-            vllm_config.parallel_config)
+            vllm_config.parallel_config
+        )
         # draft model quantization config may differ from target model
         quant_config = VllmConfig.get_quantization_config(
-            vllm_config.speculative_config.draft_model_config,
-            vllm_config.load_config)
-        self.model = LlamaModel(vllm_config=vllm_config,
-                                prefix="model",
-                                start_layer_id=target_layer_num,
-                                quant_config=quant_config)
+            vllm_config.speculative_config.draft_model_config, vllm_config.load_config
+        )
+        self.model = LlamaModel(
+            vllm_config=vllm_config,
+            prefix="model",
+            start_layer_id=target_layer_num,
+            quant_config=quant_config,
+        )
         logit_scale = getattr(self.config, "logit_scale", 1.0)
-        self.logits_processor = LogitsProcessor(self.config.vocab_size,
-                                                scale=logit_scale)
+        self.logits_processor = LogitsProcessor(
+            self.config.vocab_size, scale=logit_scale
+        )
 
     def get_language_model(self) -> torch.nn.Module:
         return self.model
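
The padding logic reformatted above exists because the draft model's layers sit after the target model's `start_layer_id` layers, so per-layer config lists and TorchAO module FQNs must be shifted to match the global layer numbering. A toy illustration (layer counts and names assumed):

```python
import re

start_layer_id = 48  # assumed: layer count of the target model

# Per-layer lists are left-padded so the draft's entries land at 48, 49, ...
no_rope_layers = [1, 0]  # the draft model's own two layers; values invented
padded = [0] * start_layer_id + no_rope_layers
assert padded[48:] == [1, 0]

def extract_layer_index(name: str) -> int:
    """Simplified stand-in for vLLM's extract_layer_index."""
    return int(re.search(r"\.(\d+)\.", name).group(1))

def pad_layer_name(layer: str) -> str:
    layer_index = extract_layer_index(layer)
    return layer.replace(str(layer_index), str(layer_index + start_layer_id))

assert pad_layer_name("model.layers.0.self_attn") == "model.layers.48.self_attn"
```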
@@ -204,13 +196,10 @@ class EagleLlama4ForCausalLM(Llama4ForCausalLM):
     ) -> tuple[torch.Tensor, torch.Tensor]:
         return self.model(input_ids, positions, hidden_states, inputs_embeds)
 
-    def load_weights(self, weights: Iterable[tuple[str,
-                                                   torch.Tensor]]) -> None:
-
+    def load_weights(self, weights: Iterable[tuple[str, torch.Tensor]]) -> None:
        def transform(inputs):
            name, loaded_weight = inputs
-            name, weight = self.permute_qk_weight_for_rotary(
-                name, loaded_weight)
+            name, weight = self.permute_qk_weight_for_rotary(name, loaded_weight)
            if "lm_head" not in name:
                name = "model." + name
            return name, weight
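
Finally, the `transform` helper in the last hunk adapts checkpoint names to the wrapper class: every draft weight is re-homed under the inner `model.` submodule except `lm_head`, which stays at the top level (the real helper also permutes q/k weights for vLLM's rotary-embedding layout via `permute_qk_weight_for_rotary`). A reduced sketch of just the renaming:

```python
def transform_name(name: str) -> str:
    """Renaming only; the weight permutation is omitted from this sketch."""
    if "lm_head" not in name:
        name = "model." + name
    return name

assert transform_name("layers.0.mlp.up_proj.weight") == "model.layers.0.mlp.up_proj.weight"
assert transform_name("lm_head.weight") == "lm_head.weight"
```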