Convert formatting to use ruff instead of yapf + isort (#26247)

Signed-off-by: Harry Mellor <19981378+hmellor@users.noreply.github.com>
Harry Mellor authored on 2025-10-05 15:06:22 +01:00; committed by GitHub
parent 17edd8a807
commit d6953beb91
1508 changed files with 115244 additions and 94146 deletions
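Most of this file's diff is mechanical: yapf's aligned-continuation style is replaced by ruff's Black-compatible formatter (arguments collapsed onto one line when they fit, otherwise one per line with a trailing comma), backslash line continuations are dropped, and single-quoted strings are normalized to double quotes. A minimal sketch of the recurring pattern, using the torch.nn.Linear call from this file:

# Before (yapf): continuation arguments aligned under the opening parenthesis
self.fc = torch.nn.Linear(self.config.hidden_size * 3,
                          self.config.hidden_size,
                          bias=False)

# After (ruff format): arguments joined onto one line when they fit the line length,
# wrapped in a hanging indent with a closing parenthesis on its own line otherwise
self.fc = torch.nn.Linear(
    self.config.hidden_size * 3, self.config.hidden_size, bias=False
)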


@@ -13,13 +13,14 @@ from vllm.logger import init_logger
 from vllm.model_executor.layers.layernorm import RMSNorm
 from vllm.model_executor.layers.linear import QKVParallelLinear
 from vllm.model_executor.layers.logits_processor import LogitsProcessor
-from vllm.model_executor.layers.quantization.base_config import (
-    QuantizationConfig)
+from vllm.model_executor.layers.quantization.base_config import QuantizationConfig
 from vllm.model_executor.layers.vocab_parallel_embedding import (
-    DEFAULT_VOCAB_PADDING_SIZE, ParallelLMHead, VocabParallelEmbedding)
+    DEFAULT_VOCAB_PADDING_SIZE,
+    ParallelLMHead,
+    VocabParallelEmbedding,
+)
 from vllm.model_executor.model_loader.weight_utils import default_weight_loader
-from vllm.model_executor.models.llama import (LlamaDecoderLayer,
-                                              LlamaForCausalLM)
+from vllm.model_executor.models.llama import LlamaDecoderLayer, LlamaForCausalLM
 from .utils import AutoWeightsLoader, maybe_prefix
@@ -27,11 +28,12 @@ logger = init_logger(__name__)
 class LlamaDecoderLayer(LlamaDecoderLayer):
-    def __init__(self,
-                 vllm_config: VllmConfig,
-                 prefix: str = "",
-                 config: Optional[LlamaConfig] = None) -> None:
+    def __init__(
+        self,
+        vllm_config: VllmConfig,
+        prefix: str = "",
+        config: Optional[LlamaConfig] = None,
+    ) -> None:
         super().__init__(vllm_config, prefix=prefix, config=config)
         config = config or vllm_config.model_config.hf_config
@@ -55,26 +57,27 @@ class LlamaDecoderLayer(LlamaDecoderLayer):
         else:
             self._residual_norm = self._norm_after_residual
-    def get_quant_config(
-            self, vllm_config: VllmConfig) -> Optional[QuantizationConfig]:
+    def get_quant_config(self, vllm_config: VllmConfig) -> Optional[QuantizationConfig]:
         """Use drafter's quantization config instead of verifier's."""
         draft_model_config = vllm_config.speculative_config.draft_model_config
         draft_load_config = vllm_config.load_config
-        return VllmConfig.get_quantization_config(
-            draft_model_config,
-            draft_load_config) if draft_model_config else None
+        return (
+            VllmConfig.get_quantization_config(draft_model_config, draft_load_config)
+            if draft_model_config
+            else None
+        )
     def _norm_before_residual(
-            self,
-            hidden_states: torch.Tensor) -> tuple[torch.Tensor, torch.Tensor]:
+        self, hidden_states: torch.Tensor
+    ) -> tuple[torch.Tensor, torch.Tensor]:
         hidden_states = self.hidden_norm(hidden_states)
         residual = hidden_states
         return hidden_states, residual
     def _norm_after_residual(
-            self,
-            hidden_states: torch.Tensor) -> tuple[torch.Tensor, torch.Tensor]:
+        self, hidden_states: torch.Tensor
+    ) -> tuple[torch.Tensor, torch.Tensor]:
         residual = hidden_states
         hidden_states = self.hidden_norm(hidden_states)
         return hidden_states, residual
@@ -86,11 +89,9 @@ class LlamaDecoderLayer(LlamaDecoderLayer):
         hidden_states: torch.Tensor,
         residual: Optional[torch.Tensor],
     ) -> tuple[torch.Tensor, torch.Tensor]:
         embeds = self.input_layernorm(embeds)
-        hidden_states, residual = self._residual_norm(
-            hidden_states=hidden_states)
+        hidden_states, residual = self._residual_norm(hidden_states=hidden_states)
         hidden_states = torch.cat([embeds, hidden_states], dim=-1)
         # Self Attention
@@ -99,8 +100,7 @@ class LlamaDecoderLayer(LlamaDecoderLayer):
             hidden_states=hidden_states,
         )
-        hidden_states, residual = self.post_attention_layernorm(
-            hidden_states, residual)
+        hidden_states, residual = self.post_attention_layernorm(hidden_states, residual)
         # Fully Connected
         hidden_states = self.mlp(hidden_states)
@@ -109,7 +109,6 @@ class LlamaDecoderLayer(LlamaDecoderLayer):
 class LlamaModel(nn.Module):
     def __init__(
         self,
         *,
@@ -118,8 +117,7 @@ class LlamaModel(nn.Module):
prefix: str = "",
) -> None:
super().__init__()
self.config = vllm_config. \
speculative_config.draft_model_config.hf_config
self.config = vllm_config.speculative_config.draft_model_config.hf_config
self.vocab_size = self.config.vocab_size
current_vllm_config = get_current_vllm_config()
@@ -130,21 +128,23 @@ class LlamaModel(nn.Module):
prefix=maybe_prefix(prefix, "embed_tokens"),
)
self.layers = nn.ModuleList([
LlamaDecoderLayer(
current_vllm_config,
prefix=maybe_prefix(prefix, f"layers.{start_layer_id}"),
config=self.config,
)
])
self.layers = nn.ModuleList(
[
LlamaDecoderLayer(
current_vllm_config,
prefix=maybe_prefix(prefix, f"layers.{start_layer_id}"),
config=self.config,
)
]
)
if hasattr(self.config, "target_hidden_size"):
self.fc = torch.nn.Linear(self.config.target_hidden_size * 3,
self.config.hidden_size,
bias=False)
self.fc = torch.nn.Linear(
self.config.target_hidden_size * 3, self.config.hidden_size, bias=False
)
else:
self.fc = torch.nn.Linear(self.config.hidden_size * 3,
self.config.hidden_size,
bias=False)
self.fc = torch.nn.Linear(
self.config.hidden_size * 3, self.config.hidden_size, bias=False
)
self.norm = RMSNorm(
self.config.hidden_size,
eps=self.config.rms_norm_eps,
@@ -175,8 +175,7 @@ class LlamaModel(nn.Module):
         hidden_states, hidden_prenorm = self.norm(hidden_states, residual)
         return hidden_states, hidden_prenorm
-    def load_weights(self, weights: Iterable[tuple[str,
-                                                   torch.Tensor]]) -> set[str]:
+    def load_weights(self, weights: Iterable[tuple[str, torch.Tensor]]) -> set[str]:
         stacked_params_mapping = [
             # (param_name, shard_name, shard_id)
             (".qkv_proj", ".q_proj", "q"),
@@ -188,8 +187,8 @@ class LlamaModel(nn.Module):
         params_dict = dict(self.named_parameters())
         loaded_params: set[str] = set()
         for name, loaded_weight in weights:
-            if 'midlayer.' in name:
-                name = name.replace('midlayer.', 'layers.0.')
+            if "midlayer." in name:
+                name = name.replace("midlayer.", "layers.0.")
             for param_name, weight_name, shard_id in stacked_params_mapping:
                 if weight_name not in name:
                     continue
@@ -200,33 +199,31 @@ class LlamaModel(nn.Module):
                 break
             else:
                 param = params_dict[name]
-                weight_loader = getattr(param, "weight_loader",
-                                        default_weight_loader)
+                weight_loader = getattr(param, "weight_loader", default_weight_loader)
                 weight_loader(param, loaded_weight)
             loaded_params.add(name)
         return loaded_params
 class Eagle3LlamaForCausalLM(LlamaForCausalLM):
     def __init__(self, *, vllm_config: VllmConfig, prefix: str = ""):
         nn.Module.__init__(self)
-        self.config = vllm_config. \
-            speculative_config.draft_model_config.hf_config
+        self.config = vllm_config.speculative_config.draft_model_config.hf_config
         # Ensure draft_vocab_size is set
         # default to the base vocab size when absent
         if getattr(self.config, "draft_vocab_size", None) is None:
             base_vocab_size = getattr(self.config, "vocab_size", None)
             self.config.draft_vocab_size = base_vocab_size
         target_layer_num = vllm_config.model_config.get_num_layers(
-            vllm_config.parallel_config)
+            vllm_config.parallel_config
+        )
         # Store target layer count in draft config for
         # proper layer_types indexing in draft models
         self.config.target_layer_count = target_layer_num
-        self.model = LlamaModel(vllm_config=vllm_config,
-                                prefix="model",
-                                start_layer_id=target_layer_num)
+        self.model = LlamaModel(
+            vllm_config=vllm_config, prefix="model", start_layer_id=target_layer_num
+        )
         logit_scale = getattr(self.config, "logit_scale", 1.0)
         self.lm_head = ParallelLMHead(
@@ -234,9 +231,11 @@ class Eagle3LlamaForCausalLM(LlamaForCausalLM):
             self.config.hidden_size,
             org_num_embeddings=self.config.draft_vocab_size,
             padding_size=(DEFAULT_VOCAB_PADDING_SIZE),
-            prefix=maybe_prefix(prefix, "lm_head"))
-        self.logits_processor = LogitsProcessor(self.config.draft_vocab_size,
-                                                scale=logit_scale)
+            prefix=maybe_prefix(prefix, "lm_head"),
+        )
+        self.logits_processor = LogitsProcessor(
+            self.config.draft_vocab_size, scale=logit_scale
+        )
         self.draft_id_to_target_id = nn.Parameter(
             torch.zeros(self.config.draft_vocab_size, dtype=torch.long),
             requires_grad=False,
@@ -260,17 +259,21 @@ class Eagle3LlamaForCausalLM(LlamaForCausalLM):
     ) -> Optional[torch.Tensor]:
         logits = self.logits_processor(self.lm_head, hidden_states)
         if self.draft_id_to_target_id is None:
-            assert logits.shape[1] == self.config.vocab_size, \
-                "Expected logits to have shape " \
+            assert logits.shape[1] == self.config.vocab_size, (
+                "Expected logits to have shape "
                 f"(*, {self.config.vocab_size}), but got {logits.shape}"
+            )
             return logits
         base = torch.arange(self.config.draft_vocab_size, device=logits.device)
         targets = base + self.draft_id_to_target_id
-        logits_new = logits.new_full((
-            logits.shape[0],
-            self.config.vocab_size,
-        ), float('-inf'))
+        logits_new = logits.new_full(
+            (
+                logits.shape[0],
+                self.config.vocab_size,
+            ),
+            float("-inf"),
+        )
         logits_new[:, targets] = logits
         return logits_new