Convert formatting to use ruff instead of yapf + isort (#26247)
Signed-off-by: Harry Mellor <19981378+hmellor@users.noreply.github.com>
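Most of this diff is a mechanical reflow: yapf aligned wrapped arguments under the opening parenthesis, while ruff's formatter (black-style) breaks immediately after the parenthesis and uses a four-space hanging indent, so each reformatted call site appears below first in the old style and then in the new one. A minimal sketch of the two styles on a made-up function (the names are illustrative and not taken from this diff):

def example_call(first: int, second: int, third: int) -> int:
    # Placeholder target, defined only so the snippet runs on its own.
    return first + second + third


# yapf + isort era: continuation lines aligned under the opening parenthesis.
before = example_call(1,
                      2,
                      3)

# ruff format: break after "(", four-space hanging indent, and a magic
# trailing comma that keeps the call exploded onto one argument per line.
after = example_call(
    1,
    2,
    3,
)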
@@ -23,6 +23,7 @@
# See the License for the specific language governing permissions and
# limitations under the License.
"""Inference-only PhiMoE model."""

from collections.abc import Iterable
from itertools import islice
from typing import Optional, Union
@@ -36,26 +37,36 @@ from vllm.compilation.decorators import support_torch_compile
from vllm.config import CacheConfig, VllmConfig
from vllm.distributed import get_pp_group, get_tensor_model_parallel_world_size
from vllm.model_executor.layers.fused_moe import FusedMoE
from vllm.model_executor.layers.linear import (QKVParallelLinear,
                                               ReplicatedLinear,
                                               RowParallelLinear)
from vllm.model_executor.layers.linear import (
    QKVParallelLinear,
    ReplicatedLinear,
    RowParallelLinear,
)
from vllm.model_executor.layers.logits_processor import LogitsProcessor
from vllm.model_executor.layers.quantization import QuantizationConfig
from vllm.model_executor.layers.rotary_embedding import get_rope
from vllm.model_executor.layers.vocab_parallel_embedding import (
    DEFAULT_VOCAB_PADDING_SIZE, ParallelLMHead, VocabParallelEmbedding)
    DEFAULT_VOCAB_PADDING_SIZE,
    ParallelLMHead,
    VocabParallelEmbedding,
)
from vllm.model_executor.model_loader.weight_utils import (
    default_weight_loader, maybe_remap_kv_scale_name)
    default_weight_loader,
    maybe_remap_kv_scale_name,
)
from vllm.sequence import IntermediateTensors

from .interfaces import SupportsLoRA, SupportsPP
from .utils import (AutoWeightsLoader, is_pp_missing_parameter,
                    make_empty_intermediate_tensors_factory, make_layers,
                    maybe_prefix)
from .utils import (
    AutoWeightsLoader,
    is_pp_missing_parameter,
    make_empty_intermediate_tensors_factory,
    make_layers,
    maybe_prefix,
)


class PhiMoEConfig(PretrainedConfig):

    model_type = "phimoe"
    keys_to_ignore_at_inference = ["past_key_values"]

@@ -128,7 +139,6 @@ class PhiMoEConfig(PretrainedConfig):


class mp(torch.autograd.Function):

    @staticmethod
    def forward(
        ctx,
@@ -173,8 +183,9 @@ def sparsemixer(scores, jitter_eps=0.01):
        # compute mask for sparsity
        mask_logits_threshold, max_ind = scores.max(dim=-1, keepdim=True)
        factor = scores.abs().clamp(min=mask_logits_threshold)
        mask_logits_threshold = ((mask_logits_threshold - scores) /
                                 factor) > (2 * jitter_eps)
        mask_logits_threshold = ((mask_logits_threshold - scores) / factor) > (
            2 * jitter_eps
        )

    # apply mask
    masked_gates = scores.masked_fill(mask_logits_threshold, float("-inf"))
@@ -195,24 +206,21 @@ def sparsemixer(scores, jitter_eps=0.01):
    )
    with torch.no_grad():
        # compute mask for sparsity
        mask_logits_threshold, max_ind = masked_scores.max(dim=-1,
                                                           keepdim=True)
        mask_logits_threshold, max_ind = masked_scores.max(dim=-1, keepdim=True)
        factor = scores.abs().clamp(min=mask_logits_threshold)
        mask_logits_threshold = ((mask_logits_threshold - scores) /
                                 factor) > (2 * jitter_eps)
        mask_logits_threshold = ((mask_logits_threshold - scores) / factor) > (
            2 * jitter_eps
        )

    # apply mask
    masked_gates_top2 = masked_scores.masked_fill(mask_logits_threshold,
                                                  float("-inf"))
    masked_gates_top2 = masked_scores.masked_fill(mask_logits_threshold, float("-inf"))
    selected_experts_top2 = max_ind
    # compute scores for gradients
    masked_gates_top2 = torch.softmax(masked_gates_top2, dim=-1)
    multiplier_top2 = masked_gates_top2.gather(dim=-1,
                                               index=selected_experts_top2)
    multiplier_top2 = masked_gates_top2.gather(dim=-1, index=selected_experts_top2)

    multiplier = torch.concat((multiplier, multiplier_top2), dim=-1)
    selected_experts = torch.concat((selected_experts, selected_experts_top2),
                                    dim=-1)
    selected_experts = torch.concat((selected_experts, selected_experts_top2), dim=-1)

    return (
        multiplier,
@@ -226,8 +234,7 @@ def phimoe_routing_function(
    topk: int,
    renormalize: bool,
):
    assert hidden_states.shape[0] == gating_output.shape[0], (
        "Number of tokens mismatch")
    assert hidden_states.shape[0] == gating_output.shape[0], "Number of tokens mismatch"
    assert topk == 2, "Only top-2 routing is supported"
    assert renormalize is False, "Renormalization is not supported"

@@ -278,7 +285,8 @@ class PhiMoE(nn.Module):
            quant_config=quant_config,
            tp_size=tp_size,
            custom_routing_function=phimoe_routing_function,
            prefix=f"{prefix}.experts")
            prefix=f"{prefix}.experts",
        )

    def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
        # NOTE: hidden_states can have either 1D or 2D shape.
@@ -291,7 +299,6 @@ class PhiMoE(nn.Module):


class PhiMoEAttention(nn.Module):

    def __init__(
        self,
        hidden_size: int,
@@ -376,7 +383,6 @@ class PhiMoEAttention(nn.Module):


class PhiMoEDecoderLayer(nn.Module):

    def __init__(
        self,
        config: PhiMoEConfig,
@@ -393,8 +399,9 @@ class PhiMoEDecoderLayer(nn.Module):
            num_heads=config.num_attention_heads,
            max_position=config.max_position_embeddings,
            num_kv_heads=config.num_key_value_heads,
            head_dim=getattr(config, "head_dim",
                             self.hidden_size // config.num_attention_heads),
            head_dim=getattr(
                config, "head_dim", self.hidden_size // config.num_attention_heads
            ),
            rope_theta=rope_theta,
            cache_config=cache_config,
            quant_config=quant_config,
@@ -409,12 +416,12 @@ class PhiMoEDecoderLayer(nn.Module):
            quant_config=quant_config,
            prefix=f"{prefix}.block_sparse_moe",
        )
        self.input_layernorm = nn.LayerNorm(config.hidden_size,
                                            eps=config.rms_norm_eps,
                                            elementwise_affine=True)
        self.post_attention_layernorm = nn.LayerNorm(config.hidden_size,
                                                     eps=config.rms_norm_eps,
                                                     elementwise_affine=True)
        self.input_layernorm = nn.LayerNorm(
            config.hidden_size, eps=config.rms_norm_eps, elementwise_affine=True
        )
        self.post_attention_layernorm = nn.LayerNorm(
            config.hidden_size, eps=config.rms_norm_eps, elementwise_affine=True
        )

    def forward(
        self,
@@ -444,7 +451,6 @@ class PhiMoEDecoderLayer(nn.Module):

@support_torch_compile
class PhiMoEModel(nn.Module):

    def __init__(self, *, vllm_config: VllmConfig, prefix: str = ""):
        super().__init__()

@@ -453,8 +459,11 @@ class PhiMoEModel(nn.Module):
        quant_config = vllm_config.quant_config
        lora_config = vllm_config.lora_config

        lora_vocab = ((lora_config.lora_extra_vocab_size *
                       (lora_config.max_loras or 1)) if lora_config else 0)
        lora_vocab = (
            (lora_config.lora_extra_vocab_size * (lora_config.max_loras or 1))
            if lora_config
            else 0
        )
        self.vocab_size = config.vocab_size + lora_vocab
        self.org_vocab_size = config.vocab_size
        self.config = config
@@ -468,15 +477,17 @@ class PhiMoEModel(nn.Module):
        self.start_layer, self.end_layer, self.layers = make_layers(
            config.num_hidden_layers,
            lambda prefix: PhiMoEDecoderLayer(
                config, cache_config, quant_config, prefix=prefix),
            prefix=f"{prefix}.layers")
        self.norm = nn.LayerNorm(config.hidden_size,
                                 eps=config.rms_norm_eps,
                                 elementwise_affine=True)
                config, cache_config, quant_config, prefix=prefix
            ),
            prefix=f"{prefix}.layers",
        )
        self.norm = nn.LayerNorm(
            config.hidden_size, eps=config.rms_norm_eps, elementwise_affine=True
        )

        self.make_empty_intermediate_tensors = (
            make_empty_intermediate_tensors_factory(
                ["hidden_states", "residual"], config.hidden_size))
        self.make_empty_intermediate_tensors = make_empty_intermediate_tensors_factory(
            ["hidden_states", "residual"], config.hidden_size
        )

    def get_input_embeddings(self, input_ids: torch.Tensor) -> torch.Tensor:
        return self.embed_tokens(input_ids)
@@ -507,10 +518,9 @@ class PhiMoEModel(nn.Module):
            )

        if not get_pp_group().is_last_rank:
            return IntermediateTensors({
                "hidden_states": hidden_states,
                "residual": residual
            })
            return IntermediateTensors(
                {"hidden_states": hidden_states, "residual": residual}
            )

        hidden_states = self.norm(hidden_states)
        return hidden_states
@@ -523,8 +533,7 @@ class PhiMoEModel(nn.Module):
            num_experts=self.config.num_local_experts,
        )

    def load_weights(self, weights: Iterable[tuple[str,
                                                   torch.Tensor]]) -> set[str]:
    def load_weights(self, weights: Iterable[tuple[str, torch.Tensor]]) -> set[str]:
        stacked_params_mapping = [
            # (param_name, shard_name, shard_id)
            ("qkv_proj", "q_proj", "q"),
@@ -536,14 +545,15 @@ class PhiMoEModel(nn.Module):
        loaded_params: set[str] = set()
        expert_params_mapping = self.get_expert_mapping()
        for name, loaded_weight in weights:
            if (self.quant_config is not None and
                (scale_name := self.quant_config.get_cache_scale(name))):
            if self.quant_config is not None and (
                scale_name := self.quant_config.get_cache_scale(name)
            ):
                # Loading kv cache quantization scales
                param = params_dict[scale_name]
                weight_loader = getattr(param, "weight_loader",
                                        default_weight_loader)
                loaded_weight = (loaded_weight if loaded_weight.dim() == 0 else
                                 loaded_weight[0])
                weight_loader = getattr(param, "weight_loader", default_weight_loader)
                loaded_weight = (
                    loaded_weight if loaded_weight.dim() == 0 else loaded_weight[0]
                )
                weight_loader(param, loaded_weight)
                loaded_params.add(scale_name)
                continue
@@ -594,8 +604,9 @@ class PhiMoEModel(nn.Module):
                    continue

                param = params_dict[name]
                weight_loader = getattr(param, "weight_loader",
                                        default_weight_loader)
                weight_loader = getattr(
                    param, "weight_loader", default_weight_loader
                )
                weight_loader(param, loaded_weight)
                loaded_params.add(name)
        return loaded_params
@@ -627,8 +638,9 @@ class PhiMoEForCausalLM(nn.Module, SupportsLoRA, SupportsPP):
        self.lora_config = lora_config
        self.quant_config = vllm_config.quant_config

        self.model = PhiMoEModel(vllm_config=vllm_config,
                                 prefix=maybe_prefix(prefix, "model"))
        self.model = PhiMoEModel(
            vllm_config=vllm_config, prefix=maybe_prefix(prefix, "model")
        )
        self.unpadded_vocab_size = config.vocab_size
        if lora_config:
            self.unpadded_vocab_size += lora_config.lora_extra_vocab_size
@@ -640,16 +652,20 @@ class PhiMoEForCausalLM(nn.Module, SupportsLoRA, SupportsPP):
                DEFAULT_VOCAB_PADDING_SIZE
                # We need bigger padding if using lora for kernel
                # compatibility
                if not lora_config else lora_config.lora_vocab_padding_size),
                if not lora_config
                else lora_config.lora_vocab_padding_size
            ),
            quant_config=None,
            bias=True,
            prefix=maybe_prefix(prefix, "lm_head"),
        )
        self.logits_processor = LogitsProcessor(self.unpadded_vocab_size,
                                                config.vocab_size)
        self.logits_processor = LogitsProcessor(
            self.unpadded_vocab_size, config.vocab_size
        )

        self.make_empty_intermediate_tensors = (
            self.model.make_empty_intermediate_tensors)
            self.model.make_empty_intermediate_tensors
        )

    def get_input_embeddings(self, input_ids: torch.Tensor) -> torch.Tensor:
        return self.model.get_input_embeddings(input_ids)
@@ -661,16 +677,16 @@ class PhiMoEForCausalLM(nn.Module, SupportsLoRA, SupportsPP):
        intermediate_tensors: Optional[IntermediateTensors] = None,
        inputs_embeds: Optional[torch.Tensor] = None,
    ) -> Union[torch.Tensor, IntermediateTensors]:
        hidden_states = self.model(input_ids, positions, intermediate_tensors,
                                   inputs_embeds)
        hidden_states = self.model(
            input_ids, positions, intermediate_tensors, inputs_embeds
        )
        return hidden_states

    def compute_logits(self, hidden_states: torch.Tensor) -> torch.Tensor:
        logits = self.logits_processor(self.lm_head, hidden_states)
        return logits

    def load_weights(self, weights: Iterable[tuple[str,
                                                   torch.Tensor]]) -> set[str]:
    def load_weights(self, weights: Iterable[tuple[str, torch.Tensor]]) -> set[str]:
        loader = AutoWeightsLoader(self)
        return loader.load_weights(weights)