[MM] Align the prefix of MMEncoderAttention with Attention (#33750)
Signed-off-by: shen-shanshan <467638484@qq.com>
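Decoder layers in vLLM typically construct their core Attention with prefix=f"{prefix}.attn", so the attention sub-module is addressed as "<block prefix>.attn" when per-module settings (e.g. quantization overrides) are looked up by name. This commit applies the same convention to the multimodal encoder attention layers, which previously forwarded the parent's prefix unchanged. A minimal, self-contained sketch of the naming pattern follows; the classes below are illustrative stand-ins, not the actual vLLM implementations.

import torch
import torch.nn as nn


class StubEncoderAttention(nn.Module):
    """Stand-in for the MM encoder attention layer; only records its prefix."""

    def __init__(self, num_heads: int, head_dim: int, scale: float, *, prefix: str):
        super().__init__()
        self.prefix = prefix  # module path, e.g. matched against per-layer configs

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        return x


class StubVisionBlock(nn.Module):
    def __init__(self, num_heads: int = 16, head_dim: int = 64, *, prefix: str = ""):
        super().__init__()
        # Before this change: prefix=prefix (the parent block's own path).
        # After: the attention is scoped under f"{prefix}.attn", matching the
        # convention used when constructing the core Attention layer.
        self.attn = StubEncoderAttention(
            num_heads,
            head_dim,
            head_dim**-0.5,
            prefix=f"{prefix}.attn",
        )


block = StubVisionBlock(prefix="visual.blocks.0")
print(block.attn.prefix)  # -> visual.blocks.0.attn

With this, every MM encoder attention ends up under a "<block prefix>.attn" path, the same shape of path produced for the language-model Attention layers. The diff below applies that change per model.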
@@ -130,7 +130,7 @@ class AIMv2Attention(nn.Module):
             self.num_heads_per_partition,
             self.head_dim,
             self.scale,
-            prefix=prefix,
+            prefix=f"{prefix}.attn",
         )
 
     def forward(self, x: torch.Tensor) -> torch.Tensor:
@@ -126,7 +126,7 @@ class BlipAttention(nn.Module):
             self.num_heads_per_partition,
             self.head_dim,
             self.scale,
-            prefix=prefix,
+            prefix=f"{prefix}.attn",
         )
 
     def _shape(self, tensor: torch.Tensor, seq_len: int, bsz: int):
@@ -296,7 +296,7 @@ class Glm4vVisionAttention(nn.Module):
             num_heads=self.num_attention_heads_per_partition,
             head_size=self.hidden_size_per_attention_head,
             scale=self.hidden_size_per_attention_head**-0.5,
-            prefix=prefix,
+            prefix=f"{prefix}.attn",
         )
 
         self.apply_rotary_emb = ApplyRotaryEmb(enforce_enable=True)
@@ -139,7 +139,7 @@ class EVA2CLIPAttention(nn.Module):
             self.num_heads_per_rank,
             self.head_dim,
             self.scale,
-            prefix=prefix,
+            prefix=f"{prefix}.attn",
         )
         self.output_dropout = torch.nn.Dropout(config.dropout_prob)
 
@@ -137,6 +137,7 @@ class GlmOcrVisionAttention(nn.Module):
             num_heads=self.num_attention_heads_per_partition,
             head_size=self.hidden_size_per_attention_head,
             scale=self.hidden_size_per_attention_head**-0.5,
+            prefix=f"{prefix}.attn",
         )
         self.apply_rotary_emb = ApplyRotaryEmb(enforce_enable=True)
 
@@ -166,7 +166,7 @@ class Idefics2VisionAttention(nn.Module):
             self.num_heads_per_partition,
             self.head_dim,
             self.scale,
-            prefix=prefix,
+            prefix=f"{prefix}.attn",
         )
 
     def forward(
@@ -215,7 +215,7 @@ class InternParallelAttention(nn.Module):
             self.num_heads_per_partition,
             self.head_dim,
             self.scale,
-            prefix=prefix,
+            prefix=f"{prefix}.attn",
         )
 
     def _apply_qk_norm(self, q: torch.Tensor, k: torch.Tensor):
@@ -220,7 +220,7 @@ class InternSdpaAttention(nn.Module):
             self.num_heads,
             self.head_dim,
             self.scale,
-            prefix=prefix,
+            prefix=f"{prefix}.attn",
         )
 
     def forward(self, x: torch.Tensor) -> torch.Tensor:
@@ -257,7 +257,7 @@ class Llama4VisionAttention(nn.Module):
             self.num_local_heads,
             self.head_dim,
             self.scaling,
-            prefix=prefix,
+            prefix=f"{prefix}.attn",
         )
 
         if use_data_parallel:
@@ -235,7 +235,7 @@ class MultiHeadDotProductAttention(nn.Module):
             self.head_dim,
             self.scale,
             num_kv_heads=self.num_kv_heads,
-            prefix=prefix,
+            prefix=f"{prefix}.attn",
         )
 
     def forward(
@@ -611,7 +611,7 @@ class ImagePoolingAttention(nn.Module):
             self.head_dim,
             self.scale,
             num_kv_heads=self.num_kv_heads,
-            prefix=prefix,
+            prefix=f"{prefix}.attn",
         )
 
     def forward_sdpa(
@@ -125,6 +125,7 @@ class OpenPanguVisionAttention(nn.Module):
             num_heads=self.num_attention_heads_per_partition,
             head_size=self.hidden_size_per_attention_head,
             scale=self.hidden_size_per_attention_head**-0.5,
+            prefix=f"{prefix}.attn",
         )
         self.apply_rotary_emb = ApplyRotaryEmb(enforce_enable=True)
 
@@ -345,7 +345,7 @@ class Qwen2_5_VisionAttention(nn.Module):
             num_heads=self.num_attention_heads_per_partition,
             head_size=self.hidden_size_per_attention_head,
             scale=self.hidden_size_per_attention_head**-0.5,
-            prefix=prefix,
+            prefix=f"{prefix}.attn",
         )
 
         self.apply_rotary_emb = ApplyRotaryEmb(enforce_enable=True)
@@ -319,7 +319,7 @@ class Qwen2VisionAttention(nn.Module):
             num_heads=self.num_attention_heads_per_partition,
             head_size=self.hidden_size_per_attention_head,
             scale=self.hidden_size_per_attention_head**-0.5,
-            prefix=prefix,
+            prefix=f"{prefix}.attn",
        )
 
         self.apply_rotary_emb = ApplyRotaryEmb(enforce_enable=True)
@@ -194,7 +194,7 @@ class Qwen3OmniMoeAudioAttention(nn.Module):
             num_heads=self.num_local_heads,
             head_size=self.head_dim,
             scale=self.scaling,
-            prefix=prefix,
+            prefix=f"{prefix}.attn",
         )
 
     def forward(
@@ -763,7 +763,7 @@ class Step3VisionAttention(nn.Module):
             self.num_heads,
             self.head_dim,
             self.scale,
-            prefix=prefix,
+            prefix=f"{prefix}.attn",
         )
 
     def forward(
@@ -224,7 +224,7 @@ class PerceptionEncoderVisionAttention(nn.Module):
             self.num_heads,
             self.head_dim,
             self.scale,
-            prefix=prefix,
+            prefix=f"{prefix}.attn",
         )
         self.rope = PerceptionEncoderRope2D(
             dim=self.head_dim,