[MM Encoder]: Make MMEncoderAttention's scale take effect properly (#31950)

Signed-off-by: Isotr0py <mozf@mail2.sysu.edu.cn>
Author:    Isotr0py
Date:      2026-01-08 18:33:48 +08:00
Committer: GitHub
Parent:    5576227bc1
Commit:    2972a05473
11 changed files with 32 additions and 8 deletions

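The value added in every hunk below is the standard scaled-dot-product-attention factor, `scale = head_size ** -0.5` (i.e. 1/sqrt(d_head)). As a minimal sketch of where such a constructor kwarg ends up, assuming PyTorch >= 2.1 for the `scale` argument of `scaled_dot_product_attention` (the class below is illustrative, not the actual MMEncoderAttention implementation):

```python
import torch
import torch.nn.functional as F


class ToyEncoderAttention(torch.nn.Module):
    """Illustrative only: shows how an explicit `scale` kwarg takes effect."""

    def __init__(self, num_heads: int, head_size: int, scale: float | None = None):
        super().__init__()
        self.num_heads = num_heads
        self.head_size = head_size
        # Fall back to 1/sqrt(head_size) when the caller does not pass a scale;
        # the hunks below pass exactly this value explicitly.
        self.scale = scale if scale is not None else head_size**-0.5

    def forward(self, q: torch.Tensor, k: torch.Tensor, v: torch.Tensor) -> torch.Tensor:
        # q, k, v: (batch, num_heads, seq_len, head_size).
        # Forwarding `scale` here is what makes the constructor argument
        # actually influence the attention computation.
        return F.scaled_dot_product_attention(q, k, v, scale=self.scale)
```

Since `head_size ** -0.5` matches the default most attention kernels assume, passing it explicitly mainly makes the intended scale unambiguous once the argument is plumbed through to the backend.
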
@@ -271,6 +271,7 @@ class DotsVisionAttention(nn.Module):
         self.attn = MMEncoderAttention(
             num_heads=self.num_attention_heads_per_partition,
             head_size=self.hidden_size_per_attention_head,
+            scale=self.hidden_size_per_attention_head**-0.5,
             multimodal_config=multimodal_config,
             prefix=f"{prefix}.attn",
         )

@@ -152,6 +152,7 @@ class Ernie4_5_VisionAttention(nn.Module):
         self.attn = MMEncoderAttention(
             num_heads=self.num_attention_heads_per_partition,
             head_size=self.hidden_size_per_attention_head,
+            scale=self.hidden_size_per_attention_head**-0.5,
             multimodal_config=multimodal_config,
             prefix=f"{prefix}.attn",
         )

@@ -304,6 +304,7 @@ class Glm4vVisionAttention(nn.Module):
         self.attn = MMEncoderAttention(
             num_heads=self.num_attention_heads_per_partition,
             head_size=self.hidden_size_per_attention_head,
+            scale=self.hidden_size_per_attention_head**-0.5,
             multimodal_config=multimodal_config,
         )

@@ -188,6 +188,7 @@ class GlmAsrEncoderAttention(nn.Module):
         self.attn = MMEncoderAttention(
             num_heads=self.num_heads_per_rank,
             head_size=self.head_dim,
+            scale=self.head_dim**-0.5,
             num_kv_heads=self.num_kv_heads_per_rank,
             prefix=f"{prefix}.attn",
         )

@@ -984,6 +984,7 @@ class Siglip2VisionAttention(nn.Module):
         self.attn = MMEncoderAttention(
             num_heads=self.num_attention_heads_per_partition,
             head_size=self.hidden_size_per_attention_head,
+            scale=self.hidden_size_per_attention_head**-0.5,
             prefix=f"{prefix}.attn",
             multimodal_config=multimodal_config,
         )

@@ -390,6 +390,7 @@ class MoonVitEncoderLayer(nn.Module):
         self.attn = MMEncoderAttention(
             num_heads=self.num_attention_heads_per_partition,
             head_size=self.hidden_size_per_attention_head,
+            scale=self.hidden_size_per_attention_head**-0.5,
             multimodal_config=multimodal_config,
             prefix=f"{prefix}.attn",
         )

@@ -564,6 +564,7 @@ class SiglipAttention(nn.Module):
         self.attn = MMEncoderAttention(
             num_heads=self.num_attention_heads_per_partition,
             head_size=self.hidden_size_per_attention_head,
+            scale=self.hidden_size_per_attention_head**-0.5,
             multimodal_config=multimodal_config,
             prefix=f"{prefix}.attn",
         )

@@ -352,6 +352,7 @@ class Qwen2_5_VisionAttention(nn.Module):
         self.attn = MMEncoderAttention(
             num_heads=self.num_attention_heads_per_partition,
             head_size=self.hidden_size_per_attention_head,
+            scale=self.hidden_size_per_attention_head**-0.5,
             multimodal_config=multimodal_config,
         )

@@ -327,6 +327,7 @@ class Qwen2VisionAttention(nn.Module):
         self.attn = MMEncoderAttention(
             num_heads=self.num_attention_heads_per_partition,
             head_size=self.hidden_size_per_attention_head,
+            scale=self.hidden_size_per_attention_head**-0.5,
             multimodal_config=multimodal_config,
         )