[Model] Avoid hardcoding pooling type (#32119)

Signed-off-by: DarkLight1337 <tlleungac@connect.ust.hk>
Author: Cyrus Leung
Date: 2026-01-12 13:28:12 +08:00
Committed by: GitHub
parent 025a32f9ed
commit 9101dc756c
6 changed files with 47 additions and 22 deletions


@@ -9,7 +9,6 @@ from transformers import RobertaConfig
 from vllm.config import ModelConfig, VllmConfig
 from vllm.model_executor.layers.pooler import DispatchPooler
-from vllm.model_executor.layers.pooler.seqwise import CLSPool
 from vllm.model_executor.layers.vocab_parallel_embedding import VocabParallelEmbedding
 from vllm.model_executor.models.bert import (
     TOKEN_TYPE_SHIFT,
@@ -86,7 +85,7 @@ class RobertaClassificationHead(nn.Module):
         )
     def forward(self, x: torch.Tensor) -> torch.Tensor:
-        # CLSPool has already been applied in `pooling`
+        # Token extraction has already been applied in `pooler.pooling`
         x = self.dense(x)
         x = torch.tanh(x)
         x = self.out_proj(x)
@@ -194,7 +193,6 @@ class RobertaForSequenceClassification(nn.Module, SupportsCrossEncoding):
         self.pooler = DispatchPooler.for_seq_cls(
             pooler_config,
-            pooling=CLSPool(),
             classifier=self.classifier,
         )
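
For context, a minimal sketch of the intent behind this change, assuming that DispatchPooler.for_seq_cls falls back to the pooling type declared in pooler_config when no explicit `pooling` argument is passed; the helper name build_seq_cls_pooler is illustrative and not part of the commit.

from vllm.model_executor.layers.pooler import DispatchPooler

def build_seq_cls_pooler(pooler_config, classifier):
    # Assumption for illustration: without an explicit `pooling=` argument,
    # for_seq_cls derives the pooling behavior from pooler_config rather than
    # the previously hardcoded CLSPool(), so a user-supplied pooler config can
    # change how sequence-classification inputs are pooled.
    return DispatchPooler.for_seq_cls(
        pooler_config,
        classifier=classifier,
    )

Deriving the pooling from configuration keeps per-architecture pooling assumptions out of the model code itself, which is the point of "avoid hardcoding pooling type".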