[Models] Replace all nn.Conv2d with vLLM's Conv2dLayer (#28842)

Signed-off-by: Isotr0py <mozf@mail2.sysu.edu.cn>
This commit is contained in:
Isotr0py
2025-11-19 02:56:04 +08:00
committed by GitHub
parent c64c0b78de
commit e4bb2684bc
20 changed files with 83 additions and 45 deletions

View File

@@ -16,6 +16,7 @@ from vllm.attention.backends.registry import AttentionBackendEnum
from vllm.attention.layer import maybe_get_vit_flash_attn_backend
from vllm.distributed import divide, get_tensor_model_parallel_world_size
from vllm.model_executor.layers.activation import get_act_fn
+from vllm.model_executor.layers.conv import Conv2dLayer
from vllm.model_executor.layers.linear import (
ColumnParallelLinear,
LinearBase,
@@ -67,7 +68,7 @@ class Siglip2VisionEmbeddings(nn.Module):
self.position_embedding = nn.Embedding(self.num_patches, self.embed_dim)
else:
-self.patch_embedding = nn.Conv2d(
+self.patch_embedding = Conv2dLayer(
in_channels=config.num_channels,
out_channels=self.embed_dim,
kernel_size=self.patch_size,
@@ -99,7 +100,7 @@ class Siglip2VisionEmbeddings(nn.Module):
target_dtype = self.patch_embedding.weight.dtype
if isinstance(self.patch_embedding, LinearBase):
patch_embeds = self.patch_embedding(pixel_values.to(dtype=target_dtype))
-elif isinstance(self.patch_embedding, nn.Conv2d):
+elif isinstance(self.patch_embedding, Conv2dLayer):
pixel_values = pixel_values.view(
-1,
self.config.num_channels * self.config.temporal_patch_size,