[VLM] Minor space optimization for ClipVisionModel (#6436)

Author: Roger Wang
Date: 2024-07-15 02:29:51 -07:00
Committed by: GitHub
Parent: 22e79ee8f3
Commit: 6ae1597ddf
4 changed files with 66 additions and 39 deletions

@@ -214,22 +214,24 @@ class CLIPEncoder(nn.Module):
     def __init__(self,
                  config: CLIPVisionConfig,
-                 quant_config: Optional[QuantizationConfig] = None):
+                 quant_config: Optional[QuantizationConfig] = None,
+                 num_hidden_layers_override: Optional[int] = None):
         super().__init__()
         self.config = config
+        if num_hidden_layers_override is None:
+            num_hidden_layers = config.num_hidden_layers
+        else:
+            num_hidden_layers = num_hidden_layers_override
         self.layers = nn.ModuleList([
             CLIPEncoderLayer(config=config, quant_config=quant_config)
-            for _ in range(config.num_hidden_layers)
+            for _ in range(num_hidden_layers)
         ])

-    def forward(self,
-                inputs_embeds: torch.Tensor,
-                vision_feature_layer: int = -1):
-
-        # Encoder forward pass only up to the required layer
-        num_layer = len(self.layers) + vision_feature_layer + 1
+    def forward(self, inputs_embeds: torch.Tensor):
         hidden_states = inputs_embeds
-        for encoder_layer in self.layers[:num_layer]:
+        for encoder_layer in self.layers:
             hidden_states = encoder_layer(hidden_states)

         return hidden_states
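
With this hunk, the truncation moves from forward time to construction time: a caller that only needs an intermediate feature layer passes num_hidden_layers_override, so the deeper encoder layers are never instantiated at all. Below is a minimal sketch (not part of this diff) of how a caller might derive the override from a negative vision_feature_layer; it reuses the arithmetic of the removed slicing line, and the 24-layer config is purely illustrative:

from transformers import CLIPVisionConfig

# Illustrative config in the style of CLIP ViT-L/14: 24 encoder layers.
config = CLIPVisionConfig(num_hidden_layers=24)

vision_feature_layer = -2  # e.g. take features from the second-to-last layer

# Same arithmetic as the removed line:
#   num_layer = len(self.layers) + vision_feature_layer + 1
num_hidden_layers_override = config.num_hidden_layers + vision_feature_layer + 1
assert num_hidden_layers_override == 23  # the 24th layer is never built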
@@ -239,7 +241,8 @@ class CLIPVisionTransformer(nn.Module):
     def __init__(self,
                  config: CLIPVisionConfig,
-                 quant_config: Optional[QuantizationConfig] = None):
+                 quant_config: Optional[QuantizationConfig] = None,
+                 num_hidden_layers_override: Optional[int] = None):
         super().__init__()

         self.config = config
         embed_dim = config.hidden_size
@@ -249,18 +252,19 @@ class CLIPVisionTransformer(nn.Module):
         # NOTE: This typo of "layrnorm" is not fixed on purpose to match
         # the original transformers code and name of the model weights.
         self.pre_layrnorm = nn.LayerNorm(embed_dim, eps=config.layer_norm_eps)
-        self.encoder = CLIPEncoder(config=config, quant_config=quant_config)
+        self.encoder = CLIPEncoder(
+            config=config,
+            quant_config=quant_config,
+            num_hidden_layers_override=num_hidden_layers_override)

     def forward(
         self,
         pixel_values: torch.Tensor,
-        vision_feature_layer: int = -1,
     ) -> torch.Tensor:
         hidden_states = self.embeddings(pixel_values)
         hidden_states = self.pre_layrnorm(hidden_states)
-        hidden_states = self.encoder(inputs_embeds=hidden_states,
-                                     vision_feature_layer=vision_feature_layer)
+        hidden_states = self.encoder(inputs_embeds=hidden_states)

         return hidden_states
@@ -272,17 +276,17 @@ class CLIPVisionModel(nn.Module):
     def __init__(self,
                  config: CLIPVisionConfig,
-                 quant_config: Optional[QuantizationConfig] = None):
+                 quant_config: Optional[QuantizationConfig] = None,
+                 num_hidden_layers_override: Optional[int] = None):
         super().__init__()

-        self.vision_model = CLIPVisionTransformer(config=config,
-                                                  quant_config=quant_config)
+        self.vision_model = CLIPVisionTransformer(
+            config=config,
+            quant_config=quant_config,
+            num_hidden_layers_override=num_hidden_layers_override)

-    def forward(self,
-                pixel_values: Optional[torch.Tensor] = None,
-                vision_feature_layer: int = -1):
-
-        return self.vision_model(pixel_values=pixel_values,
-                                 vision_feature_layer=vision_feature_layer)
+    def forward(self, pixel_values: Optional[torch.Tensor] = None):
+        return self.vision_model(pixel_values=pixel_values)

     @property
     def device(self):
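
For a sense of the space saved: the old forward-time slicing allocated every layer's weights and merely skipped the unused ones, while the override never builds them. A rough comparison using Hugging Face transformers' own CLIPVisionModel (same name, but a different class from the vLLM one in this diff; layer counts are illustrative):

from transformers import CLIPVisionConfig, CLIPVisionModel

def n_params(model):
    return sum(p.numel() for p in model.parameters())

full = CLIPVisionModel(CLIPVisionConfig(num_hidden_layers=24))
trimmed = CLIPVisionModel(CLIPVisionConfig(num_hidden_layers=23))

# The gap is exactly one CLIPEncoderLayer's worth of weights, which the
# override now avoids allocating at all rather than allocating and skipping.
print(n_params(full) - n_params(trimmed))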