[Bugfix] Fix prefix strings for quantized VLMs (#9772)

Michael Goin
2024-10-29 19:02:59 -04:00
committed by GitHub
parent 8d7724104a
commit bc73e9821c
20 changed files with 288 additions and 97 deletions


@@ -415,10 +415,16 @@ class LlavaOnevisionForConditionalGeneration(nn.Module, SupportsMultiModal,
         # Initialize the vision tower only up to the required feature layer
         self.vision_tower = init_vision_tower_for_llava(
-            config, quant_config, require_post_norm=False)
+            config,
+            quant_config,
+            require_post_norm=False,
+            prefix="vision_tower")
         self.multi_modal_projector = LlavaOnevisionMultiModalProjector(config)
         self.language_model = init_vllm_registered_model(
-            config.text_config, cache_config, quant_config)
+            config.text_config,
+            cache_config,
+            quant_config,
+            prefix="language_model")
         self.image_newline = nn.Parameter(
             torch.empty(config.text_config.hidden_size))
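
Why the prefix matters: each sub-module is constructed with a dotted prefix so that the quantization config can match layers against their full checkpoint names, for example to leave an unquantized vision tower in full precision. The sketch below is a minimal, hypothetical illustration of that matching, not vLLM's actual QuantizationConfig API; the QuantConfig class and its ignore list are assumptions for illustration.

from typing import List


class QuantConfig:
    """Hypothetical stand-in for a quantization config with an ignore list."""

    def __init__(self, ignore: List[str]):
        self.ignore = ignore

    def is_quantized(self, prefix: str) -> bool:
        # A layer is skipped when its full dotted name falls under an
        # ignored prefix; everything else gets quantized.
        return not any(prefix.startswith(p) for p in self.ignore)


# Suppose the checkpoint quantizes only the language model and leaves the
# vision tower in full precision:
quant_config = QuantConfig(ignore=["vision_tower"])

# With the fix, vision-tower layers report names like
# "vision_tower.encoder.layers.0..." and are correctly skipped:
assert not quant_config.is_quantized("vision_tower.encoder.layers.0.self_attn")

# Before the fix, the prefix was empty, so the same layer looked like
# "encoder.layers.0..." and would have been wrongly quantized:
assert quant_config.is_quantized("encoder.layers.0.self_attn")

Under this assumed scheme, dropping the prefix does not fail loudly: the layer simply misses its entry in the ignore list and is quantized anyway, which is why the fix threads prefix="vision_tower" and prefix="language_model" through the constructors above.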