[4/N] Initialize MM components in context managers (M-P) (#32663)

Signed-off-by: DarkLight1337 <tlleungac@connect.ust.hk>
Cyrus Leung
2026-01-20 22:06:32 +08:00
committed by GitHub
parent bb9172030e
commit fda3f03eb2
24 changed files with 290 additions and 353 deletions


@@ -707,30 +707,30 @@ class MiDashengLMModel(nn.Module, SupportsMultiModal, SupportsPP):
         config = vllm_config.model_config.hf_config
         quant_config = vllm_config.quant_config
         self.config = config
-        # Initialize audio components
-        self.audio_encoder = DashengAudioTransformer(
-            config.audio_encoder_config,
-            quant_config=quant_config,
-            prefix=maybe_prefix(prefix, "audio_encoder"),
-        )
-        self.audio_projector = AudioProjectorSubsample(
-            in_dim=config.audio_encoder_config.embed_dim,
-            out_dim=config.text_config.hidden_size,
-            downsample_rate=config.subsample_factor,
-            quant_config=quant_config,
-            prefix=maybe_prefix(prefix, "audio_projector"),
-        )
-        # Initialize language model (decoder)
-        self.decoder = init_vllm_registered_model(
-            vllm_config=vllm_config,
-            hf_config=config.text_config,
-            prefix=maybe_prefix(prefix, "decoder"),
-            architectures=["Qwen2ForCausalLM"],
-        )
         self.quant_config = quant_config
+        with self._mark_tower_model(vllm_config, "audio"):
+            self.audio_encoder = DashengAudioTransformer(
+                config.audio_encoder_config,
+                quant_config=quant_config,
+                prefix=maybe_prefix(prefix, "audio_encoder"),
+            )
+            self.audio_projector = AudioProjectorSubsample(
+                in_dim=config.audio_encoder_config.embed_dim,
+                out_dim=config.text_config.hidden_size,
+                downsample_rate=config.subsample_factor,
+                quant_config=quant_config,
+                prefix=maybe_prefix(prefix, "audio_projector"),
+            )
+        with self._mark_language_model(vllm_config):
+            self.decoder = init_vllm_registered_model(
+                vllm_config=vllm_config,
+                hf_config=config.text_config,
+                prefix=maybe_prefix(prefix, "decoder"),
+                architectures=["Qwen2ForCausalLM"],
+            )
         self.make_empty_intermediate_tensors = (
             self.decoder.make_empty_intermediate_tensors
         )
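
For readers unfamiliar with the pattern being rolled out in this PR series: the new _mark_tower_model / _mark_language_model context managers let vLLM know, at construction time, which multimodal component the submodules built inside the block belong to. Below is a minimal sketch of how such a marker could work; the helper name mark_component and the attribute name _mm_component are illustrative assumptions, not vLLM's actual implementation.

from contextlib import contextmanager
from collections.abc import Iterator

@contextmanager
def mark_component(vllm_config, component: str) -> Iterator[None]:
    # Hypothetical stand-in for _mark_tower_model / _mark_language_model:
    # temporarily tag the config so submodules constructed inside the
    # block can discover which multimodal component they belong to
    # (e.g. to apply per-component dtype or quantization overrides).
    # "_mm_component" is an assumed attribute name, not vLLM's real API.
    prev = getattr(vllm_config, "_mm_component", None)
    vllm_config._mm_component = component
    try:
        yield
    finally:
        # Restore the previous marker so nested/subsequent inits are clean.
        vllm_config._mm_component = prev

# Usage mirroring the diff above:
#   with mark_component(vllm_config, "audio_tower"):
#       self.audio_encoder = DashengAudioTransformer(...)

The try/finally restore is what makes the with-block form attractive here: the marker cannot leak past the component it was set for, even if construction raises.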
@@ -787,9 +787,6 @@ class MiDashengLMModel(nn.Module, SupportsMultiModal, SupportsPP):
         return torch.split(masked_audio_features, audio_output_lengths.tolist())
 
     def get_language_model(self) -> torch.nn.Module:
         return self.decoder
 
     def embed_multimodal(self, **kwargs: object) -> MultiModalEmbeddings:
         audio_input = self._parse_and_validate_audio_input(**kwargs)