diff --git a/vllm/model_executor/models/eagle2_5_vl.py b/vllm/model_executor/models/eagle2_5_vl.py
index 3ce9b9c4d..19d21de5b 100644
--- a/vllm/model_executor/models/eagle2_5_vl.py
+++ b/vllm/model_executor/models/eagle2_5_vl.py
@@ -222,22 +222,24 @@ class Eagle2_5_VLForConditionalGeneration(
         self.select_layer = getattr(config, "select_layer", -1)
 
-        # Vision encoder (SigLIP)
-        self.vision_model = self._init_vision_model(
-            config,
-            quant_config=quant_config,
-            prefix=maybe_prefix(prefix, "vision_model"),
-        )
+        with self._mark_tower_model(vllm_config, "image"):
+            # Vision encoder (SigLIP)
+            self.vision_model = self._init_vision_model(
+                config,
+                quant_config=quant_config,
+                prefix=maybe_prefix(prefix, "vision_model"),
+            )
 
-        # Language model (Qwen2)
-        self.language_model = init_vllm_registered_model(
-            vllm_config=vllm_config,
-            hf_config=config.text_config,
-            prefix=maybe_prefix(prefix, "language_model"),
-        )
+        # MLP projection
+        self.mlp1 = self._init_mlp1(config)
 
-        # MLP projection
-        self.mlp1 = self._init_mlp1(config)
+        with self._mark_language_model(vllm_config):
+            # Language model (Qwen2)
+            self.language_model = init_vllm_registered_model(
+                vllm_config=vllm_config,
+                hf_config=config.text_config,
+                prefix=maybe_prefix(prefix, "language_model"),
+            )
 
         self.img_context_token_id = None
@@ -399,9 +401,6 @@ class Eagle2_5_VLForConditionalGeneration(
         ]
         return image_embeds.split(image_feature_sizes)
 
-    def get_language_model(self) -> torch.nn.Module:
-        return self.language_model
-
     def embed_multimodal(self, **kwargs: object) -> MultiModalEmbeddings:
         """Embed multimodal inputs."""
         image_input = self._parse_and_validate_image_input(**kwargs)
diff --git a/vllm/model_executor/models/funaudiochat.py b/vllm/model_executor/models/funaudiochat.py
index 16afec3cf..b7b8659a4 100644
--- a/vllm/model_executor/models/funaudiochat.py
+++ b/vllm/model_executor/models/funaudiochat.py
@@ -820,9 +820,6 @@ class FunAudioChatForConditionalGeneration(nn.Module, SupportsMultiModal, Suppor
             self.language_model.make_empty_intermediate_tensors
         )
 
-    def get_language_model(self) -> torch.nn.Module:
-        return self.language_model
-
     def _get_continuous_audio_features(
         self,
         input_features: torch.Tensor,
diff --git a/vllm/model_executor/models/openpangu_vl.py b/vllm/model_executor/models/openpangu_vl.py
index d7df2cbb4..e9288e6dd 100644
--- a/vllm/model_executor/models/openpangu_vl.py
+++ b/vllm/model_executor/models/openpangu_vl.py
@@ -843,20 +843,24 @@ class OpenPanguVLForConditionalGeneration(
         self.config = config
         self.vllm_config = vllm_config
         quant_config = vllm_config.quant_config
-        self.visual = OpenPanguVisionTransformer(
-            vision_config=config.vision_config,
-            out_hidden_size=config.vision_config.out_hidden_size,
-            hidden_size=config.hidden_size,
-            norm_eps=getattr(config.vision_config, "rms_norm_eps", 1e-6),
-            quant_config=self._maybe_ignore_quant_config(quant_config),
-            prefix=maybe_prefix(prefix, "visual"),
-        )
-        self.language_model = init_vllm_registered_model(
-            vllm_config=vllm_config,
-            prefix=maybe_prefix("openpangu", "language_model"),
-            architectures=["PanguEmbeddedForCausalLM"],
-        )
+        with self._mark_tower_model(vllm_config, {"image", "video"}):
+            self.visual = OpenPanguVisionTransformer(
+                vision_config=config.vision_config,
+                out_hidden_size=config.vision_config.out_hidden_size,
+                hidden_size=config.hidden_size,
+                norm_eps=getattr(config.vision_config, "rms_norm_eps", 1e-6),
+                quant_config=self._maybe_ignore_quant_config(quant_config),
+                prefix=maybe_prefix(prefix, "visual"),
+            )
+
+        with self._mark_language_model(vllm_config):
+            self.language_model = init_vllm_registered_model(
+                vllm_config=vllm_config,
+                prefix=maybe_prefix("openpangu", "language_model"),
+                architectures=["PanguEmbeddedForCausalLM"],
+            )
+
         self.make_empty_intermediate_tensors = (
             self.language_model.make_empty_intermediate_tensors
         )
@@ -1008,9 +1012,6 @@ class OpenPanguVLForConditionalGeneration(
         )
         return mm_input_by_modality
 
-    def get_language_model(self) -> torch.nn.Module:
-        return self.language_model
-
     def embed_multimodal(self, **kwargs: object) -> MultiModalEmbeddings | None:
         mm_input_by_modality = self._parse_and_validate_multimodal_inputs(**kwargs)
         if not mm_input_by_modality:
diff --git a/vllm/model_executor/models/qwen3_asr.py b/vllm/model_executor/models/qwen3_asr.py
index e63e03e23..9dac8d75b 100644
--- a/vllm/model_executor/models/qwen3_asr.py
+++ b/vllm/model_executor/models/qwen3_asr.py
@@ -296,19 +296,21 @@ class Qwen3ASRForConditionalGeneration(
         multimodal_config = vllm_config.model_config.multimodal_config
         self.config = thinker_config
         self.multimodal_config = multimodal_config
-
-        self.audio_tower = Qwen3OmniMoeAudioEncoder(
-            thinker_config.audio_config,
-            prefix=maybe_prefix(prefix, "audio_tower"),
-        )
         self.quant_config = quant_config
-        self.language_model = Qwen3ForCausalLM(
-            vllm_config=vllm_config.with_hf_config(
-                thinker_config.text_config, architectures=["Qwen3ForCausalLM"]
-            ),
-            prefix=maybe_prefix(prefix, "language_model"),
-        )
+        with self._mark_tower_model(vllm_config, "audio"):
+            self.audio_tower = Qwen3OmniMoeAudioEncoder(
+                thinker_config.audio_config,
+                prefix=maybe_prefix(prefix, "audio_tower"),
+            )
+
+        with self._mark_language_model(vllm_config):
+            self.language_model = Qwen3ForCausalLM(
+                vllm_config=vllm_config.with_hf_config(
+                    thinker_config.text_config, architectures=["Qwen3ForCausalLM"]
+                ),
+                prefix=maybe_prefix(prefix, "language_model"),
+            )
 
         self.make_empty_intermediate_tensors = (
             self.language_model.make_empty_intermediate_tensors
@@ -363,9 +365,6 @@ class Qwen3ASRForConditionalGeneration(
         )
         return audio_features.split(audio_output_lengths.tolist())
 
-    def get_language_model(self) -> torch.nn.Module:
-        return self.language_model
-
     def embed_multimodal(self, **kwargs: object) -> MultiModalEmbeddings | None:
         mm_input_by_modality = self._parse_and_validate_multimodal_inputs(**kwargs)
         if not mm_input_by_modality:
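
Note on the pattern above: each multimodal model now builds its encoder tower inside `self._mark_tower_model(vllm_config, <modalities>)` and its text backbone inside `self._mark_language_model(vllm_config)`. With the submodules tagged at construction time, the per-model `get_language_model()` overrides become redundant boilerplate and are deleted, presumably in favor of a shared base-class default. The helpers themselves are not part of this diff; the following is only a minimal sketch of how such marking context managers could work, and every attribute name in it (`current_tower_modalities`, `building_language_model`) is a hypothetical illustration, not vLLM's actual API.

# Minimal sketch, NOT the real vLLM implementation: _mark_tower_model and
# _mark_language_model as context managers that record which submodule is
# currently being constructed. Attribute names are assumptions.
from contextlib import contextmanager


class MarkSubmodulesMixin:
    @contextmanager
    def _mark_tower_model(self, vllm_config, modalities):
        # Accept a single modality ("audio") or a set ({"image", "video"}),
        # matching both call styles seen in the diff.
        if isinstance(modalities, str):
            modalities = {modalities}
        previous = getattr(vllm_config, "current_tower_modalities", None)
        vllm_config.current_tower_modalities = modalities  # hypothetical attribute
        try:
            yield
        finally:
            vllm_config.current_tower_modalities = previous

    @contextmanager
    def _mark_language_model(self, vllm_config):
        # Hypothetical flag: lets shared machinery (e.g. a base-class
        # get_language_model() default) identify the text backbone without
        # each model overriding the accessor.
        vllm_config.building_language_model = True
        try:
            yield
        finally:
            vllm_config.building_language_model = False

Under this assumption the constructors in the diff work unchanged: entering the `with` block sets the tag, the submodule is built, and the tag is restored on exit even if construction raises.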