[Model] Apply #32631 for recent models (#33785)

Signed-off-by: DarkLight1337 <tlleungac@connect.ust.hk>

commit e57ef99b40 (parent f8516a1ab9)
Author: Cyrus Leung
Date:   2026-02-04 20:23:01 +08:00
Committed by: GitHub

4 changed files with 46 additions and 50 deletions


@@ -222,22 +222,24 @@ class Eagle2_5_VLForConditionalGeneration(
         self.select_layer = getattr(config, "select_layer", -1)
 
-        # Vision encoder (SigLIP)
-        self.vision_model = self._init_vision_model(
-            config,
-            quant_config=quant_config,
-            prefix=maybe_prefix(prefix, "vision_model"),
-        )
+        with self._mark_tower_model(vllm_config, "image"):
+            # Vision encoder (SigLIP)
+            self.vision_model = self._init_vision_model(
+                config,
+                quant_config=quant_config,
+                prefix=maybe_prefix(prefix, "vision_model"),
+            )
 
-        # Language model (Qwen2)
-        self.language_model = init_vllm_registered_model(
-            vllm_config=vllm_config,
-            hf_config=config.text_config,
-            prefix=maybe_prefix(prefix, "language_model"),
-        )
+            # MLP projection
+            self.mlp1 = self._init_mlp1(config)
 
-        # MLP projection
-        self.mlp1 = self._init_mlp1(config)
+        with self._mark_language_model(vllm_config):
+            # Language model (Qwen2)
+            self.language_model = init_vllm_registered_model(
+                vllm_config=vllm_config,
+                hf_config=config.text_config,
+                prefix=maybe_prefix(prefix, "language_model"),
+            )
 
         self.img_context_token_id = None
@@ -399,9 +401,6 @@ class Eagle2_5_VLForConditionalGeneration(
         ]
         return image_embeds.split(image_feature_sizes)
 
-    def get_language_model(self) -> torch.nn.Module:
-        return self.language_model
-
     def embed_multimodal(self, **kwargs: object) -> MultiModalEmbeddings:
         """Embed multimodal inputs."""
         image_input = self._parse_and_validate_image_input(**kwargs)
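
The _mark_tower_model and _mark_language_model helpers come from #32631 and are not part of this diff. A minimal sketch of the convention they appear to establish, assuming context-manager semantics; MultiModalBaseSketch and the _tower_modalities bookkeeping are hypothetical names for illustration, not the actual implementation:

    from contextlib import contextmanager

    import torch.nn as nn


    class MultiModalBaseSketch(nn.Module):
        @contextmanager
        def _mark_tower_model(self, vllm_config, modalities):
            # Hypothetical bookkeeping: modules constructed inside this
            # block form the encoder tower for the given modality, passed
            # either as a str ("image") or a set ({"image", "video"}).
            if isinstance(modalities, str):
                modalities = {modalities}
            self._tower_modalities = getattr(self, "_tower_modalities", set())
            self._tower_modalities |= modalities
            yield

        @contextmanager
        def _mark_language_model(self, vllm_config):
            # Hypothetical bookkeeping: modules constructed inside this
            # block are tagged as the text backbone.
            yield

        def get_language_model(self) -> nn.Module:
            # With that convention, the base class can provide this
            # accessor once, which is why the identical per-model
            # overrides are deleted throughout this commit.
            return self.language_model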


@@ -820,9 +820,6 @@ class FunAudioChatForConditionalGeneration(nn.Module, SupportsMultiModal, Suppor
             self.language_model.make_empty_intermediate_tensors
         )
 
-    def get_language_model(self) -> torch.nn.Module:
-        return self.language_model
-
     def _get_continuous_audio_features(
         self,
         input_features: torch.Tensor,


@@ -843,20 +843,24 @@ class OpenPanguVLForConditionalGeneration(
         self.config = config
         self.vllm_config = vllm_config
         quant_config = vllm_config.quant_config
-        self.visual = OpenPanguVisionTransformer(
-            vision_config=config.vision_config,
-            out_hidden_size=config.vision_config.out_hidden_size,
-            hidden_size=config.hidden_size,
-            norm_eps=getattr(config.vision_config, "rms_norm_eps", 1e-6),
-            quant_config=self._maybe_ignore_quant_config(quant_config),
-            prefix=maybe_prefix(prefix, "visual"),
-        )
-        self.language_model = init_vllm_registered_model(
-            vllm_config=vllm_config,
-            prefix=maybe_prefix("openpangu", "language_model"),
-            architectures=["PanguEmbeddedForCausalLM"],
-        )
+
+        with self._mark_tower_model(vllm_config, {"image", "video"}):
+            self.visual = OpenPanguVisionTransformer(
+                vision_config=config.vision_config,
+                out_hidden_size=config.vision_config.out_hidden_size,
+                hidden_size=config.hidden_size,
+                norm_eps=getattr(config.vision_config, "rms_norm_eps", 1e-6),
+                quant_config=self._maybe_ignore_quant_config(quant_config),
+                prefix=maybe_prefix(prefix, "visual"),
+            )
+
+        with self._mark_language_model(vllm_config):
+            self.language_model = init_vllm_registered_model(
+                vllm_config=vllm_config,
+                prefix=maybe_prefix("openpangu", "language_model"),
+                architectures=["PanguEmbeddedForCausalLM"],
+            )
 
         self.make_empty_intermediate_tensors = (
             self.language_model.make_empty_intermediate_tensors
         )
@@ -1008,9 +1012,6 @@ class OpenPanguVLForConditionalGeneration(
         )
         return mm_input_by_modality
 
-    def get_language_model(self) -> torch.nn.Module:
-        return self.language_model
-
     def embed_multimodal(self, **kwargs: object) -> MultiModalEmbeddings | None:
         mm_input_by_modality = self._parse_and_validate_multimodal_inputs(**kwargs)
         if not mm_input_by_modality:


@@ -296,19 +296,21 @@ class Qwen3ASRForConditionalGeneration(
         multimodal_config = vllm_config.model_config.multimodal_config
         self.config = thinker_config
         self.multimodal_config = multimodal_config
 
-        self.audio_tower = Qwen3OmniMoeAudioEncoder(
-            thinker_config.audio_config,
-            prefix=maybe_prefix(prefix, "audio_tower"),
-        )
-
         self.quant_config = quant_config
-        self.language_model = Qwen3ForCausalLM(
-            vllm_config=vllm_config.with_hf_config(
-                thinker_config.text_config, architectures=["Qwen3ForCausalLM"]
-            ),
-            prefix=maybe_prefix(prefix, "language_model"),
-        )
+        with self._mark_tower_model(vllm_config, "audio"):
+            self.audio_tower = Qwen3OmniMoeAudioEncoder(
+                thinker_config.audio_config,
+                prefix=maybe_prefix(prefix, "audio_tower"),
+            )
+
+        with self._mark_language_model(vllm_config):
+            self.language_model = Qwen3ForCausalLM(
+                vllm_config=vllm_config.with_hf_config(
+                    thinker_config.text_config, architectures=["Qwen3ForCausalLM"]
+                ),
+                prefix=maybe_prefix(prefix, "language_model"),
+            )
 
         self.make_empty_intermediate_tensors = (
             self.language_model.make_empty_intermediate_tensors
@@ -363,9 +365,6 @@ class Qwen3ASRForConditionalGeneration(
         )
         return audio_features.split(audio_output_lengths.tolist())
 
-    def get_language_model(self) -> torch.nn.Module:
-        return self.language_model
-
     def embed_multimodal(self, **kwargs: object) -> MultiModalEmbeddings | None:
         mm_input_by_modality = self._parse_and_validate_multimodal_inputs(**kwargs)
        if not mm_input_by_modality:
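
Put together, each constructor in this commit converges on the same shape. A sketch under the same assumptions as the earlier block; NewVLModelSketch, build_vision_encoder, and build_text_backbone are hypothetical stand-ins, not vLLM helpers:

    from types import SimpleNamespace

    import torch.nn as nn


    def build_vision_encoder(config) -> nn.Module:
        # Hypothetical stand-in for the real vision-encoder constructor.
        return nn.Identity()


    def build_text_backbone(config) -> nn.Module:
        # Hypothetical stand-in for the real text-backbone constructor.
        return nn.Identity()


    class NewVLModelSketch(MultiModalBaseSketch):
        def __init__(self, *, vllm_config, prefix: str = ""):
            super().__init__()
            config = vllm_config.model_config.hf_config

            with self._mark_tower_model(vllm_config, "image"):
                # Every vision-side module is built inside the tower block.
                self.vision_model = build_vision_encoder(config)

            with self._mark_language_model(vllm_config):
                # The text backbone is built inside the language-model block.
                self.language_model = build_text_backbone(config)


    # The inherited accessor replaces the deleted per-model overrides.
    cfg = SimpleNamespace(model_config=SimpleNamespace(hf_config=None))
    model = NewVLModelSketch(vllm_config=cfg)
    assert model.get_language_model() is model.language_model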