[Bugfix] Fix qwen2.5-vl overflow issue (#13968)

Signed-off-by: Isotr0py <2037008807@qq.com>
This commit is contained in:
Isotr0py
2025-02-28 01:30:39 +08:00
committed by GitHub
parent 1dd422b64a
commit 7864875879
4 changed files with 22 additions and 15 deletions

View File

@@ -63,7 +63,7 @@ from .interfaces import SupportsLoRA, SupportsMultiModal, SupportsPP
from .qwen2_vl import Qwen2VLDummyInputsBuilder as Qwen2_5_VLDummyInputsBuilder
from .qwen2_vl import (Qwen2VLMultiModalProcessor, Qwen2VLProcessingInfo,
apply_rotary_pos_emb_vision)
-from .utils import (AutoWeightsLoader, WeightsMapper,
+from .utils import (AutoWeightsLoader, WeightsMapper, cast_overflow_tensors,
init_vllm_registered_model, maybe_prefix,
merge_multimodal_embeddings)
from .vision import get_vit_attn_backend
@@ -641,6 +641,11 @@ class Qwen2_5_VisionTransformer(nn.Module):
cu_seqlens=cu_seqlens_now,
rotary_pos_emb=rotary_pos_emb)
+# For Qwen2.5-VL-3B, float16 will overflow at last block
+# for long visual tokens sequences.
+if hidden_states.dtype == torch.float16:
+hidden_states = cast_overflow_tensors(hidden_states)
# adapter
hidden_states = self.merger(hidden_states)
reverse_indices = torch.argsort(window_index)