[Model] Fix Qwen3VL and Qwen3Omni after torch.compile changes (#27705)

Signed-off-by: Lukas Geiger <lukas.geiger94@gmail.com>
Signed-off-by: Roger Wang <hey@rogerw.io>
Co-authored-by: Roger Wang <hey@rogerw.io>
Authored by Lukas Geiger on 2025-10-29 05:28:20 +00:00, committed by GitHub
parent d2c33c397a
commit 0d8161b075
3 changed files with 17 additions and 16 deletions


@@ -836,10 +836,8 @@ class Qwen2_5_VisionTransformer(nn.Module):
         self,
         cu_seqlens: torch.Tensor,
     ) -> tuple[torch.Tensor, torch.Tensor]:
-        max_seqlen, seqlens = (
-            torch.zeros(1, device=cu_seqlens.device),
-            torch.zeros(1, device=cu_seqlens.device),
-        )
+        max_seqlen = torch.zeros([], device=cu_seqlens.device)
+        seqlens = torch.zeros(1, device=cu_seqlens.device)
         if (
             self.attn_backend == _Backend.FLASH_ATTN
             or self.attn_backend == _Backend.ROCM_AITER_FA
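
For context, a minimal sketch (plain PyTorch, independent of the vLLM code above) of the shape difference this hunk relies on: torch.zeros([]) allocates a 0-dim scalar tensor, while torch.zeros(1) allocates a 1-element 1-D tensor. The assumption here is that max_seqlen is later overwritten with a scalar reduction (e.g. a .max() over per-sequence lengths), so initializing it as a 0-dim tensor keeps its shape consistent across branches for torch.compile's shape tracing; that later assignment is not shown in this hunk.

    import torch

    # 0-dim "scalar" tensor vs. 1-element 1-D tensor.
    max_seqlen = torch.zeros([])   # shape: torch.Size([]), ndim == 0
    seqlens = torch.zeros(1)       # shape: torch.Size([1]), ndim == 1

    # A reduction such as .max() also yields a 0-dim tensor, so overwriting
    # max_seqlen with it does not change the tensor's shape (hypothetical
    # illustration of why the scalar initialization matters under compile).
    cu_seqlens = torch.tensor([0, 3, 7, 12])
    max_seqlen = (cu_seqlens[1:] - cu_seqlens[:-1]).max()
    print(max_seqlen.shape)  # torch.Size([])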