Convert formatting to use ruff instead of yapf + isort (#26247)
Signed-off-by: Harry Mellor <19981378+hmellor@users.noreply.github.com>
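Every hunk below makes the same mechanical change to the DotsOCR model code: yapf's paren-aligned continuation lines and isort's import wrapping are replaced by ruff's Black-style layout (4-space hanging indents, one argument per line with a trailing comma, the closing bracket on its own line, double quotes). A minimal self-contained sketch of the before/after shape follows; the helper and its arguments are hypothetical stand-ins, not code from this commit:

# Hypothetical illustration of the reformat applied throughout this diff.
def build_projection(hidden_size: int, output_size: int, bias: bool,
                     return_bias: bool, prefix: str, disable_tp: bool) -> dict:
    """Stand-in for layers such as ColumnParallelLinear in the hunks below."""
    return dict(hidden_size=hidden_size, output_size=output_size, bias=bias,
                return_bias=return_bias, prefix=prefix, disable_tp=disable_tp)

# Old style (yapf + isort): continuations aligned under the opening parenthesis.
proj_old = build_projection(4096, 4096, bias=True, return_bias=False,
                            prefix="merger.0", disable_tp=False)

# New style (ruff format): one argument per line, 4-space indent, a "magic"
# trailing comma, and the closing parenthesis on its own line.
proj_new = build_projection(
    4096,
    4096,
    bias=True,
    return_bias=False,
    prefix="merger.0",
    disable_tp=False,
)

assert proj_old == proj_new  # the reformat is behavior-preserving

The same pattern repeats in every hunk: only layout changes, never behavior.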
@@ -10,40 +10,52 @@ from torch.nn import LayerNorm
 from transformers.models.qwen2_vl import Qwen2VLProcessor
 
 from vllm.attention.backends.registry import _Backend
-from vllm.attention.layer import (check_upstream_fa_availability,
-                                  maybe_get_vit_flash_attn_backend)
+from vllm.attention.layer import (
+    check_upstream_fa_availability,
+    maybe_get_vit_flash_attn_backend,
+)
 from vllm.config import VllmConfig
 from vllm.config.multimodal import BaseDummyOptions
 from vllm.distributed import utils as dist_utils
 from vllm.distributed.parallel_state import (
-    get_tensor_model_parallel_rank, get_tensor_model_parallel_world_size)
+    get_tensor_model_parallel_rank,
+    get_tensor_model_parallel_world_size,
+)
 from vllm.model_executor.layers.activation import SiluAndMul
 from vllm.model_executor.layers.layernorm import RMSNorm
-from vllm.model_executor.layers.linear import (ColumnParallelLinear,
-                                               MergedColumnParallelLinear,
-                                               QKVParallelLinear,
-                                               RowParallelLinear)
+from vllm.model_executor.layers.linear import (
+    ColumnParallelLinear,
+    MergedColumnParallelLinear,
+    QKVParallelLinear,
+    RowParallelLinear,
+)
 from vllm.model_executor.layers.quantization import QuantizationConfig
 from vllm.model_executor.model_loader.weight_utils import default_weight_loader
-from vllm.model_executor.models.interfaces import (MultiModalEmbeddings,
-                                                   SupportsLoRA,
-                                                   SupportsMultiModal,
-                                                   SupportsPP)
+from vllm.model_executor.models.interfaces import (
+    MultiModalEmbeddings,
+    SupportsLoRA,
+    SupportsMultiModal,
+    SupportsPP,
+)
 from vllm.model_executor.models.module_mapping import MultiModelKeys
 from vllm.model_executor.models.qwen2 import Qwen2ForCausalLM
 from vllm.model_executor.models.qwen2_5_vl import Qwen2_5_VisionAttention
-from vllm.model_executor.models.qwen2_vl import (Qwen2VLDummyInputsBuilder,
-                                                 Qwen2VLMultiModalProcessor,
-                                                 Qwen2VLProcessingInfo)
-from vllm.model_executor.models.utils import (AutoWeightsLoader, WeightsMapper,
-                                              init_vllm_registered_model,
-                                              maybe_prefix)
+from vllm.model_executor.models.qwen2_vl import (
+    Qwen2VLDummyInputsBuilder,
+    Qwen2VLMultiModalProcessor,
+    Qwen2VLProcessingInfo,
+)
+from vllm.model_executor.models.utils import (
+    AutoWeightsLoader,
+    WeightsMapper,
+    init_vllm_registered_model,
+    maybe_prefix,
+)
 from vllm.model_executor.models.vision import get_vit_attn_backend
 from vllm.multimodal import MULTIMODAL_REGISTRY
 from vllm.multimodal.inputs import MultiModalDataDict
 from vllm.sequence import IntermediateTensors
-from vllm.transformers_utils.configs.dotsocr import (DotsOCRConfig,
-                                                     DotsVisionConfig)
+from vllm.transformers_utils.configs.dotsocr import DotsOCRConfig, DotsVisionConfig
 from vllm.utils.tensor_schema import TensorSchema, TensorShape
 
 from .vision import run_dp_sharded_mrope_vision_model
@@ -59,6 +71,7 @@ class DotsOCRImagePixelInputs(TensorSchema):
         - ni: Number of images
         - cps: Number of channels * patch_size * patch_size
     """
+
     type: Literal["pixel_values"]
 
     pixel_values: Annotated[torch.Tensor, TensorShape("np", "cps")]
@@ -72,18 +85,17 @@ class DotsOCRImageEmbeddingInputs(TensorSchema):
         - hs: Hidden size
         - ni: Number of images
     """
+
     type: Literal["image_embeds"]
 
     image_embeds: Annotated[torch.Tensor, TensorShape("nf", "hs")]
     image_grid_thw: Annotated[torch.Tensor, TensorShape("ni", 3)]
 
 
-DotsOCRImageInputs = Union[DotsOCRImagePixelInputs,
-                           DotsOCRImageEmbeddingInputs]
+DotsOCRImageInputs = Union[DotsOCRImagePixelInputs, DotsOCRImageEmbeddingInputs]
 
 
 class DotsOCRDummyInputsBuilder(Qwen2VLDummyInputsBuilder):
-
     def get_dummy_text(self, mm_counts: Mapping[str, int]) -> str:
         num_images = mm_counts.get("image", 0)
         return IMAGE_TOKEN * num_images
@@ -102,23 +114,22 @@ class DotsOCRDummyInputsBuilder(Qwen2VLDummyInputsBuilder):
         image_overrides = mm_options.get("image") if mm_options else None
 
         return {
-            "image":
-            self._get_dummy_images(width=target_width,
-                                   height=target_height,
-                                   num_images=num_images,
-                                   overrides=image_overrides),
+            "image": self._get_dummy_images(
+                width=target_width,
+                height=target_height,
+                num_images=num_images,
+                overrides=image_overrides,
+            ),
         }
 
 
 class DotsOCRProcessingInfo(Qwen2VLProcessingInfo):
-
     def get_hf_config(self) -> DotsOCRConfig:
         config = self.ctx.get_hf_config()
-        if not config.__class__.__name__ == 'DotsOCRConfig':
+        if not config.__class__.__name__ == "DotsOCRConfig":
             raise TypeError(f"Expected DotsOCRConfig, got {type(config)}")
 
-        if hasattr(config, "vision_config") and isinstance(
-                config.vision_config, dict):
+        if hasattr(config, "vision_config") and isinstance(config.vision_config, dict):
             config.vision_config = DotsVisionConfig(**config.vision_config)
 
         return config
@@ -138,8 +149,7 @@ class DotsOCRProcessingInfo(Qwen2VLProcessingInfo):
         self,
         **kwargs: object,
     ) -> Qwen2VLProcessor:
-        self.get_tokenizer(
-        ).image_token = IMAGE_TOKEN  # Ensure image token is set
+        self.get_tokenizer().image_token = IMAGE_TOKEN  # Ensure image token is set
         processor = self.ctx.get_hf_processor(
             Qwen2VLProcessor,
             **kwargs,
@@ -151,13 +161,14 @@ class DotsOCRProcessingInfo(Qwen2VLProcessingInfo):
 
 def rotate_half(x):
     """Rotates half the hidden dims of the input."""
-    x1 = x[..., :x.shape[-1] // 2]
-    x2 = x[..., x.shape[-1] // 2:]
+    x1 = x[..., : x.shape[-1] // 2]
+    x2 = x[..., x.shape[-1] // 2 :]
    return torch.cat((-x2, x1), dim=-1)
 
 
-def apply_rotary_pos_emb_vision(tensor: torch.Tensor,
-                                freqs: torch.Tensor) -> torch.Tensor:
+def apply_rotary_pos_emb_vision(
+    tensor: torch.Tensor, freqs: torch.Tensor
+) -> torch.Tensor:
     orig_dtype = tensor.dtype
     tensor = tensor.float()
 
@@ -175,23 +186,20 @@ def apply_rotary_pos_emb_vision(tensor: torch.Tensor,
 
 
 class VisionRotaryEmbedding(nn.Module):
-
     def __init__(self, dim: int, theta: float = 10000.0) -> None:
         super().__init__()
-        inv_freq = 1.0 / (theta
-                          **(torch.arange(0, dim, 2, dtype=torch.float) / dim))
+        inv_freq = 1.0 / (theta ** (torch.arange(0, dim, 2, dtype=torch.float) / dim))
         self.register_buffer("inv_freq", inv_freq, persistent=False)
 
     def forward(self, seqlen: int) -> torch.Tensor:
-        seq = torch.arange(seqlen,
-                           device=self.inv_freq.device,
-                           dtype=self.inv_freq.dtype)
+        seq = torch.arange(
+            seqlen, device=self.inv_freq.device, dtype=self.inv_freq.dtype
+        )
         freqs = torch.outer(seq, self.inv_freq)
         return freqs
 
 
 class PatchMerger(nn.Module):
-
     def __init__(
         self,
         dim: int,
@@ -210,19 +218,23 @@ class PatchMerger(nn.Module):
         self.ln_q = RMSNorm(context_dim, eps=1e-6)
 
         self.mlp = nn.Sequential(
-            ColumnParallelLinear(self.hidden_size,
-                                 self.hidden_size,
-                                 bias=True,
-                                 return_bias=False,
-                                 prefix=f"{prefix}.0",
-                                 disable_tp=use_data_parallel),
+            ColumnParallelLinear(
+                self.hidden_size,
+                self.hidden_size,
+                bias=True,
+                return_bias=False,
+                prefix=f"{prefix}.0",
+                disable_tp=use_data_parallel,
+            ),
             nn.GELU(),
-            RowParallelLinear(self.hidden_size,
-                              dim,
-                              bias=True,
-                              return_bias=False,
-                              prefix=f"{prefix}.2",
-                              disable_tp=use_data_parallel),
+            RowParallelLinear(
+                self.hidden_size,
+                dim,
+                bias=True,
+                return_bias=False,
+                prefix=f"{prefix}.2",
+                disable_tp=use_data_parallel,
+            ),
         )
 
     def forward(self, x: torch.Tensor) -> torch.Tensor:
@@ -234,26 +246,28 @@ class PatchMerger(nn.Module):
 
 
 class DotsVisionAttention(nn.Module):
-
-    def __init__(self,
-                 config,
-                 dim: int,
-                 num_heads: int = 16,
-                 bias: bool = True,
-                 *,
-                 quant_config: Optional[QuantizationConfig] = None,
-                 prefix: str = "",
-                 use_data_parallel: bool = False) -> None:
+    def __init__(
+        self,
+        config,
+        dim: int,
+        num_heads: int = 16,
+        bias: bool = True,
+        *,
+        quant_config: Optional[QuantizationConfig] = None,
+        prefix: str = "",
+        use_data_parallel: bool = False,
+    ) -> None:
         super().__init__()
 
         self.embed_dim = dim
-        self.tp_size = (1 if use_data_parallel else
-                        get_tensor_model_parallel_world_size())
-        self.tp_rank = (0 if use_data_parallel else
-                        get_tensor_model_parallel_rank())
+        self.tp_size = (
+            1 if use_data_parallel else get_tensor_model_parallel_world_size()
+        )
+        self.tp_rank = 0 if use_data_parallel else get_tensor_model_parallel_rank()
         self.hidden_size_per_attention_head = dist_utils.divide(dim, num_heads)
         self.num_attention_heads_per_partition = dist_utils.divide(
-            num_heads, self.tp_size)
+            num_heads, self.tp_size
+        )
         # qkv/proj follow Qwen2-VL style; bias controlled by arg
         self.qkv = QKVParallelLinear(
             hidden_size=dim,
@@ -262,31 +276,40 @@ class DotsVisionAttention(nn.Module):
             bias=bias,
             quant_config=quant_config,
             prefix=f"{prefix}.qkv",
-            disable_tp=use_data_parallel)
-        self.proj = RowParallelLinear(input_size=dim,
-                                      output_size=dim,
-                                      bias=bias,
-                                      quant_config=quant_config,
-                                      prefix=f"{prefix}.proj",
-                                      disable_tp=use_data_parallel)
+            disable_tp=use_data_parallel,
+        )
+        self.proj = RowParallelLinear(
+            input_size=dim,
+            output_size=dim,
+            bias=bias,
+            quant_config=quant_config,
+            prefix=f"{prefix}.proj",
+            disable_tp=use_data_parallel,
+        )
         # Select attention backend
         self.attn_backend = get_vit_attn_backend(
-            self.hidden_size_per_attention_head, torch.get_default_dtype())
+            self.hidden_size_per_attention_head, torch.get_default_dtype()
+        )
         self.use_upstream_fa = False
 
-        self.attn_backend, self.flash_attn_varlen_func \
-            = maybe_get_vit_flash_attn_backend(
+        self.attn_backend, self.flash_attn_varlen_func = (
+            maybe_get_vit_flash_attn_backend(
                 self.attn_backend,
                 self.use_upstream_fa,
             )
+        )
         if self.attn_backend not in {
-                _Backend.FLASH_ATTN, _Backend.TORCH_SDPA, _Backend.XFORMERS,
-                _Backend.ROCM_AITER_FA
+            _Backend.FLASH_ATTN,
+            _Backend.TORCH_SDPA,
+            _Backend.XFORMERS,
+            _Backend.ROCM_AITER_FA,
         }:
             raise RuntimeError(
-                f"Unsupported vision attention backend: {self.attn_backend}")
+                f"Unsupported vision attention backend: {self.attn_backend}"
+            )
         self.is_flash_attn_backend = self.attn_backend in {
-            _Backend.FLASH_ATTN, _Backend.ROCM_AITER_FA
+            _Backend.FLASH_ATTN,
+            _Backend.ROCM_AITER_FA,
         }
 
     def forward(
@@ -317,18 +340,23 @@ class DotsVisionAttention(nn.Module):
             q_ = q.reshape(bs * q.shape[1], q.shape[2], q.shape[3])
             k_ = k.reshape(bs * k.shape[1], k.shape[2], k.shape[3])
             v_ = v.reshape(bs * v.shape[1], v.shape[2], v.shape[3])
-            output = self.flash_attn_varlen_func(q_,
-                                                 k_,
-                                                 v_,
-                                                 cu_seqlens_q=cu_seqlens,
-                                                 cu_seqlens_k=cu_seqlens,
-                                                 max_seqlen_q=max_seqlen,
-                                                 max_seqlen_k=max_seqlen,
-                                                 dropout_p=0.0,
-                                                 causal=False)
-            context_layer = output.view(bs, -1,
-                                        self.num_attention_heads_per_partition,
-                                        self.hidden_size_per_attention_head)
+            output = self.flash_attn_varlen_func(
+                q_,
+                k_,
+                v_,
+                cu_seqlens_q=cu_seqlens,
+                cu_seqlens_k=cu_seqlens,
+                max_seqlen_q=max_seqlen,
+                max_seqlen_k=max_seqlen,
+                dropout_p=0.0,
+                causal=False,
+            )
+            context_layer = output.view(
+                bs,
+                -1,
+                self.num_attention_heads_per_partition,
+                self.hidden_size_per_attention_head,
+            )
         elif self.attn_backend == _Backend.TORCH_SDPA:
             outputs = []
             for i in range(1, len(cu_seqlens)):
@@ -337,21 +365,20 @@ class DotsVisionAttention(nn.Module):
                 q_i = q[:, s:e].permute(0, 2, 1, 3)
                 k_i = k[:, s:e].permute(0, 2, 1, 3)
                 v_i = v[:, s:e].permute(0, 2, 1, 3)
-                out_i = F.scaled_dot_product_attention(q_i,
-                                                       k_i,
-                                                       v_i,
-                                                       dropout_p=0.0)
+                out_i = F.scaled_dot_product_attention(q_i, k_i, v_i, dropout_p=0.0)
                 out_i = out_i.permute(0, 2, 1, 3)
                 outputs.append(out_i)
             context_layer = torch.cat(outputs, dim=1) if outputs else q[:, :0]
         elif self.attn_backend == _Backend.XFORMERS:
             from xformers import ops as xops
             from xformers.ops.fmha.attn_bias import BlockDiagonalMask
-            attn_bias = BlockDiagonalMask.from_seqlens(q_seqlen=seqlens,
-                                                       kv_seqlen=None,
-                                                       device=q.device)
+
+            attn_bias = BlockDiagonalMask.from_seqlens(
+                q_seqlen=seqlens, kv_seqlen=None, device=q.device
+            )
             context_layer = xops.memory_efficient_attention_forward(
-                q, k, v, attn_bias=attn_bias, p=0, scale=None)
+                q, k, v, attn_bias=attn_bias, p=0, scale=None
+            )
         else:
             raise RuntimeError("Unsupported attention backend")
 
@@ -363,31 +390,36 @@ class DotsVisionAttention(nn.Module):
 
 
 class DotsSwiGLUFFN(nn.Module):
-
-    def __init__(self,
-                 config,
-                 *,
-                 quant_config: Optional[QuantizationConfig] = None,
-                 prefix: str = "",
-                 use_data_parallel: bool = False):
+    def __init__(
+        self,
+        config,
+        *,
+        quant_config: Optional[QuantizationConfig] = None,
+        prefix: str = "",
+        use_data_parallel: bool = False,
+    ):
         super().__init__()
         hidden_features = config.intermediate_size
         in_features = config.embed_dim
         bias = config.use_bias
 
         # Referenced aimv2.py AIMv2SwiGLUFFN
-        self.fc13 = MergedColumnParallelLinear(in_features,
-                                               [hidden_features] * 2,
-                                               bias=bias,
-                                               quant_config=quant_config,
-                                               prefix=f"{prefix}.fc13",
-                                               disable_tp=use_data_parallel)
-        self.fc2 = RowParallelLinear(hidden_features,
-                                     in_features,
-                                     bias=bias,
-                                     quant_config=quant_config,
-                                     prefix=f"{prefix}.fc2",
-                                     disable_tp=use_data_parallel)
+        self.fc13 = MergedColumnParallelLinear(
+            in_features,
+            [hidden_features] * 2,
+            bias=bias,
+            quant_config=quant_config,
+            prefix=f"{prefix}.fc13",
+            disable_tp=use_data_parallel,
+        )
+        self.fc2 = RowParallelLinear(
+            hidden_features,
+            in_features,
+            bias=bias,
+            quant_config=quant_config,
+            prefix=f"{prefix}.fc2",
+            disable_tp=use_data_parallel,
+        )
         self.act_fn = SiluAndMul()
 
     def forward(self, x: torch.Tensor) -> torch.Tensor:
@@ -396,8 +428,7 @@ class DotsSwiGLUFFN(nn.Module):
         x, _ = self.fc2(x)
         return x
 
-    def load_weights(self, weights: Iterable[tuple[str,
-                                                   torch.Tensor]]) -> set[str]:
+    def load_weights(self, weights: Iterable[tuple[str, torch.Tensor]]) -> set[str]:
         stacked_params_mapping = [
             ("fc13", "fc1", 0),
             ("fc13", "fc3", 1),
@@ -405,7 +436,6 @@ class DotsSwiGLUFFN(nn.Module):
         params_dict = dict(self.named_parameters())
         loaded_params: set[str] = set()
         for name, loaded_weight in weights:
-
             for param_name, weight_name, shard_id in stacked_params_mapping:
                 if weight_name not in name:
                     continue
@@ -423,15 +453,13 @@ class DotsSwiGLUFFN(nn.Module):
                 continue
 
             param = params_dict[name]
-            weight_loader = getattr(param, "weight_loader",
-                                    default_weight_loader)
+            weight_loader = getattr(param, "weight_loader", default_weight_loader)
             weight_loader(param, loaded_weight)
             loaded_params.add(name)
         return loaded_params
 
 
 class DotsPatchEmbed(nn.Module):
-
     def __init__(self, config):
         super().__init__()
         self.num_channels = config.num_channels
@@ -448,15 +476,19 @@ class DotsPatchEmbed(nn.Module):
         self.norm = RMSNorm(config.embed_dim, eps=config.rms_norm_eps)
 
     def forward(self, x: torch.Tensor, grid_thw=None) -> torch.Tensor:
-        x = x.view(-1, self.num_channels, self.temporal_patch_size,
-                   self.patch_size, self.patch_size)[:, :, 0]
+        x = x.view(
+            -1,
+            self.num_channels,
+            self.temporal_patch_size,
+            self.patch_size,
+            self.patch_size,
+        )[:, :, 0]
         x = self.proj(x).view(-1, self.embed_dim)
         x = self.norm(x)
         return x
 
 
 class DotsViTPreprocessor(nn.Module):
-
     def __init__(self, config):
         super().__init__()
         self.patch_h = config.patch_size
@@ -471,7 +503,6 @@ class DotsViTPreprocessor(nn.Module):
 
 
 class DotsVisionBlock(nn.Module):
-
     def __init__(
         self,
         config,
@@ -482,27 +513,33 @@ class DotsVisionBlock(nn.Module):
     ):
         super().__init__()
 
-        self.attn = DotsVisionAttention(config,
-                                        config.embed_dim,
-                                        num_heads=config.num_attention_heads,
-                                        bias=config.use_bias,
-                                        quant_config=quant_config,
-                                        prefix=f"{prefix}.attn",
-                                        use_data_parallel=use_data_parallel)
+        self.attn = DotsVisionAttention(
+            config,
+            config.embed_dim,
+            num_heads=config.num_attention_heads,
+            bias=config.use_bias,
+            quant_config=quant_config,
+            prefix=f"{prefix}.attn",
+            use_data_parallel=use_data_parallel,
+        )
         self.norm1 = RMSNorm(config.embed_dim, eps=config.rms_norm_eps)
-        self.mlp = DotsSwiGLUFFN(config,
-                                 quant_config=quant_config,
-                                 prefix=f"{prefix}.mlp",
-                                 use_data_parallel=use_data_parallel)
+        self.mlp = DotsSwiGLUFFN(
+            config,
+            quant_config=quant_config,
+            prefix=f"{prefix}.mlp",
+            use_data_parallel=use_data_parallel,
+        )
         self.norm2 = RMSNorm(config.embed_dim, eps=config.rms_norm_eps)
 
-    def forward(self,
-                hidden_states: torch.Tensor,
-                *,
-                cu_seqlens: torch.Tensor,
-                rotary_pos_emb: torch.Tensor,
-                max_seqlen: Optional[int] = None,
-                seqlens: Optional[list[int]] = None) -> torch.Tensor:
+    def forward(
+        self,
+        hidden_states: torch.Tensor,
+        *,
+        cu_seqlens: torch.Tensor,
+        rotary_pos_emb: torch.Tensor,
+        max_seqlen: Optional[int] = None,
+        seqlens: Optional[list[int]] = None,
+    ) -> torch.Tensor:
         hidden_states = hidden_states + self.attn(
             self.norm1(hidden_states),
             cu_seqlens=cu_seqlens,
@@ -515,7 +552,6 @@ class DotsVisionBlock(nn.Module):
 
 
 class DotsVisionTransformer(nn.Module):
-
     def __init__(
         self,
         config: DotsVisionConfig,
@@ -535,26 +571,34 @@ class DotsVisionTransformer(nn.Module):
         head_dim = config.embed_dim // config.num_attention_heads
         self.rotary_pos_emb = VisionRotaryEmbedding(head_dim // 2)
         self.attn_backend = get_vit_attn_backend(
-            head_size=head_dim, dtype=torch.get_default_dtype())
-        if self.attn_backend != _Backend.FLASH_ATTN and \
-            check_upstream_fa_availability(torch.get_default_dtype()):
+            head_size=head_dim, dtype=torch.get_default_dtype()
+        )
+        if self.attn_backend != _Backend.FLASH_ATTN and check_upstream_fa_availability(
+            torch.get_default_dtype()
+        ):
             self.attn_backend = _Backend.FLASH_ATTN
         self.out_hidden_size = config.hidden_size
         # Keep blocks for compatibility with other vision towers
-        num_layers = (config.num_hidden_layers if num_hidden_layers_override
-                      is None else num_hidden_layers_override)
-        self.blocks = nn.ModuleList([
-            DotsVisionBlock(config,
-                            quant_config=quant_config,
-                            prefix=f"{prefix}.blocks.{i}",
-                            use_data_parallel=use_data_parallel)
-            for i in range(num_layers)
-        ])
+        num_layers = (
+            config.num_hidden_layers
+            if num_hidden_layers_override is None
+            else num_hidden_layers_override
+        )
+        self.blocks = nn.ModuleList(
+            [
+                DotsVisionBlock(
+                    config,
+                    quant_config=quant_config,
+                    prefix=f"{prefix}.blocks.{i}",
+                    use_data_parallel=use_data_parallel,
+                )
+                for i in range(num_layers)
+            ]
+        )
         if require_post_norm is None:
-            require_post_norm = (len(self.blocks) == config.num_hidden_layers)
+            require_post_norm = len(self.blocks) == config.num_hidden_layers
         if require_post_norm and self.config.post_norm:
-            self.post_trunk_norm = RMSNorm(config.embed_dim,
-                                           eps=config.rms_norm_eps)
+            self.post_trunk_norm = RMSNorm(config.embed_dim, eps=config.rms_norm_eps)
         else:
             self.post_trunk_norm = None
 
@@ -595,8 +639,7 @@ class DotsVisionTransformer(nn.Module):
                 )
                 wpos_ids = wpos_ids.permute(0, 2, 1, 3)
                 wpos_ids = wpos_ids.flatten()
-            pos_ids.append(
-                torch.stack([hpos_ids, wpos_ids], dim=-1).repeat(t, 1))
+            pos_ids.append(torch.stack([hpos_ids, wpos_ids], dim=-1).repeat(t, 1))
 
         return pos_ids
 
@@ -609,42 +652,45 @@ class DotsVisionTransformer(nn.Module):
         return rotary_pos_emb
 
     def compute_attn_mask_seqlen(
-            self, cu_seqlens: torch.Tensor
+        self, cu_seqlens: torch.Tensor
     ) -> tuple[Optional[int], Optional[list[int]]]:
         max_seqlen, seqlens = None, None
-        if (self.attn_backend == _Backend.FLASH_ATTN
-                or self.attn_backend == _Backend.ROCM_AITER_FA):
+        if (
+            self.attn_backend == _Backend.FLASH_ATTN
+            or self.attn_backend == _Backend.ROCM_AITER_FA
+        ):
             max_seqlen = (cu_seqlens[1:] - cu_seqlens[:-1]).max().item()
         elif self.attn_backend == _Backend.XFORMERS:
             seqlens = (cu_seqlens[1:] - cu_seqlens[:-1]).tolist()
         return max_seqlen, seqlens
 
-    def forward(self, hidden_states: torch.Tensor,
-                grid_thw: list[list[int]]) -> torch.Tensor:
+    def forward(
+        self, hidden_states: torch.Tensor, grid_thw: list[list[int]]
+    ) -> torch.Tensor:
         # Convert grid_thw to tensor (always expecting list format now)
-        grid_thw = torch.tensor(grid_thw,
-                                device=hidden_states.device,
-                                dtype=torch.long)
+        grid_thw = torch.tensor(grid_thw, device=hidden_states.device, dtype=torch.long)
         hidden_states = hidden_states.to(self.dtype)
         hidden_states = self.patch_embed(hidden_states, grid_thw)
 
         rotary_pos_emb = self.rot_pos_emb(grid_thw)
 
         cu_seqlens = torch.repeat_interleave(
-            grid_thw[:, 1] * grid_thw[:, 2], grid_thw[:, 0]).cumsum(
-                dim=0,
-                dtype=grid_thw.dtype
-                if torch.jit.is_tracing() else torch.int32,
-            )
+            grid_thw[:, 1] * grid_thw[:, 2], grid_thw[:, 0]
+        ).cumsum(
+            dim=0,
+            dtype=grid_thw.dtype if torch.jit.is_tracing() else torch.int32,
+        )
         cu_seqlens = F.pad(cu_seqlens, (1, 0), value=0)
 
         max_seqlen, seqlens = self.compute_attn_mask_seqlen(cu_seqlens)
         for blk in self.blocks:
-            hidden_states = blk(hidden_states,
-                                cu_seqlens=cu_seqlens,
-                                rotary_pos_emb=rotary_pos_emb,
-                                max_seqlen=max_seqlen,
-                                seqlens=seqlens)
+            hidden_states = blk(
+                hidden_states,
+                cu_seqlens=cu_seqlens,
+                rotary_pos_emb=rotary_pos_emb,
+                max_seqlen=max_seqlen,
+                seqlens=seqlens,
+            )
 
         if self.post_trunk_norm is not None:
             hidden_states = self.post_trunk_norm(hidden_states)
@@ -658,8 +704,7 @@ class DotsVisionTransformer(nn.Module):
     info=DotsOCRProcessingInfo,
     dummy_inputs=DotsOCRDummyInputsBuilder,
 )
-class DotsOCRForCausalLM(nn.Module, SupportsMultiModal, SupportsPP,
-                         SupportsLoRA):
+class DotsOCRForCausalLM(nn.Module, SupportsMultiModal, SupportsPP, SupportsLoRA):
     merge_by_field_config = True
 
     hf_to_vllm_mapper = WeightsMapper(
@@ -709,7 +754,8 @@ class DotsOCRForCausalLM(nn.Module, SupportsMultiModal, SupportsPP,
             vision_config,
             quant_config=self.quant_config,
             prefix=maybe_prefix(prefix, "vision_tower"),
-            use_data_parallel=self.use_data_parallel)
+            use_data_parallel=self.use_data_parallel,
+        )
         self.language_model: Qwen2ForCausalLM = init_vllm_registered_model(
             vllm_config=vllm_config,
             hf_config=self.config,
@@ -718,7 +764,8 @@ class DotsOCRForCausalLM(nn.Module, SupportsMultiModal, SupportsPP,
         )
 
     def _parse_and_validate_image_input(
-            self, **kwargs: object) -> Optional[DotsOCRImageInputs]:
+        self, **kwargs: object
+    ) -> Optional[DotsOCRImageInputs]:
         pixel_values = kwargs.pop("pixel_values", None)
         image_embeds = kwargs.pop("image_embeds", None)
         image_grid_thw = kwargs.pop("image_grid_thw", None)
@@ -727,27 +774,30 @@ class DotsOCRForCausalLM(nn.Module, SupportsMultiModal, SupportsPP,
             return None
 
         if pixel_values is not None:
-            return DotsOCRImagePixelInputs(type="pixel_values",
-                                           pixel_values=pixel_values,
-                                           image_grid_thw=image_grid_thw)
+            return DotsOCRImagePixelInputs(
+                type="pixel_values",
+                pixel_values=pixel_values,
+                image_grid_thw=image_grid_thw,
+            )
 
         if image_embeds is not None:
-            return DotsOCRImageEmbeddingInputs(type="image_embeds",
-                                               image_embeds=image_embeds,
-                                               image_grid_thw=image_grid_thw)
+            return DotsOCRImageEmbeddingInputs(
+                type="image_embeds",
+                image_embeds=image_embeds,
+                image_grid_thw=image_grid_thw,
+            )
 
     def _process_image_input(
-            self, image_input: DotsOCRImageInputs) -> tuple[torch.Tensor, ...]:
+        self, image_input: DotsOCRImageInputs
+    ) -> tuple[torch.Tensor, ...]:
         grid_thw = image_input["image_grid_thw"]
         assert grid_thw.ndim == 2
         grid_thw_list = grid_thw.tolist()
 
         if image_input["type"] == "image_embeds":
-            image_embeds = image_input["image_embeds"].type(
-                self.vision_tower.dtype)
+            image_embeds = image_input["image_embeds"].type(self.vision_tower.dtype)
         else:
-            pixel_values = image_input["pixel_values"].type(
-                self.vision_tower.dtype)
+            pixel_values = image_input["pixel_values"].type(self.vision_tower.dtype)
 
         if self.use_data_parallel:
             return run_dp_sharded_mrope_vision_model(
@@ -757,21 +807,23 @@ class DotsOCRForCausalLM(nn.Module, SupportsMultiModal, SupportsPP,
                 rope_type="rope_3d",
             )
         else:
-            image_embeds = self.vision_tower(
-                pixel_values, grid_thw)[:, :self.config.hidden_size]
+            image_embeds = self.vision_tower(pixel_values, grid_thw)[
+                :, : self.config.hidden_size
+            ]
 
         # Split concatenated embeddings for each image item.
         merge_size = self.vision_tower.spatial_merge_size
-        sizes = (torch.tensor(grid_thw_list, dtype=torch.long).prod(-1) //
-                 (merge_size * merge_size)).tolist()
+        sizes = (
+            torch.tensor(grid_thw_list, dtype=torch.long).prod(-1)
+            // (merge_size * merge_size)
+        ).tolist()
 
         return image_embeds.split(sizes)
 
     def get_language_model(self) -> torch.nn.Module:
         return self.language_model
 
-    def get_multimodal_embeddings(self,
-                                  **kwargs: object) -> MultiModalEmbeddings:
+    def get_multimodal_embeddings(self, **kwargs: object) -> MultiModalEmbeddings:
         image_input = self._parse_and_validate_image_input(**kwargs)
         if image_input is None:
             return []
@@ -812,8 +864,7 @@ class DotsOCRForCausalLM(nn.Module, SupportsMultiModal, SupportsPP,
     ) -> Optional[torch.Tensor]:
         return self.language_model.compute_logits(hidden_states)
 
-    def load_weights(self, weights: Iterable[tuple[str,
-                                                   torch.Tensor]]) -> set[str]:
+    def load_weights(self, weights: Iterable[tuple[str, torch.Tensor]]) -> set[str]:
         loader = AutoWeightsLoader(self)
         return loader.load_weights(weights, mapper=self.hf_to_vllm_mapper)