[V1] [Hybrid] Enable piecewise CUDA Graph for mamba layers (#21194)

Signed-off-by: Thomas Parnell <tpa@zurich.ibm.com>
Thomas Parnell
2025-07-19 21:27:21 +02:00
committed by GitHub
parent 9f414a12ad
commit 881e3cbe3b
10 changed files with 100 additions and 31 deletions

@@ -11,6 +11,7 @@ from transformers import BambaConfig
 from vllm import envs
 from vllm.attention.layer import Attention
+from vllm.compilation.decorators import support_torch_compile
 from vllm.config import CacheConfig, VllmConfig
 from vllm.distributed import get_tensor_model_parallel_world_size
 from vllm.distributed.parallel_state import get_pp_group
@@ -122,11 +123,10 @@ class BambaMixerDecoderLayer(nn.Module):
             hidden_states, residual = self.input_layernorm(
                 hidden_states, residual)
-        hidden_states = self.mamba(hidden_states, mamba_cache_params,
-                                   mamba2_metadata)
+        output = torch.empty_like(hidden_states)
+        self.mamba(hidden_states, output, mamba_cache_params, mamba2_metadata)
         # Fully Connected
-        hidden_states, residual = self.pre_ff_layernorm(
-            hidden_states, residual)
+        hidden_states, residual = self.pre_ff_layernorm(output, residual)
         hidden_states = self.feed_forward(hidden_states)
         return hidden_states, residual
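
Note: the mixer call above now writes into a caller-allocated `output` tensor instead of returning a fresh one, so the mamba layer can be treated as an opaque op with stable input/output buffers, which is what piecewise CUDA Graph capture expects. A minimal, framework-free sketch of that pattern (ToyMixer and its sizes are illustrative, not vLLM code):

import torch
from torch import nn


class ToyMixer(nn.Module):
    """Illustrative stand-in for the mamba mixer; not the real MambaMixer2."""

    def __init__(self, hidden_size: int):
        super().__init__()
        self.proj = nn.Linear(hidden_size, hidden_size)

    def forward(self, hidden_states: torch.Tensor, output: torch.Tensor) -> None:
        # Write into the caller-provided buffer so the result lands at a
        # fixed address instead of in a freshly allocated tensor.
        output.copy_(self.proj(hidden_states))


mixer = ToyMixer(8)
hidden_states = torch.randn(4, 8)
# The caller owns the buffer, mirroring the diff:
#   output = torch.empty_like(hidden_states)
#   self.mamba(hidden_states, output, mamba_cache_params, mamba2_metadata)
output = torch.empty_like(hidden_states)
mixer(hidden_states, output)
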
@@ -169,7 +169,7 @@ class BambaAttentionDecoderLayer(nn.Module):
         self.max_position_embeddings = max_position_embeddings
 
         if hasattr(config, "partial_rotary_factor"):
-            rotary_dim = self.head_dim * config.partial_rotary_factor
+            rotary_dim = int(self.head_dim * config.partial_rotary_factor)
         elif hasattr(config, "attn_rotary_emb"):
             rotary_dim = config.attn_rotary_emb  # for backward compatibility
         else:
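
Note: the added `int(...)` cast matters because `partial_rotary_factor` is a fraction, so the product is a float, while the rotary embedding needs an integer dimension. With illustrative values (not taken from the Bamba config):

head_dim = 128
partial_rotary_factor = 0.5  # illustrative; read from the HF config in practice

rotary_dim = head_dim * partial_rotary_factor       # 64.0 -- float, rejected by size arguments
rotary_dim = int(head_dim * partial_rotary_factor)  # 64   -- integer dim as expected
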
@@ -258,6 +258,7 @@ ALL_DECODER_LAYER_TYPES = {
 }
 
 
+@support_torch_compile
 class BambaModel(nn.Module):
 
     def __init__(self, *, vllm_config: VllmConfig, prefix: str = ""):
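
Note: with `@support_torch_compile` on `BambaModel`, the model goes through vLLM's compilation path, where CUDA graphs can be captured piecewise for the segments between the attention/mamba custom ops, while those ops themselves run outside the captured graphs against fixed buffers. A generic PyTorch sketch of graph capture and replay (not vLLM's actual compilation machinery) showing why stable buffers matter:

import torch
from torch import nn

if torch.cuda.is_available():
    linear = nn.Linear(8, 8, device="cuda")
    static_in = torch.randn(4, 8, device="cuda")
    static_out = torch.empty_like(static_in)

    # Warm up on a side stream before capture, as CUDA graphs require.
    s = torch.cuda.Stream()
    s.wait_stream(torch.cuda.current_stream())
    with torch.cuda.stream(s):
        static_out.copy_(linear(static_in))
    torch.cuda.current_stream().wait_stream(s)

    # Capture the kernel sequence once...
    graph = torch.cuda.CUDAGraph()
    with torch.cuda.graph(graph):
        static_out.copy_(linear(static_in))

    # ...then replay it against the same buffers: refresh the input in place
    # and the new result appears in static_out after replay.
    static_in.copy_(torch.randn(4, 8, device="cuda"))
    graph.replay()
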