fix: allow LFM2 MoE prefix caching (align) (#33376)

Signed-off-by: Tianshu Yu <tianshuyu.formal@gmail.com>
This commit is contained in:
tianshu-Michael-yu
2026-01-30 00:23:14 -08:00
committed by GitHub
parent ba45bedfd1
commit f45870b53f
2 changed files with 11 additions and 3 deletions

View File

@@ -651,9 +651,11 @@ class Lfm2MoeForCausalLM(
quant_config = vllm_config.quant_config
cache_config = vllm_config.cache_config
assert not cache_config.enable_prefix_caching, (
"Lfm2Moe currently does not support prefix caching"
)
if cache_config.mamba_cache_mode == "all":
raise NotImplementedError(
"Lfm2Moe currently does not support 'all' prefix caching, "
"please use '--mamba-cache-mode=align' instead"
)
super().__init__()
self.config = config

View File

@@ -22,6 +22,8 @@ from vllm.config import VllmConfig
from vllm.config.multimodal import BaseDummyOptions
from vllm.forward_context import set_forward_context
from vllm.model_executor.layers.mamba.mamba_utils import (
MambaStateCopyFunc,
MambaStateCopyFuncCalculator,
MambaStateDtypeCalculator,
MambaStateShapeCalculator,
)
@@ -584,6 +586,10 @@ class Lfm2VLForConditionalGeneration(
conv_kernel=hf_language_config.conv_L_cache,
)
@classmethod
def get_mamba_state_copy_func(cls) -> tuple[MambaStateCopyFunc]:
    """Return the mamba state-copy function(s) for this model.

    Delegates to ``MambaStateCopyFuncCalculator`` to obtain the copy
    function for the short-conv cache state used by this architecture.
    """
    copy_func = MambaStateCopyFuncCalculator.short_conv_state_copy_func()
    return copy_func
def __init__(self, *, vllm_config: VllmConfig, prefix: str = "model"):
super().__init__()
config: Lfm2VlConfig = vllm_config.model_config.hf_config