[V0 deprecation] Remove VLLM_USE_V1 usage in most modules (#27955)

Signed-off-by: wangxiyuan <wangxiyuan1007@gmail.com>
Author: wangxiyuan
Date: 2025-11-05 12:51:16 +08:00
Committed by: GitHub
Parent: 878fd5a16f
Commit: 428bc7bf1c
19 changed files with 107 additions and 238 deletions


@@ -285,10 +285,6 @@ class MambaModelConfig(VerifyAndUpdateConfig):
         Args:
             vllm_config: vLLM Config
         """
-        if not envs.VLLM_USE_V1:
-            return
-
         model_config = vllm_config.model_config
         cache_config = vllm_config.cache_config
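
For orientation, a minimal sketch of what a hook like this looks like once the V1 gate is gone. The class and method names mirror the diff; the stub config classes and the body below the comments are assumptions made only to keep the sketch self-contained and runnable, not vLLM's real definitions.

from dataclasses import dataclass, field


@dataclass
class CacheConfig:
    # Stub standing in for vllm.config.CacheConfig (assumption).
    block_size: int | None = None
    mamba_block_size: int | None = None


@dataclass
class ModelConfig:
    # Stub standing in for vllm.config.ModelConfig (assumption).
    max_model_len: int = 4096


@dataclass
class VllmConfig:
    # Stub standing in for vllm.config.VllmConfig (assumption).
    model_config: ModelConfig = field(default_factory=ModelConfig)
    cache_config: CacheConfig = field(default_factory=CacheConfig)


class MambaModelConfig:
    @classmethod
    def verify_and_update_config(cls, vllm_config: VllmConfig) -> None:
        # Before #27955 the method began with an early return:
        #     if not envs.VLLM_USE_V1:
        #         return
        # With V0 deprecated, V1 is the only engine, so the update
        # now runs unconditionally.
        model_config = vllm_config.model_config
        cache_config = vllm_config.cache_config
        # Illustrative body (assumption, not the real vLLM logic):
        # derive a cache block size when the user did not set one.
        if cache_config.block_size is None:
            cache_config.block_size = min(model_config.max_model_len, 1024)


# Usage: the hook mutates the config in place.
cfg = VllmConfig()
MambaModelConfig.verify_and_update_config(cfg)
assert cfg.cache_config.block_size == 1024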
@@ -329,10 +325,6 @@ class HybridAttentionMambaModelConfig(VerifyAndUpdateConfig):
         Args:
             vllm_config: vLLM Config
         """
-        if not envs.VLLM_USE_V1:
-            return
-
         # Save the user input before it gets modified by MambaModelConfig
         mamba_block_size = vllm_config.cache_config.mamba_block_size
         # Enable FULL_AND_PIECEWISE by default
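
The second hunk removes the same gate; a hedged sketch of the save-then-delegate pattern the surviving comments describe, reusing the stub classes from the previous sketch. The restore step is an assumption about intent, not vLLM's actual implementation, and the FULL_AND_PIECEWISE compilation handling is elided.

class HybridAttentionMambaModelConfig:
    @classmethod
    def verify_and_update_config(cls, vllm_config: VllmConfig) -> None:
        # Save the user input before it gets modified by MambaModelConfig
        # (comment preserved from the diff).
        mamba_block_size = vllm_config.cache_config.mamba_block_size
        MambaModelConfig.verify_and_update_config(vllm_config)
        # Restore the user's choice if the delegate overwrote it
        # (illustrative assumption, not the real logic).
        if mamba_block_size is not None:
            vllm_config.cache_config.mamba_block_size = mamba_block_size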