[small][batch invariance] Rename the env and internal flags to simplify usage (#26855)

Signed-off-by: Bram Wasti <bwasti@meta.com>
This commit is contained in:
Bram Wasti
2025-10-16 14:40:25 -07:00
committed by GitHub
parent 23583ee28c
commit b2f78cbad4
20 changed files with 61 additions and 61 deletions

View File

@@ -20,7 +20,7 @@ import vllm.envs as envs
from vllm.distributed.device_communicators.cuda_wrapper import CudaRTLibrary
from vllm.logger import init_logger
from vllm.model_executor.layers.batch_invariant import (
-    vllm_kernel_override_batch_invariant,
+    vllm_is_batch_invariant,
)
from vllm.utils import cuda_device_count_stateless, update_environment_variables
@@ -74,7 +74,7 @@ def should_nccl_symm_mem_allreduce(world_size: int, input_tensor: torch.Tensor)
is_symmetric_memory_enabled,
)
-    if vllm_kernel_override_batch_invariant():
+    if vllm_is_batch_invariant():
return False
if not is_symmetric_memory_enabled():

View File

@@ -10,7 +10,7 @@ from vllm.distributed.device_communicators.all_reduce_utils import (
)
from vllm.logger import init_logger
from vllm.model_executor.layers.batch_invariant import (
-    vllm_kernel_override_batch_invariant,
+    vllm_is_batch_invariant,
)
from vllm.platforms import current_platform
@@ -103,7 +103,7 @@ class SymmMemCommunicator:
return
self.force_multimem = force_multimem
self.disabled = False
-        if vllm_kernel_override_batch_invariant():
+        if vllm_is_batch_invariant():
self.disabled = True
def should_use_symm_mem(self, inp: torch.Tensor):