[BugFix] Add env variable to control PDL in LoRA (#32836)
Signed-off-by: Jee Jee Li <pandaleefree@gmail.com>
This commit is contained in:
@@ -251,6 +251,7 @@ if TYPE_CHECKING:
     VLLM_LOG_MODEL_INSPECTION: bool = False
     VLLM_DEBUG_MFU_METRICS: bool = False
     VLLM_DISABLE_LOG_LOGO: bool = False
+    VLLM_LORA_DISABLE_PDL: bool = False


 def get_default_cache_root():
@@ -1618,8 +1619,12 @@ environment_variables: dict[str, Callable[[], Any]] = {
     ),
     # Disable logging of vLLM logo at server startup time.
     "VLLM_DISABLE_LOG_LOGO": lambda: bool(int(os.getenv("VLLM_DISABLE_LOG_LOGO", "0"))),
+    # Disable PDL for LoRA, as enabling PDL with LoRA on SM100 causes
+    # Triton compilation to fail.
+    "VLLM_LORA_DISABLE_PDL": lambda: bool(int(os.getenv("VLLM_LORA_DISABLE_PDL", "0"))),
 }

 # --8<-- [end:env-vars-definition]
||||
@@ -310,4 +310,9 @@ def supports_pdl(device: torch.device | None = None) -> bool:
     Refer to: https://github.com/triton-lang/triton/blob/v3.5.0/python/tutorials/11-programmatic-dependent-launch.py
     """
     # PDL requires compute capability SM90 or above
-    return current_platform.is_cuda() and current_platform.has_device_capability(90)
+    return (
+        current_platform.is_cuda()
+        and current_platform.has_device_capability(90)
+        and not envs.VLLM_LORA_DISABLE_PDL
+    )
Reference in New Issue
Block a user