[TPU] Support Pathways in vLLM (#21417)

Signed-off-by: wenxindongwork <wenxindong@google.com>
This commit is contained in:
wenxindongwork
2025-07-30 10:02:12 -07:00
committed by GitHub
parent f4135232b9
commit 8f0d516715
2 changed files with 17 additions and 6 deletions

View File

@@ -124,6 +124,7 @@ if TYPE_CHECKING:
VLLM_V1_USE_OUTLINES_CACHE: bool = False
VLLM_TPU_BUCKET_PADDING_GAP: int = 0
VLLM_TPU_MOST_MODEL_LEN: Optional[int] = None
VLLM_TPU_USING_PATHWAYS: bool = False
VLLM_USE_DEEP_GEMM: bool = False
VLLM_USE_FLASHINFER_MOE_FP8: bool = False
VLLM_USE_FLASHINFER_MOE_FP4: bool = False
@@ -900,6 +901,10 @@ environment_variables: dict[str, Callable[[], Any]] = {
"VLLM_TPU_MOST_MODEL_LEN":
lambda: maybe_convert_int(os.environ.get("VLLM_TPU_MOST_MODEL_LEN", None)),
# Whether Pathways is in use (detected via "proxy" in JAX_PLATFORMS)
"VLLM_TPU_USING_PATHWAYS":
lambda: bool("proxy" in os.getenv("JAX_PLATFORMS", "").lower()),
# Allow use of DeepGemm kernels for fused moe ops.
"VLLM_USE_DEEP_GEMM":
lambda: bool(int(os.getenv("VLLM_USE_DEEP_GEMM", "0"))),