[Bugfix] adding chunking mechanism to fused_moe to handle large inputs (#6029)

This commit is contained in:
Avshalom Manevich
2024-07-02 00:08:29 +03:00
committed by GitHub
parent dec6fc6f3b
commit 12a59959ed
3 changed files with 74 additions and 48 deletions

View File

@@ -32,6 +32,7 @@ if TYPE_CHECKING:
VLLM_OPENVINO_CPU_KV_CACHE_PRECISION: Optional[str] = None
VLLM_OPENVINO_ENABLE_QUANTIZED_WEIGHTS: bool = False
VLLM_XLA_CACHE_PATH: str = "~/.vllm/xla_cache/"
VLLM_FUSED_MOE_CHUNK_SIZE: int = 64 * 1024
VLLM_USE_RAY_COMPILED_DAG: bool = False
VLLM_WORKER_MULTIPROC_METHOD: str = "fork"
VLLM_IMAGE_FETCH_TIMEOUT: int = 5
@@ -248,6 +249,8 @@ environment_variables: Dict[str, Callable[[], Any]] = {
# Only used for XLA devices such as TPUs.
"VLLM_XLA_CACHE_PATH":
lambda: os.getenv("VLLM_XLA_CACHE_PATH", "~/.vllm/xla_cache/"),
# Chunk size used to split large inputs to the fused MoE kernel;
# presumably a token count — default 65536 (64 * 1024). Verify units
# against the fused_moe chunking logic this commit introduces.
"VLLM_FUSED_MOE_CHUNK_SIZE":
lambda: int(os.getenv("VLLM_FUSED_MOE_CHUNK_SIZE", "65536")),
}
# end-env-vars-definition